max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
ais3-pre-exam-2022-writeup/Misc/JeetQode/chall/problems/astmath.py | Jimmy01240397/balsn-2021-writeup | 0 | 7700 | from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
| from problem import Problem
from typing import Any, Tuple
from random import randint
import ast
import json
def gen_num():
return str(randint(1, 9))
def gen_op():
return "+-*/"[randint(0, 3)]
def gen_expr(depth):
if randint(0, depth) == 0:
l = gen_expr(depth + 1)
r = gen_expr(depth + 1)
op = gen_op()
return f"({l}{op}{r})"
return f"({gen_num()})"
class ASTMath(Problem):
@property
def name(self) -> str:
return "AST Math"
@property
def desciption(self) -> str:
return """
Input: An AST of Python's arithmetic expression (only +,-,*,/)
Output: Result number
Examples:
Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}}
Output: 3
Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}}
Output: 38
"""
@property
def rounds(self) -> int:
return 10
def dumps(self, x):
return json.dumps(
x, default=lambda x: x.__dict__ if len(x.__dict__) else str(x)
)
def generate_testcase(self) -> Tuple[bool, Any]:
l = gen_expr(1)
r = gen_expr(1)
op = gen_op()
expr = f"{l}{op}{r}"
try:
result = eval(expr)
except ZeroDivisionError:
return self.generate_testcase()
return ast.parse(expr, mode="eval"), result
| en | 0.3405 | Input: An AST of Python's arithmetic expression (only +,-,*,/) Output: Result number Examples: Input: {"body": {"left": {"value": 1, "kind": null, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 1}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null, "lineno": 1, "col_offset": 2, "end_lineno": 1, "end_col_offset": 3}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 3}} Output: 3 Input: {"body": {"left": {"left": {"value": 8, "kind": null, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 2}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 7, "kind": null, "lineno": 1, "col_offset": 3, "end_lineno": 1, "end_col_offset": 4}, "lineno": 1, "col_offset": 1, "end_lineno": 1, "end_col_offset": 4}, "op": "<_ast.Sub object at 0x7f20eb76ae80>", "right": {"left": {"value": 6, "kind": null, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 8}, "op": "<_ast.Mult object at 0x7f20eb76aee0>", "right": {"value": 3, "kind": null, "lineno": 1, "col_offset": 9, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 7, "end_lineno": 1, "end_col_offset": 10}, "lineno": 1, "col_offset": 0, "end_lineno": 1, "end_col_offset": 11}} Output: 38 | 3.271408 | 3 |
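The AST Math challenge above hands the player a JSON-serialized Python AST and expects the evaluated number back. The sketch below is a minimal solver assuming the layout shown in the description's examples (leaf nodes carry a `value` key, inner nodes carry `left`/`op`/`right`, and operators appear as stringified `_ast` objects); `eval_node` and `solve` are illustrative names, not part of the challenge code, and the embedded example is abridged from the first one above.

```python
import json
from operator import add, sub, mul, truediv

# Map the stringified operator objects (e.g. "<_ast.Add object at 0x...>")
# back to arithmetic functions by substring matching.
_OPS = {"Add": add, "Sub": sub, "Mult": mul, "Div": truediv}

def eval_node(node):
    """Recursively evaluate one JSON-serialized AST node."""
    if "value" in node:  # a serialized ast.Constant leaf
        return node["value"]
    op = next(fn for name, fn in _OPS.items() if name in node["op"])
    return op(eval_node(node["left"]), eval_node(node["right"]))

def solve(payload: str):
    tree = json.loads(payload)
    return eval_node(tree["body"])

# Abridged version of the first example in the description: 1 + 2
example = '{"body": {"left": {"value": 1, "kind": null}, "op": "<_ast.Add object at 0x7f0387ccde20>", "right": {"value": 2, "kind": null}}}'
assert solve(example) == 3
```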
pyllusion/movement/movement_circles.py | RebeccaHirst/Pyllusion | 0 | 7701 | <filename>pyllusion/movement/movement_circles.py
import numpy as np
from .movement_matrix import movement_matrix
from ..image import image_circles
def movement_circles(n=50, duration=2, fps=30, width=500, height=500, **kwargs):
"""
>>> import pyllusion as ill
>>>
>>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05)
>>> #ill.images_to_gif(images, path="mygif.gif", fps=30)
"""
n_frames = int(duration * fps)
x, y = movement_matrix(n_frames=n_frames, **kwargs)
# Generate PIL images
images = []
for i in range(n_frames):
images.append(
image_circles(width=width, height=height, n=n, x=x[i], y=y[i], **kwargs)
)
return images
| <filename>pyllusion/movement/movement_circles.py
import numpy as np
from .movement_matrix import movement_matrix
from ..image import image_circles
def movement_circles(n=50, duration=2, fps=30, width=500, height=500, **kwargs):
"""
>>> import pyllusion as ill
>>>
>>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05)
>>> #ill.images_to_gif(images, path="mygif.gif", fps=30)
"""
n_frames = int(duration * fps)
x, y = movement_matrix(n_frames=n_frames, **kwargs)
# Generate PIL images
images = []
for i in range(n_frames):
images.append(
image_circles(width=width, height=height, n=n, x=x[i], y=y[i], **kwargs)
)
return images
| en | 0.473067 | >>> import pyllusion as ill >>> >>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05) >>> #ill.images_to_gif(images, path="mygif.gif", fps=30) # Generate PIL images | 3.122378 | 3 |
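The docstring above already shows the intended call; the snippet below just makes the frame-count arithmetic explicit (`n_frames = int(duration * fps)`, so 2 s at 30 fps yields 60 frames). It assumes `pyllusion` is installed and exposes `movement_circles` and `images_to_gif` exactly as the docstring suggests.

```python
import pyllusion as ill

# One image_circles() frame per step of the movement matrix:
# int(duration * fps) = int(2 * 30) = 60 PIL images.
frames = ill.movement_circles(n=50, duration=2, fps=30, color="black", size=0.05)
print(len(frames))  # expected: 60
ill.images_to_gif(frames, path="circles.gif", fps=30)
```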
sce.py | hzwfl2/Semantic-consistent-Embedding | 2 | 7702 | <gh_stars>1-10
#%%
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC,LinearSVC
from torch import device
from torch.optim import optimizer
from torch.utils.data import DataLoader, Dataset
from read_data import create_data
#%%
class my_dataset(Dataset):
def __init__(self,data,attribute_label):
super(my_dataset,self).__init__()
self.data=data
self.attribute_label=attribute_label
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
batch_data=self.data[index]
batch_label=self.attribute_label[index]
return batch_data,batch_label
#%%
device=torch.device('cuda')
np.random.seed(904)
def pre_model(model, traindata, train_attributelabel, testdata, testlabel, attribute_matrix):
model_dict = {'rf': RandomForestClassifier(n_estimators=100),'NB': GaussianNB(),'SVC_linear': SVC(kernel='linear'),'LinearSVC':LinearSVC()}
res_list = []
for i in range(train_attributelabel.shape[1]):
clf = model_dict[model]
if max(train_attributelabel[:, i]) != 0:
clf.fit(traindata, train_attributelabel[:, i])
res = clf.predict(testdata)
else:
res = np.zeros(testdata.shape[0])
res_list.append(res.T)
test_pre_attribute = np.mat(np.row_stack(res_list)).T
label_lis = []
for i in range(test_pre_attribute.shape[0]):
pre_res = test_pre_attribute[i, :]
loc = (np.sum(np.square(attribute_matrix - pre_res), axis=1)).argmin()
label_lis.append(np.unique(testlabel)[loc])
label_lis = np.mat(np.row_stack(label_lis))
return test_pre_attribute,label_lis, testlabel
#%%
def off_diagonal(x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
#%%
class Embedding_Net(nn.Module):
def __init__(self,dim,lambda_):
super(Embedding_Net,self).__init__()
self.l11=nn.Linear(6,dim[0])
self.l12=nn.Linear(dim[0],dim[1])
self.l13=nn.Linear(2*dim[1],6)
self.l21=nn.Linear(4,dim[0])
self.l22=nn.Linear(dim[0],dim[1])
self.l23=nn.Linear(2*dim[1],4)
self.bn1=nn.BatchNorm1d(dim[0])
self.bn2=nn.BatchNorm1d(dim[1])
self.lambda_=lambda_
def compability_loss(self,z1,z2):
N,D=z1.shape
c=self.bn2(z1).T @ self.bn2(z2)/N
on_diag=torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag=off_diagonal(c).pow_(2).sum()
loss=on_diag+self.lambda_[3]*off_diag
return loss
def compute_loss(self,z1,z2,x,a,x_,a_):
loss_R1=self.lambda_[0]*F.mse_loss(a,a_)
loss_R2=self.lambda_[1]*F.mse_loss(x,x_)
loss_CM=self.compability_loss(z1,z2)
loss_CM=self.lambda_[2]*loss_CM
loss=loss_R1+loss_R2+loss_CM
return loss_R1,loss_R2,loss_CM,loss
def transform(self,x,a):
z1=self.l11(x)
z1=torch.relu(self.bn1(z1))
z1=self.l12(z1)
z2=self.l21(a)
z2=torch.relu(self.bn1(z2))
z2=self.l22(z2)
return z1,z2
def reconstruction(self,z1,z2):
f1=torch.cat([z1,z2],dim=1)
f2=torch.cat([z2,z1],dim=1)
x_=self.l13(f1)
a_=torch.sigmoid(self.l23(f2))
return x_,a_
def forward(self,x,a):
z1,z2=self.transform(x,a)
x_,a_=self.reconstruction(z1,z2)
loss_R1,loss_R2,loss_CM,loss=self.compute_loss(z1,z2,x,a,x_,a_)
package={'z1':z1,'z2':z2,'x':x,'x_':x_,'r1':loss_R1,
'r2':loss_R2,'cm':loss_CM,'loss':loss}
return package
#%%
datapath='data/classData.csv'
modes=['NB'] #'rf'
test_classes={'test_class':[2,3]}
for key,value in test_classes.items():
print('========================================{}:[{}:{}]========================================='.format(modes,key,value))
df = pd.read_csv(datapath)
df['fault_type'] = df['G'].astype('str') + df['C'].astype('str') + df['B'].astype('str') + df['A'].astype('str')
traindata,trainlabel,train_attributelabel, train_attributematrix,testdata,testlabel,test_attributelabel,test_attributematrix,attribute_matrix=create_data(df,value)
_,y_pre,y_true=pre_model(modes[0], traindata, train_attributelabel, testdata, testlabel, test_attributematrix)
original_acc=accuracy_score(y_pre,y_true)
traindata=torch.from_numpy(traindata).float().to(device)
label=torch.from_numpy(trainlabel.squeeze()).long().to(device)
testdata=torch.from_numpy(testdata).float().to(device)
batch_size=400
trainset=my_dataset(traindata,torch.from_numpy(train_attributelabel).float().to(device))
train_loader=DataLoader(trainset,batch_size=batch_size,shuffle=True)
lambda_=[1,1e-5,1,0.25]
dim=[6,12]
model=Embedding_Net(dim,lambda_=lambda_)
model.to(device)
optimizer=optim.RMSprop(model.parameters(),lr=1e-2)
L1,L2,L3,L=[],[],[],[]
model.train()
accs=[]
best_acc=0
for epoch in range(200):
model.train()
for batch,(batch_data,batch_label) in enumerate(train_loader):
optimizer.zero_grad()
package=model(batch_data,batch_label)
loss_R1,loss_R2,loss_CM,loss=package['r1'],package['r2'],package['cm'],package['loss']
loss.backward()
optimizer.step()
L1.append(loss_R1.item())
L2.append(loss_R2.item())
L3.append(loss_CM.item())
L.append(loss.item())
model.eval()
with torch.no_grad():
train_package=model(traindata,torch.from_numpy(train_attributelabel).float().to(device))
f_train=train_package['z1']
f_train=torch.cat([f_train,traindata],dim=1).detach().cpu().numpy()
test_package=model(testdata,torch.from_numpy(test_attributelabel).float().to(device))
f_test=test_package['z1']
f_test=torch.cat([f_test,testdata],dim=1).detach().cpu().numpy()
test_preattribute,label_lis, testlabel=pre_model(modes[0], f_train, train_attributelabel, f_test, testlabel, test_attributematrix)
acc=accuracy_score(label_lis, testlabel)
accs.append(acc)
if acc>best_acc:
best_acc=acc
print('epoch:{:d}, best_acc:{:.4f}'.format(epoch,best_acc))
print('finished! FDAT:{:.4f}, SCE:{:.4f}'.format(original_acc,best_acc))
# %% | #%%
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC,LinearSVC
from torch import device
from torch.optim import optimizer
from torch.utils.data import DataLoader, Dataset
from read_data import create_data
#%%
class my_dataset(Dataset):
def __init__(self,data,attribute_label):
super(my_dataset,self).__init__()
self.data=data
self.attribute_label=attribute_label
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
batch_data=self.data[index]
batch_label=self.attribute_label[index]
return batch_data,batch_label
#%%
device=torch.device('cuda')
np.random.seed(904)
def pre_model(model, traindata, train_attributelabel, testdata, testlabel, attribute_matrix):
model_dict = {'rf': RandomForestClassifier(n_estimators=100),'NB': GaussianNB(),'SVC_linear': SVC(kernel='linear'),'LinearSVC':LinearSVC()}
res_list = []
for i in range(train_attributelabel.shape[1]):
clf = model_dict[model]
if max(train_attributelabel[:, i]) != 0:
clf.fit(traindata, train_attributelabel[:, i])
res = clf.predict(testdata)
else:
res = np.zeros(testdata.shape[0])
res_list.append(res.T)
test_pre_attribute = np.mat(np.row_stack(res_list)).T
label_lis = []
for i in range(test_pre_attribute.shape[0]):
pre_res = test_pre_attribute[i, :]
loc = (np.sum(np.square(attribute_matrix - pre_res), axis=1)).argmin()
label_lis.append(np.unique(testlabel)[loc])
label_lis = np.mat(np.row_stack(label_lis))
return test_pre_attribute,label_lis, testlabel
#%%
def off_diagonal(x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
#%%
class Embedding_Net(nn.Module):
def __init__(self,dim,lambda_):
super(Embedding_Net,self).__init__()
self.l11=nn.Linear(6,dim[0])
self.l12=nn.Linear(dim[0],dim[1])
self.l13=nn.Linear(2*dim[1],6)
self.l21=nn.Linear(4,dim[0])
self.l22=nn.Linear(dim[0],dim[1])
self.l23=nn.Linear(2*dim[1],4)
self.bn1=nn.BatchNorm1d(dim[0])
self.bn2=nn.BatchNorm1d(dim[1])
self.lambda_=lambda_
def compability_loss(self,z1,z2):
N,D=z1.shape
c=self.bn2(z1).T @ self.bn2(z2)/N
on_diag=torch.diagonal(c).add_(-1).pow_(2).sum()
off_diag=off_diagonal(c).pow_(2).sum()
loss=on_diag+self.lambda_[3]*off_diag
return loss
def compute_loss(self,z1,z2,x,a,x_,a_):
loss_R1=self.lambda_[0]*F.mse_loss(a,a_)
loss_R2=self.lambda_[1]*F.mse_loss(x,x_)
loss_CM=self.compability_loss(z1,z2)
loss_CM=self.lambda_[2]*loss_CM
loss=loss_R1+loss_R2+loss_CM
return loss_R1,loss_R2,loss_CM,loss
def transform(self,x,a):
z1=self.l11(x)
z1=torch.relu(self.bn1(z1))
z1=self.l12(z1)
z2=self.l21(a)
z2=torch.relu(self.bn1(z2))
z2=self.l22(z2)
return z1,z2
def reconstruction(self,z1,z2):
f1=torch.cat([z1,z2],dim=1)
f2=torch.cat([z2,z1],dim=1)
x_=self.l13(f1)
a_=torch.sigmoid(self.l23(f2))
return x_,a_
def forward(self,x,a):
z1,z2=self.transform(x,a)
x_,a_=self.reconstruction(z1,z2)
loss_R1,loss_R2,loss_CM,loss=self.compute_loss(z1,z2,x,a,x_,a_)
package={'z1':z1,'z2':z2,'x':x,'x_':x_,'r1':loss_R1,
'r2':loss_R2,'cm':loss_CM,'loss':loss}
return package
#%%
datapath='data/classData.csv'
modes=['NB'] #'rf'
test_classes={'test_class':[2,3]}
for key,value in test_classes.items():
print('========================================{}:[{}:{}]========================================='.format(modes,key,value))
df = pd.read_csv(datapath)
df['fault_type'] = df['G'].astype('str') + df['C'].astype('str') + df['B'].astype('str') + df['A'].astype('str')
traindata,trainlabel,train_attributelabel, train_attributematrix,testdata,testlabel,test_attributelabel,test_attributematrix,attribute_matrix=create_data(df,value)
_,y_pre,y_true=pre_model(modes[0], traindata, train_attributelabel, testdata, testlabel, test_attributematrix)
original_acc=accuracy_score(y_pre,y_true)
traindata=torch.from_numpy(traindata).float().to(device)
label=torch.from_numpy(trainlabel.squeeze()).long().to(device)
testdata=torch.from_numpy(testdata).float().to(device)
batch_size=400
trainset=my_dataset(traindata,torch.from_numpy(train_attributelabel).float().to(device))
train_loader=DataLoader(trainset,batch_size=batch_size,shuffle=True)
lambda_=[1,1e-5,1,0.25]
dim=[6,12]
model=Embedding_Net(dim,lambda_=lambda_)
model.to(device)
optimizer=optim.RMSprop(model.parameters(),lr=1e-2)
L1,L2,L3,L=[],[],[],[]
model.train()
accs=[]
best_acc=0
for epoch in range(200):
model.train()
for batch,(batch_data,batch_label) in enumerate(train_loader):
optimizer.zero_grad()
package=model(batch_data,batch_label)
loss_R1,loss_R2,loss_CM,loss=package['r1'],package['r2'],package['cm'],package['loss']
loss.backward()
optimizer.step()
L1.append(loss_R1.item())
L2.append(loss_R2.item())
L3.append(loss_CM.item())
L.append(loss.item())
model.eval()
with torch.no_grad():
train_package=model(traindata,torch.from_numpy(train_attributelabel).float().to(device))
f_train=train_package['z1']
f_train=torch.cat([f_train,traindata],dim=1).detach().cpu().numpy()
test_package=model(testdata,torch.from_numpy(test_attributelabel).float().to(device))
f_test=test_package['z1']
f_test=torch.cat([f_test,testdata],dim=1).detach().cpu().numpy()
test_preattribute,label_lis, testlabel=pre_model(modes[0], f_train, train_attributelabel, f_test, testlabel, test_attributematrix)
acc=accuracy_score(label_lis, testlabel)
accs.append(acc)
if acc>best_acc:
best_acc=acc
print('epoch:{:d}, best_acc:{:.4f}'.format(epoch,best_acc))
print('finished! FDAT:{:.4f}, SCE:{:.4f}'.format(original_acc,best_acc))
# %% | el | 0.271039 | #%% #%% #%% #%% #%% #%% #'rf' # %% | 2.442718 | 2 |
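`compability_loss` above is a Barlow-Twins-style objective: batch-normalise the two embeddings, form their D x D cross-correlation matrix, pull the diagonal towards 1 and push the off-diagonal towards 0, weighted by `lambda_[3] = 0.25`. The toy sketch below reproduces that computation on made-up data (the shapes and the 0.05 perturbation are purely illustrative).

```python
import torch

def off_diagonal(x):
    # all elements of a square matrix except the main diagonal
    n, m = x.shape
    assert n == m
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

torch.manual_seed(0)
N, D = 8, 4
z1 = torch.randn(N, D)
z2 = z1 + 0.05 * torch.randn(N, D)                  # a slightly perturbed "second view"
z1 = (z1 - z1.mean(0)) / z1.std(0, unbiased=False)  # standardise, as BatchNorm1d would
z2 = (z2 - z2.mean(0)) / z2.std(0, unbiased=False)
c = z1.T @ z2 / N                                   # D x D cross-correlation matrix
on_diag = torch.diagonal(c).add(-1).pow(2).sum()    # matching dims -> correlation 1
off_diag = off_diagonal(c).pow(2).sum()             # non-matching dims -> correlation 0
loss = on_diag + 0.25 * off_diag                    # 0.25 = lambda_[3] in the script above
print(float(loss))
```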
graphsage/partition_predict.py | colirain/GraphSAGE | 0 | 7703 |
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
def predict(train_data, id_map):
num_classes = 3
placeholders = construct_placeholders(num_classes)
placeholders['features'] = train_data
# feed_dict = dict()
# train_data = train_data.astype('float32')
# feed_dict.update({placeholders['features']: train_data})
dim = []
# print("f:{}".format(len(train_data[0])))
dim.append(len(train_data[0]))
dim.append(FLAGS.dim_1)
dim.append(num_classes)
model = FCPartition(placeholders, dim)
sess = tf.Session()
model.load(sess)
results = model.predict()
results_np = results.eval(session=sess)
# print(results.eval(session=sess))
# print(results_np.shape)
id_map = id_map.astype('int')
results_np = np.expand_dims(results_np, axis=1)
results_np = np.insert(results_np, 0, id_map, axis=1)
results_np = results_np[results_np[:,0].argsort()]
print(results_np)
np.save(FLAGS.outDir+'/predict_predict.npy', results_np)
def main():
print("load data ...")
train_data = load_embedded_data(FLAGS.train_prefix)
id_map = load_embedded_idmap(FLAGS.train_prefix)
predict(train_data, id_map)
if __name__ == '__main__':
main() |
import tensorflow as tf
import numpy as np
from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
# new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
# new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
# new_saver.run()
# print(new_saver)
def predict(train_data, id_map):
num_classes = 3
placeholders = construct_placeholders(num_classes)
placeholders['features'] = train_data
# feed_dict = dict()
# train_data = train_data.astype('float32')
# feed_dict.update({placeholders['features']: train_data})
dim = []
# print("f:{}".format(len(train_data[0])))
dim.append(len(train_data[0]))
dim.append(FLAGS.dim_1)
dim.append(num_classes)
model = FCPartition(placeholders, dim)
sess = tf.Session()
model.load(sess)
results = model.predict()
results_np = results.eval(session=sess)
# print(results.eval(session=sess))
# print(results_np.shape)
id_map = id_map.astype('int')
results_np = np.expand_dims(results_np, axis=1)
results_np = np.insert(results_np, 0, id_map, axis=1)
results_np = results_np[results_np[:,0].argsort()]
print(results_np)
np.save(FLAGS.outDir+'/predict_predict.npy', results_np)
def main():
print("load data ...")
train_data = load_embedded_data(FLAGS.train_prefix)
id_map = load_embedded_idmap(FLAGS.train_prefix)
predict(train_data, id_map)
if __name__ == '__main__':
main() | en | 0.348444 | # flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)') # DIR = 'trained_models' # MODEL = 'partition' # with tf.Session() as sess: # new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta') # new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./')) # new_saver.run() # print(new_saver) # feed_dict = dict() # train_data = train_data.astype('float32') # feed_dict.update({placeholders['features']: train_data}) # print("f:{}".format(len(train_data[0]))) # print(results.eval(session=sess)) # print(results_np.shape) | 2.04703 | 2 |
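The post-processing above attaches each node id to its prediction and re-orders the rows by id before saving. A toy NumPy illustration of that `expand_dims` / `insert` / `argsort` bookkeeping, with invented ids and predictions, is shown below.

```python
import numpy as np

preds = np.array([2, 0, 1])               # e.g. predicted partition per node
ids = np.array([7, 3, 5])                 # node ids in model-output order
table = np.expand_dims(preds, axis=1)     # shape (3, 1)
table = np.insert(table, 0, ids, axis=1)  # shape (3, 2): [id, prediction]
table = table[table[:, 0].argsort()]      # rows re-ordered by id: 3, 5, 7
print(table)                              # [[3 0] [5 1] [7 2]]
```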
scripts/generate_XML_files/DS1/annotatedsen_to_xml.py | AmmarQaseem/CPI-Pipeline-test | 0 | 7704 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol).
"""
# module to make use of regular expressions
import re
# set the default encoding to utf8 and ignore all decoding/encoding steps.
# (ToDo: check whether the encoding command is needed - debug)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# optparse - Parser for command-line options
from optparse import OptionParser
# import this function to add quotation arround the input text and ignore the extra quotations inside the sentence text
#from xml.sax.saxutils import escape # (ToDo: not needed - debug)
from xml.sax.saxutils import quoteattr
### MAIN PART OF THE SCRIPT ###
if __name__=="__main__":
# configure parsing of command-line arguments
parser= OptionParser()
parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv")
parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml")
(options,args)=parser.parse_args()
# save parameters in an extra variable
input_file= options.i
output_file = options.o
# open input file with annotated sentences
infile = open(input_file,"r")
# open output file
outfile = open(output_file,"w")
#example for the input format:
#18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction
#example for the output format
"""
<?xml version="1.0" encoding="UTF-8">
<corpus source="DS1">
<document id="DS1.d0" origId="18227838">
<sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/>
<entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/>
<entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/>
<entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/>
<interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" />
<interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" />
</sentence>
[...]
</document>
[...]
</corpus>
"""
# add XML header and define corpus source
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outfile.write("<corpus source=\"DS1\">"+"\n")
# variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not
# the document ID refers to the PubMed ID (origID)
pre_pmid=""
# doc_num counts the number of created documents
doc_num =0
# read lines in CSV file
for line in infile :
# tab-separated format
temp = line.strip().split("\t")
# get PubMed ID, sentences ID, and the sentence itself
# (ToDo: use a split command instead of this regular expression - debug)
curr_pmid = re.match('(\d{8})',temp[0]).group(0)
pmid_sent_num = temp[0]
sentence_text = temp[1]
# find all annotated proteins and compounds by matching their tags
pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))]
cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))]
# join the two lists
positions = pro_positions + cmp_positions
positions.sort()
#Initialize the list with the number of identified tags
entity_list =[]
entity_list=[0]*len(positions)
# iterate over all identified positions of the identified tags
for i in range(len(positions)):
# initialze the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset)
entity_list[i]=[0]*4
# store these four elements with grouping in the regular expression
obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]])
entity_list[i][0]=obj.group(1) #entity_type
entity_list[i][1]=obj.group(2) #entity_id
entity_list[i][2]=obj.group(3) #entity_text
entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")")
# the entity_charoffset will be assign later after having the pure sentence text generated (without any tags)
# the sentence without any tags will be generated by deleting all tags via text concatenation
# initialize (ToDo: initialization like this not needed - debug)
pur_sent_text = sentence_text
# enumerate over the list of positions (index, value)
for i,e in reversed(list(enumerate(positions))):
pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:]
# get the character offset of all identified synonyms
# decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc.
# make use of a list of repeated synonyms and synonym positions
repeated_syn_pos =[]
rep_syn =[]
for i in range(len(entity_list)) :
# check whether this is the fist occurrence of the current synonym
if not entity_list[i][2] in rep_syn :
# get the list of positions of all occurences of the current synonym
u_pur_sent_text = pur_sent_text.decode("utf8")
charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))]
# check whether it occures only once such that the charoffsetone directly be assigned
if len(charoffset_value) == 1 :
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
else:
# if it occures more than one time, the charoffset has to be assigned according to the first pair of positions
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
# append this synonym to the rep_syn list to store all repeated synonyms in this sentence
rep_syn.append(entity_list[i][2])
# delete the fist pair of positions from the list
charoffset_value = charoffset_value[1:]
# add the rest of positions pairs for the current synonym to another list
for j in range(len(charoffset_value)):
repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]])
else:
# this case refers to at least the second occurrence of the synonym
# for each repeated synonym, assign the first position pair from the repeated_syn_pos list
for k in range(len(repeated_syn_pos)):
if repeated_syn_pos[k][0] == entity_list[i][2]:
break
entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2])
# get pairs and their interaction status (separated by a double underscore)
listof_int_noint = temp[2:]
interaction_list=[0]*len(listof_int_noint)
for i in range(len(listof_int_noint)):
interaction_list[i]=listof_int_noint[i].split('__')
# interaction/no_interaction corresponds to True/False
TF_int_list=[0]*len(interaction_list)
for intid in range(len(interaction_list)) :
if interaction_list[intid][2]=="interaction" :
TF_int_list[intid]="True"
else :
TF_int_list[intid]="False"
# debug:
# print TF_int_list
# build XML structure
# check whether the PubMed ID changed in comparision to the last parsed sentence
if curr_pmid == pre_pmid :
# if this is the case, only the sentence ID has to be increased
sent_num +=1
# add sentence ID using the current document number
# (doc_num has to be decreased by one, because this index is automatically increased after each sentence)
# all openning and closing squared brackets ([,]) should be replaced with round brackets, because they will make problems in the tokenization step of the (preprocessing) pipeline
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# build entity tags according to the list identified tags from the CSV file (entity_list)
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# insert types of interaction for each pair of entities
# get the index of the synonym interactions in entity_list
origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# if the current PubMed ID changed in comparison to the last parsed sentences
else :
if not doc_num == 0 :
outfile.write(" </document>\n")
sent_num =0
# a new document tag has to be opened and the sentences can be added
outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n")
# replace squared brackets ([,]) with round brackets
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# now have to make entity tags according to entity_list data.
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# build entity tags
origId = "DS1.d"+str(doc_num)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# set new PubMed ID as the last parsed document ID and increase document index
pre_pmid = curr_pmid
doc_num+=1
# close document tag
outfile.write("</document>\n")
# close corpus tag
outfile.write("</corpus>\n")
# close files
infile.close()
outfile.close()
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2015, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol).
"""
# module to make use of regular expressions
import re
# set the default encoding to utf8 and ignore all decoding/encoding steps.
# (ToDo: check whether the encoding command is needed - debug)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# optparse - Parser for command-line options
from optparse import OptionParser
# import this function to add quotation arround the input text and ignore the extra quotations inside the sentence text
#from xml.sax.saxutils import escape # (ToDo: not needed - debug)
from xml.sax.saxutils import quoteattr
### MAIN PART OF THE SCRIPT ###
if __name__=="__main__":
# configure parsing of command-line arguments
parser= OptionParser()
parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv")
parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml")
(options,args)=parser.parse_args()
# save parameters in an extra variable
input_file= options.i
output_file = options.o
# open input file with annotated sentences
infile = open(input_file,"r")
# open output file
outfile = open(output_file,"w")
#example for the input format:
#18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction
#example for the output format
"""
<?xml version="1.0" encoding="UTF-8">
<corpus source="DS1">
<document id="DS1.d0" origId="18227838">
<sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/>
<entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/>
<entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/>
<entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/>
<interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" />
<interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" />
</sentence>
[...]
</document>
[...]
</corpus>
"""
# add XML header and define corpus source
outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outfile.write("<corpus source=\"DS1\">"+"\n")
# variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not
# the document ID refers to the PubMed ID (origID)
pre_pmid=""
# doc_num counts the number of created documents
doc_num =0
# read lines in CSV file
for line in infile :
# tab-separated format
temp = line.strip().split("\t")
# get PubMed ID, sentences ID, and the sentence itself
# (ToDo: use a split command instead of this regular expression - debug)
curr_pmid = re.match('(\d{8})',temp[0]).group(0)
pmid_sent_num = temp[0]
sentence_text = temp[1]
# find all annotated proteins and compounds by matching their tags
pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))]
cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))]
# join the two lists
positions = pro_positions + cmp_positions
positions.sort()
#Initialize the list with the number of identified tags
entity_list =[]
entity_list=[0]*len(positions)
# iterate over all identified positions of the identified tags
for i in range(len(positions)):
# initialze the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset)
entity_list[i]=[0]*4
# store these four elements with grouping in the regular expression
obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]])
entity_list[i][0]=obj.group(1) #entity_type
entity_list[i][1]=obj.group(2) #entity_id
entity_list[i][2]=obj.group(3) #entity_text
entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")")
# the entity_charoffset will be assign later after having the pure sentence text generated (without any tags)
# the sentence without any tags will be generated by deleting all tags via text concatenation
# initialize (ToDo: initialization like this not needed - debug)
pur_sent_text = sentence_text
# enumerate over the list of positions (index, value)
for i,e in reversed(list(enumerate(positions))):
pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:]
# get the character offset of all identified synonyms
# decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc.
# make use of a list of repeated synonyms and synonym positions
repeated_syn_pos =[]
rep_syn =[]
for i in range(len(entity_list)) :
# check whether this is the fist occurrence of the current synonym
if not entity_list[i][2] in rep_syn :
# get the list of positions of all occurences of the current synonym
u_pur_sent_text = pur_sent_text.decode("utf8")
charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))]
# check whether it occures only once such that the charoffsetone directly be assigned
if len(charoffset_value) == 1 :
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
else:
# if it occures more than one time, the charoffset has to be assigned according to the first pair of positions
entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1])
# append this synonym to the rep_syn list to store all repeated synonyms in this sentence
rep_syn.append(entity_list[i][2])
# delete the fist pair of positions from the list
charoffset_value = charoffset_value[1:]
# add the rest of positions pairs for the current synonym to another list
for j in range(len(charoffset_value)):
repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]])
else:
# this case refers to at least the second occurrence of the synonym
# for each repeated synonym, assign the first position pair from the repeated_syn_pos list
for k in range(len(repeated_syn_pos)):
if repeated_syn_pos[k][0] == entity_list[i][2]:
break
entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2])
# get pairs and their interaction status (separated by a double underscore)
listof_int_noint = temp[2:]
interaction_list=[0]*len(listof_int_noint)
for i in range(len(listof_int_noint)):
interaction_list[i]=listof_int_noint[i].split('__')
# interaction/no_interaction corresponds to True/False
TF_int_list=[0]*len(interaction_list)
for intid in range(len(interaction_list)) :
if interaction_list[intid][2]=="interaction" :
TF_int_list[intid]="True"
else :
TF_int_list[intid]="False"
# debug:
# print TF_int_list
# build XML structure
# check whether the PubMed ID changed in comparision to the last parsed sentence
if curr_pmid == pre_pmid :
# if this is the case, only the sentence ID has to be increased
sent_num +=1
# add sentence ID using the current document number
# (doc_num has to be decreased by one, because this index is automatically increased after each sentence)
# all openning and closing squared brackets ([,]) should be replaced with round brackets, because they will make problems in the tokenization step of the (preprocessing) pipeline
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# build entity tags according to the list identified tags from the CSV file (entity_list)
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# insert types of interaction for each pair of entities
# get the index of the synonym interactions in entity_list
origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# if the current PubMed ID changed in comparison to the last parsed sentences
else :
if not doc_num == 0 :
outfile.write(" </document>\n")
sent_num =0
# a new document tag has to be opened and the sentences can be added
outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n")
# replace squared brackets ([,]) with round brackets
pur_sent_text = pur_sent_text.replace("[","(").replace("]",")")
outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n")
# now have to make entity tags according to entity_list data.
for i in range(0,len(entity_list)) :
outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n")
# build entity tags
origId = "DS1.d"+str(doc_num)+".s"+str(sent_num)
for int_id in range(len(interaction_list)) :
for ent_id in range(len(entity_list)):
if interaction_list[int_id][0] in entity_list[ent_id]:
break
first_entity=ent_id
for k in range(len(entity_list)):
if interaction_list[int_id][1] in entity_list[k]:
break
second_entity=k
outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n")
# close sentence tag
outfile.write(" </sentence>\n")
# set new PubMed ID as the last parsed document ID and increase document index
pre_pmid = curr_pmid
doc_num+=1
# close document tag
outfile.write("</document>\n")
# close corpus tag
outfile.write("</corpus>\n")
# close files
infile.close()
outfile.close()
| en | 0.719641 | #!/usr/bin/env python # -*- coding: UTF-8 -*- Copyright (c) 2015, <NAME> <<EMAIL>>, <NAME> <<EMAIL>> This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol). # module to make use of regular expressions # set the default encoding to utf8 and ignore all decoding/encoding steps. # (ToDo: check whether the encoding command is needed - debug) # optparse - Parser for command-line options # import this function to add quotation arround the input text and ignore the extra quotations inside the sentence text #from xml.sax.saxutils import escape # (ToDo: not needed - debug) ### MAIN PART OF THE SCRIPT ### # configure parsing of command-line arguments # save parameters in an extra variable # open input file with annotated sentences # open output file #example for the input format: #18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction #example for the output format <?xml version="1.0" encoding="UTF-8"> <corpus source="DS1"> <document id="DS1.d0" origId="18227838"> <sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/> <entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/> <entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/> <entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/> <interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" /> <interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" /> </sentence> [...] </document> [...] 
</corpus> # add XML header and define corpus source # variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not # the document ID refers to the PubMed ID (origID) # doc_num counts the number of created documents # read lines in CSV file # tab-separated format # get PubMed ID, sentences ID, and the sentence itself # (ToDo: use a split command instead of this regular expression - debug) # find all annotated proteins and compounds by matching their tags # join the two lists #Initialize the list with the number of identified tags # iterate over all identified positions of the identified tags # initialze the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset) # store these four elements with grouping in the regular expression #entity_type #entity_id #entity_text # the entity_charoffset will be assign later after having the pure sentence text generated (without any tags) # the sentence without any tags will be generated by deleting all tags via text concatenation # initialize (ToDo: initialization like this not needed - debug) # enumerate over the list of positions (index, value) # get the character offset of all identified synonyms # decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc. # make use of a list of repeated synonyms and synonym positions # check whether this is the fist occurrence of the current synonym # get the list of positions of all occurences of the current synonym # check whether it occures only once such that the charoffsetone directly be assigned # if it occures more than one time, the charoffset has to be assigned according to the first pair of positions # append this synonym to the rep_syn list to store all repeated synonyms in this sentence # delete the fist pair of positions from the list # add the rest of positions pairs for the current synonym to another list # this case refers to at least the second occurrence of the synonym # for each repeated synonym, assign the first position pair from the repeated_syn_pos list # get pairs and their interaction status (separated by a double underscore) # interaction/no_interaction corresponds to True/False # debug: # print TF_int_list # build XML structure # check whether the PubMed ID changed in comparision to the last parsed sentence # if this is the case, only the sentence ID has to be increased # add sentence ID using the current document number # (doc_num has to be decreased by one, because this index is automatically increased after each sentence) # all openning and closing squared brackets ([,]) should be replaced with round brackets, because they will make problems in the tokenization step of the (preprocessing) pipeline # build entity tags according to the list identified tags from the CSV file (entity_list) # insert types of interaction for each pair of entities # get the index of the synonym interactions in entity_list # close sentence tag # if the current PubMed ID changed in comparison to the last parsed sentences # a new document tag has to be opened and the sentences can be added # replace squared brackets ([,]) with round brackets # now have to make entity tags according to entity_list data. # build entity tags # close sentence tag # set new PubMed ID as the last parsed document ID and increase document index # close document tag # close corpus tag # close files | 2.754461 | 3 |
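Much of the script above is devoted to recovering character offsets for each entity mention in the de-tagged sentence and writing them as `start-end` attributes, taking the first still-unused match when a synonym occurs several times. The toy snippet below illustrates that offset convention with `re.finditer` on an invented sentence; it is a sketch of the idea, not the script's exact bookkeeping.

```python
import re

sentence = "lithium and valproate activate the ERK pathway and ERK signalling"
for mention in ("lithium", "valproate", "ERK"):
    # first match wins; its span is reported as "start-end", end exclusive
    first = next(re.finditer(re.escape(mention), sentence))
    print(mention, "%d-%d" % (first.start(), first.end()))
```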
tests/test_add_contact.py | SergeyDorokhov/python_training | 0 | 7705 | def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
list_before = db.get_contact_list()
contact.id_contact = app.contact.get_next_id(list_before)
app.contact.create(contact)
assert len(list_before) + 1 == len(db.get_contact_list())
list_after = db.get_contact_list()
list_before.append(contact)
assert sorted(list_before) == sorted(list_after)
if check_ui:
assert sorted(list_after) == sorted(app.contact.get_list()) | def test_add_contact(app, db, json_contacts, check_ui):
contact = json_contacts
list_before = db.get_contact_list()
contact.id_contact = app.contact.get_next_id(list_before)
app.contact.create(contact)
assert len(list_before) + 1 == len(db.get_contact_list())
list_after = db.get_contact_list()
list_before.append(contact)
assert sorted(list_before) == sorted(list_after)
if check_ui:
assert sorted(list_after) == sorted(app.contact.get_list()) | none | 1 | 2.594312 | 3 |
|
website/members/urls.py | eamanu/asoc_members | 0 | 7706 | <reponame>eamanu/asoc_members
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from members import views
urlpatterns = [
path('solicitud-alta/', views.signup_initial, name='signup'),
path('solicitud-alta/persona/', views.signup_form_person, name='signup_person'),
path('solicitud-alta/organizacion',
views.signup_form_organization, name='signup_organization'),
path('solicitud-alta/gracias', views.signup_thankyou, name='signup_thankyou'),
path('reportes/', views.reports_main, name='reports_main'),
path('reportes/deudas', views.report_debts, name='report_debts'),
path('reportes/completos', views.report_complete, name='report_complete'),
path('reportes/incompletos', views.report_missing, name='report_missing'),
path('reportes/ingcuotas', views.report_income_quotas, name='report_income_quotas'),
path('reportes/ingdinero', views.report_income_money, name='report_income_money'),
path('reportes/miembros', views.members_list, name="members_list"),
path('reportes/miembros/<pk>/', views.member_detail, name='member_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from members import views
urlpatterns = [
path('solicitud-alta/', views.signup_initial, name='signup'),
path('solicitud-alta/persona/', views.signup_form_person, name='signup_person'),
path('solicitud-alta/organizacion',
views.signup_form_organization, name='signup_organization'),
path('solicitud-alta/gracias', views.signup_thankyou, name='signup_thankyou'),
path('reportes/', views.reports_main, name='reports_main'),
path('reportes/deudas', views.report_debts, name='report_debts'),
path('reportes/completos', views.report_complete, name='report_complete'),
path('reportes/incompletos', views.report_missing, name='report_missing'),
path('reportes/ingcuotas', views.report_income_quotas, name='report_income_quotas'),
path('reportes/ingdinero', views.report_income_money, name='report_income_money'),
path('reportes/miembros', views.members_list, name="members_list"),
path('reportes/miembros/<pk>/', views.member_detail, name='member_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | none | 1 | 1.734609 | 2 |
|
Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py | vais-ral/CCPi-ML | 0 | 7707 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 14:04:03 2018
@author: zyv57124
"""
import scipy.io as sio
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.python.training import gradient_descent
from time import time
class TimingCallback(keras.callbacks.Callback):
def __init__(self):
self.logs=[]
def on_epoch_begin(self, epoch, logs={}):
self.starttime=time()
def on_epoch_end(self, epoch, logs={}):
self.logs.append(time()-self.starttime)
#Load data ------------------------------------------------------
def loadMATData(file1):
return sio.loadmat(file1)
#Load Data-------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
filter = labels ==10
labels[filter] = 0
#shuffle data---------------------------------------------------
ran = np.arange(features.shape[0])
np.random.shuffle(ran)
features = features[ran]
labels = labels[ran]
training_features = features[:3500]
training_labels = labels[:3500]
test_features = features[3501:]
test_labels = labels[3501:]
for i in np.arange(0,500, 10):
#TF Neaural Network Builder--------------------------------------
model = keras.Sequential([
keras.layers.Dense(400, activation=tf.nn.relu),
keras.layers.Dense(25, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
predictions = model.predict(test_features)
cb=TimingCallback()
history = model.fit(training_features, training_labels, batch_size=i+1, epochs=100, verbose=2, callbacks=[cb])
#Store eoch number and loss values in .txt file
loss_data = (history.history['loss'])
f = open("TF_loss_data_batchnum_"+str(i+1)+".txt","w")
for xx in range(1,len(loss_data)+1):
if xx==1:
delta_loss = 'Nan'
else:
delta_loss = (loss_data[xx-2] - loss_data[xx-1])
#Epoch #Loss #Batch size #Time #Change in loss
f.write(str(xx) + "," + str(loss_data[xx-1]) + "," + str(i+1) + "," + str(cb.logs[xx-1]) + "," + str(delta_loss) + "\n" )
f.close() | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 14:04:03 2018
@author: zyv57124
"""
import scipy.io as sio
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.python.training import gradient_descent
from time import time
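# Keras callback that records the wall-clock duration of each training epoch in self.logs.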
class TimingCallback(keras.callbacks.Callback):
def __init__(self):
self.logs=[]
def on_epoch_begin(self, epoch, logs={}):
self.starttime=time()
def on_epoch_end(self, epoch, logs={}):
self.logs.append(time()-self.starttime)
#Load data ------------------------------------------------------
def loadMATData(file1):
return sio.loadmat(file1)
#Load Data-------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
filter = labels ==10
labels[filter] = 0
#shuffle data---------------------------------------------------
ran = np.arange(features.shape[0])
np.random.shuffle(ran)
features = features[ran]
labels = labels[ran]
training_features = features[:3500]
training_labels = labels[:3500]
test_features = features[3501:]
test_labels = labels[3501:]
for i in np.arange(0,500, 10):
    #TF Neural Network Builder--------------------------------------
model = keras.Sequential([
keras.layers.Dense(400, activation=tf.nn.relu),
keras.layers.Dense(25, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
predictions = model.predict(test_features)
cb=TimingCallback()
history = model.fit(training_features, training_labels, batch_size=i+1, epochs=100, verbose=2, callbacks=[cb])
    #Store epoch number and loss values in .txt file
loss_data = (history.history['loss'])
f = open("TF_loss_data_batchnum_"+str(i+1)+".txt","w")
for xx in range(1,len(loss_data)+1):
if xx==1:
delta_loss = 'Nan'
else:
delta_loss = (loss_data[xx-2] - loss_data[xx-1])
#Epoch #Loss #Batch size #Time #Change in loss
f.write(str(xx) + "," + str(loss_data[xx-1]) + "," + str(i+1) + "," + str(cb.logs[xx-1]) + "," + str(delta_loss) + "\n" )
f.close() | en | 0.388311 | # -*- coding: utf-8 -*- Created on Wed Jul 18 14:04:03 2018 @author: zyv57124 #Load data ------------------------------------------------------ #Load Data------------------------------------------------------- #shuffle data--------------------------------------------------- #TF Neaural Network Builder-------------------------------------- #Store eoch number and loss values in .txt file #Epoch #Loss #Batch size #Time #Change in loss | 2.670454 | 3 |
Exercise_8.py | aurimas13/Python-stuff | 1 | 7708 | <filename>Exercise_8.py
# Solution of Exercise 8 - Exercise_8.py
#
# Uploaded by <NAME> on 11/23/20.
# Updated by <NAME> on 11/06/21.
formatter = "%r %r %r %r"
print(formatter % (1, 2, 3, 4))
print(formatter % ("one", "two", "three", "four"))
print(formatter % (True, False, False, True))
print(formatter % (formatter, formatter, formatter, formatter))
print(formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight."
))
| <filename>Exercise_8.py
# Solution of Exercise 8 - Exercise_8.py
#
# Uploaded by <NAME> on 11/23/20.
# Updated by <NAME> on 11/06/21.
formatter = "%r %r %r %r"
print(formatter % (1, 2, 3, 4))
print(formatter % ("one", "two", "three", "four"))
print(formatter % (True, False, False, True))
print(formatter % (formatter, formatter, formatter, formatter))
print(formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight."
))
| en | 0.889526 | # Solution of Exercise 8 - Exercise_8.py # # Uploaded by <NAME> on 11/23/20. # Updated by <NAME> on 11/06/21. | 3.556958 | 4 |
Easy/two-numbers-sum/solution-1.py | MCFrank16/python-algo | 0 | 7709 | <gh_stars>0
# solution 1: Brute Force
# time complexity: O(n^2)
# space complexity: O(1)
def twoNumberSum(arr, n):
for i in range(len(arr) - 1):
firstNum = arr[i]
for j in range(i + 1, len(arr)):
secondNum = arr[j]
if firstNum + secondNum == n:
return [firstNum, secondNum]
return []
print(twoNumberSum([3,5,-4,8,11,1,-1,6], 10))
| # solution 1: Brute Force
# time complexity: O(n^2)
# space complexity: O(1)
def twoNumberSum(arr, n):
for i in range(len(arr) - 1):
firstNum = arr[i]
for j in range(i + 1, len(arr)):
secondNum = arr[j]
if firstNum + secondNum == n:
return [firstNum, secondNum]
return []
print(twoNumberSum([3,5,-4,8,11,1,-1,6], 10)) | en | 0.781855 | # solution 1: Brute Force # time complexity: O(n^2) # space complexity: O(1) | 3.817336 | 4 |
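# Illustrative alternative (not part of the original repo): the same two-number-sum
# problem can be solved in O(n) time and O(n) space with a hash set by checking,
# for each value, whether its complement has already been seen.
def two_number_sum_hashset(arr, n):
    seen = set()
    for value in arr:
        complement = n - value
        if complement in seen:
            return [complement, value]
        seen.add(value)
    return []

print(two_number_sum_hashset([3, 5, -4, 8, 11, 1, -1, 6], 10))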
python/cac_tripplanner/destinations/migrations/0021_event.py | maurizi/cac-tripplanner | 0 | 7710 | <filename>python/cac_tripplanner/destinations/migrations/0021_event.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 17:32
from __future__ import unicode_literals
import ckeditor.fields
import destinations.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('destinations', '0020_auto_20170203_1251'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('website_url', models.URLField(blank=True, null=True)),
('description', ckeditor.fields.RichTextField()),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('image', models.ImageField(help_text=b'The small image. Will be displayed at 310x155.', null=True, upload_to=destinations.models.generate_filename)),
('wide_image', models.ImageField(help_text=b'The large image. Will be displayed at 680x400.', null=True, upload_to=destinations.models.generate_filename)),
('published', models.BooleanField(default=False)),
('priority', models.IntegerField(default=9999)),
('destination', models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL, to='destinations.Destination')),
],
options={
'ordering': ['priority', '-start_date'],
},
),
]
| <filename>python/cac_tripplanner/destinations/migrations/0021_event.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-28 17:32
from __future__ import unicode_literals
import ckeditor.fields
import destinations.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('destinations', '0020_auto_20170203_1251'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('website_url', models.URLField(blank=True, null=True)),
('description', ckeditor.fields.RichTextField()),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('image', models.ImageField(help_text=b'The small image. Will be displayed at 310x155.', null=True, upload_to=destinations.models.generate_filename)),
('wide_image', models.ImageField(help_text=b'The large image. Will be displayed at 680x400.', null=True, upload_to=destinations.models.generate_filename)),
('published', models.BooleanField(default=False)),
('priority', models.IntegerField(default=9999)),
('destination', models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL, to='destinations.Destination')),
],
options={
'ordering': ['priority', '-start_date'],
},
),
]
| en | 0.76967 | # -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-28 17:32 | 1.70117 | 2 |
data_extraction/scripts/bnf_adr_extraction.py | elpidakon/CRESCENDDI | 0 | 7711 | # Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant
# adverse drug-drug interactions (2021)
# Code to extract single-drug side effect data from the BNF website
from bs4 import BeautifulSoup
import urllib
import os, csv
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
URL_BEGINNING = 'https://bnf.nice.org.uk/drug/'
print('beginning scrape for individual drugs...')
# Fetch the HTML containing the full list of APIs.
r = urllib.request.urlopen(URL_BEGINNING).read()
soup1 = BeautifulSoup(r, 'lxml')
# Extract the full URL list.
URL_list = []
for s in soup1.find_all('div', {'class': 'span11'}):
for ai in s(href=True):
temp = URL_BEGINNING + ai['href']
URL_list.append(temp)
print(URL_list)
# Create an empty dataframe for storing the extracted data for APIs.
scraped_API_count = 0
scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str)
row_count = 0
# Empty list to store API mappings to their drug class (if applicable).
API_to_drugclass = []
# Scrape individual drug (API) side effects.
HIGHEST_API_ID = len(URL_list)
for id in tqdm(range(0, HIGHEST_API_ID)):
# Try to fetch the HTML for each API.
try:
l = urllib.request.urlopen(URL_list[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped products.
scraped_API_count += 1
soup2 = BeautifulSoup(l, 'lxml')
API = soup2.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
# In case the API contains a side effect section.
if soup2.find('section', {'id':'sideEffects'}):
ae_list = soup2.find_all('span', {'class': 'sideEffect'})
for a in ae_list:
adv_event = a.getText()
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_API.at[row_count, 'Frequency'] = freq
row_count += 1
# Check if the drug belongs to a specific drug class. If yes, extract
# the drug class name and the link to the corresponding webpage.
if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')):
temp = []
temp.append(API)
drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText()
temp.append(drug_class)
li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href']
drug_class_link = 'https://bnf.nice.org.uk' + str(li)
temp.append(drug_class_link)
API_to_drugclass.append(temp)
# In case the API does not contain a side effect section.
else:
adv_event = 'NO AEs MENTIONED'
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
scraped_API.at[row_count,'Frequency'] = ''
row_count += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip()
scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip()
scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip()
print('BNF individual side effects successfully scraped.')
print('beginning scrape for drug classes...')
# Create a dataframe with drug names, drug classes and related URLs (where applicable).
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link'])
# Create a list with all the links for the drug class webpages.
class_links = API_class_df['Link'].unique().tolist()
# Scrape drug class side effects.
HIGHEST_DRUG_CLASS_ID = len(class_links)
scraped_class_count = 0
# Create an empty dataframe for storing the extracted data for drug classes.
scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str)
row_count_2 = 0
for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)):
# Try to fetch the HTML for each drug class.
try:
l = urllib.request.urlopen(class_links[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped drug classes.
scraped_class_count += 1
soup3 = BeautifulSoup(l, 'lxml')
# Extract the drug class name.
class_name = soup3.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
class_ae_list = soup3.find_all('span', {'class': 'sideEffect'})
for a in class_ae_list:
adv_event = a.getText()
scraped_class.at[row_count_2, 'Drug_Class'] = class_name
scraped_class.at[row_count_2,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_class.at[row_count_2, 'Frequency'] = freq
row_count_2 += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip()
scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip()
scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip()
print('BNF drug class side effects successfully scraped.')
print('combine extracted data...')
## Combine both tables by adding drug class side effects to the individual
## ingredients of each drug class.
# Create a dictionary that contains all drug classes as keys and side effects
# with associated frequencies as values.
AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict()
# Remove URL column
API_class_df.drop(columns = 'Link', inplace = True)
# Create a dataframe with drug class as the index of APIs (if available)
# and add their drug class side effects and associated frequencies.
API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip()
API_class_df.set_index('Drug_Class', inplace = True)
API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict)
API_class_df.reset_index(inplace=True)
# Create a new dataframe to store drug class side effect data for each API.
AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True)
AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index)
AEs_from_class_df['from_drug_class'] = 'Yes'
AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True)
# Fill NAs in Frequency column if no side effects are mentioned.
scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A'
# Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'.
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')
# Concatenate the two dataframes to get a final one.
final_df = pd.concat([scraped_API_dropna, AEs_from_class_df])
# Remove any rows that do not contain side effects.
final_df = final_df[final_df.AE != 'NO AEs MENTIONED']
# Convert dataframe to lowercase.
final_df = final_df.apply(lambda x: x.astype(str).str.lower())
# Sort alphabetically.
final_df = final_df.sort_values(by=['API', 'from_drug_class'])
# Remove any duplicates.
final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True)
# Rename columns.
final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class']
FILE_NAME = 'data_extraction/output/bnf_single_data.csv'
print('saving to file...')
# Save the dataset to a csv file.
final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
| # Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant
# adverse drug-drug interactions (2021)
# Code to extract single-drug side effect data from the BNF website
from bs4 import BeautifulSoup
import urllib
import os, csv
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
URL_BEGINNING = 'https://bnf.nice.org.uk/drug/'
print('beginning scrape for individual drugs...')
# Fetch the HTML containing the full list of APIs.
r = urllib.request.urlopen(URL_BEGINNING).read()
soup1 = BeautifulSoup(r, 'lxml')
# Extract the full URL list.
URL_list = []
for s in soup1.find_all('div', {'class': 'span11'}):
for ai in s(href=True):
temp = URL_BEGINNING + ai['href']
URL_list.append(temp)
print(URL_list)
# Create an empty dataframe for storing the extracted data for APIs.
scraped_API_count = 0
scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str)
row_count = 0
# Empty list to store API mappings to their drug class (if applicable).
API_to_drugclass = []
# Scrape individual drug (API) side effects.
HIGHEST_API_ID = len(URL_list)
for id in tqdm(range(0, HIGHEST_API_ID)):
# Try to fetch the HTML for each API.
try:
l = urllib.request.urlopen(URL_list[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped products.
scraped_API_count += 1
soup2 = BeautifulSoup(l, 'lxml')
API = soup2.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
# In case the API contains a side effect section.
if soup2.find('section', {'id':'sideEffects'}):
ae_list = soup2.find_all('span', {'class': 'sideEffect'})
for a in ae_list:
adv_event = a.getText()
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_API.at[row_count, 'Frequency'] = freq
row_count += 1
# Check if the drug belongs to a specific drug class. If yes, extract
# the drug class name and the link to the corresponding webpage.
if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')):
temp = []
temp.append(API)
drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText()
temp.append(drug_class)
li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href']
drug_class_link = 'https://bnf.nice.org.uk' + str(li)
temp.append(drug_class_link)
API_to_drugclass.append(temp)
# In case the API does not contain a side effect section.
else:
adv_event = 'NO AEs MENTIONED'
scraped_API.at[row_count, 'API'] = API
scraped_API.at[row_count,'AE'] = adv_event
scraped_API.at[row_count,'Frequency'] = ''
row_count += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip()
scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip()
scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip()
print('BNF individual side effects successfully scraped.')
print('beginning scrape for drug classes...')
# Create a dataframe with drug names, drug classes and related URLs (where applicable).
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link'])
# Create a list with all the links for the drug class webpages.
class_links = API_class_df['Link'].unique().tolist()
# Scrape drug class side effects.
HIGHEST_DRUG_CLASS_ID = len(class_links)
scraped_class_count = 0
# Create an empty dataframe for storing the extracted data for drug classes.
scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str)
row_count_2 = 0
for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)):
# Try to fetch the HTML for each drug class.
try:
l = urllib.request.urlopen(class_links[id]).read()
# If the page returns a 404 error, skip this id.
except urllib.error.HTTPError as e:
if e.getcode() == 404:
continue
raise
    # Add one to the count of successfully scraped drug classes.
scraped_class_count += 1
soup3 = BeautifulSoup(l, 'lxml')
# Extract the drug class name.
class_name = soup3.find('h1', id= '').span.getText()
# Extract the relevant information to a dataframe.
class_ae_list = soup3.find_all('span', {'class': 'sideEffect'})
for a in class_ae_list:
adv_event = a.getText()
scraped_class.at[row_count_2, 'Drug_Class'] = class_name
scraped_class.at[row_count_2,'AE'] = adv_event
freq = a.parent.parent.parent.h4.getText()
scraped_class.at[row_count_2, 'Frequency'] = freq
row_count_2 += 1
# Remove empty rows from the dataframe that contains the extracted data.
scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)]
# Remove spaces at the beginning and at the end of the text fields.
scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip()
scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip()
scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip()
print('BNF drug class side effects successfully scraped.')
print('combine extracted data...')
## Combine both tables by adding drug class side effects to the individual
## ingredients of each drug class.
# Create a dictionary that contains all drug classes as keys and side effects
# with associated frequencies as values.
AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict()
# Remove URL column
API_class_df.drop(columns = 'Link', inplace = True)
# Create a dataframe with drug class as the index of APIs (if available)
# and add their drug class side effects and associated frequencies.
API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip()
API_class_df.set_index('Drug_Class', inplace = True)
API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict)
API_class_df.reset_index(inplace=True)
# Create a new dataframe to store drug class side effect data for each API.
AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True)
AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index)
AEs_from_class_df['from_drug_class'] = 'Yes'
AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True)
# Fill NAs in Frequency column if no side effects are mentioned.
scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A'
# Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'.
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No')
# Concatenate the two dataframes to get a final one.
final_df = pd.concat([scraped_API_dropna, AEs_from_class_df])
# Remove any rows that do not contain side effects.
final_df = final_df[final_df.AE != 'NO AEs MENTIONED']
# Convert dataframe to lowercase.
final_df = final_df.apply(lambda x: x.astype(str).str.lower())
# Sort alphabetically.
final_df = final_df.sort_values(by=['API', 'from_drug_class'])
# Remove any duplicates.
final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True)
# Rename columns.
final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class']
FILE_NAME = 'data_extraction/output/bnf_single_data.csv'
print('saving to file...')
# Save the dataset to a csv file.
final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
| en | 0.810669 | # Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant # adverse drug-drug interactions (2021) # Code to extract single-drug side effect data from the BNF website # Fetch the HTML containing the full list of APIs. # Extract the full URL list. # Create an empty dataframe for storing the extracted data for APIs. # Empty list to store API mappings to their drug class (if applicable). # Scrape individual drug (API) side effects. # Try to fetch the HTML for each API. # If the page returns a 404 error, skip this id. # Add one to the count of succesfully scraped products. # Extract the relevant information to a dataframe. # In case the API contains a side effect section. # Check if the drug belongs to a specific drug class. If yes, extract # the drug class name and the link to the corresponding webpage. # In case the API does not contain a side effect section. # Remove empty rows from the dataframe that contains the extracted data. # Remove spaces at the beginning and at the end of the text fields. # Create a dataframe with drug names, drug classes and related URLs (where applicable). # Create a list with all the links for the drug class webpages. # Scrape drug class side effects. # Create an empty dataframe for storing the extracted data for drug classes. # Try to fetch the HTML for each drug class. # If the page returns a 404 error, skip this id. # Add one to the count of succesfully scraped drug classes. # Extract the drug class name. # Extract the relevant information to a dataframe. # Remove empty rows from the dataframe that contains the extracted data. # Remove spaces at the beginning and at the end of the text fields. ## Combine both tables by adding drug class side effects to the individual ## ingredients of each drug class. # Create a dictionary that contains all drug classes as keys and side effects # with associated frequencies as values. # Remove URL column # Create a dataframe with drug class as the index of APIs (if available) # and add their drug class side effects and associated frequencies. # Create a new dataframe to store drug class side effect data for each API. # Fill NAs in Frequency column if no side effects are mentioned. # Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'. # Concatenate the two dataframes to get a final one. # Remove any rows that do not contain side effects. # Convert dataframe to lowercase. # Sort alphabetically. # Remove any duplicates. # Rename columns. # Save the dataset to a csv file. | 3.174306 | 3 |
core/forms.py | nicoknoll/howimetcorona | 1 | 7712 | <reponame>nicoknoll/howimetcorona<filename>core/forms.py
from django import forms
class BaseFileForm(forms.Form):
# we try to minify the file to only submit the data
points_file = forms.FileField(
required=False,
widget=forms.FileInput(attrs={'required': 'required'}),
label="Location History File (.json)"
)
points_data = forms.CharField(widget=forms.HiddenInput(), required=False)
def clean(self):
points_file = self.cleaned_data.get('points_file')
points_data = self.cleaned_data.get('points_data')
if not points_file and not points_data:
raise forms.ValidationError({'points_file': 'File is required.'})
return self.cleaned_data
class ReportForm(BaseFileForm):
symptoms_at = forms.DateField(widget=forms.TextInput(attrs={
'placeholder': 'YYYY-MM-DD',
'pattern': '[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}',
'title': 'YYYY-MM-DD'
}))
is_verified = forms.BooleanField(required=False)
class CheckForm(BaseFileForm):
pass
class DeleteForm(forms.Form):
delete_token = forms.CharField(label="Delete token")
| from django import forms
class BaseFileForm(forms.Form):
# we try to minify the file to only submit the data
points_file = forms.FileField(
required=False,
widget=forms.FileInput(attrs={'required': 'required'}),
label="Location History File (.json)"
)
points_data = forms.CharField(widget=forms.HiddenInput(), required=False)
def clean(self):
points_file = self.cleaned_data.get('points_file')
points_data = self.cleaned_data.get('points_data')
if not points_file and not points_data:
raise forms.ValidationError({'points_file': 'File is required.'})
return self.cleaned_data
class ReportForm(BaseFileForm):
symptoms_at = forms.DateField(widget=forms.TextInput(attrs={
'placeholder': 'YYYY-MM-DD',
'pattern': '[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}',
'title': 'YYYY-MM-DD'
}))
is_verified = forms.BooleanField(required=False)
class CheckForm(BaseFileForm):
pass
class DeleteForm(forms.Form):
delete_token = forms.CharField(label="Delete token") | en | 0.894773 | # we try to minify the file to only submit the data | 2.447503 | 2 |
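# Usage sketch (the view name, template and import path are assumptions, not part of
# the original project): a Django view would exercise the clean() hook above like this.
from django.shortcuts import render

from core.forms import ReportForm


def report_view(request):
    form = ReportForm(request.POST or None, request.FILES or None)
    if request.method == "POST" and form.is_valid():
        # clean() guarantees that either the raw file or the pre-minified JSON string is present.
        points = form.cleaned_data["points_data"] or form.cleaned_data["points_file"]
    return render(request, "report.html", {"form": form})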
bartender/drinks/generators.py | autiwg/bartender | 0 | 7713 | from django.utils import timezone
from django.utils.text import slugify
def generate_billed_document_path(instance, filename):
cur_time = timezone.now()
return f"{cur_time.strftime('%Y/%m')}/{slugify(instance.name)}-{cur_time.strftime('%d.%m.%Y %H:%M')}.csv"
| from django.utils import timezone
from django.utils.text import slugify
def generate_billed_document_path(instance, filename):
cur_time = timezone.now()
return f"{cur_time.strftime('%Y/%m')}/{slugify(instance.name)}-{cur_time.strftime('%d.%m.%Y %H:%M')}.csv"
| none | 1 | 2.426719 | 2 |
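# Usage sketch (the Bill model, its fields and the import path are assumptions, not part
# of the original app): Django calls an upload_to callable with (instance, filename) and
# stores the returned relative path, so the generator above plugs straight into a FileField.
from django.db import models

from bartender.drinks.generators import generate_billed_document_path


class Bill(models.Model):
    name = models.CharField(max_length=100)
    document = models.FileField(upload_to=generate_billed_document_path)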
|
papers/wdmerger_I/plots/sponge.py | AMReX-Astro/wdmerger | 2 | 7714 | <gh_stars>1-10
# This Python program is used to create a plot displaying the sponge
# function we use in the CASTRO hydrodynamics for the wdmerger problem.
import numpy as np
import matplotlib.pyplot as plt
def sponge(r):
    # Sponge factor f_S: 0 below rs, smooth cosine ramp on [rs, rt), 1 above rt (uses the rs/rt defined below).
    return np.where(r < rs, 0.0,
                    np.where(r < rt, 0.5 * (1.0 - np.cos(np.pi * (r - rs) / (rt - rs))), 1.0))
rs = 0.75
rt = 0.85
r = np.linspace(0.0, 1.0, 1000)
f = np.zeros(len(r))
idx = np.where(r < rs)
f[idx] = 0.0
idx = np.where(r < rt)
idx = np.where(r[idx] >= rs)
f[idx] = 0.5 * (1.0 - np.cos(np.pi * (r[idx] - rs) / (rt - rs)))
idx = np.where(r >= rt)
f[idx] = 1.0
plt.plot(r, 1.0 - f, linewidth=4.0)
plt.xlabel('Radius', fontsize=20)
plt.ylabel(r'$1 - f_S$', fontsize=20)
plt.xlim([0.0, 1.0])
plt.ylim([-0.05, 1.05])
plt.tick_params(labelsize=16)
plt.tight_layout()
plt.savefig('sponge.eps')
| # This Python program is used to create a plot displaying the sponge
# function we use in the CASTRO hydrodynamics for the wdmerger problem.
import numpy as np
import matplotlib.pyplot as plt
def sponge(r):
    # Sponge factor f_S: 0 below rs, smooth cosine ramp on [rs, rt), 1 above rt (uses the rs/rt defined below).
    return np.where(r < rs, 0.0,
                    np.where(r < rt, 0.5 * (1.0 - np.cos(np.pi * (r - rs) / (rt - rs))), 1.0))
rs = 0.75
rt = 0.85
r = np.linspace(0.0, 1.0, 1000)
f = np.zeros(len(r))
idx = np.where(r < rs)
f[idx] = 0.0
idx = np.where(r < rt)
idx = np.where(r[idx] >= rs)
f[idx] = 0.5 * (1.0 - np.cos(np.pi * (r[idx] - rs) / (rt - rs)))
idx = np.where(r >= rt)
f[idx] = 1.0
plt.plot(r, 1.0 - f, linewidth=4.0)
plt.xlabel('Radius', fontsize=20)
plt.ylabel(r'$1 - f_S$', fontsize=20)
plt.xlim([0.0, 1.0])
plt.ylim([-0.05, 1.05])
plt.tick_params(labelsize=16)
plt.tight_layout()
plt.savefig('sponge.eps') | en | 0.703166 | # This Python program is used to create a plot displaying the sponge # function we use in the CASTRO hydrodynamics for the wdmerger problem. | 3.094132 | 3 |
Python/110-1/Midterm Additional HW/005.py | JenFuChen/NKUST | 3 | 7715 | <gh_stars>1-10
# 005 Print a diamond shape (印出菱形)
while(1):
level = int(input())
if(level <= 0):
break
L = 2*level-1
mid = int((L - 1) / 2)
inspa = mid * 2 - 1
for i in range(L):
spa = level - i - 1
if spa >= 0:
print(" " * spa, end='')
print('*', end='')
if spa < 0:
spa = -spa
print(" " * spa, end='')
print('*', end='')
if(i > 0 and i <= mid):
for j in range(i*2-1):
print(" ", end='')
print('*', end='')
if(i > 0 and i > mid and i != L-1):
inspa = inspa - 2
for j in range(inspa):
print(" ", end='')
print('*', end='')
print()
| # 005 印出菱形
while(1):
level = int(input())
if(level <= 0):
break
L = 2*level-1
mid = int((L - 1) / 2)
inspa = mid * 2 - 1
for i in range(L):
spa = level - i - 1
if spa >= 0:
print(" " * spa, end='')
print('*', end='')
if spa < 0:
spa = -spa
print(" " * spa, end='')
print('*', end='')
if(i > 0 and i <= mid):
for j in range(i*2-1):
print(" ", end='')
print('*', end='')
if(i > 0 and i > mid and i != L-1):
inspa = inspa - 2
for j in range(inspa):
print(" ", end='')
print('*', end='')
print() | ja | 0.781651 | # 005 印出菱形 | 3.545472 | 4 |
dynamo/plot/pseudotime.py | davisidarta/dynamo-release | 0 | 7716 | <reponame>davisidarta/dynamo-release<gh_stars>0
import numpy as np
from ..tools.utils import update_dict
from .utils import save_fig
def plot_direct_graph(adata,
layout=None,
figsize=[6, 4],
save_show_or_return='show',
save_kwargs={},
):
df_mat = adata.uns["df_mat"]
import matplotlib.pyplot as plt
import networkx as nx
edge_color = "gray"
G = nx.from_pandas_edgelist(
df_mat,
source="source",
target="target",
edge_attr="weight",
create_using=nx.DiGraph(),
)
G.nodes()
W = []
for n, nbrs in G.adj.items():
for nbr, eattr in nbrs.items():
W.append(eattr["weight"])
options = {
"width": 300,
"arrowstyle": "-|>",
"arrowsize": 1000,
}
plt.figure(figsize=figsize)
if layout is None:
# pos : dictionary, optional
# A dictionary with nodes as keys and positions as values.
# If not specified a spring layout positioning will be computed.
# See :py:mod:`networkx.drawing.layout` for functions that
# compute node positions.
g = nx.draw(
G,
with_labels=True,
node_color="skyblue",
node_size=100,
edge_color=edge_color,
width=W / np.max(W) * 5,
edge_cmap=plt.cm.Blues,
options=options,
)
else:
raise Exception("layout", layout, " is not supported.")
if save_show_or_return == "save":
s_kwargs = {"path": None, "prefix": 'plot_direct_graph', "dpi": None,
"ext": 'pdf', "transparent": True, "close": True, "verbose": True}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return g
| import numpy as np
from ..tools.utils import update_dict
from .utils import save_fig
def plot_direct_graph(adata,
layout=None,
figsize=[6, 4],
save_show_or_return='show',
save_kwargs={},
):
df_mat = adata.uns["df_mat"]
import matplotlib.pyplot as plt
import networkx as nx
edge_color = "gray"
G = nx.from_pandas_edgelist(
df_mat,
source="source",
target="target",
edge_attr="weight",
create_using=nx.DiGraph(),
)
G.nodes()
W = []
for n, nbrs in G.adj.items():
for nbr, eattr in nbrs.items():
W.append(eattr["weight"])
options = {
"width": 300,
"arrowstyle": "-|>",
"arrowsize": 1000,
}
plt.figure(figsize=figsize)
if layout is None:
# pos : dictionary, optional
# A dictionary with nodes as keys and positions as values.
# If not specified a spring layout positioning will be computed.
# See :py:mod:`networkx.drawing.layout` for functions that
# compute node positions.
g = nx.draw(
G,
with_labels=True,
node_color="skyblue",
node_size=100,
edge_color=edge_color,
width=W / np.max(W) * 5,
edge_cmap=plt.cm.Blues,
options=options,
)
else:
raise Exception("layout", layout, " is not supported.")
if save_show_or_return == "save":
s_kwargs = {"path": None, "prefix": 'plot_direct_graph', "dpi": None,
"ext": 'pdf', "transparent": True, "close": True, "verbose": True}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return g | en | 0.687852 | # pos : dictionary, optional # A dictionary with nodes as keys and positions as values. # If not specified a spring layout positioning will be computed. # See :py:mod:`networkx.drawing.layout` for functions that # compute node positions. | 2.411452 | 2 |
ocean_lib/web3_internal/utils.py | joshualyguessennd/ocean.py | 0 | 7717 | <reponame>joshualyguessennd/ocean.py<filename>ocean_lib/web3_internal/utils.py
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
from collections import namedtuple
import eth_account
import eth_keys
import eth_utils
from eth_keys import KeyAPI
from eth_utils import big_endian_to_int
from ocean_lib.web3_internal.web3_provider import Web3Provider
from web3 import Web3
Signature = namedtuple("Signature", ("v", "r", "s"))
logger = logging.getLogger(__name__)
def generate_multi_value_hash(types, values):
"""
Return the hash of the given list of values.
This is equivalent to packing and hashing values in a solidity smart contract
hence the use of `soliditySha3`.
:param types: list of solidity types expressed as strings
:param values: list of values matching the `types` list
:return: bytes
"""
assert len(types) == len(values)
return Web3.soliditySha3(types, values)
def prepare_prefixed_hash(msg_hash):
"""
:param msg_hash:
:return:
"""
return generate_multi_value_hash(
["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash]
)
def add_ethereum_prefix_and_hash_msg(text):
"""
This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover.
:param text: str any str to be signed / used in recovering address from a signature
:return: hash of prefixed text according to the recommended ethereum prefix
"""
prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}"
return Web3.sha3(text=prefixed_msg)
def get_public_key_from_address(web3, account):
"""
:param web3:
:param account:
:return:
"""
_hash = web3.sha3(text="verify signature.")
signature = web3.personal.sign(_hash, account.address, account.password)
signature = split_signature(web3, web3.toBytes(hexstr=signature))
signature_vrs = Signature(
signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s)
)
prefixed_hash = prepare_prefixed_hash(_hash)
pub_key = KeyAPI.PublicKey.recover_from_msg_hash(
prefixed_hash, KeyAPI.Signature(vrs=signature_vrs)
)
assert (
pub_key.to_checksum_address() == account.address
), "recovered address does not match signing address."
return pub_key
def to_32byte_hex(web3, val):
"""
:param web3:
:param val:
:return:
"""
return web3.toBytes(val).rjust(32, b"\0")
def split_signature(web3, signature):
"""
:param web3:
:param signature: signed message hash, hex str
:return:
"""
assert len(signature) == 65, (
f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}"
)
v = web3.toInt(signature[-1])
r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big"))
s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big"))
if v != 27 and v != 28:
v = 27 + v % 2
return Signature(v, r, s)
def get_wallet(index):
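    # Build a Wallet from the PARITY_* environment variables; a non-zero index selects the numbered variants (e.g. PARITY_ADDRESS1).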
name = "PARITY_ADDRESS" if not index else f"PARITY_ADDRESS{index}"
pswrd_name = "PARITY_PASSWORD" if not index else f"PARITY_PASSWORD{index}"
key_name = "PARITY_KEY" if not index else f"PARITY_KEY{index}"
encrypted_key_name = (
"PARITY_ENCRYPTED_KEY" if not index else f"PARITY_ENCRYPTED_KEY{index}"
)
keyfile_name = "PARITY_KEYFILE" if not index else f"PARITY_KEYFILE{index}"
address = os.getenv(name)
if not address:
return None
pswrd = os.getenv(pswrd_name)
key = os.getenv(key_name)
encr_key = os.getenv(encrypted_key_name)
key_file = os.getenv(keyfile_name)
if key_file and not encr_key:
with open(key_file) as _file:
encr_key = json.loads(_file.read())
from ocean_lib.web3_internal.wallet import Wallet
return Wallet(
Web3Provider.get_web3(),
private_key=key,
encrypted_key=encr_key,
address=Web3.toChecksumAddress(address),
password=<PASSWORD>,
)
def privateKeyToAddress(private_key: str) -> str:
return eth_account.Account().privateKeyToAccount(private_key).address
def privateKeyToPublicKey(private_key: str):
private_key_bytes = eth_utils.decode_hex(private_key)
private_key_object = eth_keys.keys.PrivateKey(private_key_bytes)
return private_key_object.public_key
| # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
from collections import namedtuple
import eth_account
import eth_keys
import eth_utils
from eth_keys import KeyAPI
from eth_utils import big_endian_to_int
from ocean_lib.web3_internal.web3_provider import Web3Provider
from web3 import Web3
Signature = namedtuple("Signature", ("v", "r", "s"))
logger = logging.getLogger(__name__)
def generate_multi_value_hash(types, values):
"""
Return the hash of the given list of values.
This is equivalent to packing and hashing values in a solidity smart contract
hence the use of `soliditySha3`.
:param types: list of solidity types expressed as strings
:param values: list of values matching the `types` list
:return: bytes
"""
assert len(types) == len(values)
return Web3.soliditySha3(types, values)
def prepare_prefixed_hash(msg_hash):
"""
:param msg_hash:
:return:
"""
return generate_multi_value_hash(
["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash]
)
def add_ethereum_prefix_and_hash_msg(text):
"""
This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover.
:param text: str any str to be signed / used in recovering address from a signature
:return: hash of prefixed text according to the recommended ethereum prefix
"""
prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}"
return Web3.sha3(text=prefixed_msg)
def get_public_key_from_address(web3, account):
"""
:param web3:
:param account:
:return:
"""
_hash = web3.sha3(text="verify signature.")
signature = web3.personal.sign(_hash, account.address, account.password)
signature = split_signature(web3, web3.toBytes(hexstr=signature))
signature_vrs = Signature(
signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s)
)
prefixed_hash = prepare_prefixed_hash(_hash)
pub_key = KeyAPI.PublicKey.recover_from_msg_hash(
prefixed_hash, KeyAPI.Signature(vrs=signature_vrs)
)
assert (
pub_key.to_checksum_address() == account.address
), "recovered address does not match signing address."
return pub_key
def to_32byte_hex(web3, val):
"""
:param web3:
:param val:
:return:
"""
return web3.toBytes(val).rjust(32, b"\0")
def split_signature(web3, signature):
"""
:param web3:
:param signature: signed message hash, hex str
:return:
"""
assert len(signature) == 65, (
f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}"
)
v = web3.toInt(signature[-1])
r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big"))
s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big"))
if v != 27 and v != 28:
v = 27 + v % 2
return Signature(v, r, s)
def get_wallet(index):
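    # Build a Wallet from the PARITY_* environment variables; a non-zero index selects the numbered variants (e.g. PARITY_ADDRESS1).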
name = "PARITY_ADDRESS" if not index else f"PARITY_ADDRESS{index}"
pswrd_name = "PARITY_PASSWORD" if not index else f"PARITY_PASSWORD{index}"
key_name = "PARITY_KEY" if not index else f"PARITY_KEY{index}"
encrypted_key_name = (
"PARITY_ENCRYPTED_KEY" if not index else f"PARITY_ENCRYPTED_KEY{index}"
)
keyfile_name = "PARITY_KEYFILE" if not index else f"PARITY_KEYFILE{index}"
address = os.getenv(name)
if not address:
return None
pswrd = os.getenv(pswrd_name)
key = os.getenv(key_name)
encr_key = os.getenv(encrypted_key_name)
key_file = os.getenv(keyfile_name)
if key_file and not encr_key:
with open(key_file) as _file:
encr_key = json.loads(_file.read())
from ocean_lib.web3_internal.wallet import Wallet
return Wallet(
Web3Provider.get_web3(),
private_key=key,
encrypted_key=encr_key,
address=Web3.toChecksumAddress(address),
password=<PASSWORD>,
)
def privateKeyToAddress(private_key: str) -> str:
return eth_account.Account().privateKeyToAccount(private_key).address
def privateKeyToPublicKey(private_key: str):
private_key_bytes = eth_utils.decode_hex(private_key)
private_key_object = eth_keys.keys.PrivateKey(private_key_bytes)
return private_key_object.public_key | en | 0.671584 | # Copyright 2018 Ocean Protocol Foundation # SPDX-License-Identifier: Apache-2.0 Return the hash of the given list of values. This is equivalent to packing and hashing values in a solidity smart contract hence the use of `soliditySha3`. :param types: list of solidity types expressed as strings :param values: list of values matching the `types` list :return: bytes :param msg_hash: :return: This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover. :param text: str any str to be signed / used in recovering address from a signature :return: hash of prefixed text according to the recommended ethereum prefix :param web3: :param account: :return: :param web3: :param val: :return: :param web3: :param signature: signed message hash, hex str :return: | 2.470388 | 2 |
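# Illustrative round trip (the key below is a throwaway example value, not a real
# credential): derive the address and public key with the helpers above and check
# that they agree.
example_key = "0x" + "11" * 32
derived_address = privateKeyToAddress(example_key)
derived_public_key = privateKeyToPublicKey(example_key)
assert derived_public_key.to_checksum_address() == derived_address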
autofront/__init__.py | JimmyLamothe/autofront | 1 | 7718 | <gh_stars>1-10
import autofront.autofront as autofront
import autofront.utilities as utilities
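# Re-export the public entry points at package level so callers can use autofront.initialize(), autofront.add(), autofront.run() and autofront.get_display() directly.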
initialize = autofront.initialize
add = autofront.add
run = autofront.run
get_display = utilities.get_display
| import autofront.autofront as autofront
import autofront.utilities as utilities
initialize = autofront.initialize
add = autofront.add
run = autofront.run
get_display = utilities.get_display | none | 1 | 1.232726 | 1 |
|
src/main.py | ketsonroberto/PBDO | 0 | 7719 | <reponame>ketsonroberto/PBDO<filename>src/main.py
# THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE.
import matplotlib.pyplot as plt
import numpy as np
from StochasticMechanics import Stochastic
from scipy.optimize import minimize
from Performance import PerformanceOpt
from Hazards import Stationary
from Building import *
from BuildingProperties import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import optimize
freq = np.linspace(0.00001, 20, 500)
gamma = np.ones((ndof)) * [0.5]
nu = np.ones((ndof)) * [0.5]
alpha = np.ones((ndof)) * [1]
m = np.ones((ndof)) * [1]
c = np.ones((ndof)) * [1]
k = np.ones((ndof)) * [200]
a = np.ones((ndof)) * [0.8] #0.01
ksi = np.ones((ndof)) * [0.05]
# ksi = [0.05, 0.05]
im_max = 30
B_max = 1
# S1 = np.ones(ndof)
# Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof)
# power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1)
# <NAME>
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, U = Ps.power_spectrum_excitation(u10=6.2371, freq=freq, z=z)
# plt.semilogy(freq/(2*np.pi), power_spectrum[:,0])
# plt.show()
# columns["area"] = 0.001
# columns.update({"area": 0.001})
ks = []
ms = []
msf = []
#cost = []
nlc = 100
lc = np.linspace(0.05, 2, nlc)
# fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# fig.suptitle('Mass and Stiffness')
# ax1.plot(lc,ms)
# ax1.plot(lc,msf)
# ax2.plot(lc,ks)
# ax3.plot(ks,cost)
# plt.show()
columns = update_columns(columns=columns, lx=0.4, ly=0.4)
Building = Structure(building, columns, slabs, core, concrete, steel)
k_story = Building.stiffness_story()
m_story = Building.mass_storey(top_story=False)
m_story_f = Building.mass_storey(top_story=True)
k = np.ones(ndof) * [k_story]
m = np.ones(ndof) * [m_story]
m[-1] = m_story_f
length = 0.3
size_col = np.ones(ndof) * [length]
Sto = Stochastic(power_spectrum=power_spectrum, model='bouc_wen', ndof=ndof, freq=freq)
#Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100,
# design_life=1) # design_life = 50
# total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
#CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
# steel=steel, cost=cost)
#size_col = np.ones(ndof) * [0.5]
#size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
#size_col = np.array([0.1, 0.2, 0.3])
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
sizea = 0.1
sizeb = 1
wa = 0.1
wb=100
npar = 10
nw = 10
X = np.zeros((npar * nw, 3 * ndof + 1))
y = np.zeros((npar * nw, 2 * ndof))
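# Surrogate training set: each X row stores (m, c, k) for every DOF plus the wind intensity im;
# each y row stores the corresponding response variances (Var, Vard) for every DOF.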
ct=0
ct1=0
for kk in range(npar):
size_col = sizea+(sizeb-sizea)*np.random.rand(ndof)
M, C, K, m, c, k = Sto.get_MCK(size_col=size_col, args=args, columns=columns)
for i in range(nw):
im = wa + (wb - wa) * np.random.rand(1)[0]
idd = 0
for j in np.arange(0, 3 * ndof, 3):
X[ct, j] = m[idd]
X[ct, j + 1] = c[idd]
X[ct, j + 2] = k[idd]
idd = idd + 1
X[ct, -1] = im
ct = ct + 1
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z)
Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, power_sp=power_spectrum, tol=0.01, maxiter=100,
gamma=gamma, nu=nu, alpha=alpha, a=a)
idd = 0
for j in np.arange(0, 2 * ndof, 2):
y[ct1, j] = Var[idd][0]
y[ct1, j + 1] = Vard[idd][0]
idd = idd + 1
ct1 = ct1 + 1
print(np.shape(y))
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels_U = [None,
ConstantKernel(1.0, (1e-4, 1e4)) * RBF(1, (1e-4, 1e4)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=1,
length_scale_bounds=(1.0e-5, 100.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, nu=1.5)]
gp = GaussianProcessRegressor(kernel=kernels_U[0], n_restarts_optimizer=10, normalize_y=False)
gp.fit(X, y)
r2 = gp.score(X, y)
print(r2)
yp = gp.predict(np.array(X[2].reshape(1, -1)))
val = X[2]
val[-1]=100.0
print(val)
yp = gp.predict(val.reshape(1, -1))
print(yp)
#print(np.shape(X))
#print(np.shape(y))
#nn_architecture = [
# {"input_dim": 10, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 6, "activation": "relu"},
#]
#from neural import NeuralNets
#from sklearn.model_selection import train_test_split
#NN = NeuralNets(nn_architecture)
#TEST_SIZE = 0.1
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132)
##print(X_train)
#params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000,
# learning_rate=1, verbose=True)
"""
b0 = np.linspace(0.1, 0.5, 20)
cost_f = []
cost_i = []
cost_t = []
mm = []
pp = []
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
for i in range(len(b0)):
Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"],
dry_wall_area=dry_wall_area)
Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127)
scol = np.array([b0[i], b0[i]])
Ct = Opt.objective_function(size_col=scol, args=args)
#mom, phi = Building.compression(col_size=b0[i], L=columns["height"])
cost_f.append(Cf)
cost_i.append(Ci)
cost_t.append(Ct)
fig = plt.figure()
plt.plot(b0, cost_t,'-o')
plt.show()
#fig = plt.figure()
#plt.plot(phi, mom,'-o')
#plt.show()
"""
"""
b0 = np.linspace(0.05,0.5,5)
b1 = np.linspace(0.05,0.5,5)
B0, B1 = np.meshgrid(b0, b1)
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
tc = np.zeros((5, 5))
for i in range(len(b0)):
print(i)
for j in range(len(b1)):
size_col = np.array([b0[i], b1[j]])
resp = Opt.objective_function(size_col=size_col, args=args)
tc[i,j] = resp
Z = tc.reshape(B0.shape)
Z = np.array(Z)
nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape)
print([B0[nd], B1[nd]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
#size_col = np.ones(ndof) * [0.2]
#args=[ksi, im_max, B_max, gamma, nu, alpha, a]
##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a}
#bnds = []
#for i in range(ndof):
# bnds.append((0.1, 1))
#bnds=tuple(bnds)
###from scipy import optimize
###res = optimize.fmin(Opt.objective_function, x0=size_col)
#res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds)
###from scipy.optimize import basinhopping
###minimizer_kwargs = {"method": "BFGS", "args": args}
###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200)
#print(res)
### Global methods.
###from scipy.optimize import rosen, shgo
###from scipy.optimize import dual_annealing
###ret = dual_annealing(Opt.objective_function, bounds=bnds)
###print((ret.x, ret.fun))
#c = Opt.linear_damping(m=m, k=k, ksi=ksi)
#M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
#financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max,
# B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
| # THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE.
import matplotlib.pyplot as plt
import numpy as np
from StochasticMechanics import Stochastic
from scipy.optimize import minimize
from Performance import PerformanceOpt
from Hazards import Stationary
from Building import *
from BuildingProperties import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import optimize
freq = np.linspace(0.00001, 20, 500)
gamma = np.ones((ndof)) * [0.5]
nu = np.ones((ndof)) * [0.5]
alpha = np.ones((ndof)) * [1]
m = np.ones((ndof)) * [1]
c = np.ones((ndof)) * [1]
k = np.ones((ndof)) * [200]
a = np.ones((ndof)) * [0.8] #0.01
ksi = np.ones((ndof)) * [0.05]
# ksi = [0.05, 0.05]
im_max = 30
B_max = 1
# S1 = np.ones(ndof)
# Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof)
# power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1)
# <NAME>
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, U = Ps.power_spectrum_excitation(u10=6.2371, freq=freq, z=z)
# plt.semilogy(freq/(2*np.pi), power_spectrum[:,0])
# plt.show()
# columns["area"] = 0.001
# columns.update({"area": 0.001})
ks = []
ms = []
msf = []
#cost = []
nlc = 100
lc = np.linspace(0.05, 2, nlc)
# fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
# fig.suptitle('Mass and Stiffness')
# ax1.plot(lc,ms)
# ax1.plot(lc,msf)
# ax2.plot(lc,ks)
# ax3.plot(ks,cost)
# plt.show()
columns = update_columns(columns=columns, lx=0.4, ly=0.4)
Building = Structure(building, columns, slabs, core, concrete, steel)
k_story = Building.stiffness_story()
m_story = Building.mass_storey(top_story=False)
m_story_f = Building.mass_storey(top_story=True)
k = np.ones(ndof) * [k_story]
m = np.ones(ndof) * [m_story]
m[-1] = m_story_f
length = 0.3
size_col = np.ones(ndof) * [length]
Sto = Stochastic(power_spectrum=power_spectrum, model='bouc_wen', ndof=ndof, freq=freq)
#Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100,
# design_life=1) # design_life = 50
# total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu,
# alpha=alpha, a=a)
#CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
# steel=steel, cost=cost)
#size_col = np.ones(ndof) * [0.5]
#size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
#size_col = np.array([0.1, 0.2, 0.3])
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
sizea = 0.1
sizeb = 1
wa = 0.1
wb=100
npar = 10
nw = 10
X = np.zeros((npar * nw, 3 * ndof + 1))
y = np.zeros((npar * nw, 2 * ndof))
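# Surrogate training set: each X row stores (m, c, k) for every DOF plus the wind intensity im;
# each y row stores the corresponding response variances (Var, Vard) for every DOF.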
ct=0
ct1=0
for kk in range(npar):
size_col = sizea+(sizeb-sizea)*np.random.rand(ndof)
M, C, K, m, c, k = Sto.get_MCK(size_col=size_col, args=args, columns=columns)
for i in range(nw):
im = wa + (wb - wa) * np.random.rand(1)[0]
idd = 0
for j in np.arange(0, 3 * ndof, 3):
X[ct, j] = m[idd]
X[ct, j + 1] = c[idd]
X[ct, j + 2] = k[idd]
idd = idd + 1
X[ct, -1] = im
ct = ct + 1
Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof)
power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z)
Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, power_sp=power_spectrum, tol=0.01, maxiter=100,
gamma=gamma, nu=nu, alpha=alpha, a=a)
idd = 0
for j in np.arange(0, 2 * ndof, 2):
y[ct1, j] = Var[idd][0]
y[ct1, j + 1] = Vard[idd][0]
idd = idd + 1
ct1 = ct1 + 1
print(np.shape(y))
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels_U = [None,
ConstantKernel(1.0, (1e-4, 1e4)) * RBF(1, (1e-4, 1e4)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=1,
length_scale_bounds=(1.0e-5, 100.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, nu=1.5)]
gp = GaussianProcessRegressor(kernel=kernels_U[0], n_restarts_optimizer=10, normalize_y=False)
gp.fit(X, y)
r2 = gp.score(X, y)
print(r2)
yp = gp.predict(np.array(X[2].reshape(1, -1)))
val = X[2]
val[-1]=100.0
print(val)
yp = gp.predict(val.reshape(1, -1))
print(yp)
#print(np.shape(X))
#print(np.shape(y))
#nn_architecture = [
# {"input_dim": 10, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 50, "activation": "relu"},
# {"input_dim": 50, "output_dim": 25, "activation": "relu"},
# {"input_dim": 25, "output_dim": 6, "activation": "relu"},
#]
#from neural import NeuralNets
#from sklearn.model_selection import train_test_split
#NN = NeuralNets(nn_architecture)
#TEST_SIZE = 0.1
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132)
##print(X_train)
#params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000,
# learning_rate=1, verbose=True)
"""
b0 = np.linspace(0.1, 0.5, 20)
cost_f = []
cost_i = []
cost_t = []
mm = []
pp = []
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
for i in range(len(b0)):
Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"],
dry_wall_area=dry_wall_area)
Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127)
scol = np.array([b0[i], b0[i]])
Ct = Opt.objective_function(size_col=scol, args=args)
#mom, phi = Building.compression(col_size=b0[i], L=columns["height"])
cost_f.append(Cf)
cost_i.append(Ci)
cost_t.append(Ct)
fig = plt.figure()
plt.plot(b0, cost_t,'-o')
plt.show()
#fig = plt.figure()
#plt.plot(phi, mom,'-o')
#plt.show()
"""
"""
b0 = np.linspace(0.05,0.5,5)
b1 = np.linspace(0.05,0.5,5)
B0, B1 = np.meshgrid(b0, b1)
args=[ksi, im_max, B_max, gamma, nu, alpha, a]
tc = np.zeros((5, 5))
for i in range(len(b0)):
print(i)
for j in range(len(b1)):
size_col = np.array([b0[i], b1[j]])
resp = Opt.objective_function(size_col=size_col, args=args)
tc[i,j] = resp
Z = tc.reshape(B0.shape)
Z = np.array(Z)
nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape)
print([B0[nd], B1[nd]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
#size_col = np.ones(ndof) * [0.2]
#args=[ksi, im_max, B_max, gamma, nu, alpha, a]
##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a}
#bnds = []
#for i in range(ndof):
# bnds.append((0.1, 1))
#bnds=tuple(bnds)
###from scipy import optimize
###res = optimize.fmin(Opt.objective_function, x0=size_col)
#res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds)
###from scipy.optimize import basinhopping
###minimizer_kwargs = {"method": "BFGS", "args": args}
###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200)
#print(res)
### Global methods.
###from scipy.optimize import rosen, shgo
###from scipy.optimize import dual_annealing
###ret = dual_annealing(Opt.objective_function, bounds=bnds)
###print((ret.x, ret.fun))
#c = Opt.linear_damping(m=m, k=k, ksi=ksi)
#M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a)
#financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max,
# B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu,
# alpha=alpha, a=a) | en | 0.338094 | # THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE. #0.01 # ksi = [0.05, 0.05] # S1 = np.ones(ndof) # Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof) # power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1) # <NAME> # plt.semilogy(freq/(2*np.pi), power_spectrum[:,0]) # plt.show() # columns["area"] = 0.001 # columns.update({"area": 0.001}) #cost = [] # fig, (ax1, ax2, ax3) = plt.subplots(1, 3) # fig.suptitle('Mass and Stiffness') # ax1.plot(lc,ms) # ax1.plot(lc,msf) # ax2.plot(lc,ks) # ax3.plot(ks,cost) # plt.show() #Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100, # design_life=1) # design_life = 50 # total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu, # alpha=alpha, a=a) #CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete, # steel=steel, cost=cost) #size_col = np.ones(ndof) * [0.5] #size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]) #size_col = np.array([0.1, 0.2, 0.3]) #print(np.shape(X)) #print(np.shape(y)) #nn_architecture = [ # {"input_dim": 10, "output_dim": 25, "activation": "relu"}, # {"input_dim": 25, "output_dim": 50, "activation": "relu"}, # {"input_dim": 50, "output_dim": 50, "activation": "relu"}, # {"input_dim": 50, "output_dim": 25, "activation": "relu"}, # {"input_dim": 25, "output_dim": 6, "activation": "relu"}, #] #from neural import NeuralNets #from sklearn.model_selection import train_test_split #NN = NeuralNets(nn_architecture) #TEST_SIZE = 0.1 #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132) ##print(X_train) #params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000, # learning_rate=1, verbose=True) b0 = np.linspace(0.1, 0.5, 20) cost_f = [] cost_i = [] cost_t = [] mm = [] pp = [] args=[ksi, im_max, B_max, gamma, nu, alpha, a] for i in range(len(b0)): Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"], dry_wall_area=dry_wall_area) Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127) scol = np.array([b0[i], b0[i]]) Ct = Opt.objective_function(size_col=scol, args=args) #mom, phi = Building.compression(col_size=b0[i], L=columns["height"]) cost_f.append(Cf) cost_i.append(Ci) cost_t.append(Ct) fig = plt.figure() plt.plot(b0, cost_t,'-o') plt.show() #fig = plt.figure() #plt.plot(phi, mom,'-o') #plt.show() b0 = np.linspace(0.05,0.5,5) b1 = np.linspace(0.05,0.5,5) B0, B1 = np.meshgrid(b0, b1) args=[ksi, im_max, B_max, gamma, nu, alpha, a] tc = np.zeros((5, 5)) for i in range(len(b0)): print(i) for j in range(len(b1)): size_col = np.array([b0[i], b1[j]]) resp = Opt.objective_function(size_col=size_col, args=args) tc[i,j] = resp Z = tc.reshape(B0.shape) Z = np.array(Z) nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape) print([B0[nd], B1[nd]]) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') fig.colorbar(surf, shrink=0.5, aspect=5) plt.show() #size_col = np.ones(ndof) * [0.2] #args=[ksi, im_max, B_max, gamma, nu, alpha, a] ##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a} #bnds = [] #for i in 
range(ndof): # bnds.append((0.1, 1)) #bnds=tuple(bnds) ###from scipy import optimize ###res = optimize.fmin(Opt.objective_function, x0=size_col) #res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds) ###from scipy.optimize import basinhopping ###minimizer_kwargs = {"method": "BFGS", "args": args} ###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200) #print(res) ### Global methods. ###from scipy.optimize import rosen, shgo ###from scipy.optimize import dual_annealing ###ret = dual_annealing(Opt.objective_function, bounds=bnds) ###print((ret.x, ret.fun)) #c = Opt.linear_damping(m=m, k=k, ksi=ksi) #M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a) #financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max, # B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu, # alpha=alpha, a=a) | 2.027796 | 2 |
categorical_embedder/embedders/core/aux/custom_object_handler.py | erelcan/categorical-embedder | 3 | 7720 | from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
def prepare_custom_objects(custom_object_info):
custom_objects = {}
custom_objects.update(_prepare_custom_layers(custom_object_info["layer_info"]))
if not custom_object_info["has_implicit_loss"]:
custom_objects.update(_prepare_custom_loss(custom_object_info["loss_info"]))
return custom_objects
def _prepare_custom_layers(layer_info):
custom_layers = {}
for layer_name in layer_info:
custom_layers[layer_name] = get_custom_layer_class(layer_name)
return custom_layers
def _prepare_custom_loss(loss_info):
return {"loss": get_loss_function(loss_info)}
| from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function
def prepare_custom_objects(custom_object_info):
custom_objects = {}
custom_objects.update(_prepare_custom_layers(custom_object_info["layer_info"]))
if not custom_object_info["has_implicit_loss"]:
custom_objects.update(_prepare_custom_loss(custom_object_info["loss_info"]))
return custom_objects
def _prepare_custom_layers(layer_info):
custom_layers = {}
for layer_name in layer_info:
custom_layers[layer_name] = get_custom_layer_class(layer_name)
return custom_layers
def _prepare_custom_loss(loss_info):
return {"loss": get_loss_function(loss_info)}
| none | 1 | 2.562579 | 3 |
|
osprofiler/cmd/shell.py | charliebr30/osprofiler | 0 | 7721 | <reponame>charliebr30/osprofiler<filename>osprofiler/cmd/shell.py
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Profiler.
"""
import argparse
import inspect
import sys
from oslo_config import cfg
import osprofiler
from osprofiler.cmd import cliutils
from osprofiler.cmd import commands
from osprofiler import exc
from osprofiler import opts
class OSProfilerShell(object):
def __init__(self, argv):
args = self._get_base_parser().parse_args(argv)
opts.set_defaults(cfg.CONF)
if not (args.os_auth_token and args.ceilometer_url):
if not args.os_username:
raise exc.CommandError(
"You must provide a username via either --os-username or "
"via env[OS_USERNAME]")
if not args.os_password:
raise exc.CommandError(
"You must provide a password via either --os-password or "
"via env[OS_PASSWORD]")
if self._no_project_and_domain_set(args):
# steer users towards Keystone V3 API
raise exc.CommandError(
"You must provide a project_id via either --os-project-id "
"or via env[OS_PROJECT_ID] and a domain_name via either "
"--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or "
"a domain_id via either --os-user-domain-id or via "
"env[OS_USER_DOMAIN_ID]")
if not args.os_auth_url:
raise exc.CommandError(
"You must provide an auth url via either --os-auth-url or "
"via env[OS_AUTH_URL]")
args.func(args)
def _get_base_parser(self):
parser = argparse.ArgumentParser(
prog="osprofiler",
description=__doc__.strip(),
add_help=True
)
parser.add_argument("-v", "--version",
action="version",
version=osprofiler.__version__)
self._append_ceilometer_args(parser)
self._append_identity_args(parser)
self._append_subcommands(parser)
return parser
def _append_ceilometer_args(self, parent_parser):
parser = parent_parser.add_argument_group("ceilometer")
parser.add_argument(
"--ceilometer-url", default=cliutils.env("CEILOMETER_URL"),
help="Defaults to env[CEILOMETER_URL].")
parser.add_argument(
"--ceilometer-api-version",
default=cliutils.env("CEILOMETER_API_VERSION", default="2"),
help="Defaults to env[CEILOMETER_API_VERSION] or 2.")
def _append_identity_args(self, parent_parser):
# FIXME(fabgia): identity related parameters should be passed by the
# Keystone client itself to avoid constant update in all the services
# clients. When this fix is merged this method can be made obsolete.
# Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337
parser = parent_parser.add_argument_group("identity")
parser.add_argument("-k", "--insecure",
default=False,
action="store_true",
help="Explicitly allow osprofiler to "
"perform \"insecure\" SSL (https) requests. "
"The server's certificate will "
"not be verified against any certificate "
"authorities. This option should be used with "
"caution.")
# User related options
parser.add_argument("--os-username",
default=cliutils.env("OS_USERNAME"),
help="Defaults to env[OS_USERNAME].")
parser.add_argument("--os-user-id",
default=cliutils.env("OS_USER_ID"),
help="Defaults to env[OS_USER_ID].")
parser.add_argument("--os-password",
default=cliutils.env("OS_PASSWORD"),
help="Defaults to env[OS_PASSWORD].")
# Domain related options
parser.add_argument("--os-user-domain-id",
default=cliutils.env("OS_USER_DOMAIN_ID"),
help="Defaults to env[OS_USER_DOMAIN_ID].")
parser.add_argument("--os-user-domain-name",
default=cliutils.env("OS_USER_DOMAIN_NAME"),
help="Defaults to env[OS_USER_DOMAIN_NAME].")
parser.add_argument("--os-project-domain-id",
default=cliutils.env("OS_PROJECT_DOMAIN_ID"),
help="Defaults to env[OS_PROJECT_DOMAIN_ID].")
parser.add_argument("--os-project-domain-name",
default=cliutils.env("OS_PROJECT_DOMAIN_NAME"),
help="Defaults to env[OS_PROJECT_DOMAIN_NAME].")
# Project V3 or Tenant V2 related options
parser.add_argument("--os-project-id",
default=cliutils.env("OS_PROJECT_ID"),
help="Another way to specify tenant ID. "
"This option is mutually exclusive with "
" --os-tenant-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-project-name",
default=cliutils.env("OS_PROJECT_NAME"),
help="Another way to specify tenant name. "
"This option is mutually exclusive with "
" --os-tenant-name. "
"Defaults to env[OS_PROJECT_NAME].")
parser.add_argument("--os-tenant-id",
default=cliutils.env("OS_TENANT_ID"),
help="This option is mutually exclusive with "
" --os-project-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-tenant-name",
default=cliutils.env("OS_TENANT_NAME"),
help="Defaults to env[OS_TENANT_NAME].")
# Auth related options
parser.add_argument("--os-auth-url",
default=cliutils.env("OS_AUTH_URL"),
help="Defaults to env[OS_AUTH_URL].")
parser.add_argument("--os-auth-token",
default=cliutils.env("OS_AUTH_TOKEN"),
help="Defaults to env[OS_AUTH_TOKEN].")
parser.add_argument("--os-cacert",
metavar="<ca-certificate-file>",
dest="os_cacert",
default=cliutils.env("OS_CACERT"),
help="Path of CA TLS certificate(s) used to verify"
" the remote server\"s certificate. Without this "
"option ceilometer looks for the default system CA"
" certificates.")
parser.add_argument("--os-cert",
help="Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key.")
parser.add_argument("--os-key",
help="Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your cert file.")
# Service Catalog related options
parser.add_argument("--os-service-type",
default=cliutils.env("OS_SERVICE_TYPE"),
help="Defaults to env[OS_SERVICE_TYPE].")
parser.add_argument("--os-endpoint-type",
default=cliutils.env("OS_ENDPOINT_TYPE"),
help="Defaults to env[OS_ENDPOINT_TYPE].")
parser.add_argument("--os-region-name",
default=cliutils.env("OS_REGION_NAME"),
help="Defaults to env[OS_REGION_NAME].")
def _append_subcommands(self, parent_parser):
subcommands = parent_parser.add_subparsers(help="<subcommands>")
for group_cls in commands.BaseCommand.__subclasses__():
group_parser = subcommands.add_parser(group_cls.group_name)
subcommand_parser = group_parser.add_subparsers()
for name, callback in inspect.getmembers(
group_cls(), predicate=inspect.ismethod):
command = name.replace("_", "-")
desc = callback.__doc__ or ""
help_message = desc.strip().split("\n")[0]
arguments = getattr(callback, "arguments", [])
command_parser = subcommand_parser.add_parser(
command, help=help_message, description=desc)
for (args, kwargs) in arguments:
command_parser.add_argument(*args, **kwargs)
command_parser.set_defaults(func=callback)
def _no_project_and_domain_set(self, args):
if not (args.os_project_id or (args.os_project_name and
(args.os_user_domain_name or args.os_user_domain_id)) or
(args.os_tenant_id or args.os_tenant_name)):
return True
else:
return False
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
OSProfilerShell(args)
except exc.CommandError as e:
print(e.message)
return 1
if __name__ == "__main__":
main()
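# Example invocations (values are placeholders; the <group>/<command> names come from
# the commands.BaseCommand subclasses, which are defined outside this module):
#
#   osprofiler <group> <command> --os-username admin --os-password <password> \
#       --os-project-name demo --os-user-domain-name default \
#       --os-auth-url http://keystone:5000/v3
#
#   # or bypass Keystone by supplying both a token and a Ceilometer endpoint,
#   # which is the branch taken in OSProfilerShell.__init__ above:
#   osprofiler <group> <command> --os-auth-token <token> --ceilometer-url http://ceilometer:8777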
| # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command-line interface to the OpenStack Profiler.
"""
import argparse
import inspect
import sys
from oslo_config import cfg
import osprofiler
from osprofiler.cmd import cliutils
from osprofiler.cmd import commands
from osprofiler import exc
from osprofiler import opts
class OSProfilerShell(object):
def __init__(self, argv):
args = self._get_base_parser().parse_args(argv)
opts.set_defaults(cfg.CONF)
if not (args.os_auth_token and args.ceilometer_url):
if not args.os_username:
raise exc.CommandError(
"You must provide a username via either --os-username or "
"via env[OS_USERNAME]")
if not args.os_password:
raise exc.CommandError(
"You must provide a password via either --os-password or "
"via env[OS_PASSWORD]")
if self._no_project_and_domain_set(args):
# steer users towards Keystone V3 API
raise exc.CommandError(
"You must provide a project_id via either --os-project-id "
"or via env[OS_PROJECT_ID] and a domain_name via either "
"--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or "
"a domain_id via either --os-user-domain-id or via "
"env[OS_USER_DOMAIN_ID]")
if not args.os_auth_url:
raise exc.CommandError(
"You must provide an auth url via either --os-auth-url or "
"via env[OS_AUTH_URL]")
args.func(args)
def _get_base_parser(self):
parser = argparse.ArgumentParser(
prog="osprofiler",
description=__doc__.strip(),
add_help=True
)
parser.add_argument("-v", "--version",
action="version",
version=osprofiler.__version__)
self._append_ceilometer_args(parser)
self._append_identity_args(parser)
self._append_subcommands(parser)
return parser
def _append_ceilometer_args(self, parent_parser):
parser = parent_parser.add_argument_group("ceilometer")
parser.add_argument(
"--ceilometer-url", default=cliutils.env("CEILOMETER_URL"),
help="Defaults to env[CEILOMETER_URL].")
parser.add_argument(
"--ceilometer-api-version",
default=cliutils.env("CEILOMETER_API_VERSION", default="2"),
help="Defaults to env[CEILOMETER_API_VERSION] or 2.")
def _append_identity_args(self, parent_parser):
# FIXME(fabgia): identity related parameters should be passed by the
# Keystone client itself to avoid constant update in all the services
# clients. When this fix is merged this method can be made obsolete.
# Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337
parser = parent_parser.add_argument_group("identity")
parser.add_argument("-k", "--insecure",
default=False,
action="store_true",
help="Explicitly allow osprofiler to "
"perform \"insecure\" SSL (https) requests. "
"The server's certificate will "
"not be verified against any certificate "
"authorities. This option should be used with "
"caution.")
# User related options
parser.add_argument("--os-username",
default=cliutils.env("OS_USERNAME"),
help="Defaults to env[OS_USERNAME].")
parser.add_argument("--os-user-id",
default=cliutils.env("OS_USER_ID"),
help="Defaults to env[OS_USER_ID].")
parser.add_argument("--os-password",
default=cliutils.env("OS_PASSWORD"),
help="Defaults to env[OS_PASSWORD].")
# Domain related options
parser.add_argument("--os-user-domain-id",
default=cliutils.env("OS_USER_DOMAIN_ID"),
help="Defaults to env[OS_USER_DOMAIN_ID].")
parser.add_argument("--os-user-domain-name",
default=cliutils.env("OS_USER_DOMAIN_NAME"),
help="Defaults to env[OS_USER_DOMAIN_NAME].")
parser.add_argument("--os-project-domain-id",
default=cliutils.env("OS_PROJECT_DOMAIN_ID"),
help="Defaults to env[OS_PROJECT_DOMAIN_ID].")
parser.add_argument("--os-project-domain-name",
default=cliutils.env("OS_PROJECT_DOMAIN_NAME"),
help="Defaults to env[OS_PROJECT_DOMAIN_NAME].")
# Project V3 or Tenant V2 related options
parser.add_argument("--os-project-id",
default=cliutils.env("OS_PROJECT_ID"),
help="Another way to specify tenant ID. "
"This option is mutually exclusive with "
" --os-tenant-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-project-name",
default=cliutils.env("OS_PROJECT_NAME"),
help="Another way to specify tenant name. "
"This option is mutually exclusive with "
" --os-tenant-name. "
"Defaults to env[OS_PROJECT_NAME].")
parser.add_argument("--os-tenant-id",
default=cliutils.env("OS_TENANT_ID"),
help="This option is mutually exclusive with "
" --os-project-id. "
"Defaults to env[OS_PROJECT_ID].")
parser.add_argument("--os-tenant-name",
default=cliutils.env("OS_TENANT_NAME"),
help="Defaults to env[OS_TENANT_NAME].")
# Auth related options
parser.add_argument("--os-auth-url",
default=cliutils.env("OS_AUTH_URL"),
help="Defaults to env[OS_AUTH_URL].")
parser.add_argument("--os-auth-token",
default=cliutils.env("OS_AUTH_TOKEN"),
help="Defaults to env[OS_AUTH_TOKEN].")
parser.add_argument("--os-cacert",
metavar="<ca-certificate-file>",
dest="os_cacert",
default=cliutils.env("OS_CACERT"),
help="Path of CA TLS certificate(s) used to verify"
" the remote server\"s certificate. Without this "
"option ceilometer looks for the default system CA"
" certificates.")
parser.add_argument("--os-cert",
help="Path of certificate file to use in SSL "
"connection. This file can optionally be "
"prepended with the private key.")
parser.add_argument("--os-key",
help="Path of client key to use in SSL "
"connection. This option is not necessary "
"if your key is prepended to your cert file.")
# Service Catalog related options
parser.add_argument("--os-service-type",
default=cliutils.env("OS_SERVICE_TYPE"),
help="Defaults to env[OS_SERVICE_TYPE].")
parser.add_argument("--os-endpoint-type",
default=cliutils.env("OS_ENDPOINT_TYPE"),
help="Defaults to env[OS_ENDPOINT_TYPE].")
parser.add_argument("--os-region-name",
default=cliutils.env("OS_REGION_NAME"),
help="Defaults to env[OS_REGION_NAME].")
def _append_subcommands(self, parent_parser):
subcommands = parent_parser.add_subparsers(help="<subcommands>")
for group_cls in commands.BaseCommand.__subclasses__():
group_parser = subcommands.add_parser(group_cls.group_name)
subcommand_parser = group_parser.add_subparsers()
for name, callback in inspect.getmembers(
group_cls(), predicate=inspect.ismethod):
command = name.replace("_", "-")
desc = callback.__doc__ or ""
help_message = desc.strip().split("\n")[0]
arguments = getattr(callback, "arguments", [])
command_parser = subcommand_parser.add_parser(
command, help=help_message, description=desc)
for (args, kwargs) in arguments:
command_parser.add_argument(*args, **kwargs)
command_parser.set_defaults(func=callback)
def _no_project_and_domain_set(self, args):
if not (args.os_project_id or (args.os_project_name and
(args.os_user_domain_name or args.os_user_domain_id)) or
(args.os_tenant_id or args.os_tenant_name)):
return True
else:
return False
def main(args=None):
if args is None:
args = sys.argv[1:]
try:
OSProfilerShell(args)
except exc.CommandError as e:
print(e.message)
return 1
if __name__ == "__main__":
main() | en | 0.782992 | # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Command-line interface to the OpenStack Profiler. # steer users towards Keystone V3 API # FIXME(fabgia): identity related parameters should be passed by the # Keystone client itself to avoid constant update in all the services # clients. When this fix is merged this method can be made obsolete. # Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337 # User related options # Domain related options # Project V3 or Tenant V2 related options # Auth related options # Service Catalog related options | 1.788615 | 2 |
bmt/util.py | patrickkwang/bmt-lite | 0 | 7722 | """Utilities."""
from functools import wraps
import re
from typing import Callable, List, Optional, TypeVar, Union
from .data import (
all_classes, all_slots,
)
def pascal_to_snake(s: str, sep: str = "_") -> str:
"""Convert Pascal case to snake case.
Assumes that
a) all words are either all-lowercase or all-uppercase
b) all 1-letter words are lowercase
c) there are no adjacent 1-letter words
d) there are no adjacent uppercase words
Examples:
PhenotypicFeature -> phenotypic_feature
RNAProduct -> RNA_product
FeedACamel -> feed_a_camel
Optionally specify `sep` (default "_").
"""
# add an underscore before each capital letter
underscored = re.sub(
r"(?<!^)(?=[A-Z])",
sep,
s,
)
# collapse any adjacent one-letter words
collapsed = re.sub(
r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+",
lambda match: match.group(0).replace("_", ""),
underscored,
)
# lower-case any words containing only one uppercase letter
lowercased = re.sub(
r"(?<![A-Z])[A-Z](?![A-Z])",
lambda match: match.group(0).lower(),
collapsed,
)
return lowercased
def snake_to_pascal(s: str, sep: str = "_") -> str:
"""Convert snake case to Pascal case.
This is the inverse of pascal_to_snake() when its assumptions
are true.
Optionally specify `sep` (default "_").
"""
return re.sub(
fr"(?:^|{sep})([a-zA-Z])",
lambda match: match.group(1).upper(),
s
)
def guess_casing(s: str) -> str:
"""Guess snake case or Pascal case."""
if "_" in s:
return "snake"
if any(c.isupper() for c in s):
return "pascal"
return "snake"
def normalize(s: str) -> str:
"""Normalize string input."""
if s.startswith("biolink:"):
s = s[8:]
if "_" in s:
# it's snake case
return s.replace("_", " ")
if " " in s:
return s
return pascal_to_snake(s, " ")
T = TypeVar("T")
def listify(func: Callable) -> Callable:
"""Expand function to take list of arguments."""
@wraps(func)
def wrapper(arg: Union[T, List[T]], **kwargs) -> Union[T, List[T]]:
"""Apply function to each element in list."""
if isinstance(arg, list):
return [
func(el, **kwargs)
for el in arg
]
else:
return func(arg, **kwargs)
return wrapper
@listify
def format(s: str, case: Optional[str] = None, **kwargs) -> str:
"""Format space-case string as biolink CURIE."""
if isinstance(case, str) and case.lower() == "pascal":
return "biolink:" + snake_to_pascal(s, " ")
elif isinstance(case, str) and case.lower() == "snake":
return "biolink:" + s.replace(" ", "_")
else:
return "biolink:" + s
def with_formatting():
"""Add format conversions to method."""
def decorator(func: Callable) -> Callable:
"""Generate decorator."""
@wraps(func)
def wrapper(self, s: str, *args, formatted=False, **kwargs):
"""Wrap in format conversions."""
case = guess_casing(s)
normalized = normalize(s)
output: Union[str, List[str]] = func(self, normalized, *args, **kwargs)
if formatted:
if normalized in all_classes:
output = format(output, case="pascal")
elif normalized in all_slots:
output = format(output, case="snake")
else:
output = format(output, case=case)
return output
return wrapper
return decorator
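# Illustrative behaviour of the helpers above (the first line restates the docstring
# example; the others follow from normalize() and the "biolink:" prefix in format()):
#
#   pascal_to_snake("PhenotypicFeature")        -> "phenotypic_feature"
#   normalize("biolink:PhenotypicFeature")      -> "phenotypic feature"
#   format("phenotypic feature", case="pascal") -> "biolink:PhenotypicFeature"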
| """Utilities."""
from functools import wraps
import re
from typing import Callable, List, Optional, TypeVar, Union
from .data import (
all_classes, all_slots,
)
def pascal_to_snake(s: str, sep: str = "_") -> str:
"""Convert Pascal case to snake case.
Assumes that
a) all words are either all-lowercase or all-uppercase
b) all 1-letter words are lowercase
c) there are no adjacent 1-letter words
d) there are no adjacent uppercase words
Examples:
PhenotypicFeature -> phenotypic_feature
RNAProduct -> RNA_product
FeedACamel -> feed_a_camel
Optionally specify `sep` (default "_").
"""
# add an underscore before each capital letter
underscored = re.sub(
r"(?<!^)(?=[A-Z])",
sep,
s,
)
# collapse any adjacent one-letter words
collapsed = re.sub(
r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+",
lambda match: match.group(0).replace("_", ""),
underscored,
)
# lower-case any words containing only one uppercase letter
lowercased = re.sub(
r"(?<![A-Z])[A-Z](?![A-Z])",
lambda match: match.group(0).lower(),
collapsed,
)
return lowercased
def snake_to_pascal(s: str, sep: str = "_") -> str:
"""Convert snake case to Pascal case.
This is the inverse of pascal_to_snake() when its assumptions
are true.
Optionally specify `sep` (default "_").
"""
return re.sub(
fr"(?:^|{sep})([a-zA-Z])",
lambda match: match.group(1).upper(),
s
)
def guess_casing(s: str) -> str:
"""Guess snake case or Pascal case."""
if "_" in s:
return "snake"
if any(c.isupper() for c in s):
return "pascal"
return "snake"
def normalize(s: str) -> str:
"""Normalize string input."""
if s.startswith("biolink:"):
s = s[8:]
if "_" in s:
# it's snake case
return s.replace("_", " ")
if " " in s:
return s
return pascal_to_snake(s, " ")
T = TypeVar("T")
def listify(func: Callable) -> Callable:
"""Expand function to take list of arguments."""
@wraps(func)
def wrapper(arg: Union[T, List[T]], **kwargs) -> Union[T, List[T]]:
"""Apply function to each element in list."""
if isinstance(arg, list):
return [
func(el, **kwargs)
for el in arg
]
else:
return func(arg, **kwargs)
return wrapper
@listify
def format(s: str, case: Optional[str] = None, **kwargs) -> str:
"""Format space-case string as biolink CURIE."""
if isinstance(case, str) and case.lower() == "pascal":
return "biolink:" + snake_to_pascal(s, " ")
elif isinstance(case, str) and case.lower() == "snake":
return "biolink:" + s.replace(" ", "_")
else:
return "biolink:" + s
def with_formatting():
"""Add format conversions to method."""
def decorator(func: Callable) -> Callable:
"""Generate decorator."""
@wraps(func)
def wrapper(self, s: str, *args, formatted=False, **kwargs):
"""Wrap in format conversions."""
case = guess_casing(s)
normalized = normalize(s)
output: Union[str, List[str]] = func(self, normalized, *args, **kwargs)
if formatted:
if normalized in all_classes:
output = format(output, case="pascal")
elif normalized in all_slots:
output = format(output, case="snake")
else:
output = format(output, case=case)
return output
return wrapper
return decorator
| en | 0.633309 | Utilities. Convert Pascal case to snake case. Assumes that a) all words are either all-lowercase or all-uppercase b) all 1-letter words are lowercase c) there are no adjacent 1-letter words d) there are no adjacent uppercase words Examples: PhenotypicFeature -> phenotypic_feature RNAProduct -> RNA_product FeedACamel -> feed_a_camel Optionally specify `sep` (default "_"). # add an underscore before each capital letter # collapse any adjacent one-letter words # lower-case any words containing only one uppercase letter Convert snake case to Pascal case. This is the inverse of pascal_to_snake() when its assumptions are true. Optionally specify `sep` (default "_"). Guess snake case or Pascal case. Normalize string input. # it's snake case Expand function to take list of arguments. Apply function to each element in list. Format space-case string as biolink CURIE. Add format conversions to method. Generate decorator. Wrap in format conversions. | 3.691278 | 4 |
src/py_to_json/__init__.py | jlevitt/py-to-json | 0 | 7723 | #
# OMNIVORE CONFIDENTIAL
# __________________
#
# [2013] - [2019] Omnivore Technologies
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Omnivore Technologies and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Omnivore Technologies
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Omnivore Technologies.
#
| #
# OMNIVORE CONFIDENTIAL
# __________________
#
# [2013] - [2019] Omnivore Technologies
# All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains
# the property of Omnivore Technologies and its suppliers,
# if any. The intellectual and technical concepts contained
# herein are proprietary to Omnivore Technologies
# and its suppliers and may be covered by U.S. and Foreign Patents,
# patents in process, and are protected by trade secret or copyright law.
# Dissemination of this information or reproduction of this material
# is strictly forbidden unless prior written permission is obtained
# from Omnivore Technologies.
#
| en | 0.867307 | # # OMNIVORE CONFIDENTIAL # __________________ # # [2013] - [2019] Omnivore Technologies # All Rights Reserved. # # NOTICE: All information contained herein is, and remains # the property of Omnivore Technologies and its suppliers, # if any. The intellectual and technical concepts contained # herein are proprietary to Omnivore Technologies # and its suppliers and may be covered by U.S. and Foreign Patents, # patents in process, and are protected by trade secret or copyright law. # Dissemination of this information or reproduction of this material # is strictly forbidden unless prior written permission is obtained # from Omnivore Technologies. # | 0.734474 | 1 |
sktime/utils/time_series.py | brettkoonce/sktime | 1 | 7724 | <filename>sktime/utils/time_series.py
__author__ = ["<NAME>"]
__all__ = [
"compute_relative_to_n_timepoints",
"time_series_slope",
"fit_trend",
"remove_trend",
"add_trend"
]
import numpy as np
from sklearn.utils import check_array
from sktime.utils.validation.forecasting import check_time_index
def compute_relative_to_n_timepoints(n_timepoints, n="sqrt"):
"""
Get number of intervals from number of time points for various allowed
input arguments.
Helpful to compute number of intervals relative to time series length,
e.g. using floats or functions.
Parameters
----------
n_timepoints : int
n : {int, float, str, callable}
Returns
-------
n_intervals_ : int
Computed number of intervals
"""
# check input: n_timepoints
if not np.issubdtype(type(n_timepoints), np.dtype(int).type):
raise ValueError(
f"`n_timepoints` must be an integer, but found: "
f"{type(n_timepoints)}")
if not n_timepoints >= 1:
raise ValueError(
f"`n_timepoints` must be >= 1, but found: {n_timepoints}")
# compute number of splits
allowed_strings = ["sqrt", "log"]
# integer
if np.issubdtype(type(n), np.dtype(int).type):
if not n <= n_timepoints:
raise ValueError(
f"If `n_intervals` is an integer, it must be smaller "
f"than `n_timepoints`, but found: `n_intervals`={n} "
f"and `n_timepoints`={n_timepoints}")
if n < 1:
raise ValueError(f"If `n_intervals` is an integer, "
f"`n_intervals` must be >= 1, but found: {n}")
n_intervals_ = n
# function
elif callable(n):
n_intervals_ = n(n_timepoints)
# string
elif isinstance(n, str):
if n not in allowed_strings:
raise ValueError(
f"If `n_intervals` is a string, `n_intervals` must be "
f"in {allowed_strings}, but found: {n}")
str_func_map = {
"sqrt": np.sqrt,
"log": np.log
}
func = str_func_map[n]
n_intervals_ = func(n_timepoints)
# float
elif isinstance(n, float):
if not (0 < n <= 1):
raise ValueError(
f"If `n_intervals` is a float, `n_intervals` must be > 0 "
f"and <= 1, but found: {n}")
n_intervals_ = n * n_timepoints
else:
raise ValueError(
f"`n_intervals` must be either one of the allowed string options "
f"in "
f"{allowed_strings}, an integer or a float number.")
# make sure n_intervals is an integer and there is at least one interval
    n_intervals_ = np.maximum(1, int(n_intervals_))
return n_intervals_
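# For example, with the default n="sqrt" a series of 100 time points gives
# compute_relative_to_n_timepoints(100) == 10, while a float such as n=0.25
# yields 25 intervals (0.25 * 100).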
def time_series_slope(y):
"""
Compute slope of time series (y) using ordinary least squares.
Parameters
----------
y : array_like
Time-series.
axis : int
Axis along which the time-series slope is computed.
Returns
-------
slope : float
Slope of time-series.
"""
y = np.asarray(y).ravel()
len_series = len(y)
if len_series < 2:
return 0
else:
x = np.arange(len_series) # time index
x_mean = (len_series - 1) / 2 # faster than x.mean()
return (np.mean(x * y) - x_mean * np.mean(y)) / (
np.mean(x ** 2) - x_mean ** 2)
def fit_trend(x, order=0):
"""Fit linear regression with polynomial terms of given order
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is fitted separately
order : int
The polynomial order of the trend, zero is constant (mean), one is
linear trend, two is quadratic trend, and so on.
Returns
-------
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
See Also
-------
add_trend
remove_trend
"""
x = check_array(x)
if order == 0:
coefs = np.mean(x, axis=1).reshape(-1, 1)
else:
n_obs = x.shape[1]
index = np.arange(n_obs)
poly_terms = np.vander(index, N=order + 1)
# linear least squares fitting using numpy's optimised routine,
# assuming samples in columns
# coefs = np.linalg.pinv(poly_terms).dot(x.T).T
coefs, _, _, _ = np.linalg.lstsq(poly_terms, x.T, rcond=None)
# returning fitted coefficients in expected format with samples in rows
coefs = coefs.T
return coefs
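# Quick sanity check of the trend fit (values are exact up to floating-point error):
#
#   >>> fit_trend(np.arange(10).reshape(1, -1), order=1)
#   array([[1., 0.]])   # slope 1, intercept 0 for the series 0, 1, ..., 9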
def remove_trend(x, coefs, time_index=None):
"""Remove trend from an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is de-trended separately
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients for each sample, single column means order zero,
two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The de-trended series is the residual of the linear regression of the
data on the trend of given order.
See Also
--------
fit_trend
add_trend
References
----------
Adapted from statsmodels (0.9.0), see
https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html
#detrend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, remove mean
if order == 0:
xt = x - coefs
return xt
else:
if time_index is None:
# if no time index is given, create range index
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x - np.dot(poly_terms, coefs.T).T
return xt
def add_trend(x, coefs, time_index=None):
"""Add trend to array for given fitted coefficients along axis 0 or 1,
inverse function to `remove_trend()`
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is treated separately
coefs : array-like, shape=[n_samples, order + 1]
fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The series with added trend.
See Also
-------
fit_trend
remove_trend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, add mean
if order == 0:
xt = x + coefs
else:
if time_index is None:
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x + np.dot(poly_terms, coefs.T).T
return xt
| <filename>sktime/utils/time_series.py
__author__ = ["<NAME>"]
__all__ = [
"compute_relative_to_n_timepoints",
"time_series_slope",
"fit_trend",
"remove_trend",
"add_trend"
]
import numpy as np
from sklearn.utils import check_array
from sktime.utils.validation.forecasting import check_time_index
def compute_relative_to_n_timepoints(n_timepoints, n="sqrt"):
"""
Get number of intervals from number of time points for various allowed
input arguments.
Helpful to compute number of intervals relative to time series length,
e.g. using floats or functions.
Parameters
----------
n_timepoints : int
n : {int, float, str, callable}
Returns
-------
n_intervals_ : int
Computed number of intervals
"""
# check input: n_timepoints
if not np.issubdtype(type(n_timepoints), np.dtype(int).type):
raise ValueError(
f"`n_timepoints` must be an integer, but found: "
f"{type(n_timepoints)}")
if not n_timepoints >= 1:
raise ValueError(
f"`n_timepoints` must be >= 1, but found: {n_timepoints}")
# compute number of splits
allowed_strings = ["sqrt", "log"]
# integer
if np.issubdtype(type(n), np.dtype(int).type):
if not n <= n_timepoints:
raise ValueError(
f"If `n_intervals` is an integer, it must be smaller "
f"than `n_timepoints`, but found: `n_intervals`={n} "
f"and `n_timepoints`={n_timepoints}")
if n < 1:
raise ValueError(f"If `n_intervals` is an integer, "
f"`n_intervals` must be >= 1, but found: {n}")
n_intervals_ = n
# function
elif callable(n):
n_intervals_ = n(n_timepoints)
# string
elif isinstance(n, str):
if n not in allowed_strings:
raise ValueError(
f"If `n_intervals` is a string, `n_intervals` must be "
f"in {allowed_strings}, but found: {n}")
str_func_map = {
"sqrt": np.sqrt,
"log": np.log
}
func = str_func_map[n]
n_intervals_ = func(n_timepoints)
# float
elif isinstance(n, float):
if not (0 < n <= 1):
raise ValueError(
f"If `n_intervals` is a float, `n_intervals` must be > 0 "
f"and <= 1, but found: {n}")
n_intervals_ = n * n_timepoints
else:
raise ValueError(
f"`n_intervals` must be either one of the allowed string options "
f"in "
f"{allowed_strings}, an integer or a float number.")
# make sure n_intervals is an integer and there is at least one interval
    n_intervals_ = np.maximum(1, int(n_intervals_))
return n_intervals_
def time_series_slope(y):
"""
Compute slope of time series (y) using ordinary least squares.
Parameters
----------
y : array_like
Time-series.
axis : int
Axis along which the time-series slope is computed.
Returns
-------
slope : float
Slope of time-series.
"""
y = np.asarray(y).ravel()
len_series = len(y)
if len_series < 2:
return 0
else:
x = np.arange(len_series) # time index
x_mean = (len_series - 1) / 2 # faster than x.mean()
return (np.mean(x * y) - x_mean * np.mean(y)) / (
np.mean(x ** 2) - x_mean ** 2)
def fit_trend(x, order=0):
"""Fit linear regression with polynomial terms of given order
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is fitted separately
order : int
The polynomial order of the trend, zero is constant (mean), one is
linear trend, two is quadratic trend, and so on.
Returns
-------
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
See Also
-------
add_trend
remove_trend
"""
x = check_array(x)
if order == 0:
coefs = np.mean(x, axis=1).reshape(-1, 1)
else:
n_obs = x.shape[1]
index = np.arange(n_obs)
poly_terms = np.vander(index, N=order + 1)
# linear least squares fitting using numpy's optimised routine,
# assuming samples in columns
# coefs = np.linalg.pinv(poly_terms).dot(x.T).T
coefs, _, _, _ = np.linalg.lstsq(poly_terms, x.T, rcond=None)
# returning fitted coefficients in expected format with samples in rows
coefs = coefs.T
return coefs
def remove_trend(x, coefs, time_index=None):
"""Remove trend from an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is de-trended separately
coefs : ndarray, shape=[n_samples, order + 1]
Fitted coefficients for each sample, single column means order zero,
two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The de-trended series is the residual of the linear regression of the
data on the trend of given order.
See Also
--------
fit_trend
add_trend
References
----------
Adapted from statsmodels (0.9.0), see
https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html
#detrend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, remove mean
if order == 0:
xt = x - coefs
return xt
else:
if time_index is None:
# if no time index is given, create range index
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x - np.dot(poly_terms, coefs.T).T
return xt
def add_trend(x, coefs, time_index=None):
"""Add trend to array for given fitted coefficients along axis 0 or 1,
inverse function to `remove_trend()`
Parameters
----------
x : array_like, shape=[n_samples, n_obs]
Time series data, each sample is treated separately
coefs : array-like, shape=[n_samples, order + 1]
fitted coefficients of polynomial order for each sample, one column
means order zero, two columns mean order 1
(linear), three columns mean order 2 (quadratic), etc
time_index : array-like, shape=[n_obs], optional (default=None)
Time series index for which to add the trend components
Returns
-------
xt : ndarray
The series with added trend.
See Also
-------
fit_trend
remove_trend
"""
x = check_array(x)
# infer order from shape of given coefficients
order = coefs.shape[1] - 1
# special case, add mean
if order == 0:
xt = x + coefs
else:
if time_index is None:
n_obs = x.shape[1]
time_index = np.arange(n_obs)
else:
# validate given time index
time_index = check_time_index(time_index)
if not len(time_index) == x.shape[1]:
raise ValueError(
'Length of passed index does not match length of passed x')
poly_terms = np.vander(time_index, N=order + 1)
xt = x + np.dot(poly_terms, coefs.T).T
return xt
| en | 0.69825 | Get number of intervals from number of time points for various allowed input arguments. Helpful to compute number of intervals relative to time series length, e.g. using floats or functions. Parameters ---------- n_timepoints : int n : {int, float, str, callable} Returns ------- n_intervals_ : int Computed number of intervals # check input: n_timepoints # compute number of splits # integer # function # string # float # make sure n_intervals is an integer and there is at least one interval Compute slope of time series (y) using ordinary least squares. Parameters ---------- y : array_like Time-series. axis : int Axis along which the time-series slope is computed. Returns ------- slope : float Slope of time-series. # time index # faster than x.mean() Fit linear regression with polynomial terms of given order x : array_like, shape=[n_samples, n_obs] Time series data, each sample is fitted separately order : int The polynomial order of the trend, zero is constant (mean), one is linear trend, two is quadratic trend, and so on. Returns ------- coefs : ndarray, shape=[n_samples, order + 1] Fitted coefficients of polynomial order for each sample, one column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc See Also ------- add_trend remove_trend # linear least squares fitting using numpy's optimised routine, # assuming samples in columns # coefs = np.linalg.pinv(poly_terms).dot(x.T).T # returning fitted coefficients in expected format with samples in rows Remove trend from an array with a trend of given order along axis 0 or 1 Parameters ---------- x : array_like, shape=[n_samples, n_obs] Time series data, each sample is de-trended separately coefs : ndarray, shape=[n_samples, order + 1] Fitted coefficients for each sample, single column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc time_index : array-like, shape=[n_obs], optional (default=None) Time series index for which to add the trend components Returns ------- xt : ndarray The de-trended series is the residual of the linear regression of the data on the trend of given order. See Also -------- fit_trend add_trend References ---------- Adapted from statsmodels (0.9.0), see https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html #detrend # infer order from shape of given coefficients # special case, remove mean # if no time index is given, create range index # validate given time index Add trend to array for given fitted coefficients along axis 0 or 1, inverse function to `remove_trend()` Parameters ---------- x : array_like, shape=[n_samples, n_obs] Time series data, each sample is treated separately coefs : array-like, shape=[n_samples, order + 1] fitted coefficients of polynomial order for each sample, one column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc time_index : array-like, shape=[n_obs], optional (default=None) Time series index for which to add the trend components Returns ------- xt : ndarray The series with added trend. See Also ------- fit_trend remove_trend # infer order from shape of given coefficients # special case, add mean # validate given time index | 2.70649 | 3 |
prog_vae/prog_encoder/prog_encoder.py | Hanjun-Dai/sdvae | 70 | 7725 | <reponame>Hanjun-Dai/sdvae
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import math
import random
from collections import defaultdict
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append( '%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)) )
from prog_util import DECISION_DIM
from cmd_args import cmd_args
from pytorch_initializer import weights_init
sys.path.append( '%s/../cfg_parser' % os.path.dirname(os.path.realpath(__file__)) )
import cfg_parser as parser
class CNNEncoder(nn.Module):
def __init__(self, max_len, latent_dim):
super(CNNEncoder, self).__init__()
self.latent_dim = latent_dim
self.max_len = max_len
self.conv1 = nn.Conv1d(DECISION_DIM, cmd_args.c1, cmd_args.c1)
self.conv2 = nn.Conv1d(cmd_args.c1, cmd_args.c2, cmd_args.c2)
self.conv3 = nn.Conv1d(cmd_args.c2, cmd_args.c3, cmd_args.c3)
self.last_conv_size = max_len - cmd_args.c1 + 1 - cmd_args.c2 + 1 - cmd_args.c3 + 1
self.w1 = nn.Linear(self.last_conv_size * cmd_args.c3, cmd_args.dense)
self.mean_w = nn.Linear(cmd_args.dense, latent_dim)
self.log_var_w = nn.Linear(cmd_args.dense, latent_dim)
weights_init(self)
def forward(self, x_cpu):
if cmd_args.mode == 'cpu':
batch_input = Variable(torch.from_numpy(x_cpu))
else:
batch_input = Variable(torch.from_numpy(x_cpu).cuda())
h1 = self.conv1(batch_input)
h1 = F.relu(h1)
h2 = self.conv2(h1)
h2 = F.relu(h2)
h3 = self.conv3(h2)
h3 = F.relu(h3)
# h3 = torch.transpose(h3, 1, 2).contiguous()
flatten = h3.view(x_cpu.shape[0], -1)
h = self.w1(flatten)
h = F.relu(h)
z_mean = self.mean_w(h)
z_log_var = self.log_var_w(h)
return (z_mean, z_log_var)
if __name__ == '__main__':
pass
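# Minimal usage sketch (sizes and batch contents are arbitrary examples, not values
# taken from the training scripts):
#
#   encoder = CNNEncoder(max_len=100, latent_dim=56)
#   x = np.zeros((32, DECISION_DIM, 100), dtype=np.float32)  # (batch, channels, length) for Conv1d
#   z_mean, z_log_var = encoder(x)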
| #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import csv
import numpy as np
import math
import random
from collections import defaultdict
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append( '%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)) )
from prog_util import DECISION_DIM
from cmd_args import cmd_args
from pytorch_initializer import weights_init
sys.path.append( '%s/../cfg_parser' % os.path.dirname(os.path.realpath(__file__)) )
import cfg_parser as parser
class CNNEncoder(nn.Module):
def __init__(self, max_len, latent_dim):
super(CNNEncoder, self).__init__()
self.latent_dim = latent_dim
self.max_len = max_len
self.conv1 = nn.Conv1d(DECISION_DIM, cmd_args.c1, cmd_args.c1)
self.conv2 = nn.Conv1d(cmd_args.c1, cmd_args.c2, cmd_args.c2)
self.conv3 = nn.Conv1d(cmd_args.c2, cmd_args.c3, cmd_args.c3)
self.last_conv_size = max_len - cmd_args.c1 + 1 - cmd_args.c2 + 1 - cmd_args.c3 + 1
self.w1 = nn.Linear(self.last_conv_size * cmd_args.c3, cmd_args.dense)
self.mean_w = nn.Linear(cmd_args.dense, latent_dim)
self.log_var_w = nn.Linear(cmd_args.dense, latent_dim)
weights_init(self)
def forward(self, x_cpu):
if cmd_args.mode == 'cpu':
batch_input = Variable(torch.from_numpy(x_cpu))
else:
batch_input = Variable(torch.from_numpy(x_cpu).cuda())
h1 = self.conv1(batch_input)
h1 = F.relu(h1)
h2 = self.conv2(h1)
h2 = F.relu(h2)
h3 = self.conv3(h2)
h3 = F.relu(h3)
# h3 = torch.transpose(h3, 1, 2).contiguous()
flatten = h3.view(x_cpu.shape[0], -1)
h = self.w1(flatten)
h = F.relu(h)
z_mean = self.mean_w(h)
z_log_var = self.log_var_w(h)
return (z_mean, z_log_var)
if __name__ == '__main__':
pass | en | 0.213624 | #!/usr/bin/env python # h3 = torch.transpose(h3, 1, 2).contiguous() | 1.982493 | 2 |
pyvmu/messages.py | JosephRedfern/VarienseVMU | 5 | 7726 | from collections import namedtuple
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
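# Example (field values are illustrative only):
#   sample = Accelerometer(timestamp=123456, x=0.01, y=-0.02, z=9.81)
#   sample.z  # -> 9.81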
| from collections import namedtuple
Accelerometer = namedtuple('Accelerometer', ["timestamp", "x", "y", "z"])
Magnetometer = namedtuple('Magnetometer', ['timestamp', 'x', 'y', 'z'])
Gyroscope = namedtuple('Gyroscope', ['timestamp', 'x', 'y', 'z'])
Euler = namedtuple('Euler', ['timestamp', 'x', 'y', 'z'])
Quaternion = namedtuple('Quaternion', ['timestamp', 'w', 'x', 'y', 'z'])
Heading = namedtuple('Heading', ['timestamp', 'h'])
Status = namedtuple('Status', ['magnetometer_enabled',
'gyroscope_enabled',
'accelerometer_enabled',
'gyroscope_resolution',
'accelerometer_resolution',
'low_output_rate',
'heading_streaming',
'euler_streaming',
'magnetometer_streaming',
'quaternions_streaming',
'gyroscope_streaming',
'accelerometer_streaming'])
| none | 1 | 2.921697 | 3 |
|
scripts/Caesar-Cipher/CaesarCipher.py | Pythobit/python-projects | 2 | 7727 | <filename>scripts/Caesar-Cipher/CaesarCipher.py
from __future__ import print_function
import os
import string
import argparse
try:
maketrans = string.maketrans # python2
except AttributeError:
maketrans = str.maketrans # python3
def caeser_cipher(string_: str, offset: int, decode: bool, file_: str) -> None:
    """Caesar Cipher implementation, reads file or string. Also decodes.
Default implementation is ROT13 encoding.
To decode, specify the same offset you used to encode and your ciphertext / file.
:param string_: string to encode / decode
:param offset: # of chars to rotate by
:param decode: decode instead of encode
:param file_: file to read in then encode/decode
"""
if file_ and os.path.exists(file_):
with open(file_, "r") as f:
string_ = f.read()
if decode:
offset *= -1
lower_offset_alphabet = (
string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset]
)
lower_translation_table = maketrans(string.ascii_lowercase, lower_offset_alphabet)
upper_offset_alphabet = (
string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset]
)
upper_translation_table = maketrans(string.ascii_uppercase, upper_offset_alphabet)
lower_converted = string_.translate(lower_translation_table)
final_converted = lower_converted.translate(upper_translation_table)
if file_:
extension = "dec" if decode else "enc"
with open("{}.{}".format(file_, extension), "w") as f:
print(final_converted, file=f)
else:
print(final_converted)
def check_offset_range(value: int) -> int:
"""Validates that value is in the allowable range.
:param value: integer to validate
:return: valid integer
:raises: argparse.ArgumentTypeError
"""
value = int(value)
if value < -25 or value > 25:
raise argparse.ArgumentTypeError("{} is an invalid offset".format(value))
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Simple Caeser Cipher Encoder and Decoder"
)
parser.add_argument(
"-d",
"--decode",
action="store_true",
dest="decode",
help="decode ciphertext (offset should equal what was used to encode)",
default=False,
)
parser.add_argument(
"-o",
"--offset",
dest="offset",
default=13,
type=check_offset_range,
help="number of characters to shift",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--file", dest="file", help="file to encode", default=None)
group.add_argument(
"-s", "--string", dest="string", help="string to encode", default=None
)
args = parser.parse_args()
caeser_cipher(args.string, args.offset, args.decode, args.file)
| <filename>scripts/Caesar-Cipher/CaesarCipher.py
from __future__ import print_function
import os
import string
import argparse
try:
maketrans = string.maketrans # python2
except AttributeError:
maketrans = str.maketrans # python3
def caeser_cipher(string_: str, offset: int, decode: bool, file_: str) -> None:
    """Caesar Cipher implementation, reads file or string. Also decodes.
Default implementation is ROT13 encoding.
To decode, specify the same offset you used to encode and your ciphertext / file.
:param string_: string to encode / decode
:param offset: # of chars to rotate by
:param decode: decode instead of encode
:param file_: file to read in then encode/decode
"""
if file_ and os.path.exists(file_):
with open(file_, "r") as f:
string_ = f.read()
if decode:
offset *= -1
lower_offset_alphabet = (
string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset]
)
lower_translation_table = maketrans(string.ascii_lowercase, lower_offset_alphabet)
upper_offset_alphabet = (
string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset]
)
upper_translation_table = maketrans(string.ascii_uppercase, upper_offset_alphabet)
lower_converted = string_.translate(lower_translation_table)
final_converted = lower_converted.translate(upper_translation_table)
if file_:
extension = "dec" if decode else "enc"
with open("{}.{}".format(file_, extension), "w") as f:
print(final_converted, file=f)
else:
print(final_converted)
def check_offset_range(value: int) -> int:
"""Validates that value is in the allowable range.
:param value: integer to validate
:return: valid integer
:raises: argparse.ArgumentTypeError
"""
value = int(value)
if value < -25 or value > 25:
raise argparse.ArgumentTypeError("{} is an invalid offset".format(value))
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description="Simple Caesar Cipher Encoder and Decoder"
)
parser.add_argument(
"-d",
"--decode",
action="store_true",
dest="decode",
help="decode ciphertext (offset should equal what was used to encode)",
default=False,
)
parser.add_argument(
"-o",
"--offset",
dest="offset",
default=13,
type=check_offset_range,
help="number of characters to shift",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-f", "--file", dest="file", help="file to encode", default=None)
group.add_argument(
"-s", "--string", dest="string", help="string to encode", default=None
)
args = parser.parse_args()
caeser_cipher(args.string, args.offset, args.decode, args.file)
| en | 0.604324 | # python2 # python3 Caeser Cipher implementation, reads file or string. Also decodes. Default implementation is ROT13 encoding. To decode, specify the same offset you used to encode and your ciphertext / file. :param string_: string to encode / decode :param offset: # of chars to rotate by :param decode: decode instead of encode :param file_: file to read in then encode/decode Validates that value is in the allowable range. :param value: integer to validate :return: valid integer :raises: argparse.ArgumentTypeError | 3.998743 | 4 |
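# A minimal round-trip sketch of the str.maketrans/str.translate technique used by the
# record above, fixed at the default ROT13 offset; "Hello, World!" is just sample input.
import string

offset = 13
shifted = (string.ascii_lowercase[offset:] + string.ascii_lowercase[:offset] +
           string.ascii_uppercase[offset:] + string.ascii_uppercase[:offset])
table = str.maketrans(string.ascii_lowercase + string.ascii_uppercase, shifted)

encoded = "Hello, World!".translate(table)
print(encoded)                   # Uryyb, Jbeyq!
print(encoded.translate(table))  # ROT13 is its own inverse -> Hello, World!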
onadata/libs/permissions.py | BuildAMovement/whistler-kobocat | 38 | 7728 | <reponame>BuildAMovement/whistler-kobocat
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from guardian.shortcuts import (
assign_perm,
remove_perm,
get_perms,
get_users_with_perms)
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.logger.models import XForm
from onadata.apps.api.models import Project
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
class Role(object):
class_to_permissions = None
permissions = None
name = None
@classmethod
def _remove_obj_permissions(self, user, obj):
content_type = ContentType.objects.get(
model=obj.__class__.__name__.lower(),
app_label=obj.__class__._meta.app_label
)
object_permissions = user.userobjectpermission_set.filter(
object_pk=obj.pk, content_type=content_type)
for perm in object_permissions:
remove_perm(perm.permission.codename, user, obj)
@classmethod
def add(cls, user, obj):
cls._remove_obj_permissions(user, obj)
for codename, klass in cls.permissions:
if type(obj) == klass:
assign_perm(codename, user, obj)
@classmethod
def has_role(cls, permissions, obj):
"""Check that permission correspond to this role for this object.
:param permissions: A list of permissions.
:param obj: An object to get the permissions of.
"""
perms_for_role = set(cls.class_to_permissions[type(obj)])
return perms_for_role.issubset(set(permissions))
@classmethod
def user_has_role(cls, user, obj):
"""Check that a user has this role.
:param user: A user object.
:param obj: An object to get the permissions of.
"""
return user.has_perms(cls.class_to_permissions[type(obj)], obj)
class ReadOnlyRole(Role):
name = 'readonly'
permissions = (
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_XFORM, XForm),
(CAN_VIEW_PROJECT, Project),
)
class DataEntryRole(Role):
name = 'dataentry'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class EditorRole(Role):
name = 'editor'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class ManagerRole(Role):
name = 'manager'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class MemberRole(Role):
"""This is a role for a member of an organization.
"""
name = 'member'
class OwnerRole(Role):
"""This is a role for an owner of a dataset, organization, or project.
"""
name = 'owner'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_ADD_XFORM, XForm),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_CHANGE_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_ADD_USERPROFILE, UserProfile),
(CAN_CHANGE_USERPROFILE, UserProfile),
(CAN_DELETE_USERPROFILE, UserProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_XFORM, OrganizationProfile),
(CAN_CHANGE_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_DELETE_ORGANIZATION_PROFILE, OrganizationProfile),
(IS_ORGANIZATION_OWNER, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_TRANSFER_PROJECT_OWNERSHIP, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
)
ROLES_ORDERED = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
ROLES = {role.name: role for role in ROLES_ORDERED}
# Memoize a class to permissions dict.
for role in ROLES.values():
role.class_to_permissions = defaultdict(list)
[role.class_to_permissions[k].append(p) for p, k in role.permissions]
def is_organization(obj):
try:
obj.organizationprofile
return True
except OrganizationProfile.DoesNotExist:
return False
def get_role(permissions, obj):
for role in reversed(ROLES_ORDERED):
if role.has_role(permissions, obj):
return role.name
def get_role_in_org(user, organization):
perms = get_perms(user, organization)
if 'is_org_owner' in perms:
return OwnerRole.name
else:
return get_role(perms, organization) or MemberRole.name
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'role': get_role(permissions, obj),
'permissions': permissions} for user, permissions in
users_with_perms if not is_organization(
UserProfile.objects.get_or_create(user=user)[0]
)
]
return result
| from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from guardian.shortcuts import (
assign_perm,
remove_perm,
get_perms,
get_users_with_perms)
from onadata.apps.api.models import OrganizationProfile
from onadata.apps.main.models.user_profile import UserProfile
from onadata.apps.logger.models import XForm
from onadata.apps.api.models import Project
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
class Role(object):
class_to_permissions = None
permissions = None
name = None
@classmethod
def _remove_obj_permissions(self, user, obj):
content_type = ContentType.objects.get(
model=obj.__class__.__name__.lower(),
app_label=obj.__class__._meta.app_label
)
object_permissions = user.userobjectpermission_set.filter(
object_pk=obj.pk, content_type=content_type)
for perm in object_permissions:
remove_perm(perm.permission.codename, user, obj)
@classmethod
def add(cls, user, obj):
cls._remove_obj_permissions(user, obj)
for codename, klass in cls.permissions:
if type(obj) == klass:
assign_perm(codename, user, obj)
@classmethod
def has_role(cls, permissions, obj):
"""Check that permission correspond to this role for this object.
:param permissions: A list of permissions.
:param obj: An object to get the permissions of.
"""
perms_for_role = set(cls.class_to_permissions[type(obj)])
return perms_for_role.issubset(set(permissions))
@classmethod
def user_has_role(cls, user, obj):
"""Check that a user has this role.
:param user: A user object.
:param obj: An object to get the permissions of.
"""
return user.has_perms(cls.class_to_permissions[type(obj)], obj)
class ReadOnlyRole(Role):
name = 'readonly'
permissions = (
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_XFORM, XForm),
(CAN_VIEW_PROJECT, Project),
)
class DataEntryRole(Role):
name = 'dataentry'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class EditorRole(Role):
name = 'editor'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class ManagerRole(Role):
name = 'manager'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
)
class MemberRole(Role):
"""This is a role for a member of an organization.
"""
name = 'member'
class OwnerRole(Role):
"""This is a role for an owner of a dataset, organization, or project.
"""
name = 'owner'
permissions = (
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_ADD_XFORM, Project),
(CAN_ADD_XFORM, XForm),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_CHANGE_XFORM, XForm),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_ADD_USERPROFILE, UserProfile),
(CAN_CHANGE_USERPROFILE, UserProfile),
(CAN_DELETE_USERPROFILE, UserProfile),
(CAN_ADD_XFORM_TO_PROFILE, UserProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_ADD_ORGANIZATION_XFORM, OrganizationProfile),
(CAN_CHANGE_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_DELETE_ORGANIZATION_PROFILE, OrganizationProfile),
(IS_ORGANIZATION_OWNER, OrganizationProfile),
(CAN_ADD_XFORM_TO_PROFILE, OrganizationProfile),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_CHANGE_PROJECT, Project),
(CAN_CHANGE_XFORM, XForm),
(CAN_DELETE_PROJECT, Project),
(CAN_DELETE_XFORM, XForm),
(CAN_MOVE_TO_FOLDER, XForm),
(CAN_TRANSFER_OWNERSHIP, XForm),
(CAN_TRANSFER_PROJECT_OWNERSHIP, Project),
(CAN_VIEW_ORGANIZATION_PROFILE, OrganizationProfile),
(CAN_VIEW_PROFILE, UserProfile),
(CAN_VIEW_PROJECT, Project),
(CAN_VIEW_XFORM, XForm),
(CAN_ADD_DATADICTIONARY, XForm),
(CAN_CHANGE_DATADICTIONARY, XForm),
(CAN_DELETE_DATADICTIONARY, XForm),
(CAN_ADD_SUBMISSIONS, XForm),
)
ROLES_ORDERED = [ReadOnlyRole,
DataEntryRole,
EditorRole,
ManagerRole,
OwnerRole]
ROLES = {role.name: role for role in ROLES_ORDERED}
# Memoize a class to permissions dict.
for role in ROLES.values():
role.class_to_permissions = defaultdict(list)
[role.class_to_permissions[k].append(p) for p, k in role.permissions]
def is_organization(obj):
try:
obj.organizationprofile
return True
except OrganizationProfile.DoesNotExist:
return False
def get_role(permissions, obj):
for role in reversed(ROLES_ORDERED):
if role.has_role(permissions, obj):
return role.name
def get_role_in_org(user, organization):
perms = get_perms(user, organization)
if 'is_org_owner' in perms:
return OwnerRole.name
else:
return get_role(perms, organization) or MemberRole.name
def get_object_users_with_permissions(obj, exclude=None, serializable=False):
"""Returns users, roles and permissions for a object.
When called with with `serializable=True`, return usernames (strings)
instead of User objects, which cannot be serialized by REST Framework.
"""
result = []
if obj:
users_with_perms = get_users_with_perms(
obj, attach_perms=True, with_group_users=False).items()
result = [{
'user': user if not serializable else user.username,
'role': get_role(permissions, obj),
'permissions': permissions} for user, permissions in
users_with_perms if not is_organization(
UserProfile.objects.get_or_create(user=user)[0]
)
]
return result | en | 0.86992 | # Userprofile Permissions # Organization Permissions # Xform Permissions # Project Permissions Check that permission correspond to this role for this object. :param permissions: A list of permissions. :param obj: An object to get the permissions of. Check that a user has this role. :param user: A user object. :param obj: An object to get the permissions of. This is a role for a member of an organization. This is a role for an owner of a dataset, organization, or project. # Memoize a class to permissions dict. Returns users, roles and permissions for a object. When called with with `serializable=True`, return usernames (strings) instead of User objects, which cannot be serialized by REST Framework. | 1.920078 | 2 |
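# A standalone sketch (no Django/guardian required) of the subset test behind
# Role.has_role()/get_role() in the record above: a permission set resolves to the
# highest role whose required permissions it covers. The role names and permission
# strings here are simplified examples, not the full onadata sets.
EXAMPLE_ROLE_PERMS = {
    'readonly': {'view_project'},
    'editor': {'view_project', 'change_project', 'add_xform'},
}
EXAMPLE_ORDER = ['readonly', 'editor']  # lowest to highest, like ROLES_ORDERED

def example_get_role(user_perms):
    for name in reversed(EXAMPLE_ORDER):
        if EXAMPLE_ROLE_PERMS[name].issubset(user_perms):
            return name

print(example_get_role({'view_project'}))                                 # readonly
print(example_get_role({'view_project', 'change_project', 'add_xform'}))  # editor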
lanelines.py | gauborg/lane-finding-gborgaonkar | 0 | 7729 | <gh_stars>0
# Self-Driving Car Engineer Nanodegree
#
# ## Project: **Finding Lane Lines on the Road**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import moviepy
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# lists to store the slopes of lines which match our criteria
left_slope = []
right_slope = []
# lists to store the calculate b intercepts of these lines
left_b = []
right_b = []
for line in lines:
for x1,y1,x2,y2 in line:
slope = ((y2-y1)/(x2-x1))
# only select lines with specific slope range
if(((slope < 0.8) and (slope > 0.5)) or ((slope > -0.8) and (slope < -0.5))):
# check where the endpoints lie on the image...
if (x1 < (img.shape[1]/2) and x2 < (img.shape[1]/2)):
left_slope.append(slope)
left_b.append(y1-slope*x1)
left_b.append(y2-slope*x2)
else:
right_slope.append(slope)
right_b.append(y1-slope*x1)
right_b.append(y2-slope*x2)
try:
# we calculate average slope to draw the line
avg_left_slope = sum(left_slope)/len(left_slope)
avg_right_slope = sum(right_slope)/len(right_slope)
avg_left_b = sum(left_b)/len(left_b)
avg_right_b = sum(right_b)/len(right_b)
# Y co-ordinate of the lane line will definitely be at the bottom of the image
y1 = img.shape[0]
y2 = 320
y3 = 320
y4 = img.shape[0]
# X co-ordinate can be calculated by using the eqn of the line and y co-ordinate
x1 = (y1 - avg_left_b)/avg_left_slope
x2 = (y2 - avg_left_b)/avg_left_slope
x3 = (y3 - avg_right_b)/avg_right_slope
x4 = (y4 - avg_right_b)/avg_right_slope
# draw the lines, converting values to integer for pixels
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
cv2.line(img, (int(x3), int(y3)), (int(x4), int(y4)), color, thickness)
except ZeroDivisionError as error:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
import os
directory = os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def lanelines(image):
# 1. Grayscaling
gray = grayscale(image)
# 2. Gaussian Blur
blur = gaussian_blur(gray, 5)
# 3. Canny Detection
canny_edges = canny(blur, 50, 150)
# 4. Region Masking
vertices = np.array([[(0,image.shape[0]),(460,320),(500,320),(image.shape[1],image.shape[0])]], dtype=np.int32)
selected_region = region_of_interest(canny_edges, vertices)
mpimg.imsave(os.path.join("test_images_output/" + "output-" + i), selected_region)
# image.save(os.path.join("test_images_output/" + i + "-canny-region-output"), format=None, dpi=(540, 960))
# Hough Transform Parameters- Identify lane lines in the masked region
# execute Hough Transform
lines_image = hough_lines(selected_region, 2, np.pi/180, 25, 20, 10)
weighted_image = weighted_img(lines_image, image)
return weighted_image
for i in directory:
image = mpimg.imread(os.path.join("test_images/", i))
weighted_image = lanelines(image)
mpimg.imsave(os.path.join("test_images_output/" + "output+" + i), weighted_image)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
# `solidWhiteRight.mp4`
# `solidYellowLeft.mp4`
#
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = lanelines(image)
return result
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| # Self-Driving Car Engineer Nanodegree
#
# ## Project: **Finding Lane Lines on the Road**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import moviepy
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# lists to store the slopes of lines which match our criteria
left_slope = []
right_slope = []
# lists to store the calculate b intercepts of these lines
left_b = []
right_b = []
for line in lines:
for x1,y1,x2,y2 in line:
slope = ((y2-y1)/(x2-x1))
# only select lines with specific slope range
if(((slope < 0.8) and (slope > 0.5)) or ((slope > -0.8) and (slope < -0.5))):
# check where the endpoints lie on the image...
if (x1 < (img.shape[1]/2) and x2 < (img.shape[1]/2)):
left_slope.append(slope)
left_b.append(y1-slope*x1)
left_b.append(y2-slope*x2)
else:
right_slope.append(slope)
right_b.append(y1-slope*x1)
right_b.append(y2-slope*x2)
try:
# we calculate average slope to draw the line
avg_left_slope = sum(left_slope)/len(left_slope)
avg_right_slope = sum(right_slope)/len(right_slope)
avg_left_b = sum(left_b)/len(left_b)
avg_right_b = sum(right_b)/len(right_b)
# Y co-ordinate of the lane line will definitely be at the bottom of the image
y1 = img.shape[0]
y2 = 320
y3 = 320
y4 = img.shape[0]
# X co-ordinate can be calculated by using the eqn of the line and y co-ordinate
x1 = (y1 - avg_left_b)/avg_left_slope
x2 = (y2 - avg_left_b)/avg_left_slope
x3 = (y3 - avg_right_b)/avg_right_slope
x4 = (y4 - avg_right_b)/avg_right_slope
# draw the lines, converting values to integer for pixels
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
cv2.line(img, (int(x3), int(y3)), (int(x4), int(y4)), color, thickness)
except ZeroDivisionError as error:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
import os
directory = os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def lanelines(image):
# 1. Grayscaling
gray = grayscale(image)
# 2. Gaussian Blur
blur = gaussian_blur(gray, 5)
# 3. Canny Detection
canny_edges = canny(blur, 50, 150)
# 4. Region Masking
vertices = np.array([[(0,image.shape[0]),(460,320),(500,320),(image.shape[1],image.shape[0])]], dtype=np.int32)
selected_region = region_of_interest(canny_edges, vertices)
mpimg.imsave(os.path.join("test_images_output/" + "output-" + i), selected_region)
# image.save(os.path.join("test_images_output/" + i + "-canny-region-output"), format=None, dpi=(540, 960))
# Hough Transform Parameters- Identify lane lines in the masked region
# execute Hough Transform
lines_image = hough_lines(selected_region, 2, np.pi/180, 25, 20, 10)
weighted_image = weighted_img(lines_image, image)
return weighted_image
for i in directory:
image = mpimg.imread(os.path.join("test_images/", i))
weighted_image = lanelines(image)
mpimg.imsave(os.path.join("test_images_output/" + "output+" + i), weighted_image)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
# `solidWhiteRight.mp4`
# `solidYellowLeft.mp4`
#
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# Import everything needed to edit/save/watch video clips
import imageio
from moviepy.editor import VideoFileClip
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = lanelines(image)
return result
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False) | en | 0.810601 | # Self-Driving Car Engineer Nanodegree # # ## Project: **Finding Lane Lines on the Road** # ## Import Packages #importing some useful packages #printing out some stats and plotting # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') # ## Ideas for Lane Detection Pipeline # **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:** # # `cv2.inRange()` for color selection # `cv2.fillPoly()` for regions selection # `cv2.line()` to draw lines on an image given endpoints # `cv2.addWeighted()` to coadd / overlay two images # `cv2.cvtColor()` to grayscale or change color # `cv2.imwrite()` to output images to file # `cv2.bitwise_and()` to apply a mask to an image # # **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray') # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) Applies the Canny transform Applies a Gaussian Noise kernel Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. #defining a blank mask to start with #defining a 3 channel or 1 channel color to fill the mask with depending on the input image # i.e. 3 or 4 depending on your image #filling pixels inside the polygon defined by "vertices" with the fill color #returning the image only where mask pixels are nonzero NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below # lists to store the slopes of lines which match our criteria # lists to store the calculate b intercepts of these lines # only select lines with specific slope range # check where the endpoints lie on the image... # we calculate average slope to draw the line # Y co-ordinate of the lane line will definitely be at the bottom of the image # X co-ordinate can be calculated by using the eqn of the line and y co-ordinate # draw the lines, converting values to integer for pixels `img` should be the output of a Canny transform. Returns an image with hough lines drawn. # Python 3 has support for cool math symbols. `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! 
# ## Test Images # # Build your pipeline to work on the images in the directory "test_images" # **You should make sure your pipeline works well on these images before you try the videos.** # TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. # 1. Grayscaling # 2. Gaussian Blur # 3. Canny Detection # 4. Region Masking # image.save(os.path.join("test_images_output/" + i + "-canny-region-output"), format=None, dpi=(540, 960)) # Hough Transform Parameters- Identify lane lines in the masked region # execute Hough Transform # ## Test on Videos # # You know what's cooler than drawing lanes over images? Drawing lanes over video! # # We can test our solution on two provided videos: # `solidWhiteRight.mp4` # `solidYellowLeft.mp4` # # # **If you get an error that looks like this:** # ``` # NeedDownloadError: Need ffmpeg exe. # You can download it by calling: # imageio.plugins.ffmpeg.download() # Import everything needed to edit/save/watch video clips # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image where lines are drawn on lanes) ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) # NOTE: this function expects color images!! | 3.864636 | 4 |
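# A plain-Python sketch of the average-slope / average-intercept extrapolation performed
# by draw_lines() in the record above, so the line-fitting step can be checked without
# OpenCV; the two Hough segments are made-up coordinates for a single lane line.
segments = [(100, 540, 200, 460), (120, 530, 230, 450)]   # (x1, y1, x2, y2)

slopes = [(y2 - y1) / (x2 - x1) for x1, y1, x2, y2 in segments]
intercepts = [y1 - m * x1 for (x1, y1, _, _), m in zip(segments, slopes)]
avg_m = sum(slopes) / len(slopes)
avg_b = sum(intercepts) / len(intercepts)

y_bottom, y_top = 540, 320             # image bottom edge and the masking horizon
x_bottom = (y_bottom - avg_b) / avg_m  # invert y = m*x + b at each end of the line
x_top = (y_top - avg_b) / avg_m
print((int(x_bottom), y_bottom), (int(x_top), y_top))  # endpoints of the averaged lane line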
zict/zip.py | phobson/zict | 0 | 7730 | try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import zipfile
class Zip(MutableMapping):
"""Mutable Mapping interface to a Zip file
Keys must be strings, values must be bytes
Parameters
----------
filename: string
mode: string, ('r', 'w', 'a'), defaults to 'a'
Examples
--------
>>> z = Zip('myfile.zip') # doctest: +SKIP
>>> z['x'] = b'123' # doctest: +SKIP
>>> z['x'] # doctest: +SKIP
b'123'
>>> z.flush() # flush and write metadata to disk # doctest: +SKIP
"""
def __init__(self, filename, mode="a"):
self.filename = filename
self.mode = mode
self._file = None
@property
def file(self):
if self.mode == "closed":
raise OSError("File closed")
if not self._file or not self._file.fp:
self._file = zipfile.ZipFile(self.filename, mode=self.mode)
return self._file
def __getitem__(self, key):
return self.file.read(key)
def __setitem__(self, key, value):
self.file.writestr(key, value)
def keys(self):
return (zi.filename for zi in self.file.filelist)
def values(self):
return map(self.file.read, self.keys())
def items(self):
return ((zi.filename, self.file.read(zi.filename)) for zi in self.file.filelist)
def __iter__(self):
return self.keys()
def __delitem__(self, key):
raise NotImplementedError("Not supported by stdlib zipfile")
def __len__(self):
return len(self.file.filelist)
def flush(self):
self.file.fp.flush()
self.file.close()
def close(self):
self.flush()
self.mode = "closed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
import zipfile
class Zip(MutableMapping):
"""Mutable Mapping interface to a Zip file
Keys must be strings, values must be bytes
Parameters
----------
filename: string
mode: string, ('r', 'w', 'a'), defaults to 'a'
Examples
--------
>>> z = Zip('myfile.zip') # doctest: +SKIP
>>> z['x'] = b'123' # doctest: +SKIP
>>> z['x'] # doctest: +SKIP
b'123'
>>> z.flush() # flush and write metadata to disk # doctest: +SKIP
"""
def __init__(self, filename, mode="a"):
self.filename = filename
self.mode = mode
self._file = None
@property
def file(self):
if self.mode == "closed":
raise OSError("File closed")
if not self._file or not self._file.fp:
self._file = zipfile.ZipFile(self.filename, mode=self.mode)
return self._file
def __getitem__(self, key):
return self.file.read(key)
def __setitem__(self, key, value):
self.file.writestr(key, value)
def keys(self):
return (zi.filename for zi in self.file.filelist)
def values(self):
return map(self.file.read, self.keys())
def items(self):
return ((zi.filename, self.file.read(zi.filename)) for zi in self.file.filelist)
def __iter__(self):
return self.keys()
def __delitem__(self, key):
raise NotImplementedError("Not supported by stdlib zipfile")
def __len__(self):
return len(self.file.filelist)
def flush(self):
self.file.fp.flush()
self.file.close()
def close(self):
self.flush()
self.mode = "closed"
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
| en | 0.65655 | Mutable Mapping interface to a Zip file Keys must be strings, values must be bytes Parameters ---------- filename: string mode: string, ('r', 'w', 'a'), defaults to 'a' Examples -------- >>> z = Zip('myfile.zip') # doctest: +SKIP >>> z['x'] = b'123' # doctest: +SKIP >>> z['x'] # doctest: +SKIP b'123' >>> z.flush() # flush and write metadata to disk # doctest: +SKIP | 3.180754 | 3 |
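# A small usage sketch for the Zip mapping in the record above (assumes the Zip class is
# in scope), mirroring its docstring example as a context manager; "demo.zip" is an
# arbitrary throwaway filename.
with Zip("demo.zip") as z:
    z["greeting"] = b"hello"
    print(list(z.keys()))   # ['greeting']
    print(z["greeting"])    # b'hello'
# Leaving the with-block calls close(), which flushes and finalizes the archive on disk.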
neutron_lbaas/drivers/driver_mixins.py | containers-kraken/neutron-lbaas | 0 | 7731 | # Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.plugins.common import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractproperty
def db_delete_method(self):
pass
@abc.abstractmethod
def create(self, context, obj):
pass
@abc.abstractmethod
def update(self, context, obj_old, obj):
pass
@abc.abstractmethod
def delete(self, context, obj):
pass
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
Sets the provisioning_status of the load balancer and obj to
ACTIVE. Should be called last in the implementor's BaseManagerMixin
methods for successful runs.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
:param delete: set True if being called from a delete method. Will
most likely result in the obj being deleted from the db.
:param lb_create: set True if this is being called after a successful
load balancer create.
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver
# is responsible, then it is also responsible for cleaning it up.
# At this point, the VIP should already be cleaned up, so we are
# just doing neutron lbaas db cleanup.
if (obj == obj.root_loadbalancer and
self.driver.load_balancer.allocates_vip):
# NOTE(blogan): this is quite dumb to do but it is necessary
# so that a false negative pep8 error does not get thrown. An
# "unexpected-keyword-argument" pep8 error occurs bc
# self.db_delete_method is a @property method that returns a
# method.
kwargs = {'delete_vip_port': False}
self.db_delete_method(context, obj.id, **kwargs)
else:
self.db_delete_method(context, obj.id)
if obj == obj.root_loadbalancer and delete:
# Load balancer was deleted and no longer exists
return
lb_op_status = None
lb_p_status = constants.ACTIVE
if obj == obj.root_loadbalancer:
# only set the status to online if this an operation on the
# load balancer
lb_op_status = lb_const.ONLINE
# Update the load balancer's vip address and vip port id if the driver
# was responsible for allocating the vip.
if (self.driver.load_balancer.allocates_vip and lb_create and
isinstance(obj, data_models.LoadBalancer)):
self.driver.plugin.db.update_loadbalancer(
context, obj.id, {'vip_address': obj.vip_address,
'vip_port_id': obj.vip_port_id})
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
if obj == obj.root_loadbalancer or delete:
# Do not want to update the status of the load balancer again
# Or the obj was deleted from the db so no need to update the
# statuses
return
obj_op_status = lb_const.ONLINE
if isinstance(obj, data_models.HealthMonitor):
# Health Monitor does not have an operating status
obj_op_status = None
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ACTIVE, obj_op_status))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ACTIVE,
operating_status=obj_op_status)
def failed_completion(self, context, obj):
"""
Sets the provisioning status of the obj to ERROR. If obj is a
loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should
be called whenever something goes wrong (raised exception) in an
implementor's BaseManagerMixin methods.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
"""
LOG.debug("Starting failed_completion method after a failed driver "
"action.")
if isinstance(obj, data_models.LoadBalancer):
LOG.debug("Updating load balancer {0} to provisioning_status = "
"{1}, operating_status = {2}.".format(
obj.root_loadbalancer.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
LOG.debug("Updating load balancer {0} to "
"provisioning_status = {1}".format(obj.root_loadbalancer.id,
constants.ACTIVE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ACTIVE)
def update_vip(self, context, loadbalancer_id, vip_address,
vip_port_id=None):
lb_update = {'vip_address': vip_address}
if vip_port_id:
lb_update['vip_port_id'] = vip_port_id
self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id,
lb_update)
@six.add_metaclass(abc.ABCMeta)
class BaseRefreshMixin(object):
@abc.abstractmethod
def refresh(self, context, obj):
pass
@six.add_metaclass(abc.ABCMeta)
class BaseStatsMixin(object):
@abc.abstractmethod
def stats(self, context, obj):
pass
| # Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.plugins.common import constants
from oslo_log import log as logging
import six
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
def __init__(self, driver):
self.driver = driver
@abc.abstractproperty
def db_delete_method(self):
pass
@abc.abstractmethod
def create(self, context, obj):
pass
@abc.abstractmethod
def update(self, context, obj_old, obj):
pass
@abc.abstractmethod
def delete(self, context, obj):
pass
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
Sets the provisioning_status of the load balancer and obj to
ACTIVE. Should be called last in the implementor's BaseManagerMixin
methods for successful runs.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
:param delete: set True if being called from a delete method. Will
most likely result in the obj being deleted from the db.
:param lb_create: set True if this is being called after a successful
load balancer create.
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver
# is responsible, then it is also responsible for cleaning it up.
# At this point, the VIP should already be cleaned up, so we are
# just doing neutron lbaas db cleanup.
if (obj == obj.root_loadbalancer and
self.driver.load_balancer.allocates_vip):
# NOTE(blogan): this is quite dumb to do but it is necessary
# so that a false negative pep8 error does not get thrown. An
# "unexpected-keyword-argument" pep8 error occurs bc
# self.db_delete_method is a @property method that returns a
# method.
kwargs = {'delete_vip_port': False}
self.db_delete_method(context, obj.id, **kwargs)
else:
self.db_delete_method(context, obj.id)
if obj == obj.root_loadbalancer and delete:
# Load balancer was deleted and no longer exists
return
lb_op_status = None
lb_p_status = constants.ACTIVE
if obj == obj.root_loadbalancer:
# only set the status to online if this an operation on the
# load balancer
lb_op_status = lb_const.ONLINE
# Update the load balancer's vip address and vip port id if the driver
# was responsible for allocating the vip.
if (self.driver.load_balancer.allocates_vip and lb_create and
isinstance(obj, data_models.LoadBalancer)):
self.driver.plugin.db.update_loadbalancer(
context, obj.id, {'vip_address': obj.vip_address,
'vip_port_id': obj.vip_port_id})
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=lb_p_status,
operating_status=lb_op_status)
if obj == obj.root_loadbalancer or delete:
# Do not want to update the status of the load balancer again
# Or the obj was deleted from the db so no need to update the
# statuses
return
obj_op_status = lb_const.ONLINE
if isinstance(obj, data_models.HealthMonitor):
# Health Monitor does not have an operating status
obj_op_status = None
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ACTIVE, obj_op_status))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ACTIVE,
operating_status=obj_op_status)
def failed_completion(self, context, obj):
"""
Sets the provisioning status of the obj to ERROR. If obj is a
loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should
be called whenever something goes wrong (raised exception) in an
implementor's BaseManagerMixin methods.
:param context: neutron context
:param obj: instance of a
neutron_lbaas.services.loadbalancer.data_model
"""
LOG.debug("Starting failed_completion method after a failed driver "
"action.")
if isinstance(obj, data_models.LoadBalancer):
LOG.debug("Updating load balancer {0} to provisioning_status = "
"{1}, operating_status = {2}.".format(
obj.root_loadbalancer.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
LOG.debug("Updating object of type {0} with id of {1} to "
"provisioning_status = {2}, operating_status = {3}".format(
obj.__class__, obj.id, constants.ERROR,
lb_const.OFFLINE))
self.driver.plugin.db.update_status(
context, obj_sa_cls, obj.id,
provisioning_status=constants.ERROR,
operating_status=lb_const.OFFLINE)
LOG.debug("Updating load balancer {0} to "
"provisioning_status = {1}".format(obj.root_loadbalancer.id,
constants.ACTIVE))
self.driver.plugin.db.update_status(
context, models.LoadBalancer, obj.root_loadbalancer.id,
provisioning_status=constants.ACTIVE)
def update_vip(self, context, loadbalancer_id, vip_address,
vip_port_id=None):
lb_update = {'vip_address': vip_address}
if vip_port_id:
lb_update['vip_port_id'] = vip_port_id
self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id,
lb_update)
@six.add_metaclass(abc.ABCMeta)
class BaseRefreshMixin(object):
@abc.abstractmethod
def refresh(self, context, obj):
pass
@six.add_metaclass(abc.ABCMeta)
class BaseStatsMixin(object):
@abc.abstractmethod
def stats(self, context, obj):
pass
| en | 0.892768 | # Copyright 2014 A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Sets the provisioning_status of the load balancer and obj to ACTIVE. Should be called last in the implementor's BaseManagerMixin methods for successful runs. :param context: neutron context :param obj: instance of a neutron_lbaas.services.loadbalancer.data_model :param delete: set True if being called from a delete method. Will most likely result in the obj being deleted from the db. :param lb_create: set True if this is being called after a successful load balancer create. # Check if driver is responsible for vip allocation. If the driver # is responsible, then it is also responsible for cleaning it up. # At this point, the VIP should already be cleaned up, so we are # just doing neutron lbaas db cleanup. # NOTE(blogan): this is quite dumb to do but it is necessary # so that a false negative pep8 error does not get thrown. An # "unexpected-keyword-argument" pep8 error occurs bc # self.db_delete_method is a @property method that returns a # method. # Load balancer was deleted and no longer exists # only set the status to online if this an operation on the # load balancer # Update the load balancer's vip address and vip port id if the driver # was responsible for allocating the vip. # Do not want to update the status of the load balancer again # Or the obj was deleted from the db so no need to update the # statuses # Health Monitor does not have an operating status Sets the provisioning status of the obj to ERROR. If obj is a loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should be called whenever something goes wrong (raised exception) in an implementor's BaseManagerMixin methods. :param context: neutron context :param obj: instance of a neutron_lbaas.services.loadbalancer.data_model | 1.672251 | 2 |
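The docstrings above spell out the calling convention for these helpers: successful_completion is meant to be the last call in a driver's BaseManagerMixin method on a successful run, and failed_completion is meant to be called when the backend raises. A minimal sketch of that pattern follows; the manager subclass, its create method and the backend call are hypothetical and only illustrate the intended try/except flow, they are not part of neutron-lbaas.
# Illustrative sketch only -- class name, method and backend call are hypothetical.
class LoadBalancerManager(BaseManagerMixin):
    def create(self, context, obj):
        try:
            self.driver.backend.create_load_balancer(obj)  # hypothetical vendor call
        except Exception:
            # mark the object ERROR and flip the root load balancer back to ACTIVE
            self.failed_completion(context, obj)
            raise
        # mark the object ACTIVE/ONLINE and, for a new load balancer, sync vip data
        self.successful_completion(context, obj, lb_create=True)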
Lib/hTools2/modules/ftp.py | miguelsousa/hTools2 | 0 | 7732 | <filename>Lib/hTools2/modules/ftp.py
# [h] hTools2.modules.ftp
"""Tools to connect to a FTP server, upload files etc."""
# This module uses the `ftplib` library to handle FTP connection and upload.
# http://docs.python.org/library/ftplib.html
import os
from ftplib import FTP
def connect_to_server(url, login, password, folder, verbose=False):
"""Connects to the FTP server using the given connection settings.
Use the given ``url``, ``login`` and ``password`` information to make a connection. Move to the given ``folder`` (if it exists), and return a ``FTP`` object.
To get to the lower level details about the FTP connection, use the optional parameter ``verbose=True``.
"""
# create FTP connection
ftp = FTP(url, login, password)
if verbose == True:
print "%s" % ftp.getwelcome()
# move to folder
ftp.cwd(folder)
if verbose == True:
ftp.retrlines('LIST')
print
return ftp
def upload_file(filePath, FTPconnection):
"""Upload the file at ``file_path`` to a FTP server, using the given ``ftp_connection``."""
file = open(filePath, 'rb')
fileName = os.path.split(filePath)[1]
FTPconnection.storbinary('STOR ' + fileName, file)
file.close()
| <filename>Lib/hTools2/modules/ftp.py
# [h] hTools2.modules.ftp
"""Tools to connect to a FTP server, upload files etc."""
# This module uses the `ftplib` library to handle FTP connection and upload.
# http://docs.python.org/library/ftplib.html
import os
from ftplib import FTP
def connect_to_server(url, login, password, folder, verbose=False):
"""Connects to the FTP server using the given connection settings.
Use the given ``url``, ``login`` and ``password`` information to make a connection. Move to the given ``folder`` (if it exists), and return a ``FTP`` object.
To get to the lower level details about the FTP connection, use the optional parameter ``verbose=True``.
"""
# create FTP connection
ftp = FTP(url, login, password)
if verbose == True:
print "%s" % ftp.getwelcome()
# move to folder
ftp.cwd(folder)
if verbose == True:
ftp.retrlines('LIST')
print
return ftp
def upload_file(filePath, FTPconnection):
"""Upload the file at ``file_path`` to a FTP server, using the given ``ftp_connection``."""
file = open(filePath, 'rb')
fileName = os.path.split(filePath)[1]
FTPconnection.storbinary('STOR ' + fileName, file)
file.close()
| en | 0.628141 | # [h] hTools2.modules.ftp Tools to connect to a FTP server, upload files etc. # This module uses the `ftplib` library to handle FTP connection and upload. # http://docs.python.org/library/ftplib.html Connects to the FTP server using the given connection settings. Use the given ``url``, ``login`` and ``password`` information to make a connection. Move to the given ``folder`` (if it exists), and return a ``FTP`` object. To get to the lower level details about the FTP connection, use the optional parameter ``verbose=True``. # create FTP connection # move to folder Upload the file at ``file_path`` to a FTP server, using the given ``ftp_connection``. | 3.347175 | 3 |
network/pytorch2onnx.py | MRsoymilk/toy-car | 0 | 7733 | import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
| import Net
import configparser
import torch
from PIL import Image
config = configparser.ConfigParser()
config.read('./config.ini')
MODEL = config.get("Network", "Model")
transformations = Net.transformations
net = Net.Net()
net.eval()
net.load_state_dict(torch.load(MODEL))
image = Image.open("./html/rwby.jpg")
image = transformations(image).float()
image = torch.autograd.Variable(image[None, ...])
torch.onnx.export(
net,
image,
MODEL.split('pth')[0] + 'onnx',
export_params=True,
output_names=['toy-car']
)
print("finish")
| none | 1 | 2.62809 | 3 |
|
var/spack/repos/builtin/packages/r-gridextra/package.py | player1537-forks/spack | 11 | 7734 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridextra(RPackage):
"""Miscellaneous Functions for "Grid" Graphics.
Provides a number of user-level functions to work with "grid" graphics,
notably to arrange multiple grid-based plots on a page, and draw tables."""
cran = "gridExtras"
version('2.3', sha256='81b60ce6f237ec308555471ae0119158b115463df696d2eca9b177ded8988e3b')
version('2.2.1', sha256='44fe455a5bcdf48a4ece7a542f83e7749cf251dc1df6ae7634470240398c6818')
depends_on('r-gtable', type=('build', 'run'))
| # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGridextra(RPackage):
"""Miscellaneous Functions for "Grid" Graphics.
Provides a number of user-level functions to work with "grid" graphics,
notably to arrange multiple grid-based plots on a page, and draw tables."""
cran = "gridExtras"
version('2.3', sha256='81b60ce6f237ec308555471ae0119158b115463df696d2eca9b177ded8988e3b')
version('2.2.1', sha256='44fe455a5bcdf48a4ece7a542f83e7749cf251dc1df6ae7634470240398c6818')
depends_on('r-gtable', type=('build', 'run'))
| en | 0.797342 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Miscellaneous Functions for "Grid" Graphics. Provides a number of user-level functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables. | 1.171989 | 1 |
tuframework/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | Magnety/tuFramework | 0 | 7735 | import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
| import torch
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.training.network_training.tuframework_variants.data_augmentation.tuframeworkTrainerV2_insaneDA import \
tuframeworkTrainerV2_insaneDA
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
class tuframeworkTrainerV2_MMS(tuframeworkTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
| en | 0.421733 | def run_training(self): from batchviewer import view_batch a = next(self.tr_gen) view_batch(a['data']) import IPython;IPython.embed() | 2.008475 | 2 |
ansible/playbooks/roles/repository/files/download-requirements/src/command/yum.py | romsok24/epiphany | 0 | 7736 | <reponame>romsok24/epiphany
from typing import List
from src.command.command import Command
class Yum(Command):
"""
Interface for `yum`
"""
def __init__(self, retries: int):
super().__init__('yum', retries)
def update(self, enablerepo: str,
package: str = None,
disablerepo: str = '*',
assume_yes: bool = True):
"""
Interface for `yum update`
:param enablerepo:
:param package:
:param disablerepo:
:param assume_yes: if set to True, -y flag will be used
"""
update_parameters: List[str] = ['update']
update_parameters.append('-y' if assume_yes else '')
if package is not None:
update_parameters.append(package)
update_parameters.append(f'--disablerepo={disablerepo}')
update_parameters.append(f'--enablerepo={enablerepo}')
self.run(update_parameters)
def install(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum install -y`
:param package: packaged to be installed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['install', no_ask, package])
def remove(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum remove -y`
:param package: packaged to be removed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['remove', no_ask, package])
def is_repo_enabled(self, repo: str) -> bool:
output = self.run(['repolist',
'enabled']).stdout
if repo in output:
return True
return False
def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
output = self.run(['repolist',
'all']).stdout
repos: List[str] = []
for line in output.split('\n'):
for pattern in patterns:
if pattern in line:
repos.append(pattern)
return repos
def accept_keys(self):
# to accept import of repo's GPG key (for repo_gpgcheck=1)
self.run(['-y', 'repolist'])
def is_repo_available(self, repo: str) -> bool:
retval = self.run(['-q',
'--disablerepo=*',
f'--enablerepo={repo}',
'repoinfo']).returncode
if retval == 0:
return True
return False
def makecache(self, fast: bool = True,
assume_yes: bool = True):
args: List[str] = ['makecache']
args.append('-y' if assume_yes else '')
if fast:
args.append('fast')
self.run(args)
def list_all_repo_info(self) -> List[str]:
args: List[str] = ['repolist',
'-v',
'all']
return self._run_and_filter(args)
| from typing import List
from src.command.command import Command
class Yum(Command):
"""
Interface for `yum`
"""
def __init__(self, retries: int):
super().__init__('yum', retries)
def update(self, enablerepo: str,
package: str = None,
disablerepo: str = '*',
assume_yes: bool = True):
"""
Interface for `yum update`
:param enablerepo:
:param package:
:param disablerepo:
:param assume_yes: if set to True, -y flag will be used
"""
update_parameters: List[str] = ['update']
update_parameters.append('-y' if assume_yes else '')
if package is not None:
update_parameters.append(package)
update_parameters.append(f'--disablerepo={disablerepo}')
update_parameters.append(f'--enablerepo={enablerepo}')
self.run(update_parameters)
def install(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum install -y`
:param package: packaged to be installed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['install', no_ask, package])
def remove(self, package: str,
assume_yes: bool = True):
"""
Interface for `yum remove -y`
:param package: packaged to be removed
:param assume_yes: if set to True, -y flag will be used
"""
no_ask: str = '-y' if assume_yes else ''
self.run(['remove', no_ask, package])
def is_repo_enabled(self, repo: str) -> bool:
output = self.run(['repolist',
'enabled']).stdout
if repo in output:
return True
return False
def find_rhel_repo_id(self, patterns: List[str]) -> List[str]:
output = self.run(['repolist',
'all']).stdout
repos: List[str] = []
for line in output.split('\n'):
for pattern in patterns:
if pattern in line:
repos.append(pattern)
return repos
def accept_keys(self):
# to accept import of repo's GPG key (for repo_gpgcheck=1)
self.run(['-y', 'repolist'])
def is_repo_available(self, repo: str) -> bool:
retval = self.run(['-q',
'--disablerepo=*',
f'--enablerepo={repo}',
'repoinfo']).returncode
if retval == 0:
return True
return False
def makecache(self, fast: bool = True,
assume_yes: bool = True):
args: List[str] = ['makecache']
args.append('-y' if assume_yes else '')
if fast:
args.append('fast')
self.run(args)
def list_all_repo_info(self) -> List[str]:
args: List[str] = ['repolist',
'-v',
'all']
return self._run_and_filter(args) | en | 0.575351 | Interface for `yum` Interface for `yum update` :param enablerepo: :param package: :param disablerepo: :param assume_yes: if set to True, -y flag will be used Interface for `yum install -y` :param package: packaged to be installed :param assume_yes: if set to True, -y flag will be used Interface for `yum remove -y` :param package: packaged to be removed :param assume_yes: if set to True, -y flag will be used # to accept import of repo's GPG key (for repo_gpgcheck=1) | 2.867566 | 3 |
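A hedged usage sketch of the wrapper above, assuming the Command base class (not shown here) supplies the run() and _run_and_filter() plumbing and that the class is in scope; the repository and package names are placeholders.
# usage sketch -- repository and package names are placeholders
yum = Yum(retries=3)
yum.makecache(fast=True)
if not yum.is_repo_enabled('epel'):
    print('epel repository is not enabled')
yum.install('vim-enhanced')
yum.remove('nano')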
build/generate_confirmed_cases_by_counties.py | jtagcat/koroonakaart | 1 | 7737 | from build.chart_data_functions import get_confirmed_cases_by_county
from build.chart_data_functions import get_county_by_day
from build.constants import CONFIRMED_CASES_BY_COUNTIES_PATH
from build.constants import COUNTY_MAPPING
from build.constants import COUNTY_POPULATION
from build.constants import DATE_SETTINGS
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
import pandas as pd
@analyze_time
@analyze_memory
def main():
# Log status
logger.info("Loading local data files")
test_results = read_json_from_file(TEST_RESULTS_PATH)
# Log status
logger.info("Calculating main statistics")
# Create date ranges for charts
case_dates = pd.date_range(start=DATE_SETTINGS["firstCaseDate"], end=YESTERDAY_YMD)
# Get data for each chart
logger.info("Calculating data for charts")
county_by_day = get_county_by_day(
test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION
)
confirmed_cases_by_county = get_confirmed_cases_by_county(
test_results, COUNTY_MAPPING
)
del county_by_day["mapPlayback"]
del county_by_day["mapPlayback10k"]
# Create dictionary for final JSON
logger.info("Compiling final JSON")
final_json = {
"updatedOn": TODAY_DMYHM,
"dataConfirmedCasesByCounties": confirmed_cases_by_county,
"countyByDay": county_by_day,
}
# Dump JSON output
save_as_json(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)
# Log finish time
logger.info("Finished update process")
if __name__ == "__main__":
main()
| from build.chart_data_functions import get_confirmed_cases_by_county
from build.chart_data_functions import get_county_by_day
from build.constants import CONFIRMED_CASES_BY_COUNTIES_PATH
from build.constants import COUNTY_MAPPING
from build.constants import COUNTY_POPULATION
from build.constants import DATE_SETTINGS
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
import pandas as pd
@analyze_time
@analyze_memory
def main():
# Log status
logger.info("Loading local data files")
test_results = read_json_from_file(TEST_RESULTS_PATH)
# Log status
logger.info("Calculating main statistics")
# Create date ranges for charts
case_dates = pd.date_range(start=DATE_SETTINGS["firstCaseDate"], end=YESTERDAY_YMD)
# Get data for each chart
logger.info("Calculating data for charts")
county_by_day = get_county_by_day(
test_results, case_dates, COUNTY_MAPPING, COUNTY_POPULATION
)
confirmed_cases_by_county = get_confirmed_cases_by_county(
test_results, COUNTY_MAPPING
)
del county_by_day["mapPlayback"]
del county_by_day["mapPlayback10k"]
# Create dictionary for final JSON
logger.info("Compiling final JSON")
final_json = {
"updatedOn": TODAY_DMYHM,
"dataConfirmedCasesByCounties": confirmed_cases_by_county,
"countyByDay": county_by_day,
}
# Dump JSON output
save_as_json(CONFIRMED_CASES_BY_COUNTIES_PATH, final_json)
# Log finish time
logger.info("Finished update process")
if __name__ == "__main__":
main()
| en | 0.60802 | # Log status # Log status # Create date ranges for charts # Get data for each chart # Create dictionary for final JSON # Dump JSON output # Log finish time | 2.049486 | 2 |
ros_tf_publisher.py | BrightLamp/PyLearningCodes | 0 | 7738 | # encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('py_tf_broadcaster')
br = tf.TransformBroadcaster()
x = 0.0
y = 0.0
z = 0.0
roll = 0
pitch = 0
yaw = 1.57
rate = rospy.Rate(1)
while not rospy.is_shutdown():
yaw = yaw + 0.1
roll = roll + 0.1
br.sendTransform((x, y, z),
tf.transformations.quaternion_from_euler(roll, pitch, yaw),
rospy.Time.now(),
"base_link",
"front_caster") # 发布base_link到link1的平移和翻转
rate.sleep()
| # encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
rospy.init_node('py_tf_broadcaster')
br = tf.TransformBroadcaster()
x = 0.0
y = 0.0
z = 0.0
roll = 0
pitch = 0
yaw = 1.57
rate = rospy.Rate(1)
while not rospy.is_shutdown():
yaw = yaw + 0.1
roll = roll + 0.1
br.sendTransform((x, y, z),
tf.transformations.quaternion_from_euler(roll, pitch, yaw),
rospy.Time.now(),
"base_link",
"front_caster") # 发布base_link到link1的平移和翻转
rate.sleep()
| zh | 0.391343 | # encoding=utf-8 # 发布base_link到link1的平移和翻转 | 2.334716 | 2 |
dataset_manager/technical_indicators.py | NightingaleV/bakalarska_prace-ann-algotrading | 0 | 7739 | <reponame>NightingaleV/bakalarska_prace-ann-algotrading
# Imports
import numpy as np
class TechnicalIndicators:
cci_constant = 0.015
def __init__(self):
self.df = None
# Exponentially-weighted moving average
def ewma(self, periods):
indicator = 'EWMA{}'.format(periods)
self.df[indicator] = self.df['close'].ewm(span=periods).mean()
return self
# Stochastic Oscillator
def stochastic_oscilator(self, k_period, d_period, smooth=1):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
self.df = self.calc_roll_min(self.df, k_period)
self.df = self.calc_roll_max(self.df, k_period)
self.df = self.stok(self.df, k_period)
if smooth >= 1:
self.df = self.smooth_stok(self.df, smooth)
self.df = self.stod(self.df, d_period)
self.df.drop([lows, highs], axis=1, inplace=True)
return self
@staticmethod
def calc_roll_min(dataset, k_period):
lows = 'l{}'.format(k_period)
dataset[lows] = dataset['low'].rolling(window=k_period).min()
return dataset
@staticmethod
def calc_roll_max(dataset, k_period):
highs = 'h{}'.format(k_period)
dataset[highs] = dataset['high'].rolling(window=k_period).max()
return dataset
@staticmethod
def stok(dataset, k_period):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
dataset['%k'] = ((dataset['close'] - dataset[lows]) / (
dataset[highs] - dataset[lows])) * 100
return dataset
@staticmethod
def smooth_stok(dataset, smooth):
dataset['%k'] = dataset['%k'].rolling(window=smooth).mean()
return dataset
@staticmethod
def stod(dataset, d_period):
dataset['%d'] = dataset['%k'].rolling(window=d_period).mean()
return dataset
# RSI - Relative Strength Index
def rsi_indicator(self, period):
rsi = 'rsi{}'.format(period)
# Calculate differences between prices
deltas = np.diff(self.df['close'])
# For every row calculate rsi
for i, row in self.df.iterrows():
if i < period:
self.df.loc[i, rsi] = 0
else:
self.df.loc[i, rsi] = self.calc_rsi(i, period, deltas)
return self
@staticmethod
def calc_rsi(index, period, deltas):
seed = deltas[index - period:index]
average_gain = seed[seed >= 0].sum() / period
average_loss = seed[seed < 0].sum() / period
if abs(average_loss) == 0:
rs = 0
else:
rs = average_gain / abs(average_loss)
rsi = 100. - (100. / (1 + rs))
return rsi
| # Imports
import numpy as np
class TechnicalIndicators:
cci_constant = 0.015
def __init__(self):
self.df = None
# Exponentially-weighted moving average
def ewma(self, periods):
indicator = 'EWMA{}'.format(periods)
self.df[indicator] = self.df['close'].ewm(span=periods).mean()
return self
# Stochastic Oscillator
def stochastic_oscilator(self, k_period, d_period, smooth=1):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
self.df = self.calc_roll_min(self.df, k_period)
self.df = self.calc_roll_max(self.df, k_period)
self.df = self.stok(self.df, k_period)
if smooth >= 1:
self.df = self.smooth_stok(self.df, smooth)
self.df = self.stod(self.df, d_period)
self.df.drop([lows, highs], axis=1, inplace=True)
return self
@staticmethod
def calc_roll_min(dataset, k_period):
lows = 'l{}'.format(k_period)
dataset[lows] = dataset['low'].rolling(window=k_period).min()
return dataset
@staticmethod
def calc_roll_max(dataset, k_period):
highs = 'h{}'.format(k_period)
dataset[highs] = dataset['high'].rolling(window=k_period).max()
return dataset
@staticmethod
def stok(dataset, k_period):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
dataset['%k'] = ((dataset['close'] - dataset[lows]) / (
dataset[highs] - dataset[lows])) * 100
return dataset
@staticmethod
def smooth_stok(dataset, smooth):
dataset['%k'] = dataset['%k'].rolling(window=smooth).mean()
return dataset
@staticmethod
def stod(dataset, d_period):
dataset['%d'] = dataset['%k'].rolling(window=d_period).mean()
return dataset
# RSI - Relative Strength Index
def rsi_indicator(self, period):
rsi = 'rsi{}'.format(period)
# Calculate differences between prices
deltas = np.diff(self.df['close'])
# For every row calculate rsi
for i, row in self.df.iterrows():
if i < period:
self.df.loc[i, rsi] = 0
else:
self.df.loc[i, rsi] = self.calc_rsi(i, period, deltas)
return self
@staticmethod
def calc_rsi(index, period, deltas):
seed = deltas[index - period:index]
average_gain = seed[seed >= 0].sum() / period
average_loss = seed[seed < 0].sum() / period
if abs(average_loss) == 0:
rs = 0
else:
rs = average_gain / abs(average_loss)
rsi = 100. - (100. / (1 + rs))
return rsi | en | 0.827807 | # Imports # Exponentially-weighted moving average # Stochastic Oscillator # RSI - Relative Strength Index # Calculate differences between prices # For every row calculate rsi | 2.568841 | 3 |
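Since __init__ leaves self.df as None, the indicator methods assume a price DataFrame with 'close', 'low' and 'high' columns is attached before they are called. The sketch below drives the class with synthetic data just to show the call order and the columns that get added; it assumes the class above is in scope and is not part of the original project.
# usage sketch with synthetic OHLC data
import numpy as np
import pandas as pd
prices = pd.DataFrame({
    'close': np.random.uniform(95, 105, 200),
    'low': np.random.uniform(90, 100, 200),
    'high': np.random.uniform(100, 110, 200),
})
ti = TechnicalIndicators()
ti.df = prices                   # the class expects the DataFrame to be attached externally
ti.ewma(20)                      # adds an 'EWMA20' column
ti.stochastic_oscilator(14, 3)   # adds '%k' and '%d'
ti.rsi_indicator(14)             # adds 'rsi14'
print(ti.df.tail())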
users/models.py | makutas/CocktailWebsite | 0 | 7740 | <gh_stars>0
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
user_description = models.CharField(max_length=200, null=True)
user_avatar = models.ImageField(null=True, blank=True)
user_uploaded_recipes = models.IntegerField() # Increment by 1 on upload
def __str__(self):
return f"{self.user.username}"
| from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
user_description = models.CharField(max_length=200, null=True)
user_avatar = models.ImageField(null=True, blank=True)
user_uploaded_recipes = models.IntegerField() # Increment by 1 on upload
def __str__(self):
return f"{self.user.username}" | en | 0.978911 | # Increment by 1 on upload | 2.387786 | 2 |
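The comment on user_uploaded_recipes says the counter is incremented by one on each upload; one plausible, race-safe way to do that is an F() expression, sketched below. The record_upload helper is hypothetical and not part of the original app.
# hypothetical helper illustrating the "increment by 1 on upload" comment
from django.db.models import F
def record_upload(user):
    UserProfile.objects.filter(user=user).update(
        user_uploaded_recipes=F('user_uploaded_recipes') + 1
    )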
deploy/trained_model.py | Samyak005/Multi-Hop-QG | 0 | 7741 |
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
|
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
| en | 0.783578 | # Transformer version 4.9.1 - Newer versions may not work. # CPU may not work, got to check. # device = torch.device('cpu') # device.empty_cache() # CPU may not work, got to check. # device = torch.device('cpu') # device.empty_cache() # CPU may not work, got to check. # device = torch.device('cpu') # device.empty_cache() # CPU may not work, got to check. # device = torch.device('cpu') # device.empty_cache() # if __name__ == "__main__": # review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)." # t5_supp_inference(review_text, md2, device) | 2.109083 | 2 |
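A small usage sketch of the get_inference dispatcher above, reusing the answer/context pair from the commented-out example in the file. It assumes the referenced .pth checkpoints exist under ../trained_models/ and that CUDA (or a usable CPU fallback) is available.
# usage sketch -- depends on the checkpoints referenced above being present
answer = "a fusional language"
context = ("Typologically, Estonian represents a transitional form from "
           "an agglutinating language to a fusional language.")
generated_question = get_inference(answer, context, model_name="t5_supp")
print(generated_question)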
parlai/agents/drqa/config.py | shagunsodhani/ParlAI | 1 | 7742 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_cmdline_args(parser):
# Runtime environment
agent = parser.add_argument_group('DrQA Arguments')
agent.add_argument('--no_cuda', type='bool', default=False)
agent.add_argument('--gpu', type=int, default=-1)
agent.add_argument('--random_seed', type=int, default=1013)
# Basics
agent.add_argument('--embedding_file', type=str, default=None,
help='File of space separated embeddings: w e1 ... ed')
agent.add_argument('--pretrained_model', type=str, default=None,
help='Load dict/features/weights/opts from this file')
agent.add_argument('--log_file', type=str, default=None)
# Model details
agent.add_argument('--fix_embeddings', type='bool', default=True)
agent.add_argument('--tune_partial', type=int, default=0,
help='Train the K most frequent word embeddings')
agent.add_argument('--embedding_dim', type=int, default=300,
help=('Default embedding size if '
'embedding_file is not given'))
agent.add_argument('--hidden_size', type=int, default=128,
help='Hidden size of RNN units')
agent.add_argument('--doc_layers', type=int, default=3,
help='Number of RNN layers for passage')
agent.add_argument('--question_layers', type=int, default=3,
help='Number of RNN layers for question')
agent.add_argument('--rnn_type', type=str, default='lstm',
help='RNN type: lstm (default), gru, or rnn')
# Optimization details
agent.add_argument('--valid_metric', type=str,
choices=['accuracy', 'f1'], default='f1',
help='Metric for choosing best valid model')
agent.add_argument('--max_len', type=int, default=15,
help='The max span allowed during decoding')
agent.add_argument('--rnn_padding', type='bool', default=False)
agent.add_argument('--display_iter', type=int, default=10,
help='Print train error after every \
                       <display_iter> epochs (default 10)')
agent.add_argument('--dropout_emb', type=float, default=0.4,
help='Dropout rate for word embeddings')
agent.add_argument('--dropout_rnn', type=float, default=0.4,
help='Dropout rate for RNN states')
agent.add_argument('--dropout_rnn_output', type='bool', default=True,
help='Whether to dropout the RNN output')
agent.add_argument('--optimizer', type=str, default='adamax',
help='Optimizer: sgd or adamax (default)')
agent.add_argument('--learning_rate', '-lr', type=float, default=0.1,
help='Learning rate for SGD (default 0.1)')
agent.add_argument('--grad_clipping', type=float, default=10,
help='Gradient clipping (default 10.0)')
agent.add_argument('--weight_decay', type=float, default=0,
help='Weight decay (default 0)')
agent.add_argument('--momentum', type=float, default=0,
help='Momentum (default 0)')
# Model-specific
agent.add_argument('--concat_rnn_layers', type='bool', default=True)
agent.add_argument('--question_merge', type=str, default='self_attn',
help='The way of computing question representation')
agent.add_argument('--use_qemb', type='bool', default=True,
help='Whether to use weighted question embeddings')
agent.add_argument('--use_in_question', type='bool', default=True,
help='Whether to use in_question features')
agent.add_argument('--use_tf', type='bool', default=True,
help='Whether to use tf features')
agent.add_argument('--use_time', type=int, default=0,
help='Time features marking how recent word was said')
def set_defaults(opt):
# Embeddings options
if opt.get('embedding_file'):
if not os.path.isfile(opt['embedding_file']):
            raise IOError('No such file: %s' % opt['embedding_file'])
with open(opt['embedding_file']) as f:
dim = len(f.readline().strip().split(' ')) - 1
opt['embedding_dim'] = dim
elif not opt.get('embedding_dim'):
raise RuntimeError(('Either embedding_file or embedding_dim '
'needs to be specified.'))
# Make sure tune_partial and fix_embeddings are consistent
if opt['tune_partial'] > 0 and opt['fix_embeddings']:
print('Setting fix_embeddings to False as tune_partial > 0.')
opt['fix_embeddings'] = False
# Make sure fix_embeddings and embedding_file are consistent
if opt['fix_embeddings']:
if not opt.get('embedding_file') and not opt.get('pretrained_model'):
print('Setting fix_embeddings to False as embeddings are random.')
opt['fix_embeddings'] = False
def override_args(opt, override_opt):
# Major model args are reset to the values in override_opt.
# Non-architecture args (like dropout) are kept.
args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers',
'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers',
'question_merge', 'use_qemb', 'use_in_question', 'use_tf',
'vocab_size', 'num_features', 'use_time'])
for k, v in override_opt.items():
if k in args:
opt[k] = v
| # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_cmdline_args(parser):
# Runtime environment
agent = parser.add_argument_group('DrQA Arguments')
agent.add_argument('--no_cuda', type='bool', default=False)
agent.add_argument('--gpu', type=int, default=-1)
agent.add_argument('--random_seed', type=int, default=1013)
# Basics
agent.add_argument('--embedding_file', type=str, default=None,
help='File of space separated embeddings: w e1 ... ed')
agent.add_argument('--pretrained_model', type=str, default=None,
help='Load dict/features/weights/opts from this file')
agent.add_argument('--log_file', type=str, default=None)
# Model details
agent.add_argument('--fix_embeddings', type='bool', default=True)
agent.add_argument('--tune_partial', type=int, default=0,
help='Train the K most frequent word embeddings')
agent.add_argument('--embedding_dim', type=int, default=300,
help=('Default embedding size if '
'embedding_file is not given'))
agent.add_argument('--hidden_size', type=int, default=128,
help='Hidden size of RNN units')
agent.add_argument('--doc_layers', type=int, default=3,
help='Number of RNN layers for passage')
agent.add_argument('--question_layers', type=int, default=3,
help='Number of RNN layers for question')
agent.add_argument('--rnn_type', type=str, default='lstm',
help='RNN type: lstm (default), gru, or rnn')
# Optimization details
agent.add_argument('--valid_metric', type=str,
choices=['accuracy', 'f1'], default='f1',
help='Metric for choosing best valid model')
agent.add_argument('--max_len', type=int, default=15,
help='The max span allowed during decoding')
agent.add_argument('--rnn_padding', type='bool', default=False)
agent.add_argument('--display_iter', type=int, default=10,
help='Print train error after every \
                       <display_iter> epochs (default 10)')
agent.add_argument('--dropout_emb', type=float, default=0.4,
help='Dropout rate for word embeddings')
agent.add_argument('--dropout_rnn', type=float, default=0.4,
help='Dropout rate for RNN states')
agent.add_argument('--dropout_rnn_output', type='bool', default=True,
help='Whether to dropout the RNN output')
agent.add_argument('--optimizer', type=str, default='adamax',
help='Optimizer: sgd or adamax (default)')
agent.add_argument('--learning_rate', '-lr', type=float, default=0.1,
help='Learning rate for SGD (default 0.1)')
agent.add_argument('--grad_clipping', type=float, default=10,
help='Gradient clipping (default 10.0)')
agent.add_argument('--weight_decay', type=float, default=0,
help='Weight decay (default 0)')
agent.add_argument('--momentum', type=float, default=0,
help='Momentum (default 0)')
# Model-specific
agent.add_argument('--concat_rnn_layers', type='bool', default=True)
agent.add_argument('--question_merge', type=str, default='self_attn',
help='The way of computing question representation')
agent.add_argument('--use_qemb', type='bool', default=True,
help='Whether to use weighted question embeddings')
agent.add_argument('--use_in_question', type='bool', default=True,
help='Whether to use in_question features')
agent.add_argument('--use_tf', type='bool', default=True,
help='Whether to use tf features')
agent.add_argument('--use_time', type=int, default=0,
help='Time features marking how recent word was said')
def set_defaults(opt):
# Embeddings options
if opt.get('embedding_file'):
if not os.path.isfile(opt['embedding_file']):
            raise IOError('No such file: %s' % opt['embedding_file'])
with open(opt['embedding_file']) as f:
dim = len(f.readline().strip().split(' ')) - 1
opt['embedding_dim'] = dim
elif not opt.get('embedding_dim'):
raise RuntimeError(('Either embedding_file or embedding_dim '
'needs to be specified.'))
# Make sure tune_partial and fix_embeddings are consistent
if opt['tune_partial'] > 0 and opt['fix_embeddings']:
print('Setting fix_embeddings to False as tune_partial > 0.')
opt['fix_embeddings'] = False
# Make sure fix_embeddings and embedding_file are consistent
if opt['fix_embeddings']:
if not opt.get('embedding_file') and not opt.get('pretrained_model'):
print('Setting fix_embeddings to False as embeddings are random.')
opt['fix_embeddings'] = False
def override_args(opt, override_opt):
# Major model args are reset to the values in override_opt.
# Non-architecture args (like dropout) are kept.
args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers',
'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers',
'question_merge', 'use_qemb', 'use_in_question', 'use_tf',
'vocab_size', 'num_features', 'use_time'])
for k, v in override_opt.items():
if k in args:
opt[k] = v
| en | 0.859947 | # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # Runtime environment # Basics # Model details # Optimization details # Model-specific # Embeddings options # Make sure tune_partial and fix_embeddings are consistent # Make sure fix_embeddings and embedding_file are consistent # Major model args are reset to the values in override_opt. # Non-architecture args (like dropout) are kept. | 2.096931 | 2 |
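A small illustration of the consistency checks in set_defaults() above: with no embedding file, no pretrained model and fix_embeddings left at True, the option is flipped to False. This only exercises set_defaults() on a plain dict (assuming the module's functions are in scope); it does not go through ParlAI's argument parser.
# exercises only the consistency checks in set_defaults()
opt = {
    'embedding_file': None,
    'embedding_dim': 300,
    'tune_partial': 0,
    'fix_embeddings': True,
    'pretrained_model': None,
}
set_defaults(opt)
print(opt['fix_embeddings'])  # False: embeddings are random, so they are not fixed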
gen4service/gen4bean.py | yongli82/CodeGenerator | 0 | 7743 | <filename>gen4service/gen4bean.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.path.append("..")
sys.setdefaultencoding('utf-8')
from jinja2 import Environment
from jinja2 import Template
import re
from sqlalchemy import schema, types
from sqlalchemy.engine import create_engine
import yyutil
import CodeGen
project_name = "budget"
data_name = "BudgetReport"
table_name = "FC_BudgetBaseInfo"
searchBeanPackage="com.dianping.ba.finance.budget.api.beans"
searchBeanName="BudgetReportSearchBean"
searchBeanField="""
private int budgetTypeId;
private int costDepartmentId;
private String budgetOwnerNo;
private boolean exceedBudget;
private boolean withExpenseType;
private int beginYear;
private int beginMonth;
private int endYear;
private int endMonth;
"""
dataBeanPackage="com.dianping.ba.finance.budget.api.beans"
dataBeanName="BudgetYearReportDisplayBean"
dataBeanField="""
private int budgetYear;
private int budgetTypeId;
private String budgetTypeNo;
private String budgetTypeName;
private int costDepartmentId;
private String costDepartmentName;
private String budgetOwnerNo;
private String budgetOwnerName;
private int budgetStatus;
private String budgetStatusName;
private int budgetPlanId;
private String budgetPlanNo;
private int strategyId;
private int strategyPeriodType;
private String strategyPeriodTypeName;
private BigDecimal yearTotalAmount;
private BigDecimal yearAvailableAmount;
private BigDecimal yearUsedAmount;
private BigDecimal yearFrozenAmount;
private BigDecimal quarterTotalAmount1;
private BigDecimal quarterAvailableAmount1;
private BigDecimal quarterUsedAmount1;
private BigDecimal quarterFrozenAmount1;
private BigDecimal quarterTotalAmount2;
private BigDecimal quarterAvailableAmount2;
private BigDecimal quarterUsedAmount2;
private BigDecimal quarterFrozenAmount2;
private BigDecimal quarterTotalAmount3;
private BigDecimal quarterAvailableAmount3;
private BigDecimal quarterUsedAmount3;
private BigDecimal quarterFrozenAmount3;
private BigDecimal quarterTotalAmount4;
private BigDecimal quarterAvailableAmount4;
private BigDecimal quarterUsedAmount4;
private BigDecimal quarterFrozenAmount4;
private BigDecimal monthTotalAmount1;
private BigDecimal monthAvailableAmount1;
private BigDecimal monthUsedAmount1;
private BigDecimal monthFrozenAmount1;
private BigDecimal monthTotalAmount2;
private BigDecimal monthAvailableAmount2;
private BigDecimal monthUsedAmount2;
private BigDecimal monthFrozenAmount2;
private BigDecimal monthTotalAmount3;
private BigDecimal monthAvailableAmount3;
private BigDecimal monthUsedAmount3;
private BigDecimal monthFrozenAmount3;
private BigDecimal monthTotalAmount4;
private BigDecimal monthAvailableAmount4;
private BigDecimal monthUsedAmount4;
private BigDecimal monthFrozenAmount4;
private BigDecimal monthTotalAmount5;
private BigDecimal monthAvailableAmount5;
private BigDecimal monthUsedAmount5;
private BigDecimal monthFrozenAmount5;
private BigDecimal monthTotalAmount6;
private BigDecimal monthAvailableAmount6;
private BigDecimal monthUsedAmount6;
private BigDecimal monthFrozenAmount6;
private BigDecimal monthTotalAmount7;
private BigDecimal monthAvailableAmount7;
private BigDecimal monthUsedAmount7;
private BigDecimal monthFrozenAmount7;
private BigDecimal monthTotalAmount8;
private BigDecimal monthAvailableAmount8;
private BigDecimal monthUsedAmount8;
private BigDecimal monthFrozenAmount8;
private BigDecimal monthTotalAmount9;
private BigDecimal monthAvailableAmount9;
private BigDecimal monthUsedAmount9;
private BigDecimal monthFrozenAmount9;
private BigDecimal monthTotalAmount10;
private BigDecimal monthAvailableAmount10;
private BigDecimal monthUsedAmount10;
private BigDecimal monthFrozenAmount10;
private BigDecimal monthTotalAmount11;
private BigDecimal monthAvailableAmount11;
private BigDecimal monthUsedAmount11;
private BigDecimal monthFrozenAmount11;
private BigDecimal monthTotalAmount12;
private BigDecimal monthAvailableAmount12;
private BigDecimal monthUsedAmount12;
private BigDecimal monthFrozenAmount12;
"""
columns = yyutil.convert_bean_to_columns(dataBeanField)
search_columns = yyutil.convert_bean_to_columns(searchBeanField)
jinja2_env = CodeGen.getEnvironment("gen4service")
template = jinja2_env.get_template("bean_code_template.md")
#snippet = template.render(table_name=table_name, data_name=data_name, columns=columns)
snippet = template.render(locals())
print snippet
with open(data_name + "_generate.md", 'wb') as f:
f.write(snippet)
f.flush()
f.close()
os.system("open " + data_name + "_generate.md")
| <filename>gen4service/gen4bean.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.path.append("..")
sys.setdefaultencoding('utf-8')
from jinja2 import Environment
from jinja2 import Template
import re
from sqlalchemy import schema, types
from sqlalchemy.engine import create_engine
import yyutil
import CodeGen
project_name = "budget"
data_name = "BudgetReport"
table_name = "FC_BudgetBaseInfo"
searchBeanPackage="com.dianping.ba.finance.budget.api.beans"
searchBeanName="BudgetReportSearchBean"
searchBeanField="""
private int budgetTypeId;
private int costDepartmentId;
private String budgetOwnerNo;
private boolean exceedBudget;
private boolean withExpenseType;
private int beginYear;
private int beginMonth;
private int endYear;
private int endMonth;
"""
dataBeanPackage="com.dianping.ba.finance.budget.api.beans"
dataBeanName="BudgetYearReportDisplayBean"
dataBeanField="""
private int budgetYear;
private int budgetTypeId;
private String budgetTypeNo;
private String budgetTypeName;
private int costDepartmentId;
private String costDepartmentName;
private String budgetOwnerNo;
private String budgetOwnerName;
private int budgetStatus;
private String budgetStatusName;
private int budgetPlanId;
private String budgetPlanNo;
private int strategyId;
private int strategyPeriodType;
private String strategyPeriodTypeName;
private BigDecimal yearTotalAmount;
private BigDecimal yearAvailableAmount;
private BigDecimal yearUsedAmount;
private BigDecimal yearFrozenAmount;
private BigDecimal quarterTotalAmount1;
private BigDecimal quarterAvailableAmount1;
private BigDecimal quarterUsedAmount1;
private BigDecimal quarterFrozenAmount1;
private BigDecimal quarterTotalAmount2;
private BigDecimal quarterAvailableAmount2;
private BigDecimal quarterUsedAmount2;
private BigDecimal quarterFrozenAmount2;
private BigDecimal quarterTotalAmount3;
private BigDecimal quarterAvailableAmount3;
private BigDecimal quarterUsedAmount3;
private BigDecimal quarterFrozenAmount3;
private BigDecimal quarterTotalAmount4;
private BigDecimal quarterAvailableAmount4;
private BigDecimal quarterUsedAmount4;
private BigDecimal quarterFrozenAmount4;
private BigDecimal monthTotalAmount1;
private BigDecimal monthAvailableAmount1;
private BigDecimal monthUsedAmount1;
private BigDecimal monthFrozenAmount1;
private BigDecimal monthTotalAmount2;
private BigDecimal monthAvailableAmount2;
private BigDecimal monthUsedAmount2;
private BigDecimal monthFrozenAmount2;
private BigDecimal monthTotalAmount3;
private BigDecimal monthAvailableAmount3;
private BigDecimal monthUsedAmount3;
private BigDecimal monthFrozenAmount3;
private BigDecimal monthTotalAmount4;
private BigDecimal monthAvailableAmount4;
private BigDecimal monthUsedAmount4;
private BigDecimal monthFrozenAmount4;
private BigDecimal monthTotalAmount5;
private BigDecimal monthAvailableAmount5;
private BigDecimal monthUsedAmount5;
private BigDecimal monthFrozenAmount5;
private BigDecimal monthTotalAmount6;
private BigDecimal monthAvailableAmount6;
private BigDecimal monthUsedAmount6;
private BigDecimal monthFrozenAmount6;
private BigDecimal monthTotalAmount7;
private BigDecimal monthAvailableAmount7;
private BigDecimal monthUsedAmount7;
private BigDecimal monthFrozenAmount7;
private BigDecimal monthTotalAmount8;
private BigDecimal monthAvailableAmount8;
private BigDecimal monthUsedAmount8;
private BigDecimal monthFrozenAmount8;
private BigDecimal monthTotalAmount9;
private BigDecimal monthAvailableAmount9;
private BigDecimal monthUsedAmount9;
private BigDecimal monthFrozenAmount9;
private BigDecimal monthTotalAmount10;
private BigDecimal monthAvailableAmount10;
private BigDecimal monthUsedAmount10;
private BigDecimal monthFrozenAmount10;
private BigDecimal monthTotalAmount11;
private BigDecimal monthAvailableAmount11;
private BigDecimal monthUsedAmount11;
private BigDecimal monthFrozenAmount11;
private BigDecimal monthTotalAmount12;
private BigDecimal monthAvailableAmount12;
private BigDecimal monthUsedAmount12;
private BigDecimal monthFrozenAmount12;
"""
columns = yyutil.convert_bean_to_columns(dataBeanField)
search_columns = yyutil.convert_bean_to_columns(searchBeanField)
jinja2_env = CodeGen.getEnvironment("gen4service")
template = jinja2_env.get_template("bean_code_template.md")
#snippet = template.render(table_name=table_name, data_name=data_name, columns=columns)
snippet = template.render(locals())
print snippet
with open(data_name + "_generate.md", 'wb') as f:
f.write(snippet)
f.flush()
f.close()
os.system("open " + data_name + "_generate.md")
| en | 0.442358 | #!/usr/bin/python # -*- coding: utf-8 -*- private int budgetTypeId; private int costDepartmentId; private String budgetOwnerNo; private boolean exceedBudget; private boolean withExpenseType; private int beginYear; private int beginMonth; private int endYear; private int endMonth; private int budgetYear; private int budgetTypeId; private String budgetTypeNo; private String budgetTypeName; private int costDepartmentId; private String costDepartmentName; private String budgetOwnerNo; private String budgetOwnerName; private int budgetStatus; private String budgetStatusName; private int budgetPlanId; private String budgetPlanNo; private int strategyId; private int strategyPeriodType; private String strategyPeriodTypeName; private BigDecimal yearTotalAmount; private BigDecimal yearAvailableAmount; private BigDecimal yearUsedAmount; private BigDecimal yearFrozenAmount; private BigDecimal quarterTotalAmount1; private BigDecimal quarterAvailableAmount1; private BigDecimal quarterUsedAmount1; private BigDecimal quarterFrozenAmount1; private BigDecimal quarterTotalAmount2; private BigDecimal quarterAvailableAmount2; private BigDecimal quarterUsedAmount2; private BigDecimal quarterFrozenAmount2; private BigDecimal quarterTotalAmount3; private BigDecimal quarterAvailableAmount3; private BigDecimal quarterUsedAmount3; private BigDecimal quarterFrozenAmount3; private BigDecimal quarterTotalAmount4; private BigDecimal quarterAvailableAmount4; private BigDecimal quarterUsedAmount4; private BigDecimal quarterFrozenAmount4; private BigDecimal monthTotalAmount1; private BigDecimal monthAvailableAmount1; private BigDecimal monthUsedAmount1; private BigDecimal monthFrozenAmount1; private BigDecimal monthTotalAmount2; private BigDecimal monthAvailableAmount2; private BigDecimal monthUsedAmount2; private BigDecimal monthFrozenAmount2; private BigDecimal monthTotalAmount3; private BigDecimal monthAvailableAmount3; private BigDecimal monthUsedAmount3; private BigDecimal monthFrozenAmount3; private BigDecimal monthTotalAmount4; private BigDecimal monthAvailableAmount4; private BigDecimal monthUsedAmount4; private BigDecimal monthFrozenAmount4; private BigDecimal monthTotalAmount5; private BigDecimal monthAvailableAmount5; private BigDecimal monthUsedAmount5; private BigDecimal monthFrozenAmount5; private BigDecimal monthTotalAmount6; private BigDecimal monthAvailableAmount6; private BigDecimal monthUsedAmount6; private BigDecimal monthFrozenAmount6; private BigDecimal monthTotalAmount7; private BigDecimal monthAvailableAmount7; private BigDecimal monthUsedAmount7; private BigDecimal monthFrozenAmount7; private BigDecimal monthTotalAmount8; private BigDecimal monthAvailableAmount8; private BigDecimal monthUsedAmount8; private BigDecimal monthFrozenAmount8; private BigDecimal monthTotalAmount9; private BigDecimal monthAvailableAmount9; private BigDecimal monthUsedAmount9; private BigDecimal monthFrozenAmount9; private BigDecimal monthTotalAmount10; private BigDecimal monthAvailableAmount10; private BigDecimal monthUsedAmount10; private BigDecimal monthFrozenAmount10; private BigDecimal monthTotalAmount11; private BigDecimal monthAvailableAmount11; private BigDecimal monthUsedAmount11; private BigDecimal monthFrozenAmount11; private BigDecimal monthTotalAmount12; private BigDecimal monthAvailableAmount12; private BigDecimal monthUsedAmount12; private BigDecimal monthFrozenAmount12; #snippet = template.render(table_name=table_name, data_name=data_name, columns=columns) | 1.944962 | 2 |
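Editor's note: the row above generates bean boilerplate by turning Java field declarations into columns and rendering a Jinja2 template with locals(). Below is a minimal Python 3 sketch of that pattern; yyutil.convert_bean_to_columns and the CodeGen template environment are not shown in this row, so the parsing helper and inline template here are assumptions rather than the project's actual code.
import re
from jinja2 import Template

def convert_bean_to_columns(bean_field_text):
    # Parse lines like "private BigDecimal monthTotalAmount1;" into {"type", "name"} dicts.
    pattern = re.compile(r"private\s+(\w+)\s+(\w+);")
    return [{"type": t, "name": n} for t, n in pattern.findall(bean_field_text)]

bean_field = """
private int budgetYear;
private BigDecimal yearTotalAmount;
"""
columns = convert_bean_to_columns(bean_field)
template = Template("{% for c in columns %}{{ c.type }} {{ c.name }}\n{% endfor %}")
print(template.render(columns=columns))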
Log_tao.py | zigzax/Basic_Python | 0 | 7744 | <gh_stars>0
Python 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
>>> tao = turtle.Turtle()
>>> tao.shape('turtle')
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> for i in range(4)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)tao.left(90)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)
tao.left(90)
>>> range (4)
range(0, 4)
>>> list (range(4))
[0, 1, 2, 3]
>>> for i in range(5)
SyntaxError: invalid syntax
>>> for i in range(5):
print(i)
0
1
2
3
4
\
>>> for i in range(5):
print(i)
0
1
2
3
4
>>> for i in range[10,50,90]:
print(i)
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
for i in range[10,50,90]:
TypeError: 'type' object is not subscriptable
>>> for i in[10,50,90]:
print(i)
10
50
90
>>> range (1,10)
range(1, 10)
>>> list (range(1,10))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> tao.reset()
>>> for i in range (4):
tao.forward(100)
tao.left(90)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> def regtangle():
for i in range(4):
tao.forward(100)
tao.left(90)
>>> regtangle()
>>> tao.reset()
>>> for i in range(10):
regtangle()
tao.left(36)
>>> tao.reset()
>>> | Python 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
>>> tao = turtle.Turtle()
>>> tao.shape('turtle')
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> for i in range(4)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)tao.left(90)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)
tao.left(90)
>>> range (4)
range(0, 4)
>>> list (range(4))
[0, 1, 2, 3]
>>> for i in range(5)
SyntaxError: invalid syntax
>>> for i in range(5):
print(i)
0
1
2
3
4
\
>>> for i in range(5):
print(i)
0
1
2
3
4
>>> for i in range[10,50,90]:
print(i)
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
for i in range[10,50,90]:
TypeError: 'type' object is not subscriptable
>>> for i in[10,50,90]:
print(i)
10
50
90
>>> range (1,10)
range(1, 10)
>>> list (range(1,10))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> tao.reset()
>>> for i in range (4):
tao.forward(100)
tao.left(90)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> def regtangle():
for i in range(4):
tao.forward(100)
tao.left(90)
>>> regtangle()
>>> tao.reset()
>>> for i in range(10):
regtangle()
tao.left(36)
>>> tao.reset()
>>> | en | 0.547958 | #28>", line 1, in <module> | 3.64823 | 4 |
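Editor's note: the shell transcript above works up, step by step, from repeated forward/left calls to a reusable square function and a ten-fold rotated pattern. The consolidated script below is a sketch of where the session ends up; like the transcript, it needs a graphical display to run.
import turtle

def square(tao, side=100):
    for _ in range(4):          # four sides, turning 90 degrees each time
        tao.forward(side)
        tao.left(90)

tao = turtle.Turtle()
tao.shape('turtle')
for _ in range(10):             # ten squares rotated by 36 degrees = a full circle
    square(tao)
    tao.left(36)
turtle.done()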
run.py | pome-ta/CodeMirror | 0 | 7745 | """
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
class View(ui.View):
def __init__(self):
self.wv = wkwebview.WKWebView(flex='WH')
self.wv.load_url(str(uri))
self.add_subview(self.wv)
def will_close(self):
self.wv.clear_cache()
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
| """
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
class View(ui.View):
def __init__(self):
self.wv = wkwebview.WKWebView(flex='WH')
self.wv.load_url(str(uri))
self.add_subview(self.wv)
def will_close(self):
self.wv.clear_cache()
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
| en | 0.221032 | Pythonista3 app CodeMirror | 2.324605 | 2 |
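Editor's note: the viewer above hands a relative pathlib.Path straight to load_url; pythonista.wkwebview is iOS-only, so the portable part worth noting is the path handling. A stdlib-only sketch of resolving the page to an absolute file:// URL, which web views generally prefer, follows.
import pathlib

uri = pathlib.Path('./main_index.html')
absolute_url = uri.resolve().as_uri()   # e.g. file:///.../main_index.html
print(absolute_url)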
gen_cnn_dataset.py | NPCai/graphene-py | 5 | 7746 | <gh_stars>1-10
import wrapper as w
from multiprocessing import Process
import atexit
import time
from queue import Queue
import concurrent.futures  # needed for the ThreadPoolExecutor used in Worker.run below
''' 8 Processes, 24 threads per process = 192 threads '''
NUM_PROCESSES = 8
workerList = [] # Worker processes
class Worker(Process): # Need multiple threads or else it takes forever
def __init__(self, queue): # filNum is the id of the file to extract from
super().__init__()
self.queue = queue
self.outQueue = Queue()
    def run(self):
        with concurrent.futures.ThreadPoolExecutor(max_workers=24) as executor:
            for _ in range(24):
                executor.submit(self.loadUrl)  # submit the callable itself; calling it here would run it in the main thread
    def loadUrl(self):
        while not self.queue.empty():
            sentence = self.queue.get()
            ex = w.GrapheneExtract(sentence)
            self.outQueue.put(sentence.strip() + "\t" + str(ex.json) + "\n")
queues = []  # Use separate queues to avoid waiting for locks
with open("data/all_news.txt", "r") as news:
    sentences = [line.strip() for line in news if line.strip()]
chunk_size = max(1, len(sentences) // NUM_PROCESSES)
for i in range(NUM_PROCESSES):
    queue = Queue()
    for sentence in sentences[i * chunk_size:(i + 1) * chunk_size]:
        queue.put(sentence)
    queues.append(queue)  # one queue per worker process
print("Queue populated")
for i in range(NUM_PROCESSES):
worker = Worker(queues[i])
worker.daemon = True
worker.start()
workerList.append(worker)
def close_running_threads():
for thread in workerList:
thread.join()
atexit.register(close_running_threads)
print("All threads registered and working.")
while True:
    print(sum(q.qsize() for q in queues), "sentences remaining to be requested")
time.sleep(2) # Print every two seconds | import wrapper as w
from multiprocessing import Process
import atexit
import time
from queue import Queue
import concurrent.futures  # needed for the ThreadPoolExecutor used in Worker.run below
''' 8 Processes, 24 threads per process = 192 threads '''
NUM_PROCESSES = 8
workerList = [] # Worker processes
class Worker(Process): # Need multiple threads or else it takes forever
def __init__(self, queue): # filNum is the id of the file to extract from
super().__init__()
self.queue = queue
self.outQueue = Queue()
    def run(self):
        with concurrent.futures.ThreadPoolExecutor(max_workers=24) as executor:
            for _ in range(24):
                executor.submit(self.loadUrl)  # submit the callable itself; calling it here would run it in the main thread
    def loadUrl(self):
        while not self.queue.empty():
            sentence = self.queue.get()
            ex = w.GrapheneExtract(sentence)
            self.outQueue.put(sentence.strip() + "\t" + str(ex.json) + "\n")
queues = []  # Use separate queues to avoid waiting for locks
with open("data/all_news.txt", "r") as news:
    sentences = [line.strip() for line in news if line.strip()]
chunk_size = max(1, len(sentences) // NUM_PROCESSES)
for i in range(NUM_PROCESSES):
    queue = Queue()
    for sentence in sentences[i * chunk_size:(i + 1) * chunk_size]:
        queue.put(sentence)
    queues.append(queue)  # one queue per worker process
print("Queue populated")
for i in range(NUM_PROCESSES):
worker = Worker(queues[i])
worker.daemon = True
worker.start()
workerList.append(worker)
def close_running_threads():
for thread in workerList:
thread.join()
atexit.register(close_running_threads)
print("All threads registered and working.")
while True:
    print(sum(q.qsize() for q in queues), "sentences remaining to be requested")
time.sleep(2) # Print every two seconds | en | 0.834792 | 8 Processes, 24 threads per process = 192 threads # Worker processes # Need multiple threads or else it takes forever # filNum is the id of the file to extract from # Use seperate queues to avoid waiting for locks # Print every two seconds | 3.056899 | 3 |
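Editor's note: a compact alternative sketch for the same fan-out, using a single thread pool and executor.map instead of hand-managed per-process queues; it assumes the same `import wrapper as w` module with a GrapheneExtract class behaving as in the script above.
import concurrent.futures

def extract_line(sentence):
    ex = w.GrapheneExtract(sentence)   # network-bound request, so threads overlap the waiting
    return sentence.strip() + "\t" + str(ex.json) + "\n"

def extract_all(sentences, max_workers=192):
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        return list(executor.map(extract_line, sentences))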
torch/_prims/context.py | EikanWang/pytorch | 0 | 7747 | <filename>torch/_prims/context.py
from typing import Callable, Sequence, Any, Dict
import functools
import torch
import torch.overrides
from torch._prims.utils import torch_function_passthrough
import torch._refs as refs
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch._prims
# TODO: automap torch operations to references
# (need to throw a good assertion if the mapping doesn't exist)
_torch_to_reference_map = {
torch.add: refs.add,
# torch.div: refs.div,
torch.mul: refs.mul,
torch.ge: refs.ge,
torch.gt: refs.gt,
torch.le: refs.le,
torch.lt: refs.lt,
}
@functools.lru_cache(None)
def torch_to_refs_map():
"""
Mapping of torch API functions to torch._refs functions.
E.g. torch_to_refs_map()[torch.add] == torch._refs.add
"""
modules = [
(torch, torch._refs),
(torch.nn, torch._refs.nn),
(torch.nn.functional, torch._refs.nn.functional),
(torch.special, torch._refs.special),
]
r = {}
for mod_torch, mod_refs in modules:
for s in mod_refs.__all__: # type: ignore[attr-defined]
r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s)
return r
@functools.lru_cache(None)
def all_prims():
"""
Set of all prim functions, e.g., torch._prims.add in all_prims()
"""
return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}
class TorchRefsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.* functions and Tensor methods to
use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)
>>> with TorchRefsMode.push():
... torch.add(x, y) # calls torch._refs.add(x, y)
By default, this context manager will fall back on the torch.* if the
ref does not exist; set strict=True to error if this occurs.
"""
def __init__(self, strict=False):
self.strict = strict
def __torch_function__(
self,
orig_func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Dict = None,
):
if kwargs is None:
kwargs = {}
# For primitive operations, run them as is without interception
if orig_func in torch_function_passthrough or orig_func in all_prims():
return orig_func(*args, **kwargs)
mapping = torch_to_refs_map()
func = mapping.get(orig_func, None)
if func is not None:
return func(*args, **kwargs)
if self.strict:
raise RuntimeError(
f"no _refs support for {torch.overrides.resolve_name(orig_func)}"
)
return orig_func(*args, **kwargs)
| <filename>torch/_prims/context.py
from typing import Callable, Sequence, Any, Dict
import functools
import torch
import torch.overrides
from torch._prims.utils import torch_function_passthrough
import torch._refs as refs
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch._prims
# TODO: automap torch operations to references
# (need to throw a good assertion if the mapping doesn't exist)
_torch_to_reference_map = {
torch.add: refs.add,
# torch.div: refs.div,
torch.mul: refs.mul,
torch.ge: refs.ge,
torch.gt: refs.gt,
torch.le: refs.le,
torch.lt: refs.lt,
}
@functools.lru_cache(None)
def torch_to_refs_map():
"""
Mapping of torch API functions to torch._refs functions.
E.g. torch_to_refs_map()[torch.add] == torch._refs.add
"""
modules = [
(torch, torch._refs),
(torch.nn, torch._refs.nn),
(torch.nn.functional, torch._refs.nn.functional),
(torch.special, torch._refs.special),
]
r = {}
for mod_torch, mod_refs in modules:
for s in mod_refs.__all__: # type: ignore[attr-defined]
r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s)
return r
@functools.lru_cache(None)
def all_prims():
"""
Set of all prim functions, e.g., torch._prims.add in all_prims()
"""
return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}
class TorchRefsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.* functions and Tensor methods to
use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)
>>> with TorchRefsMode.push():
... torch.add(x, y) # calls torch._refs.add(x, y)
By default, this context manager will fall back on the torch.* if the
ref does not exist; set strict=True to error if this occurs.
"""
def __init__(self, strict=False):
self.strict = strict
def __torch_function__(
self,
orig_func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Dict = None,
):
if kwargs is None:
kwargs = {}
# For primitive operations, run them as is without interception
if orig_func in torch_function_passthrough or orig_func in all_prims():
return orig_func(*args, **kwargs)
mapping = torch_to_refs_map()
func = mapping.get(orig_func, None)
if func is not None:
return func(*args, **kwargs)
if self.strict:
raise RuntimeError(
f"no _refs support for {torch.overrides.resolve_name(orig_func)}"
)
return orig_func(*args, **kwargs)
| en | 0.732976 | # TODO: automap torch operations to references # (need to throw a good assertion if the mapping doesn't exist) # torch.div: refs.div, Mapping of torch API functions to torch._refs functions. E.g. torch_to_refs_map()[torch.add] == torch._refs.add # type: ignore[attr-defined] Set of all prim functions, e.g., torch._prims.add in all_prims() Switches the interpretation of torch.* functions and Tensor methods to use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.) >>> with TorchRefsMode.push(): ... torch.add(x, y) # calls torch._refs.add(x, y) By default, this context manager will fall back on the torch.* if the ref does not exist; set strict=True to error if this occurs. # For primitive operations, run them as is without interception | 2.346505 | 2 |
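Editor's note: a usage sketch based on the TorchRefsMode docstring above. It assumes the module is importable as torch._prims.context; these are private PyTorch internals, so the exact entry point (e.g. TorchRefsMode.push) varies between releases.
import torch
from torch._prims.context import TorchRefsMode

x, y = torch.ones(3), torch.ones(3)
with TorchRefsMode.push():
    z = torch.add(x, y)   # intercepted and routed to torch._refs.add
print(z)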
search/tests/test_read_similarities.py | cotsog/pathways-backend | 0 | 7748 | from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
class TestReadingManualTaskSimilarities(TestCase):
def test_convert_matrix_to_map_from_topic_to_array_of_services(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_multiple_services_for_a_topic(self):
data = [
['topic1', ],
['service1'],
['service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service2', 'service3'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_different_numbers_of_services_for_different_topics(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service3'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_empty_entries(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['', 'service3'],
[None, 'service4'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2', 'service3', 'service4'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
| from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
class TestReadingManualTaskSimilarities(TestCase):
def test_convert_matrix_to_map_from_topic_to_array_of_services(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_multiple_services_for_a_topic(self):
data = [
['topic1', ],
['service1'],
['service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service2', 'service3'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_different_numbers_of_services_for_different_topics(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service3'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_empty_entries(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['', 'service3'],
[None, 'service4'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2', 'service3', 'service4'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
| none | 1 | 2.70937 | 3 |
|
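Editor's note: the tests above pin down the expected behaviour of build_manual_similarity_map; the function body is not part of this row, so the following is one possible implementation sketch, not the project's actual code.
def build_manual_similarity_map(data):
    topics = data[0]
    similarity_map = {topic: [] for topic in topics}
    for row in data[1:]:
        for topic, service in zip(topics, row):
            if service:                     # skip '' and None placeholders
                similarity_map[topic].append(service)
    return similarity_map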
fortuna/fortuna.py | Zabamund/HackCPH18 | 3 | 7749 | """
Fortuna
Python project to visualize uncertainty in probabilistic exploration models.
Created on 09/06/2018
@authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
class Fortuna(object):
"""
Class to load the fortuna dataset and call different methods for visualization in a web frontend.
Args:
There are no required arguments at the moment. Input files could be defined.
"""
def __init__(self, **kwargs):
"""
        Method that is called when an object of the class Fortuna is initiated; it imports the data and directly creates some important variables.
"""
# hardcode geometry
self.size_raster = (250,162)
self.X_corner = 390885
self.Y_corner = 7156947
self.dx, self.dy, self.dz = 25, 25, 100
self.top_model = 950
self.bottom_model = 1050
self.base_cube = None
self.top_cube = None
self.base_n = None
self.top_n = None
self.vol = None
# Create empty xarray dataset
self.ds = xr.Dataset()
self.xx = None
self.yy = None
self.zz = None
self.model = None
self.base_mean = None
self.base_std = None
self.top_mean = None
self.top_std = None
## Initial methods to load
self.import_data()
self.calc_xarray()
self.calc_stat()
### Methods for initiating the object
def folder2cube(self, files):
"""
Method to read a file.
"""
base_set = glob.glob(files)
cube = np.zeros(self.size_raster + (len(base_set),))
for i, model in enumerate(base_set):
cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster)
return cube, len(base_set)
def import_data(self):
"""
Method to load different data objects from files.
"""
self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data')
self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data')
self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)
def calc_xarray (self):
self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0])
self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1])
self.zz = np.linspace(self.top_model, self.bottom_model, self.dz)
self.model = np.linspace(0, self.top_model, self.base_n)
self.ds.coords['X'] = self.xx
self.ds.coords['Y'] = self.yy
self.ds.coords['Z'] = self.zz
self.ds.coords['MODEL'] = self.model
self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube)
self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube)
def calc_stat (self):
self.base_mean = self.ds['BASE'].mean(dim='MODEL')
self.base_std = self.ds['BASE'].std(dim='MODEL')
self.top_mean = self.ds['TOP'].mean(dim='MODEL')
self.top_std = self.ds['TOP'].std(dim='MODEL')
## Data Management methods
def load_pickle(self, path):
return np.load(path)
    ## Methods to compute different uncertainty cubes --> cubes to be displayed in the frontend
def calc_lithology(self, iterations = 2):
"""
Sample from both distributions and fill each z-stack accordingly
"""
# create empty array
block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8')
for i in range(iterations):
for j in range(self.size_raster[0]): # size_raster[0]
for k in range(self.size_raster[1]):
# sample from top and base distributions for specific x,y position
top = np.random.normal(self.top_mean[j, k], self.top_std[j, k])
base = np.random.normal(self.base_mean[j, k], self.base_std[j, k])
# iterate over vertical z-stack
for l in range(self.zz.size):
if self.zz[l] <= top:
block[i, j, k, l] = 1
elif self.zz[l] > base:
block[i, j, k, l] = 3
                        elif ((self.zz[l] > top) and (self.zz[l] <= base)):
block[i, j, k, l] = 2
return block
def calc_lithology_vect(self, iterations=2):
"""
Resample from z value statistics and fill each z-stack in a lithology block accordingly.
This is the new method with vectorized operations to speed up calculations.
"""
# create empty array
block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')
for i in range(iterations):
# create meshgrids grid for coordinate-wise iterations
mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size),
np.arange(self.yy.size),
np.arange(self.zz.size))
# sample from top and base distributions for specific x,y position
top = np.zeros([self.xx.size, self.yy.size])
base = np.zeros([self.xx.size, self.yy.size])
top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
            base[mesh_x, mesh_y] = np.random.normal(self.base_mean.values[mesh_x, mesh_y],
                                                    self.base_std.values[mesh_x, mesh_y])
# compare each cell to resampled reference values
# TODO generalize for any number of lithologies
block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1,
np.where(self.zz < base[mesh_x, mesh_y], 2, 3))
return block
    ### Modified from GemPy!
def calc_probability_lithology(self, cube):
"""Blocks must be just the lith blocks!"""
lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)])
lith_id = np.unique(lith_blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1]))
for i, l_id in enumerate(lith_id):
lith_count[i] = np.sum(lith_blocks == l_id, axis=0)
lith_prob = lith_count / len(lith_blocks)
return lith_prob
    ### Modified from GemPy!
def calc_information_entropy(self, lith_prob):
"""Calculates information entropy for the given probability array."""
cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0
cube -= (pm * np.ma.log2(pm)).filled(0)
return cube.reshape([self.xx.size, self.yy.size, self.zz.size])
# Try numpy.flatten and numpy.ravel
## Simple plotting methods
def plot_entropy(self, cube, slice=10):
        plt.imshow(cube[slice, :, :].T, origin='upper', cmap='viridis')
plt.show() | """
Fortuna
Python project to visualize uncertainty in probabilistic exploration models.
Created on 09/06/2018
@authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
class Fortuna(object):
"""
Class to load the fortuna dataset and call different methods for visualization in a web frontend.
Args:
There are no required arguments at the moment. Input files could be defined.
"""
def __init__(self, **kwargs):
"""
        Method that is called when an object of the class Fortuna is initiated; it imports the data and directly creates some important variables.
"""
# hardcode geometry
self.size_raster = (250,162)
self.X_corner = 390885
self.Y_corner = 7156947
self.dx, self.dy, self.dz = 25, 25, 100
self.top_model = 950
self.bottom_model = 1050
self.base_cube = None
self.top_cube = None
self.base_n = None
self.top_n = None
self.vol = None
# Create empty xarray dataset
self.ds = xr.Dataset()
self.xx = None
self.yy = None
self.zz = None
self.model = None
self.base_mean = None
self.base_std = None
self.top_mean = None
self.top_std = None
## Initial methods to load
self.import_data()
self.calc_xarray()
self.calc_stat()
### Methods for initiating the object
def folder2cube(self, files):
"""
Method to read a file.
"""
base_set = glob.glob(files)
cube = np.zeros(self.size_raster + (len(base_set),))
for i, model in enumerate(base_set):
cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster)
return cube, len(base_set)
def import_data(self):
"""
Method to load different data objects from files.
"""
self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data')
self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data')
self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)
def calc_xarray (self):
self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0])
self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1])
self.zz = np.linspace(self.top_model, self.bottom_model, self.dz)
self.model = np.linspace(0, self.top_model, self.base_n)
self.ds.coords['X'] = self.xx
self.ds.coords['Y'] = self.yy
self.ds.coords['Z'] = self.zz
self.ds.coords['MODEL'] = self.model
self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube)
self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube)
def calc_stat (self):
self.base_mean = self.ds['BASE'].mean(dim='MODEL')
self.base_std = self.ds['BASE'].std(dim='MODEL')
self.top_mean = self.ds['TOP'].mean(dim='MODEL')
self.top_std = self.ds['TOP'].std(dim='MODEL')
## Data Management methods
def load_pickle(self, path):
return np.load(path)
    ## Methods to compute different uncertainty cubes --> cubes to be displayed in the frontend
def calc_lithology(self, iterations = 2):
"""
Sample from both distributions and fill each z-stack accordingly
"""
# create empty array
block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8')
for i in range(iterations):
for j in range(self.size_raster[0]): # size_raster[0]
for k in range(self.size_raster[1]):
# sample from top and base distributions for specific x,y position
top = np.random.normal(self.top_mean[j, k], self.top_std[j, k])
base = np.random.normal(self.base_mean[j, k], self.base_std[j, k])
# iterate over vertical z-stack
for l in range(self.zz.size):
if self.zz[l] <= top:
block[i, j, k, l] = 1
elif self.zz[l] > base:
block[i, j, k, l] = 3
                        elif ((self.zz[l] > top) and (self.zz[l] <= base)):
block[i, j, k, l] = 2
return block
def calc_lithology_vect(self, iterations=2):
"""
Resample from z value statistics and fill each z-stack in a lithology block accordingly.
This is the new method with vectorized operations to speed up calculations.
"""
# create empty array
block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')
for i in range(iterations):
# create meshgrids grid for coordinate-wise iterations
mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size),
np.arange(self.yy.size),
np.arange(self.zz.size))
# sample from top and base distributions for specific x,y position
top = np.zeros([self.xx.size, self.yy.size])
base = np.zeros([self.xx.size, self.yy.size])
top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
            base[mesh_x, mesh_y] = np.random.normal(self.base_mean.values[mesh_x, mesh_y],
                                                    self.base_std.values[mesh_x, mesh_y])
# compare each cell to resampled reference values
# TODO generalize for any number of lithologies
block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1,
np.where(self.zz < base[mesh_x, mesh_y], 2, 3))
return block
    ### Modified from GemPy!
def calc_probability_lithology(self, cube):
"""Blocks must be just the lith blocks!"""
lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)])
lith_id = np.unique(lith_blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1]))
for i, l_id in enumerate(lith_id):
lith_count[i] = np.sum(lith_blocks == l_id, axis=0)
lith_prob = lith_count / len(lith_blocks)
return lith_prob
    ### Modified from GemPy!
def calc_information_entropy(self, lith_prob):
"""Calculates information entropy for the given probability array."""
cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0
cube -= (pm * np.ma.log2(pm)).filled(0)
return cube.reshape([self.xx.size, self.yy.size, self.zz.size])
# Try numpy.flatten and numpy.ravel
## Simple plotting methods
def plot_entropy(self, cube, slice=10):
        plt.imshow(cube[slice, :, :].T, origin='upper', cmap='viridis')
plt.show() | en | 0.747475 | Fortuna Python project to visualize uncertatinty in probabilistic exploration models. Created on 09/06/2018 @authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # Import libraries Class to load the fortuna dataset and call different methods for visualization in a web frontend. Args: There are no required arguments at the moment. Input files could be defined. Method that is called when a object of the class Fortuna is initiated, it imports the data and directly creates some important variables. # hardcode geometry # Create empty xarray dataset ## Initial methods to load ### Methods for initiating the object Method to read a file. Method to load different data objects from files. ## Data Management methods ## Methods to compute different uncertatinty cubes --> cubes to be displayed in the frontend Sample from both distributions and fill each z-stack accordingly # create empty array # size_raster[0] # sample from top and base distributions for specific x,y position # iterate over vertical z-stack Resample from z value statistics and fill each z-stack in a lithology block accordingly. This is the new method with vectorized operations to speed up calculations. # create empty array # create meshgrids grid for coordinate-wise iterations # sample from top and base distributions for specific x,y position # compare each cell to resampled reference values # TODO generalize for any number of lithologies ### Modifyed from GemPy! Blocks must be just the lith blocks! # lith_count = np.zeros_like(lith_blocks[0:len(lith_id)]) ### Modyfied from GemPy! Calculates information entropy for the given probability array. # mask where layer prob is 0 # Try numpy.flatten and numpy.ravel ## Simple plotting methods | 3.044391 | 3 |
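Editor's note: a small self-contained illustration of the probability/entropy step performed by calc_probability_lithology and calc_information_entropy above, run on a toy stack of two flattened model realizations.
import numpy as np

lith_blocks = np.array([
    [1, 1, 2, 3],   # realization 1, flattened cells
    [1, 2, 2, 3],   # realization 2
])
lith_prob = np.array([(lith_blocks == lid).mean(axis=0) for lid in np.unique(lith_blocks)])
masked = np.ma.masked_equal(lith_prob, 0)                  # ignore zero-probability entries
entropy = -(masked * np.ma.log2(masked)).filled(0).sum(axis=0)
print(entropy)   # [0. 1. 0. 0.] -> only the cell where the realizations disagree is uncertain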
resize.py | Linx3/6.867-Final-Project | 3 | 7750 | from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
for imageFile in arr:
if "png" in imageFile:
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 416
height = 416
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment
# im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment
# im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter
ext = ".png"
# print(imageFile.split(".")[0])
num=imageFile.split(".")[0]
print(num)
print(type(num))
im2.save(imageFile)
# im2.save(imageFile+ ext)
# im3.save("BILINEAR" + ext)
# im4.save("BICUBIC" + ext)
# im5.save("ANTIALIAS" + ext)
| from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
for imageFile in arr:
if "png" in imageFile:
im1 = Image.open(imageFile)
# adjust width and height to your needs
width = 416
height = 416
# use one of these filter options to resize the image
im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour
# im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment
# im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment
# im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter
ext = ".png"
# print(imageFile.split(".")[0])
num=imageFile.split(".")[0]
print(num)
print(type(num))
im2.save(imageFile)
# im2.save(imageFile+ ext)
# im3.save("BILINEAR" + ext)
# im4.save("BICUBIC" + ext)
# im5.save("ANTIALIAS" + ext)
| en | 0.646112 | # open an image file (.bmp,.jpg,.png,.gif) you have in the working folder # //imageFile = "03802.png" # adjust width and height to your needs # use one of these filter options to resize the image # use nearest neighbour # im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment # im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment # im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter # print(imageFile.split(".")[0]) # im2.save(imageFile+ ext) # im3.save("BILINEAR" + ext) # im4.save("BICUBIC" + ext) # im5.save("ANTIALIAS" + ext) | 3.435431 | 3 |
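Editor's note: the loop above forces every image to exactly 416x416 with nearest-neighbour sampling; a variant sketch that preserves aspect ratio and uses a higher-quality filter is shown below.
from PIL import Image

def resize_keep_aspect(image_path, max_size=(416, 416)):
    im = Image.open(image_path)
    im.thumbnail(max_size, Image.LANCZOS)   # resizes in place, never enlarges
    im.save(image_path)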
src/game/exceptions.py | UnBParadigmas/2020.1_G2_SMA_DarwInPython | 0 | 7751 | class InvalidMovementException(Exception):
pass
class InvalidMovementTargetException(InvalidMovementException):
pass
class InvalidMovimentOriginException(InvalidMovementException):
pass | class InvalidMovementException(Exception):
pass
class InvalidMovementTargetException(InvalidMovementException):
pass
class InvalidMovimentOriginException(InvalidMovementException):
pass | none | 1 | 1.616694 | 2 |
|
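Editor's note: a short usage sketch; because both specific exceptions subclass InvalidMovementException, callers can catch the base class to handle either case in one place.
try:
    raise InvalidMovementTargetException("target square is occupied")
except InvalidMovementException as err:
    print(f"Move rejected: {err}")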
src/pipeline/sentence-retrieval/run.py | simonepri/fever-transformers | 8 | 7752 | <filename>src/pipeline/sentence-retrieval/run.py
#!/usr/bin/env python3
import argparse
import bisect
import csv
import json
import os
from collections import defaultdict
from functools import reduce
from tqdm import tqdm
def get_best_evidence(scores_file, max_sentences_per_claim):
weighted_claim_evidence = defaultdict(lambda: [])
with open(scores_file, "r") as f:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, f.readlines()), 0)
f.seek(0)
lines = csv.reader(f, delimiter="\t")
for line in tqdm(lines, desc="Score", total=nlines):
claim_id, claim, page, sent_id, sent, score = line
claim_id, sent_id, score = int(claim_id), int(sent_id), float(score)
evid = (page, sent_id, sent)
bisect.insort(weighted_claim_evidence[claim_id], (-score, evid))
if len(weighted_claim_evidence[claim_id]) > max_sentences_per_claim:
weighted_claim_evidence[claim_id].pop()
for claim_id in weighted_claim_evidence:
for i, (score, evid) in enumerate(weighted_claim_evidence[claim_id]):
weighted_claim_evidence[claim_id][i] = (-score, evid)
return weighted_claim_evidence
def main(scores_file, in_file, out_file, max_sentences_per_claim=None):
path = os.getcwd()
scores_file = os.path.join(path, scores_file)
in_file = os.path.join(path, in_file)
out_file = os.path.join(path, out_file)
best_evidence = get_best_evidence(scores_file, max_sentences_per_claim)
with open(out_file, "w+") as fout:
with open(in_file, "r") as fin:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, fin.readlines()), 0)
fin.seek(0)
lines = map(json.loads, fin.readlines())
for line in tqdm(lines, desc="Claim", total=nlines):
claim_id = line["id"]
line["predicted_sentences"] = best_evidence[claim_id]
fout.write(json.dumps(line) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores-file", type=str)
parser.add_argument("--in-file", type=str, help="input dataset")
parser.add_argument("--out-file", type=str,
help="path to save output dataset")
parser.add_argument("--max-sentences-per-claim", type=int,
help="number of top sentences to return for each claim")
args = parser.parse_args()
main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
| <filename>src/pipeline/sentence-retrieval/run.py
#!/usr/bin/env python3
import argparse
import bisect
import csv
import json
import os
from collections import defaultdict
from functools import reduce
from tqdm import tqdm
def get_best_evidence(scores_file, max_sentences_per_claim):
weighted_claim_evidence = defaultdict(lambda: [])
with open(scores_file, "r") as f:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, f.readlines()), 0)
f.seek(0)
lines = csv.reader(f, delimiter="\t")
for line in tqdm(lines, desc="Score", total=nlines):
claim_id, claim, page, sent_id, sent, score = line
claim_id, sent_id, score = int(claim_id), int(sent_id), float(score)
evid = (page, sent_id, sent)
bisect.insort(weighted_claim_evidence[claim_id], (-score, evid))
if len(weighted_claim_evidence[claim_id]) > max_sentences_per_claim:
weighted_claim_evidence[claim_id].pop()
for claim_id in weighted_claim_evidence:
for i, (score, evid) in enumerate(weighted_claim_evidence[claim_id]):
weighted_claim_evidence[claim_id][i] = (-score, evid)
return weighted_claim_evidence
def main(scores_file, in_file, out_file, max_sentences_per_claim=None):
path = os.getcwd()
scores_file = os.path.join(path, scores_file)
in_file = os.path.join(path, in_file)
out_file = os.path.join(path, out_file)
best_evidence = get_best_evidence(scores_file, max_sentences_per_claim)
with open(out_file, "w+") as fout:
with open(in_file, "r") as fin:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, fin.readlines()), 0)
fin.seek(0)
lines = map(json.loads, fin.readlines())
for line in tqdm(lines, desc="Claim", total=nlines):
claim_id = line["id"]
line["predicted_sentences"] = best_evidence[claim_id]
fout.write(json.dumps(line) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores-file", type=str)
parser.add_argument("--in-file", type=str, help="input dataset")
parser.add_argument("--out-file", type=str,
help="path to save output dataset")
parser.add_argument("--max-sentences-per-claim", type=int,
help="number of top sentences to return for each claim")
args = parser.parse_args()
main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
| fr | 0.221828 | #!/usr/bin/env python3 | 2.632391 | 3 |
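Editor's note: the core top-k trick used by get_best_evidence above, shown on toy data; storing negative scores keeps bisect.insort ordering the best-scoring sentences at the front of the list.
import bisect

top_k, k = [], 3
for score, sentence in [(0.2, "a"), (0.9, "b"), (0.5, "c"), (0.7, "d")]:
    bisect.insort(top_k, (-score, sentence))
    if len(top_k) > k:
        top_k.pop()                            # drop the current worst candidate
print([(-s, sent) for s, sent in top_k])       # [(0.9, 'b'), (0.7, 'd'), (0.5, 'c')]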
bot/__main__.py | KOTBOTS/Telegram-CloneBot | 1 | 7753 | from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
@run_async
def start(update, context):
sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \
"\nSend /help for checking all available commands.",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
@is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('clone', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('help', helper)
log_handler = CommandHandler('logs', sendLogs)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
| from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
@run_async
def start(update, context):
sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \
"\nSend /help for checking all available commands.",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
@is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('clone', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('help', helper)
log_handler = CommandHandler('logs', sendLogs)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
| en | 0.544985 | # Soon to be used for direct updates from within the bot. # ;-; # TODO Cancel Clones with /cancel command. # Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone> | 2.172499 | 2 |
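Editor's note: a standalone sketch of the /clone argument convention described in the help text above (link, optional destination, optional comma-separated ignore list); names and defaults here are illustrative, not the bot's actual parser.
def parse_clone_args(message_text, default_destination="GDRIVE_FOLDER_ID"):
    parts = message_text.split()                 # e.g. "/clone <link> <dest> id1,id2,id3"
    link = parts[1] if len(parts) > 1 else None
    destination = parts[2] if len(parts) > 2 else default_destination
    ignore_list = parts[3].split(",") if len(parts) > 3 else []
    return link, destination, ignore_list

print(parse_clone_args("/clone FOLDER_ID DEST_ID id1,id2,id3"))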
src/pyfinlab/risk_models.py | AnaSan27/pyfinlab | 1 | 7754 | <reponame>AnaSan27/pyfinlab<gh_stars>1-10
import pandas as pd
import numpy as np
from portfoliolab.utils import RiskMetrics
from portfoliolab.estimators import RiskEstimators
from pypfopt import risk_models as risk_models_
"""
Available covariance risk models in PortfolioLab library.
https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html
Available covariance risk models in PyPortfolioOpt library.
https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html#
These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one
function for ease of use.
"""
risk_met = RiskMetrics()
risk_estimators = RiskEstimators()
risk_models = [
# PyPortfolioOpt
'sample_cov',
'semicovariance',
'exp_cov',
'ledoit_wolf_constant_variance',
'ledoit_wolf_single_factor',
'ledoit_wolf_constant_correlation',
'oracle_approximating',
# PortfolioLab
'sample_covariance',
'minimum_covariance_determinant',
'empirical_covariance',
'shrinked_covariance_basic',
'shrinked_covariance_lw',
'shrinked_covariance_oas',
'semi_covariance',
'exponential_covariance',
'constant_residual_eigenvalues_denoised',
'constant_residual_spectral_denoised',
'targeted_shrinkage_denoised',
'targeted_shrinkage_detoned',
'constant_residual_detoned',
'hierarchical_filtered_complete',
'hierarchical_filtered_single',
'hierarchical_filtered_avg'
]
def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1):
"""
Calculates the covariance matrix for a dataframe of asset prices.
:param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
:param model: (str) Risk model to use. Should be one of:
PyPortfolioOpt
- 'sample_cov',
- 'semicovariance',
- 'exp_cov',
- 'ledoit_wolf_constant_variance',
- 'ledoit_wolf_single_factor'
- 'ledoit_wolf_constant_correlation',
- 'oracle_approximating'
PortfolioLab
- 'sample_covariance',
- 'minimum_covariance_determinant',
- 'empirical_covariance',
- 'shrinked_covariance_basic',
- 'shrinked_covariance_lw',
- 'shrinked_covariance_oas',
- 'semi_covariance',
- 'exponential_covariance',
- 'constant_residual_eigenvalues_denoised',
- 'constant_residual_spectral_denoised',
- 'targeted_shrinkage_denoised',
- 'targeted_shrinkage_detoned',
- 'constant_residual_detoned',
- 'hierarchical_filtered_complete',
- 'hierarchical_filtered_single',
- 'hierarchical_filtered_avg'
:param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default)
:param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage.
(0.1 by default)
:return: (pd.DataFrame) Estimated covariance matrix.
"""
tn_relation = prices.shape[0] / prices.shape[1]
sample_cov = prices.pct_change().dropna().cov()
empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True),
index=sample_cov.index, columns=sample_cov.columns)
empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2),
index=sample_cov.index, columns=sample_cov.columns)
std = np.diag(empirical_cov) ** (1 / 2)
if model == 'sample_covariance':
return prices.pct_change().dropna().cov()
elif model == 'minimum_covariance_determinant':
covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True)
elif model == 'empirical_covariance':
covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True)
elif model == 'shrinked_covariance_basic':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_lw':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_oas':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage)
elif model == 'semi_covariance':
covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0)
elif model == 'exponential_covariance':
covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60)
elif model == 'constant_residual_eigenvalues_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_spectral_denoised':
covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral')
elif model == 'targeted_shrinkage_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth)
elif model == 'targeted_shrinkage_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1,
kde_bwidth=kde_bwidth)
elif model == 'hierarchical_filtered_complete':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='complete', draw_plot=False), std)
elif model == 'hierarchical_filtered_single':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='single', draw_plot=False), std)
elif model == 'hierarchical_filtered_avg':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='average', draw_plot=False), std)
elif model == 'sample_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.sample_cov(prices)) / 252
elif model == 'semicovariance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.semicovariance(prices)) / 252
elif model == 'exp_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.exp_cov(prices, span=180)) / 252
elif model == 'ledoit_wolf_constant_variance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_single_factor':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_constant_correlation':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'oracle_approximating':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
else:
raise ValueError("Unknown risk model '{}'. Check spelling; model names are case-sensitive.".format(model))
if not isinstance(covariance_matrix, pd.DataFrame):
covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6)
return covariance_matrix * 252
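# --- Usage sketch (illustrative addition, not part of the original module) ---
# Minimal demonstration of risk_model() on synthetic random-walk prices so the
# example stays self-contained; real usage would pass a DataFrame of daily
# asset prices, one column per ticker. The tickers and the price process below
# are assumptions made purely for illustration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_returns = rng.normal(0.0005, 0.01, size=(500, 4))  # 500 days, 4 assets
    demo_prices = pd.DataFrame(100 * np.exp(demo_returns.cumsum(axis=0)),
                               columns=["A", "B", "C", "D"])
    cov_sample = risk_model(demo_prices, "sample_cov")  # PyPortfolioOpt estimator
    cov_denoised = risk_model(demo_prices, "constant_residual_eigenvalues_denoised")  # PortfolioLab estimator
    print(cov_sample.shape, cov_denoised.shape)  # both (4, 4), annualised covariance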
| import pandas as pd
import numpy as np
from portfoliolab.utils import RiskMetrics
from portfoliolab.estimators import RiskEstimators
from pypfopt import risk_models as risk_models_
"""
Available covariance risk models in PortfolioLab library.
https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html
Available covariance risk models in PyPortfolioOpt library.
https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html#
These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one
function for ease of use.
"""
risk_met = RiskMetrics()
risk_estimators = RiskEstimators()
risk_models = [
# PyPortfolioOpt
'sample_cov',
'semicovariance',
'exp_cov',
'ledoit_wolf_constant_variance',
'ledoit_wolf_single_factor',
'ledoit_wolf_constant_correlation',
'oracle_approximating',
# PortfolioLab
'sample_covariance',
'minimum_covariance_determinant',
'empirical_covariance',
'shrinked_covariance_basic',
'shrinked_covariance_lw',
'shrinked_covariance_oas',
'semi_covariance',
'exponential_covariance',
'constant_residual_eigenvalues_denoised',
'constant_residual_spectral_denoised',
'targeted_shrinkage_denoised',
'targeted_shrinkage_detoned',
'constant_residual_detoned',
'hierarchical_filtered_complete',
'hierarchical_filtered_single',
'hierarchical_filtered_avg'
]
def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1):
"""
Calculates the covariance matrix for a dataframe of asset prices.
:param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
:param model: (str) Risk model to use. Should be one of:
PyPortfolioOpt
- 'sample_cov',
- 'semicovariance',
- 'exp_cov',
- 'ledoit_wolf_constant_variance',
- 'ledoit_wolf_single_factor'
- 'ledoit_wolf_constant_correlation',
- 'oracle_approximating'
PortfolioLab
- 'sample_covariance',
- 'minimum_covariance_determinant',
- 'empirical_covariance',
- 'shrinked_covariance_basic',
- 'shrinked_covariance_lw',
- 'shrinked_covariance_oas',
- 'semi_covariance',
- 'exponential_covariance',
- 'constant_residual_eigenvalues_denoised',
- 'constant_residual_spectral_denoised',
- 'targeted_shrinkage_denoised',
- 'targeted_shrinkage_detoned',
- 'constant_residual_detoned',
- 'hierarchical_filtered_complete',
- 'hierarchical_filtered_single',
- 'hierarchical_filtered_avg'
:param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default)
:param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage.
(0.1 by default)
:return: (pd.DataFrame) Estimated covariance matrix.
"""
tn_relation = prices.shape[0] / prices.shape[1]
sample_cov = prices.pct_change().dropna().cov()
empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True),
index=sample_cov.index, columns=sample_cov.columns)
empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2),
index=sample_cov.index, columns=sample_cov.columns)
std = np.diag(empirical_cov) ** (1 / 2)
if model == 'sample_covariance':
return prices.pct_change().dropna().cov()
elif model == 'minimum_covariance_determinant':
covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True)
elif model == 'empirical_covariance':
covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True)
elif model == 'shrinked_covariance_basic':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_lw':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_oas':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage)
elif model == 'semi_covariance':
covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0)
elif model == 'exponential_covariance':
covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60)
elif model == 'constant_residual_eigenvalues_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_spectral_denoised':
covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral')
elif model == 'targeted_shrinkage_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth)
elif model == 'targeted_shrinkage_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1,
kde_bwidth=kde_bwidth)
elif model == 'hierarchical_filtered_complete':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='complete', draw_plot=False), std)
elif model == 'hierarchical_filtered_single':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='single', draw_plot=False), std)
elif model == 'hierarchical_filtered_avg':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='average', draw_plot=False), std)
elif model == 'sample_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.sample_cov(prices)) / 252
elif model == 'semicovariance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.semicovariance(prices)) / 252
elif model == 'exp_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.exp_cov(prices, span=180)) / 252
elif model == 'ledoit_wolf_constant_variance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_single_factor':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_constant_correlation':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'oracle_approximating':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
else:
raise ValueError("Unknown risk model '{}'. Check spelling; model names are case-sensitive.".format(model))
if not isinstance(covariance_matrix, pd.DataFrame):
covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6)
return covariance_matrix * 252 | en | 0.487139 | Available covariance risk models in PortfolioLab library. https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html Available covariance risk models in PyPortfolioOpt library. https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html# These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one function for ease of use. # PyPortfolioOpt # PortfolioLab Calculates the covariance matrix for a dataframe of asset prices. :param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset. :param model: (str) Risk model to use. Should be one of: PyPortfolioOpt - 'sample_cov', - 'semicovariance', - 'exp_cov', - 'ledoit_wolf_constant_variance', - 'ledoit_wolf_single_factor' - 'ledoit_wolf_constant_correlation', - 'oracle_approximating' PortfolioLab - 'sample_covariance', - 'minimum_covariance_determinant', - 'empirical_covariance', - 'shrinked_covariance_basic', - 'shrinked_covariance_lw', - 'shrinked_covariance_oas', - 'semi_covariance', - 'exponential_covariance', - 'constant_residual_eigenvalues_denoised', - 'constant_residual_spectral_denoised', - 'targeted_shrinkage_denoised', - 'targeted_shrinkage_detoned', - 'constant_residual_detoned', - 'hierarchical_filtered_complete', - 'hierarchical_filtered_single', - 'hierarchical_filtered_avg' :param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default) :param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage. (0.1 by default) :return: (pd.DataFrame) Estimated covariance matrix. | 2.501497 | 3 |
gaussian_blur/gaussian_blur.py | Soft-illusion/ComputerVision | 0 | 7755 | <reponame>Soft-illusion/ComputerVision
import cv2 as cv
import sys
import numpy as np
import random as r
import os
from PIL import Image as im
def noisy(noise_typ,image):
if noise_typ == "gauss":
# Generate Gaussian noise
gauss = np.random.normal(0,1,image.size)
print(gauss)
gauss = gauss.reshape(image.shape[0],image.shape[1],image.shape[2]).astype('uint8')
# Add the Gaussian noise to the image
img_gauss = cv.add(image,gauss)
cv.imwrite("Noise.png", gauss)
return img_gauss
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
img = cv.imread(cv.samples.findFile("3.png"))
if img is None:
sys.exit("Could not read the image.")
else :
    height, width, depth = img.shape
img_noisy = noisy("gauss",img)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.GaussianBlur(img_noisy,(kernal_size,kernal_size),0)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "gaussian_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
# dst = img_noisy
# for kernal_no in range (0,200):
# print(kernal_no)
# dst = cv.GaussianBlur(dst,(3,3),1)
# # print( cv.getGaussianKernel(kernal_size,3))
# file_name = "gaussian_blur" + str(kernal_no) + ".png"
# cv.imwrite(file_name, dst)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.bilateralFilter(img_noisy,kernal_size,300,300)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "bilateral_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
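# --- Additional sketch (illustrative addition) ---
# noisy() above also supports "s&p", "poisson" and "speckle" modes, although the
# script only exercises the Gaussian branch. The loop below is an assumed
# demonstration of two of the other modes; both return float arrays, so they are
# clipped and cast back to uint8 before writing.
for other_noise in ("poisson", "speckle"):
    other_img = noisy(other_noise, img)
    other_img = np.clip(other_img, 0, 255).astype('uint8')
    cv.imwrite("noise_" + other_noise + ".png", other_img)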
| import cv2 as cv
import sys
import numpy as np
import random as r
import os
from PIL import Image as im
def noisy(noise_typ,image):
if noise_typ == "gauss":
# Generate Gaussian noise
gauss = np.random.normal(0,1,image.size)
print(gauss)
gauss = gauss.reshape(image.shape[0],image.shape[1],image.shape[2]).astype('uint8')
# Add the Gaussian noise to the image
img_gauss = cv.add(image,gauss)
cv.imwrite("Noise.png", gauss)
return img_gauss
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
img = cv.imread(cv.samples.findFile("3.png"))
if img is None:
sys.exit("Could not read the image.")
else :
    height, width, depth = img.shape
img_noisy = noisy("gauss",img)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.GaussianBlur(img_noisy,(kernal_size,kernal_size),0)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "gaussian_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
# dst = img_noisy
# for kernal_no in range (0,200):
# print(kernal_no)
# dst = cv.GaussianBlur(dst,(3,3),1)
# # print( cv.getGaussianKernel(kernal_size,3))
# file_name = "gaussian_blur" + str(kernal_no) + ".png"
# cv.imwrite(file_name, dst)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.bilateralFilter(img_noisy,kernal_size,300,300)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "bilateral_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst) | en | 0.288151 | # Generate Gaussian noise # Add the Gaussian noise to the image # Salt mode # Pepper mode # print( cv.getGaussianKernel(kernal_size,0)) # dst = img_noisy # for kernal_no in range (0,200): # print(kernal_no) # dst = cv.GaussianBlur(dst,(3,3),1) # # print( cv.getGaussianKernel(kernal_size,3)) # file_name = "gaussian_blur" + str(kernal_no) + ".png" # cv.imwrite(file_name, dst) # print( cv.getGaussianKernel(kernal_size,0)) | 2.888355 | 3 |
citywok_ms/employee/routes.py | fossabot/CityWok-Manager | 0 | 7756 | <reponame>fossabot/CityWok-Manager
from citywok_ms.file.models import EmployeeFile, File
import citywok_ms.employee.messages as employee_msg
import citywok_ms.file.messages as file_msg
from citywok_ms.employee.forms import EmployeeForm
from citywok_ms.file.forms import FileForm
from flask import Blueprint, flash, redirect, render_template, url_for
from citywok_ms.employee.models import Employee
employee = Blueprint("employee", __name__, url_prefix="/employee")
@employee.route("/")
def index():
return render_template(
"employee/index.html",
title=employee_msg.INDEX_TITLE,
active_employees=Employee.get_active(),
suspended_employees=Employee.get_suspended(),
)
@employee.route("/new", methods=["GET", "POST"])
def new():
form = EmployeeForm()
if form.validate_on_submit():
employee = Employee.create_by_form(form)
flash(employee_msg.NEW_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.index"))
return render_template(
"employee/form.html", title=employee_msg.NEW_TITLE, form=form
)
@employee.route("/<int:employee_id>")
def detail(employee_id):
return render_template(
"employee/detail.html",
title=employee_msg.DETAIL_TITLE,
employee=Employee.get_or_404(employee_id),
file_form=FileForm(),
)
@employee.route("/<int:employee_id>/update", methods=["GET", "POST"])
def update(employee_id):
employee = Employee.get_or_404(employee_id)
form = EmployeeForm()
form.hide_id.data = employee_id
if form.validate_on_submit():
employee.update_by_form(form)
flash(employee_msg.UPDATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
form.process(obj=employee)
return render_template(
"employee/form.html",
employee=employee,
form=form,
title=employee_msg.UPDATE_TITLE,
)
@employee.route("/<int:employee_id>/suspend", methods=["POST"])
def suspend(employee_id):
employee = Employee.get_or_404(employee_id)
employee.suspend()
flash(employee_msg.SUSPEND_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/activate", methods=["POST"])
def activate(employee_id):
employee = Employee.get_or_404(employee_id)
employee.activate()
flash(employee_msg.ACTIVATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/upload", methods=["POST"])
def upload(employee_id):
form = FileForm()
file = form.file.data
if form.validate_on_submit():
db_file = EmployeeFile.create_by_form(form, Employee.get_or_404(employee_id))
flash(file_msg.UPLOAD_SUCCESS.format(name=db_file.full_name), "success")
elif file is not None:
flash(
file_msg.INVALID_FORMAT.format(format=File.split_file_format(file)),
"danger",
)
else:
flash(file_msg.NO_FILE, "danger")
return redirect(url_for("employee.detail", employee_id=employee_id))
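# --- Registration sketch (illustrative addition, not part of the original module) ---
# The routes above live on a Blueprint, so an application factory has to register
# it somewhere. The factory below is an assumption for illustration; the real
# CityWok-Manager app setup (config, extensions, other blueprints) is not shown.
def _demo_create_app():
    from flask import Flask  # local import keeps the sketch self-contained
    app = Flask(__name__)
    app.register_blueprint(employee)  # exposes /employee, /employee/new, /employee/<id>, ...
    return app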
| from citywok_ms.file.models import EmployeeFile, File
import citywok_ms.employee.messages as employee_msg
import citywok_ms.file.messages as file_msg
from citywok_ms.employee.forms import EmployeeForm
from citywok_ms.file.forms import FileForm
from flask import Blueprint, flash, redirect, render_template, url_for
from citywok_ms.employee.models import Employee
employee = Blueprint("employee", __name__, url_prefix="/employee")
@employee.route("/")
def index():
return render_template(
"employee/index.html",
title=employee_msg.INDEX_TITLE,
active_employees=Employee.get_active(),
suspended_employees=Employee.get_suspended(),
)
@employee.route("/new", methods=["GET", "POST"])
def new():
form = EmployeeForm()
if form.validate_on_submit():
employee = Employee.create_by_form(form)
flash(employee_msg.NEW_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.index"))
return render_template(
"employee/form.html", title=employee_msg.NEW_TITLE, form=form
)
@employee.route("/<int:employee_id>")
def detail(employee_id):
return render_template(
"employee/detail.html",
title=employee_msg.DETAIL_TITLE,
employee=Employee.get_or_404(employee_id),
file_form=FileForm(),
)
@employee.route("/<int:employee_id>/update", methods=["GET", "POST"])
def update(employee_id):
employee = Employee.get_or_404(employee_id)
form = EmployeeForm()
form.hide_id.data = employee_id
if form.validate_on_submit():
employee.update_by_form(form)
flash(employee_msg.UPDATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
form.process(obj=employee)
return render_template(
"employee/form.html",
employee=employee,
form=form,
title=employee_msg.UPDATE_TITLE,
)
@employee.route("/<int:employee_id>/suspend", methods=["POST"])
def suspend(employee_id):
employee = Employee.get_or_404(employee_id)
employee.suspend()
flash(employee_msg.SUSPEND_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/activate", methods=["POST"])
def activate(employee_id):
employee = Employee.get_or_404(employee_id)
employee.activate()
flash(employee_msg.ACTIVATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/upload", methods=["POST"])
def upload(employee_id):
form = FileForm()
file = form.file.data
if form.validate_on_submit():
db_file = EmployeeFile.create_by_form(form, Employee.get_or_404(employee_id))
flash(file_msg.UPLOAD_SUCCESS.format(name=db_file.full_name), "success")
elif file is not None:
flash(
file_msg.INVALID_FORMAT.format(format=File.split_file_format(file)),
"danger",
)
else:
flash(file_msg.NO_FILE, "danger")
return redirect(url_for("employee.detail", employee_id=employee_id)) | none | 1 | 2.252377 | 2 |
|
kitsune/customercare/cron.py | safwanrahman/Ford | 1 | 7757 | import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
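# Example of the regex above in action (illustrative; assumes
# settings.CC_WORD_BLACKLIST = ['foo', 'bar']):
#
#   >>> get_word_blacklist_regex().pattern
#   '\\b(foo|bar)\\b'
#   >>> bool(get_word_blacklist_regex().search('firefox foo crashed'))
#   True
#   >>> bool(get_word_blacklist_regex().search('food is fine'))
#   False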
@cronjobs.register
def collect_tweets():
"""Collect new tweets about Firefox."""
# Don't (ab)use the twitter API from dev and stage.
if settings.STAGE:
return
with statsd.timer('customercare.tweets.time_elapsed'):
t = Twython(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
search_options = {
'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos '
'OR @firefox_es'),
'count': settings.CC_TWEETS_PERPAGE, # Items per page.
'result_type': 'recent', # Retrieve tweets by date.
}
# If we already have some tweets, collect nothing older than what we
# have.
try:
latest_tweet = Tweet.latest()
except Tweet.DoesNotExist:
log.debug('No existing tweets. Retrieving %d tweets from search.' %
settings.CC_TWEETS_PERPAGE)
else:
search_options['since_id'] = latest_tweet.tweet_id
log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id)
# Retrieve Tweets
results = t.search(**search_options)
if len(results['statuses']) == 0:
# Twitter returned 0 results.
return
# Drop tweets into DB
for item in results['statuses']:
# Apply filters to tweet before saving
# Allow links in #fxinput tweets
statsd.incr('customercare.tweet.collected')
item = _filter_tweet(item,
allow_links='#fxinput' in item['text'])
if not item:
continue
created_date = datetime.utcfromtimestamp(calendar.timegm(
rfc822.parsedate(item['created_at'])))
item_lang = item['metadata'].get('iso_language_code', 'en')
tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item),
locale=item_lang, created=created_date)
try:
tweet.save()
statsd.incr('customercare.tweet.saved')
except IntegrityError:
pass
@cronjobs.register
def purge_tweets():
"""Periodically purge old tweets for each locale.
This does a lot of DELETEs on master, so it shouldn't run too frequently.
Probably once every hour or more.
"""
# Pin to master
pin_this_thread()
# Build list of tweets to delete, by id.
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
# Some locales don't have an iso639_1 code, too bad for them.
if not locale:
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug('Truncating tweet list: Removing tweets older than %s, '
'for [%s].' % (oldest.created, locale))
Tweet.objects.filter(locale=locale,
created__lte=oldest.created).delete()
def _get_oldest_tweet(locale, n=0):
"""Returns the nth oldest tweet per locale, defaults to newest."""
try:
return Tweet.objects.filter(locale=locale).order_by(
'-created')[n]
except IndexError:
return None
def _filter_tweet(item, allow_links=False):
"""
Apply some filters to an incoming tweet.
May modify tweet. If None is returned, tweet will be discarded.
Used to exclude replies and such from incoming tweets.
"""
text = item['text'].lower()
# No replies, except to ALLOWED_USERS
allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
to_user_id = item.get('to_user_id')
if to_user_id and to_user_id not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No mentions, except of ALLOWED_USERS
for user in item['entities']['user_mentions']:
if user['id'] not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No retweets
if RT_REGEX.search(text) or text.find('(via ') > -1:
statsd.incr('customercare.tweet.rejected.retweet')
return None
# No links
if not allow_links and LINK_REGEX.search(text):
statsd.incr('customercare.tweet.rejected.link')
return None
screen_name = item['user']['screen_name']
# Django's caching system will save us here.
IGNORED_USERS = set(
TwitterAccount.objects
.filter(ignored=True)
.values_list('username', flat=True)
)
# Exclude filtered users
if screen_name in IGNORED_USERS:
statsd.incr('customercare.tweet.rejected.user')
return None
# Exclude users with firefox in the handle
if 'firefox' in screen_name.lower():
statsd.incr('customercare.tweet.rejected.firefox_in_handle')
return None
# Exclude problem words
match = get_word_blacklist_regex().search(text)
if match:
bad_word = match.group(1)
statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
return None
return item
@cronjobs.register
def get_customercare_stats():
"""
Generate customer care stats from the Replies table.
This gets cached in Redis as a sorted list of contributors, stored as JSON.
Example Top Contributor data:
[
{
'twitter_username': 'username1',
'avatar': 'http://twitter.com/path/to/the/avatar.png',
'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
'all': 5211,
'1m': 230,
'1w': 33,
'1d': 3,
},
{ ... },
{ ... },
]
"""
if settings.STAGE:
return
contributor_stats = {}
now = datetime.now()
one_month_ago = now - timedelta(days=30)
one_week_ago = now - timedelta(days=7)
yesterday = now - timedelta(days=1)
for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()):
for reply in chunk:
user = reply.twitter_username
if user not in contributor_stats:
raw = json.loads(reply.raw_json)
if 'from_user' in raw: # For tweets collected using v1 API
user_data = raw
else:
user_data = raw['user']
contributor_stats[user] = {
'twitter_username': user,
'avatar': user_data['profile_image_url'],
'avatar_https': user_data['profile_image_url_https'],
'all': 0, '1m': 0, '1w': 0, '1d': 0,
}
contributor = contributor_stats[reply.twitter_username]
contributor['all'] += 1
if reply.created > one_month_ago:
contributor['1m'] += 1
if reply.created > one_week_ago:
contributor['1w'] += 1
if reply.created > yesterday:
contributor['1d'] += 1
sort_key = settings.CC_TOP_CONTRIB_SORT
limit = settings.CC_TOP_CONTRIB_LIMIT
# Sort by whatever is in settings, break ties with 'all'
contributor_stats = sorted(contributor_stats.values(),
key=lambda c: (c[sort_key], c['all']),
reverse=True)[:limit]
try:
redis = redis_client(name='default')
key = settings.CC_TOP_CONTRIB_CACHE_KEY
redis.set(key, json.dumps(contributor_stats))
except RedisError as e:
statsd.incr('redis.error')
log.error('Redis error: %s' % e)
return contributor_stats
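# --- Read-back sketch (illustrative addition, not part of the original module) ---
# A view or template helper could load the cached leaderboard like this; the
# helper name is an assumption, but the Redis key and the JSON layout match what
# get_customercare_stats() stores above.
def _load_top_contributors():
    try:
        redis = redis_client(name='default')
        raw = redis.get(settings.CC_TOP_CONTRIB_CACHE_KEY)
    except RedisError as e:
        log.error('Redis error: %s' % e)
        return []
    return json.loads(raw) if raw else []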
| import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
@cronjobs.register
def collect_tweets():
"""Collect new tweets about Firefox."""
# Don't (ab)use the twitter API from dev and stage.
if settings.STAGE:
return
with statsd.timer('customercare.tweets.time_elapsed'):
t = Twython(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
search_options = {
'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos '
'OR @firefox_es'),
'count': settings.CC_TWEETS_PERPAGE, # Items per page.
'result_type': 'recent', # Retrieve tweets by date.
}
# If we already have some tweets, collect nothing older than what we
# have.
try:
latest_tweet = Tweet.latest()
except Tweet.DoesNotExist:
log.debug('No existing tweets. Retrieving %d tweets from search.' %
settings.CC_TWEETS_PERPAGE)
else:
search_options['since_id'] = latest_tweet.tweet_id
log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id)
# Retrieve Tweets
results = t.search(**search_options)
if len(results['statuses']) == 0:
# Twitter returned 0 results.
return
# Drop tweets into DB
for item in results['statuses']:
# Apply filters to tweet before saving
# Allow links in #fxinput tweets
statsd.incr('customercare.tweet.collected')
item = _filter_tweet(item,
allow_links='#fxinput' in item['text'])
if not item:
continue
created_date = datetime.utcfromtimestamp(calendar.timegm(
rfc822.parsedate(item['created_at'])))
item_lang = item['metadata'].get('iso_language_code', 'en')
tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item),
locale=item_lang, created=created_date)
try:
tweet.save()
statsd.incr('customercare.tweet.saved')
except IntegrityError:
pass
@cronjobs.register
def purge_tweets():
"""Periodically purge old tweets for each locale.
This does a lot of DELETEs on master, so it shouldn't run too frequently.
Probably once every hour or more.
"""
# Pin to master
pin_this_thread()
# Build list of tweets to delete, by id.
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
# Some locales don't have an iso639_1 code, too bad for them.
if not locale:
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug('Truncating tweet list: Removing tweets older than %s, '
'for [%s].' % (oldest.created, locale))
Tweet.objects.filter(locale=locale,
created__lte=oldest.created).delete()
def _get_oldest_tweet(locale, n=0):
"""Returns the nth oldest tweet per locale, defaults to newest."""
try:
return Tweet.objects.filter(locale=locale).order_by(
'-created')[n]
except IndexError:
return None
def _filter_tweet(item, allow_links=False):
"""
Apply some filters to an incoming tweet.
May modify tweet. If None is returned, tweet will be discarded.
Used to exclude replies and such from incoming tweets.
"""
text = item['text'].lower()
# No replies, except to ALLOWED_USERS
allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
to_user_id = item.get('to_user_id')
if to_user_id and to_user_id not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No mentions, except of ALLOWED_USERS
for user in item['entities']['user_mentions']:
if user['id'] not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No retweets
if RT_REGEX.search(text) or text.find('(via ') > -1:
statsd.incr('customercare.tweet.rejected.retweet')
return None
# No links
if not allow_links and LINK_REGEX.search(text):
statsd.incr('customercare.tweet.rejected.link')
return None
screen_name = item['user']['screen_name']
# Django's caching system will save us here.
IGNORED_USERS = set(
TwitterAccount.objects
.filter(ignored=True)
.values_list('username', flat=True)
)
# Exclude filtered users
if screen_name in IGNORED_USERS:
statsd.incr('customercare.tweet.rejected.user')
return None
# Exclude users with firefox in the handle
if 'firefox' in screen_name.lower():
statsd.incr('customercare.tweet.rejected.firefox_in_handle')
return None
# Exclude problem words
match = get_word_blacklist_regex().search(text)
if match:
bad_word = match.group(1)
statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
return None
return item
@cronjobs.register
def get_customercare_stats():
"""
Generate customer care stats from the Replies table.
This gets cached in Redis as a sorted list of contributors, stored as JSON.
Example Top Contributor data:
[
{
'twitter_username': 'username1',
'avatar': 'http://twitter.com/path/to/the/avatar.png',
'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
'all': 5211,
'1m': 230,
'1w': 33,
'1d': 3,
},
{ ... },
{ ... },
]
"""
if settings.STAGE:
return
contributor_stats = {}
now = datetime.now()
one_month_ago = now - timedelta(days=30)
one_week_ago = now - timedelta(days=7)
yesterday = now - timedelta(days=1)
for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()):
for reply in chunk:
user = reply.twitter_username
if user not in contributor_stats:
raw = json.loads(reply.raw_json)
if 'from_user' in raw: # For tweets collected using v1 API
user_data = raw
else:
user_data = raw['user']
contributor_stats[user] = {
'twitter_username': user,
'avatar': user_data['profile_image_url'],
'avatar_https': user_data['profile_image_url_https'],
'all': 0, '1m': 0, '1w': 0, '1d': 0,
}
contributor = contributor_stats[reply.twitter_username]
contributor['all'] += 1
if reply.created > one_month_ago:
contributor['1m'] += 1
if reply.created > one_week_ago:
contributor['1w'] += 1
if reply.created > yesterday:
contributor['1d'] += 1
sort_key = settings.CC_TOP_CONTRIB_SORT
limit = settings.CC_TOP_CONTRIB_LIMIT
# Sort by whatever is in settings, break ties with 'all'
contributor_stats = sorted(contributor_stats.values(),
key=lambda c: (c[sort_key], c['all']),
reverse=True)[:limit]
try:
redis = redis_client(name='default')
key = settings.CC_TOP_CONTRIB_CACHE_KEY
redis.set(key, json.dumps(contributor_stats))
except RedisError as e:
statsd.incr('redis.error')
log.error('Redis error: %s' % e)
return contributor_stats
| en | 0.841292 | Make a regex that looks kind of like r'\b(foo|bar|baz)\b'. This is a function so that it isn't calculated at import time, and so can be tested more easily. This doesn't use raw strings (r'') because the "mismatched" parens were confusing my syntax highlighter, which was confusing me. # Don't (ab)use the twitter API from dev and stage. Collect new tweets about Firefox. #fxinput OR @firefoxbrasil OR #firefoxos ' # Items per page. # Retrieve tweets by date. # If we already have some tweets, collect nothing older than what we # have. # Retrieve Tweets # Twitter returned 0 results. # Drop tweets into DB # Apply filters to tweet before saving # Allow links in #fxinput tweets Periodically purge old tweets for each locale. This does a lot of DELETEs on master, so it shouldn't run too frequently. Probably once every hour or more. # Pin to master # Build list of tweets to delete, by id. # Some locales don't have an iso639_1 code, too bad for them. Returns the nth oldest tweet per locale, defaults to newest. Apply some filters to an incoming tweet. May modify tweet. If None is returned, tweet will be discarded. Used to exclude replies and such from incoming tweets. # No replies, except to ALLOWED_USERS # No mentions, except of ALLOWED_USERS # No retweets # No links # Django's caching system will save us here. # Exclude filtered users # Exlude users with firefox in the handle # Exclude problem words Generate customer care stats from the Replies table. This gets cached in Redis as a sorted list of contributors, stored as JSON. Example Top Contributor data: [ { 'twitter_username': 'username1', 'avatar': 'http://twitter.com/path/to/the/avatar.png', 'avatar_https': 'https://twitter.com/path/to/the/avatar.png', 'all': 5211, '1m': 230, '1w': 33, '1d': 3, }, { ... }, { ... }, ] # For tweets collected using v1 API # Sort by whatever is in settings, break ties with 'all' | 2.160142 | 2 |
setup.py | nrcmedia/pdfrw | 2 | 7758 | <reponame>nrcmedia/pdfrw
#!/usr/bin/env python
from distutils.core import setup
try:
import setuptools
except:
pass
setup(
name='pdfrw',
version='0.1',
description='PDF file reader/writer library',
long_description='''
pdfrw lets you read and write PDF files, including
compositing multiple pages together (e.g. to do watermarking,
or to copy an image or diagram from one PDF to another),
and can output by itself, or in conjunction with reportlab.
pdfrw will faithfully reproduce vector formats without
rasterization, so the rst2pdf package has used pdfrw
by default for PDF and SVG images since
March 2010. Several small examples are provided.
''',
author='<NAME>',
author_email='<EMAIL>',
platforms='Independent',
url='http://code.google.com/p/pdfrw/',
packages=['pdfrw', 'pdfrw.objects'],
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'
],
keywords='pdf vector graphics',
)
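# --- Packaging note (illustrative addition) ---
# With this setup.py, a source distribution and a local install are typically
# produced with the usual commands, e.g.:
#
#   python setup.py sdist
#   pip install .
#
# These commands reflect the common distutils/setuptools workflow and are an
# assumption, not something mandated by this file.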
| #!/usr/bin/env python
from distutils.core import setup
try:
import setuptools
except:
pass
setup(
name='pdfrw',
version='0.1',
description='PDF file reader/writer library',
long_description='''
pdfrw lets you read and write PDF files, including
compositing multiple pages together (e.g. to do watermarking,
or to copy an image or diagram from one PDF to another),
and can output by itself, or in conjunction with reportlab.
pdfrw will faithfully reproduce vector formats without
rasterization, so the rst2pdf package has used pdfrw
by default for PDF and SVG images since
March 2010. Several small examples are provided.
''',
author='<NAME>',
author_email='<EMAIL>',
platforms='Independent',
url='http://code.google.com/p/pdfrw/',
packages=['pdfrw', 'pdfrw.objects'],
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'
],
keywords='pdf vector graphics',
) | en | 0.856505 | #!/usr/bin/env python pdfrw lets you read and write PDF files, including compositing multiple pages together (e.g. to do watermarking, or to copy an image or diagram from one PDF to another), and can output by itself, or in conjunction with reportlab. pdfrw will faithfully reproduce vector formats without rasterization, so the rst2pdf package has used pdfrw by default for PDF and SVG images by default since March 2010. Several small examples are provided. | 1.996744 | 2 |
checkAnnotation.py | ZZIDZZ/pytorch-ssd | 0 | 7759 | import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
def vocChecker(image_id, width, height, keep_difficult = False):
target = ET.parse(annopath % image_id).getroot()
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height
bndbox.append(cur_pt)
print(name)
label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
print(res)
try :
print(np.array(res)[:,4])
print(np.array(res)[:,:4])
except IndexError:
print("\nINDEX ERROR HERE !\n")
exit(0)
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
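# vocChecker() expects Pascal VOC style annotation files; a minimal sketch of the
# XML it parses (tag values are invented for illustration):
#
#   <annotation>
#     <object>
#       <name>helmet</name>
#       <difficult>0</difficult>
#       <bndbox>
#         <xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
#       </bndbox>
#     </object>
#   </annotation>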
if __name__ == '__main__' :
i = 0
for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
# as we have only one annotations file per image
i += 1
img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
height, width, channels = img.shape
res = vocChecker((args.root, name.split('.')[0]), width, height)
print("path : {}".format(annopath % (args.root,name.split('.')[0])))
res = vocChecker((args.root, name.split('.')[0]), width, height)
print("Total of annotations : {}".format(i)) | import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
def vocChecker(image_id, width, height, keep_difficult = False):
target = ET.parse(annopath % image_id).getroot()
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height
bndbox.append(cur_pt)
print(name)
label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
print(res)
try :
print(np.array(res)[:,4])
print(np.array(res)[:,:4])
except IndexError:
print("\nINDEX ERROR HERE !\n")
exit(0)
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
if __name__ == '__main__' :
i = 0
for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
# as we have only one annotations file per image
i += 1
img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
height, width, channels = img.shape
res = vocChecker((args.root, name.split('.')[0]), width, height)
print("path : {}".format(annopath % (args.root,name.split('.')[0])))
res = vocChecker((args.root, name.split('.')[0]), width, height)
print("Total of annotations : {}".format(i)) | en | 0.341346 | # always index 0 # scale height or width # [xmin, ymin, xmax, ymax, label_ind] # img_id = target.find('filename').text[:-4] # [[xmin, ymin, xmax, ymax, label_ind], ... ] # as we have only one annotations file per image | 2.51012 | 3 |
src/oci/identity_data_plane/models/password_reset_authentication_request.py | LaudateCorpus1/oci-python-sdk | 0 | 7760 | <gh_stars>0
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PasswordResetAuthenticationRequest(object):
"""
PasswordResetAuthenticationRequest model.
"""
def __init__(self, **kwargs):
"""
Initializes a new PasswordResetAuthenticationRequest object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param user_id:
The value to assign to the user_id property of this PasswordResetAuthenticationRequest.
:type user_id: str
:param password_reset_token:
The value to assign to the password_reset_token property of this PasswordResetAuthenticationRequest.
:type password_reset_token: str
"""
self.swagger_types = {
'user_id': 'str',
'password_reset_token': 'str'
}
self.attribute_map = {
'user_id': 'userId',
'password_reset_token': '<PASSWORD>'
}
self._user_id = None
self._password_reset_token = None
@property
def user_id(self):
"""
**[Required]** Gets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:return: The user_id of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:param user_id: The user_id of this PasswordResetAuthenticationRequest.
:type: str
"""
self._user_id = user_id
@property
def password_reset_token(self):
"""
**[Required]** Gets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:return: The password_reset_token of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._password_reset_token
@password_reset_token.setter
def password_reset_token(self, password_reset_token):
"""
Sets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:param password_reset_token: The password_reset_token of this PasswordResetAuthenticationRequest.
:type: str
"""
self._password_reset_token = password_reset_token
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
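# --- Usage sketch (illustrative addition, not part of the generated SDK file) ---
# The model is initialised from keyword arguments (see __init__ above); the OCID
# and token values below are placeholders, not real identifiers.
if __name__ == "__main__":
    demo_request = PasswordResetAuthenticationRequest(
        user_id="ocid1.user.oc1..exampleuniqueid",
        password_reset_token="example-reset-token",
    )
    print(demo_request.user_id)
    print(repr(demo_request))  # formatted_flat_dict rendering of both fields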
| # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PasswordResetAuthenticationRequest(object):
"""
PasswordResetAuthenticationRequest model.
"""
def __init__(self, **kwargs):
"""
Initializes a new PasswordResetAuthenticationRequest object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param user_id:
The value to assign to the user_id property of this PasswordResetAuthenticationRequest.
:type user_id: str
:param password_reset_token:
The value to assign to the password_reset_token property of this PasswordResetAuthenticationRequest.
:type password_reset_token: str
"""
self.swagger_types = {
'user_id': 'str',
'password_reset_token': 'str'
}
self.attribute_map = {
'user_id': 'userId',
'password_reset_token': '<PASSWORD>'
}
self._user_id = None
self._password_reset_token = None
@property
def user_id(self):
"""
**[Required]** Gets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:return: The user_id of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:param user_id: The user_id of this PasswordResetAuthenticationRequest.
:type: str
"""
self._user_id = user_id
@property
def password_reset_token(self):
"""
**[Required]** Gets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:return: The password_reset_token of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._password_reset_token
@password_reset_token.setter
def password_reset_token(self, password_reset_token):
"""
Sets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:param password_reset_token: The password_reset_token of this PasswordResetAuthenticationRequest.
:type: str
"""
self._password_reset_token = password_reset_token
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | en | 0.637305 | # coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # noqa: F401 PasswordResetAuthenticationRequest model. Initializes a new PasswordResetAuthenticationRequest object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param user_id: The value to assign to the user_id property of this PasswordResetAuthenticationRequest. :type user_id: str :param password_reset_token: The value to assign to the password_reset_token property of this PasswordResetAuthenticationRequest. :type password_reset_token: str **[Required]** Gets the user_id of this PasswordResetAuthenticationRequest. The id of the user :return: The user_id of this PasswordResetAuthenticationRequest. :rtype: str Sets the user_id of this PasswordResetAuthenticationRequest. The id of the user :param user_id: The user_id of this PasswordResetAuthenticationRequest. :type: str **[Required]** Gets the password_reset_token of this PasswordResetAuthenticationRequest. The password reset token :return: The password_reset_token of this PasswordResetAuthenticationRequest. :rtype: str Sets the password_reset_token of this PasswordResetAuthenticationRequest. The password reset token :param password_reset_token: The password_reset_token of this PasswordResetAuthenticationRequest. :type: str | 2.332455 | 2 |
venv/lib/python3.7/site-packages/convertdate/dublin.py | vchiapaikeo/prophet | 0 | 7761 | <filename>venv/lib/python3.7/site-packages/convertdate/dublin.py
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <<EMAIL>>
'''Convert to and from the Dublin day count'''
from . import daycount
EPOCH = 2415020 # Julian Day Count for Dublin Count 0
_dublin = daycount.DayCount(EPOCH)
to_gregorian = _dublin.to_gregorian
from_gregorian = _dublin.from_gregorian
to_jd = _dublin.to_jd
from_jd = _dublin.from_jd
from_julian = _dublin.from_julian
to_julian = _dublin.to_julian
to_datetime = _dublin.to_datetime
from_datetime = _dublin.from_datetime
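# --- Usage sketch (illustrative addition) ---
# Round-trip example for the Dublin day count; the chosen date is arbitrary.
if __name__ == "__main__":
    dublin_day = from_gregorian(2016, 1, 1)
    print(dublin_day)                 # days since the Dublin epoch (JD 2415020)
    print(to_gregorian(dublin_day))   # back to a (year, month, day) tuple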
| <filename>venv/lib/python3.7/site-packages/convertdate/dublin.py
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <<EMAIL>>
'''Convert to and from the Dublin day count'''
from . import daycount
EPOCH = 2415020 # Julian Day Count for Dublin Count 0
_dublin = daycount.DayCount(EPOCH)
to_gregorian = _dublin.to_gregorian
from_gregorian = _dublin.from_gregorian
to_jd = _dublin.to_jd
from_jd = _dublin.from_jd
from_julian = _dublin.from_julian
to_julian = _dublin.to_julian
to_datetime = _dublin.to_datetime
from_datetime = _dublin.from_datetime
| en | 0.786213 | # -*- coding: utf-8 -*- # This file is part of convertdate. # http://github.com/fitnr/convertdate # Licensed under the MIT license: # http://opensource.org/licenses/MIT # Copyright (c) 2016, fitnr <<EMAIL>> Convert to and from the Dublin day count # Julian Day Count for Dublin Count 0 | 2.374884 | 2 |
tests/functional/controllers/test_group_controller_superuser.py | roscisz/TensorHive | 129 | 7762 | <filename>tests/functional/controllers/test_group_controller_superuser.py<gh_stars>100-1000
from tensorhive.models.Group import Group
from fixtures.controllers import API_URI as BASE_URI, HEADERS
from http import HTTPStatus
from importlib import reload
import json
import auth_patcher
ENDPOINT = BASE_URI + '/groups'
def setup_module(_):
auth_patches = auth_patcher.get_patches(superuser=True)
for auth_patch in auth_patches:
auth_patch.start()
for module in auth_patcher.CONTROLLER_MODULES:
reload(module)
for auth_patch in auth_patches:
auth_patch.stop()
# POST /groups
def test_create_group(tables, client):
group_name = 'TestGroup'
data = {'name': group_name}
resp = client.post(ENDPOINT, headers=HEADERS, data=json.dumps(data))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.CREATED
assert resp_json['group']['id'] is not None
assert resp_json['group']['name'] == group_name
assert Group.get(int(resp_json['group']['id'])) is not None
# PUT /groups/{id}
def test_update_group(tables, client, new_group):
new_group.save()
new_group_name = new_group.name + '111'
resp = client.put(ENDPOINT + '/' + str(new_group.id), headers=HEADERS, data=json.dumps({'name': new_group_name}))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.OK
assert resp_json['group']['name'] == new_group_name
assert Group.get(new_group.id).name == new_group_name
# PUT /groups/{id} - nonexistent id
def test_update_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.put(ENDPOINT + '/' + non_existent_id, headers=HEADERS, data=json.dumps({'name': 'test'}))
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}
def test_delete_group(tables, client, new_group):
new_group.save()
resp = client.delete(ENDPOINT + '/' + str(new_group.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
# Let's get all groups to verify
resp = client.get(ENDPOINT, headers=HEADERS)
resp_json = json.loads(resp.data.decode('utf-8'))
assert len(resp_json) == 0
# DELETE /groups/{id} - nonexistent id
def test_delete_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.delete(ENDPOINT + '/' + non_existent_id, headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id}
def test_add_user_to_a_group(tables, client, new_group, new_user):
new_group.save()
new_user.save()
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group in new_user.groups
assert new_user in new_group.users
# DELETE /groups/{id}/users/{id}
def test_remove_user_from_a_group(tables, client, new_group_with_member):
new_group_with_member.save()
user = new_group_with_member.users[0]
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group_with_member.id, user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group_with_member not in user.groups
assert user not in new_group_with_member.users
# PUT /groups/{id}/users/{id} - nonexistent user id
def test_add_nonexistent_user_to_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id} - nonexistent group id
def test_add_user_to_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent user id
def test_remove_nonexistent_user_from_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent group id
def test_remove_user_from_a_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}
def test_set_group_as_a_default(tables, client, new_group):
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': True}), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default
# PUT /groups/{id}
def test_mark_default_group_as_non_default(tables, client, new_group):
new_group.is_default = True
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': False}),
headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default is False
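# Illustrative invocation (an assumption about the project's pytest layout, not part
# of the original suite): run just this module with
#   pytest tests/functional/controllers/test_group_controller_superuser.py -q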
| <filename>tests/functional/controllers/test_group_controller_superuser.py<gh_stars>100-1000
from tensorhive.models.Group import Group
from fixtures.controllers import API_URI as BASE_URI, HEADERS
from http import HTTPStatus
from importlib import reload
import json
import auth_patcher
ENDPOINT = BASE_URI + '/groups'
def setup_module(_):
auth_patches = auth_patcher.get_patches(superuser=True)
for auth_patch in auth_patches:
auth_patch.start()
for module in auth_patcher.CONTROLLER_MODULES:
reload(module)
for auth_patch in auth_patches:
auth_patch.stop()
# POST /groups
def test_create_group(tables, client):
group_name = 'TestGroup'
data = {'name': group_name}
resp = client.post(ENDPOINT, headers=HEADERS, data=json.dumps(data))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.CREATED
assert resp_json['group']['id'] is not None
assert resp_json['group']['name'] == group_name
assert Group.get(int(resp_json['group']['id'])) is not None
# PUT /groups/{id}
def test_update_group(tables, client, new_group):
new_group.save()
new_group_name = new_group.name + '111'
resp = client.put(ENDPOINT + '/' + str(new_group.id), headers=HEADERS, data=json.dumps({'name': new_group_name}))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.OK
assert resp_json['group']['name'] == new_group_name
assert Group.get(new_group.id).name == new_group_name
# PUT /groups/{id} - nonexistent id
def test_update_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.put(ENDPOINT + '/' + non_existent_id, headers=HEADERS, data=json.dumps({'name': 'test'}))
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}
def test_delete_group(tables, client, new_group):
new_group.save()
resp = client.delete(ENDPOINT + '/' + str(new_group.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
# Let's get all groups to verify
resp = client.get(ENDPOINT, headers=HEADERS)
resp_json = json.loads(resp.data.decode('utf-8'))
assert len(resp_json) == 0
# DELETE /groups/{id} - nonexistent id
def test_delete_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.delete(ENDPOINT + '/' + non_existent_id, headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id}
def test_add_user_to_a_group(tables, client, new_group, new_user):
new_group.save()
new_user.save()
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group in new_user.groups
assert new_user in new_group.users
# DELETE /groups/{id}/users/{id}
def test_remove_user_from_a_group(tables, client, new_group_with_member):
new_group_with_member.save()
user = new_group_with_member.users[0]
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group_with_member.id, user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group_with_member not in user.groups
assert user not in new_group_with_member.users
# PUT /groups/{id}/users/{id} - nonexistent user id
def test_add_nonexistent_user_to_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id} - nonexistent group id
def test_add_user_to_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent user id
def test_remove_nonexistent_user_from_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent group id
def test_remove_user_from_a_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}
def test_set_group_as_a_default(tables, client, new_group):
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': True}), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default
# PUT /groups/{id}
def test_mark_default_group_as_non_default(tables, client, new_group):
new_group.is_default = True
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': False}),
headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default is False
| en | 0.327351 | # POST /groups # PUT /groups/{id} # PUT /groups/{id} - nonexistent id # DELETE /groups/{id} # Let's get all groups to verify # DELETE /groups/{id} - nonexistent id # PUT /groups/{id}/users/{id} # DELETE /groups/{id}/users/{id} # PUT /groups/{id}/users/{id} - nonexistent user id # PUT /groups/{id}/users/{id} - nonexistent group id # DELETE /groups/{id}/users/{id} - nonexistent user id # DELETE /groups/{id}/users/{id} - nonexistent group id # PUT /groups/{id} # PUT /groups/{id} | 2.233922 | 2 |
code/generate_thought_vectors.py | midas-research/text2facegan | 23 | 7763 | <gh_stars>10-100
import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py
def main():
parser = argparse.ArgumentParser()
#parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt',
# help='caption file')
parser.add_argument('--caption_file', type=str, default='/media/ssd_working_space/osaid/Data/sample_captions.txt',
help='caption file')
#parser.add_argument('--data_dir', type=str, default='Data',
# help='Data Directory')
parser.add_argument('--data_dir', type=str, default='/media/ssd_working_space/osaid/Data',
help='Data Directory')
args = parser.parse_args()
with open( args.caption_file ) as f:
captions = f.read().split('\n')
captions = [cap for cap in captions if len(cap) > 0]
print(captions)
model = skipthoughts.load_model()
caption_vectors = skipthoughts.encode(model, captions)
if os.path.isfile(join(args.data_dir, 'sample_caption_vectors.hdf5')):
os.remove(join(args.data_dir, 'sample_caption_vectors.hdf5'))
h = h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'), 'w')
h.create_dataset('vectors', data=caption_vectors)
h.close()
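# Reading the vectors back later might look like this (an illustrative sketch,
# not part of the original script):
#   with h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'), 'r') as hf:
#       caption_vectors = hf['vectors'][:]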
if __name__ == '__main__':
main() | import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py
def main():
parser = argparse.ArgumentParser()
#parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt',
# help='caption file')
parser.add_argument('--caption_file', type=str, default='/media/ssd_working_space/osaid/Data/sample_captions.txt',
help='caption file')
#parser.add_argument('--data_dir', type=str, default='Data',
# help='Data Directory')
parser.add_argument('--data_dir', type=str, default='/media/ssd_working_space/osaid/Data',
help='Data Directory')
args = parser.parse_args()
with open( args.caption_file ) as f:
captions = f.read().split('\n')
captions = [cap for cap in captions if len(cap) > 0]
print(captions)
model = skipthoughts.load_model()
caption_vectors = skipthoughts.encode(model, captions)
if os.path.isfile(join(args.data_dir, 'sample_caption_vectors.hdf5')):
os.remove(join(args.data_dir, 'sample_caption_vectors.hdf5'))
h = h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'), 'w')
h.create_dataset('vectors', data=caption_vectors)
h.close()
if __name__ == '__main__':
main() | ar | 0.046141 | #parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt', # help='caption file') #parser.add_argument('--data_dir', type=str, default='Data', # help='Data Directory') | 2.641063 | 3 |
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py | Svesnav2/Discord-Bot-Minecraft-server-status | 0 | 7764 | """Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
"""Parses a boolean value from the text
returned by the difficulty command.
"""
return boolmap(text, true=SET, false=UNCHANGED)
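# Usage sketch (assumes the server reply matches the vanilla wording captured by the
# patterns above; not part of the original module):
#   parse('The difficulty has been set to Hard')                       -> True
#   parse('The difficulty did not change; it is already set to Hard')  -> False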
| """Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
"""Parses a boolean value from the text
returned by the difficulty command.
"""
return boolmap(text, true=SET, false=UNCHANGED)
| en | 0.953414 | Parsing responses from the difficulty command. Parses a boolean value from the text returned by the difficulty command. | 3.460874 | 3 |
eth/beacon/aggregation.py | Bhargavasomu/py-evm | 0 | 7765 | <filename>eth/beacon/aggregation.py
from typing import (
Iterable,
Tuple,
)
from cytoolz import (
pipe
)
from eth._utils import bls
from eth._utils.bitfield import (
set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
BLSPubkey,
BLSSignature,
Bitfield,
CommitteeIndex,
)
def verify_votes(
message: bytes,
votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
"""
Verify the given votes.
vote: (committee_index, sig, public_key)
"""
sigs_with_committee_info = tuple(
(sig, committee_index)
for (committee_index, sig, public_key)
in votes
if bls.verify(message, public_key, sig, domain)
)
try:
sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Iterable[BLSSignature],
voting_sigs: Iterable[BLSSignature],
voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
bitfield = pipe(
bitfield,
*(
set_voted(index=committee_index)
for committee_index in voting_committee_indices
)
)
return bitfield, bls.aggregate_signatures(sigs)
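# Illustrative flow (a sketch, not part of the original module): verify incoming votes
# first, then fold the verified signatures into the running aggregate. `message`,
# `votes`, `domain`, `bitfield` and `existing_sigs` are assumed to be supplied by the
# caller.
#   sigs, committee_indices = verify_votes(message, votes, domain)
#   bitfield, aggregate_sig = aggregate_votes(bitfield, existing_sigs, sigs, committee_indices)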
| <filename>eth/beacon/aggregation.py
from typing import (
Iterable,
Tuple,
)
from cytoolz import (
pipe
)
from eth._utils import bls
from eth._utils.bitfield import (
set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
BLSPubkey,
BLSSignature,
Bitfield,
CommitteeIndex,
)
def verify_votes(
message: bytes,
votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
"""
Verify the given votes.
vote: (committee_index, sig, public_key)
"""
sigs_with_committee_info = tuple(
(sig, committee_index)
for (committee_index, sig, public_key)
in votes
if bls.verify(message, public_key, sig, domain)
)
try:
sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Iterable[BLSSignature],
voting_sigs: Iterable[BLSSignature],
voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
bitfield = pipe(
bitfield,
*(
set_voted(index=committee_index)
for committee_index in voting_committee_indices
)
)
return bitfield, bls.aggregate_signatures(sigs)
| en | 0.64408 | Verify the given votes. vote: (committee_index, sig, public_key) Aggregate the votes. # Update the bitfield and append the signatures | 2.447472 | 2 |
src/server/bos/controllers/v1/components.py | Cray-HPE/bos | 1 | 7766 | <filename>src/server/bos/controllers/v1/components.py
# Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import connexion
from datetime import datetime
import logging
from bos import redis_db_utils as dbutils
LOGGER = logging.getLogger('bos.controllers.v1.components')
DB = dbutils.get_wrapper(db='components')
@dbutils.redis_error_handler
def get_components(ids="", enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
LOGGER.debug("GET /components invoked get_components")
id_list = []
if ids:
try:
id_list = ids.split(',')
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the ids provided.",
detail=str(err))
response = get_components_data(id_list=id_list, enabled=enabled)
return response, 200
def get_components_data(id_list=None, enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
response = []
if id_list:
for component_id in id_list:
data = DB.get(component_id)
if data:
response.append(data)
else:
# TODO: On large scale systems, this response may be too large
# and require paging to be implemented
response = DB.get_all()
if enabled is not None:
response = [r for r in response if _matches_filter(r, enabled)]
return response
def _matches_filter(data, enabled):
if enabled is not None and data.get('enabled', None) != enabled:
return False
return True
@dbutils.redis_error_handler
def put_components():
"""Used by the PUT /components API operation"""
LOGGER.debug("PUT /components invoked put_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.put(component_id, component_data))
return response, 200
@dbutils.redis_error_handler
def patch_components():
"""Used by the PATCH /components API operation"""
LOGGER.debug("PATCH /components invoked patch_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.patch(component_id, component_data, _update_handler))
return response, 200
@dbutils.redis_error_handler
def get_component(component_id, config_details=False, v2=False):
"""Used by the GET /components/{component_id} API operation"""
LOGGER.debug("GET /components/id invoked get_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
component = DB.get(component_id)
return component, 200
@dbutils.redis_error_handler
def put_component(component_id):
"""Used by the PUT /components/{component_id} API operation"""
LOGGER.debug("PUT /components/id invoked put_component")
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data['id'] = component_id
data = _set_auto_fields(data)
return DB.put(component_id, data), 200
@dbutils.redis_error_handler
def patch_component(component_id):
"""Used by the PATCH /components/{component_id} API operation"""
LOGGER.debug("PATCH /components/id invoked patch_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data = _set_auto_fields(data)
return DB.patch(component_id, data, _update_handler), 200
@dbutils.redis_error_handler
def delete_component(component_id):
"""Used by the DELETE /components/{component_id} API operation"""
LOGGER.debug("DELETE /components/id invoked delete_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
return DB.delete(component_id), 204
def _set_auto_fields(data):
data = _set_last_updated(data)
return data
def _set_last_updated(data):
timestamp = datetime.utcnow().isoformat()
for section in ['actualState', 'desiredState', 'lastAction']:
if section in data and type(data[section]) == dict:
data[section]['lastUpdated'] = timestamp
return data
def _update_handler(data):
# Allows processing of data during common patch operation
return data
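# Illustrative sketch (not part of the original module): _set_last_updated stamps each
# state section it finds with an ISO-8601 timestamp, e.g.
#   {'id': 'x1', 'desiredState': {}} -> {'id': 'x1', 'desiredState': {'lastUpdated': '2021-01-01T00:00:00'}}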
| <filename>src/server/bos/controllers/v1/components.py
# Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import connexion
from datetime import datetime
import logging
from bos import redis_db_utils as dbutils
LOGGER = logging.getLogger('bos.controllers.v1.components')
DB = dbutils.get_wrapper(db='components')
@dbutils.redis_error_handler
def get_components(ids="", enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
LOGGER.debug("GET /components invoked get_components")
id_list = []
if ids:
try:
id_list = ids.split(',')
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the ids provided.",
detail=str(err))
response = get_components_data(id_list=id_list, enabled=enabled)
return response, 200
def get_components_data(id_list=None, enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
response = []
if id_list:
for component_id in id_list:
data = DB.get(component_id)
if data:
response.append(data)
else:
# TODO: On large scale systems, this response may be too large
# and require paging to be implemented
response = DB.get_all()
if enabled is not None:
response = [r for r in response if _matches_filter(r, enabled)]
return response
def _matches_filter(data, enabled):
if enabled is not None and data.get('enabled', None) != enabled:
return False
return True
@dbutils.redis_error_handler
def put_components():
"""Used by the PUT /components API operation"""
LOGGER.debug("PUT /components invoked put_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.put(component_id, component_data))
return response, 200
@dbutils.redis_error_handler
def patch_components():
"""Used by the PATCH /components API operation"""
LOGGER.debug("PATCH /components invoked patch_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.patch(component_id, component_data, _update_handler))
return response, 200
@dbutils.redis_error_handler
def get_component(component_id, config_details=False, v2=False):
"""Used by the GET /components/{component_id} API operation"""
LOGGER.debug("GET /components/id invoked get_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
component = DB.get(component_id)
return component, 200
@dbutils.redis_error_handler
def put_component(component_id):
"""Used by the PUT /components/{component_id} API operation"""
LOGGER.debug("PUT /components/id invoked put_component")
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data['id'] = component_id
data = _set_auto_fields(data)
return DB.put(component_id, data), 200
@dbutils.redis_error_handler
def patch_component(component_id):
"""Used by the PATCH /components/{component_id} API operation"""
LOGGER.debug("PATCH /components/id invoked patch_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data = _set_auto_fields(data)
return DB.patch(component_id, data, _update_handler), 200
@dbutils.redis_error_handler
def delete_component(component_id):
"""Used by the DELETE /components/{component_id} API operation"""
LOGGER.debug("DELETE /components/id invoked delete_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
return DB.delete(component_id), 204
def _set_auto_fields(data):
data = _set_last_updated(data)
return data
def _set_last_updated(data):
timestamp = datetime.utcnow().isoformat()
for section in ['actualState', 'desiredState', 'lastAction']:
if section in data and type(data[section]) == dict:
data[section]['lastUpdated'] = timestamp
return data
def _update_handler(data):
# Allows processing of data during common patch operation
return data
| en | 0.788318 | # Copyright 2021 Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # (MIT License) Used by the GET /components API operation Allows filtering using a comma seperated list of ids. Used by the GET /components API operation Allows filtering using a comma separated list of ids. # TODO: On large scale systems, this response may be too large # and require paging to be implemented Used by the PUT /components API operation Used by the PATCH /components API operation Used by the GET /components/{component_id} API operation Used by the PUT /components/{component_id} API operation Used by the PATCH /components/{component_id} API operation Used by the DELETE /components/{component_id} API operation # Allows processing of data during common patch operation | 1.950306 | 2 |
cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py | angelusualle/algorithms | 0 | 7767 | <filename>cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
import unittest
from find_x_in_listy import find_x_in_listy, Listy
class Test_Case_Find_X_In_Listy(unittest.TestCase):
def test_case_find_x_in_listy(self):
listy = Listy(list(range(0, 1*10**8)))
self.assertEqual(find_x_in_listy(listy, 5678), 5678) | <filename>cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
import unittest
from find_x_in_listy import find_x_in_listy, Listy
class Test_Case_Find_X_In_Listy(unittest.TestCase):
def test_case_find_x_in_listy(self):
listy = Listy(list(range(0, 1*10**8)))
self.assertEqual(find_x_in_listy(listy, 5678), 5678) | none | 1 | 3.192616 | 3 |
my_general_helpers.py | arminbahl/drosophila_phototaxis_paper | 0 | 7768 | <gh_stars>0
from scipy.signal import butter,filtfilt
from numba import jit
import bisect
def is_number_in_sorted_vector(sorted_vector, num):
index = bisect.bisect_left(sorted_vector, num)
return index != len(sorted_vector) and sorted_vector[index] == num
# def butter_lowpass(cutoff, fs, order=5):
# nyq = 0.5 * fs
# normal_cutoff = cutoff / nyq
# b, a = butter(order, normal_cutoff, btype='low', analog=False)
# return b, a
def butter_lowpass_filter(data, cutoff, fs, order):
nyq = 0.5 * fs # Nyquist Frequency
normal_cutoff = cutoff / nyq
# Get the filter coefficients
b, a = butter(order, normal_cutoff, btype='low', analog=False)
y = filtfilt(b, a, data)
return y
@jit
def first_order_lowpass_filter(signal_in, signal_out, tau, dt):
alpha_lowpass = dt / (tau + dt)
signal_out[0] = signal_in[0]
for i in range(1, len(signal_in)):
signal_out[i] = alpha_lowpass*signal_in[i] + (1-alpha_lowpass)*signal_out[i-1]
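# Usage sketch (the sampling parameters are assumptions, not part of the original module):
#   import numpy as np
#   t = np.arange(0, 10, 0.01)                         # 100 Hz sampling
#   noisy = np.sin(t) + 0.1 * np.random.randn(len(t))
#   smooth = butter_lowpass_filter(noisy, cutoff=2.0, fs=100.0, order=4)
#   out = np.empty_like(noisy)
#   first_order_lowpass_filter(noisy, out, tau=0.5, dt=0.01)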
| from scipy.signal import butter,filtfilt
from numba import jit
import bisect
def is_number_in_sorted_vector(sorted_vector, num):
index = bisect.bisect_left(sorted_vector, num)
return index != len(sorted_vector) and sorted_vector[index] == num
# def butter_lowpass(cutoff, fs, order=5):
# nyq = 0.5 * fs
# normal_cutoff = cutoff / nyq
# b, a = butter(order, normal_cutoff, btype='low', analog=False)
# return b, a
def butter_lowpass_filter(data, cutoff, fs, order):
nyq = 0.5 * fs # Nyquist Frequency
normal_cutoff = cutoff / nyq
# Get the filter coefficients
b, a = butter(order, normal_cutoff, btype='low', analog=False)
y = filtfilt(b, a, data)
return y
@jit
def first_order_lowpass_filter(signal_in, signal_out, tau, dt):
alpha_lowpass = dt / (tau + dt)
signal_out[0] = signal_in[0]
for i in range(1, len(signal_in)):
signal_out[i] = alpha_lowpass*signal_in[i] + (1-alpha_lowpass)*signal_out[i-1] | en | 0.637434 | # def butter_lowpass(cutoff, fs, order=5): # nyq = 0.5 * fs # normal_cutoff = cutoff / nyq # b, a = butter(order, normal_cutoff, btype='low', analog=False) # return b, a # Nyquist Frequency # Get the filter coefficients | 2.825552 | 3 |
test/mitmproxy/addons/test_proxyserver.py | KarlParkinson/mitmproxy | 24939 | 7769 | <reponame>KarlParkinson/mitmproxy
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
class HelperAddon:
def __init__(self):
self.flows = []
self.layers = [
lambda ctx: layers.modes.HttpProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
lambda ctx: layers.TCPLayer(ctx),
]
def request(self, f):
self.flows.append(f)
def tcp_start(self, f):
self.flows.append(f)
def next_layer(self, nl):
nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
async def test_start_stop():
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
writer.close()
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
assert not ps.server
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
assert ps.server
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
assert repr(ps) == "ProxyServer(running, 1 active conns)"
tctx.configure(ps, server=False)
await tctx.master.await_log("Stopping server", level="info")
assert not ps.server
assert state.flows
assert state.flows[0].request.path == "/hello"
assert state.flows[0].response.status_code == 204
# Waiting here until everything is really torn down... takes some effort.
conn_handler = list(ps._connections.values())[0]
client_handler = conn_handler.transports[conn_handler.client].handler
writer.close()
await writer.wait_closed()
try:
await client_handler
except asyncio.CancelledError:
pass
for _ in range(5):
# Get all other scheduled coroutines to run.
await asyncio.sleep(0)
assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
@pytest.mark.asyncio
async def test_inject() -> None:
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
while s := await reader.read(1):
writer.write(s.upper())
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
writer.write(b"a")
assert await reader.read(1) == b"A"
ps.inject_tcp(state.flows[0], False, b"b")
assert await reader.read(1) == b"B"
ps.inject_tcp(state.flows[0], True, b"c")
assert await reader.read(1) == b"c"
@pytest.mark.asyncio
async def test_inject_fail() -> None:
ps = Proxyserver()
with taddons.context(ps) as tctx:
ps.inject_websocket(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
ps.inject_tcp(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
ps.inject_websocket(
tflow.twebsocketflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
ps.inject_websocket(
tflow.ttcpflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
@pytest.mark.asyncio
async def test_warn_no_nextlayer():
"""
Test that we log an error if the proxy server is started without NextLayer addon.
That is a mean trap to fall into when writing end-to-end tests.
"""
ps = Proxyserver()
with taddons.context(ps) as tctx:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening at", level="info")
assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
await ps.shutdown_server()
def test_self_connect():
server = tserver_conn()
client = tclient_conn()
server.address = ("localhost", 8080)
ps = Proxyserver()
with taddons.context(ps) as tctx:
# not calling .running() here to avoid unnecessary socket
ps.options = tctx.options
ps.server_connect(
server_hooks.ServerConnectionHookData(server, client)
)
assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
ps = Proxyserver()
with taddons.context(ps) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, body_size_limit="invalid")
tctx.configure(ps, body_size_limit="1m")
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, stream_large_bodies="invalid")
tctx.configure(ps, stream_large_bodies="1m")
| import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
class HelperAddon:
def __init__(self):
self.flows = []
self.layers = [
lambda ctx: layers.modes.HttpProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
lambda ctx: layers.TCPLayer(ctx),
]
def request(self, f):
self.flows.append(f)
def tcp_start(self, f):
self.flows.append(f)
def next_layer(self, nl):
nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
async def test_start_stop():
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
writer.close()
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
assert not ps.server
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
assert ps.server
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
assert repr(ps) == "ProxyServer(running, 1 active conns)"
tctx.configure(ps, server=False)
await tctx.master.await_log("Stopping server", level="info")
assert not ps.server
assert state.flows
assert state.flows[0].request.path == "/hello"
assert state.flows[0].response.status_code == 204
# Waiting here until everything is really torn down... takes some effort.
conn_handler = list(ps._connections.values())[0]
client_handler = conn_handler.transports[conn_handler.client].handler
writer.close()
await writer.wait_closed()
try:
await client_handler
except asyncio.CancelledError:
pass
for _ in range(5):
# Get all other scheduled coroutines to run.
await asyncio.sleep(0)
assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
@pytest.mark.asyncio
async def test_inject() -> None:
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
while s := await reader.read(1):
writer.write(s.upper())
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
writer.write(b"a")
assert await reader.read(1) == b"A"
ps.inject_tcp(state.flows[0], False, b"b")
assert await reader.read(1) == b"B"
ps.inject_tcp(state.flows[0], True, b"c")
assert await reader.read(1) == b"c"
@pytest.mark.asyncio
async def test_inject_fail() -> None:
ps = Proxyserver()
with taddons.context(ps) as tctx:
ps.inject_websocket(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
ps.inject_tcp(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
ps.inject_websocket(
tflow.twebsocketflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
ps.inject_websocket(
tflow.ttcpflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
@pytest.mark.asyncio
async def test_warn_no_nextlayer():
"""
Test that we log an error if the proxy server is started without NextLayer addon.
That is a mean trap to fall into when writing end-to-end tests.
"""
ps = Proxyserver()
with taddons.context(ps) as tctx:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening at", level="info")
assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
await ps.shutdown_server()
def test_self_connect():
server = tserver_conn()
client = tclient_conn()
server.address = ("localhost", 8080)
ps = Proxyserver()
with taddons.context(ps) as tctx:
# not calling .running() here to avoid unnecessary socket
ps.options = tctx.options
ps.server_connect(
server_hooks.ServerConnectionHookData(server, client)
)
assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
ps = Proxyserver()
with taddons.context(ps) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, body_size_limit="invalid")
tctx.configure(ps, body_size_limit="1m")
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, stream_large_bodies="invalid")
tctx.configure(ps, stream_large_bodies="1m") | en | 0.891132 | # Waiting here until everything is really torn down... takes some effort. # Get all other scheduled coroutines to run. Test that we log an error if the proxy server is started without NextLayer addon. That is a mean trap to fall into when writing end-to-end tests. # not calling .running() here to avoid unnecessary socket | 2.051513 | 2 |
tensorflow_probability/python/distributions/masked.py | mederrata/probability | 1 | 7770 | <reponame>mederrata/probability
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
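# Shape sketch (illustrative): a validity_mask of shape [2, 3] with event_ndims=2 is
# reshaped to [2, 3, 1, 1] so it broadcasts against the event dimensions.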
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
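# For example (illustrative): _make_masked_fn('mean', n_event_shapes=1,
# safe_value='safe_sample') builds a `_mean` method that returns the underlying
# distribution's mean for valid batch members and the stop-gradient safe sample
# elsewhere.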
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
- `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
The default `safe_sample_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
Besides the output of `sample`, results from `safe_sample_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
the `distribution` argument) and returns a deterministic, safe sample
value. This helps to avoid `nan` gradients and allows downstream usage
of samples from a `Masked` distribution to assume a "safe" even if
invalid value. (Be careful to ensure that such downstream usages are
themselves masked!) Note that the result of this function will be
wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls)
| # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
  - `sample() == tf.stop_gradient(safe_sample_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
  distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
  The default `safe_sample_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
  Besides the output of `sample`, results from `safe_sample_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
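  A cheaper `safe_sample_fn` can also be passed explicitly. The block below is
  an added illustrative sketch (not from the original docstring); it simply
  uses the `tfd.Distribution.mean` option listed above:
  ```
  # Use the underlying mean as the sentinel for invalid batch members.
  d = tfd.Masked(tfd.Normal(tf.zeros(3), 1.), [True, False, True],
                 safe_sample_fn=tfd.Distribution.mean)
  d.sample()  # the masked-out position yields stop_gradient(mean), i.e. 0.
  ```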
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
        the `distribution` argument) and returns a deterministic, safe sample
value. This helps to avoid `nan` gradients and allows downstream usage
of samples from a `Masked` distribution to assume a "safe" even if
invalid value. (Be careful to ensure that such downstream usages are
themselves masked!) Note that the result of this function will be
wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls) | en | 0.755109 | # Copyright 2021 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ The MaskedIndependent distribution class. Implements functions like mean, variance, etc. Args: fn_name: Name of the method called on the underlying distribution. n_event_shapes: Number of event shape repeats in the shape of the underlying function's output. safe_value: The value to be placed in invalid locations. May be `'safe_sample'` to specify we should use the "safe sample" value. make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the argument passed into the underlying routine is a "safe" sample. Returns: fn: Callable implementing the given function. # Only if needed. A distribution that masks invalid underlying distributions. Sometimes we may want a way of masking out a subset of distributions. Perhaps we have labels for only a subset of batch members and want to evaluate a log_prob. Or we may want to encode a sparse random variable as a dense random variable with a mask applied. In single-program/multiple-data regimes, it can be necessary to pad Distributions and the samples thereof to a given size in order to achieve the "single-program" desideratum. When computing a probability density in this regime, we would like to mask out the contributions of invalid batch members. We may also want to ensure that the values being sampled are valid parameters for descendant distributions in a hierarchical model, even if they are ultimately masked out. This distribution answers those requirements. Specifically, for invalid batch elements: - `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any gradients to the parameters of `distribution`. - `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no gradients back to the parameters of `distribution`. The distribution accepts a mask specified by `validity_mask`, a boolean tensor broadcastable with the underlying distribution's batch shape which specifies for each batch element whether or not it is valid. Entries in `validity_mask` which are `False` denote missing distributions, which means that the corresponding entries in the measures (e.g. `prob`) and statistics (e.g. `mean`) must not be treated as coming from some real distribution. Whenever doing a reduction across those quantites, make sure to either mask out the invalid entries or make sure the returned value corresponds to the identity element of the reduction. For a couple examples: - OK: `reduce_sum(masked_dist.log_prob(x))` - OK: `tfd.Independent(masked_dist, ...)` - Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance because it uses too large an `N`. - Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid batch elements. The default `safe_value_fn` is to draw a fixed-seeded sample from the underlying `distribution`. 
Since this may be expensive, it is suggested to specify a computationally cheaper method. Some options might include: - `tfd.Distribution.mode` - `tfd.Distribution.mean` - `lambda d: d.quantile(.5)` (median) - `lambda _: 0.` (if zero is always in the support of d) - `lambda d: d.experimental_default_event_space_bijector()(0.)` Besides the output of `sample`, results from `safe_value_fn` may also appear in (invalid batch members of) `masked.default_event_space_bijector().forward`. #### Examples ``` # Use tf.sequence_mask for `range(n) < num_valid`. num_valid = 3 num_entries = 4 d = tfd.Masked( tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])), tf.sequence_mask(num_valid, num_entries)) d.batch_shape # [2, 4] d.event_shape # [5] d.log_prob(tf.zeros([5])) # shape [2, 4] # => [[nonzero, nonzero, nonzero, 0.], # [nonzero, nonzero, nonzero, 0.]] # Explicitly denote which elements are valid, adding a new batch dim of 2. d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])), [[False], [True]]) d.batch_shape # [2, 4] d.event_shape # [5] d.log_prob(tf.zeros([5])) # shape [2, 4] # => [[0., 0., 0., 0.], # [nonzero, nonzero, nonzero, nonzero]] # Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding # positional mask functionality to `tfd.Sample`. # Suppose we wanted to achieve this: # `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)` # We can write: d = tfd.Independent( tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask), reinterpreted_batch_ndims=2) d.batch_shape # [2] d.event_shape # [3, 4] d.log_prob(tf.ones([3, 4])) # shape [2] ``` Constructs a Masked distribution. Args: distribution: The underlying distribution, which will be masked. validity_mask: Boolean mask where `True` indicates an element is valid. `validity_mask` must broadcast with the batch shape of the underlying distribution. Invalid batch elements are masked so that sampling returns `safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns `0.` for invalid positions. safe_sample_fn: A callable which takes a distribution (namely, the `distribution` argument) and returns a determinstic, safe sample value. This helps to avoid `nan` gradients and allows downstream usage of samples from a `Masked` distribution to assume a "safe" even if invalid value. (Be careful to ensure that such downstream usages are themselves masked!) Note that the result of this function will be wrapped in a `tf.stop_gradient` call. validate_args: Boolean indicating whether argument assertions should be run. May impose performance penalties. allow_nan_stats: Boolean indicating whether statistical functions may return `nan`, or should instead use asserts where possible. name: Optional name for operation scoping. # To avoid the shape gymnastics of drawing extra samples, we delegate # sampling to the BatchBroadcast distribution. Maybe return a non-`CompositeTensor` `_Masked`. KL divergence between Masked distributions. # The treatment for KL is as follows: # When both random variables are valid, the underlying KL applies. # When neither random variable is valid, the KL is 0., i.e. # `a log a - a log b = 0` because log a and log b are everywhere 0. # When exactly one is valid, we (a) raise an assertion error, if either # distribution's allow_nan_stats is set to False, or (b) return nan in # such positions. Computes log p(x) - log q(y) for Masked p, q. 
# As with KL, we return the underlying log_prob_ratio where both are valid, # `0.` where neither is valid, and `nan` otherwise (or an assertion if # either distribution does not `allow_nan_stats`). Event space bijector for Masked distributions. # We presume that 0 in unconstrained space is safe. Event space bijector for Masked distributions. Maybe return a `_NonCompositeTensorMaskedBijector`. | 1.836301 | 2 |
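A short usage sketch of the `Masked` row above (my addition, not part of the dataset row); it assumes a TensorFlow Probability release that ships `tfd.Masked` and the registered KL shown in the file:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

mask = [True, True, False]
a = tfd.Masked(tfd.Normal(tf.zeros(3), 1.), mask)
b = tfd.Masked(tfd.Normal(tf.ones(3), 1.), mask)

# Valid positions use the ordinary Normal/Normal KL (0.5 nat each); the
# masked-out position contributes exactly 0., so reductions stay finite.
kl = tfd.kl_divergence(a, b)  # ~[0.5, 0.5, 0.]

# log_prob is likewise zeroed out for invalid batch members.
lp = a.log_prob(tf.zeros(3))  # last entry is exactly 0.
```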
download.py | kaija/taiwan_stockloader | 2 | 7771 | import datetime
import httplib
import urllib
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convfloat(value):
try:
return float(value)
except ValueError:
return -1
today = datetime.date.today()
one_day = timedelta(days=1);
#start_day = datetime.date(2004, 2, 11);
start_day = datetime.date(2010, 8, 21);
print "Download from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
while dl_date < today:
httpreq = httplib.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
date_str = str(dl_date.year - 1911 ) + dl_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
httpres = httpreq.getresponse()
stock_csv = httpres.read()
file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
print "downloading " + file_name
f = open(file_name, "w")
    f.write(stock_csv)
    f.close()
dl_date += one_day
print "Download Finish!"
| import datetime
import httplib
import urllib
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convfloat(value):
try:
return float(value)
except ValueError:
return -1
today = datetime.date.today()
one_day = timedelta(days=1);
#start_day = datetime.date(2004, 2, 11);
start_day = datetime.date(2010, 8, 21);
print "Download from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
while dl_date < today:
httpreq = httplib.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
date_str = str(dl_date.year - 1911 ) + dl_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
httpres = httpreq.getresponse()
stock_csv = httpres.read()
file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
print "downloading " + file_name
f = open(file_name, "w")
    f.write(stock_csv)
    f.close()
dl_date += one_day
print "Download Finish!"
| en | 0.468208 | #now = datetime.datetime.now(); #today = now.strftime('%Y-%m-%d') #print today #start_day = datetime.date(2004, 2, 11); | 3.232826 | 3 |
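The script above is Python 2 (`httplib`, `print` statements). Below is a rough Python 3 sketch of the same download loop — my translation, assuming the TWSE endpoint and form fields used in the original are still accepted, which may no longer hold:

```python
import datetime
from datetime import timedelta
from urllib.parse import urlencode
from urllib.request import Request, urlopen

start_day = datetime.date(2010, 8, 21)
today = datetime.date.today()
dl_date = start_day

while dl_date < today:
    # ROC calendar year (Gregorian year minus 1911), as in the original form data.
    date_str = str(dl_date.year - 1911) + dl_date.strftime("/%m/%d")
    form = urlencode({"download": "csv", "qdate": date_str,
                      "selectType": "ALLBUT0999"}).encode()
    req = Request("http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX.php",
                  data=form,
                  headers={"Content-type": "application/x-www-form-urlencoded"})
    with urlopen(req) as resp, open(f"data/{dl_date:%Y%m%d}.csv", "wb") as f:
        f.write(resp.read())
    dl_date += timedelta(days=1)

print("Download finished")
```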
heuristic/improvement/reopt/disruption_updater.py | annalunde/master | 1 | 7772 | <filename>heuristic/improvement/reopt/disruption_updater.py<gh_stars>1-10
import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater
class DisruptionUpdater:
def __init__(self, new_request_updater):
self.new_request_updater = new_request_updater
def update_route_plan(self, current_route_plan, disruption_type, disruption_info, sim_clock):
# adding current position for each vehicle
vehicle_clocks, artificial_depot = self.update_vehicle_clocks(
current_route_plan, sim_clock, disruption_type, disruption_info)
updated_route_plan = copy.deepcopy(current_route_plan)
if disruption_type == 'request':
self.new_request_updater.set_parameters(disruption_info)
elif disruption_type == 'delay':
updated_route_plan = self.update_with_delay(
current_route_plan, disruption_info)
elif disruption_type == 'cancel':
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
if artificial_depot:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
else:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
# remove pickup node
del updated_route_plan[disruption_info[0]][disruption_info[1]]
else:
# no show
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
return updated_route_plan, vehicle_clocks
def update_with_delay(self, current_route_plan, disruption_info):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
start_idx = disruption_info[1]
for node in route_plan[disruption_info[0]][disruption_info[1]:]:
t = node[1] + delay_duration
d = node[2] + delay_duration
node = (node[0], t, d, node[3], node[4], node[5])
route_plan[disruption_info[0]][start_idx] = node
start_idx += 1
return route_plan
@staticmethod
def recalibrate_solution(current_route_plan, disruption_info, still_delayed_nodes):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
for node in still_delayed_nodes:
idx = next(i for i, (node_test, *_)
in enumerate(route_plan[disruption_info[0]]) if node_test == node)
node_route = route_plan[disruption_info[0]][idx]
d = node_route[2] - delay_duration
node_route = (node_route[0], node_route[1], d,
node_route[3], node_route[4], node_route[5])
route_plan[disruption_info[0]][idx] = node_route
return route_plan
def update_vehicle_clocks(self, current_route_plan, sim_clock, disruption_type, disruption_info):
artificial_depot = False
# find index for next node after sim_clock and corresponding time of service
vehicle_clocks = []
for vehicle_route in current_route_plan:
if len(vehicle_route) > 1:
if vehicle_route[0][1] < sim_clock:
prev_idx = 0
for idx, (node, time, deviation, passenger, wheelchair, _) in enumerate(vehicle_route):
if time <= sim_clock:
prev_idx = idx
if prev_idx == len(vehicle_route) - 1:
vehicle_clocks.append(sim_clock)
else:
next_idx = prev_idx + 1
vehicle_clocks.append(vehicle_route[next_idx][1])
if disruption_type == 'cancel':
# check whether next node after sim_clock is the request that is cancelled
if current_route_plan[disruption_info[0]][disruption_info[1]] == vehicle_route[next_idx]:
artificial_depot = True
else:
vehicle_clocks.append(sim_clock)
else:
vehicle_clocks.append(sim_clock)
return vehicle_clocks, artificial_depot
def update_capacities(self, vehicle_route, start_id, dropoff_id, request):
idx = start_id
for n, t, d, p, w, _ in vehicle_route[start_id:dropoff_id]:
p -= request["Number of Passengers"]
w -= request["Wheelchair"]
vehicle_route[idx] = (n, t, d, p, w, _)
idx += 1
return vehicle_route
| <filename>heuristic/improvement/reopt/disruption_updater.py<gh_stars>1-10
import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater
class DisruptionUpdater:
def __init__(self, new_request_updater):
self.new_request_updater = new_request_updater
def update_route_plan(self, current_route_plan, disruption_type, disruption_info, sim_clock):
# adding current position for each vehicle
vehicle_clocks, artificial_depot = self.update_vehicle_clocks(
current_route_plan, sim_clock, disruption_type, disruption_info)
updated_route_plan = copy.deepcopy(current_route_plan)
if disruption_type == 'request':
self.new_request_updater.set_parameters(disruption_info)
elif disruption_type == 'delay':
updated_route_plan = self.update_with_delay(
current_route_plan, disruption_info)
elif disruption_type == 'cancel':
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
if artificial_depot:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
else:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
# remove pickup node
del updated_route_plan[disruption_info[0]][disruption_info[1]]
else:
# no show
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
return updated_route_plan, vehicle_clocks
def update_with_delay(self, current_route_plan, disruption_info):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
start_idx = disruption_info[1]
for node in route_plan[disruption_info[0]][disruption_info[1]:]:
t = node[1] + delay_duration
d = node[2] + delay_duration
node = (node[0], t, d, node[3], node[4], node[5])
route_plan[disruption_info[0]][start_idx] = node
start_idx += 1
return route_plan
@staticmethod
def recalibrate_solution(current_route_plan, disruption_info, still_delayed_nodes):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
for node in still_delayed_nodes:
idx = next(i for i, (node_test, *_)
in enumerate(route_plan[disruption_info[0]]) if node_test == node)
node_route = route_plan[disruption_info[0]][idx]
d = node_route[2] - delay_duration
node_route = (node_route[0], node_route[1], d,
node_route[3], node_route[4], node_route[5])
route_plan[disruption_info[0]][idx] = node_route
return route_plan
def update_vehicle_clocks(self, current_route_plan, sim_clock, disruption_type, disruption_info):
artificial_depot = False
# find index for next node after sim_clock and corresponding time of service
vehicle_clocks = []
for vehicle_route in current_route_plan:
if len(vehicle_route) > 1:
if vehicle_route[0][1] < sim_clock:
prev_idx = 0
for idx, (node, time, deviation, passenger, wheelchair, _) in enumerate(vehicle_route):
if time <= sim_clock:
prev_idx = idx
if prev_idx == len(vehicle_route) - 1:
vehicle_clocks.append(sim_clock)
else:
next_idx = prev_idx + 1
vehicle_clocks.append(vehicle_route[next_idx][1])
if disruption_type == 'cancel':
# check whether next node after sim_clock is the request that is cancelled
if current_route_plan[disruption_info[0]][disruption_info[1]] == vehicle_route[next_idx]:
artificial_depot = True
else:
vehicle_clocks.append(sim_clock)
else:
vehicle_clocks.append(sim_clock)
return vehicle_clocks, artificial_depot
def update_capacities(self, vehicle_route, start_id, dropoff_id, request):
idx = start_id
for n, t, d, p, w, _ in vehicle_route[start_id:dropoff_id]:
p -= request["Number of Passengers"]
w -= request["Wheelchair"]
vehicle_route[idx] = (n, t, d, p, w, _)
idx += 1
return vehicle_route
| en | 0.724663 | # adding current position for each vehicle # update capacities # remove dropoff node # remove dropoff node # remove pickup node # no show # update capacities # remove dropoff node # find index for next node after sim_clock and corresponding time of service # check whether next node after sim_clock is the request that is cancelled | 2.218529 | 2 |
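A toy illustration of `update_with_delay` from the row above (my sketch; the route-plan layout — one vehicle, nodes as `(node, time, deviation, passengers, wheelchairs, request)` tuples with numeric times — is inferred from the code, not taken from the repo's tests):

```python
from heuristic.improvement.reopt.disruption_updater import DisruptionUpdater  # assumes the repo layout above

updater = DisruptionUpdater(new_request_updater=None)  # not used by update_with_delay

route_plan = [[
    ("depot",     0, 0, 0, 0, None),
    ("pickup_1", 10, 0, 1, 0, None),
    ("dropoff_1", 20, 0, 0, 0, None),
]]

# disruption_info = (vehicle index, index of the first delayed node, delay duration)
delayed = updater.update_with_delay(route_plan, (0, 1, 5))
# -> the delayed tail now has times 15 and 25; route_plan itself is untouched
#    because update_with_delay works on a deepcopy.
```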
evennia/scripts/migrations/0013_auto_20191025_0831.py | Jaykingamez/evennia | 1,544 | 7773 | # Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("scripts", "0012_auto_20190128_1820")]
operations = [
migrations.AlterField(
model_name="scriptdb",
name="db_typeclass_path",
field=models.CharField(
db_index=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
)
]
| # Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("scripts", "0012_auto_20190128_1820")]
operations = [
migrations.AlterField(
model_name="scriptdb",
name="db_typeclass_path",
field=models.CharField(
db_index=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
)
]
| en | 0.775277 | # Generated by Django 2.2.6 on 2019-10-25 12:31 | 1.806595 | 2 |
tests/test_pyqrcodeng_issue13.py | dbajar/segno | 254 | 7774 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| en | 0.874974 | # -*- coding: utf-8 -*- # # Copyright (c) 2016 - 2020 -- <NAME> # All rights reserved. # # License: BSD License # \ Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>. The initial test was created by Mathieu <https://github.com/albatros69>, see the above mentioned pull request. Adapted for Segno to check if it suffers from the same problem. | 1.612454 | 2 |
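A couple of extra mode checks in the same spirit (my sketch; it assumes segno's documented detection of numeric and alphanumeric modes):

```python
import segno

assert segno.make('1234567').mode == 'numeric'
assert segno.make('ABC123').mode == 'alphanumeric'
# Mirrors test_encoding above: an explicit encoding still yields byte mode.
assert segno.make('Émetteur', encoding='utf-8').mode == 'byte'
```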
qiskit/quantum_info/operators/__init__.py | jagunnels/qiskit-sdk-py | 0 | 7775 | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Quantum Operators."""
from .operator import Operator
from .unitary import Unitary
from .pauli import Pauli, pauli_group
from .channel import Choi, SuperOp, Kraus, Stinespring, Chi, PTM
| # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Quantum Operators."""
from .operator import Operator
from .unitary import Unitary
from .pauli import Pauli, pauli_group
from .channel import Choi, SuperOp, Kraus, Stinespring, Chi, PTM
| en | 0.868071 | # -*- coding: utf-8 -*- # Copyright 2019, IBM. # # This source code is licensed under the Apache License, Version 2.0 found in # the LICENSE.txt file in the root directory of this source tree. Quantum Operators. | 1.280147 | 1 |
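As a quick sketch of what these re-exports provide (my example, not from the package; constructor details vary across Qiskit versions):

```python
import numpy as np
from qiskit.quantum_info import Operator

x = Operator(np.array([[0, 1], [1, 0]]))   # Pauli-X as a raw matrix
z = Operator(np.array([[1, 0], [0, -1]]))  # Pauli-Z

# Compose the two operators; X and Z anticommute, so swapping the order
# flips the sign of the off-diagonal entries.
print(x.compose(z).data)
```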
iocms/iocms/urls.py | Gaurav-Zaiswal/iw-acad-iocms-be | 0 | 7776 | <gh_stars>0
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('class/', include('classroom.urls')),
path('assignment-api/', include('assignment.urls', namespace='assignment')),
path('feed/', include('feed.urls', namespace='feed')),
path('users/', include('users.urls'), name="user-register")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('class/', include('classroom.urls')),
path('assignment-api/', include('assignment.urls', namespace='assignment')),
path('feed/', include('feed.urls', namespace='feed')),
path('users/', include('users.urls'), name="user-register")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | none | 1 | 1.851993 | 2 |
|
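Side note (my sketch, not part of the row): the namespaced includes above only resolve if each included app defines `app_name`; a minimal hypothetical `feed/urls.py` would look like:

```python
from django.urls import path

from . import views  # assumed to exist in the feed app

app_name = 'feed'  # required for include(..., namespace='feed')

urlpatterns = [
    path('', views.feed_list, name='feed-list'),  # hypothetical view name
]
```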
src/security/__init__.py | slippers/blogging_security_flatpage | 1 | 7777 | from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
login_required, roles_accepted
from flask_security.utils import encrypt_password
def config_security_admin(admin):
admin.add_view(UserAdmin(db.session))
admin.add_view(RoleAdmin(db.session))
def configure_security():
# Create the Roles "admin" and "end-user" -- unless they already exist
user_datastore.find_or_create_role(name='admin', description='Administrator')
user_datastore.find_or_create_role(name='end-user', description='End user')
user_datastore.find_or_create_role(name='blogger', description='Blogger')
# Create two Users for testing purposes -- unless they already exists.
# In each case, use Flask-Security utility function to encrypt the password.
pw = encrypt_password('password')
# pw = 'password'
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
    # Give one User the "end-user" role, while the other gets the "admin" role.
    # (This will have no effect if the
# Users already have these Roles.) Again, commit any database changes.
user_datastore.add_role_to_user('<EMAIL>', 'end-user')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
user_datastore.add_role_to_user('<EMAIL>', 'admin')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
db.session.commit()
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# Create any database tables that don't exist yet.
db.create_all()
| from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
login_required, roles_accepted
from flask_security.utils import encrypt_password
def config_security_admin(admin):
admin.add_view(UserAdmin(db.session))
admin.add_view(RoleAdmin(db.session))
def configure_security():
# Create the Roles "admin" and "end-user" -- unless they already exist
user_datastore.find_or_create_role(name='admin', description='Administrator')
user_datastore.find_or_create_role(name='end-user', description='End user')
user_datastore.find_or_create_role(name='blogger', description='Blogger')
# Create two Users for testing purposes -- unless they already exists.
# In each case, use Flask-Security utility function to encrypt the password.
pw = encrypt_password('password')
# pw = 'password'
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
if not user_datastore.get_user('<EMAIL>'):
user_datastore.create_user(email='<EMAIL>', password=pw)
    # Give one User the "end-user" role, while the other gets the "admin" role.
    # (This will have no effect if the
# Users already have these Roles.) Again, commit any database changes.
user_datastore.add_role_to_user('<EMAIL>', 'end-user')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
user_datastore.add_role_to_user('<EMAIL>', 'admin')
user_datastore.add_role_to_user('<EMAIL>', 'blogger')
db.session.commit()
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# Create any database tables that don't exist yet.
db.create_all()
| en | 0.874873 | # Create the Roles "admin" and "end-user" -- unless they already exist # Create two Users for testing purposes -- unless they already exists. # In each case, use Flask-Security utility function to encrypt the password. # pw = 'password' # Give one User has the "end-user" role, while the other has the "admin" role. #(This will have no effect if the # Users already have these Roles.) Again, commit any database changes. # Setup Flask-Security # Create any database tables that don't exist yet. | 3.094532 | 3 |
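A small sketch of how the roles configured above are typically enforced on a view (my addition; the route and template names are made up), using the `roles_accepted` decorator this module already imports:

```python
from flask import render_template
from flask_security import roles_accepted

from src import app


@app.route('/blog/manage')
@roles_accepted('admin', 'blogger')  # either role is sufficient
def manage_blog():
    return render_template('manage_blog.html')  # hypothetical template
```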
usaspending_api/download/lookups.py | lenjonemcse/usaspending-api | 1 | 7778 | <filename>usaspending_api/download/lookups.py<gh_stars>1-10
"""
This file defines a series of constants that represent the values used in
the API's "helper" tables.
Rather than define the values in the db setup scripts and then make db calls to
lookup the surrogate keys, we'll define everything here, in a file that can be
used by the db setup scripts *and* the application code.
"""
from collections import namedtuple, OrderedDict
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.accounts.v2.filters.account_download import account_download_filter
from usaspending_api.awards.models import Award, TransactionNormalized
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.download.helpers.elasticsearch_download_functions import (
AwardsElasticsearchDownload,
TransactionsElasticsearchDownload,
)
from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function
from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView
from usaspending_api.awards.v2.filters.idv_filters import (
idv_order_filter,
idv_transaction_filter,
idv_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.award_filters import (
awards_transaction_filter,
awards_subaward_filter,
awards_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.search import (
universal_award_matview_filter,
transaction_search_filter,
)
from usaspending_api.awards.v2.filters.sub_award import subaward_download
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.download.helpers.download_annotation_functions import (
transaction_search_annotations,
universal_award_matview_annotations,
subaward_annotations,
idv_order_annotations,
idv_transaction_annotations,
)
LookupType = namedtuple("LookupType", ["id", "name", "desc"])
JOB_STATUS = [
LookupType(1, "ready", "job is ready to be run"),
LookupType(2, "running", "job is currently in progress"),
LookupType(3, "finished", "job is complete"),
LookupType(4, "failed", "job failed to complete"),
LookupType(5, "queued", "job sent to queue for async processing"),
LookupType(6, "resumed", "job is being reprocessed after a failure"),
LookupType(7, "created", "job product has been created and stored locally"),
LookupType(8, "uploading", "job is being uploaded to public storage"),
]
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
VALUE_MAPPINGS = {
# Award Level
"awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": universal_award_matview_filter,
"annotations_function": universal_award_matview_annotations,
},
# Elasticsearch Award Level
"elasticsearch_awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": AwardsElasticsearchDownload.query,
"annotations_function": universal_award_matview_annotations,
},
# Transaction Level
"transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": transaction_search_filter,
"annotations_function": transaction_search_annotations,
},
# Elasticsearch Transaction Level
"elasticsearch_transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": TransactionsElasticsearchDownload.query,
"annotations_function": transaction_search_annotations,
},
# SubAward Level
"sub_awards": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"type_name": "Subawards",
"download_name": "{agency}{type}_Subawards_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": subaward_download,
"annotations_function": subaward_annotations,
},
# Appropriations Account Data
"account_balances": {
"source_type": "account",
"table": AppropriationAccountBalances,
"table_name": "account_balances",
"download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"filter_function": account_download_filter,
},
# Object Class Program Activity Account Data
"object_class_program_activity": {
"source_type": "account",
"table": FinancialAccountsByProgramActivityObjectClass,
"table_name": "object_class_program_activity",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"filter_function": account_download_filter,
},
"award_financial": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"filter_function": account_download_filter,
},
"idv_orders": {
"source_type": "award",
"table": Award,
"table_name": "idv_orders",
"download_name": "IDV_{piid}_Orders",
"contract_data": "latest_transaction__contract_data",
"filter_function": idv_order_filter,
"is_for_idv": True,
"annotations_function": idv_order_annotations,
},
"idv_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "IDV_{piid}_FederalAccountFunding",
"filter_function": idv_treasury_account_funding_filter,
"is_for_idv": True,
},
"idv_transaction_history": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "IDV_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": idv_transaction_filter,
"is_for_idv": True,
"annotations_function": idv_transaction_annotations,
},
"contract_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Contract_{piid}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_contract": True,
},
"assistance_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Assistance_{assistance_id}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_assistance": True,
},
"sub_contracts": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Contract_{piid}_Sub-Awards",
"contract_data": "award__latest_transaction__contract_data",
"filter_function": awards_subaward_filter,
"is_for_contract": True,
"annotations_function": subaward_annotations,
},
"sub_grants": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Assistance_{assistance_id}_Sub-Awards",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": awards_subaward_filter,
"is_for_assistance": True,
"annotations_function": subaward_annotations,
},
"contract_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "Contract_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": awards_transaction_filter,
"is_for_contract": True,
"annotations_function": idv_transaction_annotations,
},
"assistance_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "assistance_transaction_history",
"download_name": "Assistance_{assistance_id}_TransactionHistory",
"assistance_data": "assistance_data",
"filter_function": awards_transaction_filter,
"is_for_assistance": True,
"annotations_function": idv_transaction_annotations,
},
"disaster_recipient": {
"source_type": "disaster",
"table": AwardSearchView,
"table_name": "recipient",
"download_name": "COVID-19_Recipients_{award_category}_{timestamp}",
"filter_function": disaster_filter_function,
"base_fields": ["recipient_name", "recipient_unique_id"],
},
}
# Bulk Download still uses "prime awards" instead of "transactions"
VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"]
# List of CFO CGACS for list agencies viewset in the correct order, names included for reference
# TODO: Find a solution that marks the CFO agencies in the database AND have the correct order
CFO_CGACS_MAPPING = OrderedDict(
[
("012", "Department of Agriculture"),
("013", "Department of Commerce"),
("097", "Department of Defense"),
("091", "Department of Education"),
("089", "Department of Energy"),
("075", "Department of Health and Human Services"),
("070", "Department of Homeland Security"),
("086", "Department of Housing and Urban Development"),
("015", "Department of Justice"),
("1601", "Department of Labor"),
("019", "Department of State"),
("014", "Department of the Interior"),
("020", "Department of the Treasury"),
("069", "Department of Transportation"),
("036", "Department of Veterans Affairs"),
("068", "Environmental Protection Agency"),
("047", "General Services Administration"),
("080", "National Aeronautics and Space Administration"),
("049", "National Science Foundation"),
("031", "Nuclear Regulatory Commission"),
("024", "Office of Personnel Management"),
("073", "Small Business Administration"),
("028", "Social Security Administration"),
("072", "Agency for International Development"),
]
)
CFO_CGACS = list(CFO_CGACS_MAPPING.keys())
FILE_FORMATS = {
"csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"},
"tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"},
"pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"},
}
VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", "object_class_program_activity", "award_financial")
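# Hypothetical usage sketch, not part of the original module: it only illustrates how
# calling code might read a download configuration out of VALUE_MAPPINGS. The helper
# name and the keys it reports back are assumptions for illustration.
def describe_download_source(download_type):
    mapping = VALUE_MAPPINGS[download_type]
    return {
        "model": mapping["table"].__name__,
        "file_name_template": mapping["download_name"],
        "is_account_download": mapping["source_type"] == "account",
    }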
| en | 0.817716 | This file defines a series of constants that represent the values used in the API's "helper" tables. Rather than define the values in the db setup scripts and then make db calls to lookup the surrogate keys, we'll define everything here, in a file that can be used by the db setup scripts *and* the application code. # Award Level # Elasticsearch Award Level # Transaction Level # Elasticsearch Transaction Level # SubAward Level # Appropriations Account Data # Object Class Program Activity Account Data # Bulk Download still uses "prime awards" instead of "transactions" # List of CFO CGACS for list agencies viewset in the correct order, names included for reference # TODO: Find a solution that marks the CFO agencies in the database AND have the correct order | 1.651194 | 2 |
python/modules/mysql_server.py | 91-jinrong/-91_monitor | 1 | 7779 | <filename>python/modules/mysql_server.py
#!/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import string
import time
import datetime
import MySQLdb
class MySQL:
    def __init__(self,host,port,user,passwd,dbname,timeout,charset):
self.host = host
self.port = port
self.user = user
        self.passwd = passwd
        self.dbname = dbname
self.timeout = timeout
self.charset = charset
def db_connect(self):
        connect=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
return connect
def execute(self,sql,param):
        conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
        if param != '':
cursor.execute(sql,param)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
def query(self,sql):
        conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=cursor.fetchall()
        cursor.close()
        conn.close()
        return result
def get_option(self,key):
        conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
        sql="select value from options where name='"+key+"'"
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=cursor.fetchone()
        cursor.close()
        conn.close()
        return result[0]
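# Hypothetical usage sketch, not part of the original module; the host, credentials and
# database name below are placeholders for illustration only.
if __name__ == '__main__':
    db = MySQL('127.0.0.1', 3306, 'monitor', 'monitor_pass', 'monitor_db', 10, 'utf8')
    rows = db.query('select 1')
    if rows != 0:
        for row in rows:
            print(row)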
| en | 0.32684 | #!/bin/env python #-*-coding:utf-8-*- | 3.013985 | 3 |
Ethan File/Carrentsystem/Carrentsystem/test.py | hklhfong/Car-Rental-System | 0 | 7780 | <filename>Ethan File/Carrentsystem/Carrentsystem/test.py
import sqlite3
conn = sqlite3.connect("db")
cur = conn.cursor()
cur.execute("select * from CAR_ID limit 5;")
results = cur.fetchall()
print(results)
| none | 1 | 2.877575 | 3 |
|
tests/integration/hub_usage/dummyhub_slow/__init__.py | abreu4/jina | 2 | 7781 | import time
from jina.executors.crafters import BaseCrafter
from .helper import foo
class DummyHubExecutorSlow(BaseCrafter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
time.sleep(15)
foo()
| none | 1 | 2.056926 | 2 |
|
src/evaluation_utils.py | philipp-hess/deep-learning-for-heavy-rainfall | 0 | 7782 | import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.metrics import f1_score, precision_score, recall_score
from IPython.display import display, clear_output
from sklearn.metrics import confusion_matrix
import scipy.stats as st
def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles: list) -> np.ndarray:
    """ Converts continuous data into binary classes using quantiles
Args:
data: shape [n_time, n_lat, n_lon]
quantiles:
list containing quantiles
Returns:
tmp: shape [n_quantiles, n_time*n_lat*n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(quantiles), shape[0], shape[1], shape[2]))
for i, quantile in enumerate(quantiles):
threshold = np.quantile(data, quantile)
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def global_thresholds_from_quantiles(data: np.ndarray, quantiles:list) -> list:
thresholds = [np.quantile(data, quantile) for quantile in quantiles]
return thresholds
def local_thresholds_from_percentiles(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
threshold_map = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if not np.isnan(threshold):
threshold_map[lat, lon] = threshold
return threshold_map
def get_threshold_mask(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
mask = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if np.isnan(threshold):
mask[lat, lon] = 1
return mask
def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray:
    """ Converts continuous data into binary classes using thresholds
Args:
data: shape [n_time, n_lat, n_lon]
        thresholds:
list containing thresholds
Returns:
        tmp: shape [n_quantiles, n_time, n_lat, n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(thresholds), shape[0], shape[1], shape[2]))
for i, threshold in enumerate(thresholds):
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> list:
"""
    Evaluates a regression prediction on quantile-based categories with the
    categorical skill metric selected by metric_name
Args:
prediction: shape [n_classes, X]
target: shape [n_classes, X]
X can be any other number of dimensions > 0
Returns:
scores (list):
List with an element per class
"""
n_classes = prediction.shape[0]
prediction = prediction.reshape(n_classes, -1)
target = target.reshape(n_classes, -1)
scores = []
for c in range(n_classes):
forecast_skill = ForecastSkill(prediction[c], target[c])
forecast_skill.compute_categories(mask=mask)
scores.append(getattr(forecast_skill, f'get_{metric_name}')())
return scores
def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray:
"""
    Evaluates a regression prediction on quantile-based categories with the
    categorical skill metric selected by metric_name
Args:
prediction: shape [n_classes, n_time, n_lat, n_lon]
target: shape [n_classes, n_time, n_lat, n_lon]
Returns:
scores: shape [n_classes, n_lat, n_lon]
"""
n_classes = prediction.shape[0]
n_lat = prediction.shape[2]
n_lon = prediction.shape[3]
scores = np.zeros((n_classes, n_lat, n_lon))
for c in range(n_classes):
for lat in range(n_lat):
for lon in range(n_lon):
grid_cell_prediction = prediction[c, :, lat, lon]
grid_cell_target = target[c, :, lat, lon]
if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0:
scores[c, lat, lon] = -999
else:
forecast_skill = ForecastSkill(prediction[c, :, lat, lon], target[c, :, lat, lon])
forecast_skill.compute_categories()
scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')()
print(f'Progress {int((lat * lon)/(n_lat*n_lon)*100):2d}%')
clear_output(wait=True)
return scores
class ForecastSkill:
""" A collection of categorical forecast skill metrics """
def __init__(self, prediction, target):
self.prediction = prediction
self.target = target
self.true_positive = 0
self.false_positive = 0
self.false_negative = 0
self.true_negative = 0
def compute_categories(self, mask=None):
self.target = self.target.flatten().astype('int')
self.prediction = self.prediction.flatten().astype('int')
if mask is not None:
mask = mask.flatten()
indices_to_remove = np.where(mask==1)
self.target = np.delete(self.target, indices_to_remove)
self.prediction = np.delete(self.prediction, indices_to_remove)
categories = confusion_matrix(self.target, self.prediction)
self.true_negative, self.false_positive, self.false_negative, self.true_positive = categories.ravel()
def print_category_sums(self):
total = self.target.size
print(f'tp: {self.true_positive/total*100:2.3f}')
print(f'fp: {self.false_positive/total*100:2.3f}')
print(f'fn: {self.false_negative/total*100:2.3f}')
print(f'tn: {self.true_negative/total*100:2.3f}')
def get_category_sums(self):
return self.true_positive, self.false_positive, self.false_negative, self.true_negative
def get_heidke_skill_score(self) -> float:
tp = self.true_positive
fp = self.false_positive
fn = self.false_negative
tn = self.true_negative
nominator = 2*(tp*tn - fp*fn)
denominator = ((tp + fn)*(fn + tn) + (tp + fp)*(fp + tn))
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_critical_success_index(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_false_alarm_ratio(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
nominator = false_alarms
denominator = hits + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_probability_of_detection(self) -> float:
hits = self.true_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_f1(self) -> float:
return f1_score(self.target, self.prediction, average='binary')
def get_recall(self) -> float:
return recall_score(self.target, self.prediction, average='binary')
def get_precision(self) -> float:
return precision_score(self.target, self.prediction, average='binary')
def rmse(output, target):
return np.sqrt(((output-target)**2).mean(axis=0))
def me(output, target):
return (output-target).mean(axis=0)
def corr(output, target):
result = np.zeros((output.shape[1], output.shape[2]))
for i in range(output.shape[1]):
for j in range(output.shape[2]):
result[i,j] = spearmanr(output[:,i,j], target[:,i,j])[0]
return result
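# Hypothetical usage sketch, not part of the original module: random arrays stand in
# for real forecast and observation fields; shapes, quantiles and the chosen metric
# are illustrative assumptions only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    target = rng.gamma(2.0, 2.0, size=(100, 8, 8))              # [n_time, n_lat, n_lon]
    prediction = target + rng.normal(0.0, 1.0, size=target.shape)
    quantiles = [0.9, 0.99]
    target_cat = continuous_to_categorical_with_quantiles(target, quantiles)
    prediction_cat = continuous_to_categorical_with_quantiles(prediction, quantiles)
    print(categorical_evaluation(prediction_cat, target_cat, "f1"))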
| en | 0.663358 | Converts continuous data into binar classes using quantiles Args: data: shape [n_time, n_lat, n_lon] quantiles: list containing quantiles Returns: tmp: shape [n_quantiles, n_time*n_lat*n_lon] binary data Converts continuous data into binar classes using thresholds Args: data: shape [n_time, n_lat, n_lon] quantiles: list containing thresholds Returns: tmp: shape [n_quantiles, n_time*n_lat*n_lon] binary data Evaluates a regression prediction with the F1 score on quantile-based categories Args: prediction: shape [n_classes, X] target: shape [n_classes, X] X can be any other number of dimensions > 0 Returns: scores (list): List with an element per class Evaluates a regression prediction with the F1 score on quantile-based categories Args: prediction: shape [n_classes, n_time, n_lat, n_lon] target: shape [n_classes, n_time, n_lat, n_lon] Returns: scores: shape [n_classes, n_lat, n_lon] A collection of categorical forecast skill metrics | 2.846492 | 3 |
poloniex_apis/api_models/deposit_withdrawal_history.py | xJuggl3r/anapolo | 0 | 7783 | <filename>poloniex_apis/api_models/deposit_withdrawal_history.py<gh_stars>0
from collections import defaultdict
from poloniex_apis.api_models.ticker_price import TickerData
class DWHistory:
def __init__(self, history):
self.withdrawals = defaultdict(float)
self.deposits = defaultdict(float)
self.history = history
def get_dw_history(self):
for deposit in self.history['deposits']:
if deposit['currency'] in self.deposits:
self.deposits[deposit['currency']] += float(deposit['amount'])
else:
self.deposits[deposit['currency']] = float(deposit['amount'])
for withdrawal in self.history['withdrawals']:
if withdrawal['currency'] in self.withdrawals:
self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
else:
self.withdrawals[withdrawal['currency']] = float(withdrawal['amount'])
return self.deposits, self.withdrawals
def get_btc_balance(self, ticker):
balance = 0
for deposit_symbol, amount in self.deposits.items():
if deposit_symbol == u"USDT":
balance += amount * ticker.get_price("USDT_BTC")
if deposit_symbol != u'BTC':
balance += amount * ticker.get_price("BTC_" + deposit_symbol)
else:
balance += amount
for withdrawal_symbol, amount in self.withdrawals.items():
if withdrawal_symbol == u"USDT":
balance -= amount * ticker.get_price("USDT_BTC")
if withdrawal_symbol != u'BTC':
balance -= amount * ticker.get_price("BTC_" + withdrawal_symbol)
else:
balance -= amount
return balance
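# Hypothetical usage sketch, not part of the original module: the history payload and
# the FakeTicker price table below are illustrative assumptions that only mimic the
# shapes this class expects.
if __name__ == "__main__":
    class FakeTicker:
        def get_price(self, pair):
            return {"BTC_ETH": 0.05}.get(pair, 0.0)

    history = {
        "deposits": [{"currency": "BTC", "amount": "0.5"}, {"currency": "ETH", "amount": "2.0"}],
        "withdrawals": [{"currency": "BTC", "amount": "0.1"}],
    }
    dw = DWHistory(history)
    deposits, withdrawals = dw.get_dw_history()
    print(deposits, withdrawals, dw.get_btc_balance(FakeTicker()))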
| none | 1 | 2.757413 | 3 |
|
app/handler.py | vnrag/aws-pipeline-dashboard | 0 | 7784 | from datetime import datetime,timezone
import sys
import boto3
import json
def pipeline_event(event, context):
state = get_final_state(event)
if state is None:
return
event_time = datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
metric_data = []
if event['detail-type'] == "CodePipeline Pipeline Execution State Change":
# Write green/red time based on last execution state
prior_execution = get_prior_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if prior_execution is not None:
last_execution_state = prior_execution['status']
seconds_since_last_execution = (event_time - prior_execution['lastUpdateTime']).total_seconds()
if last_execution_state == "Succeeded":
append_metric(metric_data, "GreenTime", event, seconds=seconds_since_last_execution)
elif last_execution_state == "Failed":
append_metric(metric_data, "RedTime", event, seconds=seconds_since_last_execution)
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
current_execution = get_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if current_execution is not None:
duration = (event_time - current_execution['startTime']).total_seconds()
append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Stage Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
#append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Action Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
if len(metric_data) > 0:
client = boto3.client('cloudwatch')
client.put_metric_data(
Namespace='Pipeline',
MetricData=metric_data
)
# Return the state from the event iff it's one of SUCCEEDED or FAILED
def get_final_state(event):
if 'detail' in event and 'state' in event['detail']:
if any(event['detail']['state'] in s for s in ['SUCCEEDED', 'FAILED']):
return event['detail']['state']
return None
# Return the execution summary for a given execution id
def get_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
for e in response['pipelineExecutionSummaries']:
if e['pipelineExecutionId'] == execution_id:
return e
return None
# Return the execution summary for the most prior final execution before a given execution id
def get_prior_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
found_current = False
for e in response['pipelineExecutionSummaries']:
if found_current and any(e['status'] in s for s in ['Succeeded', 'Failed']):
return e
elif e['pipelineExecutionId'] == execution_id:
found_current = True
return None
def append_metric(metric_list, metric_name, event, seconds=0, count=0):
data = {
'MetricName': metric_name,
'Dimensions': [],
'Timestamp': datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ'),
}
resource_parts = []
if 'pipeline' in event['detail']:
data['Dimensions'].append({
'Name': 'PipelineName',
'Value': event['detail']['pipeline']
})
resource_parts.append(event['detail']['pipeline'])
if 'stage' in event['detail']:
data['Dimensions'].append({
'Name': 'StageName',
'Value': event['detail']['stage']
})
resource_parts.append(event['detail']['stage'])
if 'action' in event['detail']:
data['Dimensions'].append({
'Name': 'ActionName',
'Value': event['detail']['action']
})
resource_parts.append(event['detail']['action'])
if seconds > 0:
data['Value'] = seconds
data['Unit'] = 'Seconds'
elif count > 0:
data['Value'] = count
data['Unit'] = 'Count'
else:
# no metric to add
return
print("resource=%s metric=%s value=%s" % ('.'.join(resource_parts), metric_name, data['Value']))
metric_list.append(data)
def generate_dashboard(client):
paginator = client.get_paginator('list_metrics')
response_iterator = paginator.paginate(
Namespace='Pipeline'
)
pipeline_names = set()
for response in response_iterator:
for metric in response['Metrics']:
for dim in metric['Dimensions']:
if dim['Name'] == 'PipelineName':
pipeline_names.add(dim['Value'])
widgets = []
dashboard = {
"widgets": widgets
}
y = 0
for pipeline_name in sorted(pipeline_names):
widgets.append({
"type": "metric",
"x": 0,
"y": y,
"width": 18,
"height": 3,
"properties": {
"view": "singleValue",
"metrics": [
[ "Pipeline", "SuccessCount", "PipelineName", pipeline_name, { "stat": "Sum", "period": 2592000 } ],
[ ".", "FailureCount", ".", ".", { "stat": "Sum", "period": 2592000 } ],
[ ".", "LeadTime", ".", ".", { "period": 2592000, "color": "#9467bd" } ],
[ ".", "RedTime", ".", ".", { "stat": "Sum", "period": 2592000, "yAxis": "left", "color": "#d62728" } ],
[ ".", "GreenTime", ".", ".", { "period": 2592000, "stat": "Sum", "color": "#2ca02c" } ]
],
"region": "eu-central-1",
"title": pipeline_name,
"period": 300
}
})
y += 3
widgets.append({
"type": "text",
"x": 18,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"markdown": "\nAll metrics are calculated over the past 30 days\n\n* **SuccessCount** - count of all successful pipeline executions\n* **FailureCount** - count of all failed pipeline executions\n* **LeadTime** - average pipeline time for successful executions\n* **RedTime** - sum of all time spent with a red pipeline\n* **GreenTime** - sum of all time spent with a green pipeline\n"
}
})
return dashboard
def dashboard_event(event, context):
client = boto3.client('cloudwatch')
dashboard = generate_dashboard(client)
client.put_dashboard(
DashboardName='Pipeline',
DashboardBody=json.dumps(dashboard)
)
if __name__ == '__main__':
dashboard_event(None, None)
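# Hypothetical example, not part of the original module: a trimmed-down CodePipeline
# action-level state-change event of the shape pipeline_event() expects. Field values
# are illustrative; calling pipeline_event() with it still reaches the real AWS APIs
# through boto3.
SAMPLE_ACTION_EVENT = {
    "detail-type": "CodePipeline Action Execution State Change",
    "time": "2020-01-01T12:00:00Z",
    "detail": {
        "pipeline": "demo-pipeline",
        "stage": "Build",
        "action": "CodeBuild",
        "state": "SUCCEEDED",
    },
}
# e.g. pipeline_event(SAMPLE_ACTION_EVENT, None) would publish a SuccessCount metric
# with the pipeline, stage and action dimensions above.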
| en | 0.806267 | # Write green/red time based on last execution state #append_metric(metric_data, "LeadTime", event, seconds=duration) # Return the state from the event iff it's one of SUCCEEDED or FAILED # Return the execution summary for a given execution id # Return the execution summary for the most prior final execution before a given execution id # no metric to add | 2.214583 | 2 |
cogs/commands.py | sudo-do/discord-chatbot | 1 | 7785 | <filename>cogs/commands.py
import discord
import sqlite3
from discord.ext import commands
conn= sqlite3.connect("dbs/main.db")
class Commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.guild)
@commands.has_permissions(manage_channels=True)
async def setchannel(self, ctx, *, cbchannel: discord.TextChannel = None):
if cbchannel == None:
await ctx.send(":warning: You have to mention the channel that you want as the channel in which users will talk to me. Example: `!!setchannel #channel-name`")
return
elif cbchannel != None:
try:
cur= conn.cursor()
guildID= str(ctx.guild.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row != None:
await ctx.send(f":warning: The channel is already setup to <#{row[0]}>. Use `!!settings channel` to change it.")
elif row == None:
guildID= str(ctx.guild.id)
channelID= str(cbchannel.id)
cur.execute("INSERT INTO main(guild_id, channel_id, toggle) VALUES('"+guildID+"', '"+channelID+"', '1')")
conn.commit()
await ctx.send(f":tada: Start talking to me in {cbchannel.mention}!")
except discord.NotFound:
await ctx.send(":warning: I can't find that channel. Make sure I can access it or channel is valid.")
return
except discord.MissingPermissions:
await ctx.send(":warning: I can't send messages in that channel.")
return
@commands.group(invoke_without_command=True)
async def settings(self, ctx):
        em= discord.Embed(title="Discord Chat Bot Settings", description="Welcome to Discord Chat Bot Settings! Here is the list of commands you can use to set up the bot. If this is your first time with this bot, use the `!!setchannel` command first. **Arguments enclosed in `<>` are required!**")
em.add_field(name="`!!settings channel <channel_mention>`", value="Updates the chatting channel.")
em.add_field(name="`!!settings toggle <toggle>`", value="Toggles the bot chat on or off. This doesn't disable commands.")
await ctx.send(embed=em)
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def channel(self, ctx, *, cbchannel: discord.TextChannel = None):
cur= conn.cursor()
if cbchannel == None:
guildID= str(ctx.guild.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row != None:
await ctx.send(f"I'm currently waiting for messages in <#{row[0]}>. Run `!!settings channel #channel-mention` to change this.")
elif row == None:
await ctx.send("Channel is not even setup yet! Use `!!setchannel` to set a channel.")
elif cbchannel != None:
guildID= str(ctx.guild.id)
channelID= str(cbchannel.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row == None:
await ctx.send("Channel is not even setup yet! Use `!!setchannel` to set a channel.")
elif row != None:
cur.execute("UPDATE main SET channel_id = '"+channelID+"' where guild_id = '"+guildID+"'")
conn.commit()
await ctx.send(f":tada: Channel has been updated to {cbchannel.mention}!")
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def toggle(self, ctx, *, toggle = None):
if toggle == None:
await ctx.send(":warning: Use the command again but mention the toggle i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
elif toggle != None:
if toggle.lower() == "on":
toggle = '1'
elif toggle.lower() == 'off':
toggle = '0'
else:
await ctx.send(":warning: Use the command again but mention the toggle correctly. i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
return
guildID= str(ctx.guild.id)
cur= conn.cursor()
r= cur.execute("SELECT toggle FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row == None:
await ctx.send("Channel is not setup yet! Use `!!setchannel` to set a channel.")
elif row != None:
cur.execute("UPDATE main SET toggle = '"+toggle+"' where guild_id = '"+guildID+"'")
conn.commit()
await ctx.send(f":tada: Toggle updated!")
def setup(bot):
bot.add_cog(Commands(bot))
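# Hypothetical usage sketch, not part of the original cog: a minimal entry point that
# loads this extension. The prefix matches the commands above; the token environment
# variable name is an assumption for illustration only.
if __name__ == "__main__":
    import os
    bot = commands.Bot(command_prefix="!!")
    bot.load_extension("cogs.commands")
    bot.run(os.environ["DISCORD_TOKEN"])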
| en | 0.469155 | #channel-name`") #{row[0]}>. Use `!!settings channel` to change it.") #{row[0]}>. Run `!!settings channel #channel-mention` to change this.") | 3.10357 | 3 |
poetry/console/commands/self/update.py | mgasner/poetry | 0 | 7786 |
import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
from functools import cmp_to_key
from gzip import GzipFile
try:
from urllib.error import HTTPError
from urllib.request import urlopen
except ImportError:
from urllib2 import HTTPError
from urllib2 import urlopen
from cleo import argument
from cleo import option
from ..command import Command
class SelfUpdateCommand(Command):
name = "update"
description = "Updates poetry to the latest version."
arguments = [argument("version", "The version to update to.", optional=True)]
options = [option("preview", None, "Install prereleases.")]
BASE_URL = "https://github.com/sdispater/poetry/releases/download"
@property
def home(self):
from poetry.utils._compat import Path
from poetry.utils.appdirs import expanduser
home = Path(expanduser("~"))
return home / ".poetry"
@property
def lib(self):
return self.home / "lib"
@property
def lib_backup(self):
return self.home / "lib-backup"
def handle(self):
from poetry.__version__ import __version__
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.semver import Version
from poetry.utils._compat import Path
current = Path(__file__)
try:
current.relative_to(self.home)
except ValueError:
raise RuntimeError(
"Poetry was not installed with the recommended installer. "
"Cannot update automatically."
)
version = self.argument("version")
if not version:
version = ">=" + __version__
repo = PyPiRepository(fallback=False)
packages = repo.find_packages(
"poetry", version, allow_prereleases=self.option("preview")
)
if not packages:
self.line("No release found for the specified version")
return
packages.sort(
key=cmp_to_key(
lambda x, y: 0
if x.version == y.version
else int(x.version < y.version or -1)
)
)
release = None
for package in packages:
if package.is_prerelease():
if self.option("preview"):
release = package
break
continue
release = package
break
if release is None:
self.line("No new release found")
return
if release.version == Version.parse(__version__):
self.line("You are using the latest version")
return
self.update(release)
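    # update() below follows a backup/rollback pattern: the current lib/
    # directory is copied to lib-backup/ before _update() runs, restored if
    # the download or extraction fails, and the backup is removed again in
    # the finally block.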
def update(self, release):
version = release.version
self.line("Updating to <info>{}</info>".format(version))
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
# Backup the current installation
if self.lib.exists():
shutil.copytree(str(self.lib), str(self.lib_backup))
shutil.rmtree(str(self.lib))
try:
self._update(version)
except Exception:
if not self.lib_backup.exists():
raise
shutil.copytree(str(self.lib_backup), str(self.lib))
shutil.rmtree(str(self.lib_backup))
raise
finally:
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
self.line("")
self.line("")
self.line(
"<info>Poetry</info> (<comment>{}</comment>) is installed now. Great!".format(
version
)
)
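    # _update() below mirrors the GitHub release layout: it fetches the
    # published sha256sum file first, streams the matching tar.gz while
    # updating a progress bar, compares the digest, and only then extracts
    # the archive into lib/.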
def _update(self, version):
from poetry.utils.helpers import temporary_directory
platform = sys.platform
if platform == "linux2":
platform = "linux"
checksum = "poetry-{}-{}.sha256sum".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, checksum))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(checksum))
raise
checksum = r.read().decode()
# We get the payload from the remote host
name = "poetry-{}-{}.tar.gz".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, name))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(name))
raise
meta = r.info()
size = int(meta["Content-Length"])
current = 0
block_size = 8192
bar = self.progress_bar(max=size)
bar.set_format(" - Downloading <info>{}</> <comment>%percent%%</>".format(name))
bar.start()
sha = hashlib.sha256()
with temporary_directory(prefix="poetry-updater-") as dir_:
tar = os.path.join(dir_, name)
with open(tar, "wb") as f:
while True:
buffer = r.read(block_size)
if not buffer:
break
current += len(buffer)
f.write(buffer)
sha.update(buffer)
bar.set_progress(current)
bar.finish()
# Checking hashes
if checksum != sha.hexdigest():
raise RuntimeError(
"Hashes for {} do not match: {} != {}".format(
name, checksum, sha.hexdigest()
)
)
gz = GzipFile(tar, mode="rb")
try:
with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:
f.extractall(str(self.lib))
finally:
gz.close()
def process(self, *args):
return subprocess.check_output(list(args), stderr=subprocess.STDOUT)
def _bin_path(self, base_path, bin):
if sys.platform == "win32":
return (base_path / "Scripts" / bin).with_suffix(".exe")
return base_path / "bin" / bin
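# --- Illustrative sketch (not part of the original update.py) ---
# The checksum step in _update() boils down to hashing the downloaded
# archive and comparing hex digests. A standalone equivalent, assuming a
# local file path and an expected digest string:
#
#     import hashlib
#
#     def sha256_matches(path, expected_hex, block_size=8192):
#         sha = hashlib.sha256()
#         with open(path, "rb") as f:
#             for chunk in iter(lambda: f.read(block_size), b""):
#                 sha.update(chunk)
#         return sha.hexdigest() == expected_hex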
osp/test/corpus/syllabus/test_text.py | davidmcclure/open-syllabus-project | 220 | 7787 |
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika
def test_empty(mock_osp):
"""
Should return None if the file is empty.
"""
path = mock_osp.add_file(content='', ftype='plain')
syllabus = Syllabus(path)
    assert syllabus.text is None
def test_plaintext(mock_osp):
"""
Should extract text from vanilla text files.
"""
path = mock_osp.add_file(content='text', ftype='plain')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_html(mock_osp):
"""
Should extract text from HTML files.
"""
path = mock_osp.add_file(content='<p>text</p>', ftype='html')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_pdf(mock_osp):
"""
Should extract text from PDF files.
"""
path = mock_osp.add_file(content='text', ftype='pdf')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
@requires_tika
def test_office(mock_osp):
"""
Should extract text from office files.
"""
path = mock_osp.add_file(content='text', ftype='docx')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
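# --- Note on the fixture pattern (summary, not part of the original tests) ---
# Each test writes a throwaway file of the given type via mock_osp.add_file()
# and asserts on Syllabus(path).text; the requires_tika marker guards the
# office-format case, which presumably needs an external Tika extractor to be
# running.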
boa_test/tests/test_ico_template.py | mixbee/neo-boa | 4 | 7788 |
from boa_test.tests.boa_test import BoaFixtureTest
from boa.compiler import Compiler
from neo.Core.TX.Transaction import Transaction
from neo.Prompt.Commands.BuildNRun import TestBuild
from neo.EventHub import events
from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent
from neo.Settings import settings
from neo.Prompt.Utils import parse_param
from neo.Core.FunctionCode import FunctionCode
from neocore.Fixed8 import Fixed8
from boa_test.example.demo.nex.token import *
import shutil
import os
from logzero import logger
settings.USE_DEBUG_STORAGE = True
settings.DEBUG_STORAGE_PATH = './fixtures/debugstorage'
class TestContract(BoaFixtureTest):
dispatched_events = []
dispatched_logs = []
@classmethod
def tearDownClass(cls):
super(BoaFixtureTest, cls).tearDownClass()
try:
if os.path.exists(settings.debug_storage_leveldb_path):
shutil.rmtree(settings.debug_storage_leveldb_path)
else:
logger.error("debug storage path doesn't exist")
except Exception as e:
logger.error("couldn't remove debug storage %s " % e)
@classmethod
def setUpClass(cls):
super(TestContract, cls).setUpClass()
def on_notif(evt):
print(evt)
cls.dispatched_events.append(evt)
print("dispatched events %s " % cls.dispatched_events)
def on_log(evt):
print(evt)
cls.dispatched_logs.append(evt)
events.on(SmartContractEvent.RUNTIME_NOTIFY, on_notif)
events.on(SmartContractEvent.RUNTIME_LOG, on_log)
def test_ICOTemplate_1(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# print(output.to_s())
tx, results, total_ops, engine = TestBuild(out, ['name', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_NAME)
tx, results, total_ops, engine = TestBuild(out, ['symbol', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_SYMBOL)
tx, results, total_ops, engine = TestBuild(out, ['decimals', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_DECIMALS)
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['nonexistentmethod', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), 'unknown operation')
# deploy with wallet 2 should fail CheckWitness
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# second time, it should already be deployed and return false
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now total supply should be equal to the initial owner amount
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
# now the owner should have a balance of the TOKEN_INITIAL_AMOUNT
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
def test_ICOTemplate_2(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# now transfer tokens to wallet 2
TestContract.dispatched_events = []
test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, test_transfer_amount])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.addr_from.Data, bytearray(TOKEN_OWNER))
self.assertEqual(evt.addr_to, self.wallet_2_script_hash)
self.assertEqual(evt.amount, test_transfer_amount)
# now get balance of wallet 2
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), test_transfer_amount)
# now the owner should have less
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT - test_transfer_amount)
# now this transfer should fail
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# this transfer should fail because it is not signed by the 'from' address
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, 10000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now this transfer should fail, this is from address with no tokens
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# get balance of bad data
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param(['abc'])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# get balance no params
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_ICOTemplate_3_KYC(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
print(output.to_s())
        # reset captured notification events before exercising KYC
TestContract.dispatched_events = []
# test mint tokens without being kyc verified
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Try to register as a non owner
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Get status of non registered address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
TestContract.dispatched_events = []
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertEqual(evt.event_payload.Value[0].Value, b'kyc_registration')
# register 2 addresses at once
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
# now check reg status
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
def test_ICOTemplate_4_attachments(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # inspect the attachments of an invocation with NEO attached
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
fn = FunctionCode(out, '0705', '05')
self.assertEqual(attachments[0].GetByteArray(), fn.ScriptHash().Data)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_3_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(10).value)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), bytearray())
self.assertEqual(attachments[2].GetBigInteger(), 0)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=3', '--attach-gas=3.12'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_1_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(3).value)
self.assertEqual(attachments[3].GetBigInteger(), Fixed8.FromDecimal(3.12).value)
def test_ICOTemplate_5_mint(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
TestContract.dispatched_events = []
# test mint tokens, this should return true
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.amount, 10 * TOKENS_PER_NEO)
self.assertEqual(evt.addr_to, self.wallet_3_script_hash)
# test mint tokens again, this should be false since you can't do it twice
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now the minter should have a balance
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10 * TOKENS_PER_NEO)
# now the total circulation should be bigger
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), (10 * TOKENS_PER_NEO) + TOKEN_INITIAL_AMOUNT)
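        # Note: the balance and totalSupply assertions above encode the crowdsale
        # exchange rate directly; attaching 10 NEO mints 10 * TOKENS_PER_NEO
        # tokens on top of the TOKEN_INITIAL_AMOUNT held by TOKEN_OWNER.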
def test_ICOTemplate_6_approval(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # transfer_from, approve, allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# try to approve from someone not yourself
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to approve more than you have
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
TestContract.dispatched_events = []
# approve should work
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.notify_type, b'approve')
self.assertEqual(evt.amount, 1234)
# check allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1234)
# approve should not be additive, it should overwrite previous approvals
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 133234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234)
# now you can transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
        # now the receiver should have a balance
# it is equal to 10000 plus test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10000 + 2400000001)
# now the allowance should be less
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234 - 10000)
# try to transfer too much, even with approval
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 14440000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# cant approve negative amounts
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, -1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_many_ops(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # exercise an operation outside the token's standard interface
tx, results, total_ops, engine = TestBuild(out, ['another_op_5', bytearray()], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 6)
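# Taken together, these tests exercise the NEP-5-style surface of the ICO
# template: deploy, mintTokens gated by KYC registration, transfer, and the
# approve/allowance/transferFrom flow, with NotifyEvent payloads checked for
# each state change.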
regexem.py | lvijay/ilc | 1 | 7789 |
#!/usr/bin/python
# -*- mode: python; -*-
## This file is part of Indian Language Converter
## Copyright (C) 2006 <NAME> <<EMAIL>>
## Indian Language Converter is free software; you can redistribute it
## and/or modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2 of
## the License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
## $Id: regexem.py,v 1.4 2006-03-26 03:15:24 vijay Exp $
## Author: <NAME>
## $Date: 2006-03-26 03:15:24 $
import sys
from re import escape
def regexem (strlst):
"""Returns a single string which is the regular expression to
identify any single word in the given argument.
See the Examples given at the end of this file."""
return regexem_internal([escape(s) for s in strlst])
def regexem_internal (strlst):
strlst.sort()
s, rest = strlst[0], strlst[1:]
groups = {}
groups[s] = [s]
for string in rest:
if string.startswith(s) and len(s) < len(string): # avoid duplicates
groups[s].append(string[len(s):]) # add the suffix to the group
else:
s = string # a fresh prefix
groups[s] = [s]
regex = ''
for prefix, words in groups.items():
inreg = ''
if len(words) == 2: # i.e. words[0] is a subset of words[1]
inreg += words[0] + '(' + words[1] + ')' + '?'
elif len(words) > 2:
inreg += words[0] + '(' + regexem_internal(words[1:]) + ')' + '?'
else:
inreg += prefix # since prefix == words[0] in this case.
regex += '(' + inreg + ')' + '|'
return regex[:-1] # we don't need the last '|'
if __name__ == '__main__':
    print regexem(sys.argv[1:])
## Examples
#
# $ ./regexem.py emacs vi ed
# (ed)|(emacs)|(vi)
#
# $ ./regexem.py batsman bats well
# (well)|(bats(man)?)
#
# $ ./regexem.py houses housefly
# (houses)|(housefly) ## Note that they aren't grouped together
#
## a slightly complicated example
# $ ./regexem.py an anteater and an ant
# (an((d)|(t(eater)?))?)
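#
## Illustrative usage from other code (an assumption, not in the original):
#
# import re
# pattern = '^(' + regexem(['emacs', 'vi', 'ed']) + ')$'
# re.match(pattern, 'vi') ## matches; re.match(pattern, 'vim') does not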
main.py | rohit-k-das/crowdstrike-alerts | 3 | 7790 |
import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import configparser
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
Config = configparser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
logger.info("Send hubot alert for detection %s" % detection.detection_id)
# Emoji for slack based on action taken
green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
'Operation blocked']
red_alerts = ['Policy disabled']
amber_alerts = []
actions = []
for behavior in detection.behavior:
actions.extend(behavior['action_taken'])
if actions:
actions = list(set(actions))
alerts = []
if actions:
if list(set(actions).intersection(red_alerts)):
alerts.append(':red-alert: Allowed')
if list(set(actions).intersection(green_alerts)):
alerts.append(':green-alert: Blocked')
else:
alerts.append(':red-alert: Allowed')
if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
alerts = [':amber-alert: Suspicious']
message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
detection.severity, detection.link, detection.detection_id.split(':')[2], str(alerts).strip('[').strip(']').replace("'", ""))
message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
for behavior in detection.behavior:
message_to_send = "%sBad Behavior: %s\n" % (message_to_send, behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
if behavior['action_taken']:
message_to_send = "%sAction Taken: %s" % (
message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
else:
message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
if len(detection.behavior) > 1:
message_to_send = "%s\n" % message_to_send
# Whom to send the alert
send_to = 'yourchannel or a user'
data = {'message': message_to_send, 'users': send_to}
data = urllib.parse.urlencode(data)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
resp = requests.post(hubot_webhook_url, headers=headers, data=data)
if resp.ok:
logger.info("Sent alert to user/channel %s" % send_to)
else:
logger.critical("Unable to connect to hubot.")
logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False, help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
crowdstrike_detections = crowdstrike.fetch_detections(duration)
if crowdstrike_detections:
logger.info("Sending alerts")
for detection in crowdstrike_detections:
send_hubot_alert_crowdstrike(detection)
if __name__ == '__main__':
main()
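# A hypothetical `Crowdstrike_creds` file, kept next to this script; only the
# [Settings]/Slackbot_Url key is read above, and the URL below is a placeholder:
#
#   [Settings]
#   Slackbot_Url = https://hubot.example.com/hubot/slack-alert
#
# Typical invocation, using the --duration option declared on main():
#   python main.py --duration 3600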
| import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import configparser  # Python 3 module name (the script already relies on urllib.parse)
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
Config = configparser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
logger.info("Send hubot alert for detection %s" % detection.detection_id)
# Emoji for slack based on action taken
green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
'Operation blocked']
red_alerts = ['Policy disabled']
amber_alerts = []
actions = []
for behavior in detection.behavior:
actions.extend(behavior['action_taken'])
if actions:
actions = list(set(actions))
alerts = []
if actions:
if list(set(actions).intersection(red_alerts)):
alerts.append(':red-alert: Allowed')
if list(set(actions).intersection(green_alerts)):
alerts.append(':green-alert: Blocked')
else:
alerts.append(':red-alert: Allowed')
if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
alerts = [':amber-alert: Suspicious']
message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
detection.severity, detection.link, detection.detection_id.split(':')[2], str(alerts).strip('[').strip(']').replace("'", ""))
message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
for behavior in detection.behavior:
message_to_send = "%sBad Behavior: %s\n" % (message_to_send, behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
if behavior['action_taken']:
message_to_send = "%sAction Taken: %s" % (
message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
else:
message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
if len(detection.behavior) > 1:
message_to_send = "%s\n" % message_to_send
# Whom to send the alert
send_to = 'yourchannel or a user'
data = {'message': message_to_send, 'users': send_to}
data = urllib.parse.urlencode(data)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
resp = requests.post(hubot_webhook_url, headers=headers, data=data)
if resp.ok:
logger.info("Sent alert to user/channel %s" % send_to)
else:
logger.critical("Unable to connect to hubot.")
logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False, help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
crowdstrike_detections = crowdstrike.fetch_detections(duration)
if crowdstrike_detections:
logger.info("Sending alerts")
for detection in crowdstrike_detections:
send_hubot_alert_crowdstrike(detection)
if __name__ == '__main__':
main()
| en | 0.783962 | # Create your own slackbot # Send slack alert via hubot for each high or critical detection in crowdstrike # Emoji for slack based on action taken # Whom to send the alert | 2.343609 | 2 |
connexion/http_facts.py | lumikanta/connexion | 0 | 7791 | <filename>connexion/http_facts.py
FORM_CONTENT_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data'
]
| <filename>connexion/http_facts.py
FORM_CONTENT_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data'
]
| none | 1 | 1.118037 | 1 |
|
Test3/yandexAPI3.py | klepik1990/YandexTestAPI | 0 | 7792 | import requests
import json
HEADERS = {"Authorization": "OAuth <KEY>", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
"""Получение информации о статусе папок на диске
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках: путь до папок, если созданы успешно. В противном случае описание ошибки.
"""
info = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path", headers=HEADERS)
dict_response = json.loads(info.content)
if info.status_code == 404:
return dict_response["description"]
else:
return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Получение информации о файле
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Путь до файла.
"""
file_info_json = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&fields=path", headers = HEADERS)
file_info_dict = json.loads(file_info_json.content)
if file_info_json.status_code == 404:
return file_info_dict["description"]
else:
return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
"""Создание папок на диске.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках через вызов другой функции.
"""
response_code = [202, 204]
new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
if new_folder.status_code == 409:
new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
if new_folder.status_code in response_code:
requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Загрузка файла на диск.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информацию о созданном файле через вызов другой функции.
"""
    assert len(file_name) > 0, "File name is not provided"
new_file = requests.get(url= URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&overwrite=true", headers=HEADERS)
get_link = new_file.content
link = json.loads(get_link)
requests.put(url=link["href"])
return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
"""Перемещение папки с содержимым в корзину.
Args:
folder_name: имя корневой папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Ссылку для проверки статуса.
"""
order_response = requests.delete(url= URL + "resources?path=" + folder_name, headers=HEADERS)
return json.loads(order_response.content)["href"]
def get_status(link, headers=None):
"""Получение статуса операции по ссылке.
Args:
link: ссылка, для которой проверяется статус.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Статус операции.
"""
status_response = requests.get(url=link, headers=HEADERS)
return json.loads(status_response.content)["status"]
def clean_bucket():
"""Очистка корзины.
Returns:
Ссылку для проверки статуса.
"""
remove_folder = requests.delete(url= URL + "trash/resources", headers=HEADERS)
return json.loads(remove_folder.content)["href"]
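# A hypothetical usage sketch; the folder and file names are placeholders and the calls
# simply chain the functions defined above (create_file uploads "<file_name>.jpg"):
# create_folder("TestFolder", "Nested")
# create_file("TestFolder", "Nested", "photo")
# link = move_to_bucket("TestFolder")
# print(get_status(link))  # e.g. "success" once the move has completed
# clean_bucket()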
| import requests
import json
HEADERS = {"Authorization": "OAuth <KEY>", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
"""Получение информации о статусе папок на диске
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках: путь до папок, если созданы успешно. В противном случае описание ошибки.
"""
info = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path", headers=HEADERS)
dict_response = json.loads(info.content)
if info.status_code == 404:
return dict_response["description"]
else:
return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Получение информации о файле
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Путь до файла.
"""
file_info_json = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&fields=path", headers = HEADERS)
file_info_dict = json.loads(file_info_json.content)
if file_info_json.status_code == 404:
return file_info_dict["description"]
else:
return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
"""Создание папок на диске.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках через вызов другой функции.
"""
response_code = [202, 204]
new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
if new_folder.status_code == 409:
new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
if new_folder.status_code in response_code:
requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Загрузка файла на диск.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информацию о созданном файле через вызов другой функции.
"""
    assert len(file_name) > 0, "File name is not provided"
new_file = requests.get(url= URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&overwrite=true", headers=HEADERS)
get_link = new_file.content
link = json.loads(get_link)
requests.put(url=link["href"])
return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
"""Перемещение папки с содержимым в корзину.
Args:
folder_name: имя корневой папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Ссылку для проверки статуса.
"""
order_response = requests.delete(url= URL + "resources?path=" + folder_name, headers=HEADERS)
return json.loads(order_response.content)["href"]
def get_status(link, headers=None):
"""Получение статуса операции по ссылке.
Args:
link: ссылка, для которой проверяется статус.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Статус операции.
"""
status_response = requests.get(url=link, headers=HEADERS)
return json.loads(status_response.content)["status"]
def clean_bucket():
"""Очистка корзины.
Returns:
Ссылку для проверки статуса.
"""
remove_folder = requests.delete(url= URL + "trash/resources", headers=HEADERS)
return json.loads(remove_folder.content)["href"]
| ru | 0.972711 | Получение информации о статусе папок на диске Args: folder_name_1: имя корневой папки. folder_name_2: имя вложенной папки. url: адрес для запроса. headers: заголовки запроса, содержащие токен авторизации. Returns: Информация о папках: путь до папок, если созданы успешно. В противном случае описание ошибки. Получение информации о файле Args: folder_name_1: имя корневой папки. folder_name_2: имя вложенной папки. file_name: имя файла. url: адрес для запроса. headers: заголовки запроса, содержащие токен авторизации. Returns: Путь до файла. Создание папок на диске. Args: folder_name_1: имя корневой папки. folder_name_2: имя вложенной папки. url: адрес для запроса. headers: заголовки запроса, содержащие токен авторизации. Returns: Информация о папках через вызов другой функции. Загрузка файла на диск. Args: folder_name_1: имя корневой папки. folder_name_2: имя вложенной папки. file_name: имя файла. url: адрес для запроса. headers: заголовки запроса, содержащие токен авторизации. Returns: Информацию о созданном файле через вызов другой функции. Перемещение папки с содержимым в корзину. Args: folder_name: имя корневой папки. url: адрес для запроса. headers: заголовки запроса, содержащие токен авторизации. Returns: Ссылку для проверки статуса. Получение статуса операции по ссылке. Args: link: ссылка, для которой проверяется статус. headers: заголовки запроса, содержащие токен авторизации. Returns: Статус операции. Очистка корзины. Returns: Ссылку для проверки статуса. | 2.990415 | 3 |
app/users/operator/views.py | trinanda/AQUR | 0 | 7793 | import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
@operator.route('/')
@login_required
@operator_required
def index():
title = os.environ.get('APP_NAME')
# get all students data on schedule, except if the student tuition payment is None, PENDING, REJECTED or WARNING_3
students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
        and_(Payment.status_of_payment.isnot(None),  # SQL-level NULL check (a bare "is not None" is evaluated in Python, not by the database)
Payment.status_of_payment != PaymentStatus.PENDING.name,
Payment.status_of_payment != PaymentStatus.REJECTED.name,
Payment.status_of_payment != PaymentStatus.WARNING_3.name))
# get the amount of Teachers and Students
total_students = Student.query.count()
total_teachers = Teacher.query.count()
month_name_list = []
for data in MonthNameList:
month_name_list.append(str(data))
# make a query object for "Tahsin" and "Arabic Language" course
tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")
# the total payment for the courses each month
tahsin_course_data = []
arabic_course_data = []
for data in tahsin:
for month_name in month_name_list:
tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
for data in arabic:
for month_name in month_name_list:
arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
# merge and sum the total value from the dictionary on the same month from the _courses_data result above
total_tahsin_students_per_month = defaultdict(int)
total_arabic_students_per_month = defaultdict(int)
for d in tahsin_course_data:
for key, value in d.items():
total_tahsin_students_per_month[key] += value
for d in arabic_course_data:
for key, value in d.items():
total_arabic_students_per_month[key] += value
# store all of the month values on a list for each course
tahsin_values = []
arabic_values = []
for key, value in total_tahsin_students_per_month.items():
tahsin_values.append(value)
for key, value in total_arabic_students_per_month.items():
arabic_values.append(value)
# make a dictionary to represent course name with the matching total student that do the payment for each month
data_courses_each_month = [
{
'Tahsin': tahsin_values,
},
{
'Bahasa Arab': arabic_values
}
]
return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
total_students=total_students, month_name_list=month_name_list,
data_courses_each_month=data_courses_each_month)
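    # Illustrative shape of the structure handed to the template (counts are placeholders,
    # one value per month in month_name_list):
    # data_courses_each_month == [
    #     {'Tahsin': [4, 7, 5, ...]},
    #     {'Bahasa Arab': [2, 3, 6, ...]},
    # ]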
| import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
@operator.route('/')
@login_required
@operator_required
def index():
title = os.environ.get('APP_NAME')
# get all students data on schedule, except if the student tuition payment is None, PENDING, REJECTED or WARNING_3
students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
        and_(Payment.status_of_payment.isnot(None),  # SQL-level NULL check (a bare "is not None" is evaluated in Python, not by the database)
Payment.status_of_payment != PaymentStatus.PENDING.name,
Payment.status_of_payment != PaymentStatus.REJECTED.name,
Payment.status_of_payment != PaymentStatus.WARNING_3.name))
# get the amount of Teachers and Students
total_students = Student.query.count()
total_teachers = Teacher.query.count()
month_name_list = []
for data in MonthNameList:
month_name_list.append(str(data))
# make a query object for "Tahsin" and "Arabic Language" course
tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")
# the total payment for the courses each month
tahsin_course_data = []
arabic_course_data = []
for data in tahsin:
for month_name in month_name_list:
tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
for data in arabic:
for month_name in month_name_list:
arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
# merge and sum the total value from the dictionary on the same month from the _courses_data result above
total_tahsin_students_per_month = defaultdict(int)
total_arabic_students_per_month = defaultdict(int)
for d in tahsin_course_data:
for key, value in d.items():
total_tahsin_students_per_month[key] += value
for d in arabic_course_data:
for key, value in d.items():
total_arabic_students_per_month[key] += value
# store all of the month values on a list for each course
tahsin_values = []
arabic_values = []
for key, value in total_tahsin_students_per_month.items():
tahsin_values.append(value)
for key, value in total_arabic_students_per_month.items():
arabic_values.append(value)
# make a dictionary to represent course name with the matching total student that do the payment for each month
data_courses_each_month = [
{
'Tahsin': tahsin_values,
},
{
'Bahasa Arab': arabic_values
}
]
return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
total_students=total_students, month_name_list=month_name_list,
data_courses_each_month=data_courses_each_month)
| en | 0.819447 | # get all students data on schedule, except if the student tuition payment is None, PENDING, REJECTED or WARNING_3 # get the amount of Teachers and Students # make a query object for "Tahsin" and "Arabic Language" course # the total payment for the courses each month # merge and sum the total value from the dictionary on the same month from the _courses_data result above # store all of the month values on a list for each course # make a dictionary to represent course name with the matching total student that do the payment for each month | 2.608465 | 3 |
arvet/core/metric.py | jskinn/arvet | 2 | 7794 | <gh_stars>1-10
# Copyright (c) 2017, <NAME>
import abc
import typing
import bson
import pymodm
import pymodm.fields as fields
import arvet.database.pymodm_abc as pymodm_abc
from arvet.database.reference_list_field import ReferenceListField
import arvet.core.trial_result
class Metric(pymodm.MongoModel, metaclass=pymodm_abc.ABCModelMeta):
"""
A class that measures results
This is an abstract base class defining an interface for all metrics,
to allow them to be called easily and in a structured way.
"""
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id for this metric
:return:
"""
return self._id
@abc.abstractmethod
def is_trial_appropriate(self, trial_result: arvet.core.trial_result.TrialResult) -> bool:
"""
Fine-grained filtering for trial results, to make sure this class can measure this trial result.
:return:
"""
pass
@abc.abstractmethod
def measure_results(self, trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> 'MetricResult':
"""
Measure the results of running a particular system on a particular image source.
We take a collection of trials to allow for multiple repeats of the system on the same data,
which allows us to account for and measure random variation in the system.
A helper to check this is provided below, call it in any implementation.
The trial result MUST include the ground truth along with the system estimates,
which must be the same for all trials.
:param trial_results: A collection of trial results to measure.
These are assumed to be repeat runs of the same system on the same data.
:return: A MetricResult object containing either the results, or explaining the error
:rtype: MetricResult
"""
pass
@abc.abstractmethod
def get_columns(self) -> typing.Set[str]:
"""
Get the set of available properties for this metric. Pass these to "get_properties", below.
:return:
"""
pass
@abc.abstractmethod
def get_properties(self, columns: typing.Iterable[str] = None) -> typing.Mapping[str, typing.Any]:
"""
Get the values of the requested properties
:param columns:
:return:
"""
pass
@classmethod
def get_pretty_name(cls) -> str:
"""
Get a human-readable name for this metric
:return:
"""
return cls.__module__ + '.' + cls.__name__
@classmethod
def get_instance(cls) -> 'Metric':
"""
        Get an instance of this metric, with some parameters, pulling from the database if possible,
or construct a new one if needed.
It is the responsibility of subclasses to ensure that as few instances of each system as possible exist
within the database.
Does not save the returned object, you'll usually want to do that straight away.
:return:
"""
all_objects = cls.objects.all()
if all_objects.count() > 0:
return all_objects.first()
obj = cls()
return obj
class MetricResult(pymodm.MongoModel):
"""
A general superclass for metric results for all metrics
"""
metric = fields.ReferenceField(Metric, required=True, on_delete=fields.ReferenceField.CASCADE)
trial_results = ReferenceListField(arvet.core.trial_result.TrialResult,
required=True, on_delete=fields.ReferenceField.CASCADE)
success = fields.BooleanField(required=True)
message = fields.CharField()
# The set of plots available to visualize_results.
available_plots = set()
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id of this metric result
:return:
"""
return self._id
def get_columns(self) -> typing.Set[str]:
"""
Get a list of available results columns, which are the possible keys in dictionaries returned by get_results.
Should delegate to the linked trial results, systems, etc for the full list.
:return:
"""
return set()
def get_results(self, columns: typing.Iterable[str] = None) -> typing.List[dict]:
"""
Get the results from this metric result, as a list of dictionaries we can turn into a Pandas data frame.
Each dictionary should include as much data as possible, including data about the system, the image source,
the particular image, etc...
Use the argument to restrict the columns to a limited set, should return all by default.
This must return a non-empty list for any trial result where success is True.
:return:
"""
return []
def check_trial_collection(trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> typing.Union[str, None]:
"""
A helper function to check that all the given trial results come from the same system and image source.
Call this at the start of Metric.measure_results
:param trial_results: A collection of trial results passed to Metric.measure_results
:return: None if all the trials are OK, string explaining the problem if they are not
"""
first_trial = None
for idx, trial in enumerate(trial_results):
if not trial.success:
return "Trial {0} (1) is failed".format(idx, trial.pk)
if first_trial is None:
first_trial = trial
else:
if trial.image_source != first_trial.image_source:
return "Trial {0} ({1}) does not have the same image source as the first trial".format(idx, trial.pk)
if trial.system != first_trial.system:
return "Trial {0} ({1}) does not have the same system as the first trial".format(idx, trial.pk)
| # Copyright (c) 2017, <NAME>
import abc
import typing
import bson
import pymodm
import pymodm.fields as fields
import arvet.database.pymodm_abc as pymodm_abc
from arvet.database.reference_list_field import ReferenceListField
import arvet.core.trial_result
class Metric(pymodm.MongoModel, metaclass=pymodm_abc.ABCModelMeta):
"""
A class that measures results
This is an abstract base class defining an interface for all metrics,
to allow them to be called easily and in a structured way.
"""
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id for this metric
:return:
"""
return self._id
@abc.abstractmethod
def is_trial_appropriate(self, trial_result: arvet.core.trial_result.TrialResult) -> bool:
"""
Fine-grained filtering for trial results, to make sure this class can measure this trial result.
:return:
"""
pass
@abc.abstractmethod
def measure_results(self, trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> 'MetricResult':
"""
Measure the results of running a particular system on a particular image source.
We take a collection of trials to allow for multiple repeats of the system on the same data,
which allows us to account for and measure random variation in the system.
A helper to check this is provided below, call it in any implementation.
The trial result MUST include the ground truth along with the system estimates,
which must be the same for all trials.
:param trial_results: A collection of trial results to measure.
These are assumed to be repeat runs of the same system on the same data.
:return: A MetricResult object containing either the results, or explaining the error
:rtype: MetricResult
"""
pass
@abc.abstractmethod
def get_columns(self) -> typing.Set[str]:
"""
Get the set of available properties for this metric. Pass these to "get_properties", below.
:return:
"""
pass
@abc.abstractmethod
def get_properties(self, columns: typing.Iterable[str] = None) -> typing.Mapping[str, typing.Any]:
"""
Get the values of the requested properties
:param columns:
:return:
"""
pass
@classmethod
def get_pretty_name(cls) -> str:
"""
Get a human-readable name for this metric
:return:
"""
return cls.__module__ + '.' + cls.__name__
@classmethod
def get_instance(cls) -> 'Metric':
"""
        Get an instance of this metric, with some parameters, pulling from the database if possible,
or construct a new one if needed.
It is the responsibility of subclasses to ensure that as few instances of each system as possible exist
within the database.
Does not save the returned object, you'll usually want to do that straight away.
:return:
"""
all_objects = cls.objects.all()
if all_objects.count() > 0:
return all_objects.first()
obj = cls()
return obj
class MetricResult(pymodm.MongoModel):
"""
A general superclass for metric results for all metrics
"""
metric = fields.ReferenceField(Metric, required=True, on_delete=fields.ReferenceField.CASCADE)
trial_results = ReferenceListField(arvet.core.trial_result.TrialResult,
required=True, on_delete=fields.ReferenceField.CASCADE)
success = fields.BooleanField(required=True)
message = fields.CharField()
# The set of plots available to visualize_results.
available_plots = set()
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id of this metric result
:return:
"""
return self._id
def get_columns(self) -> typing.Set[str]:
"""
Get a list of available results columns, which are the possible keys in dictionaries returned by get_results.
Should delegate to the linked trial results, systems, etc for the full list.
:return:
"""
return set()
def get_results(self, columns: typing.Iterable[str] = None) -> typing.List[dict]:
"""
Get the results from this metric result, as a list of dictionaries we can turn into a Pandas data frame.
Each dictionary should include as much data as possible, including data about the system, the image source,
the particular image, etc...
Use the argument to restrict the columns to a limited set, should return all by default.
This must return a non-empty list for any trial result where success is True.
:return:
"""
return []
def check_trial_collection(trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> typing.Union[str, None]:
"""
A helper function to check that all the given trial results come from the same system and image source.
Call this at the start of Metric.measure_results
:param trial_results: A collection of trial results passed to Metric.measure_results
:return: None if all the trials are OK, string explaining the problem if they are not
"""
first_trial = None
for idx, trial in enumerate(trial_results):
if not trial.success:
return "Trial {0} (1) is failed".format(idx, trial.pk)
if first_trial is None:
first_trial = trial
else:
if trial.image_source != first_trial.image_source:
return "Trial {0} ({1}) does not have the same image source as the first trial".format(idx, trial.pk)
if trial.system != first_trial.system:
return "Trial {0} ({1}) does not have the same system as the first trial".format(idx, trial.pk) | en | 0.883912 | # Copyright (c) 2017, <NAME> A class that measures results This is an abstract base class defining an interface for all metrics, to allow them to be called easily and in a structured way. Get the id for this metric :return: Fine-grained filtering for trial results, to make sure this class can measure this trial result. :return: Measure the results of running a particular system on a particular image source. We take a collection of trials to allow for multiple repeats of the system on the same data, which allows us to account for and measure random variation in the system. A helper to check this is provided below, call it in any implementation. The trial result MUST include the ground truth along with the system estimates, which must be the same for all trials. :param trial_results: A collection of trial results to measure. These are assumed to be repeat runs of the same system on the same data. :return: A MetricResult object containing either the results, or explaining the error :rtype: MetricResult Get the set of available properties for this metric. Pass these to "get_properties", below. :return: Get the values of the requested properties :param columns: :return: Get a human-readable name for this metric :return: Get an instance of this vision system, with some parameters, pulling from the database if possible, or construct a new one if needed. It is the responsibility of subclasses to ensure that as few instances of each system as possible exist within the database. Does not save the returned object, you'll usually want to do that straight away. :return: A general superclass for metric results for all metrics # The set of plots available to visualize_results. Get the id of this metric result :return: Get a list of available results columns, which are the possible keys in dictionaries returned by get_results. Should delegate to the linked trial results, systems, etc for the full list. :return: Get the results from this metric result, as a list of dictionaries we can turn into a Pandas data frame. Each dictionary should include as much data as possible, including data about the system, the image source, the particular image, etc... Use the argument to restrict the columns to a limited set, should return all by default. This must return a non-empty list for any trial result where success is True. :return: A helper function to check that all the given trial results come from the same system and image source. Call this at the start of Metric.measure_results :param trial_results: A collection of trial results passed to Metric.measure_results :return: None if all the trials are OK, string explaining the problem if they are not | 2.453861 | 2 |
pfile/accessor.py | thorwhalen/ut | 4 | 7795 | <filename>pfile/accessor.py
"""File access utils"""
__author__ = 'thorwhalen'
# from ut.datapath import datapath
import pickle
import os
from ut.util.importing import get_environment_variable
import pandas as pd
import ut.pfile.to as file_to
import ut.pfile.name as pfile_name
import ut.pstr.to as pstr_to
from ut.serialize.local import Local
from ut.serialize.s3 import S3
from os import environ # does this load the whole array? Can we just take MS_DATA instead?
import ut.pstr.trans as pstr_trans
import shutil
try:
MS_DATA = get_environment_variable('MS_DATA')
except KeyError:
MS_DATA = ''
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
####################################################################################################################
# Quick Utils
def ms_data_path(relative_root, root_folder=MS_DATA):
return os.path.join(pfile_name.ensure_slash_suffix(root_folder), relative_root)
####################################################################################################################
# FACTORIES
def for_local(relative_root='', read_only=False, extension=None, force_extension=False, root_folder=MS_DATA, **kwargs):
# if a full path (i.e. starting with "/" is entered as a relative_root, then take it as the sound_file_root_folder
if relative_root and ((relative_root[0] == '/') or (relative_root[0] == '~')):
root_folder = relative_root
relative_root = ''
elif relative_root == 'test': # if relative root is test...
relative_root = 'test'
print("you asked for a local test, so I forced the root to be %s" % relative_root)
# ensure that sound_file_root_folder ends with a "/"
file_handler = FilepathHandler(relative_root=pfile_name.ensure_slash_suffix(root_folder)+relative_root)
# take care of extensions
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_LOCAL,
read_only=read_only,
**kwargs
)
instance._set_local_defaults()
return instance
def for_s3(relative_root='loc-data', read_only=False, extension=None, force_extension=False, **kwargs):
if relative_root == 'test':
relative_root = 'loc-data/test'
print("you asked for a s3 test, so I forced the root to be %s" % relative_root)
file_handler = FilepathHandler(relative_root=relative_root)
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_S3,
read_only=read_only,
**kwargs
)
save_kwargs = instance.mk_save_kwargs(relative_root)
try:
bucket_name = save_kwargs['bucket_name']
base_folder = save_kwargs['key_name']
except:
print("couldn't get bucket_name and key_name for relative_root")
instance.s3 = S3(bucket_name=bucket_name, base_folder=base_folder)
instance._set_s3_defaults()
return instance
####################################################################################################################
class Accessor(object):
LOCATION_LOCAL = LOCATION_LOCAL
LOCATION_S3 = LOCATION_S3
def __init__(self,
file_loc_proc=None,
location=LOCATION_LOCAL,
mk_save_kwargs=None,
pre_save_proc=None,
save_fun=None,
mk_load_kwargs=None,
load_fun=None,
post_load_proc=None,
read_only=False,
**kwargs):
# if file_loc_proc:
# self.file_loc_proc = file_loc_proc
# else:
# self.file_loc_proc = FilepathHandler().process
self.file_loc_proc = file_loc_proc
self.location = location
self.mk_save_kwargs = mk_save_kwargs
self.pre_save_proc = pre_save_proc
self.save_fun = save_fun
self.mk_load_kwargs = mk_load_kwargs
self.load_fun = load_fun
self.post_load_proc = post_load_proc
self.read_only = read_only
for k, v in list(kwargs.items()):
self.__setattr__(k,v)
self._guess_missing_attributes()
def __call__(self, *args, **kwargs):
return self.filepath(*args, **kwargs)
####################################################################################################################
# INSTANCE METHODS
def root_folder(self):
if self.extension:
return self.file_loc_proc('')[:(-len(self.extension))]
else:
return self.file_loc_proc('')
def filepath(self, file_spec):
return self.file_loc_proc(file_spec)
def exists(self, file_spec):
return os.path.exists(self.filepath(file_spec))
def save(self, obj, file_spec, **kwargs):
if self.read_only:
raise BaseException("read_only was set to True, so you can't save anything")
else:
# make the dict specifying the input to the save_fun
file_spec = self.file_loc_proc(file_spec)
if self.pre_save_proc:
obj = self.pre_save_proc(obj)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(file_spec)
self.save_fun(obj, **file_spec_kwargs)
else:
self.save_fun(obj, file_spec)
def append(self, obj, file_spec, **kwargs): # TODO: Write this code someday
"""
Intent of this function is to append data to a file's data without having to specify how to do so.
For example, if the obj is a string and the file is a text file, use file append.
If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the
data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes
appended.
Etc.
"""
pass
# if isinstance(obj, basestring):
# raise ValueError("strings not implemented yet")
# elif isinstance(obj, (pd.DataFrame, pd.Series)):
# pass
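        # One possible implementation sketch (an assumption, not the author's method):
        # append text for strings, concatenate and re-save for pandas objects.
        # if isinstance(obj, str):
        #     with open(self.filepath(file_spec), 'a') as fp:
        #         fp.write(obj)
        # elif isinstance(obj, (pd.DataFrame, pd.Series)):
        #     old = self.load(file_spec) if self.exists(file_spec) else None
        #     self.save(obj if old is None else pd.concat([old, obj]), file_spec)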
def load(self, file_spec, **kwargs):
file_spec = self.file_loc_proc(file_spec)
if pfile_name.get_extension(file_spec) not in ['.xls', '.xlsx']:
if self.mk_load_kwargs:
file_spec_kwargs = self.mk_load_kwargs(file_spec)
obj = self.load_fun(**file_spec_kwargs)
else:
obj = self.load_fun(file_spec)
if self.post_load_proc:
obj = self.post_load_proc(obj)
else:
# obj = pd.read_excel(file_spec, **kwargs)
xls = pd.ExcelFile(file_spec)
kwargs = dict({'sheetname': xls.sheet_names[0]}, **kwargs) # take first sheet if sheet not specified
obj = pd.read_excel(file_spec, **kwargs)
#obj = xls.parse(**kwargs)
return obj
def copy_local_file_to(self, local_file_path, target_file_spec):
'''
Copies a file from the local computer to self.filepath(target_file_spec)
:param local_file_path:
:param target_file_spec:
:return:
'''
if self.read_only:
raise BaseException("read_only was set to True, so you can't copy anything to this location")
else:
if self.location == LOCATION_LOCAL:
if not os.path.exists(local_file_path):
local_file_path = self.filepath(local_file_path)
shutil.copyfile(local_file_path, self.filepath(target_file_spec))
elif self.location == LOCATION_S3:
# make the dict specifying the input to the save_fun
target_file_spec = self.file_loc_proc(target_file_spec)
if self.pre_save_proc:
local_file_path = self.pre_save_proc(local_file_path)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(target_file_spec)
self.copy_local_file_to_fun(local_file_path, **file_spec_kwargs)
else:
raise ("this shouldn't happen")
else:
raise ValueError("unknown location")
def copy_to(self, target_relative_root, file_spec, target_location=None):
if isinstance(target_relative_root, str):
target_relative_root, target_location = \
_make_a_file_loc_proc_and_location_from_string_specifications(target_relative_root, target_location)
# make a file accessor for the (target_location, target_relative_root)
facc = Accessor(relative_root=target_relative_root, location=target_location)
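        # A possible final step (sketch only; the method currently stops after building facc):
        # facc.save(self.load(file_spec), file_spec)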
####################################################################################################################
# PARTIAL FACTORIES
def _add_extension_handler(self, extension, force_extension=False):
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
self.file_loc_proc = lambda x : self.file_loc_proc(extension_handler.process(x))
def _guess_missing_attributes(self):
if self.file_loc_proc is None: # if no file_loc_proc is given
if self.location is not None and isinstance(self.location, str):
                self.file_loc_proc = self.location  # assignment, not comparison
            else:
                self.file_loc_proc = LOCATION_LOCAL  # assignment, not comparison
elif isinstance(self.file_loc_proc, str): # if file_loc_proc is a string
self.file_loc_proc, self.location = \
_make_a_file_loc_proc_and_location_from_string_specifications(self.file_loc_proc, self.location)
# if self.file_loc_proc==LOCATION_LOCAL:
# self.location = LOCATION_LOCAL
# self.file_loc_proc = ''
# elif self.file_loc_proc==LOCATION_S3:
# self.location = LOCATION_S3
# self.file_loc_proc = ''
# else:
# if self.location==LOCATION_LOCAL:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process
# elif self.location==LOCATION_S3:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process
# set defaults for remaining missing attributes
self._set_defaults()
def _set_defaults(self):
if self.location is None:
print("setting location to LOCAL (because you didn't specify a location)")
self.location = LOCATION_LOCAL
if self.location == LOCATION_LOCAL:
self._set_local_defaults()
elif self.location == LOCATION_S3:
self._set_s3_defaults()
def _set_local_defaults(self, root_folder=MS_DATA):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root=os.path.join(root_folder)).process
self.save_fun = self.save_fun or LocalIOMethods().unicode_save
self.load_fun = self.load_fun or LocalIOMethods().unicode_load
# self.pre_save_proc = self.pre_save_proc or FilepathHandler().process
# self.post_load_proc = self.post_load_proc or FilepathHandler().process
def _set_s3_defaults(self):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root='loc-data').process
self.mk_save_kwargs = fullpath_to_s3_kargs
self.mk_load_kwargs = fullpath_to_s3_kargs
self.save_fun = self.save_fun or S3IOMethods().unicode_save
self.load_fun = self.load_fun or S3IOMethods().unicode_load
self.copy_local_file_to_fun = S3IOMethods().copy_local_file_to_fun
####################################################################################################################
# OBJECT UTILS
def local_file_loc_proc_simple(self, file_spec):
# add extension
file_spec = self.handle_extension(file_spec)
# remove slash suffix if present (because self.sound_file_root_folder ends with / already)
if file_spec.startswith('/'):
file_spec = file_spec[1:]
def handle_extension(self, file_spec):
if self.extension:
if self.force_extension:
file_spec = pfile_name.replace_extension(file_spec, self.extension)
else:
file_spec = pfile_name.add_extension_if_not_present(file_spec, self.extension)
return os.path.join(self.root_folder, file_spec)
####################################################################################################################
# OTHER UTILS
def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location):
if file_loc_proc is None and isinstance(location, str):
file_loc_proc = location + "/"
location = None
elif location is None and isinstance(file_loc_proc, str):
        first_folder = pfile_name.get_highest_level_folder(file_loc_proc)  # location is None in this branch, so take the prefix from file_loc_proc
if first_folder in [LOCATION_LOCAL, LOCATION_S3]:
location = first_folder # set the location to first_folder
file_loc_proc.replace(location+"/","") # remove the first_folder
else:
raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc")
else:
raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location")
# make a file accessor for the (location, target_relative_root)
file_loc_proc = FilepathHandler(relative_root=os.path.join(location,file_loc_proc)).process
return (file_loc_proc, location)
def file_loc_proc_from_full_path(fullpath):
return FilepathHandler(relative_root=fullpath).process
def fullpath_to_s3_kargs(filename):
# remove slash suffix if present (because self.sound_file_root_folder ends with / already)
if filename.startswith('/'):
filename = filename[1:]
mother_root = pfile_name.get_highest_level_folder(filename)
rest_of_the_filepath = filename.replace(mother_root + '/','',1)
return {
'bucket_name': mother_root,
'key_name': rest_of_the_filepath
}
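# Illustrative mapping, assuming pfile_name.get_highest_level_folder('a/b/c') returns 'a':
#   fullpath_to_s3_kargs('loc-data/tests/foo.txt')
#   -> {'bucket_name': 'loc-data', 'key_name': 'tests/foo.txt'}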
class ExtensionHandler(object):
def __init__(self, extension=None, force_extension=False):
self.extension = extension
self.force_extension = force_extension
def process(self, file_spec):
if self.force_extension:
return pfile_name.replace_extension(file_spec, self.extension)
else:
return pfile_name.add_extension_if_not_present(file_spec, self.extension)
class FilepathHandler(object):
def __init__(self, relative_root=''):
self.relative_root = relative_root
def process(self, filepath=''):
return os.path.join(self.relative_root, filepath)
##### LOCAL METHODS
class LocalIOMethods(object):
def __init__(self, encoding="UTF-8"):
self.encoding = encoding
def unicode_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
# pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding)
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))  # pickle needs a binary-mode file
def simple_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))  # pickle needs a binary-mode file
def unicode_load(self, filepath=None, **kwargs):
"""
try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string
"""
return pstr_trans.to_unicode_or_bust(self.simple_load(filepath=filepath, **kwargs))
# try:
# try: # getting it as a pandas object
# return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath))
# except Exception: # getting it as a pickled object
# return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r')))
# except Exception: # getting it as a string
# return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath))
def simple_load(self, filepath=None, **kwargs):
"""
try pd.read_pickle, pickle.load, and file_to.string in that order
"""
try:
try: # getting it as a pandas object
return pd.read_pickle(path=filepath)
except Exception: # getting it as a pickled object
                return pickle.load(file=open(filepath, 'rb'))  # binary mode for pickle
except Exception: # getting it as a string
return file_to.string(filename=filepath)
##### S3 METHODS
class S3IOMethods(object):
def __init__(self, **kwargs):
self.s3 = S3(**kwargs)
def unicode_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=pstr_trans.to_unicode_or_bust(obj), key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def simple_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=obj, key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def unicode_load(self, key_name, bucket_name):
"""
try pickle.loading, and if it doesn't work, try file_to.string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return pstr_trans.to_unicode_or_bust(self.s3.loads(key_name=key_name, bucket_name=bucket_name))
def simple_load(self, key_name, bucket_name):
"""
try pickle.loading, and if it doesn't work, try file_to.string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return self.s3.loads(key_name=key_name, bucket_name=bucket_name)
def copy_local_file_to_fun(self, filepath, key_name, bucket_name):
return self.s3.dumpf(f=filepath, key_name=key_name, bucket_name=bucket_name)
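# A hypothetical usage sketch; the relative root, file name and saved object are placeholders,
# and the calls mirror the factory and Accessor methods defined above:
# facc = for_local(relative_root='test', extension='.p', force_extension=True)
# facc.save({'greeting': 'hello'}, 'my_data')      # pickled to <MS_DATA>/test/my_data.p
# obj = facc.load('my_data')
# print(facc.filepath('my_data'), facc.exists('my_data'))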
| <filename>pfile/accessor.py
"""File access utils"""
__author__ = 'thorwhalen'
# from ut.datapath import datapath
import pickle
import os
from ut.util.importing import get_environment_variable
import pandas as pd
import ut.pfile.to as file_to
import ut.pfile.name as pfile_name
import ut.pstr.to as pstr_to
from ut.serialize.local import Local
from ut.serialize.s3 import S3
from os import environ # does this load the whole array? Can we just take MS_DATA instead?
import ut.pstr.trans as pstr_trans
import shutil
try:
MS_DATA = get_environment_variable('MS_DATA')
except KeyError:
MS_DATA = ''
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
####################################################################################################################
# Quick Utils
def ms_data_path(relative_root, root_folder=MS_DATA):
return os.path.join(pfile_name.ensure_slash_suffix(root_folder), relative_root)
####################################################################################################################
# FACTORIES
def for_local(relative_root='', read_only=False, extension=None, force_extension=False, root_folder=MS_DATA, **kwargs):
# if a full path (i.e. starting with "/" is entered as a relative_root, then take it as the sound_file_root_folder
if relative_root and ((relative_root[0] == '/') or (relative_root[0] == '~')):
root_folder = relative_root
relative_root = ''
elif relative_root == 'test': # if relative root is test...
relative_root = 'test'
print("you asked for a local test, so I forced the root to be %s" % relative_root)
# ensure that sound_file_root_folder ends with a "/"
file_handler = FilepathHandler(relative_root=pfile_name.ensure_slash_suffix(root_folder)+relative_root)
# take care of extensions
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_LOCAL,
read_only=read_only,
**kwargs
)
instance._set_local_defaults()
return instance
def for_s3(relative_root='loc-data', read_only=False, extension=None, force_extension=False, **kwargs):
if relative_root == 'test':
relative_root = 'loc-data/test'
print("you asked for a s3 test, so I forced the root to be %s" % relative_root)
file_handler = FilepathHandler(relative_root=relative_root)
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_S3,
read_only=read_only,
**kwargs
)
save_kwargs = instance.mk_save_kwargs(relative_root)
try:
bucket_name = save_kwargs['bucket_name']
base_folder = save_kwargs['key_name']
except:
print("couldn't get bucket_name and key_name for relative_root")
instance.s3 = S3(bucket_name=bucket_name, base_folder=base_folder)
instance._set_s3_defaults()
return instance
####################################################################################################################
class Accessor(object):
LOCATION_LOCAL = LOCATION_LOCAL
LOCATION_S3 = LOCATION_S3
def __init__(self,
file_loc_proc=None,
location=LOCATION_LOCAL,
mk_save_kwargs=None,
pre_save_proc=None,
save_fun=None,
mk_load_kwargs=None,
load_fun=None,
post_load_proc=None,
read_only=False,
**kwargs):
# if file_loc_proc:
# self.file_loc_proc = file_loc_proc
# else:
# self.file_loc_proc = FilepathHandler().process
self.file_loc_proc = file_loc_proc
self.location = location
self.mk_save_kwargs = mk_save_kwargs
self.pre_save_proc = pre_save_proc
self.save_fun = save_fun
self.mk_load_kwargs = mk_load_kwargs
self.load_fun = load_fun
self.post_load_proc = post_load_proc
self.read_only = read_only
for k, v in list(kwargs.items()):
self.__setattr__(k,v)
self._guess_missing_attributes()
def __call__(self, *args, **kwargs):
return self.filepath(*args, **kwargs)
####################################################################################################################
# INSTANCE METHODS
def root_folder(self):
if self.extension:
return self.file_loc_proc('')[:(-len(self.extension))]
else:
return self.file_loc_proc('')
def filepath(self, file_spec):
return self.file_loc_proc(file_spec)
def exists(self, file_spec):
return os.path.exists(self.filepath(file_spec))
def save(self, obj, file_spec, **kwargs):
if self.read_only:
raise BaseException("read_only was set to True, so you can't save anything")
else:
# make the dict specifying the input to the save_fun
file_spec = self.file_loc_proc(file_spec)
if self.pre_save_proc:
obj = self.pre_save_proc(obj)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(file_spec)
self.save_fun(obj, **file_spec_kwargs)
else:
self.save_fun(obj, file_spec)
def append(self, obj, file_spec, **kwargs): # TODO: Write this code someday
"""
Intent of this function is to append data to a file's data without having to specify how to do so.
For example, if the obj is a string and the file is a text file, use file append.
If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the
data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes
appended.
Etc.
"""
pass
# if isinstance(obj, basestring):
# raise ValueError("strings not implemented yet")
# elif isinstance(obj, (pd.DataFrame, pd.Series)):
# pass
def load(self, file_spec, **kwargs):
file_spec = self.file_loc_proc(file_spec)
if pfile_name.get_extension(file_spec) not in ['.xls', '.xlsx']:
if self.mk_load_kwargs:
file_spec_kwargs = self.mk_load_kwargs(file_spec)
obj = self.load_fun(**file_spec_kwargs)
else:
obj = self.load_fun(file_spec)
if self.post_load_proc:
obj = self.post_load_proc(obj)
else:
# obj = pd.read_excel(file_spec, **kwargs)
xls = pd.ExcelFile(file_spec)
kwargs = dict({'sheetname': xls.sheet_names[0]}, **kwargs) # take first sheet if sheet not specified
obj = pd.read_excel(file_spec, **kwargs)
#obj = xls.parse(**kwargs)
return obj
def copy_local_file_to(self, local_file_path, target_file_spec):
'''
Copies a file from the local computer to self.filepath(target_file_spec)
:param local_file_path:
:param target_file_spec:
:return:
'''
if self.read_only:
raise BaseException("read_only was set to True, so you can't copy anything to this location")
else:
if self.location == LOCATION_LOCAL:
if not os.path.exists(local_file_path):
local_file_path = self.filepath(local_file_path)
shutil.copyfile(local_file_path, self.filepath(target_file_spec))
elif self.location == LOCATION_S3:
# make the dict specifying the input to the save_fun
target_file_spec = self.file_loc_proc(target_file_spec)
if self.pre_save_proc:
local_file_path = self.pre_save_proc(local_file_path)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(target_file_spec)
self.copy_local_file_to_fun(local_file_path, **file_spec_kwargs)
else:
                    raise RuntimeError("this shouldn't happen")
else:
raise ValueError("unknown location")
def copy_to(self, target_relative_root, file_spec, target_location=None):
if isinstance(target_relative_root, str):
target_relative_root, target_location = \
_make_a_file_loc_proc_and_location_from_string_specifications(target_relative_root, target_location)
# make a file accessor for the (target_location, target_relative_root)
facc = Accessor(relative_root=target_relative_root, location=target_location)
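        # TODO: the actual copy of file_spec into facc is not implemented yet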
####################################################################################################################
# PARTIAL FACTORIES
def _add_extension_handler(self, extension, force_extension=False):
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
        previous_proc = self.file_loc_proc
        self.file_loc_proc = lambda x: previous_proc(extension_handler.process(x))
def _guess_missing_attributes(self):
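        # Infer file_loc_proc and location from whatever was given (a string
        # file_loc_proc is parsed into a (processor, location) pair), then fill
        # in the remaining attributes with location-specific defaults.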
if self.file_loc_proc is None: # if no file_loc_proc is given
if self.location is not None and isinstance(self.location, str):
                self.file_loc_proc = self.location
else:
                self.file_loc_proc = LOCATION_LOCAL
elif isinstance(self.file_loc_proc, str): # if file_loc_proc is a string
self.file_loc_proc, self.location = \
_make_a_file_loc_proc_and_location_from_string_specifications(self.file_loc_proc, self.location)
# if self.file_loc_proc==LOCATION_LOCAL:
# self.location = LOCATION_LOCAL
# self.file_loc_proc = ''
# elif self.file_loc_proc==LOCATION_S3:
# self.location = LOCATION_S3
# self.file_loc_proc = ''
# else:
# if self.location==LOCATION_LOCAL:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process
# elif self.location==LOCATION_S3:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process
# set defaults for remaining missing attributes
self._set_defaults()
def _set_defaults(self):
if self.location is None:
print("setting location to LOCAL (because you didn't specify a location)")
self.location = LOCATION_LOCAL
if self.location == LOCATION_LOCAL:
self._set_local_defaults()
elif self.location == LOCATION_S3:
self._set_s3_defaults()
def _set_local_defaults(self, root_folder=MS_DATA):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root=os.path.join(root_folder)).process
self.save_fun = self.save_fun or LocalIOMethods().unicode_save
self.load_fun = self.load_fun or LocalIOMethods().unicode_load
# self.pre_save_proc = self.pre_save_proc or FilepathHandler().process
# self.post_load_proc = self.post_load_proc or FilepathHandler().process
def _set_s3_defaults(self):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root='loc-data').process
self.mk_save_kwargs = fullpath_to_s3_kargs
self.mk_load_kwargs = fullpath_to_s3_kargs
self.save_fun = self.save_fun or S3IOMethods().unicode_save
self.load_fun = self.load_fun or S3IOMethods().unicode_load
self.copy_local_file_to_fun = S3IOMethods().copy_local_file_to_fun
####################################################################################################################
# OBJECT UTILS
def local_file_loc_proc_simple(self, file_spec):
# add extension
file_spec = self.handle_extension(file_spec)
        # remove leading slash if present (the root folder path already ends with /)
        if file_spec.startswith('/'):
            file_spec = file_spec[1:]
        return file_spec
def handle_extension(self, file_spec):
if self.extension:
if self.force_extension:
file_spec = pfile_name.replace_extension(file_spec, self.extension)
else:
file_spec = pfile_name.add_extension_if_not_present(file_spec, self.extension)
return os.path.join(self.root_folder, file_spec)
####################################################################################################################
# OTHER UTILS
def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location):
if file_loc_proc is None and isinstance(location, str):
file_loc_proc = location + "/"
location = None
elif location is None and isinstance(file_loc_proc, str):
        first_folder = pfile_name.get_highest_level_folder(file_loc_proc)
if first_folder in [LOCATION_LOCAL, LOCATION_S3]:
location = first_folder # set the location to first_folder
            file_loc_proc = file_loc_proc.replace(location + "/", "")  # remove the first_folder
else:
raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc")
else:
raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location")
# make a file accessor for the (location, target_relative_root)
file_loc_proc = FilepathHandler(relative_root=os.path.join(location,file_loc_proc)).process
return (file_loc_proc, location)
def file_loc_proc_from_full_path(fullpath):
return FilepathHandler(relative_root=fullpath).process
def fullpath_to_s3_kargs(filename):
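    # Split a full path into S3 keyword args: the first folder is taken as the
    # bucket name and the remainder of the path as the key name.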
    # remove leading slash if present
if filename.startswith('/'):
filename = filename[1:]
mother_root = pfile_name.get_highest_level_folder(filename)
rest_of_the_filepath = filename.replace(mother_root + '/','',1)
return {
'bucket_name': mother_root,
'key_name': rest_of_the_filepath
}
class ExtensionHandler(object):
def __init__(self, extension=None, force_extension=False):
self.extension = extension
self.force_extension = force_extension
def process(self, file_spec):
if self.force_extension:
return pfile_name.replace_extension(file_spec, self.extension)
else:
return pfile_name.add_extension_if_not_present(file_spec, self.extension)
class FilepathHandler(object):
def __init__(self, relative_root=''):
self.relative_root = relative_root
def process(self, filepath=''):
return os.path.join(self.relative_root, filepath)
##### LOCAL METHODS
class LocalIOMethods(object):
def __init__(self, encoding="UTF-8"):
self.encoding = encoding
def unicode_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
# pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding)
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))
def simple_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))
def unicode_load(self, filepath=None, **kwargs):
"""
try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string
"""
return pstr_trans.to_unicode_or_bust(self.simple_load(filepath=filepath, **kwargs))
# try:
# try: # getting it as a pandas object
# return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath))
# except Exception: # getting it as a pickled object
# return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r')))
# except Exception: # getting it as a string
# return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath))
def simple_load(self, filepath=None, **kwargs):
"""
try pd.read_pickle, pickle.load, and file_to.string in that order
"""
try:
try: # getting it as a pandas object
return pd.read_pickle(path=filepath)
except Exception: # getting it as a pickled object
                return pickle.load(file=open(filepath, 'rb'))
except Exception: # getting it as a string
return file_to.string(filename=filepath)
##### S3 METHODS
class S3IOMethods(object):
def __init__(self, **kwargs):
self.s3 = S3(**kwargs)
def unicode_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=pstr_trans.to_unicode_or_bust(obj), key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def simple_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=obj, key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def unicode_load(self, key_name, bucket_name):
"""
        try loading as a pickled object first; if that fails, fall back to loading the raw string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return pstr_trans.to_unicode_or_bust(self.s3.loads(key_name=key_name, bucket_name=bucket_name))
def simple_load(self, key_name, bucket_name):
"""
        try loading as a pickled object first; if that fails, fall back to loading the raw string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return self.s3.loads(key_name=key_name, bucket_name=bucket_name)
def copy_local_file_to_fun(self, filepath, key_name, bucket_name):
return self.s3.dumpf(f=filepath, key_name=key_name, bucket_name=bucket_name)
| en | 0.390053 | File access utils # from ut.datapath import datapath # does this load the whole array? Can we just take MS_DATA instead? #################################################################################################################### # Quick Utils #################################################################################################################### # FACTORIES # if a full path (i.e. starting with "/" is entered as a relative_root, then take it as the sound_file_root_folder # if relative root is test... # ensure that sound_file_root_folder ends with a "/" # take care of extensions #################################################################################################################### # if file_loc_proc: # self.file_loc_proc = file_loc_proc # else: # self.file_loc_proc = FilepathHandler().process #################################################################################################################### # INSTANCE METHODS # make the dict specifying the input to the save_fun # TODO: Write this code someday Intent of this function is to append data to a file's data without having to specify how to do so. For example, if the obj is a string and the file is a text file, use file append. If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes appended. Etc. # if isinstance(obj, basestring): # raise ValueError("strings not implemented yet") # elif isinstance(obj, (pd.DataFrame, pd.Series)): # pass # obj = pd.read_excel(file_spec, **kwargs) # take first sheet if sheet not specified #obj = xls.parse(**kwargs) Copies a file from the local computer to self.filepath(target_file_spec) :param local_file_path: :param target_file_spec: :return: # make the dict specifying the input to the save_fun # make a file accessor for the (target_location, target_relative_root) #################################################################################################################### # PARTIAL FACTORIES # if no file_loc_proc is given # if file_loc_proc is a string # if self.file_loc_proc==LOCATION_LOCAL: # self.location = LOCATION_LOCAL # self.file_loc_proc = '' # elif self.file_loc_proc==LOCATION_S3: # self.location = LOCATION_S3 # self.file_loc_proc = '' # else: # if self.location==LOCATION_LOCAL: # self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process # elif self.location==LOCATION_S3: # self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process # set defaults for remaining missing attributes # set defaults for local if attr is None # self.pre_save_proc = self.pre_save_proc or FilepathHandler().process # self.post_load_proc = self.post_load_proc or FilepathHandler().process # set defaults for local if attr is None #################################################################################################################### # OBJECT UTILS # add extension # remove slash suffix if present (because self.sound_file_root_folder ends with / already) #################################################################################################################### # OTHER UTILS # set the location to first_folder # remove the first_folder # make a file accessor for the (location, target_relative_root) # remove slash suffix if present (because self.sound_file_root_folder ends with / already) ##### 
LOCAL METHODS # pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding) # pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding) # pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding) try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string # try: # try: # getting it as a pandas object # return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath)) # except Exception: # getting it as a pickled object # return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r'))) # except Exception: # getting it as a string # return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath)) try pd.read_pickle, pickle.load, and file_to.string in that order # getting it as a pandas object # getting it as a pickled object # getting it as a string ##### S3 METHODS try pickle.loading, and if it doesn't work, try file_to.string try pickle.loading, and if it doesn't work, try file_to.string | 2.255971 | 2 |
scripts/statistics.py | cstenkamp/MastersThesisText | 0 | 7796 | <filename>scripts/statistics.py
import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta
FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000
def return_piped_cmd(cmd, stdin=None):
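    """Run a shell pipeline of at most two commands, e.g. "detex thesis.tex | wc -w".
    The command before the "|" is started with Popen (optionally fed `stdin`); if a
    second command follows, the first one's stdout is piped into it and the decoded
    output is returned.
    """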
cmd = cmd.split("|")
if not stdin:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdout=subprocess.PIPE)
else:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ps.stdin.write(stdin.encode("UTF-8"))
ps.stdin.close()
if len(cmd) == 1:
return ps.stdout.read().decode("UTF-8")
output = subprocess.check_output(cmd[1].strip().split(" "), stdin=ps.stdout).decode("UTF-8")
ps.wait()
return output
def get_todos(fname=None, txt=None):
if fname:
with open(fname, "r") as rfile:
txt = rfile.read()
txt = txt.replace("% ", "%").lower()
return txt.count("%todo")
def get_npages(fname):
tmp = return_piped_cmd(f'pdfinfo {fname.replace(".tex", ".pdf")}')
return int([i for i in tmp.split("\n") if "Pages:" in i][0][len("Pages:"):].strip())
def github_get_npages(owner, repo, pdfname):
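    """Walk the repo's GitHub Actions artifacts, download each one through
    nightly.link, count the pages of the contained PDF, and return a Series
    indexed by the artifact's creation time."""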
date_pages = {}
resp = requests.get(f"https://api.github.com/repos/{owner}/{repo}/actions/artifacts", headers=dict(Accept="application/vnd.github.v3+json"))
for i in resp.json()["artifacts"]:
art_id = i["url"][i["url"].rfind("/")+1:]
re2 = requests.get(f"https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip")
if re2.status_code != 404:
# print(i["created_at"])
archive = zipfile.ZipFile(io.BytesIO(re2.content))
with tempfile.NamedTemporaryFile(suffix=".pdf") as wfile:
wfile.write(archive.read(pdfname))
n_pages = get_npages(wfile.name)
# print(f"Pages: {n_pages}")
date_pages[pd.to_datetime([i["created_at"]]).to_pydatetime()[0]] = n_pages
return pd.Series(date_pages)
def plot_df(df):
ax1 = df["Words"].plot(color="red", linestyle="-", marker="o", ylabel="Words")
ax1.set_ylim(0, max(df["Words"].max(), DISP_WORDSMAX))
ax2 = ax1.twinx()
ax2.spines['right'].set_position(('axes', 1.0))
df["Todos"].plot(ax=ax2, color="blue", linestyle="-", marker="x", ylabel="Todos")
ax3 = ax1.twinx()
df["Pages"].plot(ax=ax3, color="yellow", linestyle="", marker="s", ylabel="Pages")
for ax in [ax2, ax3]: ax.set_ylim((0, max(df["Todos"].max(), df["Pages"].max(), DISP_PAGESMAX)))
ax3.yaxis.set_ticklabels([])
lines, labels = list(zip(*[[i[0] for i in ax.get_legend_handles_labels()] for ax in [ax1, ax2, ax3]]))
plt.legend(lines, labels, loc=0)
plt.show()
def create_history_df(repo_dir, filename):
#print(abspath(repo_dir))
repo = git.Repo(repo_dir)
all_commits = {}
for commit in repo.iter_commits():
txt = (commit.tree / filename).data_stream.read().decode("UTF-8")
n_words = int(return_piped_cmd("detex | wc -w", stdin=txt).strip())
n_todos = get_todos(txt=txt)
# print(datetime.fromtimestamp(commit.committed_date))
# print(f"words: {n_words}, todos: {n_todos}")
all_commits[pd.to_datetime(commit.committed_datetime, utc=True)] = [n_words, n_todos]
df = pd.DataFrame(all_commits, index=["Words", "Todos"]).T
return df
def merge_page_df(df, date_pages):
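    """For each commit timestamp in df, look up the nearest artifact created at or
    after it; if it lies within two hours, store its page count in the "Pages" column."""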
for date in df.index:
try:
nearest_datepage_after = date_pages.index[date_pages.index.get_loc(date, method='bfill')]
except KeyError:
continue
if nearest_datepage_after-date <= timedelta(hours=2):
df.loc[date, "Pages"] = int(date_pages[nearest_datepage_after])
return df
if __name__ == "__main__":
#history
df = create_history_df(dirname(FILENAME), "thesis.tex")
date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
df = merge_page_df(df, date_pages)
plot_df(df)
#current
n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
n_pages = get_npages(FILENAME)
n_todos = get_todos(FILENAME)
print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}") | <filename>scripts/statistics.py
import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta
FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000
def return_piped_cmd(cmd, stdin=None):
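    """Run a shell pipeline of at most two commands, e.g. "detex thesis.tex | wc -w".
    The command before the "|" is started with Popen (optionally fed `stdin`); if a
    second command follows, the first one's stdout is piped into it and the decoded
    output is returned.
    """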
cmd = cmd.split("|")
if not stdin:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdout=subprocess.PIPE)
else:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ps.stdin.write(stdin.encode("UTF-8"))
ps.stdin.close()
if len(cmd) == 1:
return ps.stdout.read().decode("UTF-8")
output = subprocess.check_output(cmd[1].strip().split(" "), stdin=ps.stdout).decode("UTF-8")
ps.wait()
return output
def get_todos(fname=None, txt=None):
if fname:
with open(fname, "r") as rfile:
txt = rfile.read()
txt = txt.replace("% ", "%").lower()
return txt.count("%todo")
def get_npages(fname):
tmp = return_piped_cmd(f'pdfinfo {fname.replace(".tex", ".pdf")}')
return int([i for i in tmp.split("\n") if "Pages:" in i][0][len("Pages:"):].strip())
def github_get_npages(owner, repo, pdfname):
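    """Walk the repo's GitHub Actions artifacts, download each one through
    nightly.link, count the pages of the contained PDF, and return a Series
    indexed by the artifact's creation time."""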
date_pages = {}
resp = requests.get(f"https://api.github.com/repos/{owner}/{repo}/actions/artifacts", headers=dict(Accept="application/vnd.github.v3+json"))
for i in resp.json()["artifacts"]:
art_id = i["url"][i["url"].rfind("/")+1:]
re2 = requests.get(f"https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip")
if re2.status_code != 404:
# print(i["created_at"])
archive = zipfile.ZipFile(io.BytesIO(re2.content))
with tempfile.NamedTemporaryFile(suffix=".pdf") as wfile:
wfile.write(archive.read(pdfname))
n_pages = get_npages(wfile.name)
# print(f"Pages: {n_pages}")
date_pages[pd.to_datetime([i["created_at"]]).to_pydatetime()[0]] = n_pages
return pd.Series(date_pages)
def plot_df(df):
ax1 = df["Words"].plot(color="red", linestyle="-", marker="o", ylabel="Words")
ax1.set_ylim(0, max(df["Words"].max(), DISP_WORDSMAX))
ax2 = ax1.twinx()
ax2.spines['right'].set_position(('axes', 1.0))
df["Todos"].plot(ax=ax2, color="blue", linestyle="-", marker="x", ylabel="Todos")
ax3 = ax1.twinx()
df["Pages"].plot(ax=ax3, color="yellow", linestyle="", marker="s", ylabel="Pages")
for ax in [ax2, ax3]: ax.set_ylim((0, max(df["Todos"].max(), df["Pages"].max(), DISP_PAGESMAX)))
ax3.yaxis.set_ticklabels([])
lines, labels = list(zip(*[[i[0] for i in ax.get_legend_handles_labels()] for ax in [ax1, ax2, ax3]]))
plt.legend(lines, labels, loc=0)
plt.show()
def create_history_df(repo_dir, filename):
#print(abspath(repo_dir))
repo = git.Repo(repo_dir)
all_commits = {}
for commit in repo.iter_commits():
txt = (commit.tree / filename).data_stream.read().decode("UTF-8")
n_words = int(return_piped_cmd("detex | wc -w", stdin=txt).strip())
n_todos = get_todos(txt=txt)
# print(datetime.fromtimestamp(commit.committed_date))
# print(f"words: {n_words}, todos: {n_todos}")
all_commits[pd.to_datetime(commit.committed_datetime, utc=True)] = [n_words, n_todos]
df = pd.DataFrame(all_commits, index=["Words", "Todos"]).T
return df
def merge_page_df(df, date_pages):
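    """For each commit timestamp in df, look up the nearest artifact created at or
    after it; if it lies within two hours, store its page count in the "Pages" column."""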
for date in df.index:
try:
nearest_datepage_after = date_pages.index[date_pages.index.get_loc(date, method='bfill')]
except KeyError:
continue
if nearest_datepage_after-date <= timedelta(hours=2):
df.loc[date, "Pages"] = int(date_pages[nearest_datepage_after])
return df
if __name__ == "__main__":
#history
df = create_history_df(dirname(FILENAME), "thesis.tex")
date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
df = merge_page_df(df, date_pages)
plot_df(df)
#current
n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
n_pages = get_npages(FILENAME)
n_todos = get_todos(FILENAME)
print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}") | en | 0.234204 | # print(i["created_at"]) # print(f"Pages: {n_pages}") #print(abspath(repo_dir)) # print(datetime.fromtimestamp(commit.committed_date)) # print(f"words: {n_words}, todos: {n_todos}") #history #current | 2.512454 | 3 |
setup.py | TheFraserLab/enrich_pvalues | 1 | 7797 | <gh_stars>1-10
"""Installation instructions for enrich_pvalues."""
import os
from setuptools import setup
import enrich_pvalues # For version
VERSION=enrich_pvalues.__version__
GITHUB='https://github.com/MikeDacre/enrich_pvalues'
with open('requirements.txt') as fin:
REQUIREMENTS = [
i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')]
]
def read(fname):
"""Read the contents of a file in this dir."""
with open(os.path.join(os.path.dirname(__file__), fname)) as fin:
return fin.read()
# Actual setup instructions
setup(
name = 'enrich_pvalues',
version = VERSION,
author = '<NAME>',
author_email = '<EMAIL>',
description = (
"Compare one dataset to another at a variety of p-value cutoffs"
),
keywords = (
"statistics p-values biology molecular-biology console"
),
long_description = read('README.rst'),
license = 'MIT',
# URLs
url = GITHUB,
download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION),
py_modules=['enrich_pvalues'],
entry_points = {
'console_scripts': [
'enrich_pvalues = enrich_pvalues:main',
],
},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
# Requirements
requires=REQUIREMENTS,
install_requires=REQUIREMENTS
)
| """Installation instructions for enrich_pvalues."""
import os
from setuptools import setup
import enrich_pvalues # For version
VERSION=enrich_pvalues.__version__
GITHUB='https://github.com/MikeDacre/enrich_pvalues'
with open('requirements.txt') as fin:
REQUIREMENTS = [
i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')]
]
def read(fname):
"""Read the contents of a file in this dir."""
with open(os.path.join(os.path.dirname(__file__), fname)) as fin:
return fin.read()
# Actual setup instructions
setup(
name = 'enrich_pvalues',
version = VERSION,
author = '<NAME>',
author_email = '<EMAIL>',
description = (
"Compare one dataset to another at a variety of p-value cutoffs"
),
keywords = (
"statistics p-values biology molecular-biology console"
),
long_description = read('README.rst'),
license = 'MIT',
# URLs
url = GITHUB,
download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION),
py_modules=['enrich_pvalues'],
entry_points = {
'console_scripts': [
'enrich_pvalues = enrich_pvalues:main',
],
},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
# Requirements
requires=REQUIREMENTS,
install_requires=REQUIREMENTS
) | en | 0.577805 | Installation instructions for enrich_pvalues. # For version Read the contents of a file in this dir. # Actual setup instructions # URLs # See https://pypi.python.org/pypi?%3Aaction=list_classifiers # 'Development Status :: 5 - Production/Stable', # Requirements | 2.223215 | 2 |
homeschool/students/tests/test_forms.py | brandonmcclure/homeschool | 0 | 7798 | import datetime
from homeschool.courses.tests.factories import (
CourseFactory,
CourseTaskFactory,
GradedWorkFactory,
)
from homeschool.schools.tests.factories import GradeLevelFactory
from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm
from homeschool.students.models import Coursework, Grade
from homeschool.students.tests.factories import (
CourseworkFactory,
EnrollmentFactory,
GradeFactory,
StudentFactory,
)
from homeschool.test import TestCase
class TestCourseworkForm(TestCase):
def test_is_valid(self):
"""The coursework validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_student_can_create_coursework(self):
"""The student is enrolled in a course that contains the task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The student is not enrolled in this course."
]
def test_save_new_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_existing_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_deletes_coursework(self):
"""A blank completed date deletes an existing coursework."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 0
)
def test_completed_date_outside_school_year(self):
"""The completed data must be in the school year."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(
grade_level.school_year.start_date - datetime.timedelta(days=1)
),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The completed date must be in the school year."
]
def test_invalid_course_task(self):
"""An invalid course task is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": "0",
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_invalid_completed_date(self):
"""An invalid completed date is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": "boom",
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
class TestEnrollmentForm(TestCase):
def test_students_only_enroll_in_one_grade_level_per_year(self):
"""A student can only be enrolled in a single grade level in a school year."""
user = self.make_user()
enrollment = EnrollmentFactory(
student__school=user.school, grade_level__school_year__school=user.school
)
another_grade_level = GradeLevelFactory(
school_year=enrollment.grade_level.school_year
)
data = {
"student": str(enrollment.student.id),
"grade_level": str(another_grade_level.id),
}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert (
"A student may not be enrolled in multiple grade levels in a school year. "
f"{enrollment.student} is enrolled in {enrollment.grade_level}."
in form.non_field_errors()
)
def test_no_grade_level(self):
"""A missing grade level raises a validation error."""
user = self.make_user()
school = user.school
enrollment = EnrollmentFactory(
student__school=school, grade_level__school_year__school=school
)
data = {"student": str(enrollment.student.id), "grade_level": "0"}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert "You need to select a grade level." in form.non_field_errors()
class TestGradeForm(TestCase):
def test_is_valid(self):
"""The new grade validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_invalid_graded_work(self):
"""An invalid graded work is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
GradedWorkFactory(course_task__course=course)
data = {"student": str(student.id), "graded_work": "0", "score": "100"}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_save(self):
"""The form creates a new grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(
student=student, graded_work=graded_work, score=100
).count()
== 1
)
def test_save_update(self):
"""The form updates a grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
GradeFactory(student=student, graded_work=graded_work)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(student=student, graded_work=graded_work).count() == 1
)
| import datetime
from homeschool.courses.tests.factories import (
CourseFactory,
CourseTaskFactory,
GradedWorkFactory,
)
from homeschool.schools.tests.factories import GradeLevelFactory
from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm
from homeschool.students.models import Coursework, Grade
from homeschool.students.tests.factories import (
CourseworkFactory,
EnrollmentFactory,
GradeFactory,
StudentFactory,
)
from homeschool.test import TestCase
class TestCourseworkForm(TestCase):
def test_is_valid(self):
"""The coursework validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_student_can_create_coursework(self):
"""The student is enrolled in a course that contains the task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The student is not enrolled in this course."
]
def test_save_new_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_existing_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_deletes_coursework(self):
"""A blank completed date deletes an existing coursework."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 0
)
def test_completed_date_outside_school_year(self):
"""The completed data must be in the school year."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(
grade_level.school_year.start_date - datetime.timedelta(days=1)
),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The completed date must be in the school year."
]
def test_invalid_course_task(self):
"""An invalid course task is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": "0",
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_invalid_completed_date(self):
"""An invalid completed date is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": "boom",
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
class TestEnrollmentForm(TestCase):
def test_students_only_enroll_in_one_grade_level_per_year(self):
"""A student can only be enrolled in a single grade level in a school year."""
user = self.make_user()
enrollment = EnrollmentFactory(
student__school=user.school, grade_level__school_year__school=user.school
)
another_grade_level = GradeLevelFactory(
school_year=enrollment.grade_level.school_year
)
data = {
"student": str(enrollment.student.id),
"grade_level": str(another_grade_level.id),
}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert (
"A student may not be enrolled in multiple grade levels in a school year. "
f"{enrollment.student} is enrolled in {enrollment.grade_level}."
in form.non_field_errors()
)
def test_no_grade_level(self):
"""A missing grade level raises a validation error."""
user = self.make_user()
school = user.school
enrollment = EnrollmentFactory(
student__school=school, grade_level__school_year__school=school
)
data = {"student": str(enrollment.student.id), "grade_level": "0"}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert "You need to select a grade level." in form.non_field_errors()
class TestGradeForm(TestCase):
def test_is_valid(self):
"""The new grade validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_invalid_graded_work(self):
"""An invalid graded work is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
GradedWorkFactory(course_task__course=course)
data = {"student": str(student.id), "graded_work": "0", "score": "100"}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_save(self):
"""The form creates a new grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(
student=student, graded_work=graded_work, score=100
).count()
== 1
)
def test_save_update(self):
"""The form updates a grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
GradeFactory(student=student, graded_work=graded_work)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(student=student, graded_work=graded_work).count() == 1
)
| en | 0.924085 | The coursework validates. The student is enrolled in a course that contains the task. A new coursework is created for a student and task. A new coursework is created for a student and task. A blank completed date deletes an existing coursework. The completed data must be in the school year. An invalid course task is an error. An invalid completed date is an error. A student can only be enrolled in a single grade level in a school year. A missing grade level raises a validation error. The new grade validates. An invalid graded work is an error. The form creates a new grade. The form updates a grade. | 2.814668 | 3 |
Mining_Projects/getAllProjects_Parallel.py | ai-se/heroes_compsci | 0 | 7799 | """ @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
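## Each funcN() below scans its own range of repository ids through the paginated
## /repositories?since=<id> endpoint, rotating through the API tokens (and sleeping
## when rate-limited), keeps only repositories with more than 10 issues, at least one
## pull request and more than 20 commits, and writes its result to repo_fileN.json.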
def func1():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted)
i = 0
api_url = 'https://api.github.com/'
while i < 10000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 1 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file1.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 1 finished", len(repo_result))
def func2():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted)
i = 10000
api_url = 'https://api.github.com/'
while i < 20000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 2 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file2.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 2 finished", len(repo_result))
def func3():
repo_result = []
    Token_list = ['**', '**', '**', '**', '**']  # GitHub API tokens (redacted)
i = 20000
api_url = 'https://api.github.com/'
while i < 30000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 3 ")
break
    ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file3.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 3 finished", len(repo_result))
def func4():
repo_result = []
Token_list = ['**', '**', '**', '**', '**']  # GitHub personal access tokens (values redacted)
i = 30000
api_url = 'https://api.github.com/'
while i < 40000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 4 ")
break
## Removing projects whose issues endpoint returns 10 or fewer items on the first page
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file4.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 4 finished", len(repo_result))
if __name__ == '__main__':
lock = Lock()
p1 = Process(target=func1)
p2 = Process(target=func2)
p3 = Process(target=func3)
p4 = Process(target=func4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
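## --- Post-processing sketch (not part of the original script) ---
## A minimal, hypothetical helper for combining the four per-range output files written
## above into a single list. The file names mirror the ones used by func1-func4 (adjust
## them if those functions change), and it relies on the `json` import at the top of the script.
def merge_repo_files(paths=("repo_file1.json", "repo_file2.json", "repo_file3.json", "repo_file4.json")):
    merged = []
    for path in paths:
        # Each file holds a JSON list of project dictionaries produced by one process.
        with open(path) as repo_file:
            merged.extend(json.load(repo_file))
    return merged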
| """ @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
def func1():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 0
api_url = 'https://api.github.com/'
while i < 10000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 1 ")
break
## Removing projects having less than 8 issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file1.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 1 finished", len(repo_result))
def func2():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 10000
api_url = 'https://api.github.com/'
while i < 20000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 2 ")
break
## Removing projects having less than 8 issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file2.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 2 finished", len(repo_result))
def func3():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 20000
api_url = 'https://api.github.com/'
while i < 30000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 3 ")
break
## Removing projects having less than 8 issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file3.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 3 finished", len(repo_result))
def func4():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 30000
api_url = 'https://api.github.com/'
while i < 40000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 4 ")
break
## Removing projects having less than 8 issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file4.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 4 finished", len(repo_result))
if __name__ == '__main__':
lock = Lock()
p1 = Process(target=func1)
p2 = Process(target=func2)
p3 = Process(target=func3)
p4 = Process(target=func4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()