Dataset column schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | lengths / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
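Each row of the table above describes one source file: repository metadata, the file content itself, and per-file statistics. The records below reproduce a handful of rows in that order. As a purely illustrative sketch of how rows with this schema could be loaded and filtered, assuming they are available as a pandas DataFrame (the file name data.parquet is a placeholder, not part of this dump):

import pandas as pd

# Hypothetical serialization of the rows described above.
df = pd.read_parquet("data.parquet")

# Keep small, well-documented Python files, using the score columns from the schema.
subset = df[(df["lang"] == "Python")
            & (df["size"] < 20_000)
            & (df["score_documentation"] > 0.3)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())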
hexsha: 9646e8e2458a9b399cec0bf5ce7ece6cbbdffad6 | size: 1,938 | ext: py | lang: Python
repo: wvu-irl/smart-2 @ b39b6d477b5259b3bf0d96180a154ee1dafae0ac | path: temp/src/square.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from math import radians
import os
import numpy as np
from nav_msgs.msg import Odometry


class DrawASquare():
    def __init__(self):
        # initialize
        rospy.init_node('drawasquare', anonymous=True)
        # what to do when you hit ctrl + c
        rospy.on_shutdown(self.shutdown)
        self.cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        # 10 Hz
        r = rospy.Rate(10)
        # create two different Twist() variables: one for moving forward, one for turning
        # let's go forward at 0.25 m/s
        move_cmd = Twist()
        move_cmd.linear.x = 0.25
        # by default angular.z is 0 so setting this isn't required
        # let's turn at 45 deg/s
        turn_cmd = Twist()
        turn_cmd.linear.x = 0
        turn_cmd.angular.z = radians(45)  # 45 deg/s in radians/s
        # to keep drawing squares: go forward for 4 seconds (40 x 10 Hz), then turn for 2 seconds
        count = 0
        while not rospy.is_shutdown():
            # go forward 1 m (4 seconds * 0.25 m/s)
            rospy.loginfo("Going Straight")
            for x in range(0, 40):
                self.cmd_vel.publish(move_cmd)
                r.sleep()
            # turn 90 degrees
            rospy.loginfo("Turning")
            for x in range(0, 20):
                self.cmd_vel.publish(turn_cmd)
                r.sleep()
            count = count + 1
            if count == 4:
                # count = 0
                self.shutdown()
            if count == 0:
                rospy.loginfo("TurtleBot should be close to the original starting position (but it's probably way off)")

    def shutdown(self):
        # stop turtlebot
        rospy.loginfo("Stop Drawing Squares")
        self.cmd_vel.publish(Twist())
        rospy.sleep(1)


if __name__ == '__main__':
    try:
        DrawASquare()
    except:
        rospy.loginfo("node terminated.")
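The timing comments above do the bookkeeping implicitly: 40 iterations at 10 Hz is 4 s of forward motion and 20 iterations is 2 s of turning. A quick standalone check of the same arithmetic (plain Python, no ROS needed; the constants are copied from the node above):

rate_hz = 10          # publishing rate used by the node
forward_ticks = 40    # iterations of the forward loop
turn_ticks = 20       # iterations of the turn loop

side_length_m = (forward_ticks / rate_hz) * 0.25   # 4 s at 0.25 m/s -> 1.0 m per side
turn_angle_deg = (turn_ticks / rate_hz) * 45       # 2 s at 45 deg/s -> 90 deg per corner
print(side_length_m, turn_angle_deg)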
avg_line_length: 27.685714 | max_line_length: 120 | alphanum_fraction: 0.583591 | count/score_classes: 1,657 / 0.855005 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 660 / 0.340557

hexsha: 96484d835344ea1b7665583dd26015806d06751a | size: 4,233 | ext: py | lang: Python
repo: jmaces/robust-nets @ 25d49302f9fa5fcc9ded2727de75e96e25243d09 | path: ellipses/script_train_radon_tiramisu_jitter_v6.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: 14 (2020-11-10T07:37:23.000Z to 2022-03-21T15:19:22.000Z) | max_issues_count: null | max_forks_count: 2 (2021-03-13T14:39:36.000Z to 2022-02-17T06:44:29.000Z)
import os
import matplotlib as mpl
import torch
import torchvision
from data_management import IPDataset, Jitter, SimulateMeasurements
from networks import IterativeNet, Tiramisu
from operators import Radon
# ----- load configuration -----
import config # isort:skip
# ----- global configuration -----
mpl.use("agg")
device = torch.device("cuda:0")
torch.cuda.set_device(0)
# ----- measurement configuration -----
theta = torch.linspace(0, 180, 61)[:-1] # 60 lines, exclude endpoint
OpA = Radon(config.n, theta)
# ----- network configuration -----
subnet_params = {
"in_channels": 1,
"out_channels": 1,
"drop_factor": 0.0,
"down_blocks": (5, 7, 9, 12, 15),
"up_blocks": (15, 12, 9, 7, 5),
"pool_factors": (2, 2, 2, 2, 2),
"bottleneck_layers": 20,
"growth_rate": 16,
"out_chans_first_conv": 16,
}
subnet = Tiramisu
it_net_params = {
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
# ----- training configuration -----
mseloss = torch.nn.MSELoss(reduction="sum")
def loss_func(pred, tar):
return mseloss(pred, tar) / pred.shape[0]
train_phases = 1
train_params = {
"num_epochs": [19],
"batch_size": [10],
"loss_func": loss_func,
"save_path": [
os.path.join(
config.RESULTS_PATH,
"Radon_Tiramisu_jitter_v6_"
"train_phase_{}".format((i + 1) % (train_phases + 1)),
)
for i in range(train_phases + 1)
],
"save_epochs": 1,
"optimizer": torch.optim.Adam,
"optimizer_params": [{"lr": 8e-5, "eps": 2e-4, "weight_decay": 5e-4}],
"scheduler": torch.optim.lr_scheduler.StepLR,
"scheduler_params": {"step_size": 1, "gamma": 1.0},
"acc_steps": [1],
"train_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA), Jitter(5e2, 0.0, 1.0)]
),
"val_transform": torchvision.transforms.Compose(
[SimulateMeasurements(OpA)],
),
"train_loader_params": {"shuffle": True, "num_workers": 0},
"val_loader_params": {"shuffle": False, "num_workers": 0},
}
# ----- data configuration -----
train_data_params = {
"path": config.DATA_PATH,
"device": device,
}
train_data = IPDataset
val_data_params = {
"path": config.DATA_PATH,
"device": device,
}
val_data = IPDataset
# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
with open(
os.path.join(train_params["save_path"][-1], "hyperparameters.txt"), "w"
) as file:
for key, value in subnet_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in it_net_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in train_data_params.items():
file.write(key + ": " + str(value) + "\n")
for key, value in val_data_params.items():
file.write(key + ": " + str(value) + "\n")
file.write("train_phases" + ": " + str(train_phases) + "\n")
# ------ construct network and train -----
subnet_tmp = subnet(**subnet_params).to(device)
it_net_tmp = IterativeNet(
subnet_tmp,
**{
"num_iter": 1,
"lam": 0.0,
"lam_learnable": False,
"final_dc": False,
"resnet_factor": 1.0,
"operator": OpA,
"inverter": OpA.inv,
}
).to(device)
it_net_tmp.load_state_dict(
torch.load(
"results/Radon_Tiramisu_jitter_v4_train_phase_1/model_weights.pt",
map_location=torch.device(device),
)
)
subnet = it_net_tmp.subnet
it_net = IterativeNet(subnet, **it_net_params).to(device)
train_data = train_data("train", **train_data_params)
val_data = val_data("val", **val_data_params)
for i in range(train_phases):
train_params_cur = {}
for key, value in train_params.items():
train_params_cur[key] = (
value[i] if isinstance(value, (tuple, list)) else value
)
print("Phase {}:".format(i + 1))
for key, value in train_params_cur.items():
print(key + ": " + str(value))
it_net.train_on(train_data, val_data, **train_params_cur)
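The final loop above resolves train_params per phase: list- or tuple-valued entries are indexed by the phase number, everything else is shared across phases. A toy illustration of the same idiom (the values here are made up, not the script's configuration):

# Per-phase parameter resolution, as used by the training loop above.
params = {"num_epochs": [19, 5], "batch_size": 10}
for phase in range(2):
    resolved = {k: (v[phase] if isinstance(v, (tuple, list)) else v)
                for k, v in params.items()}
    print(phase, resolved)
# phase 0 -> num_epochs 19, batch_size 10; phase 1 -> num_epochs 5, batch_size 10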
avg_line_length: 27.309677 | max_line_length: 75 | alphanum_fraction: 0.617056 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 1,183 / 0.279471

hexsha: 964933bf3abeb9eecd5bbbd430d2ba1f1f9daec5 | size: 142 | ext: py | lang: Python
repo: chiranjeevbitp/Python27new @ d366efee57857402bae16cabf1df94c657490750 | path: Python practice/Mit opencourceware(2.7)/quiz1_p2.py | licenses: ["bzip2-1.0.6"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
#import pdb
T = (0.1, 0.1)
x = 0.0
for i in range(len(T)):
    for j in T:
        x += i + j
        print x
    print i
    #pdb.set_trace()
avg_line_length: 14.2 | max_line_length: 24 | alphanum_fraction: 0.464789 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 29 / 0.204225

hexsha: 964a16c465623ad04aac69e828d235990e03190f | size: 13,029 | ext: py | lang: Python
repo: Lupin1998/inv-ML @ 9f3db461911748292dff18024587538eb66d44bf | path: invMLEnc_toy/main.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: 1 (2021-12-14T09:16:17.000Z to 2021-12-14T09:16:17.000Z) | max_issues_count: null | max_forks_count: 2 (2021-12-14T09:10:00.000Z to 2022-01-21T16:57:44.000Z)
import os
import numpy as np
import random as rd
import time
import argparse
import torch
import torch.utils.data
from torch import optim
import dataset
import gifploter
from trainer.InvML_trainer import InvML_trainer
from generator.samplegenerater import SampleIndexGenerater
def PlotLatenSpace(model, batch_size, datas, labels, loss_caler, gif_ploter, device,
path='./', name='no name', indicator=True, full=True, save_plot=True):
"""use to test the model and plot the latent space
Arguments:
model {torch model} -- a model need to train
batch_size {int} -- batch size
datas {tensor} -- the train data
labels {label} -- the train label, for unsuprised method, it is only used in plot fig
Keyword Arguments:
path {str} -- the path to save the fig (default: {'./'})
name {str} -- the name of current fig (default: {'no name'})
indicator {bool} -- a flag to calculate the indicator (default: {True})
"""
model.eval()
train_loss_sum = [0, 0, 0, 0, 0, 0]
num_train_sample = datas.shape[0]
if full == True:
for batch_idx in torch.arange(0, (num_train_sample-1)//batch_size + 1):
start_number = (batch_idx * batch_size).int()
end_number = torch.min(torch.tensor(
[batch_idx*batch_size+batch_size, num_train_sample])).int()
data = datas[start_number:end_number].float()
label = labels[start_number:end_number]
data = data.to(device)
label = label.to(device)
# train info
train_info = model(data)
loss_dict = loss_caler.CalLosses(train_info)
if type(train_info) == type(dict()):
train_info = train_info['output']
for i, k in enumerate(list(loss_dict.keys())):
train_loss_sum[i] += loss_dict[k].item()
if batch_idx == 0:
latent_point = []
for train_info_item in train_info:
latent_point.append(train_info_item.detach().cpu().numpy())
label_point = label.cpu().detach().numpy()
else:
for i, train_info_item in enumerate(train_info):
latent_point_c = train_info_item.detach().cpu().numpy()
latent_point[i] = np.concatenate(
(latent_point[i], latent_point_c), axis=0)
label_point = np.concatenate(
(label_point, label.cpu().detach().numpy()), axis=0)
gif_ploter.AddNewFig(
latent_point, label_point,
title_=path+'/'+name +
'__AE_' + str(4)[:4] + '__MAE_'+ str(4)[:4],
loss=train_loss_sum,
save=save_plot
)
else:
data = datas.to(device)
label = labels.to(device)
eval_info = model(data)
if type(eval_info) == type(dict()):
eval_info = eval_info['output']
latent_point = []
for info_item in eval_info:
latent_point.append(info_item.detach().cpu().numpy())
label_point = label.cpu().detach().numpy()
gif_ploter.AddNewFig(
latent_point, label_point,
title_=path+'/'+'result', loss=None, save=save_plot
)
def SaveParam(path, param):
"""save the current param in the path """
for v, k in param.items():
print('{v}:{k}'.format(v=v, k=k))
print('{v}:{k}'.format(v=v, k=k), file=open(path+'/param.txt', 'a'))
def SetSeed(seed):
"""function used to set a random seed """
SEED = seed
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
rd.seed(SEED)
np.random.seed(SEED)
def SetParam(mode='encoder'):
# config param
param = dict(
# regular default params
EPOCHS = 8000,
LEARNINGRATE = 1e-3,
BATCHSIZE = 800,
N_dataset = 800,
regularB = 3,
gradualchanging = [600, 1200],
epcilon = 0.23,
MAEK = 10,
LOGINTERVAL = 1.0,
PlotForloop = 2000,
sampleMethod = 'normal', # 'normal', 'near'
# choose
Noise = 0.0,
NetworkStructure = dict(
layer = [3, 100, 100, 100, 100, 2],
relu = [1, 1, 1, 1, 0],
Enc_require_gard = [1, 1, 1, 1, 1],
Dec_require_gard = [0, 0, 0, 0, 0],
inv_Enc = 0, inv_Dec = 1,
),
# Extra Head (DR project)
ExtraHead = dict(
layer = [], # None
weight = [], #[0, 0, 0, 0, 0, 0],
),
# ReLU
ReluType = dict(
type = "Leaky", # "InvLeaky"
Enc_alpha = 0.1,
Dec_alpha = 10,
),
# LIS
LISWeght = dict(
cross = [0,],
enc_forward = [0, 0, 0, 0, 0],
dec_forward = [0, 0, 0, 0, 0],
enc_backward = [0, 0, 0, 0, 0],
dec_backward = [0, 0, 0, 0, 0],
each = [0, 0, 0, 0, 0],
# [dist, angle, push]
cross_w = [1, 1, 1],
enc_forward_w = [1, 1, 1],
dec_forward_w = [1, 1, 1],
enc_backward_w = [1, 1, 1],
dec_backward_w = [1, 1, 1],
each_w = [1, 1, 1],
# [start, end, mode]
LIS_gradual = [0, 0, 1],
push_gradual = dict( # add 0716
cross_w = [500, 1000, 0], # 1 -> 0
enc_w = [500, 1000, 0],
dec_w = [500, 1000, 0],
each_w = [500, 1000, 0],
extra_w = [500, 1000, 0],
),
),
# AE layer
AEWeight = dict(
each = [],
AE_gradual = [0, 0, 1], # [start, end, mode]
),
# Orth
OrthWeight = dict(
each = [0, 0, 0, 0, 0],
Orth_gradual = [0, 0, 1], # [start, end, mode]
),
)
# cmd argsparse
parser = argparse.ArgumentParser()
parser.add_argument("-D", "--DATASET",
default='SwissRoll',
type=str, choices=[
'sphere', 'mnist','Fmnist', 'SwissRoll', 'SCurve'])
parser.add_argument("-M", "--Model",
default='MLP',
choices=['MLP', 'InvMLP'])
parser.add_argument("-SD", "--SEED", default=0, type=int)
# new params
parser.add_argument("-Name", "--ExpName", default="Inv", type=str)
parser.add_argument("-R", "--ratio",
default={"AE":0.005,
"dist":1,
"angle":0,
"push":1,
"orth":0,
"pad":1,
},
type=dict, help='the weight for each loss [ae, dist, ang, mutex, orth]')
parser.add_argument("-Mode", "--InverseMode",
default={
"mode": "pinverse", # ["pinverse", "CSinverse", "ZeroPadding"], "ZeroPadding" has jump in Enc
"padding": [0, 0, 0, 0, 0],
"pad_w": [0, 0, 0, 0, 0],
"pad_gradual": [0, 0, 1], # [start, end, mode]
"p_gradual": [2000, 4000, 0], # p-norm: p=2 -> p=1
},
type=dict) ### new 0716
# useless param
parser.add_argument("-NAME", "--SaveName",
default="None",
type=str)
parser.add_argument("-T", "--Test",
default=1, type=int, choices=[1, 2, 3, 4, 5, 6, 7, 8])
args = parser.parse_args()
# to param dict
args = parser.parse_args()
args = args.__dict__
param.update(args)
# use config file to update param: param['Test']
from test_config import import_test_config
new_param = import_test_config(param['Test'], mode=mode)
param.update(new_param)
# save name
if param["SaveName"] == "None":
path_file = "./{}".format(param['Model'])
for k in param['ratio'].keys():
path_file += '_'+k+str(param['ratio'][k])
else:
path_file = param["SaveName"]
path = os.path.join(param['ExpName'], path_file)
if not os.path.exists(path):
os.makedirs(path)
return param, path
def SetModel(param):
from models.InvML_MLP import InvML_MLP
from loss.InvML_loss import InvMLLosses
if param['Model'] == 'MLP':
Model = InvML_MLP(param).to(device)
param['index'] = Model.plot_index_list
loss = InvMLLosses(args=param, cuda=device)
return Model, loss
def SetModelVAE(param):
from models.VAE import VAE_MLP
from loss.VAE_loss import VAEloss
Model = VAE_MLP(param).to(device)
loss = VAEloss()
return Model, loss
def single_test(new_param=dict(), mode='encoder', device='cuda'):
param, path = SetParam(mode)
SetSeed(param['SEED'])
param.update(new_param)
# load the data
train_data, train_label, test_data, test_label = dataset.LoadData(
dataname=param['DATASET'],
train_number=param['N_dataset'], test_number=param['N_dataset'],
noise=param['Noise'], randomstate=param['SEED'], remove='star'
)
param['BATCHSIZE'] = min(param['BATCHSIZE'], train_data.shape[0])
# init the model, set mode
Model, loss_caler = SetModel(param)
Model.SetMode(mode)
# optimizer = optim.Adam(Model.parameters(), lr=param['LEARNINGRATE']) # ori
optimizer = optim.Adam(filter(lambda p: p.requires_grad, Model.parameters()), lr=param['LEARNINGRATE'])
sample_index_generater_train = SampleIndexGenerater(
train_data, param['BATCHSIZE'], method=param['sampleMethod'], choos_center='normal'
)
gif_ploter = gifploter.GIFPloter(param, Model)
# load .pth of pertrained encoder
if mode == 'decoder':
checkpoint = torch.load(param["ExpName"]+".pth")
state_dict = checkpoint['state_dict']
# choose param for this Model
model_dict = Model.state_dict()
pretrained = {k: v for k, v in state_dict.items() if k in model_dict}
model_dict.update(pretrained)
Model.load_state_dict(model_dict)
print('load encoder model:', param["ExpName"]+".pth")
# training
for epoch in range(param['EPOCHS'] + 1):
# start a trainer
loss_sum = InvML_trainer(Model, loss_caler, epoch, train_data, train_label,
optimizer, device,
sample_index_generater_train,
batch_size=param['BATCHSIZE'],
verbose=epoch % param['PlotForloop']==0)
# update plot loss
loss_interval = 200
if epoch == 0 or epoch % loss_interval == 0 and epoch % param['PlotForloop'] != 0:
gif_ploter.update_loss(loss_sum)
# plot GIF
if epoch % param['PlotForloop'] == 0 and epoch > 0:
# transfer
if mode == 'encoder':
Model.params_transfer()
# plot
name = 'epoch_' + str(epoch).zfill(5)
PlotLatenSpace(
Model, param['BATCHSIZE'],
train_data, train_label, loss_caler, gif_ploter, device,
path=path, name=name, indicator=False
)
# save .pth
if mode == 'encoder':
state = {'state_dict': Model.state_dict()}
torch.save(state, param["ExpName"]+"_Dec"+".pth")
print('save encoder model as ', param["ExpName"]+"_Dec"+".pth")
# test
test_data = test_data.float()
data = test_data.to(device)
label = test_label.to(device)
Model.eval()
test_info = Model(data)
# plot test img (True/False)
plot_test = False
if mode == 'decoder':
plot_test = True
if plot_test:
name = 'test_result'
PlotLatenSpace(
Model, param['BATCHSIZE'],
data, label, loss_caler, gif_ploter, device,
path=path, name=name,
indicator=False,
full=False,
)
return param["ExpName"], param["Test"]
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# 1. test encoder
new_param = {}
expName, testName = single_test(new_param, "encoder", device)
# expName, testName = "Orth", 1 # decoder for 1
# expName, testName = "Orth2", 2 # decoder for 2
# expName, testName = "Orth3", 3 # decoder for 3
# expName, testName = "Orth4", 4 # decoder for 4
# test decoder based on encoder param
new_param = {"ExpName": expName+"_Dec", "Test": testName}
# 2. test decoder
_,_ = single_test(new_param, "decoder", device)
avg_line_length: 34.468254 | max_line_length: 126 | alphanum_fraction: 0.523985 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 2,753 / 0.211298

hexsha: 964a55bd656809ac77520d02fe3eb7d52ed867d8 | size: 16,896 | ext: py | lang: Python
repo: tangb/cleepmod-openweathermap @ cbef1cad7af36ac6b801cb0df6651dd732b4a160 | path: backend/openweathermap.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import requests
from cleep.exception import CommandError, MissingParameter
from cleep.libs.internals.task import Task
from cleep.core import CleepModule
from cleep.common import CATEGORIES
__all__ = ["Openweathermap"]
class Openweathermap(CleepModule):
"""
OpenWeatherMap application.
Returns current weather conditions and forecast.
Note:
https://openweathermap.org/api
"""
MODULE_AUTHOR = "Cleep"
MODULE_VERSION = "1.2.3"
MODULE_DEPS = []
MODULE_CATEGORY = CATEGORIES.SERVICE
MODULE_DESCRIPTION = "Gets weather conditions using OpenWeatherMap service"
MODULE_LONGDESCRIPTION = (
"This application gets data from OpenWeatherMap online service and displays it directly on your device "
"dashboard.<br>OpenWeatherMap allows to get for free current weather condition and 5 days forecast.<br> "
"This application also broadcasts weather event on all your devices."
)
MODULE_TAGS = ["weather", "forecast"]
MODULE_URLINFO = "https://github.com/tangb/cleepapp-openweathermap"
MODULE_URLHELP = None
MODULE_URLSITE = "https://openweathermap.org/"
MODULE_URLBUGS = "https://github.com/tangb/cleepapp-openweathermap/issues"
MODULE_CONFIG_FILE = "openweathermap.conf"
DEFAULT_CONFIG = {"apikey": None}
OWM_WEATHER_URL = "https://api.openweathermap.org/data/2.5/weather"
OWM_FORECAST_URL = "https://api.openweathermap.org/data/2.5/forecast"
OWM_ICON_URL = "https://openweathermap.org/img/wn/%s.png"
OWM_TASK_DELAY = 900
OWM_PREVENT_FLOOD = 15
OWM_WEATHER_CODES = {
200: "Thunderstorm with light rain",
201: "Thunderstorm with rain",
202: "Thunderstorm with heavy rain",
210: "Light thunderstorm",
211: "Thunderstorm",
212: "Heavy thunderstorm",
221: "Ragged thunderstorm",
230: "Thunderstorm with light drizzle",
231: "Thunderstorm with drizzle",
232: "Thunderstorm with heavy drizzle",
300: "Light intensity drizzle",
301: "Drizzle",
302: "Heavy intensity drizzle",
310: "Light intensity drizzle rain",
311: "Drizzle rain",
312: "Heavy intensity drizzle rain",
313: "Shower rain and drizzle",
314: "Heavy shower rain and drizzle",
321: "Shower drizzle",
500: "Light rain",
501: "Moderate rain",
502: "Heavy intensity rain",
503: "Very heavy rain",
504: "Extreme rain",
511: "Freezing rain",
520: "Light intensity shower rain",
521: "Shower rain",
522: "Heavy intensity shower rain",
531: "Ragged shower rain",
600: "Light snow",
601: "Snow",
602: "Heavy snow",
611: "Sleet",
612: "Shower sleet",
615: "Light rain and snow",
616: "Rain and snow",
620: "Light shower snow",
621: "Shower snow",
622: "Heavy shower snow",
701: "Mist",
711: "Smoke",
721: "Haze",
731: "Sand, dust whirls",
741: "Fog",
751: "Sand",
761: "Dust",
762: "Volcanic ash",
771: "Squalls",
781: "Tornado",
800: "Clear sky",
801: "Few clouds",
802: "Scattered clouds",
803: "Broken clouds",
804: "Overcast clouds",
900: "Tornado",
901: "Tropical storm",
902: "Hurricane",
903: "Cold",
904: "Hot",
905: "Windy",
906: "Hail",
951: "Calm",
952: "Light breeze",
953: "Gentle breeze",
954: "Moderate breeze",
955: "Fresh breeze",
956: "Strong breeze",
957: "High wind, near gale",
958: "Gale",
959: "Severe gale",
960: "Storm",
961: "Violent storm",
962: "Hurricane",
}
OWM_WIND_DIRECTIONS = [
"N",
"NNE",
"NE",
"ENE",
"E",
"ESE",
"SE",
"SSE",
"S",
"SSW",
"SW",
"WSW",
"W",
"WNW",
"NW",
"NNW",
"N",
]
def __init__(self, bootstrap, debug_enabled):
"""
Constructor
Args:
bootstrap (dict): bootstrap objects
debug_enabled (bool): flag to set debug level to logger
"""
# init
CleepModule.__init__(self, bootstrap, debug_enabled)
# members
self.weather_task = None
self.__owm_uuid = None
self.__forecast = []
# events
self.openweathermap_weather_update = self._get_event(
"openweathermap.weather.update"
)
def _configure(self):
"""
Configure module
"""
# add openweathermap device
if self._get_device_count() == 0:
owm = {
"type": "openweathermap",
"name": "OpenWeatherMap",
"lastupdate": None,
"celsius": None,
"fahrenheit": None,
"humidity": None,
"pressure": None,
"windspeed": None,
"winddirection": None,
"code": None,
"condition": None,
"icon": None,
}
self._add_device(owm)
# get device uuid
devices = self.get_module_devices()
self.__owm_uuid = list(devices.keys())[0]
def _on_start(self):
"""
Module starts
"""
# update weather conditions
self._force_weather_update()
# start weather task
self._start_weather_task()
def _on_stop(self):
"""
Module stops
"""
self._stop_weather_task()
def _force_weather_update(self):
"""
Force weather update according to last update to not flood owm api
"""
# get devices if not provided
devices = self.get_module_devices()
last_update = devices[self.__owm_uuid].get("lastupdate")
if last_update is None or last_update + self.OWM_PREVENT_FLOOD < time.time():
self._weather_task()
def _start_weather_task(self):
"""
Start weather task
"""
if self.weather_task is None:
self.weather_task = Task(
self.OWM_TASK_DELAY, self._weather_task, self.logger
)
self.weather_task.start()
def _stop_weather_task(self):
"""
Stop weather task
"""
if self.weather_task is not None:
self.weather_task.stop()
def _restart_weather_task(self):
"""
Restart weather task
"""
self._stop_weather_task()
self._start_weather_task()
def _owm_request(self, url, params):
"""
Request OWM api
Args:
url (string): request url
params (dict): dict of parameters
Returns:
tuple: request response::
(
status (int): request status code,
data (dict): request response data
)
"""
status = None
resp_data = None
try:
self.logger.debug("Request params: %s" % params)
resp = requests.get(url, params=params)
resp_data = resp.json()
self.logger.debug("Response data: %s" % resp_data)
status = resp.status_code
if status != 200:
self.logger.error("OWM api response [%s]: %s" % (status, resp_data))
except Exception:
self.logger.exception("Error while requesting OWM API:")
return (status, resp_data)
def _get_weather(self, apikey):
"""
Get weather condition
Args:
apikey (string): OWM apikey
Returns:
dict: weather conditions (see http://openweathermap.org/current#parameter for output format)
Raises:
InvalidParameter: if input parameter is invalid
CommandError: if command failed
"""
# check parameter
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# get position infos from parameters app
resp = self.send_command("get_position", "parameters")
self.logger.debug("Get position from parameters module resp: %s" % resp)
if not resp or resp.error:
raise Exception(
"Unable to get device position (%s)" % resp.error
if resp
else "No response"
)
position = resp.data
# request api
(status, resp) = self._owm_request(
self.OWM_WEATHER_URL,
{
"appid": apikey,
"lat": position["latitude"],
"lon": position["longitude"],
"units": "metric",
"mode": "json",
},
)
self.logger.debug("OWM response: %s" % (resp))
# handle errors
if status == 401:
raise Exception("Invalid OWM api key")
if status != 200:
raise Exception("Error requesting openweathermap api (status %s)" % status)
if not isinstance(resp, dict) or "cod" not in resp:
raise Exception("Invalid OWM api response format. Is API changed?")
if resp["cod"] != 200: # cod is int for weather request
raise Exception(
resp["message"] if "message" in resp else "Unknown error from api"
)
return resp
def _get_forecast(self, apikey):
"""
Get forecast (5 days with 3 hours step)
Args:
apikey (string): OWM apikey
Returns:
dict: forecast (see http://openweathermap.org/forecast5 for output format)
Raises:
InvalidParameter: if input parameter is invalid
CommandError: if command failed
"""
# check parameter
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# get position infos from parameters app
resp = self.send_command("get_position", "parameters")
self.logger.debug("Get position from parameters module resp: %s" % resp)
if not resp or resp.error:
raise Exception(
"Unable to get device position (%s)" % resp.error
if resp
else "No response"
)
position = resp.data
# request api
(status, resp) = self._owm_request(
self.OWM_FORECAST_URL,
{
"appid": apikey,
"lat": position["latitude"],
"lon": position["longitude"],
"units": "metric",
"mode": "json",
},
)
self.logger.trace("OWM response: %s" % (resp))
# handle errors
if status == 401:
raise Exception("Invalid OWM api key")
if status != 200:
raise Exception("Error requesting openweathermap api (status %s)" % status)
if "cod" not in resp:
raise Exception("Invalid OWM api response format. Is API changed?")
if resp["cod"] != "200": # cod is string for forecast request
raise Exception(
"API message: %s" % resp["message"]
if "message" in resp
else "Unknown error from api"
)
if "list" not in resp or len(resp["list"]) == 0:
raise Exception("No forecast data retrieved")
return resp["list"]
def _weather_task(self):
"""
Weather task in charge of refreshing weather conditions periodically (every OWM_TASK_DELAY seconds)
Send openweathermap.weather.update event with following data::
{
lastupdate (int): timestamp,
icon (string): openweathermap icon,
condition (string): openweathermap condition string (english),
code (int): openweather condition code,
celsius (float): current temperature in celsius,
fahrenheit (float): current temperature in fahrenheit,
pressure (float): current pressure,
humidity (float): current humidity,
windspeed (float): current wind speed,
winddirection (string): current wind direction,
winddegrees (float): current wind degrees
}
"""
try:
self.logger.debug("Update weather conditions")
# get api key
config = self._get_config()
if config["apikey"] is None or len(config["apikey"]) == 0:
self.logger.debug("No apikey configured")
return
# apikey configured, get weather
weather = self._get_weather(config["apikey"])
self.logger.debug("Weather: %s" % weather)
self.__forecast = self._get_forecast(config["apikey"])
self.logger.debug("Forecast: %s" % self.__forecast)
# save current weather conditions
device = self._get_devices()[self.__owm_uuid]
device["lastupdate"] = int(time.time())
if "weather" in weather and len(weather["weather"]) > 0:
icon = weather["weather"][0].get("icon")
device["icon"] = self.OWM_ICON_URL % icon or "unknown"
wid = weather["weather"][0].get("id")
device["condition"] = self.OWM_WEATHER_CODES[wid] if wid else "?"
device["code"] = int(wid) if wid else 0
else:
device["icon"] = self.OWM_ICON_URL % "unknown"
device["condition"] = "?"
device["code"] = 0
if "main" in weather:
device["celsius"] = weather["main"].get("temp", 0.0)
device["fahrenheit"] = (
weather["main"].get("temp", 0.0) * 9.0 / 5.0 + 32.0
)
device["pressure"] = weather["main"].get("pressure", 0.0)
device["humidity"] = weather["main"].get("humidity", 0.0)
else:
device["celsius"] = 0.0
device["fahrenheit"] = 0.0
device["pressure"] = 0.0
device["humidity"] = 0.0
if "wind" in weather:
device["windspeed"] = weather["wind"].get("speed", 0.0)
device["winddegrees"] = weather["wind"].get("deg", 0)
index = int(round((weather["wind"].get("deg", 0) % 360) / 22.5) + 1)
device["winddirection"] = self.OWM_WIND_DIRECTIONS[
0 if index >= 17 else index
]
else:
device["windspeed"] = 0.0
device["winddegrees"] = 0.0
device["winddirection"] = "N"
self._update_device(self.__owm_uuid, device)
# and emit event
event_keys = [
"icon",
"condition",
"code",
"celsius",
"fahrenheit",
"pressure",
"humidity",
"windspeed",
"winddegrees",
"winddirection",
"lastupdate",
]
self.openweathermap_weather_update.send(
params={k: v for k, v in device.items() if k in event_keys},
device_id=self.__owm_uuid,
)
except Exception:
self.logger.exception("Exception during weather task:")
def set_apikey(self, apikey):
"""
Set openweathermap apikey
Args:
apikey (string): OWM apikey
Returns:
bool: True if apikey saved successfully
Raises:
CommandError: if an error occurred while using the apikey to get current weather
"""
self._check_parameters([{"name": "apikey", "value": apikey, "type": str}])
# test apikey (should raise exception if error)
self._get_weather(apikey)
# test succeed, update weather right now and restart task
self._restart_weather_task()
self._force_weather_update()
# save config
return self._update_config({"apikey": apikey})
def get_weather(self):
"""
Return current weather conditions
Useful to use it in action script
Returns:
dict: device information
"""
return self._get_devices()[self.__owm_uuid]
def get_forecast(self):
"""
Return last forecast information.
May be empty if cleep just restarted.
Returns:
list: list of forecast data (every 3 hours)
"""
return self.__forecast
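The _get_weather and _get_forecast methods above issue a plain GET against the OWM endpoints with appid, lat, lon, units and mode parameters and then check the status and cod fields. A standalone sketch of the same current-weather call (the API key and coordinates are placeholders; error handling is reduced to the status check used above):

import requests

OWM_WEATHER_URL = "https://api.openweathermap.org/data/2.5/weather"
params = {
    "appid": "YOUR_API_KEY",     # placeholder, not a real key
    "lat": 48.85, "lon": 2.35,   # placeholder coordinates
    "units": "metric",
    "mode": "json",
}
resp = requests.get(OWM_WEATHER_URL, params=params)
if resp.status_code == 200:
    data = resp.json()
    print(data["main"]["temp"], data["weather"][0]["id"])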
avg_line_length: 32 | max_line_length: 113 | alphanum_fraction: 0.533381 | count/score_classes: 16,603 / 0.982659 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 7,512 / 0.444602

hexsha: 964a8ebce3df5d896031c77dad18e3a15b609702 | size: 527 | ext: py | lang: Python
repo: f-PLT/emu @ c0bb27d57afcaa361772ce99eaf11f706983b3b2 | path: tests/test_wps_dummy.py | licenses: ["Apache-2.0"] (identical for the stars/issues/forks fields)
max_stars_count: 3 (2015-11-10T10:08:07.000Z to 2019-09-09T20:41:25.000Z) | max_issues_count: 76 (2015-02-01T23:17:17.000Z to 2021-12-20T14:17:59.000Z) | max_forks_count: 8 (2016-10-13T16:44:02.000Z to 2020-12-22T18:36:53.000Z)
from pywps import Service
from pywps.tests import assert_response_success

from .common import client_for, get_output
from emu.processes.wps_dummy import Dummy


def test_wps_dummy():
    client = client_for(Service(processes=[Dummy()]))
    datainputs = "input1=10;input2=2"
    resp = client.get(
        service='WPS', request='Execute', version='1.0.0',
        identifier='dummyprocess',
        datainputs=datainputs)
    assert_response_success(resp)
    assert get_output(resp.xml) == {'output1': '11', 'output2': '1'}
avg_line_length: 31 | max_line_length: 68 | alphanum_fraction: 0.705882 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 80 / 0.151803

hexsha: 964ae4268e2f7a93ee8eacf634fa2376a1e04d95 | size: 476 | ext: py | lang: Python
repo: cjds/rosgo @ 2a832421948707baca6413fe4394e28ed0c36d86 | path: test/talker.py | licenses: ["Apache-2.0"] (identical for the stars/issues/forks fields)
max_stars_count: 148 (2016-02-16T18:29:34.000Z to 2022-03-18T13:13:46.000Z) | max_issues_count: 24 (2018-12-21T19:32:15.000Z to 2021-01-20T00:27:51.000Z) | max_forks_count: 45 (2015-11-16T06:31:10.000Z to 2022-03-28T12:46:44.000Z)
#!/usr/bin/env python
import rospy
from std_msgs.msg import String


def talker():
    pub = rospy.Publisher('chatter', String)
    rospy.init_node('talker', anonymous=True)
    while not rospy.is_shutdown():
        str = "%s: hello world %s" % (rospy.get_name(), rospy.get_time())
        rospy.loginfo(str)
        pub.publish(String(str))
        rospy.sleep(1.0)


if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
avg_line_length: 22.666667 | max_line_length: 73 | alphanum_fraction: 0.628151 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 68 / 0.142857

hexsha: 964b60ee1051cb3579b95c9af76b42037448ddeb | size: 9,365 | ext: py | lang: Python
repo: BavarianToolbox/point_to_box @ 6769739361410499596f53a60704cbedae56bd81 | path: point_to_box/model.py | licenses: ["Apache-2.0"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_model.ipynb (unless otherwise specified).
__all__ = ['EfficientLoc', 'CIoU']
# Cell
#export
from efficientnet_pytorch import EfficientNet
import copy
import time
import math
import torch
import torch.optim as opt
from torch.utils.data import DataLoader
from torchvision import transforms
# Cell
class EfficientLoc():
def __init__(self, version = 'efficientnet-b0', in_channels = 4, out_features = 4, export = False):
"""
EfficientLoc model class for loading, training, and exporting models
"""
self.version = version
# self.inter_channels = versoin_dict([version])
# TODO
# check version is compliant
self.in_channels = in_channels
self.out_features = out_features
self.export = export
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data_parallel = False
self.model = self.get_model(version = self.version,
in_channels = self.in_channels, out_features = self.out_features)
def get_model(self, version, in_channels, out_features):
"""
Adjusts efficient net model architecture for point-to-box data
"""
version_chnls = {
'efficientnet-b0': 1280,
'efficientnet-b1': 1280,
'efficientnet-b2': 1408,
'efficientnet-b3': 1536,
'efficientnet-b4': 1792
# 'efficientnet-b5': 456
# 'efficientnet-b6': 528
# 'efficientnet-b7': 600
# 'efficientnet-b8': 672
# 'efficientnet-l2': 800
}
inter_channel = version_chnls[version]
model = EfficientNet.from_pretrained(version, include_top = False)
# adjust in channels in conv stem
model._change_in_channels(in_channels)
# if self.export:
model.set_swish(memory_efficient= (not self.export))
model = torch.nn.Sequential(
model,
# torch.nn.AdaptiveAvgPool2d(),
torch.nn.Dropout(0.2),
torch.nn.Flatten(),
torch.nn.Linear(inter_channel, out_features),
# torch.nn.Linear(100, out_features),
torch.nn.Sigmoid()
)
for param in model.parameters():
param.requires_grad = True
if torch.cuda.device_count() > 1:
print(f'Using {torch.cuda.device_count()} GPUs')
model = torch.nn.DataParallel(model)
self.data_parallel = True
model.to(self.device)
return model
def train(self, dataloaders, criterion, optimizer, num_epochs, ds_sizes, print_every = 100, scheduler=None):
"""
Training function for model
**Params**
loaders : dict of val/train DataLoaders
criterion : loss function
optimizer : training optimizer
num_epochs : number of training epochs
ds_sizes : dict of number of samples in
print_every : batch_interval for intermediate loss printing
scheduler : Optional learning rate scheduler
"""
train_start = time.time()
best_model_wts = copy.deepcopy(self.model.state_dict())
best_loss = 10000000.0
for epoch in range(num_epochs):
print(f'Epoch {epoch + 1}/{num_epochs}')
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
phase_start = time.time()
if phase == 'train':
self.model.train()
else:
self.model.eval()
inter_loss = 0.
running_loss = 0.
batches_past = 0
# Iterate over data.
for i, (inputs, labels) in enumerate(dataloaders[phase]):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer.zero_grad()
# forward, only track history in train phase
with torch.set_grad_enabled(phase == 'train'):
outputs = self.model(inputs)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item()
inter_loss += loss.item()
if (i+1) % print_every == 0:
inter_loss = inter_loss / ((i+1-batches_past) * inputs.shape[0])
print(f'Intermediate loss: {inter_loss:.6f}')
inter_loss = 0.
batches_past = i+1
if phase == 'train' and scheduler is not None:
scheduler.step()
epoch_loss = running_loss / ds_sizes[phase]
phase_duration = time.time() - phase_start
phase_duration = f'{(phase_duration // 60):.0f}m {(phase_duration % 60):.0f}s'
print('-' * 5)
print(f'{phase} Phase Duration: {phase_duration} Average Loss: {epoch_loss:.6f} in ')
print('-' * 5)
# deep copy the model
if phase == 'val' and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(self.model.state_dict())
time_elapsed = time.time() - train_start
print(f'Training complete in {(time_elapsed // 60):.0f}m {(time_elapsed % 60):.0f}s')
print(f'Best val Loss: {best_loss:.4f}')
# load best model weights
self.model.load_state_dict(best_model_wts)
def save(self, dst, info = None):
"""Save model and optimizer state dict
**Params**
dst : destination file path including .pth file name
info : Optional dictionary with model info
"""
if info:
torch.save(info, dst)
else:
model_dict = self.model.state_dict()
if self.data_parallel:
model_dict = self.model.module.state_dict()
torch.save({
'base_arch' : self.version,
'model_state_dict' : model_dict,
}, dst)
def load(self, model_state_dict):
"""Load model weights from state-dict"""
self.model.load_state_dict(model_state_dict)
def _export(self, dst, dummy, verbose = True):
"""Export model as onnx graph
**Params**
dst : destination including .onnx file name
dummy : dummy variable for export structure, shape (B,C,W,H)
"""
self.model.eval()
torch.onnx.export(self.model, dummy, dst, verbose = verbose)
# Cell
class CIoU(torch.nn.Module):
"""Complete IoU loss class"""
def __init__(self) -> None:
super(CIoU, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return self.ciou(input, target)
# return F.l1_loss(input, target, reduction=self.reduction)
def ciou(self, bboxes1, bboxes2):
bboxes1 = torch.sigmoid(bboxes1)
bboxes2 = torch.sigmoid(bboxes2)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
cious = torch.zeros((rows, cols))
if rows * cols == 0:
return cious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
cious = torch.zeros((cols, rows))
exchange = True
w1 = torch.exp(bboxes1[:, 2])
h1 = torch.exp(bboxes1[:, 3])
w2 = torch.exp(bboxes2[:, 2])
h2 = torch.exp(bboxes2[:, 3])
area1 = w1 * h1
area2 = w2 * h2
center_x1 = bboxes1[:, 0]
center_y1 = bboxes1[:, 1]
center_x2 = bboxes2[:, 0]
center_y2 = bboxes2[:, 1]
inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)
inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)
inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)
inter_b = torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)
inter_area = torch.clamp((inter_r - inter_l),min=0) * torch.clamp((inter_b - inter_t),min=0)
c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)
c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)
c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)
c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)
inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2
c_diag = torch.clamp((c_r - c_l),min=0)**2 + torch.clamp((c_b - c_t),min=0)**2
union = area1+area2-inter_area
u = (inter_diag) / c_diag
iou = inter_area / union
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w2 / h2) - torch.atan(w1 / h1)), 2)
with torch.no_grad():
S = (iou>0.5).float()
alpha= S*v/(1-iou+v)
cious = iou - u - alpha * v
cious = torch.clamp(cious,min=-1.0,max = 1.0)
if exchange:
cious = cious.T
return torch.sum(1-cious)
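For reference, the quantity computed by ciou above matches the standard Complete-IoU loss, with two code-specific details: alpha is additionally gated by S = 1[IoU > 0.5] inside a no_grad block, and the per-pair value is clamped to [-1, 1] before summation. Written out:

\mathcal{L}_{\mathrm{CIoU}}
    = 1 - \mathrm{IoU}
      + \frac{\rho^{2}\left(\mathbf{b}, \mathbf{b}^{gt}\right)}{c^{2}}
      + \alpha v,
\qquad
v = \frac{4}{\pi^{2}} \left( \arctan\frac{w^{gt}}{h^{gt}} - \arctan\frac{w}{h} \right)^{2},
\qquad
\alpha = \frac{v}{(1 - \mathrm{IoU}) + v},

where \rho^{2} is the squared distance between the two box centers (inter_diag in the code) and c^{2} is the squared diagonal of the smallest enclosing box (c_diag); the forward pass returns the loss summed over the batch, i.e. sum(1 - CIoU).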
avg_line_length: 33.091873 | max_line_length: 112 | alphanum_fraction: 0.553017 | count/score_classes: 9,004 / 0.961452 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 2,450 / 0.261612

hexsha: 964d531f50577c7580159804463196dbab58e21c | size: 7,643 | ext: py | lang: Python
repo: mahmoud-a-ali/Thesis_sample_codes @ 02a912dd012291b00c89db195b4cba2ebb4d35fe | path: Receptive_Field_PyNN/2rtna_connected_to_4ReceptiveFields/anmy_TDXY.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 2 13:09:55 2018
@author: mali
"""
#import time
import pickle
import pyNN.utility.plotting as plot
import matplotlib.pyplot as plt
import comn_conversion as cnvrt
import prnt_plt_anmy as ppanmy
# file and folder names =======================================================
fldr_name = 'rslts/icub64x64/'
pickle_filename = 'TDXY.pickle'
file_pth = cnvrt.read_flenfldr_ncrntpth(fldr_name, pickle_filename )
with open(file_pth , 'rb') as tdxy:
TDXY = pickle.load( tdxy )
print '### lenght of TDXY : {}'.format( len(TDXY) ) # 2+ 2*n_orn )
pop = TDXY[0]
t_ist = 1040
print 'check pop: L_rtna_TDXY'
print '### T : {}'.format(pop[0][t_ist]) # dimension 4 x t_stp x depend
print '### 1D : {}'.format(pop[1][t_ist]) # dimension 4 x t_stp x depend
print '### X : {}'.format(pop[2][t_ist]) # dimension 4 x t_stp x depend
print '### Y : {}'.format(pop[3][t_ist]) # dimension 4 x t_stp x depend
print pop[0]
print pop[1]
#required variables============================================================
n_rtna = 2 # till now should be two
n_orn = 4
rtna_w = 64
rtna_h = 64
krnl_sz = 5
rf_w = rtna_w - krnl_sz +1
rf_h = rtna_h - krnl_sz +1
subplt_rws = n_rtna
subplt_cls = n_orn+1
########### to make the animation fast, since the time scale is now in microseconds ###############
# first, rescale time by subsampling it (keep only every 100th step) ==========================
T = TDXY[0][0]
t10u = T[0:T[-1]:100]
#print '### t_10u : {}'.format(t10u)
# second, find all times at which any of the rtna or rf populations spikes ====================
t_spks=[]
for pop in range ( len(TDXY) ):
for inst in range( len(TDXY[pop][0]) ):
if TDXY[pop][2][inst]!=[] :
t_spks.append( TDXY[pop][0][inst] )
print pop, TDXY[pop][0][inst]
t_spks.sort()
for each in t_spks:
count = t_spks.count(each)
if count > 1:
t_spks.remove(each)
print 't_spks : {}'.format( t_spks )
#animate the rtna_rf =========================================================
#print 'abplt_rw, sbplt_cl, rtna_w, rtna_h, rf_w, rf_h: {}, {}, {}, {}, {}, {} '.format(subplt_rws, subplt_cls, rtna_w, rtna_h, rf_w, rf_h)
fig, axs = plt.subplots(subplt_rws, subplt_cls, sharex=False, sharey=False) #, figsize=(12,5))
axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
plt.grid(True)
plt.show(block=False)
plt.pause(.01)
#for i in t_spks: #t10u:
# axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
# plt.suptitle('rtna_rf_orn_3: t= {} usec'.format( i ) )
# if subplt_rws==1:
# axs[0].scatter( TDXY[0][2][i], TDXY[0][3][i] )
# for col in range (subplt_cls):
# axs[col].scatter( TDXY[col+1][2][i], TDXY[col+1][3][i] )
## plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
# plt.show(block=False)
# plt.pause(2)
# for col in range(subplt_cls):
# axs[col].cla()
#
# elif subplt_rws==2:
# for col in range (subplt_cls):
# axs[0][0].scatter( TDXY[0][2][i], TDXY[0][3][i] )
# axs[1][0].scatter( TDXY[1][2][i], TDXY[1][3][i] )
# for col in range(1,n_orn+1):
# row=0
# axs[row][col].scatter( TDXY[col+1][2][i], TDXY[col+1][3][i] )
# for col in range(1,n_orn):
# row=1
# axs[row][col].scatter( TDXY[n_orn+1+col][2][i], TDXY[n_orn+1+col][3][i] )
## plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
# plt.show(block=False)
# plt.pause(2)
# for row in range(subplt_rws):
# for col in range (subplt_cls):
# axs[row][col].cla()
#
print '##### required variables: \n n_rtna={}, TDXY_len={}, rtna_w={}, rtna_h={}, krnl_sz={}, rf_w={} , rf_h={}'.format( n_rtna , len(TDXY), rtna_w, rtna_h, krnl_sz, rf_w , rf_h )
plt.show(block=False)
last_t_spks=-310
for i in range( len(t_spks) ): #t10u:
# plt.pause(2)
if t_spks[i]-last_t_spks > 300:
#clear
if subplt_rws==2:
for row in range(subplt_rws):
for col in range (subplt_cls):
axs[row][col].cla()
elif subplt_rws==1:
for col in range(subplt_cls):
axs[col].cla()
axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
plt.suptitle('rtna_rf_orn: t= {} usec'.format( t_spks[i] ) )
plt.pause(1.5)
#--------------------------------------------------------------------------
if subplt_rws==1:
axs[0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
for col in range (subplt_cls):
axs[col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
elif subplt_rws==2:
for col in range (subplt_cls):
axs[0][0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
axs[1][0].scatter( TDXY[1][2][t_spks[i]], TDXY[1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=0
axs[row][col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=1
axs[row][col].scatter( TDXY[n_orn+1+col][2][t_spks[i]], TDXY[n_orn+1+col][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
#--------------------------------------------------------------------------
plt.pause(.5)
else: #====================================================================
#--------------------------------------------------------------------------
if subplt_rws==1:
axs[0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
for col in range (subplt_cls):
axs[col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
elif subplt_rws==2:
for col in range (subplt_cls):
axs[0][0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
axs[1][0].scatter( TDXY[1][2][t_spks[i]], TDXY[1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=0
axs[row][col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=1
axs[row][col].scatter( TDXY[n_orn+1+col][2][t_spks[i]], TDXY[n_orn+1+col][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
#--------------------------------------------------------------------------
plt.pause(.5)
last_t_spks = t_spks[i]
# suing builtin animation function ===========================================
#strt_tm = TDXY[0][0][0]
#stop_tm = TDXY[0][0][-1]
#print '\n### n_orn x n_rtna : {}x{}'.format(n_orn, n_rtna)
#print '\n### strt_tm - stop_tm : {} - {}'.format(strt_tm, stop_tm)
#ppanmy.anmy_rtna_rf_orn( TDXY, rtna_h, rtna_w, n_rtna, krnl_sz, strt_tm , stop_tm)
avg_line_length: 37.282927 | max_line_length: 180 | alphanum_fraction: 0.483056 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 3,708 / 0.48515

hexsha: 964d60906285ddfcdeb808da94455db7bd3067ea | size: 5,998 | ext: py | lang: Python
repo: guipeeix7/website @ 4081899060e69688314a5577ceab5c7b840e7b7f | path: meAdota/settings.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: 6 (2020-10-19T23:13:07.000Z to 2020-12-02T19:08:32.000Z) | max_issues_count: 69 (2020-10-23T03:52:47.000Z to 2020-12-04T01:12:49.000Z) | max_forks_count: 1 (2020-12-08T22:10:08.000Z to 2020-12-08T22:10:08.000Z)
"""
Django settings for meAdota project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Load dotenv
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ('static',)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_countries',
'cpf_field',
'django_filters',
# AllAuth [custom providers]
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
#my apps
'users',
'pets',
'crispy_forms',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
SITE_ID = 1
AUTH_USER_MODEL = 'users.User'
#verify email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' #check email on console
ACCOUNT_EMAIL_VERIFICATION = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_LOGOUT_ON_GET = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meAdota.urls'
ACCOUNT_FORMS = {
'login': 'users.forms.MyLoginForm',
# 'signup': 'allauth.account.forms.SignupForm',
'signup': 'users.forms.MyCustomSignupForm',
'add_email': 'allauth.account.forms.AddEmailForm',
'change_password': 'allauth.account.forms.ChangePasswordForm',
'set_password': 'allauth.account.forms.SetPasswordForm',
'reset_password': 'allauth.account.forms.ResetPasswordForm',
'reset_password_from_key': 'allauth.account.forms.ResetPasswordKeyForm',
'disconnect': 'allauth.socialaccount.forms.DisconnectForm',
}
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'METHOD': 'oauth2',
'SDK_URL': '//connect.facebook.net/{locale}/sdk.js',
'SCOPE': ['email', 'public_profile'],
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'INIT_PARAMS': {'cookie': True},
'FIELDS': [
'id',
'first_name',
'last_name',
'middle_name',
'name',
'name_format',
'picture',
'short_name'
],
'EXCHANGE_TOKEN': True,
'LOCALE_FUNC': lambda request: 'en_US',
'VERIFIED_EMAIL': False,
'VERSION': 'v7.0',
},
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
LOGIN_REDIRECT_URL ='/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(BASE_DIR / "templates"), str(BASE_DIR / "templates/account")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
WSGI_APPLICATION = 'meAdota.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv("DATABASE_NAME"),
'USER': os.getenv("DATABASE_USER"),
'PASSWORD': os.getenv("DATABASE_PASSWORD"),
'HOST': os.getenv("DATABASE_HOST"),
'PORT': os.getenv("DATABASE_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
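The settings above read every secret through os.getenv after load_dotenv(), so the module expects a .env file (or the environment) to provide the following variable names; this listing is illustrative and only restates the getenv calls above:

# Environment variables consumed by this settings module (via python-dotenv).
EXPECTED_ENV_VARS = [
    "SECRET_KEY",
    "DATABASE_NAME",
    "DATABASE_USER",
    "DATABASE_PASSWORD",
    "DATABASE_HOST",
    "DATABASE_PORT",
]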
avg_line_length: 27.140271 | max_line_length: 91 | alphanum_fraction: 0.672391 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 3,932 / 0.655552

hexsha: 964eca76f4c35f59907bf26e2512971b1aa50b0f | size: 1,098 | ext: py | lang: Python
repo: brett-harvey/Brett-s-AI-Library @ 43f9bfd5eca92b9e28c6d4532afb943d03d80b67 | path: Machine Learning/Regression/reg.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
from sklearn import preprocessing, svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import cross_validation
import pandas as pd
import numpy as np
import quandl
import math
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(-99999, inplace = True)
forecast_out = int(math.ceil(0.01 * len(df)))
print(forecast_out)
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace = True)
X = np.array(df.drop(['label'],1))
y = np.array(df['label'])
X = preprocessing.scale(X)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size = 0.2)
clf = LinearRegression()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test,y_test)
print(accuracy)
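The label construction above shifts the forecast column back by forecast_out rows, so each row's label is the adjusted close forecast_out days ahead; the rows that receive NaN labels are then removed by dropna(). A minimal illustration of the same shift (toy numbers, not the Quandl data):

import pandas as pd

toy = pd.DataFrame({"Adj. Close": [10, 11, 12, 13, 14]})
toy["label"] = toy["Adj. Close"].shift(-2)  # label = close two rows ahead
print(toy)
# The last two rows get NaN labels and would be dropped by dropna(), as in the script above.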
avg_line_length: 29.675676 | max_line_length: 90 | alphanum_fraction: 0.6949 | count/score_classes: 0 / 0 | count/score_generators: 0 / 0 | count/score_decorators: 0 / 0 | count/score_async_functions: 0 / 0 | count/score_documentation: 241 / 0.21949

hexsha: 964ed654a2da39f4913b8a0e948783cdb15246e1 | size: 12,126 | ext: py | lang: Python
repo: tatianamaia/Corona @ d944395d8b7b7a740b57e9bb7895f0835bc63d10 | path: src/app.py | licenses: ["MIT"] (identical for the stars/issues/forks fields)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
import datetime
import os
import yaml
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
import plotly.graph_objs as go
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# Initialize the file paths
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
params['directories']['processed'],
params['files']['all_data'])
# Read the data file
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
.assign(day=lambda _df:_df['Last Update'].dt.date)
.drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
[lambda df: df['day'] <= datetime.date(2020,3,20)]
)
# replacing Mainland china with just China
cases = ['Confirmed', 'Deaths', 'Recovered']
# After 14/03/2020 the names of the countries are quite different
epidemie_df['Country/Region'] = epidemie_df['Country/Region'].replace('Mainland China', 'China')
# filling missing values
epidemie_df[['Province/State']] = epidemie_df[['Province/State']].fillna('')
epidemie_df[cases] = epidemie_df[cases].fillna(0)
countries=[{'label':c, 'value': c} for c in epidemie_df['Country/Region'].unique()]
app = dash.Dash('COVID-19 Explorer')
app.layout = html.Div([
    html.H1(['COVID-19 Explorer'], style={'textAlign': 'center', 'color': 'navy', 'font-weight': 'bold'}),
dcc.Tabs([
dcc.Tab(label='Time', children=[
dcc.Markdown("""
Select a country:
""",style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
html.Div([
dcc.Dropdown(
id='country',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([
dcc.Markdown("""You can select a second country:""",
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(
id='country2',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([dcc.Markdown("""Cases: """,
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.RadioItems(
id='variable',
options=[
{'label':'Confirmed', 'value': 'Confirmed'},
{'label':'Deaths', 'value': 'Deaths'},
{'label':'Recovered', 'value': 'Recovered'}
],
value='Confirmed',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Graph(id='graph1')
])
]),
dcc.Tab(label='Map', children=[
#html.H6(['COVID-19 in numbers:']),
dcc.Markdown("""
**COVID-19**
This is a graph that shows the evolution of the COVID-19 around the world
** Cases:**
""", style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(id="value-selected", value='Confirmed',
options=[{'label': "Deaths ", 'value': 'Deaths'},
{'label': "Confirmed", 'value': 'Confirmed'},
{'label': "Recovered", 'value': 'Recovered'}],
placeholder="Select a country...",
style={"display": "inline-block", "margin-left": "auto", "margin-right": "auto",
"width": "70%"}, className="six columns"),
dcc.Graph(id='map1'),
dcc.Slider(
id='map_day',
min=0,
max=(epidemie_df['day'].max() - epidemie_df['day'].min()).days,
value=0,
marks={i:str(i) for i, date in enumerate(epidemie_df['day'].unique())}
)
]),
dcc.Tab(label='SIR Model', children=[
dcc.Markdown("""
**SIR model**
S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.
**Select a country:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Div([
dcc.Dropdown(
id='Country',
value='Portugal',
options=countries),
]),
dcc.Markdown("""Select:""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Dropdown(id='cases',
options=[
{'label': 'Confirmed', 'value': 'Confirmed'},
{'label': 'Deaths', 'value': 'Deaths'},
{'label': 'Recovered', 'value': 'Recovered'}],
value=['Confirmed','Deaths','Recovered'],
multi=True),
dcc.Markdown("""
                     **Select your parameters:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Label( style={'textAlign': 'left', 'color': 'navy', "width": "20%"}),
html.Div([
dcc.Markdown(""" Beta:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-beta',
type ='number',
placeholder='Input Beta',
min =-50,
max =100,
step =0.01,
value=0.45
)
]),
html.Div([
dcc.Markdown(""" Gamma:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-gamma',
type ='number',
placeholder='Input Gamma',
min =-50,
max =100,
step =0.01,
value=0.55
)
]),
html.Div([
dcc.Markdown(""" Population:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-pop',placeholder='Population',
type ='number',
min =1000,
max =1000000000000000,
step =1000,
value=1000,
)
]),
html.Div([
dcc.RadioItems(id='variable2',
options=[
{'label':'Optimize','value':'optimize'}],
value='Confirmed',
labelStyle={'display':'inline-block','color': 'navy', "width": "20%"})
]),
html.Div([
dcc.Graph(id='graph2')
]),
])
]),
])
@app.callback(
Output('graph1', 'figure'),
[
Input('country','value'),
Input('country2','value'),
Input('variable','value'),
]
)
def update_graph(country, country2, variable):
print(country)
if country is None:
graph_df = epidemie_df.groupby('day').agg({variable:'sum'}).reset_index()
else:
graph_df=(epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
if country2 is not None:
graph2_df=(epidemie_df[epidemie_df['Country/Region'] == country2]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
return {
'data':[
dict(
x=graph_df['day'],
y=graph_df[variable],
type='line',
name=country if country is not None else 'Total'
)
] + ([
dict(
x=graph2_df['day'],
y=graph2_df[variable],
type='line',
name=country2
)
] if country2 is not None else [])
}
@app.callback(
Output('map1', 'figure'),
[
Input('map_day','value'),
Input("value-selected", "value")
]
)
def update_map(map_day,selected):
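    # The slider value indexes the list of available days (most recent first); cases are then aggregated per country for that day.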
day= epidemie_df['day'].sort_values(ascending=False).unique()[map_day]
map_df = (epidemie_df[epidemie_df['day'] == day]
.groupby(['Country/Region'])
.agg({selected:'sum', 'Latitude': 'mean', 'Longitude': 'mean'})
.reset_index()
)
return {
'data':[
dict(
type='scattergeo',
lon=map_df['Longitude'],
lat=map_df['Latitude'],
text=map_df.apply(lambda r: r['Country/Region'] + '(' + str(r[selected]) + ')', axis=1),
mode='markers',
marker=dict(
size=np.maximum(map_df[selected]/ 1_000, 10)
)
)
],
'layout': dict(
title=str(day),
geo=dict(showland=True)
)
}
@app.callback(
Output('graph2', 'figure'),
[
Input('input-beta', 'value'),
Input('input-gamma','value'),
Input('input-pop','value'),
Input('Country','value')
#Input('variable2','value')
]
)
def update_model(beta, gamma, population, Country):
print(Country)
country=Country
country_df = (epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum'})
.reset_index())
country_df['Infected'] = country_df['Confirmed'].diff()
steps = len(country_df['Infected'])
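    # SIR right-hand side: dS/dt = -beta*S*I, dI/dt = beta*S*I - gamma*I, dR/dt = gamma*I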
def SIR(t, y):
S = y[0]; I = y[1]; R = y[2]
return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
solution = solve_ivp(SIR, [0, steps], [population, 1, 0], t_eval=np.arange(0, steps, 1))
#def sumsq_error(parameters):
#beta, gamma = parameters
#def SIR(t,y):
#S=y[0]
#I=y[1]
#R=y[2]
#return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
#solution = solve_ivp(SIR,[0,nb_steps-1],[total_population,1,0],t_eval=np.arange(0,nb_steps,1))
#return(sum((solution.y[1]-infected_population)**2))
#msol = minimize(sumsq_error,[0.001,0.1],method='Nelder-Mead')
#if variable2 == 'optimize':
#gamma,beta == msol.x
return {
'data': [
dict(
x=solution.t,
y=solution.y[0],
type='line',
name=country+': Susceptible')
] + ([
dict(
x=solution.t,
y=solution.y[1],
type='line',
name=country+': Infected')
]) + ([
dict(
x=solution.t,
y=solution.y[2],
type='line',
name=country+': Recovered')
]) + ([
dict(
x=solution.t,
y=country_df['Infected'],
type='line',
name=country+': Original Data(Infected)')
])
}
if __name__ == '__main__':
app.run_server(debug=True)
| 34.547009 | 289 | 0.455303 | 0 | 0 | 0 | 0 | 4,447 | 0.366702 | 0 | 0 | 3,928 | 0.323905 |
964f51bb97bc51b17e213093a0a26eca6712c8ec
| 1,014 |
py
|
Python
|
tests/io/test_kepseismic.py
|
jorgemarpa/lightkurve
|
86320a67eabb3a93f60e9faff0447e4b235bccf2
|
[
"MIT"
] | 235 |
2018-01-22T01:22:10.000Z
|
2021-02-02T04:57:26.000Z
|
tests/io/test_kepseismic.py
|
jorgemarpa/lightkurve
|
86320a67eabb3a93f60e9faff0447e4b235bccf2
|
[
"MIT"
] | 847 |
2018-01-22T05:49:16.000Z
|
2021-02-10T17:05:19.000Z
|
tests/io/test_kepseismic.py
|
jorgemarpa/lightkurve
|
86320a67eabb3a93f60e9faff0447e4b235bccf2
|
[
"MIT"
] | 121 |
2018-01-22T01:11:19.000Z
|
2021-01-26T21:07:07.000Z
|
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
| 32.709677 | 155 | 0.757396 | 0 | 0 | 0 | 0 | 833 | 0.821499 | 0 | 0 | 437 | 0.430966 |
964fab6dbeeb71e25e526001be717823ea172dc3
| 32 |
py
|
Python
|
backend/appengine/routes/gallerys/model.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/gallerys/model.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/gallerys/model.py
|
SamaraCardoso27/eMakeup
|
02c3099aca85b5f54214c3a32590e80eb61621e7
|
[
"MIT"
] | null | null | null |
__author__ = 'Samara Cardoso'
| 8 | 29 | 0.71875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.5 |
965034c03fdf2183dfe02406617dfa08e3bd353a
| 1,153 |
py
|
Python
|
recipes/Python/223585_Stable_deep_sorting_dottedindexed_attributes/recipe-223585.py
|
tdiprima/code
|
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
|
[
"MIT"
] | 2,023 |
2017-07-29T09:34:46.000Z
|
2022-03-24T08:00:45.000Z
|
recipes/Python/223585_Stable_deep_sorting_dottedindexed_attributes/recipe-223585.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 32 |
2017-09-02T17:20:08.000Z
|
2022-02-11T17:49:37.000Z
|
recipes/Python/223585_Stable_deep_sorting_dottedindexed_attributes/recipe-223585.py
|
unhacker/code
|
73b09edc1b9850c557a79296655f140ce5e853db
|
[
"MIT"
] | 780 |
2017-07-28T19:23:28.000Z
|
2022-03-25T20:39:41.000Z
|
def sortByAttrs(seq, attrs):
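    # Decorate-sort-undecorate built with exec: each element is wrapped in a tuple
    # of its (dotted/indexed) attribute values, its original index and the object
    # itself; sorting the tuples orders by the attributes, the index keeps ties
    # stable, and the objects are unwrapped again afterwards.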
listComp = ['seq[:] = [(']
for attr in attrs:
listComp.append('seq[i].%s, ' % attr)
listComp.append('i, seq[i]) for i in xrange(len(seq))]')
exec('%s' % ''.join(listComp))
seq.sort()
seq[:] = [obj[-1] for obj in seq]
return
#
# begin test code
#
from random import randint
class a:
def __init__(self):
self.x = (randint(1, 5), randint(1, 5))
class b:
def __init__(self):
self.x = randint(1, 5)
self.y = (a(), a())
class c:
def __init__(self, arg):
self.x = arg
self.y = b()
if __name__ == '__main__':
aList = [c(1), c(2), c(3), c(4), c(5), c(6)]
print '\n...to be sorted by obj.y.y[1].x[1]'
print ' then, as needed, by obj.y.x'
print ' then, as needed, by obj.x\n\n ',
for i in range(6):
print '(' + str(aList[i].y.y[1].x[1]) + ',',
print str(aList[i].y.x) + ',',
print str(aList[i].x) + ') ',
sortByAttrs(aList, ['y.y[1].x[1]', 'y.x', 'x'])
print '\n\n...now sorted by listed attributes.\n\n ',
for i in range(6):
print '(' + str(aList[i].y.y[1].x[1]) + ',',
print str(aList[i].y.x) + ',',
print str(aList[i].x) + ') ',
print
#
# end test code
#
| 18.015625 | 58 | 0.542064 | 211 | 0.183001 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.281873 |
9650401ec27713e595c5dbc7faa0b1080d66e16e
| 1,710 |
py
|
Python
|
spirit/topic/forms.py
|
ImaginaryLandscape/Spirit
|
58b563c1b2290a95219257045afaa4f08ac94cbf
|
[
"MIT"
] | 974 |
2015-01-02T12:56:00.000Z
|
2022-03-24T00:01:54.000Z
|
spirit/topic/forms.py
|
ImaginaryLandscape/Spirit
|
58b563c1b2290a95219257045afaa4f08ac94cbf
|
[
"MIT"
] | 247 |
2015-01-07T02:59:26.000Z
|
2022-02-23T08:27:57.000Z
|
spirit/topic/forms.py
|
ImaginaryLandscape/Spirit
|
58b563c1b2290a95219257045afaa4f08ac94cbf
|
[
"MIT"
] | 366 |
2015-01-08T10:22:25.000Z
|
2022-02-21T12:58:31.000Z
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import smart_bytes
from django.utils import timezone
from ..core import utils
from ..core.utils.forms import NestedModelChoiceField
from ..category.models import Category
from .models import Topic
class TopicForm(forms.ModelForm):
topic_hash = forms.CharField(
max_length=32,
widget=forms.HiddenInput,
required=False)
class Meta:
model = Topic
fields = ('title', 'category')
def __init__(self, user, *args, **kwargs):
super(TopicForm, self).__init__(*args, **kwargs)
self.user = user
self.fields['category'] = NestedModelChoiceField(
queryset=Category.objects.visible().opened().ordered(),
related_name='category_set',
parent_field='parent_id',
label_field='title',
label=_("Category"),
empty_label=_("Choose a category"))
if self.instance.pk and not user.st.is_moderator:
del self.fields['category']
def get_category(self):
return self.cleaned_data['category']
def get_topic_hash(self):
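        # Return the client-supplied hash if one was posted; otherwise derive it
        # from the title and the selected category's primary key.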
topic_hash = self.cleaned_data.get('topic_hash', None)
if topic_hash:
return topic_hash
return utils.get_hash((
smart_bytes(self.cleaned_data['title']),
smart_bytes('category-{}'.format(self.cleaned_data['category'].pk))))
def save(self, commit=True):
if not self.instance.pk:
self.instance.user = self.user
self.instance.reindex_at = timezone.now()
return super(TopicForm, self).save(commit)
| 29.482759 | 81 | 0.64269 | 1,377 | 0.805263 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.10117 |
965076565be0243a0d0b837e3affc60f4cce7858
| 7,931 |
py
|
Python
|
OST_helper/parameter.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | 1 |
2020-07-31T16:43:13.000Z
|
2020-07-31T16:43:13.000Z
|
OST_helper/parameter.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | null | null | null |
OST_helper/parameter.py
|
HomeletW/OST
|
5e359d00a547af194a2a1a2591a53c93d8f40b84
|
[
"MIT"
] | null | null | null |
# constant
import json
import logging
import os
import platform
import subprocess
from datetime import date
from os.path import exists, expanduser, isdir, isfile, join, abspath, dirname
from PIL import Image
logging.basicConfig(
style="{",
format="{threadName:<10s} <{levelname:<7s}> [{asctime:<15s}] {message}",
level=logging.DEBUG
)
# paths
CCCL_PATH = None
SETTING_PATH = None
APP_LOGO = None
OST_SAMPLE = None
DEFAULT_OST_PATH = None
TFONT = None
DEFAULT_COORDINATES_PATH = None
# os
DEVICE_OS = platform.system()
# ost sample
OST_SAMPLE_IMAGE = None
# today
today = None
PRODUCTION_SUCCESS = 0
PRODUCTION_FILE_EXISTS = 1
PRODUCTION_FILE_NOT_RECOGNIZED = 2
def update_today():
global today
day = date.today()
year, month, day = day.year, day.month, day.day
today = str(year), str(month), str(day)
update_today()
def from_json(path):
with open(path, "r") as js:
data = json.load(js)
return data
def to_json(path, data):
with open(path, "w+") as js:
json.dump(data, js, indent=4)
def get_desktop_directory():
if DEVICE_OS in ["Linux", "Darwin"]:
home_dir = join(expanduser("~"), "Desktop")
elif DEVICE_OS in ["Windows"]:
home_dir = join(os.environ["USERPROFILE"], "Desktop")
else:
home_dir = None
if home_dir is not None and isdir(home_dir):
return home_dir
else:
return "/"
def open_path(abs_path):
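    # Reveal the given path in the platform's file manager (Finder, Explorer or Nautilus).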
if not exists(abs_path):
raise ValueError("Path Not Exist!")
if DEVICE_OS in ["Darwin"]:
subprocess.Popen(["open", "-R", "{}".format(abs_path)])
elif DEVICE_OS in ["Windows"]:
subprocess.Popen('cmd /c start "START" "{}"'.format(abs_path))
elif DEVICE_OS in ["Linux"]:
subprocess.Popen(["nautilus", "--browser", "{}".format(abs_path)])
else:
raise Exception(
"Not Supported Operating System [{}]!".format(DEVICE_OS))
DEFAULT_DIR = get_desktop_directory()
# "course_code": ["course_title", "course_level", "credit", "compulsory"]
default_common_course_code_library = {
}
default_setting = {
"draw_ost_template": True,
"smart_fill": True,
"train": True,
"json_dir": DEFAULT_DIR,
"img_dir": DEFAULT_DIR,
"last_session": None,
}
default_ost = {
"OST_date_of_issue": today,
"name": ["", ""],
"OEN": "",
"student_number": "",
"gender": "",
"date_of_birth": ["", "", ""],
"name_of_district_school_board": "Toronto Private Inspected",
"district_school_board_number": "",
"name_of_school": "",
"school_number": "",
"date_of_entry": ["", "", ""],
"community_involvement_flag": False,
"provincial_secondary_school_literacy_requirement_flag": False,
"specialized_program": "",
"diploma_or_certificate": "Ontario Secondary School Diploma",
"diploma_or_certificate_date_of_issue": ["", ""],
"authorization": "",
"course_list": [],
"course_font_size": 50,
"course_spacing": 5,
}
default_coordinates = {
"Size": (3300, 2532),
"Offset": (0, 0),
# (x, y, width, height)
"OST_DateOfIssue": (2301, 73, 532, 85, 55),
"Page_1": (2826, 73, 183, 85, 50),
"Page_2": (3046, 73, 183, 85, 50),
"Surname": (85, 204, 645, 94, 50),
"GivenName": (730, 204, 772, 94, 50),
"OEN": (1502, 204, 537, 94, 50),
"StudentNumber": (2039, 204, 538, 94, 50),
"Gender": (2577, 204, 136, 94, 50),
"DateOfBirth_Y": (2713, 228, 202, 70, 40),
"DateOfBirth_M": (2915, 228, 202, 70, 40),
"DateOfBirth_D": (3117, 228, 147, 70, 40),
"NameOfDSB": (85, 336, 1023, 100, 50),
"NumberOfDSB": (1108, 338, 397, 100, 50),
"NameOfSchool": (1505, 338, 807, 100, 50),
"NumberOfSchool": (2311, 338, 402, 100, 50),
"DateOfEntry_Y": (2713, 368, 202, 70, 40),
"DateOfEntry_M": (2915, 368, 202, 70, 40),
"DateOfEntry_D": (3117, 368, 147, 70, 40),
# (x, y, width, height)
"Course": (35, 564, 3230, 1419),
# (x_offset, width)
"Course_date_offset": (35 - 35, 268),
"Course_level_offset": (306 - 35, 183),
"Course_title_offset": (491 - 35, 1637),
"Course_code_offset": (2131 - 35, 244),
"Course_percentage_offset": (2378 - 35, 175),
"Course_credit_offset": (2563 - 35, 183),
"Course_compulsory_offset": (2748 - 35, 207),
"Course_note_offset": (2965 - 35, 299),
"SummaryOfCredit": (2562, 1992, 184, 69, 55),
"SummaryOfCompulsory": (2748, 1992, 207, 69, 55),
"CommunityInvolvement_True": (75, 2125),
"CommunityInvolvement_False": (385, 2125),
"ProvincialSecondarySchoolLiteracy_True": (623, 2125),
"ProvincialSecondarySchoolLiteracy_False": (1173, 2125),
"SpecializedProgram": (1436, 2104, 1828, 96, 40),
"DiplomaOrCertificate": (77, 2240, 1622, 90, 40),
"DiplomaOrCertificate_DateOfIssue_Y": (1702, 2273, 180, 57, 40),
"DiplomaOrCertificate_DateOfIssue_M": (1885, 2273, 180, 57, 40),
"Authorization": (2070, 2240, 1148, 90, 40),
}
COMMON_COURSE_CODE_LIBRARY = None
SETTING = None
DEFAULT_OST_INFO = None
COORDINATES = None
def finalize():
logger = logging.getLogger()
# save CCCL
to_json(CCCL_PATH, COMMON_COURSE_CODE_LIBRARY)
logger.info("Common Course Code Library saved!")
# save setting
to_json(SETTING_PATH, SETTING)
logger.info("Setting saved!")
def initialize(resource_path):
global COMMON_COURSE_CODE_LIBRARY
global SETTING
global DEFAULT_OST_INFO
global COORDINATES
global CCCL_PATH
global SETTING_PATH
global APP_LOGO
global OST_SAMPLE
global DEFAULT_OST_PATH
global TFONT
global DEFAULT_COORDINATES_PATH
global OST_SAMPLE_IMAGE
CCCL_PATH = join(resource_path, "CCCL.json")
SETTING_PATH = join(resource_path, "setting.json")
APP_LOGO = join(resource_path, "logo.ico")
OST_SAMPLE = join(resource_path, "ost_sample.png")
DEFAULT_OST_PATH = join(resource_path, "default_ost.json")
TFONT = join(resource_path, "font.ttf")
DEFAULT_COORDINATES_PATH = join(resource_path, "default_coordinates.json")
OST_SAMPLE_IMAGE = Image.open(OST_SAMPLE)
logger = logging.getLogger()
# initialize CCCL
try:
COMMON_COURSE_CODE_LIBRARY = from_json(CCCL_PATH)
logger.info(f"Loaded CCCL [{len(COMMON_COURSE_CODE_LIBRARY)} courses]!")
except Exception:
COMMON_COURSE_CODE_LIBRARY = default_common_course_code_library
logger.warning("No CCCL found, restoring from default...")
# initialize settings
try:
SETTING = from_json(SETTING_PATH)
if not isdir(SETTING["json_dir"]):
SETTING["json_dir"] = DEFAULT_DIR
logger.warning("json dir no longer is dir, resetting to default")
if not isdir(SETTING["img_dir"]):
SETTING["img_dir"] = DEFAULT_DIR
logger.warning("image dir no longer is dir, resetting to default")
if SETTING["last_session"] is not None and not isfile(
SETTING["last_session"]):
SETTING["last_session"] = None
logger.warning(
"last session ost file no longer exist, resetting to default")
logger.info("Loaded setting!")
except Exception:
SETTING = default_setting
logger.info("No setting found, restoring from default...")
# initialize OST
from OST_helper.data_handler.Data import OST_info
try:
DEFAULT_OST_INFO = OST_info.from_json(DEFAULT_OST_PATH)
DEFAULT_OST_INFO.update_date_of_issue(today)
logger.info("Loaded ost!")
except Exception:
DEFAULT_OST_INFO = OST_info.from_data(default_ost)
logger.warning("No default OST info found, using default...")
# initialize coordinate
try:
COORDINATES = from_json(DEFAULT_COORDINATES_PATH)
logger.info("Loaded coordinates!")
except Exception:
COORDINATES = default_coordinates
logger.warning("No default coordinates found, using default...")
| 30.980469 | 80 | 0.651998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,544 | 0.320767 |
96519d3044209db1a7fd83988e9afafa3678e598
| 398 |
py
|
Python
|
examples/compat/ggplot_point.py
|
azjps/bokeh
|
13375db53d4c60216f3bcf5aacccb081cf19450a
|
[
"BSD-3-Clause"
] | 1 |
2017-04-27T09:15:48.000Z
|
2017-04-27T09:15:48.000Z
|
app/static/libs/bokeh/examples/compat/ggplot_point.py
|
TBxy/bokeh_start_app
|
755494f6bc60e92ce17022bbd7f707a39132cbd0
|
[
"MIT"
] | null | null | null |
app/static/libs/bokeh/examples/compat/ggplot_point.py
|
TBxy/bokeh_start_app
|
755494f6bc60e92ce17022bbd7f707a39132cbd0
|
[
"MIT"
] | 1 |
2021-09-09T03:33:04.000Z
|
2021-09-09T03:33:04.000Z
|
from ggplot import aes, geom_point, ggplot, mtcars
import matplotlib.pyplot as plt
from pandas import DataFrame
from bokeh import mpl
from bokeh.plotting import output_file, show
g = ggplot(mtcars, aes(x='wt', y='mpg', color='qsec')) + geom_point()
g.make()
plt.title("Point ggplot-based plot in Bokeh.")
output_file("ggplot_point.html", title="ggplot_point.py example")
show(mpl.to_bokeh())
| 23.411765 | 69 | 0.751256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.236181 |
965346317a700c60ccb16c29a2836bf7e207f10e
| 14,194 |
py
|
Python
|
src/train_tune.py
|
vanang/korquad-challenge
|
c5df887aaa7f6b68edb5d46ad12d42097132df46
|
[
"Apache-2.0"
] | null | null | null |
src/train_tune.py
|
vanang/korquad-challenge
|
c5df887aaa7f6b68edb5d46ad12d42097132df46
|
[
"Apache-2.0"
] | null | null | null |
src/train_tune.py
|
vanang/korquad-challenge
|
c5df887aaa7f6b68edb5d46ad12d42097132df46
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
import json
from torch.utils.data import (DataLoader, SequentialSampler, RandomSampler, TensorDataset)
from tqdm import tqdm, trange
import ray
from ray import tune
from ray.tune.schedulers import HyperBandScheduler
from models.modeling_bert import QuestionAnswering, Config
from utils.optimization import AdamW, WarmupLinearSchedule
from utils.tokenization import BertTokenizer
from utils.korquad_utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions)
from debug.evaluate_korquad import evaluate as korquad_eval
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# In[2]:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# In[3]:
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# In[4]:
from ray import tune
from ray.tune import track
from ray.tune.schedulers import HyperBandScheduler
from ray.tune.suggest.bayesopt import BayesOptSearch
ray.shutdown()
ray.init(webui_host='127.0.0.1')
# In[5]:
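# Hyperparameter search space for Ray Tune: log-uniform learning rate, a grid over epoch counts, and randomly sampled length limits and seed.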
search_space = {
"max_seq_length": 512,
"doc_stride": 128,
"max_query_length": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"train_batch_size": 32,
"learning_rate": tune.loguniform(5e-4, 5e-7, 10),
"num_train_epochs": tune.grid_search([4, 8, 12, 16]),
"max_grad_norm": 1.0,
"adam_epsilon": 1e-6,
"warmup_proportion": 0.1,
"n_best_size": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"max_answer_length": tune.sample_from(lambda _: int(np.random.uniform(12, 25))), #tune.uniform(12, 25),
"seed": tune.sample_from(lambda _: int(np.random.uniform(1e+6, 1e+8)))
}
# In[ ]:
def load_and_cache_examples(predict_file, max_seq_length, doc_stride, max_query_length, tokenizer):
# Load data features from cache or dataset file
examples = read_squad_examples(input_file=predict_file,
is_training=False,
version_2_with_negative=False)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False)
return examples, features
# In[ ]:
def evaluate(predict_file, batch_size, device, output_dir, n_best_size, max_answer_length, model, eval_examples, eval_features):
""" Eval """
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
logger.info("***** Evaluating *****")
logger.info(" Num features = %d", len(dataset))
logger.info(" Batch size = %d", batch_size)
model.eval()
all_results = []
# set_seed(args) # Added here for reproductibility (even between python 2 and 3)
logger.info("Start evaluating!")
for input_ids, input_mask, segment_ids, example_indices in tqdm(dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
write_predictions(eval_examples, eval_features, all_results,
n_best_size, max_answer_length,
False, output_prediction_file, output_nbest_file,
None, False, False, 0.0)
expected_version = 'KorQuAD_v1.0'
with open(predict_file) as dataset_file:
dataset_json = json.load(dataset_file)
read_version = "_".join(dataset_json['version'].split("_")[:-1])
if (read_version != expected_version):
        logger.info('Evaluation expects ' + expected_version + ', but got dataset with ' + read_version)
dataset = dataset_json['data']
with open(os.path.join(output_dir, "predictions.json")) as prediction_file:
predictions = json.load(prediction_file)
_eval = korquad_eval(dataset, predictions)
logger.info(json.dumps(_eval))
return _eval
# In[6]:
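# Ray Tune trainable: fine-tunes the BERT QA model on KorQuAD with the sampled hyperparameters, evaluates on the dev set and reports F1.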
def train_korquad(train_config):
# setup
basepath = '/jupyterhome/enpline_bert_competition/korquad-challenge/src'
logger.info("train_config : %s" % str(train_config))
output_dir='output'
checkpoint=os.path.join(basepath,'data/bert_small_ckpt.bin')
model_config=os.path.join(basepath,'data/bert_small.json')
vocab_file=os.path.join(basepath,'data/ko_vocab_32k.txt')
train_file=os.path.join(basepath, 'data/KorQuAD_v1.0_train.json')
predict_file=os.path.join(basepath, 'data/KorQuAD_v1.0_dev.json')
null_score_diff_threshold = 0.0
no_cuda = False
verbose_logging = False
fp16 = True
fp16_opt_level = 'O2'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, fp16))
random.seed(train_config['seed'])
np.random.seed(train_config['seed'])
torch.manual_seed(train_config['seed'])
if n_gpu > 0:
torch.cuda.manual_seed_all(train_config['seed'])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
tokenizer = BertTokenizer(vocab_file, max_len=train_config['max_seq_length'], do_basic_tokenize=True)
# Prepare model
config = Config.from_json_file(model_config)
model = QuestionAnswering(config)
model.bert.load_state_dict(torch.load(checkpoint))
num_params = count_parameters(model)
logger.info("Total Parameter: %d" % num_params)
logger.info("Hyper-parameters: %s" % str(train_config))
paramfile_path = os.path.join(output_dir, 'hyperparameters.txt')
with open(paramfile_path, "w") as paramfile:
logger.info("writing hyperparameters at",paramfile_path)
paramfile.write("%s" % str(train_config))
model.to(device)
cached_train_features_file = train_file + '_{0}_{1}_{2}'.format(str(train_config['max_seq_length']), str(train_config['doc_stride']),
str(train_config['max_query_length']))
train_examples = read_squad_examples(input_file=train_file, is_training=True, version_2_with_negative=False)
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=train_config['max_seq_length'],
doc_stride=train_config['doc_stride'],
max_query_length=train_config['max_query_length'],
is_training=True)
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
num_train_optimization_steps = int(len(train_features) / train_config['train_batch_size']) * train_config['num_train_epochs']
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=train_config['learning_rate'],
eps=train_config['adam_epsilon'])
scheduler = WarmupLinearSchedule(optimizer,
warmup_steps=num_train_optimization_steps*0.1,
t_total=num_train_optimization_steps)
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Batch size = %d", train_config['train_batch_size'])
logger.info(" Num steps = %d", num_train_optimization_steps)
num_train_step = num_train_optimization_steps
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_config['train_batch_size'])
model.train()
global_step = 0
epoch = 0
output_model_file = ''
# training
# for epoch_idx in trange(int(train_config['num_train_epochs'])):
# iter_bar = tqdm(train_dataloader, desc="Train(XX Epoch) Step(XX/XX) (Mean loss=X.X) (loss=X.X)")
for epoch_idx in range(int(train_config['num_train_epochs'])):
tr_step, total_loss, mean_loss = 0, 0., 0.
for step, batch in enumerate(train_dataloader):
if n_gpu == 1:
batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), train_config['max_grad_norm'])
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), train_config['max_grad_norm'])
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
tr_step += 1
total_loss += loss
mean_loss = total_loss / tr_step
# iter_bar.set_description("Train Step(%d / %d) (Mean loss=%5.5f) (loss=%5.5f)" %
# (global_step, num_train_step, mean_loss, loss.item()))
epoch += 1
logger.info("** ** * Saving file * ** **")
model_checkpoint = "korquad_%d.bin" % (epoch)
logger.info(model_checkpoint)
#save the last model
output_model_file = os.path.join(output_dir, model_checkpoint)
if n_gpu > 1:
torch.save(model.module.state_dict(), output_model_file)
else:
torch.save(model.state_dict(), output_model_file)
# Evaluate with final model
examples, features = load_and_cache_examples(predict_file, train_config['max_seq_length'], train_config['doc_stride'],
train_config['max_query_length'], tokenizer)
eval = evaluate(predict_file=predict_file, batch_size=16, device=device, output_dir=output_dir, n_best_size=train_config['n_best_size'], max_answer_length=train_config['max_answer_length'],
model=model, eval_examples=examples, eval_features=features)
logger.info("-" * 16, 'evaltion', "-" * 16)
logger.info(eval)
track.log(f1 = eval['f1'])
# In[ ]:
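# Launch the sweep: HyperBand scheduler maximizing the reported F1, one GPU per trial.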
analysis = tune.run(train_korquad, config=search_space, scheduler=HyperBandScheduler(metric='f1', mode='max'), resources_per_trial={'gpu':1})
# In[ ]:
dfs = analysis.trial_dataframes
# In[ ]:
# ax = None
# for d in dfs.values():
# ax = d.mean_loss.plot(ax=ax, legend=True)
# ax.set_xlabel("Epochs")
# ax.set_ylabel("Mean Loss")
| 38.994505 | 193 | 0.659222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,677 | 0.188601 |
9655150c478e5c7edceea8519f955d5cbf7c2792
| 3,604 |
py
|
Python
|
BuyandBye_project/users/forms.py
|
sthasam2/BuyandBye
|
07a998f289f9ae87b234cd6ca653a4fdb2765b95
|
[
"MIT"
] | 1 |
2019-12-26T16:52:10.000Z
|
2019-12-26T16:52:10.000Z
|
BuyandBye_project/users/forms.py
|
sthasam2/buyandbye
|
07a998f289f9ae87b234cd6ca653a4fdb2765b95
|
[
"MIT"
] | 13 |
2021-06-02T03:51:06.000Z
|
2022-03-12T00:53:22.000Z
|
BuyandBye_project/users/forms.py
|
sthasam2/buyandbye
|
07a998f289f9ae87b234cd6ca653a4fdb2765b95
|
[
"MIT"
] | null | null | null |
from datetime import date
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from phonenumber_field.formfields import PhoneNumberField
from .models import Profile
from .options import STATE_CHOICES, YEARS
from .utils import AgeValidator
class UserRegisterForm(UserCreationForm):
first_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Given name"})
)
middle_name = forms.CharField(
max_length=30,
required=False,
widget=forms.TextInput(attrs={"placeholder": "Middle name"}),
)
last_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Surname"})
)
date_of_birth = forms.DateField(
label="Date of Birth",
initial=date.today(),
required=True,
help_text="Age must be above 16",
validators=[AgeValidator],
widget=forms.SelectDateWidget(years=YEARS),
)
email = forms.EmailField(
max_length=150,
widget=forms.TextInput(attrs={"placeholder": "e.g. [email protected]"}),
)
address1 = forms.CharField(
max_length=100,
help_text="Street, District",
widget=forms.TextInput(attrs={"placeholder": "Street, District"}),
)
address2 = forms.CharField(
max_length=100,
help_text="State",
widget=forms.Select(attrs={"placeholder": "State"}, choices=STATE_CHOICES),
)
phone = PhoneNumberField(
required=False,
initial="+977",
help_text="Phone number must contain country calling code (e.g. +97798XXYYZZSS)",
)
class Meta:
model = User
fields = [
"first_name",
"middle_name",
"last_name",
"date_of_birth",
"username",
"email",
"phone",
"password1",
"password2",
"address1",
"address2",
]
# widget={
# 'username': forms.TextInput(attrs={'placeholder': 'Enter desired username.'}),
# }
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = [
"username",
]
class ProfileUpdateForm(forms.ModelForm):
first_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Given name"})
)
middle_name = forms.CharField(
max_length=30,
required=False,
widget=forms.TextInput(attrs={"placeholder": "Middle name"}),
)
last_name = forms.CharField(
max_length=30, widget=forms.TextInput(attrs={"placeholder": "Surname"})
)
email = forms.EmailField(
max_length=150,
widget=forms.TextInput(attrs={"placeholder": "e.g. [email protected]"}),
)
address1 = forms.CharField(
max_length=100,
help_text="Street, District",
widget=forms.TextInput(attrs={"placeholder": "Street, District"}),
)
address2 = forms.CharField(
max_length=100,
help_text="State",
widget=forms.Select(attrs={"placeholder": "State"}, choices=STATE_CHOICES),
)
phone = PhoneNumberField(
required=False,
help_text="Phone number must contain country calling code (e.g. +97798XXYYZZSS)",
)
class Meta:
model = Profile
fields = [
"first_name",
"middle_name",
"last_name",
"email",
"address1",
"address2",
"phone",
"image",
]
| 29.064516 | 92 | 0.59434 | 3,283 | 0.910932 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.235849 |
96565fe229818a242f95852b7feea959f0bbeb31
| 10,110 |
py
|
Python
|
kolab/tokibi/tokibi.py
|
oshiooshi/kolab
|
5f34614a995b2a31156b65e6eb9d512b9867540e
|
[
"MIT"
] | null | null | null |
kolab/tokibi/tokibi.py
|
oshiooshi/kolab
|
5f34614a995b2a31156b65e6eb9d512b9867540e
|
[
"MIT"
] | null | null | null |
kolab/tokibi/tokibi.py
|
oshiooshi/kolab
|
5f34614a995b2a31156b65e6eb9d512b9867540e
|
[
"MIT"
] | null | null | null |
import sys
import pegtree as pg
from pegtree.visitor import ParseTreeVisitor
import random
# from . import verb
import verb
EMPTY = tuple()
# Options
OPTION = {
    'Simple': False,  # prefer simple expressions
    'Block': False,  # attach an <e> </e> block to each Expression
    'EnglishFirst': False,  # prioritize English translation accuracy
    'ShuffleSynonym': True,  # shuffle between alternative (synonymous) wordings
    'MultipleSentence': False,  # multi-sentence mode
    'ShuffleOrder': True,  # also shuffle word order
    'Verbose': True,  # with debug output
}
# Template example: {心が折れた|やる気が失せた}フリをする ("pretend to be {heartbroken|demotivated}")
# Current: [猫|ネコ] -> picked at random
# Future: 猫 -> synonyms -> random (automatic); how do we build this?
# Swap the word order -> NSuffix (particles such as 「に」)
# [ネコ|ネコ|] -> the element may be omitted
# Inject noise <- BERT
# "Aに Bを 足す" ("add B to A"): drop 「Aに」 -> "Bを 足す" -> report "A is missing."
# randomize
RandomIndex = 0
def randomize():
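    # Draw a fresh shared random index; it decides which synonym/alternative gets picked.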
global RandomIndex
if OPTION['ShuffleSynonym']:
RandomIndex = random.randint(1, 1789)
else:
RandomIndex = 0
def random_index(arraysize: int, seed):
if OPTION['ShuffleSynonym']:
return (RandomIndex + seed) % arraysize
return 0
def alt(s: str):
if '|' in s:
ss = s.split('|')
if OPTION['EnglishFirst']:
            return ss[-1]  # the last alternative is the English one
return ss[random_index(len(ss), len(s))]
return s
def choice(ss: list):
return ss[random_index(len(ss), 17)]
# def conjugate(w, mode=0, vpos=None):
# suffix = ''
# if mode & verb.CASE == verb.CASE:
# if RandomIndex % 2 != 0:
# mode = (mode & ~verb.CASE) | verb.NOUN
# suffix = alt('とき、|場合、|際、')
# else:
# suffix = '、'
# if mode & verb.THEN == verb.THEN:
# if RandomIndex % 2 != 0:
# mode = (mode & ~verb.THEN) | verb._I
# suffix = '、'
# return verb.conjugate(w, mode, vpos) + suffix
# NExpr
def identity(e):
return e
class NExpr(object):
subs: tuple
def __init__(self, subs=EMPTY):
self.subs = tuple(NWord(s) if isinstance(s, str) else s for s in subs)
def apply(self, dict_or_func=identity):
if len(self.subs) > 0:
(e.apply(dict_or_func) for e in self.subs)
return self
def generate(self):
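        # Re-emit the expression with fresh randomization, collecting distinct surface strings until five duplicate generations have occurred.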
ss = []
c = 0
while c < 5:
randomize()
buffers = []
self.emit(buffers)
s = ''.join(buffers)
if s not in ss:
ss.append(s)
else:
c += 1
return ss
class NWord(NExpr):
w: str
def __init__(self, w):
NExpr.__init__(self)
self.w = str(w)
def __repr__(self):
if '|' in self.w:
return '[' + self.w + ']'
return self.w
def apply(self, dict_or_func=identity):
if not isinstance(dict_or_func, dict):
return dict_or_func(self)
return self
def emit(self, buffers):
buffers.append(alt(self.w))
class NVerb(NExpr):
w: str
vpos: str
mode: int
def __init__(self, w, vpos, mode=0):
NExpr.__init__(self)
self.w = str(w)
self.vpos = vpos
self.mode = mode
def __repr__(self):
return verb.conjugate(self.w, self.mode, self.vpos)
def apply(self, dict_or_func=identity):
if not isinstance(dict_or_func, dict):
return dict_or_func(self)
return self
def emit(self, buffers):
buffers.append(verb.conjugate(self.w, self.mode|verb.POL, self.vpos))
class NChoice(NExpr):
def __init__(self, *subs):
NExpr.__init__(self, subs)
def __repr__(self):
ss = []
for p in self.subs:
ss.append(repr(p))
return ' | '.join(ss)
def apply(self, dict_or_func=identity):
return NChoice(*(e.apply(dict_or_func) for e in self.subs))
def emit(self, buffers):
choice(self.subs).emit(buffers)
class NPhrase(NExpr):
def __init__(self, *subs):
NExpr.__init__(self, subs)
def __repr__(self):
ss = []
for p in self.subs:
ss.append(grouping(p))
return ' '.join(ss)
def apply(self, dict_or_func=identity):
return NPhrase(*(e.apply(dict_or_func) for e in self.subs))
def emit(self, buffers):
for p in self.subs:
p.emit(buffers)
def grouping(e):
if isinstance(e, NPhrase):
return '{' + repr(e) + '}'
return repr(e)
class NOrdered(NExpr):
def __init__(self, *subs):
NExpr.__init__(self, subs)
def __repr__(self):
ss = []
for p in self.subs:
ss.append(grouping(p))
return '/'.join(ss)
def apply(self, dict_or_func=identity):
return NOrdered(*(e.apply(dict_or_func) for e in self.subs))
def emit(self, buffers):
subs = list(self.subs)
if OPTION['ShuffleOrder']:
random.shuffle(subs)
for p in subs:
p.emit(buffers)
class NClause(NExpr):  # noun clause: a verb ("〜する") modifying a noun
def __init__(self, verb, noun):
NExpr.__init__(self, (verb,noun))
def __repr__(self):
return grouping(self.subs[0]) + grouping(self.subs[1])
def apply(self, dict_or_func=identity):
return NClause(*(e.apply(dict_or_func) for e in self.subs))
def emit(self, buffers):
verb = self.subs[0]
noun = self.subs[1]
if isinstance(verb, NClause):
verb.subs[0].emit(buffers)
else:
verb.emit(buffers)
noun.emit(buffers)
class NSuffix(NExpr):
suffix: str
def __init__(self, e, suffix):
NExpr.__init__(self, (e,))
self.suffix = suffix
def __repr__(self):
return grouping(self.subs[0]) + self.suffix
def apply(self, dict_or_func=identity):
return NSuffix(self.subs[0].apply(dict_or_func), self.suffix)
def emit(self, buffers):
self.subs[0].emit(buffers)
buffers.append(self.suffix)
neko = NWord('猫|ねこ|ネコ')
print('@', neko, neko.generate())
wo = NSuffix(neko, 'を')
print('@', wo, wo.generate())
ni = NSuffix(neko, 'に')
print('@', ni, ni.generate())
ageru = NVerb('あげる', 'V1', 0)
e = NPhrase(NOrdered(ni, wo), ageru)
print('@', e, e.generate())
class NLiteral(NExpr):
w: str
def __init__(self, w):
NExpr.__init__(self)
self.w = str(w)
def __repr__(self):
return self.w
def apply(self, dict_or_func=identity):
        if not isinstance(dict_or_func, dict):
            return dict_or_func(self)
        return self
def emit(self, buffers):
buffers.append(self.w)
class NSymbol(NExpr):
index: int
w: str
def __init__(self, index, w):
NExpr.__init__(self)
self.index = index
self.w = str(w)
def __repr__(self):
return self.w
def apply(self, dict_or_func=identity):
if isinstance(dict_or_func, dict):
if self.index in dict_or_func:
return dict_or_func[self.index]
if self.w in dict_or_func:
return dict_or_func[self.w]
return self
else:
return dict_or_func(self)
def emit(self, buffers):
buffers.append(self.w)
## You do not need to worry about anything below this point.
## It converts text into an NExpr (syntax tree).
peg = pg.grammar('tokibi.pegtree')
tokibi_parser = pg.generate(peg)
class TokibiReader(ParseTreeVisitor):
def __init__(self, synonyms=None):
ParseTreeVisitor.__init__(self)
self.indexes = {}
self.synonyms = {} if synonyms is None else synonyms
def parse(self, s):
tree = tokibi_parser(s)
self.indexes = {}
nexpr = self.visit(tree)
return nexpr #, self.indexes
# [#NPhrase [#NOrdered [#NSuffix [#NSymbol 'str'][# 'が']][#NSuffix [#NSymbol 'prefix'][# 'で']]][#NWord '始まるかどうか']]
def acceptNChoice(self, tree):
ne = NChoice(*(self.visit(t) for t in tree))
return ne
def acceptNPhrase(self, tree):
ne = NPhrase(*(self.visit(t) for t in tree))
if len(ne.subs) == 1:
return ne.subs[0]
return ne
def acceptNClause(self, tree):
ne = NClause(self.visit(tree[0]), self.visit(tree[1]))
return ne
def acceptNOrdered(self, tree):
ne = NOrdered(*(self.visit(t) for t in tree))
return ne
def acceptNSuffix(self, tree):
t = self.visit(tree[0])
suffix = str(tree[1])
return NSuffix(t, suffix)
def acceptNSymbol(self, tree):
s = str(tree)
if s not in self.indexes:
self.indexes[s] = len(self.indexes)
return NSymbol(self.indexes[s], s)
def acceptNLiteral(self, tree):
s = str(tree)
return NLiteral(s)
def acceptNWord(self, tree):
s = str(tree)
w, vpos, mode = verb.parse(s.split('|')[0])
if vpos.startswith('V') or vpos == 'ADJ':
return NVerb(w, vpos, mode)
return NWord(s)
def acceptNPiece(self, tree):
s = str(tree)
return NWord(s)
tokibi_reader = TokibiReader()
def parse(s, synonyms=None):
if synonyms is not None:
tokibi_reader.synonyms = synonyms
if s.endswith('かどうか'):
s = s[:-4]
e = tokibi_reader.parse(s)
e = NClause(e, NWord('かどうか'))
else:
e = tokibi_reader.parse(s)
#print(grouping(e[0]))
return e
def read_tsv(filename):
with open(filename) as f:
for line in f.readlines():
line = line.strip()
if line == '' or line.startswith('#'):
continue
if '#' in line:
line = line.split('#')[1].strip()
e = parse(line)
print(e, e.generate())
# t = parse('{心が折れた|やる気が失せた}気がする')
# print(t, t.generate())
# t = parse('望遠鏡で{子犬が泳ぐ}のを見る')
# print(t)
# if __name__ == '__main__':
# if len(sys.argv) > 1:
# read_tsv(sys.argv[1])
# else:
# e = parse('望遠鏡で/{[子犬|Puppy]が泳ぐ}[様子|の]を見る')
# print(e, e.generate())
# e2 = parse('[猫|ねこ]が/[虎|トラ]と等しくないかどうか')
# #e2, _ = parse('{Aと/B(子犬)を/順に/[ひとつずつ]表示した}結果')
# e = parse('Aを調べる')
# e = e.apply({0: e2})
# print(e, e.generate())
# e = parse('A(事実)を調べる')
# e = e.apply({0: e2})
# print(e, e.generate())
| 24.128878 | 114 | 0.559149 | 6,600 | 0.610659 | 0 | 0 | 0 | 0 | 0 | 0 | 2,565 | 0.237324 |
96566f3a27305df43ef46e729f0d4af5e2007006
| 1,275 |
py
|
Python
|
kaggle_downloader/kaggle_downloader.py
|
lars-reimann/kaggle-downloader
|
583e68ee1c4860a153ae38f2a1cdba108ea8cb5a
|
[
"MIT"
] | null | null | null |
kaggle_downloader/kaggle_downloader.py
|
lars-reimann/kaggle-downloader
|
583e68ee1c4860a153ae38f2a1cdba108ea8cb5a
|
[
"MIT"
] | 3 |
2021-08-05T11:05:32.000Z
|
2022-03-01T13:05:18.000Z
|
kaggle_downloader/kaggle_downloader.py
|
lars-reimann/kaggle-downloader
|
583e68ee1c4860a153ae38f2a1cdba108ea8cb5a
|
[
"MIT"
] | null | null | null |
from typing import Callable, Union
from kaggle import KaggleApi
from kaggle.models.kaggle_models_extended import Competition, Kernel
class KaggleDownloader:
def __init__(self) -> None:
self.client = KaggleApi()
self.client.authenticate()
def fetch_competition_refs(self) -> list[str]:
return self._fetch_all_pages(
lambda page: self.client.competitions_list(page=page)
)
def fetch_kernel_refs(self, competition_ref: str) -> list[str]:
return self._fetch_all_pages(
lambda page: self.client.kernels_list(
competition=competition_ref, page=page, page_size=100
)
)
def fetch_notebook(self, kernel_ref: str) -> dict:
user_name, kernel_slug = kernel_ref.split("/")
return self.client.kernel_pull(user_name, kernel_slug)
@staticmethod
def _fetch_all_pages(
fetcher: Callable[[int], list[Union[Kernel, Competition]]]
) -> list[str]:
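        # Page through the Kaggle API until an empty page comes back, collecting the refs.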
result = []
page = 1
while True:
# noinspection PyUnresolvedReferences
batch = [it.ref for it in fetcher(page)]
if len(batch) == 0:
break
result += batch
page += 1
return result
| 27.717391 | 69 | 0.614902 | 1,138 | 0.892549 | 0 | 0 | 416 | 0.326275 | 0 | 0 | 40 | 0.031373 |
96578d44622fea775471eea152128045fac7dede
| 1,281 |
py
|
Python
|
trip/urls.py
|
tboonma/thairepose
|
89aff7836a29bfee58a633db10c19d5e1ce4475f
|
[
"MIT"
] | 4 |
2021-11-07T05:50:41.000Z
|
2021-12-01T08:57:12.000Z
|
trip/urls.py
|
tboonma/thairepose
|
89aff7836a29bfee58a633db10c19d5e1ce4475f
|
[
"MIT"
] | 111 |
2021-10-19T09:24:14.000Z
|
2021-11-28T18:02:21.000Z
|
trip/urls.py
|
tboonma/thairepose
|
89aff7836a29bfee58a633db10c19d5e1ce4475f
|
[
"MIT"
] | 2 |
2021-11-28T06:37:03.000Z
|
2022-01-16T18:17:02.000Z
|
from django.urls import path
from . import views
app_name = 'trip'
urlpatterns = [
path('', views.index, name='index'),
path('tripblog/', views.AllTrip.as_view(), name="tripplan"),
path('likereview/', views.like_comment_view, name="like_comment"),
path('tripdetail/<int:pk>/', views.trip_detail, name="tripdetail"),
path('addpost/', views.add_post, name="addpost"),
path('likepost/', views.like_post, name="like_trip"),
path('tripdetail/edit/<int:pk>', views.edit_post, name='editpost'),
path('tripdetail/<int:pk>/remove', views.delete_post, name='deletepost'),
path('category/<category>', views.CatsListView.as_view(), name='category'),
path('addcomment/', views.post_comment, name="add_comment"),
path('action/gettripqueries', views.get_trip_queries, name='get-trip-query'),
# 127.0.0.1/domnfoironkwe_0394
path('place/<str:place_id>/', views.place_info, name='place-detail'),
path('place/<str:place_id>/like', views.place_like, name='place-like'),
path('place/<str:place_id>/dislike', views.place_dislike, name='place-dislike'),
path('place/<str:place_id>/addreview', views.place_review, name='place-review'),
path('place/<str:place_id>/removereview', views.place_remove_review, name='place-remove-review'),
]
| 53.375 | 101 | 0.698673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.4395 |
9657e65ccf97f075f8a25b19bf95a3ca2e2decf0
| 5,010 |
py
|
Python
|
tests/test_rpc.py
|
tzoiker/aio-pika
|
5a04853006310d2fbf458c449b0ea98427668fe8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rpc.py
|
tzoiker/aio-pika
|
5a04853006310d2fbf458c449b0ea98427668fe8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rpc.py
|
tzoiker/aio-pika
|
5a04853006310d2fbf458c449b0ea98427668fe8
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import pytest
from aio_pika import Message, connect_robust
from aio_pika.exceptions import DeliveryError
from aio_pika.patterns.rpc import RPC, log as rpc_logger
from tests import AMQP_URL
from tests.test_amqp import BaseTestCase
pytestmark = pytest.mark.asyncio
def rpc_func(*, foo, bar):
assert not foo
assert not bar
return {'foo': 'bar'}
class TestCase(BaseTestCase):
async def test_simple(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.register('test.rpc', rpc_func, auto_delete=True)
result = await rpc.proxy.test.rpc(foo=None, bar=None)
self.assertDictEqual(result, {'foo': 'bar'})
await rpc.unregister(rpc_func)
await rpc.close()
# Close already closed
await rpc.close()
async def test_error(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.register('test.rpc', rpc_func, auto_delete=True)
with pytest.raises(AssertionError):
await rpc.proxy.test.rpc(foo=True, bar=None)
await rpc.unregister(rpc_func)
await rpc.close()
async def test_unroutable(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.register('test.rpc', rpc_func, auto_delete=True)
with pytest.raises(DeliveryError):
await rpc.proxy.unroutable()
await rpc.unregister(rpc_func)
await rpc.close()
async def test_timed_out(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.register('test.rpc', rpc_func, auto_delete=True)
await channel.declare_queue(
'test.timed_out', auto_delete=True, arguments={
'x-dead-letter-exchange': RPC.DLX_NAME,
}
)
with pytest.raises(asyncio.TimeoutError):
await rpc.call('test.timed_out', expiration=1)
await rpc.unregister(rpc_func)
await rpc.close()
async def test_close_twice(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.close()
await rpc.close()
async def test_init_twice(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.initialize()
await rpc.close()
async def test_send_unknown_message(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
with self.assertLogs(rpc_logger, logging.WARNING) as capture:
await channel.default_exchange.publish(
Message(b''), routing_key=rpc.result_queue.name
)
await asyncio.sleep(0.5, loop=self.loop)
self.assertIn('Unknown message: ', capture.output[0])
with self.assertLogs(rpc_logger, logging.WARNING) as capture:
await channel.default_exchange.publish(
Message(b''), routing_key='should-returned'
)
await asyncio.sleep(0.5, loop=self.loop)
self.assertIn('Unknown message was returned: ', capture.output[0])
await rpc.close()
async def test_close_cancelling(self):
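        # Closing the RPC while calls are still in flight should cancel the pending tasks.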
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
async def sleeper():
await asyncio.sleep(60, loop=self.loop)
method_name = self.get_random_name('test', 'sleeper')
await rpc.register(method_name, sleeper, auto_delete=True)
tasks = set()
for _ in range(10):
tasks.add(self.loop.create_task(rpc.call(method_name)))
await rpc.close()
logging.info("Waiting for results")
for task in tasks:
with pytest.raises(asyncio.CancelledError):
await task
async def test_register_twice(self):
channel = await self.create_channel()
rpc = await RPC.create(channel, auto_delete=True)
await rpc.register('test.sleeper', lambda x: None, auto_delete=True)
with pytest.raises(RuntimeError):
await rpc.register(
'test.sleeper', lambda x: None, auto_delete=True
)
await rpc.register(
'test.one', rpc_func, auto_delete=True
)
with pytest.raises(RuntimeError):
await rpc.register(
'test.two', rpc_func, auto_delete=True
)
await rpc.unregister(rpc_func)
await rpc.unregister(rpc_func)
await rpc.close()
class TestCaseRobust(TestCase):
async def create_connection(self, cleanup=True):
client = await connect_robust(str(AMQP_URL), loop=self.loop)
if cleanup:
self.addCleanup(client.close)
return client
| 28.146067 | 76 | 0.633533 | 4,613 | 0.920758 | 0 | 0 | 0 | 0 | 4,494 | 0.897006 | 296 | 0.059082 |
96588284712f2b02b4c2431118e6f0abd22431a0
| 8,331 |
py
|
Python
|
tests/python/pants_test/base/test_cmd_line_spec_parser.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/base/test_cmd_line_spec_parser.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/base/test_cmd_line_spec_parser.py
|
dturner-tw/pants
|
3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants_test.base_test import BaseTest
class CmdLineSpecParserTest(BaseTest):
@property
def alias_groups(self):
return BuildFileAliases(
targets={
'generic': Target
}
)
def setUp(self):
super(CmdLineSpecParserTest, self).setUp()
def add_target(path, name):
self.add_to_build_file(path, 'generic(name="{name}")\n'.format(name=name))
add_target('BUILD', 'root')
add_target('a', 'a')
add_target('a', 'b')
add_target('a/b', 'b')
add_target('a/b', 'c')
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper)
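  # The assertions below exercise the command-line spec syntax: "path:name"
  # addresses a single target, "path" alone defaults to the target named after
  # the directory, a trailing ":" selects every target defined in that
  # directory, and a trailing "::" also recurses into subdirectories.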
def test_normal(self):
self.assert_parsed(cmdline_spec=':root', expected=[':root'])
self.assert_parsed(cmdline_spec='//:root', expected=[':root'])
self.assert_parsed(cmdline_spec='a', expected=['a'])
self.assert_parsed(cmdline_spec='a:a', expected=['a'])
self.assert_parsed(cmdline_spec='a/b', expected=['a/b'])
self.assert_parsed(cmdline_spec='a/b:b', expected=['a/b'])
self.assert_parsed(cmdline_spec='a/b:c', expected=['a/b:c'])
def test_sibling(self):
self.assert_parsed(cmdline_spec=':', expected=[':root'])
self.assert_parsed(cmdline_spec='//:', expected=[':root'])
self.assert_parsed(cmdline_spec='a:', expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec='//a:', expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec='a/b:', expected=['a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a/b:', expected=['a/b', 'a/b:c'])
def test_sibling_or_descendents(self):
self.assert_parsed(cmdline_spec='::', expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//::', expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='a::', expected=['a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a::', expected=['a', 'a:b', 'a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='a/b::', expected=['a/b', 'a/b:c'])
self.assert_parsed(cmdline_spec='//a/b::', expected=['a/b', 'a/b:c'])
def test_absolute(self):
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a'), expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a:a'), expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a:'), expected=['a', 'a:b'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, 'a::'),
expected=['a', 'a:b', 'a/b', 'a/b:c'])
double_absolute = '/' + os.path.join(self.build_root, 'a')
self.assertEquals('//', double_absolute[:2],
'A sanity check we have a leading-// absolute spec')
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses(double_absolute).next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('/not/the/buildroot/a').next()
def test_cmd_line_affordances(self):
self.assert_parsed(cmdline_spec='./:root', expected=[':root'])
self.assert_parsed(cmdline_spec='//./:root', expected=[':root'])
self.assert_parsed(cmdline_spec='//./a/../:root', expected=[':root'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/../:root'),
expected=[':root'])
self.assert_parsed(cmdline_spec='a/', expected=['a'])
self.assert_parsed(cmdline_spec='./a/', expected=['a'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/'), expected=['a'])
self.assert_parsed(cmdline_spec='a/b/:b', expected=['a/b'])
self.assert_parsed(cmdline_spec='./a/b/:b', expected=['a/b'])
self.assert_parsed(cmdline_spec=os.path.join(self.build_root, './a/b/:b'), expected=['a/b'])
def test_cmd_line_spec_list(self):
self.assert_parsed_list(cmdline_spec_list=['a', 'a/b'], expected=['a', 'a/b'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
def test_does_not_exist(self):
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c').next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c:').next()
with self.assertRaises(self.spec_parser.BadSpecError):
self.spec_parser.parse_addresses('c::').next()
def assert_parsed(self, cmdline_spec, expected):
def sort(addresses):
return sorted(addresses, key=lambda address: address.spec)
self.assertEqual(sort(Address.parse(addr) for addr in expected),
sort(self.spec_parser.parse_addresses(cmdline_spec)))
def assert_parsed_list(self, cmdline_spec_list, expected):
def sort(addresses):
return sorted(addresses, key=lambda address: address.spec)
self.assertEqual(sort(Address.parse(addr) for addr in expected),
sort(self.spec_parser.parse_addresses(cmdline_spec_list)))
def test_spec_excludes(self):
expected_specs = [':root', 'a', 'a:b', 'a/b', 'a/b:c']
# This bogus BUILD file gets in the way of parsing.
self.add_to_build_file('some/dir', 'COMPLETELY BOGUS BUILDFILE)\n')
with self.assertRaises(CmdLineSpecParser.BadSpecError):
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
# Test absolute path in spec_excludes.
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
spec_excludes=[os.path.join(self.build_root, 'some')])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
# Test relative path in spec_excludes.
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
spec_excludes=['some'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
def test_exclude_target_regexps(self):
expected_specs = [':root', 'a', 'a:b', 'a/b', 'a/b:c']
# This bogus BUILD file gets in the way of parsing.
self.add_to_build_file('some/dir', 'COMPLETELY BOGUS BUILDFILE)\n')
with self.assertRaises(CmdLineSpecParser.BadSpecError):
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper,
exclude_target_regexps=[r'.*some/dir.*'])
self.assert_parsed_list(cmdline_spec_list=['::'], expected=expected_specs)
class CmdLineSpecParserBadBuildTest(BaseTest):
def setUp(self):
super(CmdLineSpecParserBadBuildTest, self).setUp()
self.add_to_build_file('bad/a', 'a_is_bad')
self.add_to_build_file('bad/b', 'b_is_bad')
self.spec_parser = CmdLineSpecParser(self.build_root, self.address_mapper)
self.NO_FAIL_FAST_RE = re.compile(r"""^--------------------
.*
Exception message: name 'a_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*/bad/a/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed\.
.*
Exception message: name 'b_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*T/bad/b/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/b' failed\.
Invalid BUILD files for \[::\]$""", re.DOTALL)
self.FAIL_FAST_RE = """^name 'a_is_bad' is not defined
while executing BUILD file BuildFile\((/[^/]+)*/bad/a/BUILD\, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed.$"""
def test_bad_build_files(self):
with self.assertRaisesRegexp(self.spec_parser.BadSpecError, self.NO_FAIL_FAST_RE):
list(self.spec_parser.parse_addresses('::'))
def test_bad_build_files_fail_fast(self):
with self.assertRaisesRegexp(self.spec_parser.BadSpecError, self.FAIL_FAST_RE):
list(self.spec_parser.parse_addresses('::', True))
| 42.723077 | 101 | 0.680471 | 7,744 | 0.92954 | 0 | 0 | 120 | 0.014404 | 0 | 0 | 1,887 | 0.226503 |
9658caeffcd376fc80bf7f4677ee09db9594f2eb
| 655 |
py
|
Python
|
code/strats/switcher.py
|
Barigamb738/PrisonersDilemmaTournament
|
22834192a30cbefdef4f06f41eea5ef7ec3a0652
|
[
"MIT"
] | 23 |
2021-05-20T07:34:33.000Z
|
2021-06-20T13:09:04.000Z
|
code/strats/switcher.py
|
Barigamb738/PrisonersDilemmaTournament
|
22834192a30cbefdef4f06f41eea5ef7ec3a0652
|
[
"MIT"
] | 19 |
2021-05-21T04:10:55.000Z
|
2021-06-13T15:17:52.000Z
|
code/strats/switcher.py
|
Barigamb738/PrisonersDilemmaTournament
|
22834192a30cbefdef4f06f41eea5ef7ec3a0652
|
[
"MIT"
] | 43 |
2021-05-21T02:24:35.000Z
|
2021-06-24T21:08:11.000Z
|
def strategy(history, memory):
round = history.shape[1]
GRUDGE = 0
LASTACTION = 1
if round == 0:
mem = []
mem.append(False)
mem.append(0)
return "cooperate", mem
mem = memory
if mem[GRUDGE]:
return "defect", mem
if round >= 5:
sin = 0
for i in range(1, 5):
if history[1, -i] == 0:
sin += 1
if sin == 4:
mem[GRUDGE] = True
return "defect", mem
if mem[LASTACTION] == 0:
mem[LASTACTION] = 1
return "cooperate", mem
else:
mem[LASTACTION] = 0
return "defect", mem
| 24.259259 | 36 | 0.464122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.070229 |
965a2d366a9e0c3114e09f3517d25bed152a9d40
| 2,363 |
py
|
Python
|
pull_into_place/commands/run_additional_metrics.py
|
Kortemme-Lab/pull_into_place
|
0019a6cec2a6130ebbaa49d7ab67d4c840fbe33c
|
[
"MIT"
] | 3 |
2018-05-31T18:46:46.000Z
|
2020-05-04T03:27:38.000Z
|
pull_into_place/commands/run_additional_metrics.py
|
Kortemme-Lab/pull_into_place
|
0019a6cec2a6130ebbaa49d7ab67d4c840fbe33c
|
[
"MIT"
] | 14 |
2016-09-14T00:16:49.000Z
|
2018-04-11T03:04:21.000Z
|
pull_into_place/commands/run_additional_metrics.py
|
Kortemme-Lab/pull_into_place
|
0019a6cec2a6130ebbaa49d7ab67d4c840fbe33c
|
[
"MIT"
] | 1 |
2017-11-27T07:35:56.000Z
|
2017-11-27T07:35:56.000Z
|
#!/usr/bin/env python2
"""\
Run additional filters on a folder of pdbs and copy the results back
into the original pdb.
Usage:
pull_into_place run_additional_metrics <directory> [options]
Options:
--max-runtime TIME [default: 12:00:00]
The runtime limit for each design job. The default value is
set pretty low so that the short queue is available by default. This
should work fine more often than not, but you also shouldn't be
surprised if you need to increase this.
--max-memory MEM [default: 2G]
The memory limit for each design job.
--mkdir
Make the directory corresponding to this step in the pipeline, but
don't do anything else. This is useful if you want to create custom
input files for just this step.
--test-run
Run on the short queue with a limited number of iterations. This
option automatically clears old results.
--clear
Clear existing results before submitting new jobs.
To use this class:
1. You need to initiate it with the directory where your pdb files
to be rerun are.
2. You need to use the setters for the Rosetta executable and the
metric.
"""
from klab import docopt, scripting, cluster
from pull_into_place import pipeline, big_jobs
@scripting.catch_and_print_errors()
def main():
args = docopt.docopt(__doc__)
cluster.require_qsub()
# Setup the workspace.
workspace = pipeline.AdditionalMetricWorkspace(args['<directory>'])
workspace.check_paths()
workspace.check_rosetta()
workspace.make_dirs()
if args['--mkdir']:
return
if args['--clear'] or args['--test-run']:
workspace.clear_outputs()
# Decide which inputs to use.
inputs = workspace.unclaimed_inputs
nstruct = len(inputs) * len(args['--nstruct'])
if not inputs:
print """\
All the input structures have already been (or are already being)
designed. If you want to rerun all the inputs from scratch, use the
--clear flag."""
raise SystemExit
# Submit the design job.
big_jobs.submit(
'pip_add_metrics.py', workspace,
inputs=inputs, nstruct=nstruct,
max_runtime=args['--max-runtime'],
max_memory=args['--max-memory'],
test_run=args['--test-run']
)
| 28.130952 | 77 | 0.66314 | 0 | 0 | 0 | 0 | 1,036 | 0.438426 | 0 | 0 | 1,570 | 0.66441 |
965a870632eb281fc73c846d9b482a54e2ad0de9
| 827 |
py
|
Python
|
setup.py
|
japherwocky/cl3ver
|
148242feb676cc675bbdf11ae39c3179b9a6ffe1
|
[
"MIT"
] | 1 |
2017-04-01T00:15:38.000Z
|
2017-04-01T00:15:38.000Z
|
setup.py
|
japherwocky/cl3ver
|
148242feb676cc675bbdf11ae39c3179b9a6ffe1
|
[
"MIT"
] | null | null | null |
setup.py
|
japherwocky/cl3ver
|
148242feb676cc675bbdf11ae39c3179b9a6ffe1
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'cl3ver',
packages = ['cl3ver'],
license = 'MIT',
install_requires = ['requests'],
version = '0.2',
description = 'A python 3 wrapper for the cleverbot.com API',
author = 'Japhy Bartlett',
author_email = '[email protected]',
url = 'https://github.com/japherwocky/cl3ver',
download_url = 'https://github.com/japherwocky/cl3ver/tarball/0.2.tar.gz',
keywords = ['cleverbot', 'wrapper', 'clever', 'chatbot', 'cl3ver'],
classifiers =[
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
],
)
| 39.380952 | 84 | 0.541717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 410 | 0.495768 |
966165e75931deeaee2d1ab429f5cda6020e085f
| 19,860 |
py
|
Python
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/jabber/pubsub.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1 |
2017-03-28T06:41:51.000Z
|
2017-03-28T06:41:51.000Z
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/jabber/pubsub.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | null | null | null |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/jabber/pubsub.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1 |
2016-12-13T21:08:58.000Z
|
2016-12-13T21:08:58.000Z
|
# -*- coding: utf-8 -*-
import re
from Axon.Component import component
from Kamaelia.Util.Backplane import PublishTo, SubscribeTo
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
from headstock.api.jid import JID
from headstock.api.im import Message, Body
from headstock.api.pubsub import Node, Item, Message
from headstock.api.discovery import *
from headstock.lib.utils import generate_unique
from bridge import Element as E
from bridge.common import XMPP_CLIENT_NS, XMPP_ROSTER_NS, \
XMPP_LAST_NS, XMPP_DISCO_INFO_NS, XMPP_DISCO_ITEMS_NS,\
XMPP_PUBSUB_NS
from amplee.utils import extract_url_trail, get_isodate,\
generate_uuid_uri
from amplee.error import ResourceOperationException
from microblog.atompub.resource import ResourceWrapper
from microblog.jabber.atomhandler import FeedReaderComponent
__all__ = ['DiscoHandler', 'ItemsHandler', 'MessageHandler']
publish_item_rx = re.compile(r'\[(.*)\] ([\w ]*)')
retract_item_rx = re.compile(r'\[(.*)\] ([\w:\-]*)')
geo_rx = re.compile(r'(.*) ([\[\.|\d,|\-\]]*)')
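# How these patterns are used below (inferred from ItemsHandler): publish_item_rx
# parses "[node] message words", retract_item_rx parses "[node] item-id", and
# geo_rx parses "message [longitude,latitude]" for geotagged posts.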
GEORSS_NS = u"http://www.georss.org/georss"
GEORSS_PREFIX = u"georss"
class DiscoHandler(component):
Inboxes = {"inbox" : "",
"control" : "",
"initiate" : "",
"jid" : "",
"error" : "",
"features.result": "",
"items.result": "",
"items.error" : "",
"docreate" : "",
"docreatecollection": "",
"dodelete" : "",
"dounsubscribe" : "",
"dosubscribe" : "",
"subscribed": "",
"subscriptions.result": "",
"affiliations.result": "",
"created": "",
"deleted" : ""}
Outboxes = {"outbox" : "",
"signal" : "Shutdown signal",
"message" : "",
"features-disco": "headstock.api.discovery.FeaturesDiscovery query to the server",
"features-announce": "headstock.api.discovery.FeaturesDiscovery informs"\
"the other components about the features instance received from the server",
"items-disco" : "",
"create-node" : "",
"delete-node" : "",
"subscribe-node" : "",
"unsubscribe-node" : "",
"subscriptions-disco": "",
"affiliations-disco" : ""}
def __init__(self, from_jid, atompub, host='localhost', session_id=None, profile=None):
super(DiscoHandler, self).__init__()
self.from_jid = from_jid
self.atompub = atompub
self.xmpphost = host
self.session_id = session_id
self.profile = profile
self._collection = None
self.pubsub_top_level_node = u'home/%s/%s' % (self.xmpphost, self.from_jid.node)
@property
def collection(self):
# Lazy loading of collection
if not self._collection:
self._collection = self.atompub.get_collection(self.profile.username)
return self._collection
def initComponents(self):
sub = SubscribeTo("JID.%s" % self.session_id)
self.link((sub, 'outbox'), (self, 'jid'))
self.addChildren(sub)
sub.activate()
pub = PublishTo("DISCO_FEAT.%s" % self.session_id)
self.link((self, 'features-announce'), (pub, 'inbox'))
self.addChildren(pub)
pub.activate()
sub = SubscribeTo("BOUND.%s" % self.session_id)
self.link((sub, 'outbox'), (self, 'initiate'))
self.addChildren(sub)
sub.activate()
return 1
def main(self):
yield self.initComponents()
while 1:
if self.dataReady("control"):
mes = self.recv("control")
if isinstance(mes, shutdownMicroprocess) or isinstance(mes, producerFinished):
self.send(producerFinished(), "signal")
break
if self.dataReady("jid"):
self.from_jid = self.recv('jid')
self.pubsub_top_level_node = u'home/%s/%s' % (self.xmpphost, self.from_jid.node)
if self.dataReady("initiate"):
self.recv("initiate")
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=self.pubsub_top_level_node)
self.send(p, "create-node")
yield 1
#d = FeaturesDiscovery(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost)
#self.send(d, "features-disco")
d = SubscriptionsDiscovery(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost)
self.send(d, "subscriptions-disco")
d = AffiliationsDiscovery(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost)
self.send(d, "affiliations-disco")
n = ItemsDiscovery(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=self.pubsub_top_level_node)
self.send(n, "items-disco")
# The response to our discovery query
            # is a headstock.api.discovery.FeaturesDiscovery instance.
            # What we immediately do is to notify all handlers
# interested in that event about it.
if self.dataReady('features.result'):
disco = self.recv('features.result')
for feature in disco.features:
print " ", feature.var
self.send(disco, 'features-announce')
if self.dataReady('items.result'):
items = self.recv('items.result')
print "%s has %d item(s)" % (items.node_name, len(items.items))
#for item in items.items:
#n = ItemsDiscovery(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
# node_name=item.node)
#self.send(n, "items-disco")
if self.dataReady('items.error'):
items_disco = self.recv('items.error')
print "DISCO ERROR: ", items_disco.node_name, items_disco.error
if items_disco.error.condition == 'item-not-found':
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=items_disco.node_name)
self.send(p, "create-node")
yield 1
if self.dataReady('subscriptions.result'):
subscriptions = self.recv('subscriptions.result')
for sub in subscriptions.subscriptions:
print "Subscription: %s (%s)" % (sub.node, sub.state)
if self.dataReady('affiliations.result'):
affiliations = self.recv('affiliations.result')
for aff in affiliations.affiliations:
print "Affiliation: %s %s" % (aff.node, aff.affiliation)
if self.dataReady('docreate'):
nodeid = self.recv('docreate').strip()
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=nodeid)
self.send(p, "create-node")
if self.dataReady('docreatecollection'):
nodeid = self.recv('docreatecollection').strip()
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=nodeid)
p.set_default_collection_conf()
self.send(p, "create-node")
if self.dataReady('dodelete'):
nodeid = self.recv('dodelete').strip()
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=nodeid)
self.send(p, "delete-node")
if self.dataReady('dosubscribe'):
nodeid = self.recv('dosubscribe').strip()
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=nodeid, sub_jid=self.from_jid.nodeid())
self.send(p, "subscribe-node")
if self.dataReady('dounsubscribe'):
nodeid = self.recv('dounsubscribe').strip()
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=nodeid, sub_jid=self.from_jid.nodeid())
self.send(p, "unsubscribe-node")
if self.dataReady('created'):
node = self.recv('created')
print "Node created: %s" % node.node_name
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=node.node_name, sub_jid=self.from_jid.nodeid())
self.send(p, "subscribe-node")
if self.dataReady('subscribed'):
node = self.recv('subscribed')
print "Node subscribed: %s" % node.node_name
if self.dataReady('deleted'):
node = self.recv('deleted')
print "Node deleted: %s" % node.node_name
if self.dataReady('error'):
node = self.recv('error')
print node.error
if not self.anyReady():
self.pause()
yield 1
class ItemsHandler(component):
Inboxes = {"inbox" : "",
"topublish" : "",
"todelete" : "",
"topurge": "",
"control" : "",
"xmpp.result": "",
"published": "",
"publish.error": "",
"retract.error": "",
"jid" : "",
"_feedresponse": "",
"_delresponse": ""}
Outboxes = {"outbox" : "",
"publish" : "",
"delete" : "",
"purge" : "",
"signal" : "Shutdown signal",
"_feedrequest": "",
"_delrequest": ""}
def __init__(self, from_jid, atompub, host='localhost', session_id=None, profile=None):
super(ItemsHandler, self).__init__()
self.from_jid = from_jid
self.atompub = atompub
self.xmpphost = host
self.session_id = session_id
self.profile = profile
self._collection = None
self.pubsub_top_level_node = u'home/%s/%s' % (self.xmpphost, self.from_jid.node)
@property
def collection(self):
# Lazy loading of collection
if not self._collection:
self._collection = self.atompub.get_collection(self.profile.username)
return self._collection
def initComponents(self):
sub = SubscribeTo("JID.%s" % self.session_id)
self.link((sub, 'outbox'), (self, 'jid'))
self.addChildren(sub)
sub.activate()
feedreader = FeedReaderComponent(use_etags=False)
self.addChildren(feedreader)
feedreader.activate()
client = SimpleHTTPClient()
self.addChildren(client)
self.link((self, '_feedrequest'), (client, 'inbox'))
self.link((client, 'outbox'), (feedreader, 'inbox'))
self.link((feedreader, 'outbox'), (self, '_feedresponse'))
client.activate()
client = SimpleHTTPClient()
self.addChildren(client)
self.link((self, '_delrequest'), (client, 'inbox'))
self.link((client, 'outbox'), (self, '_delresponse'))
client.activate()
return 1
def make_entry(self, msg, node):
uuid = generate_uuid_uri()
entry = E.load('./entry.atom').xml_root
entry.get_child('id', ns=entry.xml_ns).xml_text = uuid
dt = get_isodate()
entry.get_child('author', ns=entry.xml_ns).get_child('name', ns=entry.xml_ns).xml_text = unicode(self.profile.username)
entry.get_child('published', ns=entry.xml_ns).xml_text = dt
entry.get_child('updated', ns=entry.xml_ns).xml_text = dt
entry.get_child('content', ns=entry.xml_ns).xml_text = unicode(msg)
if node != self.pubsub_top_level_node:
tag = extract_url_trail(node)
E(u'category', namespace=entry.xml_ns, prefix=entry.xml_prefix,
attributes={u'term': unicode(tag)}, parent=entry)
return uuid, entry
def add_geo_point(self, entry, long, lat):
E(u'point', prefix=GEORSS_PREFIX, namespace=GEORSS_NS,
content=u'%s %s' % (unicode(long), unicode(lat)), parent=entry)
def main(self):
yield self.initComponents()
while 1:
if self.dataReady("control"):
mes = self.recv("control")
if isinstance(mes, shutdownMicroprocess) or isinstance(mes, producerFinished):
self.send(producerFinished(), "signal")
break
if self.dataReady("jid"):
self.from_jid = self.recv('jid')
self.pubsub_top_level_node = u'home/%s/%s' % (self.xmpphost, self.from_jid.node)
if self.dataReady("topublish"):
message = self.recv("topublish")
node = self.pubsub_top_level_node
m = geo_rx.match(message)
long = lat = None
if not m:
m = publish_item_rx.match(message)
if m:
node, message = m.groups()
else:
message, long_lat = m.groups()
long, lat = long_lat.strip('[').rstrip(']').split(',')
uuid, entry = self.make_entry(message, node)
if long and lat:
self.add_geo_point(entry, long, lat)
i = Item(id=uuid, payload=entry)
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=unicode(node), item=i)
self.send(p, "publish")
yield 1
if self.dataReady("todelete"):
item_id = self.recv("todelete")
node = self.pubsub_top_level_node
m = retract_item_rx.match(item_id)
if m:
node, item_id = m.groups()
i = Item(id=unicode(item_id))
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=unicode(node), item=i)
self.send(p, "delete")
yield 1
if self.dataReady("topurge"):
node_id = self.recv("topurge")
p = Node(unicode(self.from_jid), u'pubsub.%s' % self.xmpphost,
node_name=node_id)
self.send(p, "purge")
params = {'url': '%s/feed' % (self.collection.get_base_edit_uri().rstrip('/')),
'method': 'GET'}
self.send(params, '_feedrequest')
if self.dataReady("published"):
node = self.recv("published")
print "Item published: %s" % node
if self.dataReady("publish.error"):
node = self.recv("publish.error")
print node.error
if self.dataReady("retract.error"):
node = self.recv("retract.error")
print node.error
if self.dataReady("_feedresponse"):
feed = self.recv("_feedresponse")
for entry in feed.entries:
for link in entry.links:
if link.rel == 'edit':
params = {'url': link.href, 'method': 'DELETE'}
self.send(params, '_delrequest')
if self.dataReady("_delresponse"):
self.recv("_delresponse")
if not self.anyReady():
self.pause()
yield 1
class MessageHandler(component):
Inboxes = {"inbox" : "",
"control" : "",
"jid" : "",
"_response" : ""}
Outboxes = {"outbox" : "",
"signal" : "Shutdown signal",
"items-disco" : "",
"_request": ""}
def __init__(self, from_jid, atompub, host='localhost', session_id=None, profile=None):
super(MessageHandler, self).__init__()
self.from_jid = from_jid
self.atompub = atompub
self.xmpphost = host
self.session_id = session_id
self.profile = profile
self._collection = None
@property
def collection(self):
# Lazy loading of collection
if not self._collection:
self._collection = self.atompub.get_collection(self.profile.username)
return self._collection
def initComponents(self):
sub = SubscribeTo("JID.%s" % self.session_id)
self.link((sub, 'outbox'), (self, 'jid'))
self.addChildren(sub)
sub.activate()
client = SimpleHTTPClient()
self.addChildren(client)
self.link((self, '_request'), (client, 'inbox'))
self.link((client, 'outbox'), (self, '_response'))
client.activate()
return 1
def main(self):
yield self.initComponents()
while 1:
if self.dataReady("control"):
mes = self.recv("control")
if isinstance(mes, shutdownMicroprocess) or isinstance(mes, producerFinished):
self.send(producerFinished(), "signal")
break
if self.dataReady("jid"):
self.from_jid = self.recv('jid')
if self.dataReady("_response"):
#discard the HTTP response for now
member_entry = self.recv("_response")
if self.dataReady("inbox"):
msg = self.recv("inbox")
collection = self.collection
if collection:
for item in msg.items:
if item.event == 'item' and item.payload:
print "Published item: %s" % item.id
member = collection.get_member(item.id)
if not member:
if isinstance(item.payload, list):
body = item.payload[0].xml()
else:
body = item.payload.xml()
params = {'url': collection.get_base_edit_uri(),
'method': 'POST', 'postbody': body,
'extraheaders': {'content-type': 'application/atom+xml;type=entry',
'content-length': str(len(body)),
'slug': item.id}}
self.send(params, '_request')
elif item.event == 'retract':
print "Removed item: %s" % item.id
params = {'url': '%s/%s' % (collection.get_base_edit_uri().rstrip('/'),
item.id.encode('utf-8')),
'method': 'DELETE'}
self.send(params, '_request')
if not self.anyReady():
self.pause()
yield 1
| 39.72 | 127 | 0.507049 | 18,671 | 0.940131 | 11,582 | 0.583182 | 657 | 0.033082 | 0 | 0 | 3,829 | 0.1928 |
96616a7644cba49b6924d5d5a5b5f061a8473987
| 7,865 |
py
|
Python
|
examples/task_sequence_labeling_ner_lstm_crf.py
|
lonePatient/TorchBlocks
|
4a65d746cc8a396cb7df73ed4644d97ddf843e29
|
[
"MIT"
] | 82 |
2020-06-23T05:51:08.000Z
|
2022-03-29T08:11:08.000Z
|
examples/task_sequence_labeling_ner_lstm_crf.py
|
lonePatient/TorchBlocks
|
4a65d746cc8a396cb7df73ed4644d97ddf843e29
|
[
"MIT"
] | null | null | null |
examples/task_sequence_labeling_ner_lstm_crf.py
|
lonePatient/TorchBlocks
|
4a65d746cc8a396cb7df73ed4644d97ddf843e29
|
[
"MIT"
] | 22 |
2020-06-23T05:51:10.000Z
|
2022-03-18T07:01:43.000Z
|
import os
import json
from torchblocks.metrics import SequenceLabelingScore
from torchblocks.trainer import SequenceLabelingTrainer
from torchblocks.callback import TrainLogger
from torchblocks.processor import SequenceLabelingProcessor, InputExample
from torchblocks.utils import seed_everything, dict_to_text, build_argparse
from torchblocks.utils import prepare_device, get_checkpoints
from torchblocks.data import CNTokenizer
from torchblocks.data import Vocabulary, VOCAB_NAME
from torchblocks.models.nn.lstm_crf import LSTMCRF
from torchblocks.models.bases import TrainConfig
from torchblocks.models.bases import WEIGHTS_NAME
MODEL_CLASSES = {
'lstm-crf': (TrainConfig, LSTMCRF, CNTokenizer)
}
def build_vocab(data_dir, vocab_dir):
'''
    Build the vocabulary from the train/dev/test json files.
'''
vocab = Vocabulary()
vocab_path = os.path.join(vocab_dir, VOCAB_NAME)
if os.path.exists(vocab_path):
vocab.load_vocab(str(vocab_path))
else:
files = ["train.json", "dev.json", "test.json"]
for file in files:
with open(os.path.join(data_dir, file), 'r') as fr:
for line in fr:
line = json.loads(line.strip())
text = line['text']
vocab.update(list(text))
vocab.build_vocab()
vocab.save_vocab(vocab_path)
print("vocab size: ", len(vocab))
class CluenerProcessor(SequenceLabelingProcessor):
def get_labels(self):
"""See base class."""
        # By default, the first label is "X"
return ["X", "B-address", "B-book", "B-company", 'B-game', 'B-government', 'B-movie', 'B-name',
'B-organization', 'B-position', 'B-scene', "I-address",
"I-book", "I-company", 'I-game', 'I-government', 'I-movie', 'I-name',
'I-organization', 'I-position', 'I-scene',
"S-address", "S-book", "S-company", 'S-game', 'S-government', 'S-movie',
'S-name', 'S-organization', 'S-position',
'S-scene', 'O', "[START]", "[END]"]
def read_data(self, input_file):
"""Reads a json list file."""
lines = []
with open(input_file, 'r') as f:
for line in f:
line = json.loads(line.strip())
text = line['text']
label_entities = line.get('label', None)
labels = ['O'] * len(text)
if label_entities is not None:
for key, value in label_entities.items():
for sub_name, sub_index in value.items():
for start_index, end_index in sub_index:
assert text[start_index:end_index + 1] == sub_name
if start_index == end_index:
labels[start_index] = 'S-' + key
else:
labels[start_index] = 'B-' + key
labels[start_index + 1:end_index + 1] = ['I-' + key] * (len(sub_name) - 1)
lines.append({"text": text, "labels": labels})
return lines
def create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line['text']
labels = line['labels']
examples.append(InputExample(guid=guid, texts=[text_a, None], label_ids=labels))
return examples
def main():
parser = build_argparse()
parser.add_argument('--markup', type=str, default='bios', choices=['bios', 'bio'])
parser.add_argument('--use_crf', action='store_true', default=True)
args = parser.parse_args()
# output dir
if args.model_name is None:
args.model_name = args.model_path.split("/")[-1]
args.output_dir = args.output_dir + '{}'.format(args.model_name)
os.makedirs(args.output_dir, exist_ok=True)
# logging
prefix = "_".join([args.model_name, args.task_name])
logger = TrainLogger(log_dir=args.output_dir, prefix=prefix)
# device
logger.info("initializing device")
args.device, args.n_gpu = prepare_device(args.gpu, args.local_rank)
# build vocab
build_vocab(args.data_dir, vocab_dir=args.model_path)
seed_everything(args.seed)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
# data processor
logger.info("initializing data processor")
tokenizer = tokenizer_class.from_pretrained(args.model_path, do_lower_case=args.do_lower_case)
    processor = CluenerProcessor(data_dir=args.data_dir, tokenizer=tokenizer, prefix=prefix, add_special_tokens=False)
label_list = processor.get_labels()
num_labels = len(label_list)
id2label = {i: label for i, label in enumerate(label_list)}
args.id2label = id2label
args.num_labels = num_labels
# model
logger.info("initializing model and config")
config = config_class.from_pretrained(args.model_path, num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_path, config=config)
model.to(args.device)
# Trainer
logger.info("initializing traniner")
trainer = SequenceLabelingTrainer(args=args, logger=logger, collate_fn=processor.collate_fn,
input_keys=processor.get_input_keys(),
metrics=[SequenceLabelingScore(id2label, markup=args.markup)])
# do train
if args.do_train:
train_dataset = processor.create_dataset(args.train_max_seq_length, 'train.json', 'train', )
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
trainer.train(model, train_dataset=train_dataset, eval_dataset=eval_dataset)
# do eval
if args.do_eval and args.local_rank in [-1, 0]:
results = {}
eval_dataset = processor.create_dataset(args.eval_max_seq_length, 'dev.json', 'dev')
checkpoints = [args.output_dir]
if args.eval_all_checkpoints or args.checkpoint_number > 0:
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint, config=config)
model.to(args.device)
trainer.evaluate(model, eval_dataset, save_preds=True, prefix=str(global_step))
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in trainer.records['result'].items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
dict_to_text(output_eval_file, results)
# do predict
if args.do_predict:
test_dataset = processor.create_dataset(args.eval_max_seq_length, 'test.json', 'test')
if args.checkpoint_number == 0:
raise ValueError("checkpoint number should > 0,but get %d", args.checkpoint_number)
checkpoints = get_checkpoints(args.output_dir, args.checkpoint_number, WEIGHTS_NAME)
for checkpoint in checkpoints:
global_step = checkpoint.split("/")[-1].split("-")[-1]
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
trainer.predict(model, test_dataset=test_dataset, prefix=str(global_step))
if __name__ == "__main__":
main()
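# Example invocation (illustrative; the exact flag spellings come from
# torchblocks.utils.build_argparse and may differ):
#   python task_sequence_labeling_ner_lstm_crf.py \
#       --model_type lstm-crf --model_path <vocab/config dir> \
#       --data_dir <cluener data dir> --task_name cluener \
#       --do_train --do_eval --markup bios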
| 46.264706 | 118 | 0.614495 | 2,212 | 0.280675 | 0 | 0 | 0 | 0 | 0 | 0 | 1,107 | 0.140464 |
966233a14411c83da996b856512cb5f8c21c76c2
| 277 |
py
|
Python
|
teachers/views.py
|
xuhairmeer/school-management
|
36394c841a61e46bc00e1dc21bcfcdd5fa6f6918
|
[
"bzip2-1.0.6"
] | null | null | null |
teachers/views.py
|
xuhairmeer/school-management
|
36394c841a61e46bc00e1dc21bcfcdd5fa6f6918
|
[
"bzip2-1.0.6"
] | 9 |
2021-03-19T08:15:07.000Z
|
2022-03-12T00:13:19.000Z
|
teachers/views.py
|
muhammadzuhair95/school-management
|
36394c841a61e46bc00e1dc21bcfcdd5fa6f6918
|
[
"bzip2-1.0.6"
] | null | null | null |
# Create your views here.
from django.urls import reverse_lazy
from django.views import generic
from forms.forms import UserCreateForm
class SignUp(generic.CreateView):
form_class = UserCreateForm
success_url = reverse_lazy('home')
template_name = 'signup.html'
| 23.083333 | 38 | 0.776173 | 138 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.158845 |
9662ad75421db9c9fefe25e938e00f34ee4e0c42
| 1,466 |
py
|
Python
|
Code/Main.py
|
iqbalsublime/EPCMS
|
d2a745bda9a1d256d8834d1fa1105bb2bab79e3f
|
[
"MIT"
] | null | null | null |
Code/Main.py
|
iqbalsublime/EPCMS
|
d2a745bda9a1d256d8834d1fa1105bb2bab79e3f
|
[
"MIT"
] | null | null | null |
Code/Main.py
|
iqbalsublime/EPCMS
|
d2a745bda9a1d256d8834d1fa1105bb2bab79e3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 01:33:18 2019
@author: iqbalsublime
"""
from Customer import Customer
from Restaurent import Restaurent
from Reserve import Reserve
from Menu import Menu
from Order import Order
cust1= Customer(1,"Iqbal", "0167****671")
rest1= Restaurent(1,"Farmgate", "102 Kazi Nazrul Islam Ave, Dhaka")
reserve1=Reserve(1, "20-11-2019",cust1, rest1)
"""
print("******Reservation*******")
print("Reserve ID:{}, Date: {} Customer Name: {}, Mobile:{}, Branch: {}".format(reserve1.reserveid,
reserve1.date, reserve1.customer.name, reserve1.customer.mobile, reserve1.restaurent.bname))
#print(reserve1.description())
print("******Reservation*******")
"""
menu1= Menu(1,"Burger", 160,"Fast Food",4)
menu2= Menu(2,"Pizza", 560,"Fast Food",2)
menu3= Menu(3,"Biriani", 220,"Indian",1)
menu4= Menu(4,"Pitha", 50,"Bangla",5)
order1= Order(1,"20-11-2019", cust1)
order1.addMenu(menu1)
order1.addMenu(menu2)
order1.addMenu(menu3)
order1.addMenu(menu4)
print("******Invoice*******")
print("Order ID:{}, Date: {} Customer Name: {}, Mobile:{}".format(order1.oid,
order1.date, order1.Customer.name, order1.Customer.mobile))
totalBill=0.0
serial=1
print("SL---Food----Price---Qy----total")
for order in order1.menus:
print(serial,order.name, order.price, order.quantity, (order.price*order.quantity))
totalBill=totalBill+(order.price*order.quantity)
print("Grand Total :", totalBill)
print("******Invoice*******")
| 30.541667 | 100 | 0.680764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.475443 |
966342122018d47e80bbaf5398de1bb1a30423a0
| 4,544 |
py
|
Python
|
venv/Lib/site-packages/twisted/logger/_io.py
|
AironMattos/Web-Scraping-Project
|
89290fd376e2b42258c49e3ce2c3669932e03ad3
|
[
"MIT"
] | 2 |
2021-05-30T16:35:00.000Z
|
2021-06-03T12:23:33.000Z
|
Lib/site-packages/twisted/logger/_io.py
|
Jriszz/guacamole-python
|
cf0dfcaaa7d85c3577571954fc5b2b9dcf55ba17
|
[
"MIT"
] | 20 |
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/twisted/logger/_io.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 2 |
2021-05-29T21:12:22.000Z
|
2021-05-30T04:56:50.000Z
|
# -*- test-case-name: twisted.logger.test.test_io -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
File-like object that logs.
"""
import sys
from typing import AnyStr, Iterable, Optional
from constantly import NamedConstant
from incremental import Version
from twisted.python.deprecate import deprecatedProperty
from ._levels import LogLevel
from ._logger import Logger
class LoggingFile:
"""
File-like object that turns C{write()} calls into logging events.
Note that because event formats are L{str}, C{bytes} received via C{write()}
are converted to C{str}, which is the opposite of what C{file} does.
@ivar softspace: Attribute to make this class more file-like under Python 2;
value is zero or one. Do not use.
"""
_softspace = 0
@deprecatedProperty(Version("Twisted", 21, 2, 0))
def softspace(self):
return self._softspace
@softspace.setter # type: ignore[no-redef]
def softspace(self, value):
self._softspace = value
def __init__(
self,
logger: Logger,
level: NamedConstant = LogLevel.info,
encoding: Optional[str] = None,
) -> None:
"""
@param logger: the logger to log through.
@param level: the log level to emit events with.
@param encoding: The encoding to expect when receiving bytes via
C{write()}. If L{None}, use C{sys.getdefaultencoding()}.
"""
self.level = level
self.log = logger
if encoding is None:
self._encoding = sys.getdefaultencoding()
else:
self._encoding = encoding
self._buffer = ""
self._closed = False
@property
def closed(self) -> bool:
"""
Read-only property. Is the file closed?
@return: true if closed, otherwise false.
"""
return self._closed
@property
def encoding(self) -> str:
"""
Read-only property. File encoding.
@return: an encoding.
"""
return self._encoding
@property
def mode(self) -> str:
"""
Read-only property. File mode.
@return: "w"
"""
return "w"
@property
def newlines(self) -> None:
"""
Read-only property. Types of newlines encountered.
@return: L{None}
"""
return None
@property
def name(self) -> str:
"""
The name of this file; a repr-style string giving information about its
namespace.
@return: A file name.
"""
return "<{} {}#{}>".format(
self.__class__.__name__,
self.log.namespace,
self.level.name,
)
def close(self) -> None:
"""
Close this file so it can no longer be written to.
"""
self._closed = True
def flush(self) -> None:
"""
No-op; this file does not buffer.
"""
pass
def fileno(self) -> int:
"""
Returns an invalid file descriptor, since this is not backed by an FD.
@return: C{-1}
"""
return -1
def isatty(self) -> bool:
"""
A L{LoggingFile} is not a TTY.
@return: C{False}
"""
return False
def write(self, message: AnyStr) -> None:
"""
Log the given message.
@param message: The message to write.
"""
if self._closed:
raise ValueError("I/O operation on closed file")
if isinstance(message, bytes):
text = message.decode(self._encoding)
else:
text = message
lines = (self._buffer + text).split("\n")
self._buffer = lines[-1]
lines = lines[0:-1]
for line in lines:
self.log.emit(self.level, format="{log_io}", log_io=line)
def writelines(self, lines: Iterable[AnyStr]) -> None:
"""
Log each of the given lines as a separate message.
@param lines: Data to write.
"""
for line in lines:
self.write(line)
def _unsupported(self, *args: object) -> None:
"""
Template for unsupported operations.
@param args: Arguments.
"""
raise OSError("unsupported operation")
read = _unsupported
next = _unsupported
readline = _unsupported
readlines = _unsupported
xreadlines = _unsupported
seek = _unsupported
tell = _unsupported
truncate = _unsupported
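# A minimal usage sketch (not part of the original module): route print() output
# through a Logger by swapping in a LoggingFile. The observer setup uses the
# public twisted.logger helpers and writes the formatted log text to stderr.
if __name__ == "__main__":
    import sys
    from twisted.logger import Logger, globalLogBeginner, textFileLogObserver
    # Send log events to stderr; keep stdio untouched so we can swap it ourselves.
    globalLogBeginner.beginLoggingTo(
        [textFileLogObserver(sys.stderr)], redirectStandardIO=False
    )
    sys.stdout = LoggingFile(Logger(namespace="stdout"), level=LogLevel.info)
    print("hello, logging world")  # emitted as a log event with key "log_io"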
| 24.170213 | 80 | 0.567342 | 4,135 | 0.909991 | 0 | 0 | 1,219 | 0.268266 | 0 | 0 | 2,058 | 0.452905 |
96636c79f61d2c52c4c27582d3d3210f08ece747
| 3,547 |
py
|
Python
|
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
# from pprint import pprint
import aiohttp
import discord
from discord.ext import commands
from bot import constants
API_URL = "https://livescore6.p.rapidapi.com/matches/v2/"
LIVE_MATCHES_URL = API_URL + "list-live"
HEADERS = {
"x-rapidapi-key": constants.RAPIDAPI_KEY,
"x-rapidapi-host": constants.RAPIDAPI_LIVESCORE6_HOST,
}
@dataclass
class CricketMatch:
format: str
match_no: str
teams: tuple[str, str]
summary: str
scores: dict
status: str
_eid: str
class Cricket(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
def get_live_matches_list_embed(matches: list[CricketMatch]) -> discord.Embed:
embed = discord.Embed(title="Current Live Matches:", colour=discord.Colour.random())
for match in matches:
match_info = f"""\
{match.teams[0]}: {match.scores['T1I1']}
{match.teams[1]}: {match.scores['T2I1']}
"""
if "test" in match.format.lower():
match_info += f"""\
{match.teams[0]}: {match.scores['T1I2']}
{match.teams[1]}: {match.scores['T2I2']}
"""
match_info += f"""\
{match.summary}
{match.status}
"""
embed.add_field(
name="{} vs {}: {}".format(*match.teams, match.match_no or match.format), value=match_info, inline=False
)
return embed
@commands.command()
async def live_scores(self, ctx: commands.Context) -> None:
"""Sends information about ongoing cricket matches."""
querystring = {"Category": "cricket"}
async with aiohttp.ClientSession() as session:
async with session.get(
LIVE_MATCHES_URL, headers=HEADERS, params=querystring
) as response:
response = await response.json()
# pprint(response)
if not response:
await ctx.send("No matches in progress currently!")
return
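        # Map the Livescore payload onto CricketMatch objects. The score keys
        # appear to follow Tr{team}C{innings} = runs, Tr{team}CW{innings} =
        # wickets and Tr{team}CO{innings} = overs (inferred from how the
        # values are rendered in the embed above).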
matches = [
CricketMatch(
format=match["EtTx"],
teams=(
match["T1"][0]["Nm"],
match["T2"][0]["Nm"],
),
summary=match["ECo"],
_eid=match["Eid"],
status=match["EpsL"],
scores={
"T1I1": f"{match.get('Tr1C1', '-')}/"
f"{match.get('Tr1CW1', '-')} "
f"({match.get('Tr1CO1', '-')})",
"T2I1": f"{match.get('Tr2C1', '-')}/"
f"{match.get('Tr2CW1', '-')} "
f"({match.get('Tr2CO1', '-')})",
"T1I2": f"{match.get('Tr1C2', '-')}/"
f"{match.get('Tr1CW2', '-')} "
f"({match.get('Tr1CO2', '-')})",
"T2I2": f"{match.get('Tr2C2', '-')}/"
f"{match.get('Tr2CW2', '-')} "
f"({match.get('Tr2CO2', '-')})",
},
match_no=match.get("ErnInf", ""),
)
for match in map(lambda m: m["Events"][0], response["Stages"])
]
await ctx.send(embed=self.get_live_matches_list_embed(matches))
def setup(bot: commands.Bot):
"""Add Cricket Cog."""
bot.add_cog(Cricket(bot))
| 32.842593 | 120 | 0.480124 | 3,068 | 0.864956 | 0 | 0 | 2,973 | 0.838173 | 2,034 | 0.573442 | 981 | 0.276572 |
96640311a4d3b46c933f3f768041f09fa3a2cb24
| 3,588 |
py
|
Python
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/update_dropout.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 45 |
2015-04-26T04:45:51.000Z
|
2022-01-24T15:03:55.000Z
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/update_dropout.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 8 |
2018-07-20T20:54:51.000Z
|
2020-06-12T05:36:04.000Z
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/update_dropout.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 22 |
2018-05-21T23:57:20.000Z
|
2022-02-21T00:48:32.000Z
|
"""
technique that randomly 0's out the update deltas for each parameter
"""
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
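# A minimal NumPy sketch (not part of treeano) of the idea implemented below:
# on each iteration a parameter's whole update is kept with probability
# (1 - p) and zeroed otherwise, optionally rescaled so the expected update
# magnitude stays unchanged. Names here are illustrative only.
def _update_dropout_sketch(delta, p=0.5, rescale=True, rng=None):
    import numpy as np
    rng = rng if rng is not None else np.random
    keep = rng.binomial(1, 1 - p)  # 1 = keep this update, 0 = drop it
    scale = (1.0 / (1 - p)) if (rescale and keep) else 1.0
    return delta * keep * scale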
@treeano.register_node("update_dropout")
class UpdateDropoutNode(treeano.Wrapper1NodeImpl):
hyperparameter_names = ("update_dropout_probability",
"rescale_updates")
def mutate_update_deltas(self, network, update_deltas):
if network.find_hyperparameter(["deterministic"]):
return
p = network.find_hyperparameter(["update_dropout_probability"], 0)
if p == 0:
return
rescale_updates = network.find_hyperparameter(["rescale_updates"],
False)
keep_prob = 1 - p
rescale_factor = 1 / keep_prob
srng = MRG_RandomStreams()
# TODO parameterize search tags (to affect not only "parameters"s)
vws = network.find_vws_in_subtree(tags={"parameter"},
is_shared=True)
for vw in vws:
if vw.variable not in update_deltas:
continue
mask = srng.binomial(size=(), p=keep_prob, dtype=fX)
if rescale_updates:
mask *= rescale_factor
update_deltas[vw.variable] *= mask
@treeano.register_node("momentum_update_dropout")
class MomentumUpdateDropoutNode(treeano.Wrapper1NodeImpl):
"""
randomly 0's out the update deltas for each parameter with momentum
like update dropout, but with some probability (momentum), whether
or not an update is dropped out is kept the same as the previous
iteration
"""
hyperparameter_names = ("update_dropout_probability",
"rescale_updates",
"update_dropout_momentum")
def mutate_update_deltas(self, network, update_deltas):
if network.find_hyperparameter(["deterministic"]):
return
p = network.find_hyperparameter(["update_dropout_probability"], 0)
if p == 0:
return
rescale_updates = network.find_hyperparameter(["rescale_updates"],
False)
momentum = network.find_hyperparameter(["update_dropout_momentum"])
keep_prob = 1 - p
rescale_factor = 1 / keep_prob
srng = MRG_RandomStreams()
# TODO parameterize search tags (to affect not only "parameters"s)
vws = network.find_vws_in_subtree(tags={"parameter"},
is_shared=True)
for vw in vws:
if vw.variable not in update_deltas:
continue
is_kept = network.create_vw(
"momentum_update_dropout_is_kept(%s)" % vw.name,
shape=(),
is_shared=True,
tags={"state"},
# TODO: Should this be a random bool with prob p for each?
default_inits=[treeano.inits.ConstantInit(1)]).variable
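            # With probability `momentum`, reuse the previous iteration's
            # keep/drop decision (stored in is_kept); otherwise draw a fresh
            # Bernoulli keep decision with probability keep_prob.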
keep_mask = srng.binomial(size=(), p=keep_prob, dtype=fX)
momentum_mask = srng.binomial(size=(), p=momentum, dtype=fX)
new_is_kept = (momentum_mask * is_kept
+ (1 - momentum_mask) * keep_mask)
mask = new_is_kept
if rescale_updates:
mask *= rescale_factor
update_deltas[is_kept] = new_is_kept - is_kept
update_deltas[vw.variable] *= mask
| 36.612245 | 75 | 0.593924 | 3,250 | 0.905797 | 0 | 0 | 3,341 | 0.931159 | 0 | 0 | 871 | 0.242754 |
966403bf90394bfb6b137e41500328a65675b400
| 822 |
py
|
Python
|
zeromq/test/manifest.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | null | null | null |
zeromq/test/manifest.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | null | null | null |
zeromq/test/manifest.py
|
brettin/liquidhandling
|
7a96e2881ffaa0326514cf5d97ba49d65ad42a14
|
[
"MIT"
] | null | null | null |
import os
import os.path
from datetime import datetime
import time
from stat import *
import pathlib
import json
def generateFileManifest(filename, manifest_filename=None):
string = ""
data = {}
if os.path.isfile(filename):
f = pathlib.Path(filename)
data[os.path.abspath(filename)] = {
'ctime': [str(f.stat().st_ctime), str(datetime.fromtimestamp(f.stat().st_ctime))],
'mtime':[str(f.stat().st_mtime), str(datetime.fromtimestamp(f.stat().st_mtime))]
}
json_data = json.dumps(data)
    if manifest_filename is not None:
with open(manifest_filename, "w+") as manifest_file:
manifest_file.write(json_data)
else:
print ("skipping bad filename: {}".format(filename))
return data
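# Illustrative usage (not part of the original script): build a manifest entry
# for this file itself and also write it out as manifest.json.
if __name__ == "__main__":
    print(generateFileManifest(__file__, manifest_filename="manifest.json"))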
| 26.516129 | 102 | 0.611922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.057178 |
9665d6688a28f3e81d1997d0e6bd30513e85d853
| 1,111 |
py
|
Python
|
shops/shop_util/test_shop_names.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | 3 |
2019-12-04T07:08:55.000Z
|
2020-12-08T01:38:46.000Z
|
shops/shop_util/test_shop_names.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | null | null | null |
shops/shop_util/test_shop_names.py
|
ikp4success/shopasource
|
9a9ed5c58a8b37b6ff169b45f7fdfcb44809fd88
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
class TestShopNames(Enum):
AMAZON = ("AMAZON",)
TARGET = ("TARGET",)
WALMART = ("WALMART",)
TJMAXX = ("TJMAXX",)
GOOGLE = ("GOOGLE",)
NEWEGG = ("NEWEGG",)
HM = ("HM",)
MICROCENTER = ("MICROCENTER",)
FASHIONNOVA = ("FASHIONNOVA",)
SIXPM = ("SIXPM",)
POSHMARK = ("POSHMARK",)
MACYS = ("MACYS",)
ASOS = ("ASOS",)
JCPENNEY = ("JCPENNEY",)
KOHLS = "KOHLS"
FOOTLOCKER = ("FOOTLOCKER",)
BESTBUY = ("BESTBUY",)
EBAY = ("EBAY",)
KMART = ("KMART",)
BIGLOTS = ("BIGLOTS",)
BURLINGTON = ("BURLINGTON",)
MVMTWATCHES = ("MVMTWATCHES",)
BOOHOO = ("BOOHOO",)
FOREVER21 = ("FOREVER21",)
STYLERUNNER = ("STYLERUNNER",)
LEVI = ("LEVI",)
ZARA = ("ZARA",)
NORDSTROM = "NORDSTROM"
NORDSTROMRACK = "NORDSTROMRACK"
HAUTELOOK = "HAUTELOOK"
SAKSFIFTHAVENUE = "SAKSFIFTHAVENUE"
EXPRESS = "EXPRESS"
CHARLOTTERUSSE = "CHARLOTTERUSSE"
ALDO = "ALDO"
SHOPQUEEN = "SHOPQUEEN"
NIKE = "NIKE"
ADIDAS = "ADIDAS"
DICKSSPORTINGGOODS = "DICKSSPORTINGGOODS"
BINK = "BINK"
| 25.25 | 45 | 0.568857 | 1,086 | 0.977498 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.336634 |
9666c26fde73b6b01161a9c3fc47311bfae3372e
| 2,477 |
py
|
Python
|
package_manager/util_test.py
|
shahriak/dotnet5
|
6b96c38b0f351b79750bd2b8bc0f77dc434afe00
|
[
"Apache-2.0"
] | 10,302 |
2018-04-17T17:06:57.000Z
|
2022-03-31T17:29:36.000Z
|
package_manager/util_test.py
|
unasuke/distroless
|
859ce06093a899f31fffc1cd151cf9867faf49d5
|
[
"Apache-2.0"
] | 623 |
2018-04-17T20:43:43.000Z
|
2022-03-30T13:08:57.000Z
|
package_manager/util_test.py
|
unasuke/distroless
|
859ce06093a899f31fffc1cd151cf9867faf49d5
|
[
"Apache-2.0"
] | 726 |
2018-05-09T16:20:46.000Z
|
2022-03-31T15:09:07.000Z
|
import unittest
import os
from six import StringIO
from package_manager import util
CHECKSUM_TXT = "1915adb697103d42655711e7b00a7dbe398a33d7719d6370c01001273010d069"
DEBIAN_JESSIE_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="8"
VERSION="Debian GNU/Linux 8 (jessie)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_STRETCH_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="9"
VERSION="Debian GNU/Linux 9 (stretch)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
DEBIAN_BUSTER_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
VERSION_ID="10"
VERSION="Debian GNU/Linux 10 (buster)"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
# VERSION and VERSION_ID aren't set on unknown distros
DEBIAN_UNKNOWN_OS_RELEASE = """PRETTY_NAME="Distroless"
NAME="Debian GNU/Linux"
ID="debian"
HOME_URL="https://github.com/GoogleContainerTools/distroless"
SUPPORT_URL="https://github.com/GoogleContainerTools/distroless/blob/master/README.md"
BUG_REPORT_URL="https://github.com/GoogleContainerTools/distroless/issues/new"
"""
osReleaseForDistro = {
"jessie": DEBIAN_JESSIE_OS_RELEASE,
"stretch": DEBIAN_STRETCH_OS_RELEASE,
"buster": DEBIAN_BUSTER_OS_RELEASE,
"???": DEBIAN_UNKNOWN_OS_RELEASE,
}
class TestUtil(unittest.TestCase):
def test_sha256(self):
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'testdata', 'checksum.txt')
actual = util.sha256_checksum(filename)
self.assertEqual(CHECKSUM_TXT, actual)
def test_generate_debian_os_release(self):
for distro, expected_output in osReleaseForDistro.items():
output_file = StringIO()
util.generate_os_release(distro, output_file)
self.assertEqual(expected_output, output_file.getvalue())
if __name__ == '__main__':
unittest.main()
| 34.887324 | 86 | 0.774727 | 558 | 0.225273 | 0 | 0 | 0 | 0 | 0 | 0 | 1,526 | 0.616068 |
9667f9974ca754b017d3785df5cd5e5a88c0fff5
| 9,337 |
py
|
Python
|
testscripts/RDKB/component/TAD/TS_TAD_Download_SetInvalidDiagnosticsState.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/TAD/TS_TAD_Download_SetInvalidDiagnosticsState.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/TAD/TS_TAD_Download_SetInvalidDiagnosticsState.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>2</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the vresion as 1 -->
<name>TS_TAD_Download_SetInvalidDiagnosticsState</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>TADstub_Get</primitive_test_name>
<!-- -->
<primitive_test_version>3</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
  <synopsis>To check if the DiagnosticsState of download can be set with an invalid value. Requested and Canceled are the only writable values. If the test fails, set any writable parameter and check if the DiagnosticsState changes to None</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>1</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks>RDKB doesn't support Download Diagnostics feature till now</remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_TAD_34</test_case_id>
    <test_objective>To check if the DiagnosticsState of download can be set with an invalid value. Requested and Canceled are the only writable values. If the test fails, set any writable parameter and check if the DiagnosticsState changes to None</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3,Emulator</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>TADstub_Get</api_or_interface_used>
<input_parameters>Device.IP.Diagnostics.DownloadDiagnostics.DiagnosticsState
Device.IP.Diagnostics.DownloadDiagnostics.Interface
Device.IP.Diagnostics.DownloadDiagnostics.DownloadURL</input_parameters>
<automation_approch>1. Load TAD modules
2. From the script, invoke TADstub_Set to set all the writable parameters
3. Check whether the result params get changed along with the download DiagnosticsState
4. Validation of the result is done within the python script and the result status is sent to Test Manager.
5. Test Manager will publish the result in the GUI as PASS/FAILURE based on the response from the TAD stub.</automation_approch>
<except_output>CheckPoint 1:
The output should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_TAD_Download_SetInvalidDiagnosticsState</test_script>
<skipped>No</skipped>
<release_version></release_version>
<remarks></remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("tad","1");
#IP and Port of box, no need to change;
#these will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TAD_Download_SetInvalidDiagnosticsState');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj.createTestStep('TADstub_Set');
tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.DownloadDiagnostics.DiagnosticsState");
tdkTestObj.addParameter("ParamValue","Completed");
tdkTestObj.addParameter("Type","string");
expectedresult="FAILURE";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1:Set DiagnosticsState of download as completed";
print "EXPECTED RESULT 1: DiagnosticsState of download must be Requested or Canceled";
print "ACTUAL RESULT 1: Can not set diagnosticsState of download as completed, details : %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TADstub_Set');
tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.DownloadDiagnostics.Interface");
tdkTestObj.addParameter("ParamValue","Interface_erouter0");
tdkTestObj.addParameter("Type","string");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Set the interface of Download";
print "EXPECTED RESULT 2: Should set the interface of Download ";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TADstub_Get');
tdkTestObj.addParameter("paramName","Device.IP.Diagnostics.DownloadDiagnostics.DiagnosticsState");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details= tdkTestObj.getResultDetails();
if expectedresult in actualresult and details=="None":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3 :Get DiagnosticsState of download as None";
print "EXPECTED RESULT 3 :Should get the DiagnosticsState of download as None ";
print "ACTUAL RESULT 3 :The DiagnosticsState of download is , details : %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3 :Get DiagnosticsState of download as None";
print "EXPECTED RESULT 3 :Should get the Diagnostics State of download as None";
print "ACTUAL RESULT 3 :The DiagnosticsState of download is , details : %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Set the interface of Download";
print "EXPECTED RESULT 2: Should set the interface of Download ";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1:Set DiagnosticsState of download as completed";
print "EXPECTED RESULT 1: DiagnosticsState of download must be Requested or Canceled";
print "ACTUAL RESULT 1: DiagnosticsState of download is set as completed, details : %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("tad");
else:
print "Failed to load tad module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 49.402116 | 256 | 0.698297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,949 | 0.744243 |
966918b396e78cd0cc9c87fbc4a01ea21e115709
| 2,926 |
py
|
Python
|
Tutorials/tutorial_01_intro.py
|
mseinstein/openCV
|
94bfd55e0d77fd78f2669244bbab5d9ea8f32666
|
[
"MIT"
] | null | null | null |
Tutorials/tutorial_01_intro.py
|
mseinstein/openCV
|
94bfd55e0d77fd78f2669244bbab5d9ea8f32666
|
[
"MIT"
] | null | null | null |
Tutorials/tutorial_01_intro.py
|
mseinstein/openCV
|
94bfd55e0d77fd78f2669244bbab5d9ea8f32666
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 01 14:09:03 2017
@author: BJ
"""
import cv2
import os
from matplotlib import pyplot as plt
os.chdir('E:\\GitHub\\openCV\\Tutorials')
# %% IMAGES
# Load color image
# Add the flags 1, 0 or -1, to load a color image, load an image in
# grayscale mode or load image as is, respectively
img = cv2.imread('LegoAd.jpg',1)
# Display an image
# The first argument is the window name (string), the second argument is the image
cv2.imshow('image',img)
# You can display multiple windows using different window names
cv2.imshow('image2',img)
# Displaying an image using Matplotlib
# OpenCV loads color in BGR, while matplotlib displays in RGB, so to use
# matplotlib you need to reverse the order of the color layers
plt.imshow(img[:,:,::-1],interpolation = 'bicubic')
# note there is an argument in imshow to set the colormap, this is ignored if
# the input is 3D as it assumes the third dimension directly specifies the RGB
# values
plt.imshow(img[:,:,::-1], cmap= "Greys", interpolation = 'bicubic')
# you can remove the tick marks using the following code
plt.xticks([]), plt.yticks([])
# Here is a list of the full matplotlib colormaps
# https://matplotlib.org/examples/color/colormaps_reference.html
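# An equivalent approach (a small sketch, not part of the original tutorial): instead of
# reversing the channel axis by slicing, convert BGR to RGB explicitly with cv2.cvtColor
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb, interpolation = 'bicubic')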
# Closing an image
# To close a specific window use the following command with its name as the argument
cv2.destroyWindow('image2')
# To close all windows use the following command with no arguments
cv2.destroyAllWindows()
# Writing an image
# The first argument is the name of the file to write and the second argument
# is the image
cv2.imwrite('testimg.jpg',img)
# Resizing an image
img = cv2.imread('LegoAd.jpg',1)
# need to declare window before showing image
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow('image',img)
# keep the aspect ratio: fix the width at 600 px and scale the height by the image's H/W ratio
img_width = 600
img_height = int(img_width*float(img.shape[0])/img.shape[1])
cv2.resizeWindow('image', img_width, img_height)
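# A related sketch (an addition, not part of the original tutorial): cv2.resize scales the
# image data itself rather than just the window, and takes the target size as (width, height)
img_small = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_AREA)
cv2.imshow('image_small', img_small)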
# %% VIDEO
# You first need to create a capture object with the argument either being the
# name of the video file or the index of the capture device (starting from 0)
# you only need higher indexes when additional video capture devices are attached
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
# frame returns the captured image and ret is a boolean which returns TRUE
# if frame is read correctly
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# gray2 = frame
# Display the resulting frame
cv2.imshow('frame',gray)
# Wait for the signal to stop the capture
    # The argument to waitKey is the length of time (in ms) to wait for the input before
# moving onto the next line of code
# when running on 64-bit you need to add 0xFF
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyWindow('frame')
| 31.462366 | 85 | 0.726931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,114 | 0.722488 |
966a5cfd248281b2c96521b02313c0a59ac99d4a
| 625 |
py
|
Python
|
mutational_landscape/migrations/0003_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
mutational_landscape/migrations/0003_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
mutational_landscape/migrations/0003_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2022-02-25 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mutational_landscape', '0002_auto_20180117_1457'),
]
operations = [
migrations.RemoveField(
model_name='diseasemutations',
name='protein',
),
migrations.RemoveField(
model_name='diseasemutations',
name='residue',
),
migrations.DeleteModel(
name='CancerMutations',
),
migrations.DeleteModel(
name='DiseaseMutations',
),
]
| 22.321429 | 60 | 0.5712 | 540 | 0.864 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.2928 |
966a7103f706d3acd97c7d32abf73b5779105922
| 491 |
py
|
Python
|
redis_benchmarks_specification/__init__.py
|
LaudateCorpus1/redis-benchmarks-specification
|
71a7e28cd130499f02dacc00bdeca2eadfa62619
|
[
"Apache-2.0"
] | 4 |
2022-01-24T10:15:55.000Z
|
2022-03-15T09:40:16.000Z
|
redis_benchmarks_specification/__init__.py
|
LaudateCorpus1/redis-benchmarks-specification
|
71a7e28cd130499f02dacc00bdeca2eadfa62619
|
[
"Apache-2.0"
] | 27 |
2021-08-13T14:20:36.000Z
|
2021-09-21T16:49:40.000Z
|
redis_benchmarks_specification/__init__.py
|
LaudateCorpus1/redis-benchmarks-specification
|
71a7e28cd130499f02dacc00bdeca2eadfa62619
|
[
"Apache-2.0"
] | 1 |
2022-02-02T14:07:55.000Z
|
2022-02-02T14:07:55.000Z
|
# Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs
# All rights reserved.
#
# This attribute is the only place that the version number is written down,
# so there is only one place to change it when the version number changes.
import pkg_resources
PKG_NAME = "redis-benchmarks-specification"
try:
__version__ = pkg_resources.get_distribution(PKG_NAME).version
except (pkg_resources.DistributionNotFound, AttributeError):
__version__ = "99.99.99" # like redis
| 30.6875 | 79 | 0.763747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.600815 |
966aec55e4c71e579f2f8dad4a1f0d280f90e1cf
| 1,354 |
py
|
Python
|
rest_framework_helpers/fields/relation.py
|
Apkawa/django-rest-framework-helpers
|
f4b24bf326e081a215ca5c1c117441ea8f78cbb4
|
[
"MIT"
] | null | null | null |
rest_framework_helpers/fields/relation.py
|
Apkawa/django-rest-framework-helpers
|
f4b24bf326e081a215ca5c1c117441ea8f78cbb4
|
[
"MIT"
] | null | null | null |
rest_framework_helpers/fields/relation.py
|
Apkawa/django-rest-framework-helpers
|
f4b24bf326e081a215ca5c1c117441ea8f78cbb4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from collections import OrderedDict
import six
from django.db.models import Model
from rest_framework import serializers
class SerializableRelationField(serializers.RelatedField):
def __init__(self, serializer, serializer_function=None, *args, **kwargs):
super(SerializableRelationField, self).__init__(*args, **kwargs)
self.serializer = serializer
self.serializer_function = serializer_function or self.do_serialize
def to_representation(self, value):
if isinstance(value, Model):
return self.serializer_function(value, self.serializer)
return value
def to_internal_value(self, data):
return self.serializer.Meta.model.objects.get(id=data)
def do_serialize(self, obj, serializer=None):
if not serializer:
return obj.pk
return serializer(obj).data
@property
def choices(self):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
return OrderedDict([
(
six.text_type(item.pk),
self.display_value(item)
)
for item in queryset
])
| 30.088889 | 78 | 0.655096 | 1,172 | 0.865583 | 0 | 0 | 433 | 0.319793 | 0 | 0 | 113 | 0.083456 |
966b4317dba886d84d6b354c4c49cfcc2476c374
| 10,426 |
py
|
Python
|
examples/guojiadianwang/devops/devopsPro.py
|
peng5550/DecryptLogin
|
43be0f3d9b68e4ea171cd4c3c200d29a4409d2e4
|
[
"MIT"
] | null | null | null |
examples/guojiadianwang/devops/devopsPro.py
|
peng5550/DecryptLogin
|
43be0f3d9b68e4ea171cd4c3c200d29a4409d2e4
|
[
"MIT"
] | null | null | null |
examples/guojiadianwang/devops/devopsPro.py
|
peng5550/DecryptLogin
|
43be0f3d9b68e4ea171cd4c3c200d29a4409d2e4
|
[
"MIT"
] | null | null | null |
import requests
from utils import loginFile, dataAnalysis
import os
import datetime
from dateutil.relativedelta import relativedelta
import json
from utils.logCls import Logger
dirpath = os.path.dirname(__file__)
cookieFile = f"{dirpath}/utils/cookies.txt"
dataFile = f"{dirpath}/datas"
class DevopsProject:
def __init__(self, logFileName):
        # initialise the search start and end dates
self.endDate = datetime.datetime.today().date()
self.startDate = self.endDate - relativedelta(months=+1)
        # set up the logger
self.logger = Logger("[告警信息通报({}-{})]".format(self.startDate, self.endDate), logFileName)
def _load_cookies(self):
print("----------_load_cookies----------")
        # load cookies from file
if not os.path.exists(cookieFile):
return False
        # 3. check whether the cookies have expired
try:
with open(cookieFile, "r")as f:
cookies = f.read()
if self.login_check(cookies):
return cookies
else:
return
except Exception as e:
print(e.args)
os.remove(cookieFile)
self.logger.get_log().debug("[cookies过期]")
return False
def login_check(self, cookies):
        # verify whether the cookie is still valid
self.logger.get_log().debug("[正在验证cookie]")
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'Host': 'xt.devops123.net',
'Referer': 'http://xt.devops123.net/Welcome/login/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
checkUrl = "http://xt.devops123.net/portal/substation_list/991"
response = requests.get(checkUrl, headers=headers)
if response.status_code == 200:
if "管理面板" in response.text:
self.logger.get_log().debug("[加载cookie成功]")
return True
else:
self.logger.get_log().debug("[加载失败, 正在进行登录]")
return False
raise response.raise_for_status()
def login(self):
        # log in, reusing cookies when possible
cookies = self._load_cookies()
if cookies:
return cookies
cookies = loginFile.loginDevops().login()
return cookies
def getReportData(self, cookies):
self.logger.get_log().debug("[正在搜索告警信息]")
self.searchTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # search for alarm records
downloadUrl = "http://xt.devops123.net/alarm?selCity=&selCounty=0&selSubstation=&selRoom=&level=1&selDevModel=&selStatus%5B%5D=unresolved&reportDate={}%E8%87%B3{}&selSignalName=&substationType%5B%5D=A%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=B%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=C%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D%E7%BA%A7%E5%B1%80%E7%AB%99&substationType%5B%5D=D1%E7%BA%A7%E5%B1%80%E7%AB%99&word=&export=exporttoexcel"
headers = {
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Referer': 'http://xt.devops123.net/alarm?level=1',
'Cookie': cookies,
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6'
}
response = requests.get(downloadUrl.format(str(self.startDate), str(self.endDate)), headers=headers)
return response.text
def getDingDingInfo(self, cityName):
        # load the DingTalk robot info
with open("utils/dingdingRobotInfo.json", "r", encoding="utf-8")as f:
robotInfo = json.loads(f.read())
if cityName in list(robotInfo.keys()):
SECRET = robotInfo.get(cityName)[0]
WEBHOOK = robotInfo.get(cityName)[1]
return SECRET, WEBHOOK
else:
self.logger.get_log().debug("[没有该{}对应的钉钉信息,请检查dingdingRobotInfo.json文件]".format(cityName))
return
def detail_data(self, dataList, monitorInfo, warn5=False, byhour=False):
if warn5:
for data in dataList:
k, group = data
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile, k2="超过5天告警信息汇总")
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendTitle = f"{k}-{'超过5天告警信息汇总'}\n\n- 数据提取时间:{self.searchTime}\n- 上报时间段:\t{self.startDate}至{self.endDate} \n"
sendText = sendTitle + "\n".join(
[f"- {k}:\t{v}条" for k, v in group.groupby("信号名称")["信号名称"].count().sort_values(ascending=False).to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
else:
for data in dataList:
k, group = data
if byhour:
group = group.loc[group["信号名称"].isin(monitorInfo)]
SECRET, WEBHOOK = self.getDingDingInfo(k)
htmlPath = dataAnalysis.data2html(k, group, dataFile)
imgFile = dataAnalysis.html2image(htmlPath)
imgUrl = dataAnalysis.img2url(imgFile)
sendText = "\n".join([f"- {k}:\t{v}条" for k, v in group.groupby("区域")["区域"].count().to_dict().items()])
yield k, SECRET, WEBHOOK, imgUrl, sendText
def reportTotal(self, totalInfo, monitorInfo):
self.logger.get_log().debug("正在汇总信息...")
cityNames = ["乌鲁木齐", "昌吉", "吐鲁番", "奎屯", "博州", "哈密", "塔城", "阿勒泰", "伊犁", "巴州",
"和田", "阿克苏", "石河子", "喀什", "克州", "克拉玛依"]
totalSendTextByCity = {}
summaryInfo = dataAnalysis.dataSummary(totalInfo)
for city in cityNames:
summaryText = "\n".join([f"- {k} : {v}条" for k, v in summaryInfo.get(city, {}).items() if k in monitorInfo])
if summaryText:
totalSendText = f"{self.startDate}至{self.endDate}\n- #告警消息汇总#\n- 数据提取时间:{self.searchTime}\n- #按照信号名称汇总如下#\n" + summaryText
else:
totalSendText = f"{self.startDate}至{self.endDate}\n- 数据提取时间:{self.searchTime}\n" + "无告警信息."
totalSendTextByCity[city] = totalSendText
return totalSendTextByCity
def monitorByHour(self):
try:
monitorInfo = ["通信状态", "烟感", "温度", "交流输入停电警告", "交流输入停电告警", "蓄电池组总电压过低", "水浸", "电池熔丝故障告警", "蓄电池总电压过高"]
self.logger.get_log().debug("[正在登录]")
new_cookie = self.login()
            # fetch the xml behind the excel export
self.logger.get_log().debug("[进入【温度】【交流输入停电告警】【蓄电池组总电压过低】监控...(监控频率:每小时一次)]")
xmlData = self.getReportData(new_cookie)
            # parse the xml
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile, byhour=True)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[发送告警信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo, byhour=True):
totalSendText = totalSendTextByCity.get(k)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug(sendTextTotal)
self.logger.get_log().debug("[告警信息发送结束]")
dataAnalysis.clearDir(dataFile)
except Exception as e:
self.logger.get_log().debug(e.args)
def monitorByDay(self):
try:
self.logger.get_log().debug("[进入【通信状态】【烟感】【水浸】【电池熔丝故障告警】【蓄电池总电压过高】【手动控制状态】【启动电池电压低】监控...(监控频率:每天一次)]")
monitorInfo = ["通信状态", "烟感", "水浸", "电池熔丝故障告警", "蓄电池总电压过高", "手动控制状态", "启动电池电压低", "交流输入停电警告", "交流输入停电告警", "温度",
"蓄电池组总电压过低"]
new_cookie = self.login()
            # fetch the xml behind the excel export
xmlData = self.getReportData(new_cookie)
            # parse the xml
if dataAnalysis.parseData(xmlData, dataFile):
totalInfo, warn5days, dataList = dataAnalysis.parseData(xmlData, dataFile)
totalSendTextByCity = self.reportTotal(totalInfo, monitorInfo)
self.logger.get_log().debug("[汇总告警时间超过5天的信息]")
for k, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(warn5days, monitorInfo, warn5=True):
self.logger.get_log().debug(sendText)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendText, imgUrl)
self.logger.get_log().debug("[汇总告警信息]")
for k1, SECRET, WEBHOOK, imgUrl, sendText in self.detail_data(dataList, monitorInfo):
totalSendText = totalSendTextByCity.get(k1)
if "无告警信息" in totalSendText:
dataAnalysis.sendMessage(SECRET, WEBHOOK, totalSendText, imgUrl="")
self.logger.get_log().debug(totalSendText)
else:
sendTextTotal = f"{totalSendText}\n{'- #按照县汇总如下#'}\n{sendText}"
self.logger.get_log().debug(sendTextTotal)
dataAnalysis.sendMessage(SECRET, WEBHOOK, sendTextTotal, imgUrl)
self.logger.get_log().debug("告警信息发送结束")
except Exception as e:
self.logger.get_log().debug(e.args)
def main(self):
        # main entry point
self.monitorByDay()
# self.monitorByHour()
if __name__ == '__main__':
demo = DevopsProject("test")
demo.main()
| 45.929515 | 464 | 0.579513 | 11,114 | 0.967781 | 1,484 | 0.129223 | 0 | 0 | 0 | 0 | 3,888 | 0.338558 |
966b98dbd5e0be6079948a6cd3ac31f277a97210
| 6,186 |
py
|
Python
|
ezalor.py
|
WellerV/EzalorTools
|
8279c401c9970087af955ac094fe4ebd105e8174
|
[
"Apache-2.0"
] | 13 |
2018-02-07T06:34:14.000Z
|
2020-03-04T08:12:08.000Z
|
ezalor.py
|
WellerV/EzalorTools
|
8279c401c9970087af955ac094fe4ebd105e8174
|
[
"Apache-2.0"
] | null | null | null |
ezalor.py
|
WellerV/EzalorTools
|
8279c401c9970087af955ac094fe4ebd105e8174
|
[
"Apache-2.0"
] | 2 |
2018-03-01T09:02:38.000Z
|
2021-02-16T12:16:28.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright (c) 2018 - huwei <[email protected]>
"""
This is a python script for the ezalor tools which is used to io monitor.
You can use the script to open or off the switch, or point the package name which you want to monitor it only.
The core function is to export data what ezalor is record.
"""
import os
import re
import sys, getopt
import sqlite3
import subprocess
import xlsxwriter as xw
from markhelper import MarkHelper
from record import Record
from style import Style
from datetime import datetime
DB_NAME_REG = "^ezalor_{0}(.*).db$"
tableheaders = ["path", "process", "thread", "processId", "threadId",
"readCount", "readBytes", "readTime", "writeCount", "writeBytes", "writeTime", "stacktrace",
"openTime", "closeTime", "mark"]
envDir = "/sdcard/ezalor/"
AUTOCOLUMN_WIDTH_INDEXS = [0, 1, 2, 12, 13, 14]
def print_help_and_exit():
print("\n"
"This is a python script for the ezalor tools which is used to io monitor.You can use the script to open or\n"
"off the switch, or point the package name which you want to monitor it only.The core function is to export\n"
"data what ezalor is record.\n"
"\n"
"Usage : ezalor [Options] [Args]\n"
"\n"
" Options:\n"
" -h, --help :Print the message and exit\n"
" -e, --export [packageName] [exportPath] :export a html to the path\n"
"\n"
" Examples:\n"
" ezalor -e com.wellerv.ezalor.sample export excel\n"
)
sys.exit(0)
def write_to_file(path, content):
if ("." == path):
htmlPath = "export.html"
else:
htmlPath = path + "export.html"
fo = open(htmlPath, "w")
fo.write(content)
fo.close()
return htmlPath
def export(packageName, path):
print("export to path:" + path + " begin.")
workbook = xw.Workbook(path)
# style
style = Style(workbook)
worksheet = workbook.add_worksheet("ioHistory")
# get process by packageName
processes = get_process_by_package(packageName)
# init column_max_width_array
column_max_width_array = [0] * len(AUTOCOLUMN_WIDTH_INDEXS)
# loop create table group by process
row = 0
for process in processes:
row = create_table(worksheet, style, process, row, get_data_by_process(packageName, process),
column_max_width_array)
# auto fit column width
auto_fit_column_width(worksheet, column_max_width_array)
workbook.close()
print("\nexport successful:" + path)
def auto_fit_column_width(worksheet, column_max_width_array):
# set column width
for j in range(len(column_max_width_array)):
worksheet.set_column(AUTOCOLUMN_WIDTH_INDEXS[j], AUTOCOLUMN_WIDTH_INDEXS[j], column_max_width_array[j])
def get_data_by_process(packageName, process):
# pull db file from mobile
os.system("adb pull /sdcard/ezalor/" + packageName + "/ezalor_" + process + ".db ezalor.db")
# fetch data from db file
cursor = get_cursor("ezalor.db")
cursor.execute("select * from iohistory")
results = cursor.fetchall()
# clear db file
os.remove("ezalor.db")
return results
def create_table(worksheet, style, process, row, data, column_max_width_array):
# write a title of table
worksheet.set_row(row, 24)
worksheet.merge_range(row, 0, row, 14, process + " ioHistory", style.title)
row += 1
# write headers of table
for index, item in enumerate(tableheaders):
worksheet.write(row, index, tableheaders[index], style.table_headers)
row += 1
for recordFieldValues in data:
# fill the mark
record = Record(recordFieldValues)
mark = MarkHelper.get_io_mark(record, style)
for column, columnValue in enumerate(recordFieldValues):
value = get_value(column, recordFieldValues)
worksheet.write(row, column, value, mark.style)
# get max width
if (column in AUTOCOLUMN_WIDTH_INDEXS):
i = AUTOCOLUMN_WIDTH_INDEXS.index(column)
column_max_width_array[i] = max(column_max_width_array[i], len(value))
# write mark
column += 1
if (column in AUTOCOLUMN_WIDTH_INDEXS):
i = AUTOCOLUMN_WIDTH_INDEXS.index(column)
column_max_width_array[i] = max(column_max_width_array[i], len(mark.message))
worksheet.write(row, column, mark.message, mark.style)
row += 1
return row
def get_value(column, record):
if column == 13 or column == 12:
java_timestamp = record[column]
return datetime.fromtimestamp(java_timestamp / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
return record[column]
def get_process_by_package(packageName):
# exec adb shell ls
dbDir = envDir + packageName
results = subprocess.getstatusoutput("adb shell ls " + dbDir)
# get db fileName by reg
files = []
if (results[0] == 0):
for file in results[1].split("\n"):
print(file)
if (re.match(DB_NAME_REG.format(packageName), file)):
files.append(re.findall(r"ezalor_(.+?).db", file)[0])
return files
# os.system("rm " + path + "ezalor.db")
def get_cursor(dbpath):
conn = sqlite3.connect(dbpath)
return conn.cursor()
def main(argv):
try:
opts, args = getopt.getopt(argv, "hs:e:", ["help", "switch", "export"])
except getopt.GetoptError:
print_help_and_exit()
if len(opts) == 0:
print_help_and_exit()
return
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help_and_exit()
elif opt in ("-e", "--export"):
if (len(arg) == 0):
print_help_and_exit()
packageName = arg
filename = packageName + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ".xlsx"
outPath = filename if len(args) == 0 \
else args[0] + "/" + filename
export(packageName, outPath)
if __name__ == "__main__":
main(sys.argv[1:])
| 32.387435 | 120 | 0.624313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,832 | 0.296153 |
966b9c223ecd09480f1a0fd34d6be56ce22a2ada
| 11,772 |
py
|
Python
|
revisions/models.py
|
debrouwere/django-revisions
|
d5b95806c65e66a720a2d9ec2f5ffb16698d9275
|
[
"BSD-2-Clause-FreeBSD"
] | 6 |
2015-11-05T11:48:46.000Z
|
2021-04-14T07:10:16.000Z
|
revisions/models.py
|
debrouwere/django-revisions
|
d5b95806c65e66a720a2d9ec2f5ffb16698d9275
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
revisions/models.py
|
debrouwere/django-revisions
|
d5b95806c65e66a720a2d9ec2f5ffb16698d9275
|
[
"BSD-2-Clause-FreeBSD"
] | 1 |
2019-02-13T21:01:48.000Z
|
2019-02-13T21:01:48.000Z
|
# encoding: utf-8
import uuid
import difflib
from datetime import date
from django.db import models
from django.utils.translation import ugettext as _
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import IntegrityError
from django.contrib.contenttypes.models import ContentType
from revisions import managers, utils
import inspect
# the crux of all errors seems to be that, with VersionedBaseModel,
# doing setattr(self, self.pk_name, None) does _not_ lead to creating
# a new object, and thus versioning as a whole doesn't work
# the only thing lacking from the VersionedModelBase is a version id.
# You may use VersionedModelBase if you need to specify your own
# AutoField (e.g. using UUIDs) or if you're trying to adapt an existing
# model to ``django-revisions`` and have an AutoField not named
# ``vid``.
class VersionedModelBase(models.Model, utils.ClonableMixin):
@classmethod
def get_base_model(cls):
base = cls
while isinstance(base._meta.pk, models.OneToOneField):
base = base._meta.pk.rel.to
return base
@property
def base_model(self):
return self.get_base_model()
@property
def pk_name(self):
return self.base_model._meta.pk.attname
# For UUIDs in particular, we need a way to know the order of revisions
# e.g. through a ``changed`` datetime field.
@classmethod
def get_comparator_name(cls):
if hasattr(cls.Versioning, 'comparator'):
return cls.Versioning.comparator
else:
return cls.get_base_model()._meta.pk.attname
@property
def comparator_name(self):
return self.get_comparator_name()
@property
def comparator(self):
return getattr(self, self.comparator_name)
@classmethod
def get_implementations(cls):
models = [contenttype.model_class() for contenttype in ContentType.objects.all()]
return [model for model in models if isinstance(model, cls)]
@property
def _base_model(self):
base = self
while isinstance(base._meta.pk, models.OneToOneField):
base = base._meta.pk.rel.to
return base
@property
def _base_table(self):
return self._base_model._meta.db_table
# content bundle id
cid = models.CharField(max_length=36, editable=False, null=True, db_index=True)
# managers
latest = managers.LatestManager()
objects = models.Manager()
# all related revisions, plus easy shortcuts to the previous and next revision
def get_revisions(self):
qs = self.__class__.objects.filter(cid=self.cid).order_by(self.comparator_name)
try:
qs.prev = qs.filter(**{self.comparator_name + '__lt': self.comparator}).order_by('-' + self.comparator_name)[0]
except IndexError:
qs.prev = None
try:
qs.next = qs.filter(**{self.comparator_name + '__gt': self.comparator})[0]
except IndexError:
qs.next = None
return qs
def check_if_latest_revision(self):
return self.comparator >= max([version.comparator for version in self.get_revisions()])
@classmethod
def fetch(cls, criterion):
if isinstance(criterion, int) or isinstance(criterion, str):
return cls.objects.get(pk=criterion)
elif isinstance(criterion, models.Model):
return criterion
elif isinstance(criterion, date):
pub_date = cls.Versioning.publication_date
if pub_date:
                return cls.objects.filter(**{pub_date + '__lte': criterion}).order_by('-' + cls.get_comparator_name())[0]
else:
raise ImproperlyConfigured("""Please specify which field counts as the publication
date for this model. You can do so inside a Versioning class. Read the docs
for more info.""")
else:
raise TypeError("Can only fetch an object using a primary key, a date or a datetime object.")
def revert_to(self, criterion):
revert_to_obj = self.__class__.fetch(criterion)
# You can only revert a model instance back to a previous instance.
# Not any ol' object will do, and we check for that.
if revert_to_obj.pk not in self.get_revisions().values_list('pk', flat=True):
raise IndexError("Cannot revert to a primary key that is not part of the content bundle.")
else:
return revert_to_obj.revise()
def get_latest_revision(self):
        return self.get_revisions().order_by('-' + self.comparator_name)[0]
def make_current_revision(self):
if not self.check_if_latest_revision():
self.save()
def show_diff_to(self, to, field):
frm = unicode(getattr(self, field)).split()
to = unicode(getattr(to, field)).split()
differ = difflib.HtmlDiff()
return differ.make_table(frm, to)
def _get_unique_checks(self, exclude=[]):
# for parity with Django's unique_together notation shortcut
def parse_shortcut(unique_together):
unique_together = tuple(unique_together)
if len(unique_together) and isinstance(unique_together[0], basestring):
unique_together = (unique_together, )
return unique_together
# Django actually checks uniqueness for a single field in the very same way it
# does things for unique_together, something we happily take advantage of
unique = tuple([(field,) for field in getattr(self.Versioning, 'unique', ())])
unique_together = \
unique + \
parse_shortcut(getattr(self.Versioning, 'unique_together', ())) + \
parse_shortcut(getattr(self._meta, 'unique_together', ()))
model = self.__class__()
model._meta.unique_together = unique_together
return models.Model._get_unique_checks(model, exclude)
def _get_attribute_history(self, name):
if self.__dict__.get(name, False):
return [(version.__dict__[name], version) for version in self.get_revisions()]
else:
raise AttributeError(name)
def _get_related_objects(self, relatedmanager):
""" This method extends a regular related-manager by also including objects
that are related to other versions of the same content, instead of just to
this one object. """
related_model = relatedmanager.model
related_model_name = related_model._meta.module_name
# The foreign key field name on related objects often, by convention,
# coincides with the name of the class it relates to, but not always,
# e.g. you could do something like
# class Book(models.Model):
# thingmabob = models.ForeignKey(Author)
#
# There is, afaik, no elegant way to get a RelatedManager to tell us that
# related objects refer to this class by 'thingmabob', leading to this
# kind of convoluted deep dive into the internals of the related class.
#
# By all means, I'd welcome suggestions for prettier code.
ref_name = self._meta._name_map[related_model_name][0].field.name
pks = [story.pk for story in self.get_revisions()]
objs = related_model._default_manager.filter(**{ref_name + '__in': pks})
return objs
def __getattr__(self, name):
# we catch all lookups that start with 'related_'
if name.startswith('related_'):
related_name = "_".join(name.split("_")[1:])
attribute = getattr(self, related_name, False)
# we piggyback off of an existing relationship,
# so the attribute has to exist and it has to be a
# RelatedManager or ManyRelatedManager
if attribute:
# (we check the module instead of using isinstance, since
# ManyRelatedManager is created using a factory so doesn't
# actually exist inside of the module)
if attribute.__class__.__dict__['__module__'] == 'django.db.models.fields.related':
return self._get_related_objects(attribute)
if name.endswith('_history'):
attribute = name.replace('_history', '')
return self._get_attribute_history(attribute)
raise AttributeError(name)
def prepare_for_writing(self):
"""
This method allows you to clear out certain fields in the model that are
specific to each revision, like a log message.
"""
for field in self.Versioning.clear_each_revision:
super(VersionedModelBase, self).__setattr__(field, '')
def validate_bundle(self):
# uniqueness constraints per bundle can't be checked at the database level,
# which means we'll have to do so in the save method
if getattr(self.Versioning, 'unique_together', None) or getattr(self.Versioning, 'unique', None):
# replace ValidationError with IntegrityError because this is what users will expect
try:
self.validate_unique()
except ValidationError, error:
raise IntegrityError(error)
def revise(self):
self.validate_bundle()
return self.clone()
def save(self, *vargs, **kwargs):
# The first revision of a piece of content won't have a bundle id yet,
# and because the object isn't persisted in the database, there's no
# primary key either, so we use a UUID as the bundle ID.
#
# (Note for smart alecks: Django chokes on using super/save() more than
# once in the save method, so doing a preliminary save to get the PK
# and using that value for a bundle ID is rather hard.)
if not self.cid:
self.cid = uuid.uuid4().hex
self.validate_bundle()
super(VersionedModelBase, self).save(*vargs, **kwargs)
def delete_revision(self, *vargs, **kwargs):
super(VersionedModelBase, self).delete(*vargs, **kwargs)
def delete(self, *vargs, **kwargs):
for revision in self.get_revisions():
revision.delete_revision(*vargs, **kwargs)
class Meta:
abstract = True
class Versioning:
clear_each_revision = []
publication_date = None
unique_together = ()
class VersionedModel(VersionedModelBase):
vid = models.AutoField(primary_key=True)
class Meta:
abstract = True
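# A minimal usage sketch (the Article model below is hypothetical, not part of this app):
# concrete models subclass VersionedModel; every saved revision gets its own ``vid`` while
# ``cid`` ties the revisions of one content bundle together.
#
#   class Article(VersionedModel):
#       title = models.CharField(max_length=255)
#       log_message = models.TextField(blank=True)
#
#       class Versioning:
#           clear_each_revision = ['log_message']
#
#   article.title = 'Updated title'
#   article = article.revise()  # stores the change as a new revision of the same bundle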
class TrashableModel(models.Model):
""" Users wanting a version history may also expect a trash bin
that allows them to recover deleted content, as is e.g. the
case in WordPress. This is that thing. """
_is_trash = models.BooleanField(db_column='is_trash', default=False, editable=False)
@property
def is_trash(self):
return self._is_trash
def get_content_bundle(self):
if isinstance(self, VersionedModelBase):
return self.get_revisions()
else:
return [self]
def delete(self):
"""
It makes no sense to trash individual revisions: either you keep a version history or you don't.
If you want to undo a revision, you should use obj.revert_to(preferred_revision) instead.
"""
for obj in self.get_content_bundle():
obj._is_trash = True
obj.save()
def delete_permanently(self):
for obj in self.get_content_bundle():
super(TrashableModel, obj).delete()
class Meta:
abstract = True
| 39.503356 | 123 | 0.641777 | 10,909 | 0.92669 | 0 | 0 | 2,094 | 0.17788 | 0 | 0 | 3,788 | 0.32178 |
966c228f07ae23bcd47d41a07f74a33292ad5f8f
| 1,260 |
py
|
Python
|
noir/templating.py
|
gi0baro/noir
|
b187922d4f6055dbcb745c5299db907aac574398
|
[
"BSD-3-Clause"
] | 2 |
2021-06-10T13:09:27.000Z
|
2021-06-11T09:37:02.000Z
|
noir/templating.py
|
gi0baro/noir
|
b187922d4f6055dbcb745c5299db907aac574398
|
[
"BSD-3-Clause"
] | null | null | null |
noir/templating.py
|
gi0baro/noir
|
b187922d4f6055dbcb745c5299db907aac574398
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
from typing import Any, Dict, Optional
import tomlkit
import yaml
from renoir.apis import Renoir, ESCAPES, MODES
from renoir.writers import Writer as _Writer
from .utils import adict, obj_to_adict
class Writer(_Writer):
@staticmethod
def _to_unicode(data):
if data is None:
return ""
if isinstance(data, bool):
return str(data).lower()
return _Writer._to_unicode(data)
class Templater(Renoir):
_writers = {**Renoir._writers, **{ESCAPES.common: Writer}}
def _indent(text: str, spaces: int = 2) -> str:
offset = " " * spaces
rv = f"\n{offset}".join(text.split("\n"))
return rv
def _to_json(obj: Any, indent: Optional[int] = None) -> str:
return json.dumps(obj, indent=indent)
def _to_toml(obj: Any) -> str:
return tomlkit.dumps(obj)
def _to_yaml(obj: Any) -> str:
return yaml.dump(obj)
def base_ctx(ctx: Dict[str, Any]):
ctx.update(
env=obj_to_adict(os.environ),
indent=_indent,
to_json=_to_json,
to_toml=_to_toml,
to_yaml=_to_yaml
)
yaml.add_representer(adict, yaml.representer.Representer.represent_dict)
templater = Templater(mode=MODES.plain, adjust_indent=True, contexts=[base_ctx])
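# A small sketch of what the injected helpers produce (the inputs below are illustrative):
#
#   _indent("a\nb", 2)   -> "a\n  b"
#   _to_json({"k": 1})   -> '{"k": 1}'
#   _to_yaml({"k": 1})   -> 'k: 1\n'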
| 21.355932 | 80 | 0.666667 | 314 | 0.249206 | 0 | 0 | 200 | 0.15873 | 0 | 0 | 22 | 0.01746 |
966d171d2b44d0254f7759c28f9717f4faa2dc4c
| 3,904 |
py
|
Python
|
GUI/lib/plotData.py
|
apajon/GUIPythonEncodeur
|
05b58809ed7a287369c5bfa04b5fb69f5d9e36aa
|
[
"MIT"
] | null | null | null |
GUI/lib/plotData.py
|
apajon/GUIPythonEncodeur
|
05b58809ed7a287369c5bfa04b5fb69f5d9e36aa
|
[
"MIT"
] | 4 |
2021-06-03T23:34:17.000Z
|
2021-06-04T21:31:19.000Z
|
GUI/lib/plotData.py
|
apajon/GUIPythonEncodeur
|
05b58809ed7a287369c5bfa04b5fb69f5d9e36aa
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 Adrien Pajon ([email protected])
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from api_phidget_n_MQTT.src.lib_global_python import searchLoggerFile
def PlotData(config):
############
# Encoder's resolution in mm per pulse
# Encoder_mm_per_Pulse = 0.02
Encoder_mm_per_Pulse = config.getfloat('encoder', 'resolution')
print("encoder resolution : " + str(Encoder_mm_per_Pulse))
############
    # search for the most recent logger file based on the numbering in its name
# filename="Logger_encoder_07.txt"
filename = searchLoggerFile.searchLoggerFile(config)
print(filename)
try:
data = np.genfromtxt(filename, delimiter=",", names=True)
except:
return False
# convert the number of pulse position change into mm
PositionChange_mm = data['PositionChange'] * Encoder_mm_per_Pulse
# recorded time when datas are received in s
time = data['TimeRecording']
    time -= time[0]  # start the time axis at 0
# vel is the velocity measured by the encoder
# as the positionChange_mm is in mm and the TimeChange is in ms
# the velocity is given in m/s
# If a 'detach' from the encoder, TimeChange=0 and vel will be Inf
vel = np.divide(PositionChange_mm, data['TimeChange'])
############
# initialize the plot
fig, ax1 = plt.subplots()
# plot the encoder velocity in time
color = 'tab:blue'
lns1 = ax1.plot(time, vel, label="Velocity", color=color)
ax1.set_xlabel("time[s]")
ax1.set_ylabel("Velocity[m/s]", color=color)
color = 'tab:blue'
ax1.tick_params(axis='y', labelcolor=color)
ax1.grid()
# # Create a Rectangle patch
# rect = patches.Rectangle((0,0),20,0.2,linewidth=1,edgecolor='k',facecolor='tab:grey')
# # Add the patch to the Axes
# ax1.add_patch(rect)
# Draw a grey rectangle patch for each detach of the encoder aka 'missing values' aka TimeChange=0
for k in np.argwhere(data['TimeChange'] == 0):
if k == 0:
rect = patches.Rectangle((time[k], 0), time[k + 1] - time[k], 2, linewidth=1, edgecolor='k',
facecolor='tab:grey')
ax1.add_patch(rect)
lns3 = rect
        elif k != len(data['TimeChange']) - 1:  # guard against indexing past the last sample
if k == np.argwhere(data['TimeChange'] == 0)[0]:
rect = patches.Rectangle((time[k - 1], 0), time[k + 1] - time[k - 1], 2, linewidth=1, edgecolor='k',
facecolor='tab:grey')
lns3 = ax1.add_patch(rect)
else:
rect = patches.Rectangle((time[k - 1], 0), time[k + 1] - time[k - 1], 2, linewidth=1, edgecolor='k',
facecolor='tab:grey')
ax1.add_patch(rect)
# plot the encoder distance measured in m
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:red'
ax2.set_ylabel('Position[m]', color=color) # we already handled the x-label with ax1
lns2 = ax2.plot(time, np.cumsum(PositionChange_mm / 1000), color=color, label="Position")
ax2.tick_params(axis='y', labelcolor=color)
plt.title("velocity and position measured by encoder \n in file : " + filename)
    # Manage the legend when there are no missing values, i.e. lns3 was never defined
try:
lns = [lns1[0], lns2[0], lns3]
labs = ('Velocity', 'Position', 'Missing velocity')
except:
lns = [lns1[0], lns2[0]]
labs = ('Velocity', 'Position')
ax1.legend(lns, labs) # , loc=0)
fig.tight_layout() # otherwise the right y-label is slightly clipped
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
return True
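# A hypothetical invocation sketch (the ini file name is an assumption; the section/option
# names match those read above):
#
#   import configparser
#   config = configparser.ConfigParser()
#   config.read('config.ini')
#   PlotData(config)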
| 38.27451 | 116 | 0.619365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,651 | 0.4229 |
966d9c6d207cc79829fc35c116bf0cff41eb5f9c
| 4,945 |
py
|
Python
|
nova/tests/unit/objects/test_resource.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1 |
2021-06-10T17:08:15.000Z
|
2021-06-10T17:08:15.000Z
|
nova/tests/unit/objects/test_resource.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 2 |
2021-03-31T20:04:16.000Z
|
2021-12-13T20:45:03.000Z
|
nova/tests/unit/objects/test_resource.py
|
Nexenta/nova
|
ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3
|
[
"Apache-2.0"
] | 1 |
2021-11-12T03:55:41.000Z
|
2021-11-12T03:55:41.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova.objects import resource
from nova.tests.unit.objects import test_objects
fake_resources = resource.ResourceList(objects=[
resource.Resource(provider_uuid=uuids.rp, resource_class='CUSTOM_RESOURCE',
identifier='foo'),
resource.Resource(provider_uuid=uuids.rp, resource_class='CUSTOM_RESOURCE',
identifier='bar')])
fake_vpmems = [
resource.LibvirtVPMEMDevice(
label='4GB', name='ns_0', devpath='/dev/dax0.0',
size=4292870144, align=2097152),
resource.LibvirtVPMEMDevice(
label='4GB', name='ns_1', devpath='/dev/dax0.0',
size=4292870144, align=2097152)]
fake_instance_extras = {
'resources': jsonutils.dumps(fake_resources.obj_to_primitive())
}
class TestResourceObject(test_objects._LocalTest):
def _create_resource(self, metadata=None):
fake_resource = resource.Resource(provider_uuid=uuids.rp,
resource_class='bar',
identifier='foo')
if metadata:
fake_resource.metadata = metadata
return fake_resource
def _test_set_malformed_resource_class(self, rc):
try:
resource.Resource(provider_uuid=uuids.rp,
resource_class=rc,
identifier='foo')
except ValueError as e:
self.assertEqual('Malformed Resource Class %s' % rc,
six.text_type(e))
else:
self.fail('Check malformed resource class failed.')
def _test_set_formed_resource_class(self, rc):
resource.Resource(provider_uuid=uuids.rp,
resource_class=rc,
identifier='foo')
def test_set_malformed_resource_classes(self):
malformed_resource_classes = ['!', ';', ' ']
for rc in malformed_resource_classes:
self._test_set_malformed_resource_class(rc)
def test_set_formed_resource_classes(self):
formed_resource_classes = ['resource', 'RESOURCE', '0123']
for rc in formed_resource_classes:
self._test_set_formed_resource_class(rc)
def test_equal_without_metadata(self):
resource_0 = resource.Resource(provider_uuid=uuids.rp,
resource_class='bar',
identifier='foo')
resource_1 = resource.Resource(provider_uuid=uuids.rp,
resource_class='bar',
identifier='foo')
self.assertEqual(resource_0, resource_1)
def test_not_equal_without_matadata(self):
self.assertNotEqual(fake_resources[0], fake_resources[1])
def test_equal_with_vpmem_metadata(self):
resource_0 = self._create_resource(metadata=fake_vpmems[0])
resource_1 = self._create_resource(metadata=fake_vpmems[0])
self.assertEqual(resource_0, resource_1)
def test_not_equal_with_vpmem_metadata(self):
resource_0 = self._create_resource(metadata=fake_vpmems[0])
resource_1 = self._create_resource(metadata=fake_vpmems[1])
self.assertNotEqual(resource_0, resource_1)
def test_not_equal_with_and_without_metadata(self):
        # one resource has metadata, the other has none
resource_0 = self._create_resource(metadata=fake_vpmems[0])
resource_1 = self._create_resource()
self.assertNotEqual(resource_0, resource_1)
class _TestResourceListObject(object):
@mock.patch('nova.db.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = fake_instance_extras
resources = resource.ResourceList.get_by_instance_uuid(
self.context, 'fake_uuid')
for i in range(len(resources)):
self.assertEqual(resources[i].identifier,
fake_resources[i].identifier)
class TestResourceListObject(test_objects._LocalTest,
_TestResourceListObject):
pass
class TestRemoteResourceListObject(test_objects._RemoteTest,
_TestResourceListObject):
pass
| 39.879032 | 79 | 0.651769 | 3,474 | 0.702528 | 0 | 0 | 422 | 0.085339 | 0 | 0 | 925 | 0.187058 |
966e2e4f01ba505a9a223a3984ad9d961a218e79
| 11,532 |
py
|
Python
|
mandala/storages/rel_impl/psql_utils.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | 9 |
2022-02-22T19:24:01.000Z
|
2022-03-23T04:46:41.000Z
|
mandala/storages/rel_impl/psql_utils.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | null | null | null |
mandala/storages/rel_impl/psql_utils.py
|
amakelov/mandala
|
a9ec051ef730ada4eed216c62a07b033126e78d5
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy.engine.base import Connection
from sqlalchemy.sql.selectable import Select, CompoundSelect
from sqlalchemy.engine.result import Result
from sqlalchemy.dialects import postgresql
from .utils import transaction
from ...common_imports import *
from ...util.common_ut import get_uid
from ...core.config import PSQLConfig
################################################################################
### helper functions
################################################################################
def get_db_root(psql_config:PSQLConfig):
return PSQLInterface(autocommit=True, psql_config=psql_config)
def get_connection_string(db_name:str, user:str, password:str,
host:str, port:int) -> str:
return f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{db_name}'
################################################################################
### interface to postgres
################################################################################
class PSQLInterface(object):
# to connect to a postgres server
def __init__(self, psql_config:PSQLConfig, autocommit:bool=False,):
self.host = psql_config.host
self.user = psql_config.user
self.port = psql_config.port
self.password = psql_config.password
self.root_db_name = psql_config.root_db_name
self.autocommit = autocommit
self.connection_string = f'postgresql+psycopg2://{self.user}:{self.password}@{self.host}:{self.port}/{self.root_db_name}'
self.engine = self.get_engine()
self.block_subtransactions = False
############################################################################
### utils
############################################################################
def get_raw_conn(self, autocommit:bool=False) -> Connection:
conn = psycopg2.connect(host=self.host,
user=self.user,
password=self.password,
database=self.root_db_name)
conn.autocommit = autocommit
return conn
def get_engine(self) -> TAny:
"""
you might think that setting autocommit=True in conn.execution_options
would be enough to tell sqlalchemy to use autocommit mode, however
that's not the case; we must do it at the level of engine creation. See
[this](https://www.oddbird.net/2014/06/14/sqlalchemy-postgres-autocommit/)
for more details
"""
if self.autocommit:
engine = sqlalchemy.create_engine(self.connection_string,
isolation_level='AUTOCOMMIT',
connect_args={'connect_timeout': 5})
else:
engine = sqlalchemy.create_engine(self.connection_string,
connect_args={'connect_timeout': 5})
return engine
def read_rp(self, rp:Result) -> pd.DataFrame:
df = pd.DataFrame(rp.fetchall(), columns=rp.keys())
return df
@transaction()
def read(self, query:str, index_col:str=None,
conn:Connection=None) -> pd.DataFrame:
rp = conn.execute(query)
df = self.read_rp(rp=rp)
if index_col is not None:
df.set_index(index_col, inplace=True)
return df
############################################################################
### managing databases
############################################################################
@staticmethod
def _is_unnamed_db(db_name:str) -> bool:
return db_name.startswith('v5_') and len(db_name) == 35
@transaction()
def create_db(self, db_name:str, conn:Connection=None):
logging.debug('Creating database {}...'.format(db_name))
conn.execute('CREATE DATABASE {}'.format(db_name))
@transaction()
def kill_active_connections(self, db_name:str, conn:Connection=None):
query = f"""
SELECT
pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE
pg_stat_activity.datname = '{db_name}'
AND pid <> pg_backend_pid()
"""
conn.execute(query)
@transaction(retry=False) #! not retriable because it is itself used in retry loop
def drop_db(self, db_name:str, must_exist:bool=True,
kill_connections:bool=False, conn:Connection=None):
assert db_name not in ('postgres', 'template0', 'template1')
logging.info('Dropping database {}...'.format(db_name))
if kill_connections:
self.kill_active_connections(db_name=db_name, conn=conn)
if must_exist:
conn.execute('DROP DATABASE {}'.format(db_name))
else:
conn.execute(f'DROP DATABASE IF EXISTS {db_name}')
def get_all_dbs(self) -> TSet[str]:
df = self.read(query='SELECT datname FROM pg_database')
return set(df.datname.values)
def exists_db(self, db_name:str) -> bool:
return db_name in self.get_all_dbs()
@transaction()
def _drop_unnamed(self, conn:Connection=None):
#! NEVER CALL THIS
all_dbs = self.get_all_dbs()
for db_name in all_dbs:
if PSQLInterface._is_unnamed_db(db_name):
self.drop_db(db_name, conn=conn)
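# A hypothetical usage sketch (my_psql_config is assumed to be a PSQLConfig instance, and the
# @transaction decorator is assumed to open a connection when none is passed explicitly):
#
#   root = get_db_root(psql_config=my_psql_config)
#   if not root.exists_db('experiments'):
#       root.create_db('experiments')
#   print(root.get_all_dbs())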
################################################################################
### fast operations
################################################################################
def fast_select(query:TUnion[str, Select]=None, qual_table:str=None,
index_col:str=None, cols:TList[str]=None,
conn:Connection=None) -> pd.DataFrame:
"""
Some notes:
- loading an empty table with an index (index_col=something) will not
display the index name(s), but they are in the (empty) index
"""
logging.debug('Fastread does not handle dtypes')
# quote table name
if query is None:
assert qual_table is not None
if '.' in qual_table:
schema, table = qual_table.split('.')
quoted_table = f'"{schema}"."{table}"'
else:
quoted_table = f'"{qual_table}"'
if cols is not None:
cols_string = ', '.join([f'"{col}"' for col in cols])
query = f'SELECT {cols_string} FROM {quoted_table}'
else:
query = f'SELECT * FROM {quoted_table}'
head = 'HEADER'
if isinstance(query, (Select, CompoundSelect)):
#! the query object must be converted to a pure postgresql-compatible
#! string for this to work, and in particular to render bound parameters
# in-line using the literal_binds kwarg and the particular dialect
query_string = query.compile(bind=conn.engine,
compile_kwargs={'literal_binds': True},
dialect=postgresql.dialect())
elif isinstance(query, str):
query_string = query
else:
raise NotImplementedError()
copy_sql = f"""COPY ({query_string}) TO STDOUT WITH CSV {head}"""
buffer = io.StringIO()
# Note that we need to use a *raw* connection in this method, which can be
# accessed as conn.connection
with conn.connection.cursor() as curs:
curs.copy_expert(copy_sql, buffer)
buffer.seek(0)
df:pd.DataFrame = pd.read_csv(buffer)
if index_col is not None:
df = df.set_index(index_col)
return df
def fast_insert(df:pd.DataFrame, qual_table:str, conn:Connection=None,
columns:TList[str]=None, include_index:bool=True):
"""
In psycopg 2.9, they changed the .copy_from() method, so that table names
are now quoted. This means that it won't work with a schema-qualified name.
This method fixes this by using copy_expert(), as directed by the psycopg2
docs.
"""
if columns is None:
columns = df.columns
if '.' in qual_table:
schema, table = qual_table.split('.')
quoted_table = f'"{schema}"."{table}"'
else:
quoted_table = f'"{qual_table}"'
start_time = time.time()
# save dataframe to an in-memory buffer
buffer = io.StringIO()
if include_index:
df = df.reset_index()
df.to_csv(buffer, header=False, index=False, columns=columns, na_rep='')
buffer.seek(0)
columns_string = ', '.join('"{}"'.format(k) for k in columns)
query = f"""COPY {quoted_table}({columns_string}) FROM STDIN WITH CSV"""
# Note that we need to use a *raw* connection in this method, which can be
# accessed as conn.connection
with conn.connection.cursor() as curs:
curs.copy_expert(sql=query, file=buffer)
end_time = time.time()
nrows = df.shape[0]
total_time = end_time - start_time
logging.debug(f'Inserted {nrows} rows, {nrows/total_time} rows/second')
def fast_upsert(df:pd.DataFrame, qual_table:str,
index_cols:TList[str], columns:TList[str]=None,
include_index:bool=True, conn:Connection=None):
"""
code based on
https://stackoverflow.com/questions/46934351/python-postgresql-copy-command-used-to-insert-or-update-not-just-insert
"""
if include_index:
df = df.reset_index()
#! importantly, columns are set after potentially resetting the index
if columns is None:
columns = list(df.columns)
if '.' in qual_table:
schema, table = qual_table.split('.')
quoted_table = f'"{schema}"."{table}"'
else:
schema = ''
table = qual_table
quoted_table = f'"{qual_table}"'
# create a temporary table with same columns as target table
# temp_qual_table = f'{schema}.{table}__copy'
temp_uid = get_uid()[:16]
temp_qual_table = f'{schema}_{table}__copy_{temp_uid}'
temp_index_name = f'{schema}_{table}__temp_index_{temp_uid}'
create_temp_table_query = f"""
create temporary table {temp_qual_table} as (select * from {quoted_table} limit
0);
"""
conn.execute(create_temp_table_query)
# if provided, create indices on the table
if index_cols is not None:
create_temp_index_query = f"""
CREATE INDEX {temp_index_name} ON {temp_qual_table}({','.join(index_cols)});
"""
conn.execute(create_temp_index_query)
# copy data into this table
fast_insert(df=df, qual_table=temp_qual_table, conn=conn, columns=columns,
include_index=include_index)
# comma-separated lists of various things
target_cols_string = f"{', '.join(columns)}"
source_cols_string = f"{', '.join([f'{temp_qual_table}.{col}' for col in columns])}"
index_cols_string = f"{', '.join([f'{col}' for col in index_cols])}"
# update existing records
index_conditions = ' AND '.join([f'{qual_table}.{col} = {temp_qual_table}.{col}' for col in index_cols])
update_query = f"""
UPDATE {quoted_table}
SET
({target_cols_string}) = ({source_cols_string})
FROM
{temp_qual_table}
WHERE
{index_conditions}
"""
conn.execute(update_query)
# insert new records
insert_query = f"""
INSERT INTO {quoted_table}({target_cols_string})
(
SELECT {source_cols_string}
FROM
{temp_qual_table} LEFT JOIN {quoted_table} USING({index_cols_string})
WHERE {table} IS NULL);
"""
conn.execute(insert_query)
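# Minimal usage sketch for fast_upsert (names are illustrative assumptions):
#
#   df = pd.DataFrame({'val': ['a', 'b']}, index=pd.Index([1, 2], name='id'))
#   fast_upsert(df, 'myschema.mytable', index_cols=['id'], conn=conn)
#
# The pattern: stage the frame in a TEMPORARY table via COPY, UPDATE the rows
# whose index columns already exist in the target, then INSERT the rows that
# found no match (LEFT JOIN ... WHERE <target row> IS NULL), so repeated loads
# are idempotent with respect to index_cols.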
| 40.893617 | 129 | 0.583593 | 4,415 | 0.382848 | 0 | 0 | 1,828 | 0.158515 | 0 | 0 | 4,710 | 0.408429 |
966fe34cabbdf144b4795af99c630f2fed4590e1
| 1,484 |
py
|
Python
|
tests/test_ami.py
|
seek-oss/aec
|
13a75c690542eec61727b9d92a2c11a3dbd1caba
|
[
"MIT"
] | 6 |
2019-09-10T11:23:18.000Z
|
2021-03-25T04:37:28.000Z
|
tests/test_ami.py
|
seek-oss/aec
|
13a75c690542eec61727b9d92a2c11a3dbd1caba
|
[
"MIT"
] | 162 |
2019-12-05T10:21:00.000Z
|
2022-03-27T06:00:45.000Z
|
tests/test_ami.py
|
seek-oss/aec
|
13a75c690542eec61727b9d92a2c11a3dbd1caba
|
[
"MIT"
] | 6 |
2019-10-27T22:59:35.000Z
|
2021-02-10T22:36:59.000Z
|
import pytest
from moto import mock_ec2
from moto.ec2.models import AMIS
from aec.command.ami import delete, describe, share
@pytest.fixture
def mock_aws_config():
mock = mock_ec2()
mock.start()
return {
"region": "ap-southeast-2",
}
def test_describe_images(mock_aws_config):
# describe images defined by moto
# see https://github.com/spulec/moto/blob/master/moto/ec2/resources/amis.json
canonical_account_id = "099720109477"
mock_aws_config["describe_images_owners"] = canonical_account_id
images = describe(config=mock_aws_config)
assert len(images) == 2
assert images[0]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727"
assert images[1]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20170721"
def test_describe_images_name_match(mock_aws_config):
# describe images defined by moto
# see https://github.com/spulec/moto/blob/master/moto/ec2/resources/amis.json
canonical_account_id = "099720109477"
mock_aws_config["describe_images_owners"] = canonical_account_id
images = describe(config=mock_aws_config, name_match="*trusty*")
assert len(images) == 1
assert images[0]["Name"] == "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727"
def test_delete_image(mock_aws_config):
delete(mock_aws_config, AMIS[0]["ami_id"])
def test_share_image(mock_aws_config):
share(mock_aws_config, AMIS[0]["ami_id"], "123456789012")
| 31.574468 | 97 | 0.735849 | 0 | 0 | 0 | 0 | 133 | 0.089623 | 0 | 0 | 573 | 0.386119 |
96702b58ef9f60b2130e8a0e754ad89b97258e50
| 691 |
py
|
Python
|
mainapp/views.py
|
H0oxy/sportcars
|
dcd76736bfe88630b3ccce7e4ee0ad9398494f08
|
[
"MIT"
] | null | null | null |
mainapp/views.py
|
H0oxy/sportcars
|
dcd76736bfe88630b3ccce7e4ee0ad9398494f08
|
[
"MIT"
] | null | null | null |
mainapp/views.py
|
H0oxy/sportcars
|
dcd76736bfe88630b3ccce7e4ee0ad9398494f08
|
[
"MIT"
] | null | null | null |
from django.views.generic import ListView
from rest_framework.permissions import AllowAny
from rest_framework.viewsets import ModelViewSet
from mainapp.models import Manufacturer, Car
from mainapp.serializers import ManufacturerSerializer, CarSerializer
class ManufacturerList(ListView):
model = Manufacturer
class CarList(ListView):
model = Car
class ManufacturerViewSet(ModelViewSet):
# queryset = Manufacturer.objects.all()
queryset = Manufacturer.objects.filter(is_active=True)
serializer_class = ManufacturerSerializer
class CarViewSet(ModelViewSet):
permission_classes = [AllowAny]
queryset = Car.objects.all()
serializer_class = CarSerializer
| 25.592593 | 69 | 0.797395 | 424 | 0.613603 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.05644 |
96708ac825e30302b84cfd4e3368984095dd810a
| 3,168 |
py
|
Python
|
BrandDetails.py
|
p10rahulm/brandyz-reco
|
95d5e3f291cdb5b951e0c7d83ff30c59f8a3797f
|
[
"MIT"
] | null | null | null |
BrandDetails.py
|
p10rahulm/brandyz-reco
|
95d5e3f291cdb5b951e0c7d83ff30c59f8a3797f
|
[
"MIT"
] | null | null | null |
BrandDetails.py
|
p10rahulm/brandyz-reco
|
95d5e3f291cdb5b951e0c7d83ff30c59f8a3797f
|
[
"MIT"
] | null | null | null |
# In this module we will get the brand count and the users per brand as a list.
# Add more details as deemed necessary
# I'm using mergesort here instead of quicksort because the per-brand lists are much larger than the per-user lists
import Mergesort
import numpy as np
def get_brand_purchase_deets(shoppers,brands):
# Going to sort by brands
brand_shoppers = zip(brands,shoppers)
brand_shoppers = sorted(brand_shoppers)
brand_sorted = [brands for brands, shoppers in brand_shoppers]
shoppers_sorted = [shoppers for brands, shoppers in brand_shoppers]
# Creating Brand Details
brand_deets = {}
brands_list = []
num_transactions_list = []
list_of_users = []
end_row = 0
total_transactions = len(brands)
while end_row < total_transactions:
num_transactions = 0
users_list = []
start_row = end_row
start_brand = brand_sorted[start_row]
while (end_row < total_transactions and brand_sorted[end_row] == start_brand):
num_transactions += 1
users_list.append(shoppers_sorted[end_row])
end_row += 1
users_list = Mergesort.mergesorts(users_list)
# Input into main lists
brands_list.append(start_brand)
num_transactions_list.append(num_transactions)
list_of_users.append(users_list)
# Add to data frame
brand_deets["brand_code"] = np.array(brands_list,dtype=np.int32)
brand_deets["num_transactions"] = np.array(num_transactions_list,dtype=np.int32)
brand_deets["list_of_users"] = list_of_users
brand_deets["meta"] = {"size":len(num_transactions_list),
"num_columns": 3,
"column_type_list": [("brand_code",'int32'),
("num_transactions",'int32'),
("list_of_users",'list')]}
return brand_deets
# Below was using list of tuples for storage, now going to convert to dictionary of np.arrays or lists. This could be more R or database style.
def get_brand_purchase_deets_old(shoppers,brands):
# below we are sorting by brand rather than by shoppers
brand_shoppers = zip(brands,shoppers)
brand_shoppers = sorted(brand_shoppers)
brand_sorted = [brands for brands, shoppers in brand_shoppers]
shoppers_sorted = [shoppers for brands, shoppers in brand_shoppers]
# Creating Brand Details
brand_deets = []
end_row = 0
total_transactions = len(brands)
while end_row < total_transactions:
num_transactions = 0
users_list = []
start_row = end_row
start_brand = brand_sorted[start_row]
while (end_row < total_transactions and brand_sorted[end_row] == start_brand):
num_transactions += 1
users_list.append(shoppers_sorted[end_row])
end_row += 1
users_list = Mergesort.mergesorts(users_list)
brand_deets.append((start_brand, num_transactions, users_list))
return brand_deets
if __name__ == "__main__":
customers = [0,0,1,1,1,2,2,2,2]
purchases = [0,3,5,1,2,4,1,3,5]
print(get_brand_purchase_deets(customers, purchases))
| 40.615385 | 143 | 0.669192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.217803 |
96709cd14fd89b69849ecd83f84c39ac23149ad2
| 4,623 |
py
|
Python
|
ATSAMD51P19A/libsrc/ATSAMD51P19A/MPU_.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | null | null | null |
ATSAMD51P19A/libsrc/ATSAMD51P19A/MPU_.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | 1 |
2022-01-19T00:16:02.000Z
|
2022-01-26T03:43:34.000Z
|
ATSAMD51P19A/libsrc/ATSAMD51P19A/MPU_.py
|
t-ikegami/WioTerminal-CircuitPython
|
efbdc2e13ad969fe009d88f7ec4b836ca61ae973
|
[
"MIT"
] | null | null | null |
import uctypes as ct
MPU_ = {
'TYPE' : ( 0x00, {
'reg' : 0x00 | ct.UINT32,
'SEPARATE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'DREGION' : 0x00 | ct.BFUINT32 | 8 << ct.BF_POS | 8 << ct.BF_LEN,
'IREGION' : 0x00 | ct.BFUINT32 | 16 << ct.BF_POS | 8 << ct.BF_LEN,
}),
'CTRL' : ( 0x04, {
'reg' : 0x00 | ct.UINT32,
'ENABLE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'HFNMIENA' : 0x00 | ct.BFUINT32 | 1 << ct.BF_POS | 1 << ct.BF_LEN,
'PRIVDEFENA' : 0x00 | ct.BFUINT32 | 2 << ct.BF_POS | 1 << ct.BF_LEN,
}),
'RNR' : ( 0x08, {
'reg' : 0x00 | ct.UINT32,
'REGION' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 8 << ct.BF_LEN,
}),
'RBAR' : ( 0x0C, {
'reg' : 0x00 | ct.UINT32,
'REGION' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 4 << ct.BF_LEN,
'VALID' : 0x00 | ct.BFUINT32 | 4 << ct.BF_POS | 1 << ct.BF_LEN,
'ADDR' : 0x00 | ct.BFUINT32 | 5 << ct.BF_POS | 27 << ct.BF_LEN,
}),
'RASR' : ( 0x10, {
'reg' : 0x00 | ct.UINT32,
'ENABLE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'SIZE' : 0x00 | ct.BFUINT32 | 1 << ct.BF_POS | 5 << ct.BF_LEN,  # RASR.SIZE is a 5-bit field ([5:1]) on the Cortex-M MPU
'SRD' : 0x00 | ct.BFUINT32 | 8 << ct.BF_POS | 8 << ct.BF_LEN,
'B' : 0x00 | ct.BFUINT32 | 16 << ct.BF_POS | 1 << ct.BF_LEN,
'C' : 0x00 | ct.BFUINT32 | 17 << ct.BF_POS | 1 << ct.BF_LEN,
'S' : 0x00 | ct.BFUINT32 | 18 << ct.BF_POS | 1 << ct.BF_LEN,
'TEX' : 0x00 | ct.BFUINT32 | 19 << ct.BF_POS | 3 << ct.BF_LEN,
'AP' : 0x00 | ct.BFUINT32 | 24 << ct.BF_POS | 3 << ct.BF_LEN,
'XN' : 0x00 | ct.BFUINT32 | 28 << ct.BF_POS | 1 << ct.BF_LEN,
}),
'RBAR_A1' : ( 0x14, {
'reg' : 0x00 | ct.UINT32,
'REGION' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 4 << ct.BF_LEN,
'VALID' : 0x00 | ct.BFUINT32 | 4 << ct.BF_POS | 1 << ct.BF_LEN,
'ADDR' : 0x00 | ct.BFUINT32 | 5 << ct.BF_POS | 27 << ct.BF_LEN,
}),
'RASR_A1' : ( 0x18, {
'reg' : 0x00 | ct.UINT32,
'ENABLE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'SIZE' : 0x00 | ct.BFUINT32 | 1 << ct.BF_POS | 5 << ct.BF_LEN,
'SRD' : 0x00 | ct.BFUINT32 | 8 << ct.BF_POS | 8 << ct.BF_LEN,
'B' : 0x00 | ct.BFUINT32 | 16 << ct.BF_POS | 1 << ct.BF_LEN,
'C' : 0x00 | ct.BFUINT32 | 17 << ct.BF_POS | 1 << ct.BF_LEN,
'S' : 0x00 | ct.BFUINT32 | 18 << ct.BF_POS | 1 << ct.BF_LEN,
'TEX' : 0x00 | ct.BFUINT32 | 19 << ct.BF_POS | 3 << ct.BF_LEN,
'AP' : 0x00 | ct.BFUINT32 | 24 << ct.BF_POS | 3 << ct.BF_LEN,
'XN' : 0x00 | ct.BFUINT32 | 28 << ct.BF_POS | 1 << ct.BF_LEN,
}),
'RBAR_A2' : ( 0x1C, {
'reg' : 0x00 | ct.UINT32,
'REGION' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 4 << ct.BF_LEN,
'VALID' : 0x00 | ct.BFUINT32 | 4 << ct.BF_POS | 1 << ct.BF_LEN,
'ADDR' : 0x00 | ct.BFUINT32 | 5 << ct.BF_POS | 27 << ct.BF_LEN,
}),
'RASR_A2' : ( 0x20, {
'reg' : 0x00 | ct.UINT32,
'ENABLE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'SIZE' : 0x00 | ct.BFUINT32 | 1 << ct.BF_POS | 5 << ct.BF_LEN,
'SRD' : 0x00 | ct.BFUINT32 | 8 << ct.BF_POS | 8 << ct.BF_LEN,
'B' : 0x00 | ct.BFUINT32 | 16 << ct.BF_POS | 1 << ct.BF_LEN,
'C' : 0x00 | ct.BFUINT32 | 17 << ct.BF_POS | 1 << ct.BF_LEN,
'S' : 0x00 | ct.BFUINT32 | 18 << ct.BF_POS | 1 << ct.BF_LEN,
'TEX' : 0x00 | ct.BFUINT32 | 19 << ct.BF_POS | 3 << ct.BF_LEN,
'AP' : 0x00 | ct.BFUINT32 | 24 << ct.BF_POS | 3 << ct.BF_LEN,
'XN' : 0x00 | ct.BFUINT32 | 28 << ct.BF_POS | 1 << ct.BF_LEN,
}),
'RBAR_A3' : ( 0x24, {
'reg' : 0x00 | ct.UINT32,
'REGION' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 4 << ct.BF_LEN,
'VALID' : 0x00 | ct.BFUINT32 | 4 << ct.BF_POS | 1 << ct.BF_LEN,
'ADDR' : 0x00 | ct.BFUINT32 | 5 << ct.BF_POS | 27 << ct.BF_LEN,
}),
'RASR_A3' : ( 0x28, {
'reg' : 0x00 | ct.UINT32,
'ENABLE' : 0x00 | ct.BFUINT32 | 0 << ct.BF_POS | 1 << ct.BF_LEN,
'SIZE' : 0x00 | ct.BFUINT32 | 1 << ct.BF_POS | 5 << ct.BF_LEN,
'SRD' : 0x00 | ct.BFUINT32 | 8 << ct.BF_POS | 8 << ct.BF_LEN,
'B' : 0x00 | ct.BFUINT32 | 16 << ct.BF_POS | 1 << ct.BF_LEN,
'C' : 0x00 | ct.BFUINT32 | 17 << ct.BF_POS | 1 << ct.BF_LEN,
'S' : 0x00 | ct.BFUINT32 | 18 << ct.BF_POS | 1 << ct.BF_LEN,
'TEX' : 0x00 | ct.BFUINT32 | 19 << ct.BF_POS | 3 << ct.BF_LEN,
'AP' : 0x00 | ct.BFUINT32 | 24 << ct.BF_POS | 3 << ct.BF_LEN,
'XN' : 0x00 | ct.BFUINT32 | 28 << ct.BF_POS | 1 << ct.BF_LEN,
}),
}
MPU = ct.struct(0xe000ed90, MPU_)
| 48.663158 | 76 | 0.499459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 452 | 0.097772 |
96724dd749d0959e504802c78aa325cae8171f97
| 8,550 |
py
|
Python
|
lib/core/postprocess.py
|
Chris1nexus/tmp
|
a76b477d491688add434f1ef84bcc0e2dbedbef3
|
[
"BSD-3-Clause"
] | null | null | null |
lib/core/postprocess.py
|
Chris1nexus/tmp
|
a76b477d491688add434f1ef84bcc0e2dbedbef3
|
[
"BSD-3-Clause"
] | null | null | null |
lib/core/postprocess.py
|
Chris1nexus/tmp
|
a76b477d491688add434f1ef84bcc0e2dbedbef3
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
from lib.utils import is_parallel
import numpy as np
np.set_printoptions(threshold=np.inf)
import cv2
from sklearn.cluster import DBSCAN
def build_targets(cfg, predictions, targets, model, bdd=True):
'''
predictions
[16, 3, 32, 32, 85]
[16, 3, 16, 16, 85]
[16, 3, 8, 8, 85]
torch.tensor(predictions[i].shape)[[3, 2, 3, 2]]
[32,32,32,32]
[16,16,16,16]
[8,8,8,8]
targets[3,x,7]
t [index, class, x, y, w, h, head_index]
'''
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
if bdd:
if is_parallel(model):
det = model.module.det_out_bdd
else:
det = model.det_out_bdd
else:
if is_parallel(model):
det = model.module.det_out_bosch
else:
det = model.det_out_bosch
# print(type(model))
# det = model.model[model.detector_index]
# print(type(det))
na, nt = det.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(det.nl):
anchors = det.anchors[i] #[3,2]
gain[2:6] = torch.tensor(predictions[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < cfg.TRAIN.ANCHOR_THRESHOLD # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
def morphological_process(image, kernel_size=5, func_type=cv2.MORPH_CLOSE):
"""
morphological process to fill the hole in the binary segmentation result
:param image:
:param kernel_size:
:return:
"""
if len(image.shape) == 3:
raise ValueError('Binary segmentation result image should be a single channel image')
if image.dtype is not np.uint8:
image = np.array(image, np.uint8)
kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
# close operation fille hole
closing = cv2.morphologyEx(image, func_type, kernel, iterations=1)
return closing
def connect_components_analysis(image):
"""
connect components analysis to remove the small components
:param image:
:return:
"""
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
# print(gray_image.dtype)
return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
def if_y(samples_x):
for sample_x in samples_x:
if len(sample_x):
# if len(sample_x) != (sample_x[-1] - sample_x[0] + 1) or sample_x[-1] == sample_x[0]:
if sample_x[-1] == sample_x[0]:
return False
return True
def fitlane(mask, sel_labels, labels, stats):
H, W = mask.shape
for label_group in sel_labels:
states = [stats[k] for k in label_group]
x, y, w, h, _ = states[0]
# if len(label_group) > 1:
# print('in')
# for m in range(len(label_group)-1):
# labels[labels == label_group[m+1]] = label_group[0]
t = label_group[0]
# samples_y = np.linspace(y, H-1, 30)
# else:
samples_y = np.linspace(y, y+h-1, 30)
samples_x = [np.where(labels[int(sample_y)]==t)[0] for sample_y in samples_y]
if if_y(samples_x):
samples_x = [int(np.mean(sample_x)) if len(sample_x) else -1 for sample_x in samples_x]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_y = samples_y[samples_x != -1]
samples_x = samples_x[samples_x != -1]
func = np.polyfit(samples_y, samples_x, 2)
x_limits = np.polyval(func, H-1)
# if (y_max + h - 1) >= 720:
if x_limits < 0 or x_limits > W:
# if (y_max + h - 1) > 720:
# draw_y = np.linspace(y, 720-1, 720-y)
draw_y = np.linspace(y, y+h-1, h)
else:
# draw_y = np.linspace(y, y+h-1, y+h-y)
draw_y = np.linspace(y, H-1, H-y)
draw_x = np.polyval(func, draw_y)
# draw_y = draw_y[draw_x < W]
# draw_x = draw_x[draw_x < W]
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
else:
# if ( + w - 1) >= 1280:
samples_x = np.linspace(x, W-1, 30)
# else:
# samples_x = np.linspace(x, x_max+w-1, 30)
samples_y = [np.where(labels[:, int(sample_x)]==t)[0] for sample_x in samples_x]
samples_y = [int(np.mean(sample_y)) if len(sample_y) else -1 for sample_y in samples_y]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_x = samples_x[samples_y != -1]
samples_y = samples_y[samples_y != -1]
try:
func = np.polyfit(samples_x, samples_y, 2)
            except Exception:
                # np.polyfit can fail on degenerate samples; skip this
                # component instead of reusing a stale or undefined fit
                continue
# y_limits = np.polyval(func, 0)
# if y_limits > 720 or y_limits < 0:
# if (x + w - 1) >= 1280:
# draw_x = np.linspace(x, 1280-1, 1280-x)
# else:
y_limits = np.polyval(func, 0)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
else:
y_limits = np.polyval(func, W-1)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
# if x+w-1 < 640:
# draw_x = np.linspace(0, x+w-1, w+x-x)
else:
draw_x = np.linspace(x, W-1, W-x)
draw_y = np.polyval(func, draw_x)
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
return mask
def connect_lane(image, shadow_height=0):
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
if shadow_height:
image[:shadow_height] = 0
mask = np.zeros((image.shape[0], image.shape[1]), np.uint8)
num_labels, labels, stats, centers = cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
# ratios = []
selected_label = []
for t in range(1, num_labels, 1):
_, _, _, _, area = stats[t]
if area > 400:
selected_label.append(t)
if len(selected_label) == 0:
return mask
else:
split_labels = [[label,] for label in selected_label]
mask_post = fitlane(mask, split_labels, labels, stats)
return mask_post
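# Minimal post-processing sketch (the probability map and threshold are
# illustrative assumptions, not part of this module):
#
#   binary = (lane_probability_map > 0.5).astype(np.uint8)
#   binary = morphological_process(binary, kernel_size=5, func_type=cv2.MORPH_CLOSE)
#   lane_mask = connect_lane(binary)
#
# i.e. threshold the segmentation output, close small holes, then merge the
# connected components and extrapolate them into full-length lane lines.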
| 36.228814 | 119 | 0.535906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,065 | 0.24152 |
96736097d0c8c249aee5be77f87a2ac3b77a5f45
| 44 |
py
|
Python
|
name.py
|
dachuanz/crash-course
|
f7068e3ea502c1859e01f81772eafb179e5d2536
|
[
"MIT"
] | null | null | null |
name.py
|
dachuanz/crash-course
|
f7068e3ea502c1859e01f81772eafb179e5d2536
|
[
"MIT"
] | null | null | null |
name.py
|
dachuanz/crash-course
|
f7068e3ea502c1859e01f81772eafb179e5d2536
|
[
"MIT"
] | null | null | null |
name = "ada lovelace"
print(name.title())
| 14.666667 | 22 | 0.659091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.318182 |
96740cc6a710cea9535394b9dcdca4bd9278e075
| 11,578 |
py
|
Python
|
buildbot/runCI.py
|
DenisBakhvalov/perf-ninja
|
d9d0a7ff3984e7cc3823bca3a3f106e7fbc00da0
|
[
"CC-BY-3.0"
] | 1 |
2021-08-06T08:54:55.000Z
|
2021-08-06T08:54:55.000Z
|
buildbot/runCI.py
|
DenisBakhvalov/perf-ninja
|
d9d0a7ff3984e7cc3823bca3a3f106e7fbc00da0
|
[
"CC-BY-3.0"
] | null | null | null |
buildbot/runCI.py
|
DenisBakhvalov/perf-ninja
|
d9d0a7ff3984e7cc3823bca3a3f106e7fbc00da0
|
[
"CC-BY-3.0"
] | null | null | null |
import sys
import subprocess
import os
import shutil
import argparse
import json
import re
from enum import Enum
from dataclasses import dataclass
import gbench
from gbench import util, report
from gbench.util import *
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class ScoreResult(Enum):
SKIPPED = READY = 0
BUILD_FAILED = 1
BENCH_FAILED = 2
PASSED = 3
@dataclass
class LabParams:
threshold: float = 10.0
result: ScoreResult = ScoreResult.SKIPPED
@dataclass
class LabPath:
category: str
name: str
parser = argparse.ArgumentParser(description='test results')
parser.add_argument("-workdir", type=str, help="working directory", default="")
parser.add_argument("-v", help="verbose", action="store_true", default=False)
args = parser.parse_args()
workdir = args.workdir
verbose = args.v
Labs = dict()
Labs["memory_bound"] = dict()
Labs["core_bound"] = dict()
Labs["bad_speculation"] = dict()
Labs["frontend_bound"] = dict()
Labs["data_driven"] = dict()
Labs["misc"] = dict()
Labs["memory_bound"]["data_packing"] = LabParams(threshold=15.0)
Labs["memory_bound"]["loop_interchange_1"] = LabParams(threshold=85.0)
Labs["memory_bound"]["loop_interchange_2"] = LabParams(threshold=75.0)
Labs["misc"]["warmup"] = LabParams(threshold=50.0)
Labs["core_bound"]["function_inlining_1"] = LabParams(threshold=35.0)
Labs["core_bound"]["compiler_intrinsics_1"] = LabParams(threshold=60.0)
Labs["core_bound"]["vectorization_1"] = LabParams(threshold=90.0)
def getLabCurrentStatus(labPath):
return Labs[labPath.category][labPath.name].result
def setLabCurrentStatus(labPath, status):
Labs[labPath.category][labPath.name].result = status
return True
def getLabThreshold(labPath):
return Labs[labPath.category][labPath.name].threshold
def getLabNameStr(labPath):
return labPath.category + ":" + labPath.name
def buildAndValidate(labBuildDir):
try:
subprocess.check_call("cmake -E make_directory " + labBuildDir, shell=True)
print("Prepare build directory - OK")
except:
print(bcolors.FAIL + "Prepare build directory - Failed" + bcolors.ENDC)
return False
os.chdir(labBuildDir)
try:
subprocess.check_call("cmake -DCMAKE_BUILD_TYPE=Release -DCI=ON " + os.path.join(labBuildDir, ".."), shell=True)
print("CMake - OK")
except:
print(bcolors.FAIL + "CMake - Failed" + bcolors.ENDC)
return False
try:
subprocess.check_call("cmake --build . --config Release --target clean", shell=True)
subprocess.check_call("cmake --build . --config Release --parallel 8", shell=True)
print("Build - OK")
except:
print(bcolors.FAIL + "Build - Failed" + bcolors.ENDC)
return False
try:
subprocess.check_call("cmake --build . --config Release --target validateLab", shell=True)
print("Validation - OK")
except:
print(bcolors.FAIL + "Validation - Failed" + bcolors.ENDC)
return False
return True
def buildLab(labDir, solutionOrBaseline):
os.chdir(labDir)
buildDir = os.path.join(labDir, "build_" + solutionOrBaseline)
print("Build and Validate the " + solutionOrBaseline)
if not buildAndValidate(buildDir):
return False
return True
def noChangesToTheBaseline(labDir):
solutionDir = os.path.join(labDir, "build_solution")
baselineDir = os.path.join(labDir, "build_baseline")
solutionExe = os.path.join(solutionDir, "lab" if sys.platform != 'win32' else os.path.join("Release", "lab.exe"))
baselineExe = os.path.join(baselineDir, "lab" if sys.platform != 'win32' else os.path.join("Release", "lab.exe"))
exit_code = subprocess.call(("cmp " if sys.platform != 'win32' else "fc /b >NUL ") + solutionExe + " " + baselineExe, shell=True)
return exit_code == 0
def checkoutBaseline(workdir):
os.chdir(workdir)
try:
# Branch 'main' is always the baseline
subprocess.check_call("git checkout main", shell=True)
print("Checkout baseline - OK")
except:
print(bcolors.FAIL + "Checkout baseline - Failed" + bcolors.ENDC)
return False
return True
def getSpeedUp(diff_report):
old = diff_report[0]['measurements'][0]['real_time']
new = diff_report[0]['measurements'][0]['real_time_other']
diff = old - new
speedup = (diff / old ) * 100
return speedup
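# Worked example (hypothetical numbers): baseline real_time = 200 ms, solution
# real_time = 150 ms -> speedup = (200 - 150) / 200 * 100 = 25 %.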
def benchmarkSolutionOrBaseline(labBuildDir, solutionOrBaseline):
#os.chdir(labBuildDir)
try:
subprocess.check_call("cmake --build " + labBuildDir + " --config Release --target benchmarkLab", shell=True)
print("Benchmarking " + solutionOrBaseline + " - OK")
except:
print(bcolors.FAIL + "Benchmarking " + solutionOrBaseline + " - Failed" + bcolors.ENDC)
return False
return True
def benchmarkLab(labPath):
print("Benchmark solution against the baseline")
labDir = os.path.join(workdir, labPath.category, labPath.name)
solutionDir = os.path.join(labDir, "build_solution")
baselineDir = os.path.join(labDir, "build_baseline")
benchmarkSolutionOrBaseline(solutionDir, "solution")
benchmarkSolutionOrBaseline(baselineDir, "baseline")
outJsonSolution = gbench.util.load_benchmark_results(os.path.join(solutionDir, "result.json"))
outJsonBaseline = gbench.util.load_benchmark_results(os.path.join(baselineDir, "result.json"))
# Parse two report files and compare them
diff_report = gbench.report.get_difference_report(
outJsonBaseline, outJsonSolution, True)
output_lines = gbench.report.print_difference_report(
diff_report,
False, True, 0.05, True)
for ln in output_lines:
print(ln)
speedup = getSpeedUp(diff_report)
if abs(speedup) < 2.0:
print (bcolors.FAIL + "New version has performance similar to the baseline (<2% difference). Submission for the lab " + getLabNameStr(labPath) + " failed." + bcolors.ENDC)
return False
if speedup < 0:
print (bcolors.FAIL + "New version is slower. Submission for the lab " + getLabNameStr(labPath) + " failed." + bcolors.ENDC)
return False
if (speedup < getLabThreshold(labPath)):
print (bcolors.FAIL + "Submission for the lab " + getLabNameStr(labPath) + " failed. New version is not fast enough." + bcolors.ENDC)
print ("Measured speedup:", "{:.2f}".format(speedup), "%")
print ("Pass threshold:", "{:.2f}".format(getLabThreshold(labPath)), "%")
return False
print ("Measured speedup:", "{:.2f}".format(speedup), "%")
    print (bcolors.OKGREEN + "Submission succeeded" + bcolors.ENDC)
return True
def runActionForAllLabs(workdir, func):
for labCategory in os.listdir(workdir):
if labCategory in Labs:
categoryDir = os.path.join(workdir, labCategory)
for labName in os.listdir(categoryDir):
if labName in Labs[labCategory]:
labPath = LabPath(labCategory, labName)
if (getLabCurrentStatus(labPath) == ScoreResult.READY):
func(labPath)
def buildSolutionAction(labPath):
labWorkDir = os.path.join(workdir, labPath.category, labPath.name)
if not buildLab(labWorkDir, "solution"):
setLabCurrentStatus(labPath, ScoreResult.BUILD_FAILED)
def buildBaselineAction(labPath):
labWorkDir = os.path.join(workdir, labPath.category, labPath.name)
if not buildLab(labWorkDir, "baseline"):
setLabCurrentStatus(labPath, ScoreResult.BUILD_FAILED)
def benchmarkAction(labPath):
labWorkDir = os.path.join(workdir, labPath.category, labPath.name)
if noChangesToTheBaseline(labWorkDir):
setLabCurrentStatus(labPath, ScoreResult.SKIPPED)
elif not benchmarkLab(labPath):
setLabCurrentStatus(labPath, ScoreResult.BENCH_FAILED)
else:
setLabCurrentStatus(labPath, ScoreResult.PASSED)
def checkAllLabs(workdir):
runActionForAllLabs(workdir, buildSolutionAction)
if not checkoutBaseline(workdir):
return False
runActionForAllLabs(workdir, buildBaselineAction)
runActionForAllLabs(workdir, benchmarkAction)
return True
def changedMultipleLabs(lines):
percent1, path1 = lines[1].split(b'%')
GitShowLabPath1 = DirLabPathRegex.search(str(path1))
if (GitShowLabPath1):
for i in range(2, len(lines)):
if len(lines[i]) == 0:
continue
percent_i, path_i = lines[i].split(b'%')
GitShowLabPath_i = DirLabPathRegex.search(str(path_i))
if (GitShowLabPath_i):
if GitShowLabPath1.group(1) != GitShowLabPath_i.group(1) or GitShowLabPath1.group(2) != GitShowLabPath_i.group(2):
return True
return False
if not workdir:
print ("Error: working directory is not provided.")
sys.exit(1)
os.chdir(workdir)
checkAll = False
benchLabPath = 0
DirLabPathRegex = re.compile(r'labs/(.*)/(.*)/')
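# Example of the `git show --dirstat` line this regex is applied to, e.g.
# " 45.4% labs/memory_bound/data_packing/" -> category "memory_bound",
# lab name "data_packing".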
try:
outputGitLog = subprocess.check_output("git log -1 --oneline" , shell=True)
# If the commit message has '[CheckAll]' substring, benchmark everything
if b'[CheckAll]' in outputGitLog:
checkAll = True
print("Will benchmark all the labs")
# Otherwise, analyze the changes made in the last commit and identify which lab to benchmark
else:
outputGitShow = subprocess.check_output("git show -1 --dirstat --oneline" , shell=True)
lines = outputGitShow.split(b'\n')
# Expect at least 2 lines in the output
if (len(lines) < 2 or len(lines[1]) == 0):
print("Can't figure out which lab was changed in the last commit. Will benchmark all the labs.")
checkAll = True
elif changedMultipleLabs(lines):
print("Multiple labs changed. Will benchmark all the labs.")
checkAll = True
else:
# Skip the first line that has the commit hash and message
percent, path = lines[1].split(b'%')
GitShowLabPath = DirLabPathRegex.search(str(path))
if (GitShowLabPath):
benchLabPath = LabPath(GitShowLabPath.group(1), GitShowLabPath.group(2))
print("Will benchmark the lab: " + getLabNameStr(benchLabPath))
else:
print("Can't figure out which lab was changed in the last commit. Will benchmark all the labs.")
checkAll = True
except:
print("Error: can't fetch the last commit from git history")
sys.exit(1)
result = False
if checkAll:
if not checkAllLabs(workdir):
sys.exit(1)
print(bcolors.HEADER + "\nLab Assignments Summary:" + bcolors.ENDC)
allSkipped = True
for category in Labs:
print(bcolors.HEADER + " " + category + ":" + bcolors.ENDC)
for lab in Labs[category]:
if ScoreResult.SKIPPED == Labs[category][lab].result:
print(bcolors.OKCYAN + " " + lab + ": Skipped" + bcolors.ENDC)
else:
allSkipped = False
if ScoreResult.PASSED == Labs[category][lab].result:
print(bcolors.OKGREEN + " " + lab + ": Passed" + bcolors.ENDC)
# Return true if at least one lab succeeded
result = True
if ScoreResult.BENCH_FAILED == Labs[category][lab].result:
print(bcolors.FAIL + " " + lab + ": Failed: not fast enough" + bcolors.ENDC)
if ScoreResult.BUILD_FAILED == Labs[category][lab].result:
print(bcolors.FAIL + " " + lab + ": Failed: build error" + bcolors.ENDC)
if allSkipped:
result = True
else:
labdir = os.path.join(workdir, benchLabPath.category, benchLabPath.name)
if not buildLab(labdir, "solution"):
sys.exit(1)
if not checkoutBaseline(workdir):
sys.exit(1)
if not buildLab(labdir, "baseline"):
sys.exit(1)
if noChangesToTheBaseline(labdir):
print(bcolors.OKCYAN + "The solution and the baseline are identical. Skipped." + bcolors.ENDC)
result = True
else:
result = benchmarkLab(benchLabPath)
if not result:
sys.exit(1)
else:
sys.exit(0)
| 34.458333 | 175 | 0.70228 | 467 | 0.040335 | 0 | 0 | 158 | 0.013647 | 0 | 0 | 2,786 | 0.240629 |
967557de1befe9d5c89674990959f86af65d7c4c
| 1,156 |
py
|
Python
|
main.py
|
TheSkidSlayer/VissageMassBanner
|
38f0a83ad9d625930cef5004787f8c4966312fd0
|
[
"BSL-1.0"
] | 1 |
2021-12-31T23:15:47.000Z
|
2021-12-31T23:15:47.000Z
|
main.py
|
TheSkidSlayer/VissageMassBanner
|
38f0a83ad9d625930cef5004787f8c4966312fd0
|
[
"BSL-1.0"
] | null | null | null |
main.py
|
TheSkidSlayer/VissageMassBanner
|
38f0a83ad9d625930cef5004787f8c4966312fd0
|
[
"BSL-1.0"
] | null | null | null |
try:
from concurrent.futures import ThreadPoolExecutor
import random, time, os, httpx
from colorama import Fore, Style
except ImportError:
print("Error [!] -> Modules Are not installed")
token, guild = input("Token -> "), input("\nGuild ID -> ")
threads = []
apiv = [6, 7, 8, 9]
codes = [200, 201, 204]
def worker(user: str):
try:
response = httpx.put(
"https://discord.com/api/v{}/guilds/{}/bans/{}".format(
random.choice(apiv), guild, user
),
headers={"Authorization": f"Bot {token}"},
)
if response.status_code in codes:
print(
f"{Fore.CYAN}{Style.BRIGHT} Succesfully Punished User --> {Fore.RESET}"
+ user
)
else:
return worker(user)
except (Exception):
return worker(user)
def theadpool():
with ThreadPoolExecutor() as executor:
time.sleep(0.015)
with open("members.txt") as f:
Ids = f.readlines()
for user in Ids:
threads.append(executor.submit(worker, user))
if __name__ == "__main__":
theadpool()
| 24.083333 | 87 | 0.553633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.205017 |
9676900dd098082bdefdd8316547347e26bd4ef9
| 376 |
py
|
Python
|
panos/example_with_output_template/loader.py
|
nembery/Skillets
|
4c0a259d4fb49550605c5eb5316d83f109612271
|
[
"Apache-2.0"
] | 1 |
2019-04-17T19:30:46.000Z
|
2019-04-17T19:30:46.000Z
|
panos/example_with_output_template/loader.py
|
nembery/Skillets
|
4c0a259d4fb49550605c5eb5316d83f109612271
|
[
"Apache-2.0"
] | null | null | null |
panos/example_with_output_template/loader.py
|
nembery/Skillets
|
4c0a259d4fb49550605c5eb5316d83f109612271
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from skilletlib import SkilletLoader
sl = SkilletLoader('.')
skillet = sl.get_skillet_with_name('panos_cli_example')
context = dict()
context['cli_command'] = 'show system info'
context['username'] = 'admin'
context['password'] = 'NOPE'
context['ip_address'] = 'NOPE'
output = skillet.execute(context)
print(output.get('output_template', 'n/a'))
| 19.789474 | 55 | 0.723404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.393617 |
967705c9e8a9fd17fb6a029cb268db7aef64d726
| 198 |
py
|
Python
|
treasurehunt/views.py
|
code-haven/Django-treasurehunt-demo
|
c22aa88486d57fa97363d9d57dbbb7bc68a8ddd4
|
[
"MIT"
] | 1 |
2017-04-30T05:46:40.000Z
|
2017-04-30T05:46:40.000Z
|
treasurehunt/views.py
|
code-haven/Django-treasurehunt-demo
|
c22aa88486d57fa97363d9d57dbbb7bc68a8ddd4
|
[
"MIT"
] | null | null | null |
treasurehunt/views.py
|
code-haven/Django-treasurehunt-demo
|
c22aa88486d57fa97363d9d57dbbb7bc68a8ddd4
|
[
"MIT"
] | null | null | null |
from django.views.generic import View
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'treasurehunt/treasurehunt_index.html')
| 33 | 66 | 0.823232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.191919 |
9677e8577eb71d6a56fc8178b8340df0cf85efc4
| 2,184 |
py
|
Python
|
setup.py
|
pletnes/cloud-pysec
|
4f91e3875ee36cb3e9b361e8b598070ce9523128
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
pletnes/cloud-pysec
|
4f91e3875ee36cb3e9b361e8b598070ce9523128
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
pletnes/cloud-pysec
|
4f91e3875ee36cb3e9b361e8b598070ce9523128
|
[
"Apache-2.0"
] | null | null | null |
""" xssec setup """
import codecs
from os import path
from setuptools import setup, find_packages
from sap.conf.config import USE_SAP_PY_JWT
CURRENT_DIR = path.abspath(path.dirname(__file__))
README_LOCATION = path.join(CURRENT_DIR, 'README.md')
VERSION = ''
with open(path.join(CURRENT_DIR, 'version.txt'), 'r') as version_file:
VERSION = version_file.read()
with codecs.open(README_LOCATION, 'r', 'utf-8') as readme_file:
LONG_DESCRIPTION = readme_file.read()
sap_py_jwt_dep = ''
if USE_SAP_PY_JWT:
sap_py_jwt_dep = 'sap_py_jwt>=1.1.1'
else:
sap_py_jwt_dep = 'cryptography'
setup(
name='sap_xssec',
url='https://github.com/SAP/cloud-pysec',
version=VERSION.strip(),
author='SAP SE',
description=('SAP Python Security Library'),
packages=find_packages(include=['sap*']),
data_files=[('.', ['version.txt', 'CHANGELOG.md'])],
test_suite='tests',
install_requires=[
'deprecation>=2.1.0',
'requests>=2.21.0',
'six>=1.11.0',
'pyjwt>=1.7.0',
'{}'.format(sap_py_jwt_dep)
],
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
classifiers=[
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Topic :: Security",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
| 34.125 | 70 | 0.628663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,102 | 0.504579 |
96785881acee4c6b6e5fbf6dfa6bcd4a371b2db4
| 1,957 |
py
|
Python
|
mutators/implementations/mutation_change_proto.py
|
freingruber/JavaScript-Raider
|
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
|
[
"Apache-2.0"
] | 91 |
2022-01-24T07:32:34.000Z
|
2022-03-31T23:37:15.000Z
|
mutators/implementations/mutation_change_proto.py
|
zeusguy/JavaScript-Raider
|
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
|
[
"Apache-2.0"
] | null | null | null |
mutators/implementations/mutation_change_proto.py
|
zeusguy/JavaScript-Raider
|
d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0
|
[
"Apache-2.0"
] | 11 |
2022-01-24T14:21:12.000Z
|
2022-03-31T23:37:23.000Z
|
# Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils
import tagging_engine.tagging as tagging
from tagging_engine.tagging import Tag
import mutators.testcase_mutators_helpers as testcase_mutators_helpers
def mutation_change_proto(content, state):
# utils.dbg_msg("Mutation operation: Change proto")
tagging.add_tag(Tag.MUTATION_CHANGE_PROTO1)
    # TODO:
    # Currently the lhs/rhs helpers never return the __proto__ of a function,
    # so code like this:
    #     Math.abs.__proto__ = Math.sign.__proto__
    # can currently not be created. Is this required?
    # => does such code have an effect?
random_line_number = testcase_mutators_helpers.get_random_line_number_to_insert_code(state)
(start_line_with, end_line_with) = testcase_mutators_helpers.get_start_and_end_line_symbols(state, random_line_number, content)
(lhs, code_possibilities) = testcase_mutators_helpers.get_proto_change_lhs(state, random_line_number)
rhs = testcase_mutators_helpers.get_proto_change_rhs(state, random_line_number, code_possibilities)
new_code_line = "%s%s.__proto__ = %s%s" % (start_line_with, lhs, rhs, end_line_with)
# Now just insert the new line to the testcase & state
lines = content.split("\n")
lines.insert(random_line_number, new_code_line)
new_content = "\n".join(lines)
state.state_insert_line(random_line_number, new_content, new_code_line)
return new_content, state
| 40.770833 | 131 | 0.772611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.469085 |
9678930e9778ab9e49deebe9a98a3436e67de53f
| 1,809 |
py
|
Python
|
homeassistant/components/smarthab/light.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 22,481 |
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/smarthab/light.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101 |
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/smarthab/light.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,411 |
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Support for SmartHab device integration."""
from datetime import timedelta
import logging
import pysmarthab
from requests.exceptions import Timeout
from homeassistant.components.light import LightEntity
from . import DATA_HUB, DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up SmartHab lights from a config entry."""
hub = hass.data[DOMAIN][config_entry.entry_id][DATA_HUB]
entities = (
SmartHabLight(light)
for light in await hub.async_get_device_list()
if isinstance(light, pysmarthab.Light)
)
async_add_entities(entities, True)
class SmartHabLight(LightEntity):
"""Representation of a SmartHab Light."""
def __init__(self, light):
"""Initialize a SmartHabLight."""
self._light = light
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._light.device_id
@property
def name(self) -> str:
"""Return the display name of this light."""
return self._light.label
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._light.state
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
await self._light.async_turn_on()
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self._light.async_turn_off()
async def async_update(self):
"""Fetch new state data for this light."""
try:
await self._light.async_update()
except Timeout:
_LOGGER.error(
"Reached timeout while updating light %s from API", self.entity_id
)
| 26.602941 | 82 | 0.6534 | 1,107 | 0.61194 | 0 | 0 | 347 | 0.191819 | 921 | 0.509121 | 436 | 0.241017 |
9679546d86fe3d9ab266b6fcd96932146df7b271
| 406 |
py
|
Python
|
hello.py
|
AaronTrip/cgi-lab
|
cc932dfe21c27f3ca054233fe5bc73783facee6b
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
AaronTrip/cgi-lab
|
cc932dfe21c27f3ca054233fe5bc73783facee6b
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
AaronTrip/cgi-lab
|
cc932dfe21c27f3ca054233fe5bc73783facee6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os, json
print("Content-type: text/html\r\n\r\n")
print()
print("<Title>Test CGI</title>")
print("<p>Hello World cmput404 class!<p/>")
print(os.environ)
json_object = json.dumps(dict(os.environ), indent = 4)
print(json_object)
'''for param in os.environ.keys():
if(param == "HTTP_USER_AGENT"):
print("<b>%20s<b/>: %s<br>" % (param, os.environ[param]))
'''
| 18.454545 | 65 | 0.640394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.630542 |
96797b706de9c91fb5c25b82dfacdb6b113734eb
| 14,277 |
py
|
Python
|
VCD/utils/utils.py
|
Xingyu-Lin/VCD
|
46ca993f79b23e5c73f5a7eb72b39dfacf3b282c
|
[
"MIT"
] | 25 |
2022-01-28T02:13:42.000Z
|
2022-03-19T15:33:38.000Z
|
VCD/utils/utils.py
|
Xingyu-Lin/VCD
|
46ca993f79b23e5c73f5a7eb72b39dfacf3b282c
|
[
"MIT"
] | 2 |
2022-01-28T05:58:32.000Z
|
2022-01-30T11:37:50.000Z
|
VCD/utils/utils.py
|
Xingyu-Lin/VCD
|
46ca993f79b23e5c73f5a7eb72b39dfacf3b282c
|
[
"MIT"
] | 4 |
2022-01-31T08:22:49.000Z
|
2022-02-17T16:28:32.000Z
|
import os.path as osp
import numpy as np
import cv2
import torch
from torchvision.utils import make_grid
from VCD.utils.camera_utils import project_to_image
import pyflex
import re
import h5py
import os
from softgym.utils.visualization import save_numpy_as_gif
from chester import logger
import random
class VArgs(object):
def __init__(self, vv):
for key, val in vv.items():
setattr(self, key, val)
def vv_to_args(vv):
args = VArgs(vv)
return args
# Function to extract all the numbers from the given string
def extract_numbers(str):
array = re.findall(r'[0-9]+', str)
if len(array) == 0:
return [0]
return array
################## Pointcloud Processing #################
import pcl
# def get_partial_particle(full_particle, observable_idx):
# return np.array(full_particle[observable_idx], dtype=np.float32)
def voxelize_pointcloud(pointcloud, voxel_size):
cloud = pcl.PointCloud(pointcloud)
sor = cloud.make_voxel_grid_filter()
sor.set_leaf_size(voxel_size, voxel_size, voxel_size)
pointcloud = sor.filter()
pointcloud = np.asarray(pointcloud).astype(np.float32)
return pointcloud
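# e.g. voxelize_pointcloud(points.astype(np.float32), voxel_size=0.0216) keeps
# roughly one point per 2.16 cm voxel. pcl.PointCloud expects an (N, 3) float32
# array, hence the cast. (The concrete voxel size is only an illustrative choice.)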
from softgym.utils.misc import vectorized_range, vectorized_meshgrid
def pc_reward_model(pos, cloth_particle_radius=0.00625, downsample_scale=3):
cloth_particle_radius *= downsample_scale
pos = np.reshape(pos, [-1, 3])
min_x = np.min(pos[:, 0])
min_y = np.min(pos[:, 2])
max_x = np.max(pos[:, 0])
max_y = np.max(pos[:, 2])
init = np.array([min_x, min_y])
span = np.array([max_x - min_x, max_y - min_y]) / 100.
pos2d = pos[:, [0, 2]]
offset = pos2d - init
slotted_x_low = np.maximum(np.round((offset[:, 0] - cloth_particle_radius) / span[0]).astype(int), 0)
slotted_x_high = np.minimum(np.round((offset[:, 0] + cloth_particle_radius) / span[0]).astype(int), 100)
slotted_y_low = np.maximum(np.round((offset[:, 1] - cloth_particle_radius) / span[1]).astype(int), 0)
slotted_y_high = np.minimum(np.round((offset[:, 1] + cloth_particle_radius) / span[1]).astype(int), 100)
grid = np.zeros(10000) # Discretization
listx = vectorized_range(slotted_x_low, slotted_x_high)
listy = vectorized_range(slotted_y_low, slotted_y_high)
listxx, listyy = vectorized_meshgrid(listx, listy)
idx = listxx * 100 + listyy
idx = np.clip(idx.flatten(), 0, 9999)
grid[idx] = 1
res = np.sum(grid) * span[0] * span[1]
return res
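# In short: pc_reward_model rasterises the particles onto a 100 x 100 grid over
# their x-z bounding box and returns (occupied cells) * (cell area), i.e. an
# estimate of the planar area currently covered by the cloth.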
################## IO #################################
def downsample(cloth_xdim, cloth_ydim, scale):
cloth_xdim, cloth_ydim = int(cloth_xdim), int(cloth_ydim)
new_idx = np.arange(cloth_xdim * cloth_ydim).reshape((cloth_ydim, cloth_xdim))
new_idx = new_idx[::scale, ::scale]
cloth_ydim, cloth_xdim = new_idx.shape
new_idx = new_idx.flatten()
return new_idx, cloth_xdim, cloth_ydim
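# e.g. downsample(4, 4, scale=2) returns (array([0, 2, 8, 10]), 2, 2): the flat
# indices of every second particle along both cloth dimensions.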
def load_h5_data(data_names, path):
hf = h5py.File(path, 'r')
data = {}
for name in data_names:
d = np.array(hf.get(name))
data[name] = d
hf.close()
return data
def store_h5_data(data_names, data, path):
hf = h5py.File(path, 'w')
for name in data_names:
hf.create_dataset(name, data=data[name])
hf.close()
def load_data(data_dir, idx_rollout, idx_timestep, data_names):
data_path = os.path.join(data_dir, str(idx_rollout), str(idx_timestep) + '.h5')
return load_h5_data(data_names, data_path)
def load_data_list(data_dir, idx_rollout, idx_timestep, data_names):
data_path = os.path.join(data_dir, str(idx_rollout), str(idx_timestep) + '.h5')
d = load_h5_data(data_names, data_path)
return [d[name] for name in data_names]
def store_data():
raise NotImplementedError
def transform_info(all_infos):
""" Input: All info is a nested list with the index of [episode][time]{info_key:info_value}
Output: transformed_infos is a dictionary with the index of [info_key][episode][time]
"""
if len(all_infos) == 0:
return []
transformed_info = {}
num_episode = len(all_infos)
T = len(all_infos[0])
for info_name in all_infos[0][0].keys():
infos = np.zeros([num_episode, T], dtype=np.float32)
for i in range(num_episode):
infos[i, :] = np.array([info[info_name] for info in all_infos[i]])
transformed_info[info_name] = infos
return transformed_info
def draw_grid(list_of_imgs, nrow, padding=10, pad_value=200):
img_list = torch.from_numpy(np.array(list_of_imgs).transpose(0, 3, 1, 2))
img = make_grid(img_list, nrow=nrow, padding=padding, pad_value=pad_value)
# print(img.shape)
img = img.numpy().transpose(1, 2, 0)
return img
def inrange(x, low, high):
if x >= low and x < high:
return True
else:
return False
################## Visualization ######################
def draw_edge(frame, predicted_edges, matrix_world_to_camera, pointcloud, camera_height, camera_width):
u, v = project_to_image(matrix_world_to_camera, pointcloud, camera_height, camera_width)
for edge_idx in range(predicted_edges.shape[1]):
s = predicted_edges[0][edge_idx]
r = predicted_edges[1][edge_idx]
start = (u[s], v[s])
end = (u[r], v[r])
color = (255, 0, 0)
thickness = 1
image = cv2.line(frame, start, end, color, thickness)
return image
def cem_make_gif(all_frames, save_dir, save_name):
# Convert to T x index x C x H x W for pytorch
all_frames = np.array(all_frames).transpose([1, 0, 4, 2, 3])
grid_imgs = [make_grid(torch.from_numpy(frame), nrow=5).permute(1, 2, 0).data.cpu().numpy() for frame in all_frames]
save_numpy_as_gif(np.array(grid_imgs), osp.join(save_dir, save_name))
def draw_policy_action(obs_before, obs_after, start_loc_1, end_loc_1, matrix_world_to_camera, start_loc_2=None, end_loc_2=None):
height, width, _ = obs_before.shape
if start_loc_2 is not None:
l = [(start_loc_1, end_loc_1), (start_loc_2, end_loc_2)]
else:
l = [(start_loc_1, end_loc_1)]
for (start_loc, end_loc) in l:
# print(start_loc, end_loc)
suv = project_to_image(matrix_world_to_camera, start_loc.reshape((1, 3)), height, width)
su, sv = suv[0][0], suv[1][0]
euv = project_to_image(matrix_world_to_camera, end_loc.reshape((1, 3)), height, width)
eu, ev = euv[0][0], euv[1][0]
if inrange(su, 0, width) and inrange(sv, 0, height) and inrange(eu, 0, width) and inrange(ev, 0, height):
cv2.arrowedLine(obs_before, (su, sv), (eu, ev), (255, 0, 0), 3)
obs_before[sv - 5:sv + 5, su - 5:su + 5, :] = (0, 0, 0)
res = np.concatenate((obs_before, obs_after), axis=1)
return res
def draw_planned_actions(save_idx, obses, start_poses, end_poses, matrix_world_to_camera, log_dir):
height = width = obses[0].shape[0]
start_uv = []
end_uv = []
for sp in start_poses:
suv = project_to_image(matrix_world_to_camera, sp.reshape((1, 3)), height, width)
start_uv.append((suv[0][0], suv[1][0]))
for ep in end_poses:
euv = project_to_image(matrix_world_to_camera, ep.reshape((1, 3)), height, width)
end_uv.append((euv[0][0], euv[1][0]))
res = []
for idx in range(len(obses) - 1):
obs = obses[idx]
su, sv = start_uv[idx]
eu, ev = end_uv[idx]
if inrange(su, 0, width) and inrange(sv, 0, height) and inrange(eu, 0, width) and inrange(ev, 0, height):
cv2.arrowedLine(obs, (su, sv), (eu, ev), (255, 0, 0), 3)
obs[sv - 5:sv + 5, su - 5:su + 5, :] = (0, 0, 0)
res.append(obs)
res.append(obses[-1])
res = np.concatenate(res, axis=1)
cv2.imwrite(osp.join(log_dir, '{}_planned.png'.format(save_idx)), res[:, :, ::-1])
def draw_cem_elites(obs_, start_poses, end_poses, mean_start_pos, mean_end_pos,
matrix_world_to_camera, log_dir, save_idx=None):
obs = obs_.copy()
start_uv = []
end_uv = []
height = width = obs.shape[0]
for sp in start_poses:
suv = project_to_image(matrix_world_to_camera, sp.reshape((1, 3)), height, width)
start_uv.append((suv[0][0], suv[1][0]))
for ep in end_poses:
euv = project_to_image(matrix_world_to_camera, ep.reshape((1, 3)), height, width)
end_uv.append((euv[0][0], euv[1][0]))
for idx in range(len(start_poses)):
su, sv = start_uv[idx]
eu, ev = end_uv[idx]
# poses at the front have higher reward
if inrange(su, 0, 255) and inrange(sv, 0, 255) and inrange(eu, 0, 255) and inrange(ev, 0, 255):
cv2.arrowedLine(obs, (su, sv), (eu, ev), (255 * (1 - idx / len(start_poses)), 0, 0), 2)
obs[sv - 2:sv + 2, su - 2:su + 2, :] = (0, 0, 0)
mean_s_uv = project_to_image(matrix_world_to_camera, mean_start_pos.reshape((1, 3)), height, width)
mean_e_uv = project_to_image(matrix_world_to_camera, mean_end_pos.reshape((1, 3)), height, width)
mean_su, mean_sv = mean_s_uv[0][0], mean_s_uv[1][0]
mean_eu, mean_ev = mean_e_uv[0][0], mean_e_uv[1][0]
if inrange(mean_su, 0, 255) and inrange(mean_sv, 0, 255) and \
inrange(mean_eu, 0, 255) and inrange(mean_ev, 0, 255):
cv2.arrowedLine(obs, (mean_su, mean_sv), (mean_eu, mean_ev), (0, 0, 255), 3)
        obs[mean_sv - 5:mean_sv + 5, mean_su - 5:mean_su + 5, :] = (0, 0, 0)  # mark the mean start point (rows indexed by v, cols by u)
if save_idx is not None:
cv2.imwrite(osp.join(log_dir, '{}_elite.png'.format(save_idx)), obs)
return obs
def set_shape_pos(pos):
shape_states = np.array(pyflex.get_shape_states()).reshape(-1, 14)
shape_states[:, 3:6] = pos.reshape(-1, 3)
shape_states[:, :3] = pos.reshape(-1, 3)
pyflex.set_shape_states(shape_states)
def visualize(env, particle_positions, shape_positions, config_id, sample_idx=None, picked_particles=None, show=False):
""" Render point cloud trajectory without running the simulation dynamics"""
env.reset(config_id=config_id)
frames = []
for i in range(len(particle_positions)):
particle_pos = particle_positions[i]
shape_pos = shape_positions[i]
p = pyflex.get_positions().reshape(-1, 4)
p[:, :3] = [0., -0.1, 0.] # All particles moved underground
if sample_idx is None:
p[:len(particle_pos), :3] = particle_pos
else:
p[:, :3] = [0, -0.1, 0]
p[sample_idx, :3] = particle_pos
pyflex.set_positions(p)
set_shape_pos(shape_pos)
rgb = env.get_image(env.camera_width, env.camera_height)
frames.append(rgb)
if show:
if i == 0: continue
picked_point = picked_particles[i]
phases = np.zeros(pyflex.get_n_particles())
for id in picked_point:
if id != -1:
phases[sample_idx[int(id)]] = 1
pyflex.set_phases(phases)
img = env.get_image()
cv2.imshow('picked particle images', img[:, :, ::-1])
cv2.waitKey()
return frames
def add_occluded_particles(observable_positions, observable_vel_history, particle_radius=0.00625, neighbor_distance=0.0216):
occluded_idx = np.where(observable_positions[:, 1] > neighbor_distance / 2 + particle_radius)
occluded_positions = []
for o_idx in occluded_idx[0]:
pos = observable_positions[o_idx]
occlude_num = np.floor(pos[1] / neighbor_distance).astype('int')
for i in range(occlude_num):
occluded_positions.append([pos[0], particle_radius + i * neighbor_distance, pos[2]])
print("add occluded particles num: ", len(occluded_positions))
occluded_positions = np.asarray(occluded_positions, dtype=np.float32).reshape((-1, 3))
occluded_velocity_his = np.zeros((len(occluded_positions), observable_vel_history.shape[1]), dtype=np.float32)
all_positions = np.concatenate([observable_positions, occluded_positions], axis=0)
all_vel_his = np.concatenate([observable_vel_history, occluded_velocity_his], axis=0)
return all_positions, all_vel_his
def sort_pointcloud_for_fold(pointcloud, dim):
pointcloud = list(pointcloud)
sorted_pointcloud = sorted(pointcloud, key=lambda k: (k[0], k[2]))
for idx in range(len(sorted_pointcloud) - 1):
assert sorted_pointcloud[idx][0] < sorted_pointcloud[idx + 1][0] or (
sorted_pointcloud[idx][0] == sorted_pointcloud[idx + 1][0] and
sorted_pointcloud[idx][2] < sorted_pointcloud[idx + 1][2]
)
real_sorted = []
for i in range(dim):
points_row = sorted_pointcloud[i * dim: (i + 1) * dim]
points_row = sorted(points_row, key=lambda k: k[2])
real_sorted += points_row
sorted_pointcloud = real_sorted
return np.asarray(sorted_pointcloud)
def get_fold_idx(dim=4):
group_a = []
for i in range(dim - 1):
for j in range(dim - i - 1):
group_a.append(i * dim + j)
group_b = []
for j in range(dim - 1, 0, -1):
for i in range(dim - 1, dim - 1 - j, -1):
group_b.append(i * dim + j)
return group_a, group_b
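# e.g. get_fold_idx(dim=3) -> group_a = [0, 1, 3], group_b = [8, 5, 7]:
# group_a[k] and group_b[k] are mirror particles across the grid anti-diagonal,
# i.e. the pairs that should meet when the cloth is folded corner to corner.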
############################ Other ########################
def updateDictByAdd(dict1, dict2):
'''
update dict1 by dict2
'''
for k1, v1 in dict2.items():
for k2, v2 in v1.items():
dict1[k1][k2] += v2.cpu().item()
return dict1
def configure_logger(log_dir, exp_name):
# Configure logger
logger.configure(dir=log_dir, exp_name=exp_name)
logdir = logger.get_dir()
assert logdir is not None
os.makedirs(logdir, exist_ok=True)
def configure_seed(seed):
# Configure seed
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
############### for planning ###############################
def set_picker_pos(pos):
shape_states = pyflex.get_shape_states().reshape((-1, 14))
shape_states[1, :3] = -1
shape_states[1, 3:6] = -1
shape_states[0, :3] = pos
shape_states[0, 3:6] = pos
pyflex.set_shape_states(shape_states)
pyflex.step()
def set_resource():
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
| 35.426799 | 128 | 0.6397 | 120 | 0.008405 | 0 | 0 | 0 | 0 | 0 | 0 | 1,107 | 0.077537 |
967f854c2cc3d7839a4210800ff6ac34aa126d0b
| 3,493 |
py
|
Python
|
tests/test_classes.py
|
fossabot/RPGenie
|
eb3ee17ede0dbbec787766d607b2f5b89d65533d
|
[
"MIT"
] | 32 |
2017-09-03T21:14:17.000Z
|
2022-01-12T04:26:28.000Z
|
tests/test_classes.py
|
fossabot/RPGenie
|
eb3ee17ede0dbbec787766d607b2f5b89d65533d
|
[
"MIT"
] | 9 |
2017-09-12T13:16:43.000Z
|
2022-01-19T18:53:48.000Z
|
tests/test_classes.py
|
fossabot/RPGenie
|
eb3ee17ede0dbbec787766d607b2f5b89d65533d
|
[
"MIT"
] | 19 |
2017-10-12T03:14:54.000Z
|
2021-06-12T18:30:33.000Z
|
#! python3
""" Pytest-compatible tests for src/classes.py """
import sys
from pathlib import Path
from copy import deepcopy
from unittest import mock
# A workaround for tests not automatically setting
# root/src/ as the current working directory
path_to_src = Path(__file__).parent.parent / "src"
sys.path.insert(0, str(path_to_src))
from classes import Item, Inventory, Player, Character
from settings import *
def initialiser(testcase):
""" Initialises all test cases with data """
def inner(*args, **kwargs):
items = [Item(i) for i in range(kwargs.get("itemcount", 3))]
inv = Inventory(items=deepcopy(items), **kwargs)
return testcase(items, inv, *args, **kwargs)
return inner
@initialiser
def test_testData(items, inv, *args, **kwargs):
""" Assert the test data itself is valid """
assert items == inv.items
@initialiser
def test_inv_append(items, inv, *args, **kwargs):
""" Test for inventory append functionality """
itemcount = len(items)
for i in range(inv.max_capacity - itemcount):
assert inv.append(Item(2)) == f"{Item(2).name} added to inventory"
assert inv.append(Item(1)) == "No room in inventory"
assert len(inv) == inv.max_capacity
#Separate tests for stackable items
assert inv.append(Item(0)) == f"2 {Item(0).name} in container"
assert inv.items[inv.items.index(Item(0))]._count == 2
@initialiser
def test_inv_remove(items, inv, *args, **kwargs):
""" Test for inventory item removal """
inv.items[inv.items.index(Item(0))]._count += 2
# Non-stackable items
assert inv.remove(Item(1)) == f"{Item(1).name} was successfully removed"
assert inv.items.count(Item(1)) == 0
# Stackable items
assert inv.remove(Item(0)) == f"1/{inv.items[inv.items.index(Item(0))]._count+1} {Item(0).name} removed"
assert inv.items.count(Item(0)) == 1
assert inv.remove(Item(0), count=3) == "You don't have that many"
assert inv.remove(Item(0), count=2) == f"{Item(0).name} was successfully removed"
assert inv.items.count(Item(0)) == 0
@initialiser
def test_inv_equip_unequip(items, inv, *args, **kwargs):
""" Test for inventory item equip/unequip functionality """
# Equipping items
assert inv.equip(Item(1)) == f"You equip {Item(1).name}"
assert inv.equip(Item(2)) == "You can't equip that"
# Unequipping items
assert inv.unequip('weapon') == f"You unequip {Item(1).name}"
assert inv.unequip('off-hand') == "That slot is empty"
assert inv.gear['head'] is None
assert inv.gear['weapon'] is None
@initialiser
def test_inv_combine(items, inv, *args, **kwargs):
""" Test for item combining functionality """
assert inv.better_combine_item(inv.items[1], 0, inv.items[2]) == "Combination successful"
assert len(inv) == 2
assert inv.better_combine_item(inv.items[0], 0, inv.items[1]) == "Could not combine those items"
assert len(inv) == 2
def test_char_levelmixin():
""" Test for level-up functionality """
char = Character('John Doe', max_level = 5)
assert 1 == char.level
assert 85 == char.next_level
assert char.give_exp(85) == f"Congratulations! You've levelled up; your new level is {char.level}\nEXP required for next level: {int(char.next_level-char.experience)}\nCurrent EXP: {char.experience}"
for _ in range(char.max_level - char.level):
char.give_exp(char.next_level)
assert char.level == char.max_level
assert char.give_exp(char.next_level) == f""
| 36.768421 | 203 | 0.678214 | 0 | 0 | 0 | 0 | 2,197 | 0.628972 | 0 | 0 | 1,239 | 0.354709 |
967fc22994b7e8387bc0009833f00fda8cc5c3ce
| 18,675 |
py
|
Python
|
biosteam/units/_shortcut_column.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
biosteam/units/_shortcut_column.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
biosteam/units/_shortcut_column.py
|
tylerhuntington222/biosteam
|
234959180a3210d95e39a012454f455723c92686
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <[email protected]>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from ._binary_distillation import BinaryDistillation
import flexsolve as flx
from thermosteam.exceptions import InfeasibleRegion
from thermosteam.equilibrium import DewPoint, BubblePoint
import numpy as np
__all__ = ('ShortcutColumn',)
# %% Functions
@flx.njitable(cache=True)
def geometric_mean(a, b):
return (a * b) ** 0.5
@flx.njitable(cache=True)
def compute_mean_volatilities_relative_to_heavy_key(K_distillate, K_bottoms, HK_index):
alpha_distillate = K_distillate / K_distillate[HK_index]
alpha_bottoms = K_bottoms / K_bottoms[HK_index]
alpha_mean = geometric_mean(alpha_distillate,
alpha_bottoms)
return alpha_mean
@flx.njitable(cache=True)
def compute_partition_coefficients(y, x):
x[x <= 1e-16] = 1e-16
return y / x
@flx.njitable(cache=True)
def compute_distillate_recoveries_Hengsteback_and_Gaddes(d_Lr, b_Hr,
alpha_mean,
LHK_index):
LK_index = LHK_index[0]
alpha_LK = alpha_mean[LK_index]
A_dummy = (1. - b_Hr) / b_Hr
A = np.log10(A_dummy)
B = np.log10(d_Lr / (1. - d_Lr) / A_dummy) / np.log10(alpha_LK)
dummy = 10.**A * alpha_mean**B
distillate_recoveries = dummy / (1. + dummy)
distillate_recoveries[LHK_index] = [d_Lr, 1. - b_Hr]
distillate_recoveries[distillate_recoveries < 1e-12] = 0.
return distillate_recoveries
@flx.njitable(cache=True)
def compute_minimum_theoretical_stages_Fenske(LHK_distillate, LHK_bottoms, alpha_LK):
LK, HK = LHK_distillate
LHK_ratio_distillate = LK / HK
LK, HK = LHK_bottoms
HLK_ratio_bottoms = HK / LK
N = np.log10(LHK_ratio_distillate * HLK_ratio_bottoms) / np.log10(alpha_LK)
return N
@flx.njitable(cache=True)
def objective_function_Underwood_constant(theta, q, z_f, alpha_mean):
return (alpha_mean * z_f / (alpha_mean - theta)).sum() - 1.0 + q
@flx.njitable(cache=True)
def compute_minimum_reflux_ratio_Underwood(alpha_mean, z_d, theta):
Rm = (alpha_mean * z_d / (alpha_mean - theta)).sum() - 1.0
return Rm
@flx.njitable(cache=True)
def compute_theoretical_stages_Gilliland(Nm, Rm, R):
X = (R - Rm) / (R + 1.)
Y = 1. - np.exp((1. + 54.4*X) / (11. + 117.2*X) * (X - 1.) / X**0.5)
N = (Y + Nm) / (1. - Y)
return np.ceil(N)
@flx.njitable(cache=True)
def compute_feed_stage_Kirkbride(N, B, D,
feed_HK_over_LK,
z_LK_bottoms,
z_HK_distillate):
m_over_p = (B/D * feed_HK_over_LK * (z_LK_bottoms / z_HK_distillate)**2.) ** 0.206
return np.floor(N / (m_over_p + 1.))
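# Hypothetical sanity check (not part of the original module): with 95/5 kmol/hr of
# light/heavy key in the distillate, 5/95 kmol/hr in the bottoms, and an assumed mean
# relative volatility of 2.5, the Fenske equation gives
# log10((95/5) * (95/5)) / log10(2.5) ~= 6.4 minimum theoretical stages.
def _fenske_example():
    LHK_distillate = np.array([95., 5.])
    LHK_bottoms = np.array([5., 95.])
    return compute_minimum_theoretical_stages_Fenske(LHK_distillate, LHK_bottoms, 2.5)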
# %%
class ShortcutColumn(BinaryDistillation,
new_graphics=False):
r"""
Create a multicomponent distillation column that relies on the
Fenske-Underwood-Gilliland method to solve for the theoretical design
    of the distillation column and the separation of non-keys [1]_. The Murphree
efficiency (i.e. column efficiency) is based on the modified O'Connell
correlation [2]_. The diameter is based on tray separation and flooding
velocity [1]_ [3]_. Purchase costs are based on correlations compiled by
    Warren et al. [4]_.
Parameters
----------
ins : streams
Inlet fluids to be mixed into the feed stage.
outs : stream sequence
* [0] Distillate
* [1] Bottoms product
LHK : tuple[str]
Light and heavy keys.
y_top : float
Molar fraction of light key to the light and heavy keys in the
distillate.
x_bot : float
Molar fraction of light key to the light and heavy keys in the bottoms
product.
Lr : float
Recovery of the light key in the distillate.
Hr : float
Recovery of the heavy key in the bottoms product.
k : float
Ratio of reflux to minimum reflux.
Rmin : float, optional
        User enforced minimum reflux ratio. If the computed minimum reflux ratio is less than `Rmin`, this enforced value is used instead. Defaults to 0.6.
specification="Composition" : "Composition" or "Recovery"
If composition is used, `y_top` and `x_bot` must be specified.
If recovery is used, `Lr` and `Hr` must be specified.
P=101325 : float
Operating pressure [Pa].
vessel_material : str, optional
Vessel construction material. Defaults to 'Carbon steel'.
tray_material : str, optional
Tray construction material. Defaults to 'Carbon steel'.
tray_type='Sieve' : 'Sieve', 'Valve', or 'Bubble cap'
Tray type.
tray_spacing=450 : float
Typically between 152 to 915 mm.
stage_efficiency=None :
User enforced stage efficiency. If None, stage efficiency is
calculated by the O'Connell correlation [2]_.
velocity_fraction=0.8 : float
Fraction of actual velocity to maximum velocity allowable before
flooding.
foaming_factor=1.0 : float
Must be between 0 to 1.
open_tray_area_fraction=0.1 : float
Fraction of open area to active area of a tray.
downcomer_area_fraction=None : float
Enforced fraction of downcomer area to net (total) area of a tray.
If None, estimate ratio based on Oliver's estimation [1]_.
is_divided=False : bool
True if the stripper and rectifier are two separate columns.
References
----------
.. [1] J.D. Seader, E.J. Henley, D.K. Roper. (2011)
Separation Process Principles 3rd Edition. John Wiley & Sons, Inc.
.. [2] M. Duss, R. Taylor. (2018)
Predict Distillation Tray Efficiency. AICHE
.. [3] Green, D. W. Distillation. In Perry’s Chemical Engineers’
Handbook, 9 ed.; McGraw-Hill Education, 2018.
.. [4] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S., Gani, R.,
& Ng, M. K. (2017). Product and Process Design Principles. Wiley.
Cost Accounting and Capital Cost Estimation (Chapter 16)
Examples
--------
>>> from biosteam.units import ShortcutColumn
>>> from biosteam import Stream, settings
>>> settings.set_thermo(['Water', 'Methanol', 'Glycerol'])
>>> feed = Stream('feed', flow=(80, 100, 25))
>>> bp = feed.bubble_point_at_P()
>>> feed.T = bp.T # Feed at bubble point T
>>> D1 = ShortcutColumn('D1', ins=feed,
... outs=('distillate', 'bottoms_product'),
... LHK=('Methanol', 'Water'),
... y_top=0.99, x_bot=0.01, k=2,
... is_divided=True)
>>> D1.simulate()
>>> # See all results
>>> D1.show(T='degC', P='atm', composition=True)
ShortcutColumn: D1
ins...
[0] feed
phase: 'l', T: 76.129 degC, P: 1 atm
composition: Water 0.39
Methanol 0.488
Glycerol 0.122
-------- 205 kmol/hr
outs...
[0] distillate
phase: 'g', T: 64.91 degC, P: 1 atm
composition: Water 0.01
Methanol 0.99
-------- 100 kmol/hr
[1] bottoms_product
phase: 'l', T: 100.06 degC, P: 1 atm
composition: Water 0.754
Methanol 0.00761
Glycerol 0.239
-------- 105 kmol/hr
>>> D1.results()
Distillation Units D1
Cooling water Duty kJ/hr -7.9e+06
Flow kmol/hr 5.4e+03
Cost USD/hr 2.64
Low pressure steam Duty kJ/hr 1.43e+07
Flow kmol/hr 368
Cost USD/hr 87.5
Design Theoretical feed stage 8
Theoretical stages 16
Minimum reflux Ratio 1.06
Reflux Ratio 2.12
Rectifier stages 13
Stripper stages 26
Rectifier height ft 31.7
Stripper height ft 50.9
Rectifier diameter ft 4.53
Stripper diameter ft 3.67
Rectifier wall thickness in 0.312
Stripper wall thickness in 0.312
Rectifier weight lb 6.46e+03
Stripper weight lb 7.98e+03
Purchase cost Rectifier trays USD 1.52e+04
Stripper trays USD 2.02e+04
Rectifier tower USD 8.44e+04
Stripper tower USD 1.01e+05
Condenser USD 4.17e+04
Boiler USD 2.99e+04
Total purchase cost USD 2.92e+05
Utility cost USD/hr 90.1
"""
line = 'Distillation'
_ins_size_is_fixed = False
_N_ins = 1
_N_outs = 2
def _run(self):
# Initial mass balance
self._run_binary_distillation_mass_balance()
# Initialize objects to calculate bubble and dew points
vle_chemicals = self.feed.vle_chemicals
reset_cache = self._vle_chemicals != vle_chemicals
if reset_cache:
self._dew_point = DewPoint(vle_chemicals, self.thermo)
self._bubble_point = BubblePoint(vle_chemicals, self.thermo)
self._IDs_vle = self._dew_point.IDs
self._vle_chemicals = vle_chemicals
# Setup light and heavy keys
LHK = [i.ID for i in self.chemicals[self.LHK]]
IDs = self._IDs_vle
self._LHK_vle_index = np.array([IDs.index(i) for i in LHK], dtype=int)
# Add temporary specification
composition_spec = self.product_specification_format == 'Composition'
if composition_spec:
feed = self.feed
distillate, bottoms = self.outs
LK_index, HK_index = LHK_index = self._LHK_index
LK_feed, HK_feed = feed.mol[LHK_index]
self._Lr = distillate.mol[LK_index] / LK_feed
self._Hr = bottoms.mol[HK_index] / HK_feed
# Set starting point for solving column
if reset_cache:
self._add_trace_heavy_and_light_non_keys_in_products()
distillate_recoveries = self._estimate_distillate_recoveries()
self._distillate_recoveries = distillate_recoveries
self._update_distillate_recoveries(distillate_recoveries)
else:
distillate_recoveries = self._distillate_recoveries
lb = 1e-6; ub = 1 - 1e-6
distillate_recoveries[distillate_recoveries < lb] = lb
distillate_recoveries[distillate_recoveries > ub] = ub
self._update_distillate_recoveries(distillate_recoveries)
# Solve for new recoveries
self._solve_distillate_recoveries()
self._update_distillate_and_bottoms_temperature()
# Remove temporary data
if composition_spec: self._Lr = self._Hr = None
def reset_cache(self):
self._vle_chemicals = None
def plot_stages(self):
raise TypeError('cannot plot stages for shortcut column')
def _design(self):
self._run_FenskeUnderwoodGilliland()
self._run_condenser_and_boiler()
self._complete_distillation_column_design()
def _run_FenskeUnderwoodGilliland(self):
LHK_index = self._LHK_index
alpha_mean = self._estimate_mean_volatilities_relative_to_heavy_key()
LK_index = self._LHK_vle_index[0]
alpha_LK = alpha_mean[LK_index]
feed, = self.ins
distillate, bottoms = self.outs
Nm = compute_minimum_theoretical_stages_Fenske(distillate.mol[LHK_index],
bottoms.mol[LHK_index],
alpha_LK)
theta = self._solve_Underwood_constant(alpha_mean, alpha_LK)
IDs = self._IDs_vle
z_d = distillate.get_normalized_mol(IDs)
Rm = compute_minimum_reflux_ratio_Underwood(alpha_mean, z_d, theta)
if Rm < self.Rmin: Rm = self.Rmin
R = self.k * Rm
N = compute_theoretical_stages_Gilliland(Nm, Rm, R)
feed_HK, feed_LK = feed.mol[LHK_index]
feed_HK_over_LK = feed_HK / feed_LK
Bs = bottoms.imol[IDs]
Ds = distillate.imol[IDs]
B = Bs.sum()
D = Ds.sum()
LK_index, HK_index = LHK_index
z_LK_bottoms = bottoms.mol[LK_index] / B
z_HK_distillate = distillate.mol[HK_index] / D
feed_stage = compute_feed_stage_Kirkbride(N, B, D,
feed_HK_over_LK,
z_LK_bottoms,
z_HK_distillate)
design = self.design_results
design['Theoretical feed stage'] = N - feed_stage
design['Theoretical stages'] = N
design['Minimum reflux'] = Rm
design['Reflux'] = R
def _get_relative_volatilities_LHK(self):
distillate, bottoms = self.outs
LHK = self.LHK
condensate = self.condensate
K_light, K_heavy = distillate.get_molar_composition(LHK) / condensate.get_molar_composition(LHK)
alpha_LHK_distillate = K_light/K_heavy
boilup = self.boilup
        K_light, K_heavy = boilup.get_molar_composition(LHK) / bottoms.get_molar_composition(LHK)
        alpha_LHK_bottoms = K_light/K_heavy
return alpha_LHK_distillate, alpha_LHK_bottoms
def _get_feed_quality(self):
feed = self.feed
feed = feed.copy()
H_feed = feed.H
try: dp = feed.dew_point_at_P()
except: pass
else: feed.T = dp.T
feed.phase = 'g'
H_vap = feed.H
try: bp = feed.bubble_point_at_P()
except: pass
else: feed.T = bp.T
feed.phase = 'l'
H_liq = feed.H
q = (H_vap - H_feed) / (H_vap - H_liq)
return q
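    # Note: q is the feed thermal quality used in the Underwood equation. With the
    # enthalpies above, q ~= 1 for a saturated-liquid feed, q ~= 0 for a saturated-vapor
    # feed, and values in between indicate a partially vaporized feed.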
def _solve_Underwood_constant(self, alpha_mean, alpha_LK):
q = self._get_feed_quality()
z_f = self.ins[0].get_normalized_mol(self._IDs_vle)
args = (q, z_f, alpha_mean)
ub = np.inf
lb = -np.inf
bracket = flx.find_bracket(objective_function_Underwood_constant,
1.0, alpha_LK, lb, ub, args)
theta = flx.IQ_interpolation(objective_function_Underwood_constant,
*bracket, args=args, checkiter=False,
checkbounds=False)
return theta
def _add_trace_heavy_and_light_non_keys_in_products(self):
distillate, bottoms = self.outs
LNK_index = self._LNK_index
HNK_index = self._HNK_index
feed_mol = self.feed.mol
LNK_mol = feed_mol[LNK_index]
HNK_mol = feed_mol[HNK_index]
bottoms.mol[LNK_index] = LNK_trace = 0.0001 * LNK_mol
distillate.mol[LNK_index] = LNK_mol - LNK_trace
distillate.mol[HNK_index] = HNK_trace = 0.0001 * HNK_mol
bottoms.mol[HNK_index] = HNK_mol - HNK_trace
def _estimate_mean_volatilities_relative_to_heavy_key(self):
# Mean volatilities taken at distillate and bottoms product
distillate, bottoms = self.outs
dew_point = self._dew_point
bubble_point = self._bubble_point
IDs = self._IDs_vle
z_distillate = distillate.get_normalized_mol(IDs)
z_bottoms = bottoms.get_normalized_mol(IDs)
dp = dew_point(z_distillate, P=self.P)
bp = bubble_point(z_bottoms, P=self.P)
K_distillate = compute_partition_coefficients(dp.z, dp.x)
K_bottoms = compute_partition_coefficients(bp.y, bp.z)
HK_index = self._LHK_vle_index[1]
alpha_mean = compute_mean_volatilities_relative_to_heavy_key(K_distillate,
K_bottoms,
HK_index)
return alpha_mean
def _estimate_distillate_recoveries(self):
# Use Hengsteback and Geddes equations
alpha_mean = self._estimate_mean_volatilities_relative_to_heavy_key()
return compute_distillate_recoveries_Hengsteback_and_Gaddes(self.Lr, self.Hr,
alpha_mean,
self._LHK_vle_index)
def _update_distillate_recoveries(self, distillate_recoveries):
feed = self.feed
distillate, bottoms = self.outs
IDs = self._IDs_vle
feed_mol = feed.imol[IDs]
distillate.imol[IDs] = distillate_mol = distillate_recoveries * feed_mol
bottoms.imol[IDs] = feed_mol - distillate_mol
def _solve_distillate_recoveries(self):
distillate_recoveries = self._distillate_recoveries
flx.aitken(self._recompute_distillate_recoveries,
distillate_recoveries, 1e-8, checkiter=False)
def _recompute_distillate_recoveries(self, distillate_recoveries):
if np.logical_or(distillate_recoveries > 1., distillate_recoveries < 0.).any():
raise InfeasibleRegion('distillate composition')
self._update_distillate_recoveries(distillate_recoveries)
distillate_recoveries = self._estimate_distillate_recoveries()
if hasattr(self, '_distillate_recoveries_hook'):
self._distillate_recoveries_hook(self._IDs_vle, distillate_recoveries)
self._distillate_recoveries = distillate_recoveries
return distillate_recoveries
| 42.636986 | 148 | 0.584257 | 15,635 | 0.837036 | 0 | 0 | 2,421 | 0.129611 | 0 | 0 | 7,417 | 0.397077 |
9681b53ab62bfb5ddd55b122e2a997c7da50a56f
| 11,477 |
py
|
Python
|
vital/bindings/python/vital/types/camera_intrinsics.py
|
dstoup/kwiver
|
a3a36317b446baf0feb6274235ab1ac6b4329ead
|
[
"BSD-3-Clause"
] | null | null | null |
vital/bindings/python/vital/types/camera_intrinsics.py
|
dstoup/kwiver
|
a3a36317b446baf0feb6274235ab1ac6b4329ead
|
[
"BSD-3-Clause"
] | null | null | null |
vital/bindings/python/vital/types/camera_intrinsics.py
|
dstoup/kwiver
|
a3a36317b446baf0feb6274235ab1ac6b4329ead
|
[
"BSD-3-Clause"
] | null | null | null |
"""
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Interface to VITAL camera_intrinsics objects
"""
import collections
import ctypes
import numpy
from vital.types.eigen import EigenArray
from vital.util import VitalErrorHandle, VitalObject
class CameraIntrinsics (VitalObject):
def __init__(self, focal_length=1., principle_point=(0, 0),
aspect_ratio=1., skew=0., dist_coeffs=(), from_cptr=None):
"""
:param focal_length: Focal length (default=1.0)
:type focal_length: float
:param principle_point: Principle point (default: [0,0]).
Values are copied into this structure.
:type principle_point: collections.Sequence[float]
:param aspect_ratio: Aspect ratio (default: 1.0)
:type aspect_ratio: float
:param skew: Skew (default: 0.0)
:type skew: float
:param dist_coeffs: Existing distortion coefficients (Default: empty).
Values are copied into this structure.
:type dist_coeffs: collections.Sequence[float]
"""
super(CameraIntrinsics, self).__init__(from_cptr, focal_length,
principle_point, aspect_ratio,
skew, dist_coeffs)
def _new(self, focal_length, principle_point, aspect_ratio, skew,
dist_coeffs):
"""
Construct a new vital::camera_intrinsics instance
:type focal_length: float
:type principle_point: collections.Sequence[float]
:type aspect_ratio: float
:type skew: float
:type dist_coeffs: collections.Sequence[float]
"""
ci_new = self.VITAL_LIB['vital_camera_intrinsics_new']
ci_new.argtypes = [
ctypes.c_double,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
ctypes.c_double,
ctypes.c_double,
EigenArray.c_ptr_type('X', 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR,
]
ci_new.restype = self.C_TYPE_PTR
# Make "vectors"
pp = EigenArray.from_iterable(principle_point, target_shape=(2, 1))
dc = EigenArray(len(dist_coeffs), dynamic_rows=True)
if len(dist_coeffs):
dc.T[:] = dist_coeffs
with VitalErrorHandle() as eh:
return ci_new(focal_length, pp, aspect_ratio, skew, dc, eh)
def _destroy(self):
ci_dtor = self.VITAL_LIB['vital_camera_intrinsics_destroy']
ci_dtor.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
with VitalErrorHandle() as eh:
ci_dtor(self, eh)
@property
def focal_length(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_focal_length']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def principle_point(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_principle_point']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(2, from_cptr=m_ptr, owns_data=True)
@property
def aspect_ratio(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_aspect_ratio']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def skew(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_skew']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def dist_coeffs(self):
""" Get the distortion coefficients array """
f = self.VITAL_LIB['vital_camera_intrinsics_get_dist_coeffs']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type('X', 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(dynamic_rows=1, from_cptr=m_ptr, owns_data=True)
def __eq__(self, other):
if isinstance(other, CameraIntrinsics):
return (
self.focal_length == other.focal_length and
numpy.allclose(self.principle_point, other.principle_point) and
self.aspect_ratio == other.aspect_ratio and
self.skew == other.skew and
numpy.allclose(self.dist_coeffs, other.dist_coeffs)
)
return False
def __ne__(self, other):
return not (self == other)
def as_matrix(self):
"""
Access the intrinsics as an upper triangular matrix
**Note:** *This matrix includes the focal length, principal point,
aspect ratio, and skew, but does not model distortion.*
:return: 3x3 upper triangular matrix
"""
f = self.VITAL_LIB['vital_camera_intrinsics_as_matrix']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(3, 3, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(3, 3, from_cptr=m_ptr, owns_data=True)
def map_2d(self, norm_pt):
"""
Map normalized image coordinates into actual image coordinates
This function applies both distortion and application of the
calibration matrix to map into actual image coordinates.
:param norm_pt: Normalized image coordinate to map to an image
coordinate (2-element sequence).
:type norm_pt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_map_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def map_3d(self, norm_hpt):
"""
Map a 3D point in camera coordinates into actual image coordinates
:param norm_hpt: Normalized coordinate to map to an image coordinate
(3-element sequence)
:type norm_hpt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_hpt) == 3, "Input sequence was not of length 3"
f = self.VITAL_LIB['vital_camera_intrinsics_map_3d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(3, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(3)
p.T[:] = norm_hpt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def unmap_2d(self, pt):
"""
Unmap actual image coordinates back into normalized image coordinates
This function applies both application of the inverse calibration matrix
and undistortion of the normalized coordinates
:param pt: Actual image 2D point to un-map.
:return: Un-mapped normalized image coordinate.
"""
assert len(pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_unmap_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def distort_2d(self, norm_pt):
"""
Map normalized image coordinates into distorted coordinates
:param norm_pt: Normalized 2D image coordinate.
:return: Distorted 2D coordinate.
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_distort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def undistort_2d(self, dist_pt):
"""
Unmap distorted normalized coordinates into normalized coordinates
:param dist_pt: Distorted 2D coordinate to un-distort.
:return: Normalized 2D image coordinate.
"""
assert len(dist_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_undistort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = dist_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
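# Usage sketch (assumes the compiled VITAL C library and its Python bindings are
# importable; the numeric values below are arbitrary and purely illustrative):
def _intrinsics_example():
    ci = CameraIntrinsics(focal_length=1000.0, principle_point=(320, 240))
    K = ci.as_matrix()             # 3x3 upper-triangular calibration matrix
    return ci.map_2d([0.1, -0.2])  # normalized -> actual image coordinate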
| 38.773649 | 80 | 0.640673 | 9,709 | 0.845953 | 0 | 0 | 1,659 | 0.14455 | 0 | 0 | 5,046 | 0.439662 |
9681c77a186723aca18c1c0fe154bf16a7a4024b
| 2,333 |
py
|
Python
|
utils/lanzouyun.py
|
Firesuiry/jdmm-client
|
33defde409222ae49c6301cec3389ca72d19953c
|
[
"BSD-3-Clause"
] | null | null | null |
utils/lanzouyun.py
|
Firesuiry/jdmm-client
|
33defde409222ae49c6301cec3389ca72d19953c
|
[
"BSD-3-Clause"
] | null | null | null |
utils/lanzouyun.py
|
Firesuiry/jdmm-client
|
33defde409222ae49c6301cec3389ca72d19953c
|
[
"BSD-3-Clause"
] | null | null | null |
from lanzou.api import LanZouCloud
import urllib.parse
final_files = []
final_share_infos = []
def lzy_login(cookies):
lzy = LanZouCloud()
cookie_dic = cookie_to_dic(cookies)
print(cookie_dic)
cookies = {
'ylogin': cookie_dic['ylogin'],
'phpdisk_info': urllib.parse.quote(cookie_dic['phpdisk_info'])
}
print(cookies)
code = lzy.login_by_cookie(cookies)
    print('login result:', code)
if code != 0:
return None
return lzy
def cookie_to_dic(mycookie):
dic = {}
for i in mycookie.split('; '):
dic[i.split('=', 1)[0]] = i.split('=', 1)[1]
return dic
def lzy_get_files(lzy, dir=-1, deepth=9999, path='/', include_dir=False, dir_name_filter=''):
print(f'lzy_get_files {locals()}')
datas = []
if dir_name_filter and dir_name_filter not in path.split('/'):
        print(f'skipping directory {path}')
else:
files = lzy.get_file_list(dir)
for file in files:
data = get_share_info(lzy, file, path)
datas.append(data)
if deepth > 1:
dirs = lzy.get_dir_list(dir)
for dir in dirs:
new_datas = lzy_get_files(lzy, dir.id, deepth - 1, path + dir.name + '/', include_dir=include_dir,
dir_name_filter=dir_name_filter)
datas += new_datas
if include_dir:
datas.append(get_share_info(lzy, dir, path, isdir=True))
return datas
def get_share_info(lzy, file, path, isdir=False):
print(file)
share_info = lzy.get_share_info(file.id)
print(share_info)
size = file.size if not isdir else '0 B'
size_num = float(size.split()[0])
size_unit = size.split()[1]
if size_unit == 'K':
size_num *= 2 ** 10
if size_unit == 'M':
size_num *= 2 ** 20
share_info_dic = {
'name': file.name,
'size': size_num,
'type': file.type if not isdir else '文件夹',
'isdir': isdir,
'download_url': share_info.url,
'tiquma': share_info.pwd,
'des': path + file.name,
'parent': path,
}
return share_info_dic
def test():
lzy = lzy_login(cookies.replace('\n',''))
# datas = lzy_get_files(lzy, include_dir=True)
# for data in datas:
# print(data)
cookies = '''
'''
if __name__ == '__main__':
test()
| 25.637363 | 110 | 0.582512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.136499 |
9681dcb1c6a4a00147da8d82baecc6e355120cf5
| 1,948 |
py
|
Python
|
util/ndcg.py
|
voschezang/Data-Mining
|
0762df1d9a63f81d6f44d8a35cc61802baad4c37
|
[
"MIT"
] | null | null | null |
util/ndcg.py
|
voschezang/Data-Mining
|
0762df1d9a63f81d6f44d8a35cc61802baad4c37
|
[
"MIT"
] | null | null | null |
util/ndcg.py
|
voschezang/Data-Mining
|
0762df1d9a63f81d6f44d8a35cc61802baad4c37
|
[
"MIT"
] | null | null | null |
import numpy as np
import util.data
def ndcg(X_test, y_test, y_pred):
    # DataFrame.copy() takes no column list, so copy the frame and attach the scores
    Xy_pred = X_test.copy()
    Xy_pred['score_pred'] = y_pred
    Xy_pred['score'] = y_test
    # sort_values returns a new frame; keep the result so the ordering is actually used
    Xy_pred = Xy_pred.sort_values(['srch_id', 'score_pred'], ascending=[True, False])
    dcg_test = DCG_dict(Xy_pred)
    ndcg = np.mean(np.array(list(dcg_test.values())))
    return ndcg
def sort_pred_test(x_test, y_test, y_pred):
# calculate dcg of test set per srch_id
Xy_pred = util.data.Xy_pred(x_test, y_pred)
# put true y values on indexes, do not sort !
Xy_true = util.data.Xy_pred(x_test, y_test)
return Xy_pred, Xy_true
def dcg_at_k(r, k, method=0):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
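# Worked sketch (hypothetical relevance scores, not from the original code): for
# r = [3, 2, 3, 0, 1, 2] the DCG is 3 + 2/log2(2) + 3/log2(3) + 0 + 1/log2(5) + 2/log2(6)
# ~= 8.10, the ideal DCG (scores sorted descending) is ~8.69, so ndcg_at_k(r, 6) ~= 0.93.
def _ndcg_example():
    r = [3, 2, 3, 0, 1, 2]
    return ndcg_at_k(r, k=len(r))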
def DCG_dict(data):
DCG = {}
# for id in data['srch_id']:
# rows = rows_srch_id(data, id)
# r = relevance_scores(rows)
r = []
prev_srch_id = -1
position = 0
for i in data.index.tolist():
if prev_srch_id == -1:
row = data.loc[i]
cur_srch_id = row.srch_id
prev_srch_id = 0
row = data.loc[i]
next_id = row.srch_id
score = row.score
# compute position
if cur_srch_id != next_id:
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
cur_srch_id = next_id
r = []
r.append(score)
position += 1
else:
r.append(score)
position += 1
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
return DCG
| 27.828571 | 75 | 0.569302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.144764 |
9681fb5a4ab9afe1cfd4688c372d7a335ae0a5d6
| 4,364 |
py
|
Python
|
pycorrector/bert/bert_detector.py
|
zouning68/pycorrector
|
4daaf13e566f2cecc724fb5a77db5d89f1f25203
|
[
"Apache-2.0"
] | 45 |
2020-01-18T03:46:07.000Z
|
2022-03-26T13:06:36.000Z
|
pycorrector/bert/bert_detector.py
|
zouning68/pycorrector
|
4daaf13e566f2cecc724fb5a77db5d89f1f25203
|
[
"Apache-2.0"
] | 1 |
2020-08-16T12:42:05.000Z
|
2020-08-16T12:42:05.000Z
|
pycorrector/bert/bert_detector.py
|
zouning68/pycorrector
|
4daaf13e566f2cecc724fb5a77db5d89f1f25203
|
[
"Apache-2.0"
] | 9 |
2020-01-04T09:09:01.000Z
|
2022-01-17T08:56:23.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: use BERT to detect Chinese character errors
"""
import sys
import time
import numpy as np
import torch
from pytorch_transformers import BertForMaskedLM
from pytorch_transformers import BertTokenizer
sys.path.append('../..')
from pycorrector.detector import ErrorType
from pycorrector.utils.logger import logger
from pycorrector.bert import config
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids,
segment_ids=None,
mask_ids=None,
masked_lm_labels=None,
input_tokens=None,
id=None,
token=None):
self.input_ids = input_ids
self.segment_ids = segment_ids
self.mask_ids = mask_ids
self.masked_lm_labels = masked_lm_labels
self.input_tokens = input_tokens
self.id = id
self.token = token
class BertDetector(object):
def __init__(self, bert_model_dir=config.bert_model_dir,
bert_model_vocab=config.bert_model_vocab,
threshold=0.1):
self.name = 'bert_detector'
self.bert_model_dir = bert_model_dir
self.bert_model_vocab = bert_model_vocab
self.initialized_bert_detector = False
self.threshold = threshold
def check_bert_detector_initialized(self):
if not self.initialized_bert_detector:
self.initialize_bert_detector()
def initialize_bert_detector(self):
t1 = time.time()
self.bert_tokenizer = BertTokenizer(vocab_file=self.bert_model_vocab)
self.MASK_TOKEN = "[MASK]"
self.MASK_ID = self.bert_tokenizer.convert_tokens_to_ids([self.MASK_TOKEN])[0]
# Prepare model
self.model = BertForMaskedLM.from_pretrained(self.bert_model_dir)
logger.debug("Loaded model ok, path: %s, spend: %.3f s." % (self.bert_model_dir, time.time() - t1))
self.initialized_bert_detector = True
def _convert_sentence_to_detect_features(self, sentence):
"""Loads a sentence into a list of `InputBatch`s."""
self.check_bert_detector_initialized()
features = []
tokens = self.bert_tokenizer.tokenize(sentence)
token_ids = self.bert_tokenizer.convert_tokens_to_ids(tokens)
for idx, token_id in enumerate(token_ids):
masked_lm_labels = [-1] * len(token_ids)
masked_lm_labels[idx] = token_id
features.append(
InputFeatures(input_ids=token_ids,
masked_lm_labels=masked_lm_labels,
input_tokens=tokens,
id=idx,
token=tokens[idx]))
return features
def predict_token_prob(self, sentence):
self.check_bert_detector_initialized()
result = []
eval_features = self._convert_sentence_to_detect_features(sentence)
for f in eval_features:
input_ids = torch.tensor([f.input_ids])
masked_lm_labels = torch.tensor([f.masked_lm_labels])
outputs = self.model(input_ids, masked_lm_labels=masked_lm_labels)
masked_lm_loss, predictions = outputs[:2]
prob = np.exp(-masked_lm_loss.item())
result.append([prob, f])
return result
def detect(self, sentence):
"""
句子改错
:param sentence: 句子文本
:param threshold: 阈值
:return: list[list], [error_word, begin_pos, end_pos, error_type]
"""
maybe_errors = []
for prob, f in self.predict_token_prob(sentence):
logger.debug('prob:%s, token:%s, idx:%s' % (prob, f.token, f.id))
if prob < self.threshold:
maybe_errors.append([f.token, f.id, f.id + 1, ErrorType.char])
return maybe_errors
if __name__ == "__main__":
d = BertDetector()
error_sentences = ['少先队员因该为老人让座',
'少先队员因该为老人让坐',
'少 先 队 员 因 该 为老人让座',
'少 先 队 员 因 该 为老人让坐',
'机七学习是人工智能领遇最能体现智能的一个分支',
'机七学习是人工智能领遇最能体现智能的一个分知']
t1 = time.time()
for sent in error_sentences:
err = d.detect(sent)
print("original sentence:{} => detect sentence:{}".format(sent, err))
| 35.770492 | 107 | 0.613886 | 3,487 | 0.764358 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.184787 |
968244c4e4821aa3176a5163d517e2a86b8ed427
| 98 |
py
|
Python
|
example_app/core/models/input.py
|
dazza-codes/serverless-fast-api
|
c4cdce62326a22778157a8555b7cdaafc2519b8d
|
[
"MIT"
] | 2 |
2021-01-22T12:27:59.000Z
|
2021-09-09T14:54:11.000Z
|
example_app/core/models/input.py
|
dazza-codes/serverless-fast-api
|
c4cdce62326a22778157a8555b7cdaafc2519b8d
|
[
"MIT"
] | 4 |
2020-05-03T01:54:53.000Z
|
2021-01-21T18:20:27.000Z
|
example_app/core/models/input.py
|
dazza-codes/serverless-fast-api
|
c4cdce62326a22778157a8555b7cdaafc2519b8d
|
[
"MIT"
] | 1 |
2021-09-09T14:49:54.000Z
|
2021-09-09T14:49:54.000Z
|
from pydantic import BaseModel
class InputExample(BaseModel):
a: int = ...
b: int = ...
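# Usage sketch (hypothetical values, not part of the original module): `...` (Ellipsis)
# marks both fields as required, and pydantic validates/coerces them as integers.
if __name__ == "__main__":
    print(InputExample(a=1, b=2))  # InputExample(a=1) would raise a ValidationError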
| 14 | 30 | 0.622449 | 64 | 0.653061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9683761b0e8ad668daa9ae8b7d5e998f46a35736
| 9,609 |
py
|
Python
|
2020/8/input.py
|
sishtiaq/aoc
|
1c200ebed048bbd8ad6a684aaef8921d826f3d1b
|
[
"Apache-2.0"
] | null | null | null |
2020/8/input.py
|
sishtiaq/aoc
|
1c200ebed048bbd8ad6a684aaef8921d826f3d1b
|
[
"Apache-2.0"
] | null | null | null |
2020/8/input.py
|
sishtiaq/aoc
|
1c200ebed048bbd8ad6a684aaef8921d826f3d1b
|
[
"Apache-2.0"
] | null | null | null |
test = [
'nop +0',
'acc +1',
'jmp +4',
'acc +3',
'jmp -3',
'acc -99',
'acc +1',
'jmp -4',
'acc +6',
]
actual = [
'acc +17',
'acc +37',
'acc -13',
'jmp +173',
'nop +100',
'acc -7',
'jmp +447',
'nop +283',
'acc +41',
'acc +32',
'jmp +1',
'jmp +585',
'jmp +1',
'acc -5',
'nop +71',
'acc +49',
'acc -18',
'jmp +527',
'jmp +130',
'jmp +253',
'acc +11',
'acc -11',
'jmp +390',
'jmp +597',
'jmp +1',
'acc +6',
'acc +0',
'jmp +588',
'acc -17',
'jmp +277',
'acc +2',
'nop +163',
'jmp +558',
'acc +38',
'jmp +369',
'acc +13',
'jmp +536',
'acc +38',
'acc +39',
'acc +6',
'jmp +84',
'acc +11',
'nop +517',
'acc +48',
'acc +47',
'jmp +1',
'acc +42',
'acc +0',
'acc +2',
'acc +24',
'jmp +335',
'acc +44',
'acc +47',
'jmp +446',
'nop +42',
'nop +74',
'acc +45',
'jmp +548',
'jmp +66',
'acc +1',
'jmp +212',
'acc +18',
'jmp +1',
'acc +4',
'acc -16',
'jmp +366',
'acc +0',
'jmp +398',
'acc +45',
'jmp +93',
'acc +40',
'acc +38',
'acc +21',
'nop +184',
'jmp -46',
'nop -9',
'jmp +53',
'acc +46',
'acc +36',
'jmp +368',
'acc +16',
'acc +8',
'acc -9',
'acc -4',
'jmp +328',
'acc -15',
'acc -5',
'acc +21',
'jmp +435',
'acc -5',
'acc +36',
'jmp +362',
'acc +26',
'jmp +447',
'jmp +1',
'jmp +412',
'acc +11',
'acc +41',
'nop -32',
'acc +17',
'jmp -63',
'jmp +1',
'nop +393',
'jmp +62',
'acc +18',
'acc +30',
'nop +417',
'jmp +74',
'acc +29',
'acc +23',
'jmp +455',
'jmp +396',
'jmp +395',
'acc +33',
'nop +137',
'nop +42',
'jmp +57',
'jmp +396',
'acc +7',
'acc +0',
'jmp +354',
'acc +15',
'acc +50',
'jmp -12',
'jmp +84',
'nop +175',
'acc +5',
'acc -2',
'jmp -82',
'acc +1',
'acc +26',
'jmp +288',
'nop -113',
'nop +366',
'acc +45',
'jmp +388',
'acc +21',
'acc +38',
'jmp +427',
'acc +33',
'jmp -94',
'nop -118',
'nop +411',
'jmp +472',
'nop +231',
'nop +470',
'acc +48',
'jmp -124',
'jmp +1',
'acc +5',
'acc +37',
'acc +42',
'jmp +301',
'acc -11',
'acc -17',
'acc +14',
'jmp +357',
'acc +6',
'acc +20',
'acc +13',
'jmp +361',
'jmp -65',
'acc +29',
'jmp +26',
'jmp +329',
'acc +32',
'acc +32',
'acc +17',
'jmp -102',
'acc -6',
'acc +33',
'acc +9',
'jmp +189',
'acc +3',
'jmp -128',
'jmp -142',
'acc +24',
'acc -5',
'jmp +403',
'acc +28',
'jmp +310',
'acc +34',
'acc +4',
'acc +33',
'acc +18',
'jmp +227',
'acc -8',
'acc -15',
'jmp +112',
'jmp +54',
'acc +21',
'acc +23',
'acc +20',
'jmp +320',
'acc +13',
'jmp -77',
'acc +15',
'nop +310',
'nop +335',
'jmp +232',
'acc -3',
'nop +50',
'acc +41',
'jmp +112',
'nop -10',
'acc +29',
'acc +27',
'jmp +52',
'acc +40',
'nop -132',
'acc -16',
'acc +27',
'jmp +309',
'acc -8',
'nop +147',
'acc +20',
'acc +46',
'jmp +202',
'acc +27',
'jmp -43',
'jmp +1',
'acc +33',
'acc -13',
'jmp +300',
'acc +1',
'jmp -202',
'acc -17',
'acc +0',
'acc +34',
'jmp -5',
'nop +335',
'acc -16',
'acc -17',
'jmp -120',
'acc -19',
'acc -13',
'acc +4',
'jmp +368',
'jmp +21',
'acc +39',
'acc +39',
'acc -18',
'jmp -157',
'nop +280',
'acc +33',
'nop -37',
'jmp +32',
'acc -16',
'acc +18',
'acc +46',
'jmp -121',
'acc -19',
'jmp +195',
'acc +28',
'jmp +124',
'jmp +331',
'jmp -228',
'jmp -146',
'jmp +85',
'jmp +60',
'acc +20',
'acc -9',
'jmp +303',
'jmp -122',
'jmp +111',
'acc +32',
'acc +0',
'acc +39',
'acc +29',
'jmp -31',
'nop +320',
'jmp -63',
'jmp +223',
'nop -149',
'acc -12',
'acc -11',
'acc +32',
'jmp +309',
'jmp -13',
'acc -19',
'jmp -123',
'acc +21',
'acc +18',
'acc +49',
'jmp +175',
'acc -14',
'nop -129',
'acc -2',
'acc +31',
'jmp +79',
'acc +23',
'acc +50',
'acc +39',
'acc +7',
'jmp -235',
'jmp -166',
'acc +9',
'jmp +293',
'acc -11',
'jmp +76',
'acc +44',
'acc +3',
'acc +37',
'jmp +123',
'nop -104',
'jmp -157',
'acc +14',
'acc +10',
'acc +28',
'jmp +25',
'acc +37',
'jmp +188',
'jmp -49',
'acc -11',
'jmp -90',
'acc -8',
'jmp +197',
'acc +5',
'jmp +115',
'acc +44',
'jmp -228',
'nop -2',
'acc +46',
'jmp +130',
'nop +183',
'nop +106',
'acc +27',
'acc +37',
'jmp -309',
'acc +28',
'acc -4',
'acc -12',
'acc +38',
'jmp +93',
'acc +8',
'acc +23',
'acc -9',
'acc +6',
'jmp -42',
'acc +10',
'acc +35',
'acc +4',
'jmp -231',
'acc +19',
'acc +7',
'acc +23',
'acc +11',
'jmp -90',
'acc +0',
'nop +158',
'nop -150',
'acc +33',
'jmp +107',
'acc +48',
'acc -2',
'jmp -104',
'acc +6',
'nop -57',
'nop +172',
'acc -11',
'jmp -7',
'acc +6',
'acc +50',
'acc -9',
'acc +12',
'jmp -171',
'acc +3',
'jmp +26',
'acc +42',
'acc +31',
'acc +20',
'acc +32',
'jmp -48',
'acc +13',
'jmp -6',
'jmp +178',
'acc +47',
'jmp -153',
'acc +28',
'nop +74',
'jmp -162',
'acc -15',
'nop -104',
'acc -9',
'jmp -227',
'acc +49',
'acc -19',
'acc +41',
'jmp -318',
'acc +9',
'acc +12',
'acc +7',
'jmp +34',
'jmp +137',
'nop -143',
'acc -8',
'acc +5',
'acc +31',
'jmp -20',
'jmp -237',
'acc +39',
'acc +0',
'jmp -298',
'acc +45',
'acc -19',
'acc +11',
'jmp -151',
'acc +40',
'acc +27',
'nop +150',
'nop -391',
'jmp -341',
'acc +1',
'acc +11',
'acc +18',
'nop -234',
'jmp +77',
'nop +104',
'jmp -65',
'acc +32',
'jmp -27',
'nop -317',
'nop +159',
'acc +14',
'acc -10',
'jmp -348',
'acc +29',
'jmp +32',
'acc +48',
'acc -19',
'jmp +17',
'jmp -201',
'jmp -224',
'nop +26',
'acc -7',
'acc +23',
'acc +46',
'jmp -6',
'acc +22',
'acc +39',
'acc +9',
'acc +23',
'jmp -30',
'jmp -243',
'acc +47',
'acc -15',
'jmp -298',
'jmp -393',
'jmp +1',
'acc +3',
'nop -24',
'acc +7',
'jmp -59',
'acc -6',
'acc +26',
'jmp -102',
'acc +34',
'acc +24',
'jmp -207',
'acc +36',
'acc +40',
'acc +41',
'jmp +1',
'jmp -306',
'jmp +57',
'jmp +1',
'nop +99',
'acc +28',
'jmp -391',
'acc +50',
'jmp -359',
'acc -5',
'jmp +9',
'jmp -355',
'acc +5',
'acc +2',
'jmp -77',
'acc +40',
'acc +28',
'acc +22',
'jmp -262',
'nop -287',
'acc +34',
'acc -4',
'nop +112',
'jmp -195',
'acc +29',
'nop -94',
'nop -418',
'jmp +24',
'jmp -190',
'acc +2',
'jmp -311',
'jmp -178',
'jmp -276',
'acc -12',
'acc -18',
'jmp +62',
'jmp -174',
'nop +31',
'acc +33',
'nop -158',
'jmp -417',
'acc +3',
'acc +21',
'acc +47',
'jmp +87',
'acc +45',
'jmp -77',
'acc +6',
'acc -10',
'jmp +1',
'jmp -240',
'acc +7',
'acc +47',
'jmp -379',
'acc -14',
'acc +50',
'nop -75',
'acc +30',
'jmp +70',
'jmp -392',
'jmp -430',
'acc +22',
'acc -2',
'jmp -492',
'jmp +1',
'acc -6',
'acc +38',
'jmp -36',
'nop -336',
'jmp -32',
'jmp +61',
'acc +20',
'acc -9',
'acc +2',
'jmp -175',
'acc +21',
'acc -2',
'jmp -6',
'jmp -527',
'acc +11',
'acc +16',
'jmp -262',
'jmp +1',
'nop -327',
'acc +29',
'jmp -114',
'acc +11',
'acc +17',
'acc +26',
'nop -104',
'jmp -428',
'nop -178',
'nop -242',
'acc +29',
'acc +5',
'jmp -245',
'jmp -417',
'jmp -278',
'acc +35',
'acc +21',
'jmp +1',
'nop -263',
'jmp +8',
'acc +42',
'jmp -95',
'nop -312',
'acc -11',
'acc +34',
'acc +0',
'jmp +19',
'acc +8',
'acc -13',
'acc +32',
'acc +21',
'jmp -208',
'acc +15',
'acc +39',
'nop -194',
'jmp -280',
'jmp +24',
'nop -516',
'acc +21',
'acc +48',
'jmp -367',
'jmp -121',
'acc +49',
'acc -16',
'jmp -136',
'acc +0',
'jmp -148',
'jmp -85',
'jmp -103',
'nop -446',
'jmp -242',
'acc -12',
'acc +13',
'acc +31',
'acc -1',
'jmp -435',
'nop -420',
'acc +22',
'acc -5',
'jmp -567',
'nop -354',
'acc +11',
'acc +33',
'acc +45',
'jmp -76',
'acc -2',
'acc +0',
'acc +25',
'acc +46',
'jmp -555',
'acc +0',
'acc +11',
'nop -2',
'jmp -394',
'jmp -395',
'acc +8',
'acc +14',
'acc +47',
'acc +22',
'jmp +1',]
| 15.037559 | 15 | 0.338329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,777 | 0.601207 |
96856b2747e7c36d91fb23b1dc5b4f022aab0d68
| 17,925 |
py
|
Python
|
islecler.py
|
mrtyasar/PythonLearn
|
b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4
|
[
"Apache-2.0"
] | null | null | null |
islecler.py
|
mrtyasar/PythonLearn
|
b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4
|
[
"Apache-2.0"
] | null | null | null |
islecler.py
|
mrtyasar/PythonLearn
|
b8fa5d97b9c811365db8457f42f1e1d04e4dc8a4
|
[
"Apache-2.0"
] | null | null | null |
#----------------------------------#
######### ARİTMETİK İŞLEÇLER #######
#----------------------------------#
# + toplama
# - çıkarma
# * çarpma
# / bölme
# ** kuvvet
# % modülüs/kalan bulma
# // taban bölme/ tam bölme
#aritmetik işleçler sayısal işlemler yapmamızı sağlar
print(45+57)#102
#yalnız + ve * işaretleri karakter dizileri içinde kullanılabilir
#karakter dizilerini birleştirmek için + işareti
print("Selam "+"Bugün "+"Hava çok güzel.")#Selam Bugün Hava çok güzel.
# * işareti karakter dizileri tekrarlamak için kullanılabilir
print("w"*3+".tnbc1"+".com")#www.tnbc1.com
# % işleci sayının bölümünden kalanı bulur
print(30 % 4)#2
#sayının kalanını bularak tek mi çift mi olduğunu bulabiliriz
sayi = int(input("Bir sayı giriniz: "))
if sayi % 2 == 0:
print("Girdiğiniz sayı bir çift sayıdır.")
else:
print("Girdiğiniz sayı bir tek sayıdır.")
#eğer bir sayının 2 ye bölümünden kalan 0 ise o sayı çift bir sayıdır
#veya bu % işleci ile sayının başka bir sayı ile tam bölünüp bölünmediğini
# bulabiliriz
print(36 % 9)#0 #yani 36 9 a tam bölünüyor
#program yazalım:
bolunen = int(input("Herhangi bir sayı giriniz: "))
bolen = int(input("Herhangi bir sayı daha giriniz: "))
sablon = "{} sayısı {} sayısına tam".format(bolunen,bolen)
if bolunen % bolen == 0:
print(sablon,"bölünüyor!")
else:
print(sablon,"bölünmüyor!")
#çıktı:
#Herhangi bir sayı giriniz: 2876
#Herhangi bir sayı daha giriniz: 123
#2876 sayısı 123 sayısına tam bölünmüyor!
# bir sayının son basamağını elde etmek içinde kullanabiliriz
#bu yüzden bir sayının 10 bölümünde kalanını buluruz
print(65 % 10)#5
print(543 % 10)#3
#----------------#
#--//-tam bölme--#
#----------------#
a = 6 / 3
print(type(a))#float # 2.0
#pythonda sayıların bölmelerin sonucu kesirli olur yani float tipinde
b = 6 // 3
print(b)#3
print(type(b))#int #tam bölebildik
print(int(a))#2 # bu şekilde de float tipini inte çevirebildik
#----------------#
# ROUND #
#----------------#
#round() bir gömülü fonksiyondur
#bu fonksiyonun bir sayının değerini yuvarlamamızı sağlar
print(round(2.70))#3
print(round(2.30))#2
print(round(5.68,1))#5.7
print(round(5.68,2))#5.68
print(round(7.9,2))#7.9
#-----------------#
# ** #
#-----------------#
#bir sayının karesini bulmak
#bunun için 2 rakamına ihityacımız vardır
print(124**2)#15376
#bir sayının karakökünü bulmak
#karakökünü bulmak için 0.5 e ihtiyacımız vardır
print(625 ** 0.5)#25.0
#eğer ondalıklı sayı yani float tipli sayı istemiyorsak
#ifadeyi işlemi int tipine çevirmemiz gerekir
print(int(625 ** 0.5))#25
#bir sayının küpünü bulmak
#küpünü bulmak için 3 rakamına ihtiyacımız vardır
print(124 ** 3)#1906624
#bu işlemleri pow() fonksiyonları ile de yapabiliriz
print(pow(24,3))#13824
print(pow(96,2))#9216
#-------------------------------------#
# KARŞILAŞTIRMA İŞLEÇLERİ #
#-------------------------------------#
#işlenenler arasında bir karşılaştırma ilişkisi kuran işleçlerdir
# == eşittir
# != eşit değildir
# > büyüktür
# < küçüktür
# >= büyük eşittir
# <= küçük eşittir
parola = "xyz05"
soru = input("parolanız: ")
if soru == parola:
print("doğru parola!")
elif soru != parola:
print("yanlış parola!")
#başka bir örnek:
sayi = input("sayı: ")
if int(sayi) <= 100:
print("sayı 100 veya 100'den küçük")
elif int(sayi) >= 100:
print("sayı 100 veya 100'den büyük")
#-------------------------#
# BOOL İŞLEÇLERİ #
#-------------------------#
#bool da sadece iki değer vardır true ve false
#bilgisayar biliminde olduğu gibi 0 false dir 1 true dur
a = 1
print(a == 1)#a değeri 1 e eşit midir?
#True
print(a == 2)#False
# o değeri ve boş veri tipleri False'Dir
# bunun haricinde kalan her şey True
#bu durumu bool() adlı fonksiyondan yararlanarak öğrenebiliriz
print(bool(4))#True
print(bool("armut"))#True
print(bool(" "))#True
print(bool(2288281))#True
print(bool("0"))#True
print(bool(0))#False
print(bool(""))#False
#bool değerleri yazılım dünyasında önemli bir yeri vardır
#daha önce kullandığım koşul bloglarında koşulun gerçekleşmesi
#veya gerçekleşmemesi bool a bağlıdır yan, true ve false
isim = input("isminiz: ")
if isim == "Ferhat":
print("Ne güzel bir isminiz vardır")
else:
print(isim,"ismini pek sevmem!")
#isminiz: caner
#caner ismini pek sevmem!
# eğer diyoruz isim ferhat ifadesi true ise şunu göster diyoruz
# eğer true değeri dışında herhangi bir şey yani false ise şunu göster diyoruz
isim = input("isminiz: ")
print(isim == "Ferhat")#True
#
b = ""
print(bool(b))#False
#içi boş veri tiplerin her zaman false olacağını bilerek şöyle
#program yazabiliriz:
kullanici = input("Kullanıcı adınız: ")
if bool(kullanici) == True:
print("Teşekkürler")
else:
print("Kullanıcı adı alanı boş bırakılamaz!")
# eğer kullancı bir şeyler yazarsa bool(kullanici) komutu true verecek
# ekrana teşekkürler yazısı yazılacak
# eğer kullanıcı bir şey yazmadan entera tıklar ise false olacak ve else çalışacaktır
#bu işlemi genellikle şu şekilde yazarız:
kullaniciOne = input("Kullanıcı adınızı yazınız: ")
if kullaniciOne:
print("Teşekkürler")
else:
print("kullanıcı adı boş bırakılamaz")
#---------------------------------#
# BOOL İŞLEÇLERİ #
#---------------------------------#
#AND
#OR
#NOT
#and
#gmail giriş sistemi yazalım
#gmail giriş sisteminde kullanıcı adı ve parola yani her ikisi de doğru olmalıdır
kullaniciAdi = input("Kullanıcı adınız: ")
parola = input("Parolanınız: ")
if kullaniciAdi == "AliVeli":
if parola == "123456":
print("Sisteme hoşgeldiniz")
else :
print("Yanlış kullanıcı adı veya parola!")
else:
print("Yanlış kullanıcı adı veya parola")
#bu işlemi daha kolay yazabiliriz
kullanici = input("Kullanıcı adınızı yazınız: ")
sifre = input("şifrenizi yazınız: ")
if kullanici == "aliveli" and sifre == "12345":
print("programa hoşgeldiniz")
else:
print("Yanlış kullanıcı adı veya parola")
#and işlecini kullanarak iki durumu bağladık
#and işlecinin mantığı her iki durumun gerçekleşmesidir
#bütün koşullar gerçekleşiyorsa true döner
#onun haricinde tüm sonuçlar false dir
a = 23
b = 10
print(a == 23)#True
print(b == 10)#True
print(a == 23 and b == 10)#True
print(a == 23 and b == 15)#False
# OR
#or veya demektir
#her iki koşuldan biri true olursa yine de çalışır
c = 10
d = 100
print(c == 10)#True
print(d == 100)#True
print(c == 1 or d == 100)#True
# c koşulu yanlış olsa da d koşulu doğru olduğu için çıktı True oldu
# sınavdan alınan notların harf karşılığını gösteren program
x = int(input("Notunuz: "))
if x > 100 or x < 0:
print("Böyle bir not yok")
elif x >= 90 and x <= 100:
print("A aldınız")
elif x >= 80 and x <= 89:
print("B aldınız.")
elif x >= 70 and x <= 79:
print("C aldınız")
elif x >=60 and x <= 69:
print("D aldınız.")
elif x >= 0 and x <= 59:
print("F aldınız.")
#şu şekilde daha kısa biçimde yazabiliriz
z = int(input("notunuz: "))
if x > 100 or x < 0:
print("Böyle bir not yoktur.")
elif z >= 90 <= 100:
print("A aldınız")
elif z >=80 <= 89:
print("B aldınız")
elif z >= 70 <= 79:
print("C aldınız")
elif z >= 60 <=69:
print("D aldınız")
elif z >=0 <=59:
print("F aldınız")
# and i kaldırdığımızda aynı sonucu alabiliyoruz
## not ##
# not bir bool işlecidir. türkçe karşılığı değil demektir
# özellikle kullanıcı tarafından değer girilip girilmediğini
#denetlmek için kullanılır
#eğer kullanıcı değer girilise not değeri çalışacak
#eğer kullanıcı boş bırakılsa true değeri çalışacak
parola = input("Şifrenizi giriniz Lütfen: ")
if not parola:
print("Şifre boş bırakılamaz")
#şifrenizi giriniz yazısı geldiğinizde cevap vermeyip entera tıkladım
#değer true olunca print fonksiyonu çalıştı
print(bool(parola))#false
#makineye şunu soruyoruz aslında:
#parola boş bırakılmamış değil mi?
#makinede bize: hayır boş bırakılmış diyor
print(bool(not parola))#True
#makineye parola boş bırakılmış değil mi? sorusunu soruyoruz
#makine de bize true evet boş bırakılmış diyor
#yani ikisinin arasındaki fark bırakılmamış/bırakılmış değil? midir
#yani not isleçi makineye "boş bırakılmış değil mi?" sorusunu soruyor
#eğer boş bırakıldıysa cevap True oluyor evet bırakılmış demek oluyor
#----------------------------------#
# Değer Atama İşleçleri #
#----------------------------------#
# değer atama işlemi "=" işleciyle yapılır
a = 25
#a değişkenin içine 25 değerini atadık
## += işleci
#değişkenin değerine değer eklemek için kullanılır
a += 10 # a değişkenin değerine 10 değeri daha ekledik
print(a) # 35
## -=
#değişkenin değerinin düşürmek yani çıkarmak için kullanılır
a -= 5 #a değişkeninden 5 değer çıkardık
print(a)#30
## /=
# değişkenin değeriyle bölme işlemi yapmak için kullanılır
a /= 2 #a değişkenin değerini 2 sayısıyla böldük
print(a)#15.0
## *=
#değişkenin değerini çarpmak için kullanılır
a *= 4 # a değişkenin değerini 4 ile çarptık
print(a)#60.0
## %=
#değişkenin değerinin bölme işleminde kalanını bulmak için kullanılır
a %= 7 #a değişkenin değerinin 7 ile bölünmesinden kalanını bulduk
print(a)#4.0
## **=
#değişkenin değerinin kuvvetini, küpünü ve karakökünü bulmak için kullanılır
a **= 2#a değişkenin kuvvetini bulduk
print(a)#16.0
## //=
#değişkenin değerinin tam bölünmesini bulmak için kullanılır
a //= 2
print(a)#8
#bu işleçler normalde şu işlemi yapar örneğin
#a = a + 5
#print(a)#5
#fakat bu işlem hızlı bir seçenek değildir ama mantıksal olarak bu şekilde işlem yapar
#işleçlerin sağ ve solda olma farkı
# += veya =+ -= veya =-
a =- 5
print(a) # -5
# a değerine -5 değerini verdik
## := (walrus operatörü)
#örnek:
giris = len(input("Adın ne?"))
if giris < 4:
print("Adın kısaymış")
elif giris < 6:
print("Adın biraz uzunmuş")
else:
print("Uzun bir adın varmış.")
#bu kodu := işlecini kullanarakta yazabiliriz
if (giris := len(input("Adınız nedir?"))) < 4:
print("Adın kısaymış")
elif giris < 6:
print("Adın biraz uzunmuş")
else:
print("Çok uzun bir adın varmış.")
# := tek avantajı işlemimizi tek satıra sığdırması
# çok kullanılmaz
#zaten yeni bir işleç olduğundan sadece python 3.8.1 de çalışır
#--------------------------------#
# AİTLİK İŞLEÇLERİ #
#--------------------------------#
#bir karakter dizisinin değişkenin içinde bulunup bulunmadığını
#kontrol edebilmemizi sağlar
#bu işlemi in adlı işleç sayesinde yaparız
a = "asdfg"
print("a" in a)#True
#makineye "a" değeri a değişkenin içinde var mı? sorruyoruz
print("A" in a)#False
print("j" in a)#False
# "j" değeri a değişkenin içinde var mı? cevap: Hayır yok False
#--------------------------------#
#       IDENTITY OPERATORS       #
#--------------------------------#
# in Python everything, i.e. every object, has an identity number behind the scenes
# to find it out we use the id() function
a = 50
print(id(a))  # 140705130925248
# we asked it to print the identity number of a
name = "Hello my name is Murat"
print(id(name))  # 2704421625648
# every object in Python has a single, unique identity
# up to a certain value Python keeps objects cached with the same identity number
nameOr = 100
print(id(nameOr))  # 140705130926848
nameOrOne = 100
print(id(nameOrOne))  # 140705130926848
# values beyond that range are stored with different identity numbers
y = 1000
print(id(y))  # 2467428862544
u = 1000
print(id(u))  # 1586531830352
# even though they appear to hold the same value, Python gives them different identities
# the reason is that Python only caches small objects
# larger objects get a fresh allocation instead
# to discover which values count as small and which as large:
for k in range(-1000,1000):
    for v in range(-1000,1000):
        if k is v:
            print(k)
# according to the output, values from -5 to 256 are kept in the cache
## is
number = 1000
numberOne = 1000
print(id(number))  # 2209573079632
print(id(numberOne))  # 2756858382928
print(number is 1000)  # False
print(numberOne is 1000)  # False
# "is" asks: are these the same object, judged by their identities?
# "is" and "==" are often confused; the difference between them is:
# "is" looks at the objects' identities to decide whether they are the same object
# "==" looks at the objects' values to decide whether they are equal
print(number is 1000)  # False
# the answer is False because they have separate identities
print(number == 1000)  # True
# the answer is True because both have the value 1000
# roughly speaking, what "is" does behind the scenes is this:
print(id(number) == id(1000))  # False
ornek = "Python"
print(ornek is "Python")  # True
ornekOne = "Python is a powerful and easy programming language"
print(ornekOne is "Python is a powerful and easy programming language")  # False
print(ornekOne == "Python is a powerful and easy programming language")  # True
# just as with numbers, small strings are kept in the cache,
# while large strings get a new identity and a new allocation
##              WORKED EXAMPLES              ##
#------------------------------------#
#        A SIMPLE CALCULATOR         #
#------------------------------------#
# our program will be a calculator
# the user will enter a number and decide whether to add, subtract, and so on
# the program will then perform the chosen operation
# let's offer the user some options:
giris = """
(1) add
(2) subtract
(3) multiply
(4) divide
(5) square
(6) square root
"""
print(giris)
soru = input("Enter the number of the operation you want to perform: ")  # ask the user which operation to run
if soru == "1":
    sayi1 = int(input("Enter the first number for the addition: "))
    sayi2 = int(input("Enter the second number for the addition: "))
    print(sayi1,"+",sayi2,"=",sayi1+sayi2)
elif soru == "2":
    sayi3 = int(input("Enter the first number for the subtraction: "))
    sayi4 = int(input("Enter the second number for the subtraction: "))
    print(sayi3,"-",sayi4,"=",sayi3-sayi4)
elif soru == "3":
    sayi5 = int(input("Enter the first number for the multiplication: "))
    sayi6 = int(input("Enter the second number for the multiplication: "))
    print(sayi5,"*",sayi6,"=",sayi5*sayi6)
elif soru == "4":
    sayi7 = int(input("Enter the first number for the division: "))
    sayi8 = int(input("Enter the second number for the division: "))
    print(sayi7,"/",sayi8,"=",sayi7/sayi8)
elif soru == "5":
    sayi9 = int(input("Enter the number whose square you want to compute: "))
    print("square of",sayi9,"=",sayi9 ** 2)
elif soru == "6":
    sayi10 = int(input("Enter the number whose square root you want to compute: "))
    print("square root of",sayi10,"=",sayi10 ** 0.5)
else:
    print("Invalid input.")
    print("Please enter one of the options below: ",giris)
"""
In essence the program works like this:
if this situation holds:
    do this operation
otherwise, if that situation holds:
    do that operation
and if something completely different holds:
    do something else
"""
#-----------------------------------#
# A PROGRAM THAT ACTS ON THE PYTHON VERSION
#-----------------------------------#
# code written for Python's 3.x series does not run on the 2.x series
# we may want to control which Python version our code is run with,
# or show the user an error message if code written for 3.x is run on 2.x
# let's import the sys module
import sys
# access the variable we want inside the module
print(sys.version_info)
# sys.version_info(major=3, minor=7, micro=4, releaselevel='final', serial=0)
# let's also look at the output the version variable gives
print(sys.version)  # 3.7.4 (default, Aug  9 2019, 18:34:13) [MSC v.1915 64 bit (AMD64)]
# however, it is the version_info variable that is useful for our purposes
# some of the fields visible in version_info's output:
# major, the main release number of the Python series
# minor, the sub-release number
# micro, gives the lowest-level release number
# to reach these values:
print(sys.version_info.major)  # 3
print(sys.version_info.minor)  # 7
print(sys.version_info.micro)  # 4
# let's write a program that checks which version our program must be run with
# we will use major and minor for this; micro can also be used if needed
import sys
_2x_metni = """
Python'ın 2.x sürümlerinden birini kullanıyorsunuz
Programı çalıştırabilmek için sisteminizde Python'ın
3.x sürümlerinden biri kurulu olmalı."""
_3x_metni = "Programa Hoşgeldiniz!"
if sys.version_info.major < 3:
    print(_2x_metni)
else:
    print(_3x_metni)
# first we import the module so that we can use the tools inside it
# then we create an error message for anyone using the 2.x series
# since variable names cannot start with a digit, we started them with an underscore
# then we created a welcome text for Python 3 users
# we said: if the major number, i.e. the main version, is less than 3, print the 2.x text
# and for every other case, print _3x_metni
# in the 2.x series the interpreter could not handle Turkish characters
# to work around that:
# -*- coding: utf-8 -*-
# we used to paste this line at the top; in 3.x the problem is gone
# however, that line only keeps the program from crashing; Turkish characters still come out garbled
# for example, _2x_metni looks like this when run on a 2.x version:
"""
Python'ın 2.x sürümlerinden birini kullanıyorsunuz.
Programı çalıştırabilmek için sisteminizde Python'ın
3.x sürümlerinden biri kurulu olmalı."""
# to prevent that, we put a u prefix in front of our string
# the u comes from the concept of unicode
_2x_metni = u"""
Python'ın 2.x sürümlerinden birini kullanıyorsunuz.
Programı çalıştırabilmek için sisteminizde Python'ın
3.x sürümlerinden biri kurulu olmalı."""
# we were able to print an error message for versions below 3
# now we can also print an error message for older minor versions such as 3.4
hataMesaj3 = u"""
Şuan Python'un eski sürümünü kullanıyorsunuz.
Lütfen güncelleyiniz!
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
    print("bla bla")
else:
    print(hataMesaj3)
# this way we show an error message to users running anything below 3.8
# we can also use the version variable for this check
if "3.7" in sys.version:
    print("You are on an up-to-date version")
else:
    print(hataMesaj3)
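# a compact alternative (illustrative sketch, not from the original lesson):
# sys.version_info behaves like a tuple, so it can be compared directly
# against a (major, minor) pair instead of checking each field separately
if sys.version_info < (3, 8):
    print(hataMesaj3)
else:
    print("bla bla")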
| 27.283105 | 108 | 0.699749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,714 | 0.767033 |
9685eb2eb0a92cde4c6bccfdc2397e3ea1a606c7
| 1,056 |
py
|
Python
|
twindb_backup/modifiers/gzip.py
|
akuzminsky/twindb-mysql-backup
|
35755f18efb372dd05f856ca4732fba796de2549
|
[
"Apache-2.0"
] | 1 |
2019-03-22T00:04:40.000Z
|
2019-03-22T00:04:40.000Z
|
twindb_backup/modifiers/gzip.py
|
akuzminsky/twindb-mysql-backup
|
35755f18efb372dd05f856ca4732fba796de2549
|
[
"Apache-2.0"
] | null | null | null |
twindb_backup/modifiers/gzip.py
|
akuzminsky/twindb-mysql-backup
|
35755f18efb372dd05f856ca4732fba796de2549
|
[
"Apache-2.0"
] | 1 |
2019-03-21T16:03:11.000Z
|
2019-03-21T16:03:11.000Z
|
# -*- coding: utf-8 -*-
"""
Module defines modifier that compresses a stream with gzip
"""
from contextlib import contextmanager
from subprocess import Popen, PIPE
from twindb_backup.modifiers.base import Modifier
class Gzip(Modifier):
"""
Modifier that compresses the input_stream with gzip.
"""
@contextmanager
def get_stream(self):
"""
Compress the input stream and return it as the output stream
:return: output stream handle
:raise: OSError if failed to call the gzip command
"""
with self.input as input_stream:
proc = Popen(['gzip', '-c', '-'],
stdin=input_stream,
stdout=PIPE)
yield proc.stdout
proc.communicate()
def revert_stream(self):
"""
Decompress the input stream and return it as the output stream
:return: output stream handle
        :raise: OSError if failed to call the gunzip command
"""
return self._revert_stream(['gunzip', '-c'])
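# Illustrative, self-contained sketch of the same pipe pattern used above
# (assumed example, not part of the twindb-backup module; file names are
# placeholders): compressing an already-open file object by streaming it
# through the external gzip command.
#
# from subprocess import Popen, PIPE
#
# with open('backup.sql', 'rb') as source, open('backup.sql.gz', 'wb') as target:
#     proc = Popen(['gzip', '-c', '-'], stdin=source, stdout=target)
#     proc.communicate()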
| 27.076923 | 70 | 0.604167 | 838 | 0.793561 | 443 | 0.419508 | 463 | 0.438447 | 0 | 0 | 547 | 0.517992 |
968781224af215504c720d13564d694353e11612
| 8,795 |
py
|
Python
|
heat/objects/resource.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
heat/objects/resource.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
heat/objects/resource.py
|
larsks/heat
|
11064586e90166a037f8868835e6ce36f7306276
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resource object."""
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import retrying
import six
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.db import api as db_api
from heat.objects import base as heat_base
from heat.objects import fields as heat_fields
from heat.objects import resource_data
cfg.CONF.import_opt('encrypt_parameters_and_properties', 'heat.common.config')
def retry_on_conflict(func):
def is_conflict(ex):
return isinstance(ex, exception.ConcurrentTransaction)
wrapper = retrying.retry(stop_max_attempt_number=11,
wait_random_min=0.0, wait_random_max=2.0,
retry_on_exception=is_conflict)
return wrapper(func)
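# Illustrative usage sketch (assumed example, not part of the original module):
# a callable that may raise exception.ConcurrentTransaction can be wrapped so
# it is retried up to 11 times with a short random wait between attempts, e.g.
#
#     @retry_on_conflict
#     def _update(cnxt, resource_id, values):
#         Resource.update_by_id(cnxt, resource_id, values)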
class Resource(
heat_base.HeatObject,
base.VersionedObjectDictCompat,
base.ComparableVersionedObject,
):
fields = {
'id': fields.IntegerField(),
'uuid': fields.StringField(),
'stack_id': fields.StringField(),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
'physical_resource_id': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'status_reason': fields.StringField(nullable=True),
'action': fields.StringField(nullable=True),
'rsrc_metadata': heat_fields.JsonField(nullable=True),
'properties_data': heat_fields.JsonField(nullable=True),
'properties_data_encrypted': fields.BooleanField(default=False),
'data': fields.ListOfObjectsField(
resource_data.ResourceData,
nullable=True
),
'engine_id': fields.StringField(nullable=True),
'atomic_key': fields.IntegerField(nullable=True),
'current_template_id': fields.IntegerField(),
'needed_by': heat_fields.ListField(nullable=True, default=None),
'requires': heat_fields.ListField(nullable=True, default=None),
'replaces': fields.IntegerField(nullable=True),
'replaced_by': fields.IntegerField(nullable=True),
'root_stack_id': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(resource, context, db_resource):
if db_resource is None:
return None
for field in resource.fields:
if field == 'data':
resource['data'] = [resource_data.ResourceData._from_db_object(
resource_data.ResourceData(context), resd
) for resd in db_resource.data]
else:
resource[field] = db_resource[field]
if resource.properties_data_encrypted and resource.properties_data:
properties_data = {}
for prop_name, prop_value in resource.properties_data.items():
method, value = prop_value
decrypted_value = crypt.decrypt(method, value)
prop_string = jsonutils.loads(decrypted_value)
properties_data[prop_name] = prop_string
resource.properties_data = properties_data
resource._context = context
resource.obj_reset_changes()
return resource
@classmethod
def get_obj(cls, context, resource_id, refresh=False):
resource_db = db_api.resource_get(context, resource_id,
refresh=refresh)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def get_all(cls, context):
resources_db = db_api.resource_get_all(context)
resources = [
(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_name, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def create(cls, context, values):
return cls._from_db_object(cls(context), context,
db_api.resource_create(context, values))
@classmethod
def delete(cls, context, resource_id):
db_api.resource_delete(context, resource_id)
@classmethod
def exchange_stacks(cls, context, resource_id1, resource_id2):
return db_api.resource_exchange_stacks(
context,
resource_id1,
resource_id2)
@classmethod
def get_all_by_stack(cls, context, stack_id, filters=None):
resources_db = db_api.resource_get_all_by_stack(context, stack_id,
filters)
return cls._resources_to_dict(context, resources_db)
@classmethod
def _resources_to_dict(cls, context, resources_db):
resources = [
(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_name, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def get_all_active_by_stack(cls, context, stack_id):
resources_db = db_api.resource_get_all_active_by_stack(context,
stack_id)
resources = [
(
resource_id,
cls._from_db_object(cls(context), context, resource_db)
)
for resource_id, resource_db in six.iteritems(resources_db)
]
return dict(resources)
@classmethod
def get_all_by_root_stack(cls, context, stack_id, filters):
resources_db = db_api.resource_get_all_by_root_stack(
context,
stack_id,
filters)
return cls._resources_to_dict(context, resources_db)
@classmethod
def purge_deleted(cls, context, stack_id):
return db_api.resource_purge_deleted(context, stack_id)
@classmethod
def get_by_name_and_stack(cls, context, resource_name, stack_id):
resource_db = db_api.resource_get_by_name_and_stack(
context,
resource_name,
stack_id)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def get_by_physical_resource_id(cls, context, physical_resource_id):
resource_db = db_api.resource_get_by_physical_resource_id(
context,
physical_resource_id)
return cls._from_db_object(cls(context), context, resource_db)
@classmethod
def update_by_id(cls, context, resource_id, values):
db_api.resource_update_and_save(context, resource_id, values)
def update_and_save(self, values):
db_api.resource_update_and_save(self._context, self.id, values)
def select_and_update(self, values, expected_engine_id=None,
atomic_key=0):
return db_api.resource_update(self._context, self.id, values,
atomic_key=atomic_key,
expected_engine_id=expected_engine_id)
def refresh(self):
resource_db = db_api.resource_get(self._context, self.id, refresh=True)
return self.__class__._from_db_object(
self,
self._context,
resource_db)
@staticmethod
def encrypt_properties_data(data):
if cfg.CONF.encrypt_parameters_and_properties and data:
result = {}
for prop_name, prop_value in data.items():
prop_string = jsonutils.dumps(prop_value)
encrypted_value = crypt.encrypt(prop_string)
result[prop_name] = encrypted_value
return (True, result)
return (False, data)
def update_metadata(self, metadata):
if self.rsrc_metadata != metadata:
rows_updated = self.select_and_update(
{'rsrc_metadata': metadata}, self.engine_id, self.atomic_key)
if not rows_updated:
action = _('metadata setting for resource %s') % self.name
raise exception.ConcurrentTransaction(action=action)
| 37.909483 | 79 | 0.648778 | 7,347 | 0.835361 | 0 | 0 | 4,752 | 0.540307 | 0 | 0 | 967 | 0.109949 |
96897393cb06471fec8c0393bde8aeb577d2894c
| 228 |
py
|
Python
|
pyrez/exceptions/IdOrAuthEmpty.py
|
CLeendert/Pyrez
|
598d72d8b6bb9484f0c42c6146a262817332c666
|
[
"MIT"
] | 25 |
2018-07-26T02:32:14.000Z
|
2021-09-20T03:26:17.000Z
|
pyrez/exceptions/IdOrAuthEmpty.py
|
CLeendert/Pyrez
|
598d72d8b6bb9484f0c42c6146a262817332c666
|
[
"MIT"
] | 93 |
2018-08-26T11:44:25.000Z
|
2022-03-28T08:22:18.000Z
|
pyrez/exceptions/IdOrAuthEmpty.py
|
CLeendert/Pyrez
|
598d72d8b6bb9484f0c42c6146a262817332c666
|
[
"MIT"
] | 13 |
2018-09-05T09:38:07.000Z
|
2021-08-16T04:39:41.000Z
|
from .PyrezException import PyrezException
class IdOrAuthEmpty(PyrezException):
    """Raised when the current credentials are invalid or missing."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| 38 | 73 | 0.763158 | 184 | 0.807018 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.315789 |
968998458ff06ecf90db3e1b78179a0db936b801
| 907 |
py
|
Python
|
leetcode/practise/59.py
|
Weis-98/learning_journey
|
2ef40880d4551d5d44fa71eff98eca98361022d0
|
[
"MIT"
] | null | null | null |
leetcode/practise/59.py
|
Weis-98/learning_journey
|
2ef40880d4551d5d44fa71eff98eca98361022d0
|
[
"MIT"
] | null | null | null |
leetcode/practise/59.py
|
Weis-98/learning_journey
|
2ef40880d4551d5d44fa71eff98eca98361022d0
|
[
"MIT"
] | null | null | null |
class Solution:
def generateMatrix(self, n):
matrix = []
num = 1
for i in range(n):
matrix.append([0 for i in range(n)])
top = 0
bottom = n - 1
left = 0
right = n - 1
while top <= bottom and left <= right:
for i in range(left, right + 1):
matrix[top][i] = num
num += 1
for j in range(top + 1, bottom + 1):
matrix[j][right] = num
num += 1
if top < bottom and left < right:
for i in range(right - 1, left - 1, -1):
matrix[bottom][i] = num
num += 1
for j in range(bottom - 1, top, -1):
matrix[j][left] = num
num += 1
top, bottom, left, right = top + 1, bottom - 1, left + 1, right - 1
return matrix
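# quick check of the spiral fill (illustrative addition, not part of the
# original submission): for n = 3 the matrix is filled clockwise starting
# from the top-left corner
if __name__ == "__main__":
    print(Solution().generateMatrix(3))  # [[1, 2, 3], [8, 9, 4], [7, 6, 5]]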
| 34.884615 | 79 | 0.406836 | 907 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
968a0ed358f9602c37b149e837fd6e25f4d5114f
| 4,492 |
py
|
Python
|
capnpy/segment/builder.py
|
GambitResearch/capnpy
|
3b8d9ed0623e160f69dee07ec2fc6303683c2a3c
|
[
"MIT"
] | 45 |
2016-10-28T10:16:07.000Z
|
2022-03-06T20:16:57.000Z
|
capnpy/segment/builder.py
|
GambitResearch/capnpy
|
3b8d9ed0623e160f69dee07ec2fc6303683c2a3c
|
[
"MIT"
] | 42 |
2016-12-20T18:10:53.000Z
|
2021-09-08T12:29:04.000Z
|
capnpy/segment/builder.py
|
GambitResearch/capnpy
|
3b8d9ed0623e160f69dee07ec2fc6303683c2a3c
|
[
"MIT"
] | 21 |
2017-02-28T06:39:15.000Z
|
2021-09-07T05:30:46.000Z
|
import struct
from six import binary_type
from capnpy import ptr
from capnpy.packing import mychr
from capnpy.printer import print_buffer
class SegmentBuilder(object):
def __init__(self, length=None):
self.buf = bytearray()
def get_length(self):
return len(self.buf)
def as_string(self):
return binary_type(self.buf)
def _print(self):
print_buffer(self.as_string())
def write_generic(self, ifmt, i, value):
struct.pack_into(mychr(ifmt), self.buf, i, value)
def write_int8(self, i, value):
struct.pack_into('b', self.buf, i, value)
def write_uint8(self, i, value):
struct.pack_into('B', self.buf, i, value)
def write_int16(self, i, value):
struct.pack_into('h', self.buf, i, value)
def write_uint16(self, i, value):
struct.pack_into('H', self.buf, i, value)
def write_int32(self, i, value):
struct.pack_into('i', self.buf, i, value)
def write_uint32(self, i, value):
struct.pack_into('I', self.buf, i, value)
def write_int64(self, i, value):
struct.pack_into('q', self.buf, i, value)
def write_uint64(self, i, value):
struct.pack_into('Q', self.buf, i, value)
def write_float32(self, i, value):
struct.pack_into('f', self.buf, i, value)
def write_float64(self, i, value):
struct.pack_into('d', self.buf, i, value)
def write_bool(self, byteoffset, bitoffset, value):
current = struct.unpack_from('B', self.buf, byteoffset)[0]
current |= (value << bitoffset)
struct.pack_into('B', self.buf, byteoffset, current)
def write_slice(self, i, src, start, n):
self.buf[i:i+n] = src.buf[start:start+n]
def allocate(self, length):
# XXX: check whether there is a better method to zero-extend the array in PyPy
result = len(self.buf)
self.buf += b'\x00'*length
return result
def alloc_struct(self, pos, data_size, ptrs_size):
"""
Allocate a new struct of the given size, and write the resulting pointer
        at position pos. Return the newly allocated position.
"""
length = (data_size+ptrs_size) * 8
result = self.allocate(length)
        offset = result - (pos+8)
        p = ptr.new_struct(offset//8, data_size, ptrs_size)
self.write_int64(pos, p)
return result
def alloc_list(self, pos, size_tag, item_count, body_length):
"""
Allocate a new list of the given size, and write the resulting pointer
        at position pos. Return the newly allocated position.
"""
body_length = ptr.round_up_to_word(body_length)
result = self.allocate(body_length)
        offset = result - (pos+8)
        p = ptr.new_list(offset//8, size_tag, item_count)
self.write_int64(pos, p)
return result
def alloc_text(self, pos, s, trailing_zero=1):
if s is None:
self.write_int64(pos, 0)
return -1
n = len(s)
nn = n + trailing_zero
result = self.alloc_list(pos, ptr.LIST_SIZE_8, nn, nn)
self.buf[result:result+n] = s
# there is no need to write the trailing 0 as the byte is already
# guaranteed to be 0
return result
def alloc_data(self, pos, s):
return self.alloc_text(pos, s, trailing_zero=0)
def copy_from_struct(self, dst_pos, structcls, value):
if value is None:
self.write_int64(dst_pos, 0)
return
if not isinstance(value, structcls):
raise TypeError("Expected %s instance, got %s" %
(structcls.__class__.__name__, value))
self.copy_from_pointer(dst_pos, value._seg, value._as_pointer(0), 0)
def copy_from_pointer(self, dst_pos, src, p, src_pos):
return copy_pointer(src, p, src_pos, self, dst_pos)
def copy_inline_struct(self, dst_pos, src, p, src_pos):
"""
Similar to copy_from_pointer but:
1. it assumes that p is a pointer to a struct
2. it does NOT allocate a new struct in dst_pos: instead, it writes
the struct directly into dst_pos
"""
return _copy_struct_inline(src, p, src_pos, self, dst_pos)
def copy_from_list(self, pos, item_type, lst):
return copy_from_list(self, pos, item_type, lst)
from capnpy.segment._copy_pointer import copy_pointer, _copy_struct_inline
from capnpy.segment._copy_list import copy_from_list
| 33.274074 | 86 | 0.629564 | 4,222 | 0.939893 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.174533 |
968a32ab6cc052ecd19269370677c3356ed68536
| 1,028 |
py
|
Python
|
test_project/test_app/migrations/0002_auto_20180514_0720.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | 4 |
2018-05-11T18:07:32.000Z
|
2019-07-30T13:38:49.000Z
|
test_project/test_app/migrations/0002_auto_20180514_0720.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | 6 |
2018-02-26T04:46:36.000Z
|
2019-04-10T06:17:12.000Z
|
test_project/test_app/migrations/0002_auto_20180514_0720.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 07:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='data',
name='address',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='data',
name='age',
field=models.IntegerField(default=22),
),
migrations.AlterField(
model_name='data',
name='name',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='data',
name='price',
field=models.IntegerField(default=10),
),
migrations.AlterField(
model_name='data',
name='type',
field=models.IntegerField(default=0),
),
]
| 25.073171 | 50 | 0.535992 | 869 | 0.845331 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.15856 |
968a7365ef23f40cb8759f1ce7dce18d8b7a6114
| 6,371 |
py
|
Python
|
file.py
|
AllenGao6/P2P-File-Sharing
|
059f9ec75d10b7802d1a363f718ade640ac18223
|
[
"MIT"
] | null | null | null |
file.py
|
AllenGao6/P2P-File-Sharing
|
059f9ec75d10b7802d1a363f718ade640ac18223
|
[
"MIT"
] | null | null | null |
file.py
|
AllenGao6/P2P-File-Sharing
|
059f9ec75d10b7802d1a363f718ade640ac18223
|
[
"MIT"
] | null | null | null |
'''
objective of file.py:
define the object class for file,
information to include:
name
size of file
chunk list (should automaticlly be splitted into chunks, each chunk should have indicator)
'''
import base64
import sys
import hashlib
JPG, PNG, PDF, MP3, MP4, UNKNOWN = 1, 2, 3, 4, 5, 0
class File:
file_name = ""
file_type = ""
file_size = 0
chunk_list_size = 0
# will be decided later how to handle indicator and hashing for each chunk, most likely will define a chunk hash function
chunk_list = []
hashed_chunk_list = []
SINGLE_CHUNK_SIZE = 131072
def __init__(self, file_name, file_size=None, full_info=False):
self.file_name = file_name
self.file_type = self.get_fileType()
if not file_size:
self.chunk_list = []
self.hashed_chunk_list = []
self.chunkize()
else:
self.file_size = file_size
if full_info:
self.chunk_list = [True] * (self.file_size // self.SINGLE_CHUNK_SIZE + 1)
self.chunk_list_size = len(self.chunk_list)
self.hashed_chunk_list = [True] * (self.file_size // self.SINGLE_CHUNK_SIZE + 1)
else:
self.chunk_list = [None] * (self.file_size // self.SINGLE_CHUNK_SIZE + 1)
self.chunk_list_size = len(self.chunk_list)
self.hashed_chunk_list = [None] * (self.file_size // self.SINGLE_CHUNK_SIZE + 1)
# create the file in local folder
def create_file(self):
true_file_name = self.file_name.split('/')[-1]
converted_string = b"".join(self.chunk_list)
if len(converted_string) != self.file_size:
print("Error when constructing file, file size does not match")
f = open('FILE_RECIEVE/'+true_file_name, 'wb')
f.write(base64.b64decode((converted_string)))
f.close()
print("File Created locally!!!!!!!!")
# pre-define chunk indicator given the size of the file
def chunkize(self):
# the encoded binary format of file
if self.file_type == JPG or self.file_type == PNG or self.file_type == PDF or self.file_type == MP4:
# base64 encode all these file from local file path
with open(self.file_name, "rb") as image2string:
converted_string = base64.b64encode(image2string.read())
print(len(converted_string))
# set the total file size in byte
self.file_size = len(converted_string)
for i in range(0, (self.file_size // self.SINGLE_CHUNK_SIZE)+1):
if i == len(converted_string) // self.SINGLE_CHUNK_SIZE:
chunk_block = converted_string[i*self.SINGLE_CHUNK_SIZE: self.file_size]
else:
chunk_block = converted_string[i*self.SINGLE_CHUNK_SIZE: (i+1)*self.SINGLE_CHUNK_SIZE]
# append original block and hashed block into local storage
# hashed block is used to verification for data integrity
self.chunk_list.append(chunk_block)
self.hashed_chunk_list.append(self.hash_chunk(chunk_block))
# set the total chunk list size
self.chunk_list_size = len(self.chunk_list)
        elif self.file_type == MP3:
            print("not supported yet")
else:
print("INVALID FILE TYPE")
def get_fileType(self):
extention = self.file_name.split('.')[-1]
if extention == "jpg":
return JPG
elif extention == "png":
return PNG
elif extention == 'pdf':
return PDF
elif extention == 'mp3':
return MP3
elif extention == 'mp4':
return MP4
else:
return UNKNOWN
# get the name of this file
def getName(self):
return self.file_name
# get the size of this file
def get_file_size(self):
return self.file_size
# get the file type
def get_file_type(self):
return self.file_type
# define a hash algorithm for the chunk
def hash_chunk(self, data):
# implement sha1 for verification
obj_sha3_256 = hashlib.sha3_256(data)
# print in hexadecimal
# print("\nSHA3-256 Hash: ", obj_sha3_256.hexdigest())
return obj_sha3_256.hexdigest()
# add a piece to chunk_list
def add_chunk(self, chunk, index):
self.chunk_list[index] = chunk
# add a piece to hash chunk_list
def add_hash_chunk(self, hash_chunk, index):
self.hashed_chunk_list[index] = hash_chunk
# get the size of chunk_list
def get_chunk_list_size(self):
return self.chunk_list_size
# check the chunk of a file
def check_file_chunk(self, index):
if index >= self.chunk_list_size:
return False
# print("actual chunk_size:", len(self.chunk_list))
# print("chunk_size:", self.chunk_list_size)
return self.chunk_list[index] != None
# get a particular chunk in chunk_list
def get_index_chunk(self, index):
if self.check_file_chunk(index):
return self.chunk_list[index]
return None
# get a particular chunk hash in chunk_list
def get_index_chunk_hash(self, index):
if self.check_file_chunk(index):
return self.hashed_chunk_list[index]
return None
# register a chunk in file given index
def register_chunk(self, index):
try:
self.chunk_list[index] = True
return True
except:
return False
# get chunk ownership info
def get_chunk_info(self, find_miss=False):
# this part of the code is not optimized, but this should do the trick
index_list = []
for i in range(len(self.chunk_list)):
if find_miss:
if self.chunk_list[i] == None:
index_list.append(i)
else:
if self.chunk_list[i] != None:
index_list.append(i)
return index_list
# get the number of available chunk in file
def get_aval_chunk_size(self):
result = self.get_chunk_info(find_miss=False)
return len(result)
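# Illustrative usage sketch (assumed example, not part of the original module;
# the path below is a placeholder): wrap a local file, then inspect how many
# 128 KiB base64 chunks it was split into.
#
# f = File('FILE_SEND/sample.jpg')
# print(f.get_file_size(), "encoded bytes")
# print(f.get_chunk_list_size(), "chunks")
# print(f.check_file_chunk(0))  # True once chunk 0 is present locally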
| 33.888298 | 125 | 0.605086 | 6,043 | 0.948517 | 0 | 0 | 0 | 0 | 0 | 0 | 1,594 | 0.250196 |
968afeca8b633bb5b9753043627e7d2f6a06eb50
| 360 |
py
|
Python
|
pdx-extract/tests/test_utils.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
pdx-extract/tests/test_utils.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
pdx-extract/tests/test_utils.py
|
michaelheyman/PSU-Code-Review
|
5a55d981425aaad69dc9ee06baaaef22bc426893
|
[
"MIT"
] | null | null | null |
import unittest.mock as mock
from app import utils
@mock.patch("app.utils.get_current_timestamp")
def test_generate_filename_generates_formatted_timestamp(mock_timestamp):
mock_timestamp.return_value = 1_555_555_555.555_555
filename = utils.generate_filename()
assert mock_timestamp.called is True
assert filename == "20190417194555.json"
| 25.714286 | 73 | 0.802778 | 0 | 0 | 0 | 0 | 305 | 0.847222 | 0 | 0 | 54 | 0.15 |
968b59a333622be17af9c9620da7baac069e951b
| 1,328 |
py
|
Python
|
.my_scripts/network/analyze_speedtest.py
|
infokiller/config-public
|
73fd61a0ad4d2f1ac7e7a73b13de8c4f1b80e5c4
|
[
"MIT"
] | 17 |
2020-06-01T14:18:49.000Z
|
2022-03-23T04:32:52.000Z
|
.my_scripts/network/analyze_speedtest.py
|
Laworigin/config-public
|
527c42e0c5c274dd23c537674d789499b03ef912
|
[
"MIT"
] | 1 |
2021-11-28T10:43:08.000Z
|
2021-11-28T10:43:08.000Z
|
.my_scripts/network/analyze_speedtest.py
|
Laworigin/config-public
|
527c42e0c5c274dd23c537674d789499b03ef912
|
[
"MIT"
] | 3 |
2020-07-02T12:37:27.000Z
|
2021-12-15T17:03:54.000Z
|
#!/usr/bin/env python3
import argparse
import datetime
import os
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser(description='Analyze speedtest csv file')
parser.add_argument('speedtest_file', help='Path to speedtest csv file')
parser.add_argument('--min-datetime',
help='Minimum datetime of data to include')
parser.add_argument('--last-days',
type=int,
default=30,
help='Number of days to include')
args = parser.parse_args()
    min_datetime_str = '1970-01-01'
    if args.min_datetime:
        min_datetime_str = args.min_datetime
elif args.last_days:
min_datetime_str = (
datetime.datetime.now() -
datetime.timedelta(days=args.last_days)).strftime('%Y-%m-%d')
# pylint: disable=invalid-name
df = pd.read_csv(args.speedtest_file)
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
df = df[df['Timestamp'] >= min_datetime_str]
df = df.set_index('Timestamp')
df['Download'] = df['Download'] / (1000**2)
df['Upload'] = df['Upload'] / (1000**2)
_, ax = plt.subplots()
df[['Download', 'Upload']].ewm(alpha=0.1).mean().plot(ax=ax)
plt.show()
if __name__ == '__main__':
main()
| 30.883721 | 78 | 0.610693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.254518 |
968b5a9ecbc7c7427f6fc38ea644b75122f74f7f
| 5,403 |
py
|
Python
|
tensorflow_datasets/text/wikipedia_toxicity_subtypes.py
|
stwind/datasets
|
118d3d2472a3bf2703d1374e25c2223dc7942c13
|
[
"Apache-2.0"
] | 1 |
2020-10-11T19:15:49.000Z
|
2020-10-11T19:15:49.000Z
|
tensorflow_datasets/text/wikipedia_toxicity_subtypes.py
|
cbaront/datasets
|
b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607
|
[
"Apache-2.0"
] | 1 |
2021-02-23T20:16:05.000Z
|
2021-02-23T20:16:05.000Z
|
tensorflow_datasets/text/wikipedia_toxicity_subtypes.py
|
cbaront/datasets
|
b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607
|
[
"Apache-2.0"
] | 1 |
2022-03-14T16:17:53.000Z
|
2022-03-14T16:17:53.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikipediaToxicitySubtypes from Jigsaw Toxic Comment Classification Challenge."""
import csv
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{10.1145/3038912.3052591,
author = {Wulczyn, Ellery and Thain, Nithum and Dixon, Lucas},
title = {Ex Machina: Personal Attacks Seen at Scale},
year = {2017},
isbn = {9781450349130},
publisher = {International World Wide Web Conferences Steering Committee},
address = {Republic and Canton of Geneva, CHE},
url = {https://doi.org/10.1145/3038912.3052591},
doi = {10.1145/3038912.3052591},
booktitle = {Proceedings of the 26th International Conference on World Wide Web},
pages = {1391-1399},
numpages = {9},
keywords = {online discussions, wikipedia, online harassment},
location = {Perth, Australia},
series = {WWW '17}
}
"""
_DESCRIPTION = """
This version of the Wikipedia Toxicity Subtypes dataset provides access to the
primary toxicity label, as well the five toxicity subtype labels annotated by
crowd workers. The toxicity and toxicity subtype labels are binary values
(0 or 1) indicating whether the majority of annotators assigned that
attributes to the comment text.
The comments in this dataset come from an archive of Wikipedia talk pages
comments. These have been annotated by Jigsaw for toxicity, as well as a variety
of toxicity subtypes, including severe toxicity, obscenity, threatening
language, insulting language, and identity attacks. This dataset is a replica of
the data released for the Jigsaw Toxic Comment Classification Challenge on
Kaggle, with the training set unchanged, and the test dataset merged with the
test_labels released after the end of the competition. Test data not used for
scoring has been dropped. This dataset is released under CC0, as is the
underlying comment text.
See the Kaggle documentation or
https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
details.
"""
_DOWNLOAD_URL = 'https://storage.googleapis.com/jigsaw-unintended-bias-in-toxicity-classification/wikipedia_toxicity_subtypes.zip'
class WikipediaToxicitySubtypes(tfds.core.GeneratorBasedBuilder):
"""Classification of 220K Wikipedia talk page comments for types of toxicity.
This version of the Wikipedia Toxicity Subtypes dataset provides access to the
primary toxicity label, as well the five toxicity subtype labels annotated by
crowd workers. The toxicity and toxicity subtype labels are binary values
(0 or 1) indicating whether the majority of annotators assigned that
attributes to the comment text.
See the Kaggle documentation or
https://figshare.com/articles/Wikipedia_Talk_Labels_Toxicity/4563973 for more
details.
"""
VERSION = tfds.core.Version('0.2.0')
RELEASE_NOTES = {
'0.2.0': 'Updated features for consistency with CivilComments dataset.',
}
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'text': tfds.features.Text(),
'toxicity': tf.float32,
'severe_toxicity': tf.float32,
'obscene': tf.float32,
'threat': tf.float32,
'insult': tf.float32,
'identity_attack': tf.float32,
}),
supervised_keys=('text', 'toxicity'),
homepage='https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'filename': os.path.join(dl_path, 'wikidata_train.csv')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={'filename': os.path.join(dl_path, 'wikidata_test.csv')},
),
]
def _generate_examples(self, filename):
"""Yields examples.
Each example contains a text input and then six annotation labels.
Args:
filename: the path of the file to be read for this split.
Yields:
A dictionary of features, all floating point except the input text.
"""
with tf.io.gfile.GFile(filename) as f:
reader = csv.DictReader(f)
for row in reader:
example = {}
example['text'] = row['comment_text']
example['toxicity'] = float(row['toxic'])
example['severe_toxicity'] = float(row['severe_toxic'])
example['identity_attack'] = float(row['identity_hate'])
for label in ['obscene', 'threat', 'insult']:
example[label] = float(row[label])
yield row['id'], example
| 38.049296 | 130 | 0.713122 | 2,672 | 0.49454 | 784 | 0.145105 | 0 | 0 | 0 | 0 | 3,835 | 0.709791 |
968b8d46ee387fdd215e0605696e260154647af3
| 480 |
py
|
Python
|
estimate-retrofit-impact-on-heat-pump-viability/plot_uvalue_distribution.py
|
rdmolony/projects
|
8cbbe215710cb9f1b1bf80f8c6a39153181d61a0
|
[
"MIT"
] | 3 |
2021-09-02T16:38:27.000Z
|
2022-01-19T13:11:09.000Z
|
estimate-retrofit-impact-on-heat-pump-viability/plot_uvalue_distribution.py
|
rdmolony/projects
|
8cbbe215710cb9f1b1bf80f8c6a39153181d61a0
|
[
"MIT"
] | 5 |
2021-10-17T16:25:47.000Z
|
2021-11-14T17:51:24.000Z
|
estimate-retrofit-impact-on-heat-pump-viability/plot_uvalue_distribution.py
|
rdmolony/projects
|
8cbbe215710cb9f1b1bf80f8c6a39153181d61a0
|
[
"MIT"
] | 3 |
2021-10-04T08:34:26.000Z
|
2022-02-06T15:56:03.000Z
|
import pandas as pd
import seaborn as sns
sns.set()
# + tags=["parameters"]
upstream = ["download_buildings"]
product = None
# -
buildings = pd.read_csv(upstream["download_buildings"])
buildings["wall_uvalue"].plot.hist(bins=30)
buildings["roof_uvalue"].plot.hist(bins=30)
buildings["window_uvalue"].plot.hist(bins=30)
buildings["wall_uvalue"].to_csv(product["wall"])
buildings["roof_uvalue"].to_csv(product["roof"])
buildings["window_uvalue"].to_csv(product["window"])
| 19.2 | 55 | 0.739583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.35 |
968d0a57d12d55c489796f697b6f5c53a64a6d4e
| 925 |
py
|
Python
|
src/core/base/Logging.py
|
albertmonfa/Banhmi
|
30052155316d3ba65e9bc7261f13f7e081c14ab2
|
[
"Apache-2.0"
] | null | null | null |
src/core/base/Logging.py
|
albertmonfa/Banhmi
|
30052155316d3ba65e9bc7261f13f7e081c14ab2
|
[
"Apache-2.0"
] | 1 |
2021-06-01T22:54:10.000Z
|
2021-06-01T22:54:10.000Z
|
src/core/base/Logging.py
|
albertmonfa/Banhmi
|
30052155316d3ba65e9bc7261f13f7e081c14ab2
|
[
"Apache-2.0"
] | 1 |
2018-11-08T10:18:19.000Z
|
2018-11-08T10:18:19.000Z
|
#!/usr/bin/python
'''
Copyright 2018 Albert Monfa
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
class Logging():
__severity = {
0 : logging.DEBUG,
1 : logging.INFO,
2 : logging.WARNING,
3 : logging.ERROR,
4 : logging.CRITICAL
}
def severity(log_level):
if log_level not in range(0,4):
return logging.ERROR
return Logging.__severity[log_level]
| 27.205882 | 72 | 0.701622 | 328 | 0.354595 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.623784 |
968d107ac6d19ef65f3e2a523c368a10dd9ff203
| 8,243 |
py
|
Python
|
IzVerifier/test/test_izverifier.py
|
ahmedlawi92/IzVerifier
|
b367935f66810b4c4897cc860c5a3e2070f1890f
|
[
"MIT"
] | null | null | null |
IzVerifier/test/test_izverifier.py
|
ahmedlawi92/IzVerifier
|
b367935f66810b4c4897cc860c5a3e2070f1890f
|
[
"MIT"
] | null | null | null |
IzVerifier/test/test_izverifier.py
|
ahmedlawi92/IzVerifier
|
b367935f66810b4c4897cc860c5a3e2070f1890f
|
[
"MIT"
] | null | null | null |
from IzVerifier.izspecs.containers.izclasses import IzClasses
__author__ = 'fcanas'
import unittest
from IzVerifier.izspecs.containers.izconditions import IzConditions
from IzVerifier.izspecs.containers.izstrings import IzStrings
from IzVerifier.izspecs.containers.izvariables import IzVariables
from IzVerifier.izverifier import IzVerifier
from IzVerifier.izspecs.containers.constants import *
path1 = 'data/sample_installer_iz5/izpack/'
path2 = 'data/sample_installer_iz5/resources/'
source_path2 = 'data/sample_code_base/src/'
pom = 'data/sample_installer_iz5/pom.xml'
class TestVerifier(unittest.TestCase):
"""
Basic testing of verifier class.
"""
def setUp(self):
args = {
'specs_path': path1,
'sources': [source_path2],
'resources_path': path2,
'pom': pom,
'specs': ['conditions', 'strings', 'variables']
}
self.izv = IzVerifier(args)
self.izv.reporter.set_terminal_width() # Sets width to width of terminal
def test_IzPaths(self):
"""
Testing install.xml path parsing.
"""
specs = [('variables', 'variables.xml'),
('conditions', 'conditions.xml'),
('dynamicvariables', 'dynamic_variables.xml'),
('resources', 'resources.xml'),
('panels', 'panels.xml'),
('packs', 'packs.xml')]
self.assertTrue(self.izv != None)
for spec in specs:
path = self.izv.paths.get_path(spec[0])
self.assertTrue(spec[1] in path,
msg=path + "!=" + spec[1])
def test_IzConditions(self):
"""
        Testing the conditions container.
"""
conditions = self.izv.paths.get_path('conditions')
self.assertEquals(conditions, 'data/sample_installer_iz5/izpack/conditions.xml')
izc = IzConditions(conditions)
self.assertTrue(izc != None)
# Test for number of keys in conditions.xml plus white list
num = len(izc.get_keys()) - len(izc.properties[WHITE_LIST])
print num
self.assertEquals(num, 15, str(num) + "!=15")
def test_langpack_paths(self):
"""
Test that we parsed the langpack paths from resources.xml
"""
langpacks = [('default', 'data/sample_installer_iz5/resources/langpacks/CustomLangPack.xml'),
('eng', 'data/sample_installer_iz5/resources/langpacks/CustomLangPack.xml')]
for tpack, fpack in zip(langpacks, self.izv.paths.get_langpacks().keys()):
self.assertEquals(tpack[1], self.izv.paths.get_langpack_path(tpack[0]))
def test_IzStrings(self):
"""
Testing the strings container.
"""
langpack = self.izv.paths.get_langpack_path()
izs = IzStrings(langpack)
self.assertTrue(izs != None)
# Test for number of strings
num = len(izs.get_keys())
        self.assertEquals(num, 5, str(num) + '!=5')
def test_IzVariables(self):
"""
Testing the variables container.
"""
variables = self.izv.paths.get_path('variables')
self.assertEquals(variables, 'data/sample_installer_iz5/izpack/variables.xml')
izv = IzVariables(variables)
self.assertTrue(izv != None)
num = len(izv.get_keys()) - len(izv.properties[WHITE_LIST])
self.assertEquals(num, 3, str(num) + '!=3')
def test_verifyStrings(self):
"""
Verify strings in sample installer
"""
hits = self.izv.verify('strings', verbosity=2, filter_classes=True)
undefined_strings = {'some.string.4',
'my.error.message.id.test',
'password.empty',
'password.not.equal',
'some.user',
'some.user.panel.info',
'some.user.password',
'some.user.password.confirm',
'some.string.5',
'some.string.6',
'hello.world',
'my.izpack5.key.1',
'my.izpack5.key.2',
'my.izpack5.key.3'}
found_strings, location = zip(*hits)
strings_not_found = undefined_strings - set(found_strings)
additional_found_strings = set(found_strings) - undefined_strings
self.assertTrue(len(strings_not_found) == 0, "Strings not found: " + str(strings_not_found))
self.assertTrue(len(additional_found_strings) == 0, "Should not have been found: " + str(additional_found_strings))
def test_verifyConditions(self):
"""
Verify conditions in sample installer.
"""
hits = self.izv.verify('conditions', verbosity=2)
undefined_conditions = {'myinstallerclass.condition',
'some.condition.2',
'some.condition.1'}
found_conditions, location = zip(*hits)
for id in undefined_conditions:
self.assertTrue(id in found_conditions)
def test_verifyVariables(self):
"""
        Verify variables in sample installer.
"""
hits = self.izv.verify('variables', verbosity=1)
num = len(hits)
self.assertTrue(num == 5)
def test_verifyAll(self):
"""
Verify all specs on sample installer.
"""
hits = self.izv.verify_all(verbosity=1)
num = len(hits)
assert (num != 0)
def test_findReference(self):
"""
Find some references to items in source code and specs.
"""
hits = self.izv.find_references('some.user.password', verbosity=2)
self.assertEquals(len(hits), 2)
hits = self.izv.find_references('password.empty', verbosity=2)
self.assertEquals(len(hits), 1)
# Ref in code
hits = self.izv.find_references('some.string.3', verbosity=2)
self.assertEquals(len(hits), 1)
# var substitution not yet implemented for find references, so this
# test will miss the ref in Foo.java
hits = self.izv.find_references('some.condition.1', verbosity=2)
self.assertEquals(len(hits), 1)
def test_verifyClasses(self):
"""
Testing the izclasses container.
"""
classes = IzClasses(source_path2)
classes.print_keys()
self.assertEquals(len(classes.get_keys()), 5)
hits = self.izv.verify('classes', verbosity=2)
self.assertEquals(len(hits), 5)
referenced = self.izv.get_referenced('classes')
self.assertTrue(referenced.has_key('com.sample.installer.Foo'))
self.assertTrue(referenced.has_key('com.sample.installer.SuperValidator'))
self.assertTrue(referenced.has_key('com.sample.installer.SuperDuperValidator'))
self.assertTrue(referenced.has_key('com.sample.installer.BarListener'))
def test_findReferencedClasses(self):
"""
Testing the IzVerifiers ability to find the classes used in an installer
"""
found_referenced_classes = self.izv.referenced_classes
actual_referenced_classes = {
'data/sample_code_base/src/com/sample/installer/Foo.java',
'data/sample_code_base/src/com/sample/installer/Apples.java',
'data/sample_code_base/src/com/sample/installer/Pineapples.java',
'data/sample_code_base/src/com/sample/installer/Bar.java'
}
found_referenced_classes = set(found_referenced_classes)
extra_classes_found = found_referenced_classes - actual_referenced_classes
classes_not_found = actual_referenced_classes - found_referenced_classes
self.assertTrue(len(extra_classes_found) == 0)
self.assertTrue(len(classes_not_found) == 0)
for reffed_class in extra_classes_found:
print "this class shouldn't have been found %s" % reffed_class
for reffed_class in classes_not_found:
print "this class should have been found %s" % reffed_class
if __name__ == '__main__':
unittest.main()
| 35.076596 | 123 | 0.605241 | 7,610 | 0.923208 | 0 | 0 | 0 | 0 | 0 | 0 | 2,685 | 0.325731 |
968e31be6937f07c12c6da1be7d293d1db2611e3
| 5,690 |
py
|
Python
|
src/envoxy/postgresql/client.py
|
muzzley/envoxy
|
b70f2d19ee27f7b4b12e68d441b1317966d87041
|
[
"MIT"
] | 2 |
2018-10-29T09:39:43.000Z
|
2019-06-18T11:29:00.000Z
|
src/envoxy/postgresql/client.py
|
muzzley/envoxy
|
b70f2d19ee27f7b4b12e68d441b1317966d87041
|
[
"MIT"
] | null | null | null |
src/envoxy/postgresql/client.py
|
muzzley/envoxy
|
b70f2d19ee27f7b4b12e68d441b1317966d87041
|
[
"MIT"
] | null | null | null |
from psycopg2.pool import ThreadedConnectionPool
import psycopg2.extras
import psycopg2.sql as sql
from contextlib import contextmanager
from threading import Semaphore
from ..db.exceptions import DatabaseException
from ..utils.logs import Log
from ..constants import MIN_CONN, MAX_CONN, TIMEOUT_CONN, DEFAULT_OFFSET_LIMIT, DEFAULT_CHUNK_SIZE
from ..asserts import assertz
class SemaphoreThreadedConnectionPool(ThreadedConnectionPool):
def __init__(self, minconn, maxconn, *args, **kwargs):
self._semaphore = Semaphore(maxconn)
super().__init__(minconn, maxconn, *args, **kwargs)
def getconn(self, *args, **kwargs):
self._semaphore.acquire()
return super().getconn(*args, **kwargs)
def putconn(self, *args, **kwargs):
super().putconn(*args, **kwargs)
self._semaphore.release()
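# Illustrative sketch (assumed example, not part of the original module; the
# connection parameters are placeholders): with maxconn=2 a third concurrent
# getconn() blocks on the semaphore until putconn() releases a connection,
# instead of raising PoolError as the plain ThreadedConnectionPool would.
#
# pool = SemaphoreThreadedConnectionPool(1, 2, host='localhost', port=5432,
#                                        dbname='testdb', user='envoxy',
#                                        password='secret')
# conn = pool.getconn()
# try:
#     ...  # use conn
# finally:
#     pool.putconn(conn)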
class Client:
_instances = {}
__conn = None
def __init__(self, server_conf):
for _server_key in server_conf.keys():
_conf = server_conf[_server_key]
self._instances[_server_key] = {
'server': _server_key,
'conf': _conf
}
self.connect(self._instances[_server_key])
def connect(self, instance):
conf = instance['conf']
_max_conn = int(conf.get('max_conn', MAX_CONN))
_timeout = int(conf.get('timeout', TIMEOUT_CONN))
try:
_conn_pool = SemaphoreThreadedConnectionPool(MIN_CONN, _max_conn, host=conf['host'], port=conf['port'],
dbname=conf['db'], user=conf['user'], password=conf['passwd'],
connect_timeout=_timeout)
instance['conn_pool'] = _conn_pool
Log.trace('>>> Successfully connected to POSTGRES: {}, {}:{}'.format(instance['server'],
conf['host'], conf['port']))
except psycopg2.OperationalError as e:
Log.error('>>PGSQL ERROR {} {}'.format(conf.get('server'), e))
def query(self, server_key=None, sql=None, params=None):
"""
Executes any given sql query
:param sql_query:
:return:
"""
conn = None
try:
if not sql:
raise DatabaseException("Sql cannot be empty")
conn = self.__conn if self.__conn is not None else self._get_conn(
server_key)
cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
schema = self._get_conf(server_key, 'schema')
if schema:
cursor.execute(f"SET search_path TO {schema}")
            data = []
            params = params or {}  # tolerate a call without pagination params
            chunk_size = params.get('chunk_size') or DEFAULT_CHUNK_SIZE
offset_limit = params.get('offset_limit') or DEFAULT_OFFSET_LIMIT
params.update({
'chunk_size': chunk_size,
'offset_limit': offset_limit
})
try:
while True:
cursor.execute(sql, params)
rowcount = cursor.rowcount
rows = cursor.fetchall()
data.extend(list(map(dict, rows)))
offset_limit += chunk_size
params.update({'offset_limit': offset_limit})
if rowcount != chunk_size or 'limit' not in sql.lower():
break
if self.__conn is None:
# query is not using transaction
self.release_conn(server_key, conn)
return data
except KeyError as e:
Log.error(e)
if conn is not None:
self.release_conn(server_key, conn)
except psycopg2.DatabaseError as e:
Log.error(e)
if conn is not None:
self.release_conn(server_key, conn)
return None
def insert(self, db_table: str, data: dict):
if not self.__conn:
raise DatabaseException(
"Insert must be inside a transaction block")
columns = data.keys()
query = sql.SQL("""insert into {} ({}) values ({})""").format(
sql.Identifier(db_table),
sql.SQL(', ').join(map(sql.Identifier, columns)),
sql.SQL(', ').join(sql.Placeholder() * len(columns)))
conn = self.__conn
cursor = conn.cursor()
cursor.execute(query, list(data.values()))
def _get_conn(self, server_key):
"""
:param server_key: database identifier
:return: raw psycopg2 connector instance
"""
_instance = self._instances[server_key]
assertz('conn_pool' in _instance,
f"getconn failed on {server_key} db", _error_code=0, _status_code=412)
return _instance.get('conn_pool').getconn()
def _get_conf(self, server_key, key):
return self._instances[server_key]['conf'].get(key, None)
def release_conn(self, server_key, conn):
_instance = self._instances[server_key]
_instance['conn_pool'].putconn(conn)
@contextmanager
def transaction(self, server_key):
self.__conn = self._get_conn(server_key)
self.__conn.autocommit = False
try:
yield self
except (psycopg2.DatabaseError, DatabaseException) as e:
Log.error("rollback transaction, {}".format(e))
self.__conn.rollback()
finally:
self.__conn.commit()
self.release_conn(server_key, self.__conn)
self.__conn = None
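# Illustrative usage sketch (assumed example; the server key, table and row are
# placeholders): inside the transaction block insert() and query() share one
# connection, which is committed on success and rolled back on error.
#
# client = Client(server_conf)
# with client.transaction('my_db') as db:
#     db.insert('events', {'id': 1, 'payload': '{}'})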
| 31.787709 | 119 | 0.564323 | 5,310 | 0.933216 | 458 | 0.080492 | 478 | 0.084007 | 0 | 0 | 739 | 0.129877 |
968eb0ff0a358625bf80189ad1c3b24c8d3d2439
| 4,380 |
py
|
Python
|
treadmill/cli/admin/blackout.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 2 |
2017-03-20T07:13:33.000Z
|
2017-05-03T03:39:53.000Z
|
treadmill/cli/admin/blackout.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 12 |
2017-07-10T07:04:06.000Z
|
2017-07-26T09:32:54.000Z
|
treadmill/cli/admin/blackout.py
|
gaocegege/treadmill
|
04325d319c0ee912c066f07b88b674e84485f154
|
[
"Apache-2.0"
] | 2 |
2017-05-04T11:25:32.000Z
|
2017-07-11T09:10:01.000Z
|
"""Kills all connections from a given treadmill server."""
import logging
import re
import click
import kazoo
from treadmill import presence
from treadmill import utils
from treadmill import zkutils
from treadmill import context
from treadmill import cli
from treadmill import zknamespace as z
_LOGGER = logging.getLogger(__name__)
_ON_EXCEPTIONS = cli.handle_exceptions([
(kazoo.exceptions.NoAuthError, 'Error: not authorized.'),
(context.ContextError, None),
])
def _gen_formatter(mapping, formatter):
"""Generate real formatter to have item index in position."""
pattern = re.compile(r'(%(\w))')
match = pattern.findall(formatter)
# (symbol, key) should be ('%t', 't')
for (symbol, key) in match:
index = mapping[key]
formatter = formatter.replace(symbol, '{%d}' % index, 1)
return formatter
def _list_server_blackouts(zkclient, fmt):
"""List server blackouts."""
# List currently blacked out nodes.
blacked_out = []
try:
blacked_out_nodes = zkclient.get_children(z.BLACKEDOUT_SERVERS)
for server in blacked_out_nodes:
node_path = z.path.blackedout_server(server)
data, metadata = zkutils.get(zkclient, node_path,
need_metadata=True)
blacked_out.append((metadata.created, server, data))
except kazoo.client.NoNodeError:
pass
# [%t] %h %r will be printed as below
# [Thu, 05 May 2016 02:59:58 +0000] <hostname> -
mapping = {'t': 0, 'h': 1, 'r': 2}
formatter = _gen_formatter(mapping, fmt)
for when, server, reason in reversed(sorted(blacked_out)):
reason = '-' if reason is None else reason
print(formatter.format(utils.strftime_utc(when), server, reason))
def _clear_server_blackout(zkclient, server):
"""Clear server blackout."""
path = z.path.blackedout_server(server)
zkutils.ensure_deleted(zkclient, path)
def _blackout_server(zkclient, server, reason):
"""Blackout server."""
if not reason:
raise click.UsageError('--reason is required.')
path = z.path.blackedout_server(server)
zkutils.ensure_exists(
zkclient,
path,
acl=[zkutils.make_host_acl(server, 'rwcda')],
data=str(reason)
)
presence.kill_node(zkclient, server)
def _blackout_app(zkclient, app, clear):
"""Blackout app."""
# list current blacklist
blacklisted_node = z.path.blackedout_app(app)
if clear:
zkutils.ensure_deleted(zkclient, blacklisted_node)
else:
zkutils.ensure_exists(zkclient, blacklisted_node)
def _list_blackedout_apps(zkclient):
"""List blackedout apps."""
try:
for blacklisted in zkclient.get_children(z.BLACKEDOUT_APPS):
print(blacklisted)
except kazoo.client.NoNodeError:
pass
def init():
"""Top level command handler."""
@click.group()
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
def blackout():
"""Manage server and app blackouts."""
pass
@blackout.command(name='server')
@click.option('--server', help='Server name to blackout.')
@click.option('--reason', help='Blackout reason.')
@click.option('--fmt', help='Format of the blackout output.',
default='[%t] %h %r')
@click.option('--clear', is_flag=True, default=False,
help='Clear blackout.')
@_ON_EXCEPTIONS
def server_cmd(server, reason, fmt, clear):
"""Manage server blackout."""
if server is not None:
if clear:
_clear_server_blackout(context.GLOBAL.zk.conn, server)
else:
_blackout_server(context.GLOBAL.zk.conn, server, reason)
else:
_list_server_blackouts(context.GLOBAL.zk.conn, fmt)
@blackout.command(name='app')
@click.option('--app', help='App name to blackout.')
@click.option('--clear', is_flag=True, default=False,
help='Clear blackout.')
def app_cmd(app, clear):
"""Manage app blackouts."""
if app:
_blackout_app(context.GLOBAL.zk.conn, app, clear)
_list_blackedout_apps(context.GLOBAL.zk.conn)
del server_cmd
del app_cmd
return blackout
| 29.594595 | 73 | 0.638128 | 0 | 0 | 0 | 0 | 1,408 | 0.321461 | 0 | 0 | 866 | 0.197717 |
9690871dfe5b99b44cb726d4b08a75cadca848bb
| 3,200 |
py
|
Python
|
tests/view_tests/urls.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
tests/view_tests/urls.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
tests/view_tests/urls.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
import os
from functools import partial
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path, re_path
from django.utils.translation import gettext_lazy as _
from django.views import defaults, i18n, static
from . import views
base_dir = os.path.dirname(os.path.abspath(__file__))
media_dir = os.path.join(base_dir, "media")
locale_dir = os.path.join(base_dir, "locale")
urlpatterns = [
path("", views.index_page),
# Default views
path("nonexistent_url/", partial(defaults.page_not_found, exception=None)),
path("server_error/", defaults.server_error),
# a view that raises an exception for the debug view
path("raises/", views.raises),
path("raises400/", views.raises400),
path("raises400_bad_request/", views.raises400_bad_request),
path("raises403/", views.raises403),
path("raises404/", views.raises404),
path("raises500/", views.raises500),
path("custom_reporter_class_view/", views.custom_reporter_class_view),
path("technical404/", views.technical404, name="my404"),
path("classbased404/", views.Http404View.as_view()),
# i18n views
path("i18n/", include("django.conf.urls.i18n")),
path("jsi18n/", i18n.JavaScriptCatalog.as_view(packages=["view_tests"])),
path("jsi18n/app1/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app1"])),
path("jsi18n/app2/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app2"])),
path("jsi18n/app5/", i18n.JavaScriptCatalog.as_view(packages=["view_tests.app5"])),
path(
"jsi18n_english_translation/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app0"]),
),
path(
"jsi18n_multi_packages1/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app1", "view_tests.app2"]),
),
path(
"jsi18n_multi_packages2/",
i18n.JavaScriptCatalog.as_view(packages=["view_tests.app3", "view_tests.app4"]),
),
path(
"jsi18n_admin/",
i18n.JavaScriptCatalog.as_view(packages=["django.contrib.admin", "view_tests"]),
),
path("jsi18n_template/", views.jsi18n),
path("jsi18n_multi_catalogs/", views.jsi18n_multi_catalogs),
path("jsoni18n/", i18n.JSONCatalog.as_view(packages=["view_tests"])),
# Static views
re_path(
r"^site_media/(?P<path>.*)$",
static.serve,
{"document_root": media_dir, "show_indexes": True},
),
]
urlpatterns += i18n_patterns(
re_path(_(r"^translated/$"), views.index_page, name="i18n_prefixed"),
)
urlpatterns += [
path("template_exception/", views.template_exception, name="template_exception"),
path(
"raises_template_does_not_exist/<path:path>",
views.raises_template_does_not_exist,
name="raises_template_does_not_exist",
),
path("render_no_template/", views.render_no_template, name="render_no_template"),
re_path(
r"^test-setlang/(?P<parameter>[^/]+)/$",
views.with_parameter,
name="with_parameter",
),
# Patterns to test the technical 404.
re_path(r"^regex-post/(?P<pk>[0-9]+)/$", views.index_page, name="regex-post"),
path("path-post/<int:pk>/", views.index_page, name="path-post"),
]
| 38.095238 | 88 | 0.684688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,126 | 0.351875 |
969196b838a5ad3282e2d2bf20e3669c40ce0f82
| 20,323 |
py
|
Python
|
sympy/solvers/ode/systems.py
|
nsfinkelstein/sympy
|
cf87897234ad0d7eaac705ba47267caec2a6bcb1
|
[
"BSD-3-Clause"
] | 2 |
2019-05-18T22:36:49.000Z
|
2019-05-24T05:56:16.000Z
|
sympy/solvers/ode/systems.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | 1 |
2020-04-22T12:45:26.000Z
|
2020-04-22T12:45:26.000Z
|
sympy/solvers/ode/systems.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | 3 |
2021-02-16T16:40:49.000Z
|
2022-03-07T18:28:41.000Z
|
from sympy import (Derivative, Symbol)
from sympy.core.numbers import I
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy
from sympy.functions import exp, im, cos, sin, re
from sympy.functions.combinatorial.factorials import factorial
from sympy.matrices import zeros, Matrix
from sympy.simplify import simplify, collect
from sympy.solvers.deutils import ode_order
from sympy.solvers.solveset import NonlinearError
from sympy.utilities import numbered_symbols, default_sort_key
from sympy.utilities.iterables import ordered, uniq
def _get_func_order(eqs, funcs):
return {func: max(ode_order(eq, func) for eq in eqs) for func in funcs}
class ODEOrderError(ValueError):
"""Raised by linear_ode_to_matrix if the system has the wrong order"""
pass
class ODENonlinearError(NonlinearError):
"""Raised by linear_ode_to_matrix if the system is nonlinear"""
pass
def linear_ode_to_matrix(eqs, funcs, t, order):
r"""
Convert a linear system of ODEs to matrix form
Explanation
===========
Express a system of linear ordinary differential equations as a single
matrix differential equation [1]. For example the system $x' = x + y + 1$
and $y' = x - y$ can be represented as
.. math:: A_1 X' + A_0 X = b
where $A_1$ and $A_0$ are $2 \times 2$ matrices and $b$, $X$ and $X'$ are
$2 \times 1$ matrices with $X = [x, y]^T$.
Higher-order systems are represented with additional matrices e.g. a
second-order system would look like
.. math:: A_2 X'' + A_1 X' + A_0 X = b
Examples
========
>>> from sympy import (Function, Symbol, Matrix, Eq)
>>> from sympy.solvers.ode.systems import linear_ode_to_matrix
>>> t = Symbol('t')
>>> x = Function('x')
>>> y = Function('y')
We can create a system of linear ODEs like
>>> eqs = [
... Eq(x(t).diff(t), x(t) + y(t) + 1),
... Eq(y(t).diff(t), x(t) - y(t)),
... ]
>>> funcs = [x(t), y(t)]
>>> order = 1 # 1st order system
Now ``linear_ode_to_matrix`` can represent this as a matrix
differential equation.
>>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, order)
>>> A1
Matrix([
[1, 0],
[0, 1]])
>>> A0
Matrix([
[-1, -1],
[-1, 1]])
>>> b
Matrix([
[1],
[0]])
The original equations can be recovered from these matrices:
>>> eqs_mat = Matrix([eq.lhs - eq.rhs for eq in eqs])
>>> X = Matrix(funcs)
>>> A1 * X.diff(t) + A0 * X - b == eqs_mat
True
If the system of equations has a maximum order greater than the
    order of the system specified, an ODEOrderError exception is raised.
>>> eqs = [Eq(x(t).diff(t, 2), x(t).diff(t) + x(t)), Eq(y(t).diff(t), y(t) + x(t))]
>>> linear_ode_to_matrix(eqs, funcs, t, 1)
Traceback (most recent call last):
...
ODEOrderError: Cannot represent system in 1-order form
If the system of equations is nonlinear, then ODENonlinearError is
raised.
>>> eqs = [Eq(x(t).diff(t), x(t) + y(t)), Eq(y(t).diff(t), y(t)**2 + x(t))]
>>> linear_ode_to_matrix(eqs, funcs, t, 1)
Traceback (most recent call last):
...
ODENonlinearError: The system of ODEs is nonlinear.
Parameters
==========
eqs : list of sympy expressions or equalities
The equations as expressions (assumed equal to zero).
funcs : list of applied functions
The dependent variables of the system of ODEs.
t : symbol
The independent variable.
order : int
The order of the system of ODEs.
Returns
=======
The tuple ``(As, b)`` where ``As`` is a tuple of matrices and ``b`` is the
    matrix representing the rhs of the matrix equation.
Raises
======
ODEOrderError
When the system of ODEs have an order greater than what was specified
ODENonlinearError
When the system of ODEs is nonlinear
See Also
========
linear_eq_to_matrix: for systems of linear algebraic equations.
References
==========
.. [1] https://en.wikipedia.org/wiki/Matrix_differential_equation
"""
from sympy.solvers.solveset import linear_eq_to_matrix
if any(ode_order(eq, func) > order for eq in eqs for func in funcs):
msg = "Cannot represent system in {}-order form"
raise ODEOrderError(msg.format(order))
As = []
for o in range(order, -1, -1):
# Work from the highest derivative down
funcs_deriv = [func.diff(t, o) for func in funcs]
# linear_eq_to_matrix expects a proper symbol so substitute e.g.
# Derivative(x(t), t) for a Dummy.
rep = {func_deriv: Dummy() for func_deriv in funcs_deriv}
eqs = [eq.subs(rep) for eq in eqs]
syms = [rep[func_deriv] for func_deriv in funcs_deriv]
# Ai is the matrix for X(t).diff(t, o)
# eqs is minus the remainder of the equations.
try:
Ai, b = linear_eq_to_matrix(eqs, syms)
except NonlinearError:
raise ODENonlinearError("The system of ODEs is nonlinear.")
As.append(Ai)
if o:
eqs = [-eq for eq in b]
else:
rhs = b
return As, rhs
def matrix_exp(A, t):
r"""
Matrix exponential $\exp(A*t)$ for the matrix ``A`` and scalar ``t``.
Explanation
===========
This functions returns the $\exp(A*t)$ by doing a simple
matrix multiplication:
.. math:: \exp(A*t) = P * expJ * P^{-1}
where $expJ$ is $\exp(J*t)$. $J$ is the Jordan normal
form of $A$ and $P$ is matrix such that:
.. math:: A = P * J * P^{-1}
The matrix exponential $\exp(A*t)$ appears in the solution of linear
differential equations. For example if $x$ is a vector and $A$ is a matrix
then the initial value problem
.. math:: \frac{dx(t)}{dt} = A \times x(t), x(0) = x0
has the unique solution
.. math:: x(t) = \exp(A t) x0
Examples
========
>>> from sympy import Symbol, Matrix, pprint
>>> from sympy.solvers.ode.systems import matrix_exp
>>> t = Symbol('t')
    We will consider a 2x2 matrix for computing the exponential
>>> A = Matrix([[2, -5], [2, -4]])
>>> pprint(A)
[2 -5]
[ ]
[2 -4]
Now, exp(A*t) is given as follows:
>>> pprint(matrix_exp(A, t))
[ -t -t -t ]
[3*e *sin(t) + e *cos(t) -5*e *sin(t) ]
[ ]
[ -t -t -t ]
[ 2*e *sin(t) - 3*e *sin(t) + e *cos(t)]
Parameters
==========
A : Matrix
The matrix $A$ in the expression $\exp(A*t)$
t : Symbol
The independent variable
See Also
========
matrix_exp_jordan_form: For exponential of Jordan normal form
References
==========
.. [1] https://en.wikipedia.org/wiki/Jordan_normal_form
.. [2] https://en.wikipedia.org/wiki/Matrix_exponential
"""
P, expJ = matrix_exp_jordan_form(A, t)
return P * expJ * P.inv()
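# Illustrative sketch (not part of the original module): for a nilpotent matrix the
# exponential series terminates, so the result of matrix_exp can be checked by hand.
#
#   >>> from sympy import Matrix, Symbol
#   >>> t = Symbol('t')
#   >>> matrix_exp(Matrix([[0, 1], [0, 0]]), t)
#   Matrix([
#   [1, t],
#   [0, 1]])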
def matrix_exp_jordan_form(A, t):
r"""
Matrix exponential $\exp(A*t)$ for the matrix *A* and scalar *t*.
Explanation
===========
Returns the Jordan form of the $\exp(A*t)$ along with the matrix $P$ such that:
.. math::
\exp(A*t) = P * expJ * P^{-1}
Examples
========
>>> from sympy import Matrix, Symbol
>>> from sympy.solvers.ode.systems import matrix_exp, matrix_exp_jordan_form
>>> t = Symbol('t')
We will consider a 2x2 defective matrix. This shows that our method
works even for defective matrices.
>>> A = Matrix([[1, 1], [0, 1]])
It can be observed that this function gives us the Jordan normal form
and the required invertible matrix P.
>>> P, expJ = matrix_exp_jordan_form(A, t)
    Here, it is shown that P and expJ returned by this function are correct
as they satisfy the formula: P * expJ * P_inverse = exp(A*t).
>>> P * expJ * P.inv() == matrix_exp(A, t)
True
Parameters
==========
A : Matrix
The matrix $A$ in the expression $\exp(A*t)$
t : Symbol
The independent variable
References
==========
.. [1] https://en.wikipedia.org/wiki/Defective_matrix
.. [2] https://en.wikipedia.org/wiki/Jordan_matrix
.. [3] https://en.wikipedia.org/wiki/Jordan_normal_form
"""
N, M = A.shape
if N != M:
raise ValueError('Needed square matrix but got shape (%s, %s)' % (N, M))
elif A.has(t):
raise ValueError('Matrix A should not depend on t')
def jordan_chains(A):
'''Chains from Jordan normal form analogous to M.eigenvects().
        Returns a dict with eigenvalues as keys like:
{e1: [[v111,v112,...], [v121, v122,...]], e2:...}
where vijk is the kth vector in the jth chain for eigenvalue i.
'''
P, blocks = A.jordan_cells()
basis = [P[:,i] for i in range(P.shape[1])]
n = 0
chains = {}
for b in blocks:
eigval = b[0, 0]
size = b.shape[0]
if eigval not in chains:
chains[eigval] = []
chains[eigval].append(basis[n:n+size])
n += size
return chains
eigenchains = jordan_chains(A)
# Needed for consistency across Python versions:
eigenchains_iter = sorted(eigenchains.items(), key=default_sort_key)
isreal = not A.has(I)
blocks = []
vectors = []
seen_conjugate = set()
for e, chains in eigenchains_iter:
for chain in chains:
n = len(chain)
if isreal and e != e.conjugate() and e.conjugate() in eigenchains:
if e in seen_conjugate:
continue
seen_conjugate.add(e.conjugate())
exprt = exp(re(e) * t)
imrt = im(e) * t
imblock = Matrix([[cos(imrt), sin(imrt)],
[-sin(imrt), cos(imrt)]])
expJblock2 = Matrix(n, n, lambda i,j:
imblock * t**(j-i) / factorial(j-i) if j >= i
else zeros(2, 2))
expJblock = Matrix(2*n, 2*n, lambda i,j: expJblock2[i//2,j//2][i%2,j%2])
blocks.append(exprt * expJblock)
for i in range(n):
vectors.append(re(chain[i]))
vectors.append(im(chain[i]))
else:
vectors.extend(chain)
fun = lambda i,j: t**(j-i)/factorial(j-i) if j >= i else 0
expJblock = Matrix(n, n, fun)
blocks.append(exp(e * t) * expJblock)
expJ = Matrix.diag(*blocks)
P = Matrix(N, N, lambda i,j: vectors[j][i])
return P, expJ
def _neq_linear_first_order_const_coeff_homogeneous(match_):
r"""
System of n first-order constant-coefficient linear homogeneous differential equations
.. math:: y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n
or that can be written as `\vec{y'} = A . \vec{y}`
where `\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \times n` matrix.
    Since these equations are equivalent to a first-order homogeneous linear
    differential equation, the general solution will contain `n` linearly
    independent parts and will consist of some type of exponential
    functions. Assuming `y = \vec{v} e^{rt}` is a solution of the system where
`\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and
`y' = r v e^{r t}` into the equation `\vec{y'} = A . \vec{y}`, we get
.. math:: r \vec{v} e^{rt} = A \vec{v} e^{rt}
.. math:: r \vec{v} = A \vec{v}
    where `r` comes out to be an eigenvalue of `A` and the vector `\vec{v}` is the eigenvector
    of `A` corresponding to `r`. There are three possibilities for the eigenvalues of `A`
- `n` distinct real eigenvalues
- complex conjugate eigenvalues
- eigenvalues with multiplicity `k`
1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different eigenvectors
`v_1,...v_n` then the solution is given by
.. math:: \vec{y} = C_1 e^{r_1 t} \vec{v_1} + C_2 e^{r_2 t} \vec{v_2} +...+ C_n e^{r_n t} \vec{v_n}
where `C_1,C_2,...,C_n` are arbitrary constants.
    2. When some eigenvalues are complex, then in order to make the solution real,
we take a linear combination: if `r = a + bi` has an eigenvector
`\vec{v} = \vec{w_1} + i \vec{w_2}` then to obtain real-valued solutions to
the system, replace the complex-valued solutions `e^{rx} \vec{v}`
with real-valued solution `e^{ax} (\vec{w_1} \cos(bx) - \vec{w_2} \sin(bx))`
and for `r = a - bi` replace the solution `e^{-r x} \vec{v}` with
`e^{ax} (\vec{w_1} \sin(bx) + \vec{w_2} \cos(bx))`
    3. If some eigenvalues are repeated, then we get fewer than `n` linearly
    independent eigenvectors; we miss some of the solutions and need to
    construct the missing ones. We do this via generalized eigenvectors, vectors
    which are not eigenvectors but are close enough that we can use them to write
    down the remaining solutions. For an eigenvalue `r` with eigenvector `\vec{w}`
we obtain `\vec{w_2},...,\vec{w_k}` using
.. math:: (A - r I) . \vec{w_2} = \vec{w}
.. math:: (A - r I) . \vec{w_3} = \vec{w_2}
.. math:: \vdots
.. math:: (A - r I) . \vec{w_k} = \vec{w_{k-1}}
Then the solutions to the system for the eigenspace are `e^{rt} [\vec{w}],
e^{rt} [t \vec{w} + \vec{w_2}], e^{rt} [\frac{t^2}{2} \vec{w} + t \vec{w_2} + \vec{w_3}],
...,e^{rt} [\frac{t^{k-1}}{(k-1)!} \vec{w} + \frac{t^{k-2}}{(k-2)!} \vec{w_2} +...+ t \vec{w_{k-1}}
+ \vec{w_k}]`
    So, if `\vec{y_1},...,\vec{y_n}` are the `n` solutions obtained from the three
    categories above, then the general solution to the system `\vec{y'} = A . \vec{y}` is
.. math:: \vec{y} = C_1 \vec{y_1} + C_2 \vec{y_2} + \cdots + C_n \vec{y_n}
"""
eq = match_['eq']
func = match_['func']
fc = match_['func_coeff']
n = len(eq)
t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
constants = numbered_symbols(prefix='C', cls=Symbol, start=1)
# This needs to be modified in future so that fc is only of type Matrix
M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i,j:-fc[i,func[j],0])
P, J = matrix_exp_jordan_form(M, t)
P = simplify(P)
Cvect = Matrix(list(next(constants) for _ in range(n)))
sol_vector = P * (J * Cvect)
sol_vector = [collect(s, ordered(J.atoms(exp)), exact=True) for s in sol_vector]
sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
return sol_dict
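# Illustrative sketch (not part of the original module): the routine above effectively
# assembles the general solution from exp(A*t). For the system x' = y, y' = -x the
# coefficient matrix is A = [[0, 1], [-1, 0]] with complex conjugate eigenvalues +-I,
# and (per the docstring's second case) the solution is trigonometric:
#
#   >>> from sympy import Matrix, Symbol
#   >>> t = Symbol('t')
#   >>> matrix_exp(Matrix([[0, 1], [-1, 0]]), t)
#   Matrix([
#   [ cos(t), sin(t)],
#   [-sin(t), cos(t)]])
#
# so x = C1*cos(t) + C2*sin(t) and y = -C1*sin(t) + C2*cos(t).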
def _matrix_is_constant(M, t):
"""Checks if the matrix M is independent of t or not."""
return all(coef.as_independent(t, as_Add=True)[1] == 0 for coef in M)
def _canonical_equations(eqs, funcs, t):
"""Helper function that solves for first order derivatives in a system"""
from sympy.solvers.solvers import solve
# For now the system of ODEs dealt by this function can have a
# maximum order of 1.
if any(ode_order(eq, func) > 1 for eq in eqs for func in funcs):
msg = "Cannot represent system in {}-order canonical form"
raise ODEOrderError(msg.format(1))
canon_eqs = solve(eqs, *[func.diff(t) for func in funcs], dict=True)
if len(canon_eqs) != 1:
raise ODENonlinearError("System of ODEs is nonlinear")
canon_eqs = canon_eqs[0]
canon_eqs = [Eq(func.diff(t), canon_eqs[func.diff(t)]) for func in funcs]
return canon_eqs
def neq_nth_linear_constant_coeff_match(eqs, funcs, t):
r"""
Returns a dictionary with details of the eqs if every equation is constant coefficient
and linear else returns None
Explanation
===========
    This function takes the eqs and converts them into the form Ax = b, where x is a vector of terms
    containing the dependent variables and their derivatives up to their maximum order. If it is
    possible to convert eqs into Ax = b, then all the equations in eqs are linear; otherwise
    they are non-linear.
To check if the equations are constant coefficient, we need to check if all the terms in
A obtained above are constant or not.
To check if the equations are homogeneous or not, we need to check if b is a zero matrix
or not.
Parameters
==========
eqs: List
List of ODEs
funcs: List
List of dependent variables
t: Symbol
Independent variable of the equations in eqs
Returns
=======
match = {
'no_of_equation': len(eqs),
'eq': eqs,
'func': funcs,
'order': order,
'is_linear': is_linear,
'is_constant': is_constant,
'is_homogeneous': is_homogeneous,
}
Dict or None
Dict with values for keys:
1. no_of_equation: Number of equations
2. eq: The set of equations
3. func: List of dependent variables
4. order: A dictionary that gives the order of the
dependent variable in eqs
5. is_linear: Boolean value indicating if the set of
equations are linear or not.
6. is_constant: Boolean value indicating if the set of
equations have constant coefficients or not.
7. is_homogeneous: Boolean value indicating if the set of
equations are homogeneous or not.
This Dict is the answer returned if the eqs are linear and constant
coefficient. Otherwise, None is returned.
"""
# Error for i == 0 can be added but isn't for now
    # Remove the duplicates from the list of funcs
    # while maintaining the order. This is done
    # because the line in classify_sysode, list(set(funcs)),
    # caused some test cases to fail by giving different
    # results in different versions of Python.
funcs = list(uniq(funcs))
# Check for len(funcs) == len(eqs)
if len(funcs) != len(eqs):
raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
# ValueError when functions have more than one arguments
for func in funcs:
if len(func.args) != 1:
raise ValueError("dsolve() and classify_sysode() work with "
"functions of one variable only, not %s" % func)
# Getting the func_dict and order using the helper
# function
order = _get_func_order(eqs, funcs)
if not all(order[func] == 1 for func in funcs):
return None
else:
        # To be changed when this function is updated.
        # This will in future be updated to the maximum
        # order found in the system.
system_order = 1
# Not adding the check if the len(func.args) for
# every func in funcs is 1
# Linearity check
try:
canon_eqs = _canonical_equations(eqs, funcs, t)
As, b = linear_ode_to_matrix(canon_eqs, funcs, t, system_order)
# When the system of ODEs is non-linear, an ODENonlinearError is raised.
# When system has an order greater than what is specified in system_order,
# ODEOrderError is raised.
# This function catches these errors and None is returned
except (ODEOrderError, ODENonlinearError):
return None
A = As[1]
is_linear = True
# Constant coefficient check
is_constant = _matrix_is_constant(A, t)
# Homogeneous check
is_homogeneous = True if b.is_zero_matrix else False
match = {
'no_of_equation': len(eqs),
'eq': eqs,
'func': funcs,
'order': order,
'is_linear': is_linear,
'is_constant': is_constant,
'is_homogeneous': is_homogeneous,
}
# The match['is_linear'] check will be added in the future when this
# function becomes ready to deal with non-linear systems of ODEs
if match['is_constant']:
# Converting the equation into canonical form if the
# equation is first order. There will be a separate
# function for this in the future.
if all([order[func] == 1 for func in funcs]) and match['is_homogeneous']:
match['func_coeff'] = A
match['type_of_equation'] = "type1"
return match
return None
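# Illustrative sketch (not part of the original module): a first-order constant
# coefficient homogeneous system should come back as "type1". The symbols below are
# assumptions for the example only.
#
#   >>> from sympy import Function, Symbol, Eq
#   >>> t = Symbol('t'); x, y = Function('x'), Function('y')
#   >>> eqs = [Eq(x(t).diff(t), x(t) + y(t)), Eq(y(t).diff(t), x(t) - y(t))]
#   >>> match = neq_nth_linear_constant_coeff_match(eqs, [x(t), y(t)], t)
#   >>> match['is_linear'], match['is_homogeneous'], match['type_of_equation']
#   (True, True, 'type1')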
| 32.310016 | 104 | 0.59814 | 233 | 0.011465 | 0 | 0 | 0 | 0 | 0 | 0 | 14,047 | 0.691187 |
96929dbf83193019e408fa5ab401d32d84324a98
| 104 |
py
|
Python
|
python/torch_mlir/eager_mode/__init__.py
|
burntfalafel/torch-mlir-internal
|
d3ef58450fc94e9337dc0434fa3af6dd7b54b37f
|
[
"Apache-2.0"
] | 2 |
2022-02-16T21:56:00.000Z
|
2022-02-20T17:34:47.000Z
|
python/torch_mlir/eager_mode/__init__.py
|
burntfalafel/torch-mlir-internal
|
d3ef58450fc94e9337dc0434fa3af6dd7b54b37f
|
[
"Apache-2.0"
] | null | null | null |
python/torch_mlir/eager_mode/__init__.py
|
burntfalafel/torch-mlir-internal
|
d3ef58450fc94e9337dc0434fa3af6dd7b54b37f
|
[
"Apache-2.0"
] | null | null | null |
import os
EAGER_MODE_DEBUG = os.environ.get("EAGER_MODE_DEBUG", 'False').lower() in ('true', '1', 't')
| 26 | 92 | 0.673077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.355769 |
96930cd599eda3b260c1fca7b9aaa84eeb3c1530
| 985 |
py
|
Python
|
docs/rips/tests/test_surfaces.py
|
OPM/ResInsight-UserDocumentation
|
2af2c3a5ef297c0061d842944360a83bf8e49c36
|
[
"MIT"
] | 1 |
2020-04-25T21:24:45.000Z
|
2020-04-25T21:24:45.000Z
|
docs/rips/tests/test_surfaces.py
|
OPM/ResInsight-UserDocumentation
|
2af2c3a5ef297c0061d842944360a83bf8e49c36
|
[
"MIT"
] | 7 |
2020-02-11T07:42:10.000Z
|
2020-09-28T17:18:01.000Z
|
docs/rips/tests/test_surfaces.py
|
OPM/ResInsight-UserDocumentation
|
2af2c3a5ef297c0061d842944360a83bf8e49c36
|
[
"MIT"
] | 2 |
2020-04-02T09:33:45.000Z
|
2020-04-09T19:44:53.000Z
|
import sys
import os
import tempfile
from pathlib import Path
import pytest
sys.path.insert(1, os.path.join(sys.path[0], "../../"))
import rips
import dataroot
@pytest.mark.skipif(
sys.platform.startswith("linux"),
reason="Brugge is currently exceptionally slow on Linux",
)
def test_create_and_export_surface(rips_instance, initialize_test):
case_path = dataroot.PATH + "/Case_with_10_timesteps/Real0/BRUGGE_0000.EGRID"
case = rips_instance.project.load_case(path=case_path)
assert len(case.grids()) == 1
surface_collection = rips_instance.project.descendants(rips.SurfaceCollection)[0]
surface = surface_collection.new_surface(case, 5)
assert surface
with tempfile.TemporaryDirectory(prefix="rips") as tmpdirname:
path = Path(tmpdirname, "mysurface.ts")
print("Temporary folder: ", path.as_posix())
fname = surface.export_to_file(path.as_posix())
assert len(fname.values) == 1
assert path.exists()
| 28.142857 | 85 | 0.722843 | 0 | 0 | 0 | 0 | 820 | 0.832487 | 0 | 0 | 153 | 0.15533 |
96935625868f5df6499326134d54ac7ad8bc8a3f
| 1,172 |
py
|
Python
|
samcli/cli/main.py
|
langn/aws-sam-cli
|
160d87ff3c07f092315e1ac71ddc00257fde011b
|
[
"Apache-2.0"
] | null | null | null |
samcli/cli/main.py
|
langn/aws-sam-cli
|
160d87ff3c07f092315e1ac71ddc00257fde011b
|
[
"Apache-2.0"
] | 1 |
2018-05-23T19:51:18.000Z
|
2018-05-23T19:51:18.000Z
|
samcli/cli/main.py
|
langn/aws-sam-cli
|
160d87ff3c07f092315e1ac71ddc00257fde011b
|
[
"Apache-2.0"
] | null | null | null |
"""
Entry point for the CLI
"""
import logging
import click
from samcli import __version__
from .options import debug_option
from .context import Context
from .command import BaseCommand
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
pass_context = click.make_pass_decorator(Context)
def common_options(f):
"""
Common CLI options used by all commands. Ex: --debug
:param f: Callback function passed by Click
:return: Callback function
"""
f = debug_option(f)
return f
@click.command(cls=BaseCommand)
@common_options
@click.version_option(version=__version__, prog_name="SAM CLI")
@pass_context
def cli(ctx):
"""
AWS Serverless Application Model (SAM) CLI
The AWS Serverless Application Model extends AWS CloudFormation to provide a simplified way of defining the
Amazon API Gateway APIs, AWS Lambda functions, and Amazon DynamoDB tables needed by your serverless application.
You can find more in-depth guide about the SAM specification here:
https://github.com/awslabs/serverless-application-model.
"""
pass
| 26.636364 | 116 | 0.740614 | 0 | 0 | 0 | 0 | 573 | 0.488908 | 0 | 0 | 651 | 0.555461 |
9693a5d83793a6353d6917bc38d706bca8e15158
| 1,816 |
py
|
Python
|
ast_to_json.py
|
visr/Py2Jl.jl
|
b2dee947299d064da2e443d0b1ad2ca90bbd1753
|
[
"MIT"
] | 53 |
2018-08-20T12:47:47.000Z
|
2022-03-17T02:21:07.000Z
|
ast_to_json.py
|
visr/Py2Jl.jl
|
b2dee947299d064da2e443d0b1ad2ca90bbd1753
|
[
"MIT"
] | 14 |
2019-01-24T15:27:15.000Z
|
2021-06-13T13:24:18.000Z
|
ast_to_json.py
|
visr/Py2Jl.jl
|
b2dee947299d064da2e443d0b1ad2ca90bbd1753
|
[
"MIT"
] | 9 |
2019-02-12T01:07:11.000Z
|
2021-11-11T19:33:36.000Z
|
import ast
import typing as t
import numbers
import json
from wisepy.talking import Talking
from Redy.Tools.PathLib import Path
talking = Talking()
def to_dict(node: t.Union[ast.AST, str, numbers.Number, list]):
if isinstance(node, complex):
return {"class": "complex", "real": node.real, "imag": node.imag}
elif isinstance(node, str):
return node
elif isinstance(node, numbers.Number):
return node
elif isinstance(node, list):
return [to_dict(each) for each in node]
elif isinstance(node, ast.AST):
data = {
"class": node.__class__.__name__,
**{
field: to_dict(value)
for field, value in ast.iter_fields(node)
}
}
if hasattr(node, 'lineno'):
data['lineno'] = node.lineno
if hasattr(node, 'col_offset'):
data['colno'] = node.col_offset
return data
return node
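# Illustrative sketch (not part of the original script): to_dict turns an AST node into
# JSON-serialisable data, e.g.
#
#   >>> d = to_dict(ast.parse("x = 1"))
#   >>> d["class"]
#   'Module'
#   >>> isinstance(json.dumps(d), str)  # the whole tree round-trips through json
#   True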
@talking.alias('file')
def from_file(input: 'filename', to: 'filename'):
"""
from python source to json file
"""
path = Path(input)
with path.open('r') as fr, Path(to).open('w') as fw:
try:
data = to_dict(ast.parse(fr.read()))
data['name'] = path.relative()[:-3] # remove `.py`
json.dump([str(path), data], fw, indent=2)
except SyntaxError as e:
print(e)
pass
@talking.alias('text')
def from_code(input: 'text', to: 'filename'):
"""
from python source code to json file
"""
with Path(to).open('w') as fw:
try:
data = to_dict(ast.parse(input))
data['name'] = 'Default'
json.dump(['<stdin>', data], fw, indent=2)
except SyntaxError:
pass
if __name__ == '__main__':
talking.on()
| 25.942857 | 73 | 0.55837 | 0 | 0 | 0 | 0 | 811 | 0.446586 | 0 | 0 | 280 | 0.154185 |
96940da789aefea52af00e66e63f6bfcc1df6521
| 18,232 |
py
|
Python
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
pantsbuild/pants
|
22c566e78b4dd982958429813c82e9f558957817
|
[
"Apache-2.0"
] | 1,806 |
2015-01-05T07:31:00.000Z
|
2022-03-31T11:35:41.000Z
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
pantsbuild/pants
|
22c566e78b4dd982958429813c82e9f558957817
|
[
"Apache-2.0"
] | 9,565 |
2015-01-02T19:01:59.000Z
|
2022-03-31T23:25:16.000Z
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
pantsbuild/pants
|
22c566e78b4dd982958429813c82e9f558957817
|
[
"Apache-2.0"
] | 443 |
2015-01-06T20:17:57.000Z
|
2022-03-31T05:28:17.000Z
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Any, ContextManager
import pytest
from pants.backend.docker.goals import package_image
from pants.backend.docker.subsystems import dockerfile_parser
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules import (
dependencies,
docker_binary,
docker_build_args,
docker_build_context,
docker_build_env,
dockerfile,
)
from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
)
from pants.backend.docker.util_rules.docker_build_env import DockerBuildEnvironment
from pants.backend.docker.value_interpolation import (
DockerBuildArgsInterpolationValue,
DockerInterpolationContext,
DockerInterpolationValue,
)
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget, ShellSourceTarget
from pants.backend.shell.target_types import rules as shell_target_types_rules
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST, EMPTY_SNAPSHOT, Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pytest_util import no_exception
from pants.testutil.rule_runner import QueryRule, RuleRunner
def create_rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*core_target_types_rules(),
*dependencies.rules(),
*docker_binary.rules(),
*docker_build_args.rules(),
*docker_build_context.rules(),
*docker_build_env.rules(),
*dockerfile.rules(),
*dockerfile_parser.rules(),
*package_image.rules(),
*package_pex_binary.rules(),
*pex_from_targets.rules(),
*shell_target_types_rules(),
*target_types_rules.rules(),
QueryRule(BuiltPackage, [PexBinaryFieldSet]),
QueryRule(DockerBuildContext, (DockerBuildContextRequest,)),
],
target_types=[
DockerImageTarget,
FilesGeneratorTarget,
PexBinary,
ShellSourcesGeneratorTarget,
ShellSourceTarget,
],
)
return rule_runner
@pytest.fixture
def rule_runner() -> RuleRunner:
return create_rule_runner()
def assert_build_context(
rule_runner: RuleRunner,
address: Address,
*,
build_upstream_images: bool = False,
expected_files: list[str],
expected_interpolation_context: dict[str, dict[str, str] | DockerInterpolationValue]
| None = None,
pants_args: list[str] | None = None,
runner_options: dict[str, Any] | None = None,
) -> DockerBuildContext:
if runner_options is None:
runner_options = {}
runner_options.setdefault("env_inherit", set()).update({"PATH", "PYENV_ROOT", "HOME"})
rule_runner.set_options(pants_args or [], **runner_options)
context = rule_runner.request(
DockerBuildContext,
[
DockerBuildContextRequest(
address=address,
build_upstream_images=build_upstream_images,
)
],
)
snapshot = rule_runner.request(Snapshot, [context.digest])
assert sorted(expected_files) == sorted(snapshot.files)
if expected_interpolation_context is not None:
if "build_args" in expected_interpolation_context:
expected_interpolation_context["build_args"] = DockerBuildArgsInterpolationValue(
expected_interpolation_context["build_args"]
)
assert context.interpolation_context == DockerInterpolationContext.from_dict(
expected_interpolation_context
)
return context
def test_file_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
# img_A -> files_A
# img_A -> img_B
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"])
files(name="files_A", sources=["files/**"])
"""
),
"src/a/Dockerfile": "FROM base",
"src/a/files/a01": "",
"src/a/files/a02": "",
# img_B -> files_B
"src/b/BUILD": dedent(
"""\
docker_image(name="img_B", dependencies=[":files_B"])
files(name="files_B", sources=["files/**"])
"""
),
"src/b/Dockerfile": "FROM base",
"src/b/files/b01": "",
"src/b/files/b02": "",
# Mixed
"src/c/BUILD": dedent(
"""\
docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"])
"""
),
"src/c/Dockerfile": "FROM base",
}
)
# We want files_B in build context for img_B
assert_build_context(
rule_runner,
Address("src/b", target_name="img_B"),
expected_files=["src/b/Dockerfile", "src/b/files/b01", "src/b/files/b02"],
)
# We want files_A in build context for img_A, but not files_B
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=["src/a/Dockerfile", "src/a/files/a01", "src/a/files/a02"],
)
# Mixed.
assert_build_context(
rule_runner,
Address("src/c", target_name="img_C"),
expected_files=[
"src/c/Dockerfile",
"src/a/files/a01",
"src/a/files/a02",
"src/b/files/b01",
"src/b/files/b02",
],
)
def test_from_image_build_arg_dependency(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/upstream/BUILD": dedent(
"""\
docker_image(
name="image",
repository="upstream/{name}",
instructions=["FROM alpine"],
)
"""
),
"src/downstream/BUILD": "docker_image(name='image')",
"src/downstream/Dockerfile": dedent(
"""\
ARG BASE_IMAGE=src/upstream:image
FROM $BASE_IMAGE
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/downstream", target_name="image"),
expected_files=["src/downstream/Dockerfile"],
build_upstream_images=True,
expected_interpolation_context={
"baseimage": {"tag": "latest"},
"stage0": {"tag": "latest"},
"build_args": {
"BASE_IMAGE": "upstream/image:latest",
},
},
)
def test_files_out_of_tree(rule_runner: RuleRunner) -> None:
# src/a:img_A -> res/static:files
rule_runner.write_files(
{
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=["res/static:files"])
"""
),
"res/static/BUILD": dedent(
"""\
files(name="files", sources=["!BUILD", "**/*"])
"""
),
"src/a/Dockerfile": "FROM base",
"res/static/s01": "",
"res/static/s02": "",
"res/static/sub/s03": "",
}
)
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=[
"src/a/Dockerfile",
"res/static/s01",
"res/static/s02",
"res/static/sub/s03",
],
)
def test_packaged_pex_path(rule_runner: RuleRunner) -> None:
# This test is here to ensure that we catch if there is any change in the generated path where
# built pex binaries go, as we rely on that for dependency inference in the Dockerfile.
rule_runner.write_files(
{
"src/docker/BUILD": """docker_image(dependencies=["src/python/proj/cli:bin"])""",
"src/docker/Dockerfile": """FROM python""",
"src/python/proj/cli/BUILD": """pex_binary(name="bin", entry_point="main.py")""",
"src/python/proj/cli/main.py": """print("cli main")""",
}
)
assert_build_context(
rule_runner,
Address("src/docker", target_name="docker"),
expected_files=["src/docker/Dockerfile", "src.python.proj.cli/bin.pex"],
)
def test_interpolation_context_from_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": "docker_image()",
"src/docker/Dockerfile": dedent(
"""\
FROM python:3.8
FROM alpine as interim
FROM interim
FROM scratch:1-1 as output
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
expected_interpolation_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
"build_args": {},
},
)
def test_synthetic_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
instructions=[
"FROM python:3.8",
"FROM alpine as interim",
"FROM interim",
"FROM scratch:1-1 as output",
]
)
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile.docker"],
expected_interpolation_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
"build_args": {},
},
)
def test_shell_source_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(dependencies=[":entrypoint", ":shell"])
shell_source(name="entrypoint", source="entrypoint.sh")
shell_sources(name="shell", sources=["scripts/**/*.sh"])
"""
),
"src/docker/Dockerfile": "FROM base",
"src/docker/entrypoint.sh": "",
"src/docker/scripts/s01.sh": "",
"src/docker/scripts/s02.sh": "",
"src/docker/scripts/random.file": "",
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=[
"src/docker/Dockerfile",
"src/docker/entrypoint.sh",
"src/docker/scripts/s01.sh",
"src/docker/scripts/s02.sh",
],
)
def test_build_arg_defaults_from_dockerfile(rule_runner: RuleRunner) -> None:
    # Test that only explicitly defined build args in the BUILD file or pants configuration use the
    # environment for their values.
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
extra_build_args=[
"base_version",
]
)
"""
),
"src/docker/Dockerfile": dedent(
"""\
ARG base_name=python
ARG base_version=3.8
FROM ${base_name}:${base_version}
ARG NO_DEF
ENV opt=${NO_DEF}
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
runner_options={
"env": {
"base_name": "no-effect",
"base_version": "3.9",
},
},
expected_files=["src/docker/Dockerfile"],
expected_interpolation_context={
"baseimage": {"tag": "${base_version}"},
"stage0": {"tag": "${base_version}"},
"build_args": {
# `base_name` is not listed here, as it was not an explicitly defined build arg.
"base_version": "3.9",
},
},
)
@pytest.mark.parametrize(
"dockerfile_arg_value, extra_build_arg_value, expect",
[
pytest.param(None, None, no_exception(), id="No args defined"),
pytest.param(
None,
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value for build arg",
),
pytest.param(None, "some default value", no_exception(), id="Default value for build arg"),
pytest.param("", None, no_exception(), id="No build arg defined, and ARG without default"),
pytest.param(
"",
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value from ARG",
),
pytest.param(
"", "some default value", no_exception(), id="Default value for build arg, ARG present"
),
pytest.param(
"some default value", None, no_exception(), id="No build arg defined, only ARG"
),
pytest.param("some default value", "", no_exception(), id="Default value from ARG"),
pytest.param(
"some default value",
"some other default",
no_exception(),
id="Default value for build arg, ARG default",
),
],
)
def test_undefined_env_var_behavior(
rule_runner: RuleRunner,
dockerfile_arg_value: str | None,
extra_build_arg_value: str | None,
expect: ContextManager,
) -> None:
dockerfile_arg = ""
if dockerfile_arg_value is not None:
dockerfile_arg = "ARG MY_ARG"
if dockerfile_arg_value:
dockerfile_arg += f"={dockerfile_arg_value}"
extra_build_args = ""
if extra_build_arg_value is not None:
extra_build_args = 'extra_build_args=["MY_ARG'
if extra_build_arg_value:
extra_build_args += f"={extra_build_arg_value}"
extra_build_args += '"],'
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
f"""\
docker_image(
{extra_build_args}
)
"""
),
"src/docker/Dockerfile": dedent(
f"""\
FROM python:3.8
{dockerfile_arg}
"""
),
}
)
with expect:
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
)
@pytest.fixture(scope="session")
def build_context() -> DockerBuildContext:
rule_runner = create_rule_runner()
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
extra_build_args=["DEF_ARG"],
instructions=[
"FROM python:3.8",
"ARG MY_ARG",
"ARG DEF_ARG=some-value",
],
)
"""
),
}
)
return assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile.docker"],
)
@pytest.mark.parametrize(
"fmt_string, result, expectation",
[
pytest.param(
"{build_args.MY_ARG}",
None,
pytest.raises(
ValueError,
match=(r"The build arg 'MY_ARG' is undefined\. Defined build args are: DEF_ARG\."),
),
id="ARG_NAME",
),
pytest.param(
"{build_args.DEF_ARG}",
"some-value",
no_exception(),
id="DEF_ARG",
),
],
)
def test_build_arg_behavior(
build_context: DockerBuildContext,
fmt_string: str,
result: str | None,
expectation: ContextManager,
) -> None:
with expectation:
assert fmt_string.format(**build_context.interpolation_context) == result
def test_create_docker_build_context() -> None:
context = DockerBuildContext.create(
build_args=DockerBuildArgs.from_strings("ARGNAME=value1"),
snapshot=EMPTY_SNAPSHOT,
build_env=DockerBuildEnvironment.create({"ENVNAME": "value2"}),
dockerfile_info=DockerfileInfo(
address=Address("test"),
digest=EMPTY_DIGEST,
source="test/Dockerfile",
putative_target_addresses=(),
version_tags=("base latest", "stage1 1.2", "dev 2.0", "prod 2.0"),
build_args=DockerBuildArgs.from_strings(),
from_image_build_arg_names=(),
copy_sources=(),
),
)
assert list(context.build_args) == ["ARGNAME=value1"]
assert dict(context.build_env.environment) == {"ENVNAME": "value2"}
assert context.dockerfile == "test/Dockerfile"
assert context.stages == ("base", "dev", "prod")
| 32.042179 | 99 | 0.557975 | 0 | 0 | 0 | 0 | 4,042 | 0.221698 | 0 | 0 | 6,507 | 0.3569 |
969769f903879e83d04d75567c8891aa3f6d52df
| 726 |
py
|
Python
|
build_you/models/company.py
|
bostud/build_you
|
258a336a82a1da9efc102770f5d8bf83abc13379
|
[
"MIT"
] | null | null | null |
build_you/models/company.py
|
bostud/build_you
|
258a336a82a1da9efc102770f5d8bf83abc13379
|
[
"MIT"
] | null | null | null |
build_you/models/company.py
|
bostud/build_you
|
258a336a82a1da9efc102770f5d8bf83abc13379
|
[
"MIT"
] | null | null | null |
import enum
from sqlalchemy import Column, ForeignKey, String, JSON, Integer, Enum
from sqlalchemy.orm import relationship
from build_you.models.base import BaseModel
from build_you.database import Base
class Company(BaseModel, Base):
__tablename__ = 'company'
class Status(enum.Enum):
ACTIVE = 1
INACTIVE = 2
DELETED = 3
name = Column(String(length=255), unique=True, index=True)
owner_id = Column(Integer, ForeignKey('user.id'))
settings = Column(JSON, default={}, nullable=True)
status = Column(Enum(Status), default=Status.ACTIVE)
owner = relationship('User', back_populates='companies')
company_objects = relationship('BuildObject', back_populates='company')
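# Illustrative usage sketch (not part of the original module): assuming a configured
# SQLAlchemy session and an existing User row named `user`, the model could be used as
#
#   company = Company(name="Acme", owner_id=user.id, settings={"theme": "dark"})
#   session.add(company)
#   session.commit()
#   assert company.status == Company.Status.ACTIVE   # default declared above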
| 30.25 | 75 | 0.717631 | 519 | 0.714876 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.078512 |
9697bf800b99049dd85751a04350650f26e3d26b
| 293 |
py
|
Python
|
deeplab_resnet/__init__.py
|
tramper2/SIGGRAPH18SSS
|
9bf22fa242044edfcf11cc4a58b93c63fcc71ff0
|
[
"MIT"
] | 390 |
2018-07-30T08:41:49.000Z
|
2022-03-29T15:44:13.000Z
|
deeplab_resnet/__init__.py
|
tramper2/SIGGRAPH18SSS
|
9bf22fa242044edfcf11cc4a58b93c63fcc71ff0
|
[
"MIT"
] | 20 |
2018-08-15T14:51:29.000Z
|
2020-04-21T09:49:49.000Z
|
deeplab_resnet/__init__.py
|
tramper2/SIGGRAPH18SSS
|
9bf22fa242044edfcf11cc4a58b93c63fcc71ff0
|
[
"MIT"
] | 109 |
2018-08-04T05:58:23.000Z
|
2021-10-17T12:02:29.000Z
|
from .model import DeepLabResNetModel
from .hc_deeplab import HyperColumn_Deeplabv2
from .image_reader import ImageReader, read_data_list, get_indicator_mat, get_batch_1chunk, read_an_image_from_disk, tf_wrap_get_patch, get_batch
from .utils import decode_labels, inv_preprocess, prepare_label
| 73.25 | 145 | 0.880546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9697d247dc37a959099c3ca64ced69e9f31cf6d0
| 670 |
py
|
Python
|
ESPNet/commons/general_details.py
|
sanket1414/Priority-Based-Alert-System
|
89d61d43eab8d7251fe99796e657bc95da1cc48c
|
[
"MIT"
] | 1 |
2019-10-24T03:19:14.000Z
|
2019-10-24T03:19:14.000Z
|
ESPNet/commons/general_details.py
|
sanket1414/Priority-Based-Alert-System
|
89d61d43eab8d7251fe99796e657bc95da1cc48c
|
[
"MIT"
] | null | null | null |
ESPNet/commons/general_details.py
|
sanket1414/Priority-Based-Alert-System
|
89d61d43eab8d7251fe99796e657bc95da1cc48c
|
[
"MIT"
] | null | null | null |
# classification related details
classification_datasets = ['imagenet', 'coco']
classification_schedulers = ['fixed', 'clr', 'hybrid', 'linear', 'poly']
classification_models = ['espnetv2', 'dicenet', 'shufflenetv2']
classification_exp_choices = ['main', 'ablation']
# segmentation related details
segmentation_schedulers = ['poly', 'fixed', 'clr', 'linear', 'hybrid']
segmentation_datasets = ['pascal', 'city']
segmentation_models = ['espnetv2', 'dicenet']
segmentation_loss_fns = ['ce', 'bce']
# detection related details
detection_datasets = ['coco', 'pascal']
detection_models = ['espnetv2', 'dicenet']
detection_schedulers = ['poly', 'hybrid', 'clr', 'cosine']
| 35.263158 | 72 | 0.720896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.483582 |
9699ab49dc0c20db4bb4ee78fa2411605bb8f673
| 1,758 |
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/-762174762/win32profile.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1 |
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/9edeeb97ae7c1ec358f9620843984323739bcf3221eaa5ee1fd68961c7a6b26a/win32profile.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/9edeeb97ae7c1ec358f9620843984323739bcf3221eaa5ee1fd68961c7a6b26a/win32profile.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module win32profile
# from C:\Python27\lib\site-packages\win32\win32profile.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
PI_APPLYPOLICY = 2
PI_NOUI = 1
PT_MANDATORY = 4
PT_ROAMING = 2
PT_TEMPORARY = 1
# functions
def CreateEnvironmentBlock(*args, **kwargs): # real signature unknown
""" Retrieves environment variables for a user """
pass
def DeleteProfile(*args, **kwargs): # real signature unknown
""" Remove a user's profile """
pass
def ExpandEnvironmentStringsForUser(*args, **kwargs): # real signature unknown
""" Replaces environment variables in a string with per-user values """
pass
def GetAllUsersProfileDirectory(*args, **kwargs): # real signature unknown
""" Retrieve All Users profile directory """
pass
def GetDefaultUserProfileDirectory(*args, **kwargs): # real signature unknown
""" Retrieve profile path for Default user """
pass
def GetEnvironmentStrings(*args, **kwargs): # real signature unknown
""" Retrieves environment variables for current process """
pass
def GetProfilesDirectory(*args, **kwargs): # real signature unknown
""" Retrieves directory where user profiles are stored """
pass
def GetProfileType(*args, **kwargs): # real signature unknown
""" Returns type of current user's profile """
pass
def GetUserProfileDirectory(*args, **kwargs): # real signature unknown
""" Returns profile directory for a logon token """
pass
def LoadUserProfile(*args, **kwargs): # real signature unknown
""" Load user settings for a login token """
pass
def UnloadUserProfile(*args, **kwargs): # real signature unknown
""" Unload profile loaded by LoadUserProfile """
pass
# no classes
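# Illustrative sketch (not part of the generated stub): the signatures above are marked
# "unknown", so this only shows typical no-argument calls from pywin32 usage; treat the
# exact return types as assumptions.
#
#   import win32profile
#   profiles_root = win32profile.GetProfilesDirectory()   # e.g. r"C:\Users"
#   env = win32profile.GetEnvironmentStrings()            # mapping of current-process vars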
| 27.46875 | 78 | 0.711035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,002 | 0.569966 |