seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
27924886180
|
# Regex-based code that assigns 0/1 depending on the dynamics of the epidemic situation
import re
import json
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'Covid_dict.json')
countgooddyn = 0
countbaddyn = 0
sample_json = ''
with open("data1.json", "r", encoding="utf-8") as file:
sample_json+=file.read()
glossary = json.loads(sample_json)
print(len(glossary))
for date in glossary:
if len(glossary[date][0]) == 1:
countries = glossary[date][0]
text = glossary[date][1]
if re.findall(r'[Мм]иновал|[Оо]слабл[а-я]+|[Сс]нят[а-я]+|[Уу]пад[а-я]+|[Сс]ниж[а-я]+|[Вв]ыходит|[Сс]мягч[а-я]+|[Пп]ад[а-я]*|[Зз]амедл[а-я]+|[Уу]был[а-я]+|[Сс]нима[а-я]+', text):
for country in countries:
countries[country]["dyn"] = 1
countgooddyn += 1
        if re.findall(r'[Пп]ик[а]|[Вв]спышк[а-я]|[Пп]ревы[а-я]+|[Уу]велич[а-я]+|[А-Яа-я]+?рекорд[а-я]+|[Уу]худш[а-я]+|[Рр][ао]ст[а-я]+|[Зз]акры[а-я]+|[Вв]в[ео]д[а-я]т([а-я]+)?|[Мм]аксим[а-я]+|[Вв]ы?рост[а-я]+|[Пп]рирост[а-я]|[Сс]кач[а-я]+|более|снова|[Уу]сил[а-я]+|выросло', text):  # original had [Р-р], an unintended character range; [Рр] matches only 'Р'/'р'
for country in countries:
countries[country]["dyn"] = 0
countbaddyn += 1
print(glossary[date][0])
with open('Country_and_coord_and_dynFULL.json', 'w', encoding="utf-8") as file:
    json.dump(glossary, file, ensure_ascii=False)  # original wrote undefined `new_glossary`; `glossary` is updated in place above
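# A minimal sanity check (illustrative addition, not part of the original
# script): each alternative of the patterns above behaves like these probes.
assert re.search(r'[Сс]ниж[а-я]+', 'Заболеваемость снижается')     # "incidence is falling"  -> dyn = 1
assert re.search(r'[Уу]велич[а-я]+', 'Число случаев увеличилось')  # "case count increased" -> dyn = 0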
|
stefikh/map_COVID
|
code/4_dynamic_good_or_bad.py
|
4_dynamic_good_or_bad.py
|
py
| 1,709 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 33,
"usage_type": "call"
}
] |
70994868668
|
from django import template
register = template.Library()
#background: -webkit-gradient(linear, 0% 0%, 0% 100%, from({{ COLOR_H1_BACK_STOP }}), to({{ COLOR_H1_BACK_START }}));
#background: -webkit-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -moz-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -ms-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
#background: -o-linear-gradient(top, {{ COLOR_H1_BACK_START }}, {{ COLOR_H1_BACK_STOP }});
@register.simple_tag
def columned(num):
S='-moz-column-count:'+str(num)+';\n'
S+='-webkit-column-count:'+str(num)+';\n'
S+='column-count:'+str(num)+';'
return S
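# e.g. columned(3) returns:
#   -moz-column-count:3;
#   -webkit-column-count:3;
#   column-count:3;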
#def background_gradient(style,start,stop):
# gradient='linear-gradient('+style+','+start+','+stop+')'
@register.simple_tag
def background_gradient(style,*args):
colors=",".join(args);
gradient='linear-gradient('+style+','+colors+')'
S='background: '+gradient+';\n'
    # inverted relative to the others; this handles style=top, change if something else is needed
#S+='background: -webkit-gradient(linear, 0% 0%, 0% 100%, from('+stop+'), to('+start+'));'
for i in ["webkit","moz","ms","o"]:
S+='background: -'+i+'-'+gradient+';\n'
return S
@register.simple_tag
def border_radius(radius):
S='border-radius: '+radius+';'
for i in ["webkit","moz"]:
S+='\n-'+i+'-border-radius: '+radius+';'
return S
@register.simple_tag
def box_shadow(shadow):
S='box-shadow: '+shadow+';'
for i in ["webkit","moz"]:
S+='\n-'+i+'-box-shadow: '+shadow+';'
return S
@register.simple_tag
def border_radius_pos(pos,radius):
S=''
if pos in ["top","left","top-left"]:
S+='border-top-left-radius: '+radius+';\n'
S+='-moz-border-radius-topleft: '+radius+';\n'
        S+='-webkit-border-top-left-radius: '+radius+';\n'
if pos in ["top","right","top-right"]:
S+='border-top-right-radius: '+radius+';\n'
S+='-moz-border-radius-topright: '+radius+';\n'
        S+='-webkit-border-top-right-radius: '+radius+';\n'
if pos in ["bottom","left","bottom-left"]:
S+='border-bottom-left-radius: '+radius+';\n'
S+='-moz-border-radius-bottomleft: '+radius+';\n'
        S+='-webkit-border-bottom-left-radius: '+radius+';\n'
if pos in ["bottom","right","bottom-right"]:
S+='border-bottom-right-radius: '+radius+';\n'
S+='-moz-border-radius-bottomright: '+radius+';\n'
        S+='-webkit-border-bottom-right-radius: '+radius+';\n'
return S
@register.simple_tag
def text_rotation(degree):
S='transform: rotate('+degree+'deg);'
for i in ["webkit","ms"]:
S+='\n-'+i+'-transform: rotate('+degree+'deg);'
return S
@register.simple_tag
def icon_file_manager_levels(levels,step):
levels=int(levels)
step=float(step)
S=""
S+=", ".join(map(lambda x: ".iconlevel"+unicode(x),range(0,levels)))
S+=" {\n"
S+="vertical-align: bottom;\n"
S+="font-size: 1.1em;\n"
S+="}\n\n"
for n in range(1,levels):
S+=".iconlevel"+unicode(n)+" {\n"
S+="padding-left: %2.2fem;\n" % (n*step)
S+="}\n\n"
return S
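# Usage sketch (assuming this module is loadable as a template-tag library,
# as its sub_path santaclara_css/templatetags/css_tags.py suggests):
#
#   {% load css_tags %}
#   <div style="{% border_radius '8px' %}">...</div>
#
# border_radius('8px') returns:
#   border-radius: 8px;
#   -webkit-border-radius: 8px;
#   -moz-border-radius: 8px;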
|
chiara-paci/santaclara-css
|
santaclara_css/templatetags/css_tags.py
|
css_tags.py
|
py
| 3,207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.template.Library",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 3,
"usage_type": "name"
}
] |
44844122583
|
import torch
import numpy as np
class KBinsDiscretizer:
# simplified and modified version of KBinsDiscretizer from sklearn, see:
# https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09b/sklearn/preprocessing/_discretization.py#L21
def __init__(self, dataset, num_bins=100, strategy="uniform"):
self.strategy = strategy
self.n_bins = num_bins
self.feature_dim = dataset.shape[-1]
# compute edges for binning
self.bin_edges = self.__find_bin_edges(dataset) # [feature_dim, num_bins]
self.bin_centers = (self.bin_edges[:, 1:] + self.bin_edges[:, :-1]) * 0.5
# for beam search, to be in the same device (for speed)
self.bin_centers_torch = torch.from_numpy(self.bin_centers)
def __find_bin_edges(self, X):
if self.strategy == "uniform":
mins, maxs = X.min(axis=0), X.max(axis=0)
bin_edges = np.linspace(mins, maxs, self.n_bins + 1).T
elif self.strategy == "quantile":
quantiles = np.linspace(0, 100, self.n_bins + 1)
bin_edges = np.percentile(X, quantiles, axis=0).T
else:
            raise RuntimeError("Unknown strategy, should be uniform or quantile.")
return bin_edges
def encode(self, X, subslice=None):
if X.ndim == 1:
X = X[None]
if subslice is None:
bin_edges = self.bin_edges
else:
start, end = subslice
bin_edges = self.bin_edges[start:end]
# See documentation of numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.0e-5
atol = 1.0e-8
        Xt = np.zeros_like(X, dtype=np.int64)  # np.long was removed in NumPy 1.24; int64 preserves the intent
for jj in range(X.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation.
eps = atol + rtol * np.abs(X[:, jj])
Xt[:, jj] = np.digitize(X[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins - 1, out=Xt)
return Xt
def decode(self, Xt, subslice=None):
if Xt.ndim == 1:
Xt = Xt[None]
if subslice is None:
bin_centers = self.bin_centers
else:
start, end = subslice
bin_centers = self.bin_centers[start:end]
X = np.zeros_like(Xt, dtype=np.float64)
for jj in range(Xt.shape[1]):
X[:, jj] = bin_centers[jj, np.int_(Xt[:, jj])]
return X
def expectation(self, probs, subslice=None):
if probs.ndim == 1:
probs = probs[None]
# probs: [batch_size, num_dims, num_bins]
# bins: [1, num_dims, num_bins]
if torch.is_tensor(probs):
bin_centers = self.bin_centers_torch.unsqueeze(0)
else:
            bin_centers = self.bin_centers[None]  # numpy arrays have no .unsqueeze(); prepend the batch dim by indexing
if subslice is not None:
start, end = subslice
bin_centers = bin_centers[:, start:end]
assert probs.shape[1:] == bin_centers.shape[1:]
# expectation: [batch_size, num_dims]
exp = (probs * bin_centers).sum(axis=-1)
return exp
def to(self, device):
self.bin_centers_torch = self.bin_centers_torch.to(device)
def eval(self):
return self
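# Minimal usage sketch (illustrative, not from the original repo): a uniform
# encode/decode round trip; decoded values are the centers of the chosen bins.
if __name__ == "__main__":
    data = np.random.randn(1000, 3)            # [n_samples, feature_dim]
    disc = KBinsDiscretizer(data, num_bins=100, strategy="uniform")
    tokens = disc.encode(data[:5])             # integer bin indices, shape [5, 3]
    restored = disc.decode(tokens)             # bin centers, shape [5, 3]
    print(np.abs(restored - data[:5]).max())   # small quantization error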
|
Howuhh/faster-trajectory-transformer
|
trajectory/utils/discretization.py
|
discretization.py
|
py
| 3,344 |
python
|
en
|
code
| 90 |
github-code
|
6
|
[
{
"api_name": "torch.from_numpy",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.long",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.digitize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.int_",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "torch.is_tensor",
"line_number": 80,
"usage_type": "call"
}
] |
14493893608
|
# -*- coding: utf-8 -*- #
'''
--------------------------------------------------------------------------
# File Name: PATH_ROOT/train.py
# Author: JunJie Ren
# Version: v1.0
# Created: 2021/06/14
# Description: — — — — — — — — — — — — — — — — — — — — — — — — — — —
--> DD signal recognition (interpretable) series <--
-- Main training program; ports the earlier TensorFlow signal
recognition code to PyTorch and packages it as a project
-- TODO: the train() code needs modularising, especially the
metric logging and dataset handling
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Modules called: <0> PATH_ROOT/configs.py
<1> PATH_ROOT/dataset/RML2016.py
<2> PATH_ROOT/networks/MsmcNet.py
<3> PATH_ROOT/utils/strategy.py;plot.py
<4> PATH_ROOT/dataset/ACARS.py
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Function List: <0> train():
-- Main training routine: handles learning-rate adjustment, logging
and convergence-curve plotting; validates every n (=1) epochs and
keeps the model that performs best on the validation set
<1> eval():
-- Evaluates the current model on the test set
— — — — — — — — — — — — — — — — — — — — — — — — — — —
# Class List: None
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# History:
| <author> | <version> | <time> | <desc>
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<0> | JunJie Ren | v1.0 | 2020/06/14 | Reimplemented the earlier Keras code in PyTorch
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<1> | JunJie Ren | v1.1 | 2020/07/09 | Added the ACARS training option
--------------------------------------------------------------------------
'''
import os
import time
import torch
import numpy as np
import torch.nn as nn
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from configs import cfgs
from dataset.RML2016 import RMLDataset, loadNpy
from dataset.ACARS import ACARSDataset, loadNpy_acars
from networks.MsmcNet import MsmcNet_RML2016, MsmcNet_ACARS
from utils.strategy import step_lr, accuracy
from utils.plot import draw_curve
def train():
    ''' Main training routine for signal modulation classification '''
# model
if cfgs.model == "MsmcNet_RML2016":
model = MsmcNet_RML2016(num_classes=cfgs.num_classes)
elif cfgs.model == "MsmcNet_ACARS":
model = MsmcNet_ACARS(num_classes=cfgs.num_classes)
    else:
print('ERROR: No model {}!!!'.format(cfgs.model))
print(model)
    '''model = torch.nn.DataParallel(model) # reserved for multi-GPU use'''
model.cuda()
# Dataset
if cfgs.dataset_name == "RML2016.04c":
x_train, y_train, x_test, y_test = loadNpy(
cfgs.train_path,
cfgs.test_path,
cfgs.process_IQ
)
Dataset = RMLDataset
elif cfgs.dataset_name == "ACARS":
x_train, y_train, x_test, y_test = loadNpy_acars(
cfgs.train_path_x,
cfgs.train_path_y,
cfgs.test_path_x,
cfgs.test_path_y,
cfgs.process_IQ
)
Dataset = ACARSDataset
    else:
        print('ERROR: No Dataset {}!!!'.format(cfgs.dataset_name)) # original printed cfgs.model here
# BUG,BUG,BUG,FIXME
transform = transforms.Compose([
# transforms.ToTensor()
# waiting add
])
# Train data
    train_dataset = Dataset(x_train, y_train, transform=transform) # RML2016.10a dataset
dataloader_train = DataLoader(train_dataset, \
batch_size=cfgs.batch_size, \
num_workers=cfgs.num_workers, \
shuffle=True, \
drop_last=False)
# Valid data
valid_dataset = Dataset(x_test, y_test, transform=transform)
dataloader_valid = DataLoader(valid_dataset, \
batch_size=cfgs.batch_size, \
num_workers=cfgs.num_workers, \
shuffle=True, \
drop_last=False)
# log
if not os.path.exists('./log'):
os.makedirs('./log')
log = open('./log/log.txt', 'a')
log.write('-'*30+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+'-'*30+'\n')
log.write('model:{}\ndataset_name:{}\nnum_classes:{}\nnum_epoch:{}\nlearning_rate:{}\nsignal_len:{}\niter_smooth:{}\n'.format(
cfgs.model, cfgs.dataset_name, cfgs.num_classes, cfgs.num_epochs,
cfgs.lr, cfgs.signal_len, cfgs.iter_smooth))
# load checkpoint
if cfgs.resume:
model = torch.load(os.path.join('./checkpoints', cfgs.checkpoint_name))
# loss
    criterion = nn.CrossEntropyLoss().cuda() # cross-entropy loss
# train
sum = 0
train_loss_sum = 0
train_top1_sum = 0
max_val_acc = 0
train_draw_acc = []
val_draw_acc = []
lr = cfgs.lr
for epoch in range(cfgs.num_epochs):
ep_start = time.time()
# adjust lr
# lr = half_lr(cfgs.lr, epoch)
lr = step_lr(epoch, lr)
# optimizer FIXME
# optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=0.0002)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=lr, betas=(0.9, 0.999), weight_decay=0.0002)
model.train()
top1_sum = 0
for i, (signal, label) in enumerate(dataloader_train):
input = Variable(signal).cuda().float()
target = Variable(label).cuda().long()
output = model(input) # inference
            loss = criterion(output, target) # compute cross-entropy loss
optimizer.zero_grad()
            loss.backward() # backpropagate
optimizer.step()
            top1 = accuracy(output.data, target.data, topk=(1,)) # compute top-1 classification accuracy
train_loss_sum += loss.data.cpu().numpy()
train_top1_sum += top1[0]
sum += 1
top1_sum += top1[0]
if (i+1) % cfgs.iter_smooth == 0:
print('Epoch [%d/%d], Iter [%d/%d], lr: %f, Loss: %.4f, top1: %.4f'
%(epoch+1, cfgs.num_epochs, i+1, len(train_dataset)//cfgs.batch_size,
lr, train_loss_sum/sum, train_top1_sum/sum))
log.write('Epoch [%d/%d], Iter [%d/%d], lr: %f, Loss: %.4f, top1: %.4f\n'
%(epoch+1, cfgs.num_epochs, i+1, len(train_dataset)//cfgs.batch_size,
lr, train_loss_sum/sum, train_top1_sum/sum))
sum = 0
train_loss_sum = 0
train_top1_sum = 0
train_draw_acc.append(top1_sum/len(dataloader_train))
epoch_time = (time.time() - ep_start) / 60.
if epoch % cfgs.valid_freq == 0 and epoch < cfgs.num_epochs:
# eval
val_time_start = time.time()
val_loss, val_top1 = eval(model, dataloader_valid, criterion)
val_draw_acc.append(val_top1)
val_time = (time.time() - val_time_start) / 60.
print('Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, val_time: %.4f s, max_val_acc: %4f'
%(epoch+1, cfgs.num_epochs, val_loss, val_top1, val_time*60, max_val_acc))
print('epoch time: {}s'.format(epoch_time*60))
if val_top1[0].data > max_val_acc:
max_val_acc = val_top1[0].data
print('Taking snapshot...')
if not os.path.exists('./checkpoints'):
os.makedirs('./checkpoints')
torch.save(model, '{}/{}'.format('checkpoints', cfgs.checkpoint_name))
log.write('Epoch [%d/%d], Val_Loss: %.4f, Val_top1: %.4f, val_time: %.4f s, max_val_acc: %4f\n'
%(epoch+1, cfgs.num_epochs, val_loss, val_top1, val_time*60, max_val_acc))
draw_curve(train_draw_acc, val_draw_acc)
log.write('-'*40+"End of Train"+'-'*40+'\n')
log.close()
# validation
def eval(model, dataloader_valid, criterion):
sum = 0
val_loss_sum = 0
val_top1_sum = 0
model.eval()
for ims, label in dataloader_valid:
input_val = Variable(ims).cuda().float()
target_val = Variable(label).cuda()
output_val = model(input_val)
loss = criterion(output_val, target_val)
top1_val = accuracy(output_val.data, target_val.data, topk=(1,))
sum += 1
val_loss_sum += loss.data.cpu().numpy()
val_top1_sum += top1_val[0]
avg_loss = val_loss_sum / sum
avg_top1 = val_top1_sum / sum
return avg_loss, avg_top1
if __name__ == "__main__":
train()
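# NOTE: `configs.cfgs` is imported above but defined elsewhere in the repo.
# From the attributes accessed in train(), it must provide at least:
#   model, dataset_name, num_classes, num_epochs, lr, signal_len, iter_smooth,
#   batch_size, num_workers, process_IQ, train_path, test_path (or the
#   ACARS *_x/*_y paths), resume, checkpoint_name, valid_freq
# A hypothetical stand-in for quick experiments could be built with
# types.SimpleNamespace; the actual values live in PATH_ROOT/configs.py.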
|
jjRen-xd/PyOneDark_Qt_GUI
|
app/train.py
|
train.py
|
py
| 9,258 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "configs.cfgs.model",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "networks.MsmcNet.MsmcNet_RML2016",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.num_classes",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.model",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "networks.MsmcNet.MsmcNet_ACARS",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.num_classes",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.model",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.dataset_name",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "dataset.RML2016.loadNpy",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.train_path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.test_path",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.process_IQ",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "dataset.RML2016.RMLDataset",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.dataset_name",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "dataset.ACARS.loadNpy_acars",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.train_path_x",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.train_path_y",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.test_path_x",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.test_path_y",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.process_IQ",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "dataset.ACARS.ACARSDataset",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.model",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.batch_size",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_workers",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.batch_size",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_workers",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.model",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.dataset_name",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.num_classes",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.lr",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.signal_len",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.iter_smooth",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.resume",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.checkpoint_name",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.lr",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "utils.strategy.step_lr",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "utils.strategy.accuracy",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.iter_smooth",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.batch_size",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.batch_size",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.valid_freq",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "configs.cfgs.checkpoint_name",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "configs.cfgs.num_epochs",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "configs.cfgs",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "utils.plot.draw_curve",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "utils.strategy.accuracy",
"line_number": 211,
"usage_type": "call"
}
] |
29195553298
|
"""
Title: Explicit finger tapping sequence learning task [replication of Walker et al. 2002]
Author: Julia Wood, the University of Queensland, Australia
Code adapted from Tom Hardwicke's finger tapping task code: https://github.com/TomHardwicke/finger-tapping-task
Developed in Psychopy v2022.1.1
See my GitHub for further details: https://github.com/jrwood21
"""
import time
import pandas as pd
import numpy as np
import sys
import os
from psychopy import visual, event, core, gui, data
from pyglet.window import key
from num2words import num2words
os.chdir(os.path.abspath('')) # change working directory to script directory
globalClock = core.Clock() # create timer to track the time since experiment started
# define sequences for finger tapping task
targ_seq_1 = '41324'
targ_seq_2 = '42314'
prac_seq = '12344'
### set up some useful functions ###
# Function to save messages to a log file
def saveToLog(logString, timeStamp=1):
f = open(logFile, 'a') # open our log file in append mode so don't overwrite with each new log
f.write(logString) # write the string they typed
if timeStamp != 0: # if timestamp has not been turned off
        f.write('// logged at %i seconds' % globalClock.getTime()) # write a timestamp (very coarse)
f.write('\n') # create new line
f.close() # close and "save" the log file
# An exit function to initiate if the 'end' key is pressed
def quitExp():
if 'logFile' in globals(): # if a log file has been created
saveToLog('User aborted experiment')
saveToLog('..........................................', 0)
if 'win' in globals(): # if a window has been created
win.close() # close the window
core.quit() # quit the program
# define function to check if filename exists, then create the next available version number
def uniq_path(path):
fn, ext = os.path.splitext(path)
counter = 2
while os.path.exists(path):
path = fn + "_" + str(counter) + ext
counter += 1
return path
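# e.g. if 'P1_AJX_S1_pm-a.csv' already exists, uniq_path returns
# 'P1_AJX_S1_pm-a_2.csv' (then _3, _4, ... on further collisions)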
# Finger tapping task function
def fingerTapping(n_trials, tap_targetSequence, sequenceType):
## Intro screen ##
saveToLog('Presenting introduction screen') # save info to log
win.setColor('#000000', colorSpace='hex') # set background colour to black
win.flip() # display
generalText.setText(
'TASK INSTRUCTIONS\n\nPlace the fingers of your LEFT hand on the keys 1, 2, 3, and 4. You will be shown a sequence of 5 digits %(sequence)s, and the computer will start counting down until you start. \n\nOnce the countdown has completed and the screen turns green, type %(sequence)s over and over as QUICKLY and as ACCURATELY as possible. \n\nYou will have 30 seconds to type %(sequence)s as many times as possible. Stop when the screen turns red again. You will get 30 seconds to rest before the next trial. \n\nPress the spacebar when you are ready for the countdown to begin.' % {'sequence': tap_targetSequence})
generalText.draw()
win.flip() # display
event.waitKeys(keyList=["space"]) # wait for a spacebar press before continuing
event.clearEvents() # clear the event buffer
win.flip() # blank the screen first
trials = range(1, n_trials + 1)
saveToLog('Running finger tapping task. %i trials with target sequence %s' % (len(trials), tap_targetSequence)) # save info to log
for thisTrial in trials: # begin rest block
win.setColor('#ff0000', colorSpace='hex') # set background colour to red
win.flip() # display
if thisTrial == 1: # if this is first trial
restClock = core.CountdownTimer(10) # start timer counting down from 10
else: # for all other trials
saveToLog('Resting') # save info to log
restClock = core.CountdownTimer(30) # start timer counting down from 30
sequenceText.setText(tap_targetSequence) # set up sequence text
sequenceText.setAutoDraw(True) # display sequence text continuously
timerText.setAutoDraw(True) # display timer text continuously
win.flip()
while restClock.getTime() > 0: # loop continues until trial timer ends
count = restClock.getTime() # get current time from clock
timerText.setText(num2words(np.ceil(count))) # set timer text to the current time
win.flip() # display timer text
if event.getKeys(['end']): # checks for the key 'end' on every refresh so user can quit at any point
quitExp() # initiate quit routine
# begin tapping task
saveToLog('Trial: %i' % thisTrial) # save info to log
win.setColor('#89ba00', colorSpace='hex') # set background colour to green
win.flip() # display the green background
tap_stream = [] # clear previous sequence keypresses from the stream
event.clearEvents() # this makes sure the key buffer is cleared, otherwise old key presses might be recorded
trialClock = core.CountdownTimer(30) # start timer counting down from 30
timerText.setText('Tap as fast as you can!') # set timer text to the current time
win.flip() # display the text
k = 0 # set up marker index
endTrial = False # a trigger to end the trial when True (deployed when the timer runs out)
while endTrial == False: # while trigger has not been deployed
# display incremental markers across the screen from left to right as the user presses accepted keys
if k == 0: # start at beginning of marker index
# start markers incrementing from left to right and append key presses to tap_stream
while k < len(listOfMarkers) - 1 and endTrial == False: # until the markers reach the far side of the screen
if trialClock.getTime() <= 0: # if timer has run out
endTrial = True # deploy the trigger to end the trial
break # and break out of this loop
elif event.getKeys(['end']): # if user presses end key
if thisTrial == 1 and not metaData['practice mode']: # during trial 1: save partial data collected from trial 1
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trial1' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial 1. Experiment aborted with %s seconds of trial 1 remaining' % trialClock.getTime())
saveToLog('Trial 1 data saved with filename: %s' %fileName)
elif thisTrial > 1 and not metaData['practice mode']: # or during a later trial: save partial and complete trial data collected
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial %s' % thisTrial)
saveToLog('Experiment aborted with %s seconds of this trial remaining' % trialClock.getTime())
saveToLog('Partial trial data saved with filename: %s' %fileName)
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trials' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
store_out.to_csv(fileName)
saveToLog('Data from complete trials saved with filename: %s' %fileName)
quitExp() # AND quit the program
elif event.getKeys('1'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(1) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('2'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(2) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('3'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(3) # record the key press
k += 1 # move on to the next marker
elif event.getKeys('4'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(True) # turn this marker on
win.flip() # display
tap_stream.append(4) # record the key press
k += 1 # move on to the next marker
# start markers incrementing from right to left and append keypresses to tap_stream:
elif k == len(listOfMarkers) - 1 and endTrial == False:
while k > 0:
if trialClock.getTime() <= 0: # if timer has run out
endTrial = True # deploy the trigger to end the trial
break # and break out of this loop
elif event.getKeys(['end']): # if user presses end key
if thisTrial == 1 and not metaData['practice mode']: # during trial 1: save partial data collected from trial 1
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trial1' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial 1. Experiment aborted with %s seconds of trial 1 remaining' % trialClock.getTime())
saveToLog('Trial 1 data saved with filename: %s' %fileName)
elif thisTrial > 1 and not metaData['practice mode']: # or during a later trial: save partial and complete trial data collected
quit_dict = {'stream': [tap_stream],
'trial': thisTrial}
quit_df = pd.DataFrame(quit_dict, index=[0])
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
quit_df.to_csv(fileName)
saveToLog('User pressed end key during trial %s' % thisTrial)
saveToLog('Experiment aborted with %s seconds of this trial remaining' % trialClock.getTime())
saveToLog('Partial trial data saved with filename: %s' %fileName)
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_quitExp_trials' + '.csv'
if os.path.exists(fileName):
fileName = uniq_path(fileName)
store_out.to_csv(fileName)
saveToLog('Data from complete trials saved with filename: %s' %fileName)
quitExp() # AND quit the program
elif event.getKeys('1'): # checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(1) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('2'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(2) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('3'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(3) # record the key press
k -= 1 # move on to the next marker
elif event.getKeys('4'): #checks for key on every refresh
listOfMarkers[k].setAutoDraw(False) # turn this marker off
win.flip() # display contents of video buffer
tap_stream.append(4) # record the key press
k -= 1 # move on to the next marker
# turn off all markers during the rest block
for marker in listOfMarkers: # for each marker
marker.setAutoDraw(False) # turn off
win.setColor('#ff0000', colorSpace='hex') # set background colour to red
win.flip() # display red background
output = patternDetect(stream_in=tap_stream, targetSequence_in=tap_targetSequence) # run the pattern detector to calculate correct sequences, errors and accuracy
# gather all relevant data for this trial
newRow = {'participant': metaData['participant'],
'allocation': metaData['participant allocation'],
'session': metaData['session number'],
'session_time': metaData['session time'],
'target_sequence': tap_targetSequence,
'sequence_type': sequenceType,
'trial': thisTrial, # record which trial number
'stream': [tap_stream], # stream of key presses entered by participant
'n_correct': output['n_correct']}
# 'errors': output['errors'], # Unhash these lines if you want them to be reported in the csv output file.
# 'accuracy': output['accuracy']}
# store all trial data in df. Each trial is stored in a new row
if thisTrial == 1:
store_out = pd.DataFrame(newRow, index=[0])
elif thisTrial > 1:
            store_out = pd.concat([store_out, pd.DataFrame(newRow, index=[0])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
# after all trials are complete:
sequenceText.setAutoDraw(False) # turn off the sequence text
timerText.setAutoDraw(False) # turn off the timer text
win.flip() # clear the display
return store_out
# Function for analysing the response stream
def patternDetect(stream_in, targetSequence_in):
# pre-load some variables
det_targetSequence = list(map(int, list(targetSequence_in))) # convert target sequence to list of integers
det_stream = list(stream_in) # convert stream of key presses to a list
n_correct = float(0) # store for number of correct sequences per trial
'''
Define stores for error tracking. I did not use these metrics in my study design, but I have left them in the code, in case
they are appropriate for other experimental designs. Redefine, remove or ignore them as necessary for your study design.
'''
contiguousError = 0 # store for cumulative errors
errors = float(0) # store for errors
# note that n_correct + errors = total sequences
i = 0 # start pattern detection at first element of keypress stream:
while i < len(det_stream): # search through every item in stream
# for all key presses up to the final 5 (or any other target sequence length)
if i <= len(det_stream) - len(det_targetSequence):
# for any value in the stream where it + the next 4 keypresses match the target sequence:
if det_stream[i:(i + len(det_targetSequence))] == det_targetSequence:
n_correct += 1 # record a correct pattern completed
i += len(det_targetSequence) # adjust position to skip forward by length of targetSequence
# Then add any accumulated errors to the total error count and clear the contiguous error count
if contiguousError >= 1: # check if there are contiguous errors we have not yet accounted for
errors += 1 # add an error to the total count
contiguousError = 0 # reset contiguous error count
# otherwise, if the next sequence length of items in the stream does not match the target sequence:
elif det_stream[i:(i + len(det_targetSequence))] != det_targetSequence:
contiguousError += 1 # record a 'contiguous error'
i += 1 # adjust index forward by 1
# when contiguous error count reaches 5 incorrect keypresses in a row (i.e., the correct sequence doesn't follow 5 keypresses in a row)
# OR if the final item of the stream does not match the target sequence:
if contiguousError == 5 or i == len(det_stream):
errors += 1 # add an error to the total count
contiguousError = 0 # reset contiguous error count
# now deal with last items of the stream (a special case, see 'method' above)
else:
# get last items
lastItems = det_stream[i:]
# get subset of target sequence of same length as last items
sequenceSubset = det_targetSequence[:len(lastItems)]
# Addition of PARTIAL correct sequences at end of stream:
while lastItems != None: # while there are additional items left to check
if lastItems == sequenceSubset: # if lastItems match target sequence subset
n_correct += float(len(lastItems)) / float(len(det_targetSequence)) # record fractional sequence
if contiguousError >= 1: # check if there are errors we have not yet recorded
errors += 1 # add an error to total
contiguousError = 0 # reset contiguous error count
lastItems = None # force failure of inner while loop by updating lastItems
i = len(det_stream) # force failure of outer while loop by updating i
else: # if lastItems do not match target sequence
contiguousError += 1 # add 1 to contiguous error count
# when contiguous error count reaches 5 incorrect keypresses in a row or if this is final item
if contiguousError == 5 or len(lastItems) == 1:
errors += 1 # add an error to total
contiguousError = 0 # reset contiguous error count
if len(lastItems) == 1: # if this is the final item
lastItems = None # force failure of inner while loop by updating lastItems
i = len(det_stream) # force failure of outer while loop by updating i
else: # else if there are still items left to check
lastItems = lastItems[1:] # drop the first item from lastItems
sequenceSubset = sequenceSubset[:-1] # drop the last item from the sequence subset
# integrity check
if n_correct == 0:
print('Issue with this stream - n_correct is zero')
accuracy = float('nan')
else:
accuracy = 1 - errors / n_correct # calculate accuracy
    # NOTE: this accuracy definition matches Hardwicke et al. 2016. I did not use this metric in my study design, but I have
    # left the code in the script in case it is suitable for other study designs. Remove, redefine or ignore as necessary.
return {'n_correct': n_correct, 'errors': errors, 'accuracy': accuracy}
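# Worked example (illustrative): with targetSequence_in='41324', the stream
# [4,1,3,2,4, 4,1,3,2,4, 4,1,3] yields two full matches plus a partial 3/5
# match at the end of the stream, so n_correct = 2.6, errors = 0, accuracy = 1.0.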
### Collect and store meta-data about the experiment session ###
expName = 'Explicit finger tapping sequence task' # define experiment name
date = time.strftime("%d %b %Y %H:%M:%S", time.localtime()) # get date and time
metaData = {'participant': '',
'session number': [1, 2],
'session time': ['pm-a', 'pm-b', 'am'],
'practice mode': False,
'use automated counter-balancing': True,
'researcher': 'JW',
'location': '304, Seddon North, UQ, Brisbane'} # set up info for infoBox gui
infoBox = gui.DlgFromDict(dictionary=metaData,
title=expName,
order=['participant', 'session number', 'session time',
'practice mode','use automated counter-balancing']) # display gui to get info from user
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# check if participant dir exists, and if not, create one:
if not os.path.isdir('data'):
os.mkdir('data')
if not os.path.isdir('data' + os.path.sep + 'fingertapping'):
os.mkdir('data' + os.path.sep + 'fingertapping')
p_dir = 'data' + os.path.sep + 'fingertapping' + os.path.sep + 'P' + str(metaData['participant'])
if not os.path.isdir(p_dir):
os.mkdir(p_dir)
if not metaData['practice mode']: # if this is not practice mode:
if metaData['use automated counter-balancing']: # and user has chosen to use automated counter-balancing:
cb = {'participant allocation': ['AJX', 'AJY', 'AKX', 'AKY',
'BJX', 'BJY', 'BKX', 'BKY']} # set up info for infoBox gui
infoBox = gui.DlgFromDict(dictionary=cb,
title='Choose counter-balancing parameters') # display gui to get info from user
metaData.update({'participant allocation': cb['participant allocation']})
if not infoBox.OK: # if user hit cancel
quitExp() # quit
elif not metaData['use automated counter-balancing']: # or if user has chosen to manually select sequence type:
seq_dict = {'use sequence': ['sequence_1', 'sequence_2'],
'number of trials': ''}
infoBox = gui.DlgFromDict(dictionary=seq_dict,
title='Select sequence to run experiment') # display gui to get info from user
metaData.update({'participant allocation': 'manual_selection',
'sequence type': '%s' % seq_dict['use sequence'],
'number of trials': '%s' % seq_dict['number of trials']})
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# build filename for this participant's data
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '.csv'
# is this an existing participant? If so we will create a new file name to store the data under
if os.path.exists(fileName): # if they are an existing participant
# confirm that user knows sessions already exist for this participant's current session and time and advise filename will be different:
myDlg = gui.Dlg()
myDlg.addText(
"This participant has existing files for this session time in the directory! Click ok to continue or cancel to abort. \n\n NOTE: if you choose to continue, files will be stored under a different file name.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
quitExp()
# redefine file name by iteratively appending a number so that existing files are not overwritten
fileName = uniq_path(fileName)
metaData.update({'expName': expName, 'date': date}) # record the experiment date and name in the metaData
# check if logfile exists for this participant. If not, create one:
logFile = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_" + str(metaData['participant allocation']) +'_log.txt'
if not os.path.exists(logFile):
with open(logFile, 'w') as fp:
pass
# save metaData to log
saveToLog('..........................................', 0)
saveToLog('experiment: %s' % (metaData['expName']), 0)
saveToLog('researcher: %s' % (metaData['researcher']), 0)
saveToLog('location: %s' % (metaData['location']), 0)
saveToLog('date: %s' % (metaData['date']), 0)
saveToLog('participant: %s' % (metaData['participant']), 0)
saveToLog('session: %s' % (metaData['session number']), 0)
saveToLog('session time: %s' % (metaData['session time']), 0)
saveToLog('participant allocation: %s' % (metaData['participant allocation']), 0)
saveToLog(' ', 0)
else: # otherwise, if it is practice mode:
logFile = p_dir + os.path.sep + 'P' + str(metaData['participant']) + '_practice_log.txt'
if not os.path.exists(logFile):
with open(logFile, 'w') as fp:
pass
# ask user to define number of trials
prac_dict = {'number of trials': ''}
infoBox = gui.DlgFromDict(dictionary=prac_dict,
title='enter number of trials') # display gui to get info from user
if not infoBox.OK: # if user hit cancel
quitExp() # quit
# build filename for this participant's practice data
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '_PRACTICE' + '.csv'
# is this an existing participant? If so we will create a new file name to store the data under
if os.path.exists(fileName): # if existing participant
# check user knows sessions already exist for this participant's current session and time:
myDlg = gui.Dlg()
myDlg.addText(
"This participant has existing files for this session time in the directory! Click ok to continue or cancel to abort. \n\n NOTE: if you choose to continue, files will be stored under a different file name.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
quitExp()
# redefine file name by iteratively appending a number so that the original files are not overwritten
fileName = uniq_path(fileName)
metaData.update({'participant allocation': 'practice'})
# save metaData to log
saveToLog('..........................................', 0)
saveToLog('experiment: %s' % (expName), 0)
saveToLog('researcher: %s' % (metaData['researcher']), 0)
saveToLog('location: %s' % (metaData['location']), 0)
saveToLog('date: %s' % (date), 0)
saveToLog('participant: %s' % (metaData['participant']), 0)
saveToLog('session: %s' % (metaData['session number']), 0)
saveToLog('session time: %s' % (metaData['session time']), 0)
saveToLog('participant allocation: %s' % (metaData['participant allocation']), 0)
saveToLog(' ', 0)
### Prepare stimuli etc ###
win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False, ## UPDATE SIZE TO MATCH YOUR CURRENT MONITOR SETTINGS
monitor='testMonitor', color=(-1,-1,-1), colorSpace='rgb', units='pix') # setup the Window
generalText = visual.TextStim(win=win, ori=0, name='generalText', text='', font=u'Arial', pos=[0, 0], height=35,
wrapWidth=920, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # general text
sequenceText = visual.TextStim(win=win, ori=0, name='sequenceText', text='', font=u'Arial', pos=[0, 250], height=90,
wrapWidth=None, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # sequence text
timerText = visual.TextStim(win=win, ori=0, name='sequenceText', text='', font=u'Arial', pos=[0, -130], height=40,
wrapWidth=800, color=(1,1,1), colorSpace='rgb', opacity=1, depth=0.0) # timer text
# set up the markers that increment across the screen - generate enough so that they cover the full range of the window
listOfMarkers = [] # store for white markers
windowSize = list(win.size) # get window size
for i in range(int(-windowSize[0] / 2), int(windowSize[0] / 2), int(windowSize[0] / 40)): # generate markers to cover whole screen
i += 25 # add a slight horizontal adjustment to ensure markers do not go off screen
listOfMarkers.append(visual.Circle(win, radius=15, edges=32, pos=[i, 0], fillColor='white')) # generate the markers
# for monitoring key state (only need this if using markers)
keys = key.KeyStateHandler()
win.winHandle.push_handlers(keys)
saveToLog('Set up complete') # save info to log
### set-up complete ###
### run the experiment ###
if metaData['practice mode']: # if user has chosen practice mode
res = fingerTapping(n_trials=int(prac_dict['number of trials']), tap_targetSequence = prac_seq, sequenceType ='practice') # run practice sequence
elif not metaData['practice mode']: # if it is not practice mode
if not metaData['use automated counter-balancing']: # AND the user has chosen to manually select the sequence type:
if seq_dict['use sequence'] == 'sequence_1': # EITHER run task with sequence 1:
res = fingerTapping(n_trials=int(seq_dict['number of trials']), tap_targetSequence = targ_seq_1, sequenceType = 'sequence_1')
elif seq_dict['use sequence'] == 'sequence_2': # OR run task with sequence 2:
res = fingerTapping(n_trials=int(seq_dict['number of trials']), tap_targetSequence = targ_seq_2, sequenceType = 'sequence_2')
elif metaData['use automated counter-balancing']: # OR if user has selected to use automated counter balancing:
# NOTE: these allocations are specific to my study (each letter represents one type of grouping/randomisation variable). Adapt groupings to suit individual experiments
####### X ORDER
if ((metaData['participant allocation'] == 'AJX') or (metaData['participant allocation'] == 'BJX') or (metaData['participant allocation'] == 'AKX') or (metaData['participant allocation'] == 'BKX')):
# session 1
if int(metaData['session number']) == 1:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # sequence 1
                elif metaData['session time'] in ('pm-b', 'am'):  # original "== 'pm-b' or 'am'" was always truthy
                    res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # sequence 1
# session 2
elif int(metaData['session number']) == 2:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
                elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
####### Y ORDER
elif ((metaData['participant allocation'] == 'AJY') or (metaData['participant allocation'] == 'BJY') or (metaData['participant allocation'] == 'AKY') or (metaData['participant allocation'] == 'BKY')):
# session 1
if int(metaData['session number']) == 1:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
                elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence = targ_seq_2, sequenceType='sequence_2') # sequence 2
# session 2
elif int(metaData['session number']) == 2:
if metaData['session time'] == 'pm-a':
res = fingerTapping(n_trials = 12, tap_targetSequence = targ_seq_1, sequenceType='sequence_1') # sequence 1
                elif metaData['session time'] in ('pm-b', 'am'):
res = fingerTapping(n_trials = 4, tap_targetSequence= targ_seq_1, sequenceType='sequence_1') # sequence 1
## End screen ##
saveToLog('Presenting end screen') # save info to log
win.setColor('#000000', colorSpace='hex') # set background colour to black
win.flip()
generalText.setText(u'Thank you. That is the end of this section. Please inform the researcher you have finished.')
generalText.draw()
win.flip() # present video buffer
event.waitKeys(keyList=['end']) # wait for the end key to be pressed before continuing
event.clearEvents() # clear the event buffer
saveToLog('Experiment presentation over') # save info to log
### Finished running the experiment ###
### Save and clean up ###
win.close()
'''
Save the data as a csv file. The loop below also checks if saving is not possible, usually because the file is already open, and asks user to close if this is the case
if this does not resolve the situation, attempt is made to save the data with a different filename.
'''
while True:
try:
res.to_csv(fileName)
saveToLog('Data saved with file name: %s' % fileName) # save info to log
break
except: # if cannot save data, likely because file is already open, ask user to close
saveToLog('Problem encountered saving data - requesting user close open data files...') # save info to log
myDlg = gui.Dlg()
myDlg.addText(
"Unable to store data. Try closing open excel files and then click ok. Press cancel to attempt data storage to new file.")
myDlg.show() # show dialog and wait for OK or Cancel
if not myDlg.OK: # if the user pressed cancel
fileName = p_dir + os.path.sep + 'P' + str(metaData['participant']) + "_ProblemSaving_" + str(metaData['participant allocation']) + '_S' + str(metaData['session number']) + '_' + str(metaData['session time']) + '.csv'
saveToLog('Attempting to save data with different filename: %s' %fileName) # save info to log
try:
res.to_csv(fileName)
print('Data was saved with a different filename: %s' %fileName)
saveToLog('Data saved with file name: %s' % fileName) # save info to log
break
except:
saveToLog('Major error: Data could not be saved') # save info to log
quitExp() # quit the experiment
t = globalClock.getTime() # get run time of experiment
saveToLog('Total experiment runtime was %i seconds' % t) # record runtime to log
saveToLog('..........................................', 0)
# Shut down:
core.quit()
|
jrwood21/sleep_tacs_study_jw_gh
|
finger_tapping_task_jw.py
|
finger_tapping_task_jw.py
|
py
| 36,526 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.chdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "psychopy.core.Clock",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "psychopy.core.quit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "psychopy.event.waitKeys",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "psychopy.event.clearEvents",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "psychopy.core.CountdownTimer",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "psychopy.core.CountdownTimer",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "num2words.num2words",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "psychopy.event.clearEvents",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "psychopy.core.CountdownTimer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 169,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "psychopy.event.getKeys",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "psychopy.gui.DlgFromDict",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 348,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "psychopy.gui.DlgFromDict",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "psychopy.gui.DlgFromDict",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 368,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 377,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "psychopy.gui.Dlg",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 382,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 396,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 414,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 414,
"usage_type": "attribute"
},
{
"api_name": "psychopy.gui.DlgFromDict",
"line_number": 420,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 420,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "psychopy.gui.Dlg",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "psychopy.visual.Window",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "psychopy.visual.TextStim",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 457,
"usage_type": "name"
},
{
"api_name": "psychopy.visual.TextStim",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 459,
"usage_type": "name"
},
{
"api_name": "psychopy.visual.TextStim",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "psychopy.visual.Circle",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "psychopy.visual",
"line_number": 469,
"usage_type": "name"
},
{
"api_name": "pyglet.window.key.KeyStateHandler",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "pyglet.window.key",
"line_number": 472,
"usage_type": "name"
},
{
"api_name": "psychopy.event.waitKeys",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 530,
"usage_type": "name"
},
{
"api_name": "psychopy.event.clearEvents",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "psychopy.event",
"line_number": 531,
"usage_type": "name"
},
{
"api_name": "psychopy.gui.Dlg",
"line_number": 551,
"usage_type": "call"
},
{
"api_name": "psychopy.gui",
"line_number": 551,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 556,
"usage_type": "attribute"
},
{
"api_name": "psychopy.core.quit",
"line_number": 572,
"usage_type": "call"
},
{
"api_name": "psychopy.core",
"line_number": 572,
"usage_type": "name"
}
] |
28315455311
|
from typing import Union, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from gym import Env
from gym.spaces import Box
from ..agent import Agent
from . import ReplayBuffer
from .actor import Actor
from .critic import Critic
from .polyak_update import polyak_update
class TD3Agent(Agent):
def __init__(self, name, env: Env,
discounting_factor: float = 0.99,
batch_size: int = 32,
buffer_size: int = 50000,
start_learning: int = 1000,
learning_rate_actor: float = 0.0005,
learning_rate_critic: float = 0.001,
polyak_tau: float = 0.01,
hidden_sizes_s: Union[int, Tuple[int, ...]] = 128,
hidden_sizes_a: Union[int, Tuple[int, ...]] = 128,
hidden_sizes_shared: Union[int, Tuple[int, ...]] = 256,
hidden_sizes_actor: Union[int, Tuple[int, ...]] = (128, 128),
policy_noise: float = 0.2,
noise_clip: float = 0.5,
max_grad_norm: float = 0.5,
exploration_noise: float = 0.1,
policy_update_frequency: int = 10,
target_update_frequency: int = 10
):
super().__init__(name, 'TD3', env)
assert isinstance(self._env.action_space, Box), "Action space must be of type Box"
self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self._gamma = discounting_factor
self._memory = ReplayBuffer(buffer_size, self._device)
self.q1 = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q2 = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q1_target = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.q2_target = Critic(self.observation_shape,
self.action_shape,
hidden_sizes_s,
hidden_sizes_a,
hidden_sizes_shared,
self._device)
self.pi = Actor(self.observation_shape,
self.action_shape,
hidden_sizes_actor,
self._device)
self.pi_target = Actor(self.observation_shape,
self.action_shape,
hidden_sizes_actor,
self._device)
self.q1_target.load_state_dict(self.q1.state_dict())
self.q2_target.load_state_dict(self.q2.state_dict())
self.pi_target.load_state_dict(self.pi.state_dict())
self.q1_target.train(False)
self.q2_target.train(False)
self.pi_target.train(False)
self._q_optimizer = optim.Adam(list(self.q1.parameters()) + list(self.q2.parameters()), lr=learning_rate_critic)
self._pi_optimizer = optim.Adam(list(self.pi.parameters()), lr=learning_rate_actor)
self._batch_size = batch_size
self._start_learning = max(start_learning, batch_size)
self._policy_noise = policy_noise
self._noise_clip = noise_clip
self._max_grad_norm = max_grad_norm
self._exploration_noise = exploration_noise
self._policy_update_frequency = policy_update_frequency
self._target_update_frequency = target_update_frequency
self._tau = polyak_tau
self._q_loss = torch.tensor([0.0], device=self._device)  # torch.tensor (not the legacy Tensor constructor) honours device=
self._pi_loss = torch.tensor([0.0], device=self._device)
self._a_limits = torch.tensor(self._env.action_space.low, device=self._device),\
torch.tensor(self._env.action_space.high, device=self._device)
def find_action(self, observation, in_eval=False):
with torch.no_grad():
a = self.pi(torch.tensor(observation, dtype=torch.float, device=self._device)).detach().cpu().numpy()  # .cpu() before .numpy() so this also works on CUDA
if not in_eval:
a += np.random.normal(0, self._exploration_noise, size=self.action_shape)
a = a.clip(self._env.action_space.low, self._env.action_space.high)
return a.tolist()
def learn(self, observation, action, reward, next_observation, global_step):
self._memory.put((observation, action, reward, next_observation))
if self._memory.size() > self._start_learning:
s, a, r, s_prime = self._memory.sample(self._batch_size)
with torch.no_grad():
clipped_noise = torch.randn_like(a, device=self._device) * self._policy_noise
clipped_noise = clipped_noise.clamp(-self._noise_clip, self._noise_clip)
a_prime = self.pi_target(s_prime) + clipped_noise
a_prime = a_prime.clamp(*self._a_limits)
qf1_next_target = self.q1_target(s_prime, a_prime)
qf2_next_target = self.q2_target(s_prime, a_prime)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target)
next_q_value = r + self._gamma * min_qf_next_target
q1_l = F.mse_loss(self.q1(s, a), next_q_value)
q2_l = F.mse_loss(self.q2(s, a), next_q_value)
self._q_loss = 0.5 * (q1_l + q2_l)
# optimize the model
self._q_optimizer.zero_grad()
self._q_loss.backward()
nn.utils.clip_grad_norm_(list(self.q1.parameters()) + list(self.q2.parameters()), self._max_grad_norm)
self._q_optimizer.step()
if (global_step + 1) % self._policy_update_frequency == 0:
self._pi_loss = -self.q1(s, self.pi(s)).mean()
self._pi_optimizer.zero_grad()
self._pi_loss.backward()
nn.utils.clip_grad_norm_(list(self.pi.parameters()), self._max_grad_norm)
self._pi_optimizer.step()
if (global_step + 1) % self._target_update_frequency == 0:
polyak_update(self.q1.parameters(), self.q1_target.parameters(), self._tau)
polyak_update(self.q2.parameters(), self.q2_target.parameters(), self._tau)
polyak_update(self.pi.parameters(), self.pi_target.parameters(), self._tau)
def get_log_dict(self):
return {
'loss/q_loss': self._q_loss.item(),
'loss/pi_loss': self._pi_loss.item()
}
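if __name__ == '__main__':
    # Illustrative usage sketch (added for clarity; not part of the original file).
    # Assumes a continuous-action gym environment and that this module is imported
    # inside its package (the relative imports above require it); the env name and
    # the reset/step signature are assumptions, not repo code.
    import gym
    env = gym.make('Pendulum-v1')
    agent = TD3Agent('demo', env)
    obs = env.reset()
    for step in range(1000):
        action = agent.find_action(obs)
        next_obs, reward, done, _ = env.step(action)
        agent.learn(obs, action, reward, next_obs, step)
        obs = env.reset() if done else next_obs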
|
schobbejak/QMIX-Active-Wake-Control
|
agent/deep/td3.py
|
td3.py
|
py
| 6,983 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "agent.Agent",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "gym.Env",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "gym.spaces.Box",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "torch.device",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "critic.Critic",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "critic.Critic",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "critic.Critic",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "critic.Critic",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "actor.Actor",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "actor.Actor",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.randn_like",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "polyak_update.polyak_update",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "polyak_update.polyak_update",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "polyak_update.polyak_update",
"line_number": 141,
"usage_type": "call"
}
] |
43269450493
|
from django.conf.urls import include, url
from provisioner.views import ProvisionStatus, login
urlpatterns = [
url(r'^$', ProvisionStatus, name='home'),
url(r'login.*', login),
url(r'^events/', include('events.urls')),
url(r'^provisioner/', include('provisioner.urls')),
]
|
uw-it-aca/msca-provisioner
|
msca_provisioner/urls.py
|
urls.py
|
py
| 291 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "provisioner.views.ProvisionStatus",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "provisioner.views.login",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.include",
"line_number": 9,
"usage_type": "call"
}
] |
20823393672
|
from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
import re, md5
app = Flask(__name__)
app.secret_key = "MySessionSecretKey1"
mysql = MySQLConnector( app, "the_wall")
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route( "/" )
def lr():
# session['user_id'] = False
if session.get('user_id'):  # .get avoids a KeyError when the session key is unset
return redirect( "/wall" )
return render_template( "index.html" )
# VIEW MESSAGES AND COMMENTS
@app.route( "/wall" )
def wall():
if not session.get('user_id'):
return render_template( "index.html" )
query = "SELECT first_name, id FROM users WHERE id = :id"
q_p = { 'id': session['user_id'] }
user = {}
user = mysql.query_db( query, q_p )[0]
query = "SELECT first_name, last_name, message, DATE_FORMAT(messages.created_at, '%M %d, %Y') AS message_date, messages.id, user_id FROM messages JOIN users ON users.id = messages.user_id ORDER BY messages.created_at DESC"
messages = mysql.query_db( query )
query = "SELECT users.first_name, users.last_name, comments.message_id, comment, DATE_FORMAT(comments.created_at, '%M %d, %Y') AS comment_date FROM comments JOIN users ON comments.user_id = users.id ORDER BY comments.created_at ASC"
comments = mysql.query_db( query )
return render_template( "wall.html", user = user, messages = messages, comments = comments )
# POST A MESSAGE TO START A DISCUSSION
@app.route( "/post_message", methods = ['POST'] )
def post_message():
query = "INSERT INTO messages( message, user_id, created_at, updated_at ) VALUES( :message, :user_id, NOW(), NOW() )"
q_p = {
'message': request.form['message'],
'user_id': session['user_id']
}
mysql.query_db( query, q_p )
flash( "Your message has been posted" )
return redirect( "/wall" )
# POST A COMMENT IN RESPONCE TO A MESSAGE
@app.route( "/post_comment/<message_id>", methods = ['POST'])
def post_comment( message_id ):
query = "INSERT INTO comments( comment, user_id, message_id, created_at, updated_at ) VALUES( :comment, :user_id,:message_id, NOW(), NOW() )"
q_p = {
'comment': request.form['comment'],
'user_id': session['user_id'],
'message_id': message_id
}
mysql.query_db( query, q_p )
return redirect( "/wall" )
# DELETE MESSAGE
@app.route( "/delete_message" )
def delete_message():
flash ("delete command received!")
return redirect( "/wall" )
# LOGIN
@app.route( "/authorization", methods = ["POST"] )
def authorization():
# EMAIL VALIDATION
if not email_regex.match( request.form['email'] ):
flash( "Invalid email" )
else:
query = "SELECT * FROM users WHERE users.email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
user = mysql.query_db( query, q_p )
if not user:
flash( "Email " + request.form['email'] + " is not registered with any user" )
else:
pw_h = md5.new( request.form['pw'] ).hexdigest()
if user[0]['password'] != pw_h: # PASSWORD VALIDATION
flash( "Wrong password" )
else: # SUCCESSFUL LOGIN
session['user_id']= user[0]['id']
return redirect( "/wall" )
return redirect( "/" )
# SIGN UP
@app.route( "/signup", methods = ["POST"] )
def signup():
error = False
# FORM INPUT VALIDATIONS
# VALIDATE FIRST NAME
if len( request.form['first_name'] ) < 2: # NAME LENGTH
error = True
flash( "First name is too short" )
elif not str.isalpha( str( request.form['first_name'] ) ): # NAME CONVENTIONS
error = True
flash( "Invalid characters in the first name" )
# VALIDATE LAST NAME
if len( request.form['last_name'] ) < 2: # NAME LENGTH
error = True
flash( "Last name is too short" )
elif not str.isalpha( str( request.form['last_name'] ) ): # NAME CONVENTIONS
error = True
flash( "Invalid characters in the last name" )
# VALIDATE EMAIL
if not email_regex.match( request.form['email'] ): # EMAIL CONVENTIONS
error = True
flash( "Invalid email" )
else: # CHECK IF EMAIL IS ALREADY IN USE
# email = request.form['email']
query = "SELECT email FROM users WHERE users.email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
existing_email = mysql.query_db( query, q_p )
if existing_email:
error = True
flash( "Email " + request.form['email'] + " is already in use" )
# VALIDATE PASSWORD CONVENTIONS AND REPEAT
if len( str( request.form['pw'] ) ) < 8:
error = True
flash( "Password should be at least 8 characters long")
elif request.form['pw'] != request.form['rpt_pw']:
error = True
flash( "Repeat password does not match")
if error:
return redirect( "/" )
else: # ADD NEW USER INTO THE DATABASE
query = "INSERT INTO users( first_name, last_name, email, password, created_at, updated_at ) VALUES( :first_name, :last_name, :email, :pw_h, NOW(), NOW() )"
q_p = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'email': request.form['email'],
'pw_h': md5.new( request.form['pw'] ).hexdigest()
}
mysql.query_db( query, q_p )
flash( "Your user account has been saved" )
# FETCH THE NEW USER ID FROM THE DATABASE FOR SESSION LOGIN
query = "SELECT id FROM users WHERE email = :email LIMIT 1"
q_p = { 'email': request.form['email'] }
session['user_id']= mysql.query_db( query, q_p )[0]['id']
return redirect( "/wall" )
@app.route( "/logout", methods = ["POST"])
def logout():
session['user_id'] = False
return redirect( "/" )
app.run( debug = True )
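# Note (added): this app targets Python 2 (it imports the stand-alone `md5`
# module). A Python 3 equivalent of the hashing call would be:
#   import hashlib
#   pw_h = hashlib.md5(request.form['pw'].encode('utf-8')).hexdigest()
# MD5 matches the existing schema here but is not a recommended password hash;
# a salted scheme (bcrypt/argon2) would be preferable.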
|
ruslanvs/The_Wall
|
server.py
|
server.py
|
py
| 5,933 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "mysqlconnection.MySQLConnector",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.flash",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "md5.new",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "md5.new",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 156,
"usage_type": "call"
}
] |
17815024172
|
#!/usr/bin/env python3
"""Tool to update Conan dependencies to the latest"""
import argparse
import json
import os
import re
import subprocess
def main():
"""
Read Conan dependencies, look for updates, and update the conanfile.py with updates
"""
parser = argparse.ArgumentParser()
parser.add_argument("--repo", help="Repo name of the package to update", required=True)
command_args = parser.parse_args()
fullpath = os.path.join(os.getcwd(), command_args.repo)
with open(os.path.join(fullpath, "conanfile.py"),
"r", encoding="utf-8", newline="") as conan_file:
conan_file_content = conan_file.read()
packages = []
package_strings = re.findall(r'requires\("(.*?)/(.*?)@', conan_file_content)
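# e.g. a line such as requires("zlib/1.2.13@user/channel") yields ("zlib", "1.2.13")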
for package_string in package_strings:
package = {
"name": package_string[0],
"version": package_string[1],
}
packages.append(package)
for package in packages:
conan_inspect_output = subprocess.run("conan inspect . --format json",
cwd=f"conan-recipes/recipes/{package['name']}",
shell=True, check=True, stdout=subprocess.PIPE)
conan_inspect_json = json.loads(conan_inspect_output.stdout.decode("utf-8"))
package["latest_version"] = conan_inspect_json["version"]
old_package = f"{package['name']}/{package['version']}"
new_package = f"{package['name']}/{package['latest_version']}"
if old_package != new_package and old_package in conan_file_content:
conan_file_content = conan_file_content.replace(old_package, new_package)
print("Replace:")
print(f" {old_package}")
print("With:")
print(f" {new_package}")
print()
with open(os.path.join(fullpath, "conanfile.py"),
"w", encoding="utf-8", newline="") as conan_file:
conan_file.write(conan_file_content)
if __name__ == "__main__":
main()
|
ssrobins/tools
|
update_conan_packages.py
|
update_conan_packages.py
|
py
| 2,066 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
}
] |
6501962901
|
from flask import request
from mobile_endpoint.backends.manager import get_dao
from mobile_endpoint.case.case_processing import process_cases_in_form
from mobile_endpoint.extensions import requires_auth
from mobile_endpoint.form.form_processing import create_xform, get_instance_and_attachments, get_request_metadata
from mobile_endpoint.views import ota_mod
from mobile_endpoint.views.response import get_open_rosa_response
@ota_mod.route('/receiver/<domain>', methods=['POST'])
@requires_auth
def form_receiver(domain):
return _receiver(domain, backend='sql')
@ota_mod.route('/couch-receiver/<domain>', methods=['POST'])
@requires_auth
def couch_receiver(domain):
return _receiver(domain, backend='couch')
@ota_mod.route('/mongo-receiver/<domain>', methods=['POST'])
@requires_auth
def mongo_receiver(domain):
return _receiver(domain, backend='mongo')
def _receiver(domain, backend):
dao = get_dao(backend)
instance, attachments = get_instance_and_attachments(request)
request_meta = get_request_metadata(request)
request_meta['domain'] = domain
xform_lock = create_xform(instance, attachments, request_meta, dao)
with xform_lock as xform:
case_result = None
if xform.doc_type == 'XFormInstance':
case_result = process_cases_in_form(xform, dao)
dao.commit_atomic_submission(xform, case_result)
return get_open_rosa_response(xform, None, None)
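# Illustrative client-side sketch (added; not part of the repo). The host, URL
# prefix, credentials and payload below are assumptions:
#
#   import requests
#   requests.post('http://localhost:5000/ota/receiver/my-domain',
#                 data=xform_xml, auth=('user', 'password'),
#                 headers={'Content-Type': 'text/xml'})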
|
dimagi/mobile-endpoint
|
prototype/mobile_endpoint/views/receiver.py
|
receiver.py
|
py
| 1,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mobile_endpoint.views.ota_mod.route",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.views.ota_mod",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.extensions.requires_auth",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.views.ota_mod.route",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.views.ota_mod",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.extensions.requires_auth",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.views.ota_mod.route",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.views.ota_mod",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.extensions.requires_auth",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "mobile_endpoint.backends.manager.get_dao",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.form.form_processing.get_instance_and_attachments",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "argument"
},
{
"api_name": "mobile_endpoint.form.form_processing.get_request_metadata",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "mobile_endpoint.form.form_processing.create_xform",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.case.case_processing.process_cases_in_form",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "mobile_endpoint.views.response.get_open_rosa_response",
"line_number": 44,
"usage_type": "call"
}
] |
19240299148
|
import numpy as np
import torch
import random
import time
smp = torch.nn.Softmax(dim=0)
smt = torch.nn.Softmax(dim=1)
def get_T_global_min(args, record, max_step = None, T0 = None, p0 = None, lr = 0.1, NumTest = None, all_point_cnt = 15000):
if max_step is None:
max_step = args.max_iter
if NumTest is None:
NumTest = args.G
KINDS = args.num_classes
all_point_cnt = np.min((all_point_cnt,int(len(record)*0.9)))
print(f'Sample {all_point_cnt} instances in each round')
p_estimate = [[] for _ in range(3)]
p_estimate[0] = torch.zeros(KINDS)
p_estimate[1] = torch.zeros(KINDS, KINDS)
p_estimate[2] = torch.zeros(KINDS, KINDS, KINDS)
for idx in range(NumTest):
print(idx, flush=True)
sel_loc = np.random.permutation(record.shape[1])[:3]
record_sel = record[:, sel_loc]
# print(f'sel_loc is {sel_loc}')
cnt_y_3 = count_y_known2nn(KINDS, record_sel, all_point_cnt)
for i in range(3):
cnt_y_3[i] /= all_point_cnt
p_estimate[i] = p_estimate[i] + cnt_y_3[i] if idx != 0 else cnt_y_3[i]
for j in range(3):
p_estimate[j] = p_estimate[j] / NumTest
args.device = set_device()
loss_min, E_calc, P_calc, T_init = calc_func(KINDS, p_estimate, False, args.device, max_step, T0, p0, lr = lr)
E_calc = E_calc.cpu().numpy()
P_calc = P_calc.cpu().numpy()
return E_calc, P_calc
def error(T, T_true):
error = np.sum(np.abs(T-T_true)) / np.sum(np.abs(T_true))
return error
def set_device():
if torch.cuda.is_available():
_device = torch.device("cuda")
else:
_device = torch.device("cpu")
print(f'Current device is {_device}', flush=True)
return _device
def distCosine(x, y):
"""
:param x: m x k array
:param y: n x k array
:return: m x n array
"""
xx = np.sum(x ** 2, axis=1) ** 0.5
x = x / xx[:, np.newaxis]
yy = np.sum(y ** 2, axis=1) ** 0.5
y = y / yy[:, np.newaxis]
dist = 1 - np.dot(x, y.transpose()) # 1 - cosine distance
return dist
def count_real(KINDS, T, P, mode, _device = 'cpu'):
# time1 = time.time()
P = P.reshape((KINDS, 1))
p_real = [[] for _ in range(3)]
p_real[0] = torch.mm(T.transpose(0, 1), P).transpose(0, 1)
# p_real[2] = torch.zeros((KINDS, KINDS, KINDS)).to(_device)
p_real[2] = torch.zeros((KINDS, KINDS, KINDS))
temp33 = torch.tensor([])
for i in range(KINDS):
Ti = torch.cat((T[:, i:], T[:, :i]), 1)
temp2 = torch.mm((T * Ti).transpose(0, 1), P)
p_real[1] = torch.cat([p_real[1], temp2], 1) if i != 0 else temp2
for j in range(KINDS):
Tj = torch.cat((T[:, j:], T[:, :j]), 1)
temp3 = torch.mm((T * Ti * Tj).transpose(0, 1), P)
temp33 = torch.cat([temp33, temp3], 1) if j != 0 else temp3
# adjust the order of the output (N*N*N), keeping consistent with p_estimate
t3 = []
for p3 in range(KINDS):
t3 = torch.cat((temp33[p3, KINDS - p3:], temp33[p3, :KINDS - p3]))
temp33[p3] = t3
if mode == -1:
for r in range(KINDS):
p_real[2][r][(i+r+KINDS)%KINDS] = temp33[r]
else:
p_real[2][mode][(i + mode + KINDS) % KINDS] = temp33[mode]
temp = [] # adjust the order of the output (N*N), keeping consistent with p_estimate
for p1 in range(KINDS):
temp = torch.cat((p_real[1][p1, KINDS-p1:], p_real[1][p1, :KINDS-p1]))
p_real[1][p1] = temp
return p_real
def func(KINDS, p_estimate, T_out, P_out, N,step, LOCAL, _device):
eps = 1e-2
eps2 = 1e-8
eps3 = 1e-5
loss = torch.tensor(0.0).to(_device) # define the loss
P = smp(P_out)
# loss = loss + 0.1*torch.norm(P.view(-1) - torch.tensor([0.51441996, 0.34073234, 0.08246922, 0.06237848]))
# loss = loss + 0.1 * torch.norm(P[3]-0.1) + 0.1 * torch.norm(P[2]-0.1)
# P = P_out
T = smt(T_out)
mode = random.randint(0, KINDS-1)
mode = -1
# Reuse the count_real computation to get, from the current T and P, the model's first-, second- and third-order consensus probabilities (shapes N, N*N, N*N*N)
p_temp = count_real(KINDS, T.to(torch.device("cpu")), P.to(torch.device("cpu")), mode, _device)
weight = [1.0,1.0,1.0]
# weight = [2.0,1.0,1.0]
for j in range(3): # || P1 || + || P2 || + || P3 ||
p_temp[j] = p_temp[j].to(_device)
loss += weight[j] * torch.norm(p_estimate[j] - p_temp[j]) #/ np.sqrt(N**j)
if step > 100 and LOCAL and KINDS != 100:
loss += torch.mean(torch.log(P+eps))/10
return loss
def calc_func(KINDS, p_estimate, LOCAL, _device, max_step = 501, T0=None, p0 = None, lr = 0.1):
# init
# _device = torch.device("cpu")
N = KINDS
eps = 1e-8
if T0 is None:
T = 1 * torch.eye(N) - torch.ones((N,N))
# T[-1] = torch.ones(N)
else:
T = T0
if p0 is None:
P = torch.ones((N, 1), device = None) / N + torch.rand((N,1), device = None)*0.1 # P:0-9 distribution
# P[2:] -= 5.0
# P = torch.tensor([0.4,0.4,0.1,0.1])
else:
P = p0
T = T.to(_device)
P = P.to(_device)
p_estimate = [item.to(_device) for item in p_estimate]
print(f'using {_device} to solve equations')
T.requires_grad = True
P.requires_grad = True
optimizer = torch.optim.Adam([T, P], lr = lr)
# train
loss_min = 100.0
T_rec = torch.zeros_like(T)
P_rec = torch.zeros_like(P)
time1 = time.time()
for step in range(max_step):
if step:
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = func(KINDS, p_estimate, T, P, N,step, LOCAL, _device)
if loss < loss_min and step > 5:
loss_min = loss.detach()
T_rec = T.detach()
P_rec = P.detach()
# if step % 100 == 0:
# print('loss {}'.format(loss))
# print(f'step: {step} time_cost: {time.time() - time1}')
# print(f'T {np.round(smt(T.cpu()).detach().numpy()*100,1)}', flush=True)
# print(f'P {np.round(smp(P.cpu().view(-1)).detach().numpy()*100,1)}', flush=True)
# # print(f'P {np.round((P.cpu().view(-1)).detach().numpy()*100,1)}', flush=True)
# time1 = time.time()
return loss_min, smt(T_rec).detach(), smp(P_rec).detach(), T_rec.detach()
def count_y(KINDS, feat_cord, label, cluster_sum):
# feat_cord = torch.tensor(final_feat)
cnt = [[] for _ in range(3)]
cnt[0] = torch.zeros(KINDS)
cnt[1] = torch.zeros(KINDS, KINDS)
cnt[2] = torch.zeros(KINDS, KINDS, KINDS)
feat_cord = feat_cord.cpu().numpy()
dist = distCosine(feat_cord, feat_cord)
max_val = np.max(dist)
am = np.argmin(dist,axis=1)
for i in range(cluster_sum):
dist[i][am[i]] = 10000.0 + max_val
min_dis_id = np.argmin(dist,axis=1)
for i in range(cluster_sum):
dist[i][min_dis_id[i]] = 10000.0 + max_val
min_dis_id2 = np.argmin(dist,axis=1)
for x1 in range(cluster_sum):
cnt[0][label[x1]] += 1
cnt[1][label[x1]][label[min_dis_id[x1]]] += 1
cnt[2][label[x1]][label[min_dis_id[x1]]][label[min_dis_id2[x1]]] += 1
return cnt
def count_y_known2nn(KINDS, label_list, cluster_sum=None):
if cluster_sum is not None:
sample = np.random.choice(range(label_list.shape[0]), cluster_sum, replace=False)
label_list = label_list[sample]
cnt = [[] for _ in range(3)]
cnt[0] = torch.zeros(KINDS)
cnt[1] = torch.zeros(KINDS, KINDS)
cnt[2] = torch.zeros(KINDS, KINDS, KINDS)
for i in range(cluster_sum):
cnt[0][label_list[i][0]] += 1
cnt[1][label_list[i][0]][label_list[i][1]] += 1
cnt[2][label_list[i][0]][label_list[i][1]][label_list[i][2]] += 1
return cnt
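if __name__ == '__main__':
    # Minimal smoke test (added for illustration; not part of the original file).
    # Builds a toy record of (label, 1st-NN label, 2nd-NN label) triples and checks
    # that the consensus counts from count_y_known2nn normalise to probabilities.
    KINDS, n = 3, 1000
    record = np.random.randint(0, KINDS, size=(n, 3))
    cnt = count_y_known2nn(KINDS, record, cluster_sum=n)
    print(cnt[0] / n)          # first-order label frequencies, shape (KINDS,)
    print((cnt[1] / n).shape)  # second-order consensus, shape (KINDS, KINDS)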
|
UCSC-REAL/fair-eval
|
hoc.py
|
hoc.py
|
py
| 7,838 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Softmax",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.mm",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.mm",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.mm",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.log",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.rand",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros_like",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 226,
"usage_type": "call"
}
] |
32766695147
|
from django.shortcuts import render, reverse, redirect
from django.views.generic import View
from django.views.generic.edit import CreateView
import requests
import re
count = 6
# Create your views here.
def home(request):
template_name = 'home.html'
return render(request, template_name=template_name)
def getPainting(request):
template_name = 'arts.html'
prelink = "https://drive.google.com/uc?export=view&id="
if request.method == "POST":
global count
# request.POST['id']
imgid = count + 1
count = imgid  # persist the increment; without this the `global count` declaration has no effect
name = request.POST['name']
link = request.POST['link']
linkid = re.search(r"\bd\/\w+[^/]([A-Za-z0-9-_])*", link)
link = prelink + linkid.group()[2:]
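# Example (added, illustrative): for a share link such as
#   https://drive.google.com/file/d/1AbC_dEf-123/view?usp=sharing
# the regex matches "d/1AbC_dEf-123" and [2:] strips the leading "d/",
# giving prelink + "1AbC_dEf-123" as a direct-view URL.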
requests.post('https://kvdvse6qr3.execute-api.ap-south-1.amazonaws.com/img/image',
json = {'imgId':f'{imgid}',
'altText': f'{name}',
'imgUrl': f'{link}'})
allImages = requests.get("https://kvdvse6qr3.execute-api.ap-south-1.amazonaws.com/img/images")
return render(request, template_name=template_name, context = { 'images': allImages.json()['images'] })
# class getPaintingView(View):
# template_name = 'arts.html'
# def get(self, request):
# return render(request, self.template_name)
# def post(self, request):
# print(request)
# class addPaintingView(CreateView):
# template_name = 'addArt.html'
# def get(self, request):
# return render(request, self.template_name)
|
SaahilS468/Serverless-API
|
image/views.py
|
views.py
|
py
| 1,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 32,
"usage_type": "call"
}
] |
74773385148
|
import numpy as np
import os
import torch
from typing import List, Tuple
from tqdm import tqdm
from datetime import datetime, timedelta
import pickle
import matplotlib.pyplot as plt
# -------------------- Colorize ------------------------------------------
"""A set of common utilities used within the environments. These are
not intended as API functions, and will not remain stable over time.
"""
import numpy as np
import matplotlib.colors as colors
color2num = dict(gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38)
def colorize(string, color, bold=False, highlight=False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
# Import six here so that `utils` has no import-time dependencies.
# We want this since we use `utils` during our import-time sanity checks
# that verify that our dependencies (including six) are actually present.
import six
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(six.u(str(num)))
if bold:
attr.append(six.u('1'))
attrs = six.u(';').join(attr)
return six.u('\x1b[%sm%s\x1b[0m') % (attrs, string)
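# Example (added, illustrative):
#   print(colorize('training finished', 'green', bold=True))
# wraps the string in "\x1b[32;1m...\x1b[0m", which ANSI terminals render as
# bold green text.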
def calc_iou(times_gt, time):
a_s, a_e = times_gt
b_s, b_e = time
if b_s > a_e or a_s > b_e:
return 0
else:
o_s = max(a_s,b_s)
o_e = min(a_e,b_e)
intersection = o_e - o_s
u_s = min(a_s,b_s)
u_e = max(a_e,b_e)
union = u_e - u_s
return intersection/float(union)
def green(s):
return colorize(s, 'green', bold=True)
def blue(s):
return colorize(s, 'blue', bold=True)
def red(s):
return colorize(s, 'red', bold=True)
def magenta(s):
return colorize(s, 'magenta', bold=True)
def colorize_mat(mat, hsv):
"""
Colorizes the values in a 2D matrix MAT
to the color as defined by the color HSV.
The values in the matrix modulate the 'V' (or value) channel.
H,S (hue and saturation) are held fixed.
HSV values are assumed to be in range [0,1].
Returns an uint8 'RGB' image.
"""
mat = mat.astype(np.float32)
m, M = np.min(mat), np.max(mat)
v = (mat - m) / (M - m)
h, s = hsv[0] * np.ones_like(v), hsv[1] * np.ones_like(v)
hsv = np.dstack([h, s, v])
rgb = (255 * colors.hsv_to_rgb(hsv)).astype(np.uint8)
return rgb
# -------------------- / Colorize ------------------------------------------
def gpu_initializer(gpu_id):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
global device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: ', device)
return device
|
hannahbull/slrtp2022_t3
|
utils.py
|
utils.py
|
py
| 2,912 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "six.u",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "six.u",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "six.u",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "six.u",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.ones_like",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors.hsv_to_rgb",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 105,
"usage_type": "attribute"
}
] |
43600893416
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
from modeltranslation.admin import (
TranslationAdmin, TranslationTabularInline, TranslationStackedInline,
TabbedTranslationAdmin
)
from .models import (
SiteSettings, FooterSettings, NavigationMenu, NavigationLinks
)
class HeaderSettingsAdminMixin(object):
"""
Mixin class that separates the SEO data into its own fieldset in the admin panel
"""
def get_fieldsets(self, request, obj=None):
seo_fields = ['left_side_title_en', 'left_side_title_ru',
'right_side_title_en', 'right_side_title_ru',
'right_side_description_en', 'right_side_description_ru',
'button_text_en', 'button_text_ru',
'button_link_en', 'button_link_ru']
if self.fieldsets:
return self.fieldsets
fields = [
x for x in self.get_fields(request, obj) if not x in seo_fields
]
return [
(None, {'fields': fields}), ('HeaderSettings', {
'fields': seo_fields
})
]
@admin.register(SiteSettings)
class SiteSettingsAdmin(admin.ModelAdmin):
fields = ('favicon', 'logo', 'preloader')
@admin.register(FooterSettings)
class FooterSettingsAdmin(TabbedTranslationAdmin):
class Media:
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
'modeltranslation/js/tabbed_translation_fields.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
class NavigationLinksTabularInline(
SortableInlineAdminMixin, TranslationStackedInline):
model = NavigationLinks
extra = 0
@admin.register(NavigationMenu)
class NavigationMenuAdmin(TabbedTranslationAdmin):
list_display = ['name', 'menu_type']
inlines = (NavigationLinksTabularInline,)
class Media:
js = (
'http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js',
'modeltranslation/js/tabbed_translation_fields.js',
'js/admin/admin_navigation_menu.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
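# Illustrative sketch (added; not part of the original file): HeaderSettingsAdminMixin
# above is defined but unused here; it would be combined with a translation-aware
# admin so the header/SEO fields land in their own fieldset, e.g.:
#
#   @admin.register(SomePageModel)  # hypothetical model
#   class SomePageAdmin(HeaderSettingsAdminMixin, TabbedTranslationAdmin):
#       pass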
|
CrazyChief/acidbro
|
core/admin.py
|
admin.py
|
py
| 2,524 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "models.SiteSettings",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "modeltranslation.admin.TabbedTranslationAdmin",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "models.FooterSettings",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "adminsortable2.admin.SortableInlineAdminMixin",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "modeltranslation.admin.TranslationStackedInline",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "models.NavigationLinks",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "modeltranslation.admin.TabbedTranslationAdmin",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "models.NavigationMenu",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 61,
"usage_type": "name"
}
] |
12981024226
|
#!/usr/bin/env python
"""
Pymodbus Synchronous Client Example to showcase Device Information
--------------------------------------------------------------------------
This client demonstrates the use of Device Information to get information
about servers connected to the client. This is part of the MODBUS specification,
and uses the MEI 0x2B 0x0E request / response.
"""
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
# from pymodbus.client.sync import ModbusUdpClient as ModbusClient
# from pymodbus.client.sync import ModbusSerialClient as ModbusClient
# --------------------------------------------------------------------------- #
# import the request
# --------------------------------------------------------------------------- #
from pymodbus.mei_message import ReadDeviceInformationRequest
from pymodbus.device import ModbusDeviceIdentification
# --------------------------------------------------------------------------- #
# configure the client logging
# --------------------------------------------------------------------------- #
import logging
FORMAT = ('%(asctime)-15s %(threadName)-15s '
'%(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
UNIT = 0x1
def run_sync_client():
# ------------------------------------------------------------------------#
# choose the client you want
# ------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#
# If you use the UDP or TCP clients, you can override the framer being used
# to use a custom implementation (say RTU over TCP). By default they use
# the socket framer::
#
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
#
# It should be noted that you can supply an ipv4 or an ipv6 host address
# for both the UDP and TCP clients.
#
# There are also other options that can be set on the client that controls
# how transactions are performed. The current ones are:
#
# * retries - Specify how many retries to allow per transaction (default=3)
# * retry_on_empty - Is an empty response a retry (default = False)
# * source_address - Specifies the TCP source address to bind to
#
# Here is an example of using these options::
#
# client = ModbusClient('localhost', retries=3, retry_on_empty=True)
# ------------------------------------------------------------------------#
client = ModbusClient('localhost', port=5020)
# from pymodbus.transaction import ModbusRtuFramer
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
# client = ModbusClient(method='binary', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='ascii', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='rtu', port='/dev/ptyp0', timeout=1,
# baudrate=9600)
client.connect()
# ------------------------------------------------------------------------#
# specify slave to query
# ------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
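    # For example (an added sketch, not part of the original walkthrough),
    # ordinary register reads take the same unit id:
    #
    #     rr = client.read_holding_registers(0, 2, unit=UNIT)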
log.debug("Reading Device Information")
information = {}
rr = None
while not rr or rr.more_follows:
next_object_id = rr.next_object_id if rr else 0
rq = ReadDeviceInformationRequest(read_code=0x03, unit=UNIT,
object_id=next_object_id)
rr = client.execute(rq)
information.update(rr.information)
log.debug(rr)
print("Device Information : ")
for key in information.keys():
print(key, information[key])
# ----------------------------------------------------------------------- #
# You can also have the information parsed through the
    # ModbusDeviceIdentification class, which gets you a more usable way
# to access the Basic and Regular device information objects which are
# specifically listed in the Modbus specification
# ----------------------------------------------------------------------- #
di = ModbusDeviceIdentification(info=information)
print('Product Name : ', di.ProductName)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
if __name__ == "__main__":
run_sync_client()
|
renatosperlongo/pymodbus
|
examples/contrib/deviceinfo_showcase_client.py
|
deviceinfo_showcase_client.py
|
py
| 5,108 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pymodbus.client.sync.ModbusTcpClient",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pymodbus.mei_message.ReadDeviceInformationRequest",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pymodbus.device.ModbusDeviceIdentification",
"line_number": 102,
"usage_type": "call"
}
] |
40986191942
|
import random , sys , traceback
from time import sleep
from selenium import webdriver
import datetime
c=1;
browser = webdriver.Chrome('D:\\Python\\Bot Insta\\chromedriver')
browser.get('https://google.com')
while c== 1:
c=0
try:
browser.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[1]/ytd-topbar-logo-renderer/a/div[1]').click()
print('am rulat ')
except:
c=1
sleep(2)
print('sunte in exceptie ')
print('gata')
# while browser.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[1]/ytd-topbar-logo-renderer/a/div[1]')==[]:
# print("nu e bine nu gasim ce trebuie ")
# sleep(2)
# print('am gasit')
#browser.close()
#browser.quit()
|
mirceah99/Python-Bot-Insta
|
Teste.py
|
Teste.py
|
py
| 779 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
}
] |
32704679818
|
from django.urls import path
from .views import *
urlpatterns = [
path('', PostList.as_view(), name="post_list_url"),
path("search/", Search.as_view(), name='search_form_url'),
path("filter/<int:pk>", DateFilter.as_view(), name='date_filter_url'),
path("<slug:category>/", PostList.as_view(), name='post_by_category_url'),
path("<slug:category>/<slug:slug>/", PostDetail.as_view(), name='post_detail_url'),
]
|
djaffic/blog_project
|
news/urls.py
|
urls.py
|
py
| 429 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
}
] |
69958393149
|
import typing as T
import asyncio
import logging
import inspect
from functools import lru_cache
from . import types
from . import transport as _transport
from . import errors
from . import stub
from . import utils
from . import spec
logger = logging.getLogger('pjrpc.server')
class Service:
"""Receive request, routing, process and response to server"""
def _method_predicate(self, meth):
return inspect.iscoroutinefunction(meth) or callable(meth)
@lru_cache(maxsize=1024)
def _get_func(self, f_name: str):
for name, func in inspect.getmembers(self, self._method_predicate):
if name == f_name:
return func
raise errors.MethodNotFoundError()
def _check_args(self, args: T.Dict[str, T.Type], func: T.Callable):
#TODO: check default value
annotations = func.__annotations__
for k, v in args.items():
if k in annotations:
if type(v) is not annotations[k]:
raise errors.InvalidParamError()
async def __call__(
self,
request: types.Request,
) -> T.Union[spec.ErrorResponseMessage, spec.SuccessResponseMessage]:
target = self._get_func(request.method)
params = request.params or {}
self._check_args(params, target)
if not inspect.iscoroutinefunction(target):
target = utils.to_async()(target)
ret = await target(**params)
if not isinstance(request, spec.Notification):
return utils.make_response_from_data(
id=request.id,
result=ret,
)
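# Minimal usage sketch (an assumption, not part of the original module): a
# concrete service subclasses Service and exposes coroutine methods; annotated
# parameters are type-checked by _check_args before dispatch.
class EchoService(Service):
    async def echo(self, text: str) -> str:
        # the returned value becomes the JSON-RPC "result" field
        return text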
class Server:
def __init__(
self,
app_path: str,
host: str = '127.0.0.1',
port: int = 6969,
compress: bool = False,
):
self._app_cls = utils.load_app_from_string(app_path)
self._host = host
self._port = port
self._stub = stub.Stub(compress)
self._loop = asyncio.get_event_loop()
self._futures = {}
async def connection_handler(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
):
transport = _transport.ServerTransport(reader, writer, interval=2, alive=5)
async def dispatch_request(request):
if isinstance(request, list):
async def batch_request(requests):
app = self._app_cls()
tasks = []
for request in requests:
if isinstance(request, spec.Notification):
self._loop.create_task(app(request))
else:
f = self._loop.create_task(app(request))
tasks.append(f)
if len(tasks) == 0:
return None
                responses = await asyncio.wait(tasks)
return responses
return await batch_request(request)
return await self._app_cls()(request)
def on_request_done(fut):
err = fut.exception()
if err:
ret = utils.make_response_from_data(
error={'code': err.code, 'message': err.message})
else:
ret = fut.result()
self._loop.create_task(transport.send_message(self._stub.pack(ret)))
async for in_data in transport.messages():
try:
request = self._stub.unpack(in_data)
except errors.ParseError as error:
err_resp = utils.make_response_from_data(
error={'code': error.code, 'message': error.message})
out_data = self._stub.pack(err_resp)
self._loop.create_task(transport.send_message(out_data))
f = self._loop.create_task(dispatch_request(request))
f.add_done_callback(on_request_done)
def protocol_factory(self):
reader = asyncio.StreamReader(limit=1024, loop=self._loop)
protocol = asyncio.StreamReaderProtocol(
reader, self.connection_handler, loop=self._loop)
return protocol
async def start(self):
server = await self._loop.create_server(self.protocol_factory, self._host, self._port)
async with server:
logger.info('Server is starting on port %d ...', self._port)
await server.serve_forever()
|
magiskboy/pjrpc
|
pjrpc/core.py
|
core.py
|
py
| 4,436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "inspect.iscoroutinefunction",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "inspect.getmembers",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "functools.lru_cache",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "typing.Type",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "typing.Callable",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "inspect.iscoroutinefunction",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "asyncio.StreamReader",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "asyncio.StreamWriter",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "asyncio.wait",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "asyncio.StreamReader",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "asyncio.StreamReaderProtocol",
"line_number": 133,
"usage_type": "call"
}
] |
5824663901
|
"""
Flask app for testing the SMART on FHIR OAuth stuff
Build from this tutorial: http://docs.smarthealthit.org/tutorials/authorization/
And using requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/latest/index.html
"""
from flask import Flask, redirect, request, session
from requests_oauthlib import OAuth2Session
#from urllib import urlencode
import json
import logging
import http.client
import warnings
# Enable lots of debug logging
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Replace these with the values you get when you register you app in the SMART sandbox
client_id = "df23ba7c-3b2b-4b92-8aec-fbe73426d472"
client_secret = "AKBmOV4tIIs6C7y2Dgy6Idquo_NUgFYolDmOpTDOtt2Hr_Nw7RglPE2aeHzBI0cuEyJN2tDgwPLQe_A2aAqLQr8"
redirect_uri = "http://localhost:5000/callback"
# Scopes to request from the SMART server
scope = [ \
"openid", \
"patient/*.*", \
"profile", \
"launch" \
]
app = Flask(__name__)
@app.route('/')
def index():
return "SMART on FHIR test client - please either launch from the SMART sandbox, or <a href='/standalone'>click here to test a standalone launch</a>"
@app.route('/standalone')
def standalone():
session['serviceUri'] = "https://sb-fhir-stu3.smarthealthit.org/smartstu3/data"
    # Go to the server and get the auth endpoint URLs from its CapabilityStatement
getAuthEndpointFromServerConformance(session['serviceUri'])
# Now, start the authorization process against the auth endpoint
return authorize_user()
"""
This is the main launch URL called by the SMART on FHIR sandbox (or any SMART on FHIR enabled EPR)
"""
@app.route('/smart-app')
def launch():
# Get some launch parameters from the calling EHR system
serviceUri = request.args.get('iss') # https://sb-fhir-stu3.smarthealthit.org/smartstu3/data
launchContextId = request.args.get('launch')
# Store launch context in the session
session['launchContextId'] = launchContextId
session['serviceUri'] = serviceUri
print ("App launched from SMART sandbox, with issuer URL: "+serviceUri)
    # Go to the server and get the auth endpoint URLs from its CapabilityStatement
getAuthEndpointFromServerConformance(serviceUri)
# Now, start the authorization process against the auth endpoint
return authorize_user()
"""
Go to the specified FHIR server and retrieve its CapabilityStatement to obtain the OAuth details
"""
def getAuthEndpointFromServerConformance(serviceUri):
    # The issuer is the server endpoint - get its conformance profile to find the auth URL
conformanceResource = getRemoteResource(serviceUri)
# Parse the oauth URLs from the profile
conformanceJSON = json.loads(conformanceResource)
authorizeUrl = ''
tokenUrl = ''
# Nasty hacky unsafe parsing - perhaps look to use either the python fhir client, or a jsonpath library?
for entry in conformanceJSON["rest"][0]["security"]["extension"][0]["extension"]:
if entry['url'] == 'authorize':
authorizeUrl = entry['valueUri']
elif entry['url'] == 'token':
tokenUrl = entry['valueUri']
print ("Got an authorization URL from the capabilitystatement:"+authorizeUrl)
print ("Got a token URL from the capabilitystatement:"+tokenUrl)
# Store the relevant parameters in the session to use for authorizing
session['authorizeUrl'] = authorizeUrl
session['tokenUrl'] = tokenUrl
"""
Use the python oauth2 client to call the authorization endpoint
"""
def authorize_user():
smart_auth_session = OAuth2Session(client_id)
if 'launchContextId' in session:
authorization_url, state = smart_auth_session.authorization_url(session['authorizeUrl'], \
aud=session['serviceUri'], \
launch=session['launchContextId'])
else:
authorization_url, state = smart_auth_session.authorization_url(session['authorizeUrl'], \
aud=session['serviceUri'])
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
print ("Redirecting to authorization URL:"+authorization_url)
return redirect(authorization_url)
"""
Callback URL called by authorization server once the user has logged in.
Takes their authorization code and calls the token endpoint to get an access token.
"""
@app.route("/callback", methods=["GET", "POST"])
def callback():
# Retrieving an access token
smart_auth_session = OAuth2Session(client_id, scope=scope, redirect_uri=redirect_uri, state=session['oauth_state'])
token_url = session['tokenUrl']
token_response = smart_auth_session.fetch_token(token_url, client_secret=client_secret, \
authorization_response=request.url)
session['oauth_token'] = token_response
    if 'patient' in token_response:
# Get the patient ID passed in with the token
patient_id = token_response['patient']
return getPatientDetails(patient_id)
else:
return getPatientList()
"""
Access a protected FHIR resource from the SMART server, passing our access token in the request
"""
def getPatientDetails(patient_id):
protected_resource_request = OAuth2Session(client_id, token=session['oauth_token'])
fhir_root = session['serviceUri']
patient_url = fhir_root+"/Patient/"+patient_id
return json.dumps(protected_resource_request.get(patient_url).json())
def getPatientList():
protected_resource_request = OAuth2Session(client_id, token=session['oauth_token'])
fhir_root = session['serviceUri']
patient_url = fhir_root+"/Patient"
return json.dumps(protected_resource_request.get(patient_url).json())
"""
Takes the base FHIR server URL and uses it to retrieve a conformance resource for the server
"""
def getRemoteResource(serviceUri):
remoteEndpoint = (serviceUri + '/metadata')[8:]
separator = remoteEndpoint.find('/')
host = remoteEndpoint[:separator]
path = remoteEndpoint[separator:]
conn = http.client.HTTPSConnection(host)
conn.request("GET", path)
response = conn.getresponse()
    resultResource = response.read().decode('utf-8')
return resultResource
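"""
Simpler alternative sketch (added here, not part of the original app): the same
metadata fetch using the requests library instead of raw http.client
"""
def getRemoteResourceViaRequests(serviceUri):
    import requests  # local import keeps this sketch self-contained
    # requests handles the scheme/host/path splitting that getRemoteResource does by hand
    return requests.get(serviceUri + '/metadata').text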
"""
Initialise our Flask server in debug mode
"""
if __name__ == '__main__':
import os
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
app.secret_key = os.urandom(24)
app.run(host="localhost", port=5000, debug=True)
|
ahatherly/SMART-on-FHIR-testclient
|
app.py
|
app.py
|
py
| 6,659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "http.client.client",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "requests_oauthlib.OAuth2Session",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "requests_oauthlib.OAuth2Session",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "flask.request.url",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "requests_oauthlib.OAuth2Session",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "requests_oauthlib.OAuth2Session",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "http.client.client.HTTPSConnection",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "http.client.client",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "os.urandom",
"line_number": 175,
"usage_type": "call"
}
] |
11221441363
|
import os, bcrypt
from datetime import datetime
from flask import Flask, request, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, static_folder='.')
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(db.BINARY(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f'User({self.username}, {self.email})'
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Integer, nullable=False)
date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
position = db.Column(db.String(), nullable=False)
description = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
        return f'Post({self.type}, {self.date})'
def serialize(self):
return {
'id': self.id,
'type': self.type,
'date': self.date,
'position': self.position,
'description': self.description,
'user_id': self.user_id
}
def path(id):
return os.path.join(app.config['UPLOAD_FOLDER'], str(id) + '.jpg')
@app.route('/')
def hello():
return 'Hello World!'
@app.route('/user', methods=['POST'])
def user():
if 'id' not in request.form:
return 'id missing', 400
user = User.query.filter_by(id=request.form['id']).first()
if user == None:
return 'inexistant', 404
return user.username
@app.route('/data', methods=['GET', 'POST', 'DELETE'])
def data():
if request.method == 'POST':
for key in ['type', 'position', 'description', 'user_id']:
if request.form.get(key) == None:
return key + ' missing', 400
if 'image' not in request.files:
return 'image missing', 400
file = request.files['image']
if file.filename == '':
return 'image missing', 400
post = Post(
type = request.form['type'],
position = request.form['position'],
description = request.form['description'],
user_id = request.form['user_id']
)
db.session.add(post)
db.session.flush()
file.save(path(post.id))
db.session.commit()
return jsonify(post.serialize())
elif request.method == 'DELETE':
if 'id' not in request.form:
return 'id missing', 400
id=request.form['id']
Post.query.filter_by(id=id).delete()
db.session.commit()
file = path(id)
if os.path.exists(file):
os.remove(file)
return 'ok'
if request.args.get('form') != None:
return app.send_static_file('app.html')
if request.args.get('id') != None:
post = Post.query.filter_by(id=request.args['id']).first()
if post == None:
return 'not found', 404
return jsonify(post.serialize())
return jsonify([post.serialize() for post in Post.query.all()])
    # if neither, 405 or 406
if __name__ == '__main__':
app.run(debug = True)
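# Example requests against this API (illustrative; host/port assume the
# default Flask dev server started above):
#   curl -F type=1 -F "position=45.5,-73.6" -F description=test \
#        -F user_id=1 -F image=@photo.jpg http://localhost:5000/data
#   curl "http://localhost:5000/data?id=1"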
|
tran-simon/hackatown
|
app.py
|
app.py
|
py
| 3,082 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "flask.request.args",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 99,
"usage_type": "call"
}
] |
71144565947
|
#!/usr/bin/env python3
from dotenv import load_dotenv
from pet_posts import bot
import logging
import os
def main():
load_dotenv() # take environment variables from .env.
api_token = os.getenv("API_TOKEN")
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
updater = bot.init(api_token)
bot.configure(updater.dispatcher)
bot.run(updater)
if __name__ == "__main__":
main()
|
dawngerpony/pet-posts
|
app.py
|
app.py
|
py
| 483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pet_posts.bot.init",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pet_posts.bot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pet_posts.bot.configure",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pet_posts.bot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pet_posts.bot.run",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pet_posts.bot",
"line_number": 20,
"usage_type": "name"
}
] |
26628419049
|
import cv2
import numpy as np
import urllib.request
from threading import Thread
import socket
import time
import requests
import json
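# A minimal sketch (added, not part of the original module) of the tag-framing
# scheme described in the Streamer docstring below: every message is wrapped in
# a start tag and an end tag before it goes on the wire.
def frame_message(payload: bytes, start_tag: str = "frame_start", end_tag: str = "frame_end") -> bytes:
    # encode both tags and concatenate them around the raw payload bytes
    return start_tag.encode("utf-8") + payload + end_tag.encode("utf-8")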
class Streamer:
    '''
    description:-
        Class responsible for connecting to the android app and managing the data communication.
    How it works:
        - every message to and from the app is encapsulated by a starting tag and an ending tag
        - the sending side (either android or pc side) first turns the message into a byte array,
          then appends a tag to the start and the end of that array.
        - for example, a frame message sent from the app looks as follows:
            [FRAME START TAG] [BYTE STREAM] [FRAME END TAG]
    Inputs:
        src: string, ip address of the android
        port: int, port of the app on the android
        buffer_size: int, amount of incoming frames to buffer
        f_st: string, specify the frame start tag
        f_en: string, specify the frame end tag
        d_st: string, specify the data start tag
        d_en: string, specify the data end tag
    '''
def __init__(self, src, port, buffer_size=5, f_st="frame_start", f_en="frame_end",
d_st="data_start", d_en="data_end"):
self.src = src
self.port = port
self.buffer_size = buffer_size
self.f_st, self.f_en, self.d_st, self.d_en =f_st, f_en, d_st, d_en
# initialize the socket and connect
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(True)
self.sock.settimeout(3)
try:
self.sock.connect((src, port))
except:
self.sock = None
self.stop_threads = True
return None
# initialize the buffers
# frame buffer (circular buffer)
self.frame_insert_idx = 0
self.frame_output_idx = 0
self.frames = [None] * buffer_size
self.data = None # data buffer (1 slot buffer)
        # start the thread responsible for receiving and buffering the incoming messages
self.stop_threads = False
Thread(target=self.thread).start()
def thread(self):
'''
        Main thread that receives and extracts messages from the app
'''
frame_conversion, data_conversion = False, False
recv_size = 1024 # initial byte buffer size for the socket
buffer = b'' # general byte buffer
        frame_buffer, data_buffer = b'', b'' # byte buffer for the frame and data messages
while self.stop_threads == False:
if(self.sock._closed): # stop if socket is closed
self.stop_threads = self.sock._closed
break
try:
r = self.sock.recv(recv_size) # receive the byte stream
if len(r) == 0:
exit(0)
buffer += r # add the received byte stream to the general buffer
                # Extract frame messages============================================
if frame_conversion == False:
s = buffer.find(bytearray(self.f_st, encoding ='utf-8'))
if s != -1:
frame_conversion = True
frame_buffer = b''
if frame_conversion:
e = buffer.find(bytearray(self.f_en, encoding ='utf-8'))
if e != -1:
frame_conversion = False
frame_buffer = buffer[s+len(self.f_st):e]
buffer = buffer[:s] +buffer[e+len(self.f_en):]
recv_size = 512 + len(frame_buffer)
else:
continue
####################################################################
                # Extract data messages=============================================
if data_conversion == False:
s = buffer.find(bytearray(self.d_st, encoding ='utf-8'))
if s != -1:
data_conversion = True
data_buffer = b''
if data_conversion:
e = buffer.find(bytearray(self.d_en, encoding ='utf-8'))
if e != -1:
data_conversion = False
data_buffer = buffer[s+len(self.d_st):e]
buffer = buffer[:s] +buffer[e+len(self.d_en):]
self.data = data_buffer.decode('ascii')
else:
continue
####################################################################
except Exception as e:
print(e)
continue
try:
# if frame buffer is not full
if (self.frame_insert_idx+1) % self.buffer_size != self.frame_output_idx:
# decode the byte frame massege to a numpy array
                    nparr = np.frombuffer(frame_buffer, np.uint8)
frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                    if frame is None:
                        print("frame dropped")
                        continue
# store the frame in the frame buffer
self.frames[self.frame_insert_idx] = frame
# increment the input index of the ring buffer
self.frame_insert_idx = (self.frame_insert_idx+1) % self.buffer_size
except Exception as e:
print(e)
pass
self.sock.close()
def fetch_frame(self):
'''
Blocking loop until a frame is available
'''
while(self.frame_insert_idx == self.frame_output_idx and self.stop_threads == False ):
continue
frame = self.frames[self.frame_output_idx].copy()
# increment the output index of the ring buffer
self.frame_output_idx = (self.frame_output_idx+1) % self.buffer_size
return frame
def fetch_data(self):
'''
fetch received data
note: data is in json format and needs to be converted to json object first
'''
try:
if type(self.data) is not type(None) and self.data != "":
data = self.data[self.data.find("{"):]
data = json.loads(data)
self.data= None
return data
except json.JSONDecodeError as e:
print("fetch_data error:" +str(e))
self.data = None
return None
def send_data(self, data):
'''
        converts data to json format and encapsulates it with start and end tags before sending
input:
data: dictionary, data to be sent
'''
try:
data = "START" + json.dumps(data) + "END"
self.sock.send(data.encode('utf-8'))
# self.sock.send("START".encode('utf-8'))
# self.sock.send(json.dumps(data).encode('utf-8'))
# self.sock.send("END".encode('utf-8'))
except ConnectionAbortedError as e:
print("send_data error:" + str(e))
def release(self):
self.stop_threads = True
# testing
if __name__ == "__main__":
src = "172.16.17.188"
port = 8888
streamer = Streamer(src, port)
key = ' '
while key != ord("q"):
frame = streamer.fetch_frame()
cv2.imshow("show", frame)
data = streamer.fetch_data()
if type(data) is not type(None):
# streamer.send_data(data)
print(data)
key = cv2.waitKey(1)
streamer.release()
|
MohamedEshmawy/DeepRoasters
|
streamer/streamer_v2.py
|
streamer_v2.py
|
py
| 7,791 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "socket.socket",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.fromstring",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "cv2.imdecode",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "json.JSONDecodeError",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 205,
"usage_type": "call"
}
] |
42123556181
|
# Part 1: Retrieving the information from an article link
# Pick any Product page on the Books to Scrape website. Write a Python script that visits this page and extracts the following information:
import sys
import requests
from bs4 import BeautifulSoup
import csv
import os
import urllib.request
# sys.argv -> list arguments passed to the script by the terminal (here the article url)
url = sys.argv[1]
response = requests.get(url)
parser = BeautifulSoup(response.content, 'html.parser')
products_infos = parser.find_all('td')
data = []
# product_page_url
data.append(url)
# universal_product_code (upc)
data.append(products_infos[0].string)
# title
data.append(parser.find('div', class_='product_main').h1.string)
# price_including_tax
price_including_tax = products_infos[3].string
price_tva = price_including_tax.replace('£', '')
data.append(price_tva)
# price_excluding_tax
price_excluding_tax = products_infos[2].string
price_ht = price_excluding_tax.replace('£', '')
data.append(price_ht)
# number_available
data.append(products_infos[5].string)
# product_description
find_p = parser.find_all('p')
data.append(find_p[3].string)
# category
find_a = parser.find_all('a')
data.append(find_a[3].string)
# review_rating
rate = parser.find('p', class_='star-rating')
rate_class = rate.get('class')
# Check if review is One, Two, Three, Four or five and append the result in the variable review
review = 0
if 'One' in rate_class:
review = 1
if 'Two' in rate_class:
review = 2
if 'Three' in rate_class:
review = 3
if 'Four' in rate_class:
review = 4
if 'Five' in rate_class:
review = 5
data.append(review)
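# A more compact equivalent (an added sketch, not the original approach): map
# the star-rating class name straight to its number instead of chained ifs.
rating_map = {'One': 1, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5}
review_alt = next((v for k, v in rating_map.items() if k in rate_class), 0)
assert review_alt == review  # both approaches agree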
# image_url
find_img = parser.find("img")
source = find_img.get('src')
image_url = source.replace("../../", "http://books.toscrape.com/")
data.append(image_url)
# GET images
pictures = []
soup_div_picture = parser.find('div', class_='item active')
soup_picture = soup_div_picture.find('img').get('src')
find_image_url = 'http://books.toscrape.com/' + soup_picture
pictures.append(find_image_url.replace('../../', ''))
# Try to create the pictures directory; if that's not possible (error), don't do anything (continue)
path = 'images/'
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
# For each picture in pictures, open the pictures directory, copy / paste them inside and rename
# them (picture1, picture2...)
for link in range(len(pictures)):
img_url = pictures[link]
print(img_url)
with open(f'images/image{link + 1}.jpg', 'wb+') as f:
f.write(urllib.request.urlopen(img_url).read())
# Try to open data, if there is no directory create it
path = 'data'
try:
os.makedirs(path)
except os.error:
if not os.path.isdir(path):
os.mkdir(path)
# Write the data to a CSV file that uses the fields above as column headers.
header = ['product_page_url', 'universal_product_code (upc)', 'title', 'price_including_tax', 'price_excluding_tax', 'number_available', 'product_description', 'category', 'review_rating', 'image_url']
with open('data/article_data.csv', 'w', encoding='utf-8') as article:
w = csv.writer(article, delimiter=',')
w.writerow(header)
w.writerow(data)
|
glgstyle/MyBookScraper
|
scrap_article.py
|
scrap_article.py
|
py
| 3,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.error",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 105,
"usage_type": "call"
}
] |
43040033841
|
# -*- coding: utf-8 -*-
"""
@author: lucianavarromartin
PRODUCTOR CONSUMIDOR 3(limited)
The store now has infinite space, and each producer has k sub-stores
that can be full simultaneously.
We add the Lock object in this code to get controlled access to
the sub-stores.
The process stops once each producer has restocked the element of its k stores
N times, after it has been consumed by the consumer.
"""
from multiprocessing import Process, Manager
from multiprocessing import BoundedSemaphore, Semaphore, Lock
from multiprocessing import current_process
from multiprocessing import Array
from time import sleep
import random
N = 3 # Number of products each producer can make
K = 2 # Number of sub-stores
NPROD = 3 # Number of producers
def add_data(almacen, pid, data, mutex):
mutex.acquire()
try:
almacen.append(pid*1000 + data)
sleep(1)
finally:
mutex.release()
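# Note (added for clarity): items are stored as pid*1000 + data, so the
# consumer can recover both the producing process (item // 1000) and the
# value (item % 1000) from a single integer.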
def productor(almacen, pid, empty, non_empty, mutex):
"""
    When the producer produces, it adds an element to its store; the empty
    semaphore associated with it is acquired and the non_empty one is released.
"""
dato = random.randint(0,5)
for n in range(N):
empty[pid].acquire()
dato += random.randint(0,5)
add_data(almacen, pid, dato, mutex)
print (f"productor {current_process().name} almacenado {dato}")
non_empty[pid].release()
print(f"producer {current_process().name} Ha terminado de producir")
empty[pid].acquire()
sleep(1)
non_empty[pid].release()
def consumidor(almacen, empty, non_empty, mutex):
"""
    When the consumer consumes an element from one of the producers, that
    element is no longer in the store, so the non_empty semaphore associated
    with that producer is acquired and the empty one is released.
"""
for s in non_empty:
s.acquire()
sleep(1)
ordenados = []
while len(ordenados) < NPROD * N:
numeros = []
lista_posicion = []
for i in range(len(almacen)):
if almacen[i] >= 0:
numeros.append(almacen[i] % 1000)
lista_posicion.append(almacen[i]//1000)
if numeros == []:
break
dato = min(numeros)
posicion = lista_posicion[numeros.index(dato)]
posicion_almacen = almacen[:].index(dato + posicion * 1000)
almacen[posicion_almacen]= -2
ordenados.append(dato)
empty[posicion].release()
print (f"consumidor {current_process().name} consumiendo {dato}")
non_empty[posicion].acquire()
print(ordenados)
def main():
manager = Manager()
almacen = manager.list()
non_empty = [Semaphore(0) for i in range (NPROD)]
empty = [BoundedSemaphore(K) for _ in range (NPROD)]
mutex = Lock()
prodlst = [Process(target=productor,
name=f'prod_{i}',
args=(almacen, i, empty, non_empty, mutex))
for i in range(NPROD)]
cons = [ Process(target=consumidor,
name=f'cons',
args=(almacen, empty, non_empty, mutex))]
for p in prodlst + cons:
p.start()
for p in prodlst + cons:
p.join()
if __name__ == '__main__':
main()
|
lucnav01/ProductorConsumidor
|
ProductorConsumidor3NavarroMartinLucia.py
|
ProductorConsumidor3NavarroMartinLucia.py
|
py
| 3,451 |
python
|
es
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Manager",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Semaphore",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "multiprocessing.BoundedSemaphore",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Lock",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 92,
"usage_type": "call"
}
] |
70941003388
|
'''
Created on 8/03/2016
@author: EJArizaR
'''
import unittest
from apps.DaneUsers.tests.test_base import test_base
from django.core.urlresolvers import reverse
class IsUsernameRegisteredTest(test_base):
def setUp(self):
test_base.setUp(self)
def test_returns_False_if_user_doesnt_exist(self):
response = self.client.get(reverse('DaneUsers:isUsernameRegistered'),{"username":"[email protected]"})
self.assertEqual(response.content, "False")
def test_returns_True_if_exists(self):
self.create_user()
response = self.client.get(reverse('DaneUsers:isUsernameRegistered'),{"username":"[email protected]"})
self.assertEqual(response.content, "True")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
diegopuerto/kiosco_universitario
|
source/apps/DaneUsers/tests/test_is_username_registered.py
|
test_is_username_registered.py
|
py
| 852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "apps.DaneUsers.tests.test_base.test_base",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "apps.DaneUsers.tests.test_base.test_base.setUp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "apps.DaneUsers.tests.test_base.test_base",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.reverse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 29,
"usage_type": "call"
}
] |
32628012198
|
import pandas as pd
import glob
from datetime import datetime, timedelta
# Read the CSV files from the dataset folder
def readCSV():
listCSV = []
namePath = 'dataset'
# Select all csv in folder selected
namesFiles = glob.glob(namePath + "/*.csv")
# join all them
for filename in namesFiles:
df = pd.read_csv(filename, sep=';')
dfMask = df['codmun'].isnull()
filtered_df = df[dfMask]
listCSV.append(filtered_df)
frame = pd.concat(listCSV, axis=0, ignore_index=True)
frame['data'] = pd.to_datetime(frame['data']) # .dt.strftime('%d/%m/%Y')
return frame
def itensCalculate(df, date, dateStart, uf):
all = []
mask = df['data'] == date.strftime('%Y-%m-%d')
dfAux = df[mask]
# Date
all.append(date)
# State
if uf == 76:
all.append('Brasil')
else:
all.append(df['estado'].iloc[0])
# CasosAcumulado
all.append(int(dfAux['casosAcumulado'].iloc[0]))
    # Current moving average of cases, previous moving average, trend, percentage
for i in movingAverage(df, date, dateStart, 0):
all.append(i)
# ObitosAcumulados
all.append(dfAux['obitosAcumulado'].iloc[0])
    # Current moving average of deaths, previous moving average, trend, percentage
for j in movingAverage(df, date, dateStart, 1):
all.append(j)
return all
# number = 0 -> cases, number != 0 -> deaths
def movingAverage(df, date, dateStart, number):
all = []
if number == 0:
dfAux = df[['data', 'casosAcumulado']]
else:
dfAux = df[['data', 'obitosAcumulado']]
    # Current moving average
mean_today = averageCall(df, date, dateStart, number)
    # Previous moving average
mean_before = averageCall(df, date - timedelta(days=1), dateStart, number)
all.append(int(mean_today))
all.append(int(mean_before))
    # Trend ('Situação') and percentage change of each moving average
if mean_before == 0:
if mean_today != 0:
all.append('Aumento')
all.append(100)
else:
all.append('Estabilidade')
all.append('-')
elif mean_today/mean_before > 1:
all.append('Aumento')
all.append(round(((mean_today/mean_before - 1)*100), 4))
elif mean_today/mean_before < 1:
all.append('Diminuicao')
all.append(round(abs(mean_today/mean_before - 1)*100, 4))
else:
all.append('Estabilidade')
all.append(round((mean_today/mean_before - 1)*100, 4))
return all
def averageCall(df, date, dateStart, number):
colum = ''
if number == 0:
colum = 'casosNovos'
else:
colum = 'obitosNovos'
# First 7 days
if date.strftime('%Y-%m-%d') < (dateStart + timedelta(days=7)).strftime('%Y-%m-%d'):
mask = (df['data'] <= date.strftime('%Y-%m-%d'))
dfAux = df[mask]
return dfAux[colum].sum()/7
# After
else:
# Select part of dataframe that need to calculate mean
mask = (df['data'] <= date.strftime('%Y-%m-%d')) & (df['data'] > (date - timedelta(days=7)).strftime('%Y-%m-%d'))
dfAux = df[mask]
return dfAux[colum].mean()
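if __name__ == '__main__':
    # Tiny synthetic sanity check (added; not part of the original module):
    # a constant rate of 10 new cases per day should give equal current and
    # previous moving averages, i.e. an 'Estabilidade' verdict.
    days = pd.date_range('2021-01-01', periods=14)
    demo = pd.DataFrame({'data': days, 'estado': 'SP',
                         'casosNovos': 10, 'obitosNovos': 1,
                         'casosAcumulado': range(10, 150, 10),
                         'obitosAcumulado': range(1, 15)})
    print(movingAverage(demo, days[-1], days[0], 0))  # [10, 10, 'Estabilidade', 0.0]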
|
lfmaster780/dataCovid
|
utils.py
|
utils.py
|
py
| 3,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 105,
"usage_type": "call"
}
] |
37429761663
|
import os
from bs4 import BeautifulSoup
import requests
import requests.exceptions
import urllib.parse
from collections import deque
import re
# Create the directory to store the scraped data if it does not already exist
if not os.path.exists("scraped_data"):
os.makedirs("scraped_data")
user_url = str(input('[+] Enter Target URL To Scan: '))
urls = deque([user_url])
scraped_urls = set()
emails = set()
phone_numbers = set()
count = 0
try:
while len(urls):
count += 1
if count == 100:
break
url = urls.popleft()
scraped_urls.add(url)
parts = urllib.parse.urlsplit(url)
base_url = '{0.scheme}://{0.netloc}'.format(parts)
path = url[:url.rfind('/')+1] if '/' in parts.path else url
print('[%d] Processing %s' % (count, url))
try:
response = requests.get(url)
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
continue
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", response.text, re.I))
emails.update(new_emails)
new_phone_numbers = set(re.findall(r"\b\d{3}[-.]?\d{3}[-.]?\d{4}\b", response.text))
phone_numbers.update(new_phone_numbers)
soup = BeautifulSoup(response.text, features="lxml")
for anchor in soup.find_all("a"):
link = anchor.attrs['href'] if 'href' in anchor.attrs else ''
if link.startswith('/'):
link = base_url + link
elif not link.startswith('http'):
link = path + link
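            # e.g. an href of '/about' resolves against base_url, while a
            # relative 'page2.html' resolves against the current page's path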
if not link in urls and not link in scraped_urls:
urls.append(link)
except KeyboardInterrupt:
print('[-] Closing!')
# Create a file to store the scraped email addresses
with open("scraped_data/emails.txt", "w") as f:
print("[+] Scraped Emails:")
for email in emails:
f.write(email + "\n")
print(email)
# Create a file to store the scraped phone numbers
with open("scraped_data/phone_numbers.txt", "w") as f:
print("\n[+] Scraped Phone Numbers:")
for phone_number in phone_numbers:
f.write(phone_number + "\n")
print(phone_number)
print("\n[+] Scraped data saved in 'scraped_data' folder.")
|
opemi-aa/email_phone_scrape
|
email_phone_scrape.py
|
email_phone_scrape.py
|
py
| 2,267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse.urlsplit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 46,
"usage_type": "call"
}
] |
5555757977
|
import copy
from typing import Dict, List, Tuple
import torch
from data.low_res import SingleDomain
from data.geography import frequency_encoded_latitude
import numpy as np
from data.vars import FIELD_MASK, FORCING_MASK, get_var_mask_name
import xarray as xr
from utils.xarray_oper import tonumpydict
def determine_ndoms(*args,**kwargs):
arglens = [1]
for i in range(len(args)):
if isinstance(args[i],list):
arglens.append(len(args[i]))
for key,_ in kwargs.items():
if isinstance(kwargs[key],list):
arglens.append(len(kwargs[key]))
return int(np.amax(arglens))
class MultiDomain(SingleDomain):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.var_grouping = kwargs.pop('var_grouping')
def get_lat_features(self,lats):
posdict = self.locate(lats[0],lats[-1],lat = True)
(n0,_),n = posdict['locs'],posdict["len"]
slc = slice(n0,len(lats)+n0)
abslat,signlat = frequency_encoded_latitude(n,self.half_spread*2+1)
return np.cos(abslat[slc]),np.cos(signlat[slc])
def append_lat_features(self,outs):
key = list(outs.keys())[0]
lats = outs[key].u.lat.values
abslat,signlat = self.get_lat_features(lats)
n = len(outs[key].u.lon)
abslat = abslat.reshape(-1,1)@np.ones((1,n))
signlat = signlat.reshape(-1,1)@np.ones((1,n))
latfeats = xr.Dataset(
data_vars = dict(
abslat = (["lat","lon"],abslat),
signlat = (["lat","lon"],signlat),
),
coords = dict(
lon = outs[key].u.lon,
lat = outs[key].u.lat
)
)
outs['lat_feats'] = latfeats
return outs
class MultiDomainDataset(MultiDomain):
def __init__(self,*args,scalars = None,latitude = False,temperature = False,torch_flag = False, **kwargs):
self.scalars = scalars
self.latitude = latitude
self.temperature = temperature
self.torch_flag = torch_flag
self.input_kwargs = kwargs
super().__init__(*args,**kwargs)
@property
def sslice(self,):
return slice(self.half_spread,-self.half_spread)
def pad(self,data_vars:dict,coords:dict):
forcing_mask_names = [get_var_mask_name(fn) for fn in self.forcing_names]
for name in data_vars.keys():
dims,vals = data_vars[name]
if 'lat' not in dims or 'lon' not in dims:
continue
pad = (0,0)
if name in self.forcing_names + forcing_mask_names and self.half_spread>0:
vrshp = list(vals.shape)
vals = vals.reshape([-1]+ vrshp[-2:])
vals = vals[:,self.sslice,self.sslice]
vals = vals.reshape(vrshp[:-2] + list(vals.shape[-2:]))
# print(f'{vrshp}->{vals.shape}' )
padtuple = (len(vals.shape)-2)*[(0,0)] + [(0,pad[0]),(0,pad[1])]
vals = np.pad(vals,pad_width = tuple(padtuple),constant_values = np.nan)
data_vars[name] = (dims,vals)
def pad_coords(coords,slice_flag = False):
lat = coords['lat']
pad = 0
coords['lat_pad'] = pad
lat = np.pad(lat,pad_width = ((0,pad),),constant_values = 0)
if slice_flag:
lat = lat[self.sslice]
coords['lat'] = lat
lon = coords['lon']
pad = 0
coords['lon_pad'] = pad
lon = np.pad(lon,pad_width = ((0,pad),),constant_values = 0)
if slice_flag:
lon = lon[self.sslice]
coords['lon'] = lon
return coords
forcing_coords = pad_coords(copy.deepcopy(coords),slice_flag=self.half_spread>0)
coords = pad_coords(coords,slice_flag=False)
return data_vars,coords,forcing_coords
def add_lat_features(self,data_vars,coords):
lats = coords['lat']
lons = coords['lon']
abslat,signlat = self.get_lat_features(lats)
data_vars['abs_lat'] = (['lat','lon'], abslat.reshape([-1,1]) @ np.ones((1,len(lons))))
data_vars['sign_lat'] = (['lat','lon'],signlat.reshape([-1,1]) @ np.ones((1,len(lons))))
return data_vars
def group_variables(self,data_vars):
groups = []
for vargroup in self.var_grouping:
valdict = {}
for varname in vargroup:
if varname not in data_vars:
continue
valdict[varname] = data_vars[varname]
# for suff in '_mean _std'.split():
for suff in '_scale '.split():
nvarname = varname + suff
if nvarname in data_vars:
valdict[nvarname] = data_vars[nvarname]
groups.append(valdict)
return tuple(groups)
def group_np_stack(self,vargroups):
return tuple([self._np_stack(vars) for vars in vargroups])
def _np_stack(self,vals:Dict[str,Tuple[List[str],np.ndarray]]):
v = []
for _,val in vals.values():
v.append(val)
if len(v) == 0:
return np.empty(0)
else:
return np.stack(v,axis =0)
def group_to_torch(self,vargroups):
return tuple([self._to_torch(vars) for vars in vargroups])
def _to_torch(self,vals:np.ndarray,dtype = torch.float32):
# vals = vals[:,300:-280,300:-280]
return torch.from_numpy(vals).type(dtype)
def normalize(self,data_vars,coords):
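"""Divide each lat/lon variable by its per-variable scale factor.

Scale factors are read from self.scalars (entries named "<var>_scale") and
default to 1; when not in torch mode the scale is kept alongside the data
as "<var>_scale" so the normalization can be undone later.
"""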
keys_list = tuple(data_vars.keys())
for key in keys_list:
dims,vals = data_vars[key]
if 'lat' not in dims or 'lon' not in dims:
continue
shp = {d:len(coords[d]) for d in dims}
newdims = {key:None for key in shp}
if 'lon' in shp:
shp['lon'] = 1
newdims.pop('lon')
if 'lat' in shp:
shp['lat'] = 1
newdims.pop('lat')
shp0 = [shp[key] for key in newdims]
shp1 = list(shp.values())
newdims = list(newdims.keys())
a = np.ones(shp0)
if self.scalars is not None:
if f"{key}_scale" in self.scalars:
a = self.scalars[f"{key}_scale"].values
a = a.reshape(shp0)
if not self.torch_flag:
data_vars[f"{key}_scale"] = (newdims,a)
# data_vars[f"{key}_mean"] = (newdims,a)
# data_vars[f"{key}_std"] = (newdims,b)
# vals = (vals - a.reshape(shp1))/b.reshape(shp1)
vals = vals/a.reshape(shp1)
data_vars[key] = (dims,vals)
return data_vars,coords
def mask(self,data_vars):
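"""Zero out NaNs and record a companion mask for every lat/lon field.

Fields and forcings reuse their shared group mask (FIELD_MASK /
FORCING_MASK); the result is stored under get_var_mask_name(key).
"""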
keys_list = tuple(data_vars.keys())
for key in keys_list:
dims,f = data_vars[key]
if not ('lat' in dims and 'lon' in dims):
continue
mask = f==f
f[~mask] = 0
mask_found = False
for group,group_mask in zip([self.field_names,self.forcing_names],[FIELD_MASK,FORCING_MASK]):
if key in group:
mask = data_vars[group_mask][1]
mask_found =True
break
if mask_found:
varmask = get_var_mask_name(key)
data_vars[varmask] = (dims,mask)
if not self.torch_flag:
data_vars[f"{varmask}_normalization"] = (['normalization'],np.array([0,1]))
return data_vars
def __getitem__(self, i):
ds = super().__getitem__(i)
# print(f'MultiDomainDataset - {[f"{key}-{val.shape}" for key,val in ds.coords.items()]}')
per_region = []
requested_boundaries = ([None]*4,) if self.requested_boundaries is None else self.requested_boundaries
# print(f'requested_boundaries = {requested_boundaries}')
for lat0,lat1,lon0,lon1 in requested_boundaries:
if lat0 is not None:
subds = ds.sel(lat = slice(lat0,lat1),lon= slice(lon0,lon1))
else:
subds = ds
single_dom_out = self.single_domain(subds)
if not self.torch_flag:
return single_dom_out
per_region.append(single_dom_out)
cropped_per_region = []
def get_slice(length: int, length_to: int):
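# Centered crop: drop (length - length_to) entries, split as evenly as
# possible between the two ends.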
d_left = max(0, (length - length_to) // 2)
d_right = d_left + max(0, (length - length_to)) % 2
return slice(d_left, length - d_right)
for var_inputs in zip(*per_region):
shps = []
for var_in in var_inputs:
shps.append(np.array(var_in.shape))
shps = np.stack(shps,axis = 0)
shps = np.amin(shps,axis =0)
# shps = np.amax(shps,axis =0)
group = []
for var_in in var_inputs:
slcs = [get_slice(shp,_shp) for shp,_shp in zip(var_in.shape,shps)]
var_in = var_in[slcs[0],slcs[1],slcs[2]]
# var_in = var_in[:shps[0],:shps[1],:shps[2]]
group.append(var_in)
# zer =torch.zeros(*shps)
# shps_ = var_in.shape
# zer[:shps_[0],:shps_[1],:shps_[2]] = var_in
# group.append(zer)
group = torch.stack(group,dim = 0)
cropped_per_region.append(group)
min_gpu_reject_size = 200
max_shape = np.stack([np.array(group.shape[2:]) for group in cropped_per_region],axis = 0)
max_shape = np.amax(max_shape,axis = 0)
pad_shape = np.maximum(min_gpu_reject_size - max_shape,0)
if True:#np.all(pad_shape == 0) or not torch.cuda.is_available():
return tuple(cropped_per_region)
cropped_per_region_ = []
for group in cropped_per_region:
shp = group.shape
padded_shape = np.array(shp)
padded_shape[2:] += pad_shape
z = torch.zeros(*padded_shape)
z[:,:,:shp[2],:shp[3]] = group
cropped_per_region_.append(z)
return tuple(cropped_per_region_)
def single_domain(self,outs):
data_vars,coords = tonumpydict(outs)
# for key,(dim,val) in data_vars.items():
# print(f'{key}-{dim}: {val.shape}')
for ik,iv in self.input_kwargs.items():
if ik not in coords:
if np.isscalar(iv) or isinstance(iv,str):
coords[ik] = np.array([iv])
# print('\n'.join([f'{key} : {type(coords[key])}' for key in coords]))
# print('\n'.join([f'{key} : {data_vars[key][1].shape}' for key in data_vars]))
# raise Exception
if self.latitude:
data_vars = self.add_lat_features(data_vars,coords)
data_vars,coords = self.normalize(data_vars,coords)
data_vars = self.mask(data_vars)
data_vars,coords,forcing_coords = self.pad(data_vars,coords)
# dropkeys = []
# for key in data_vars:
# if 'normalization' in key or 'scale' in key:
# dropkeys.append(key)
# continue
# if 'S' not in key:
# dropkeys.append(key)
# continue
# for dk in dropkeys:
# data_vars.pop(dk)
# selkeys = 'Su Sv Stemp'.split()
# data_vars = {key:data_vars[key] for key in selkeys}
# ds = xr.Dataset(data_vars,forcing_coords)
# ds = np.log10(np.abs(ds))
# print(ds)
# plot_ds(ds,'ds.png',ncols = 1)
# raise Exception
grouped_vars = self.group_variables(data_vars)
if self.torch_flag:
grouped_vars = self.group_np_stack(grouped_vars)
return self.group_to_torch(grouped_vars)
else:
grouped_vars = list(grouped_vars)
grouped_vars.append(coords)
grouped_vars.append(forcing_coords)
return tuple(grouped_vars)
|
CemGultekin1/cm2p6
|
data/low_res_dataset.py
|
low_res_dataset.py
|
py
| 12,208 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.amax",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "data.low_res.SingleDomain",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "data.geography.frequency_encoded_latitude",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "xarray.Dataset",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "data.vars.get_var_mask_name",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "numpy.pad",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.pad",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "data.vars.FIELD_MASK",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "data.vars.FORCING_MASK",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "data.vars.get_var_mask_name",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "utils.xarray_oper.tonumpydict",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "numpy.isscalar",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 259,
"usage_type": "call"
}
] |
11472792272
|
import re
import time
import datetime
import json
import copy
import random
import os
from pathlib import Path
from urllib.parse import quote
from amiyabot import PluginInstance
from core.util import read_yaml
from core import log, Message, Chain
from core.database.user import User, UserInfo
from core.database.bot import OperatorConfig
from core.resource.arknightsGameData import ArknightsGameData, ArknightsGameDataResource, Operator
from .database import AmiyaBotWifuStatusDataBase
curr_dir = os.path.dirname(__file__)
class WifuPluginInstance(PluginInstance):
def install(self):
AmiyaBotWifuStatusDataBase.create_table(safe=True)
bot = WifuPluginInstance(
name='每日随机助理',
version='1.4',
plugin_id='amiyabot-arknights-hsyhhssyy-wifu',
plugin_type='',
description='每日生成一个随机助理',
document=f'{curr_dir}/README.md'
)
def compare_date_difference(day1: str,day2: str):
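"""Return the whole-day difference day1 - day2 for 'YYYY-MM-DD ...' strings."""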
time_array1 = time.strptime(day1.split(' ')[0], "%Y-%m-%d")
timestamp_day1 = int(time.mktime(time_array1))
time_array2 = time.strptime(day2.split(' ')[0], "%Y-%m-%d")
timestamp_day2 = int(time.mktime(time_array2))
result = (timestamp_day1 - timestamp_day2) // 60 // 60 // 24
return result
def compare_second_difference(day1: str,day2: str):
time_array1 = time.strptime(day1, "%Y-%m-%d %H:%M:%S")
timestamp_day1 = int(time.mktime(time_array1))
time_array2 = time.strptime(day2, "%Y-%m-%d %H:%M:%S")
timestamp_day2 = int(time.mktime(time_array2))
result = (timestamp_day1 - timestamp_day2)
return result
async def wifu_action(data: Message):
# log.info('Wifu selection triggered.')
wifu_meta: dict = UserInfo.get_meta_value(data.user_id,'amiyabot-arknights-wifu')
now = datetime.date.today()
# Check whether the user already has a wifu
if wifu_meta.__contains__('wifu_date') and wifu_meta.__contains__('wifu_name'):
# Compute the day difference since the last pick
last_wifu_time = wifu_meta['wifu_date']
time_delta = compare_date_difference(now.strftime("%Y-%m-%d"),last_wifu_time)
if time_delta<1 :
log.info(f'wifu selection time_delta={time_delta}')
return await show_existing_wifu(data,data.user_id)
wifu_meta['wifu_date'] = now.strftime("%Y-%m-%d")
# Randomly pick a wifu for the user
operators = {}
if not operators:
operators = copy.deepcopy(ArknightsGameData().operators)
operator = operators.pop(random.choice(list(operators.keys())))
while OperatorConfig.get_or_none(operator_name=operator.name,operator_type=8):
operator = operators.pop(random.choice(list(operators.keys())))
wifu_meta['wifu_name'] = operator.name
UserInfo.set_meta_value(data.user_id,'amiyabot-arknights-wifu',wifu_meta)
AmiyaBotWifuStatusDataBase.create(channel_id=data.channel_id, user_id=data.user_id, wifu_name=operator.name,
create_at=datetime.date.today())
count = count_in_channel(data.channel_id,operator.name,data.user_id)
text = f'博士,您今日选到的助理是干员{operator.name}呢'
if count>1:
text+=f",他已经是第{count}次成为您的助理了!\n"
else:
text+="!\n"
ask = Chain(data, at=True).text(text)
return await create_ret_data(data, ask,operator)
async def create_ret_data(data, ask,operator):
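"""Attach a random skin image and, when available, the operator's first voice line to the reply chain."""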
skin = random.choice(operator.skins())
skin_path = await ArknightsGameDataResource.get_skin_file(skin)
if not skin_path:
return ask.text('目前还没有该干员的立绘,真是抱歉博士~[face:9]')
else:
relative_path = Path(f"../../../{skin_path}")
log.info(f'skin: {relative_path}')
ask.html(path=f'{curr_dir}/template/wifu.html',
data={"id": "testAlt", "image": quote(f"{relative_path}")}, width=1024)
voices = operator.voices()
if not voices:
log.info(f'No voice file for operator {operator.name}.')
return ask
else:
voice = voices[0]
voice_path = await ArknightsGameDataResource.get_voice_file(operator, voice['voice_title'],'_cn')
if not voice_path:
return ask
else:
return ask.text(voice['voice_text'].replace('{@nickname}',data.nickname)).voice(voice_path)
return ask
# Count records for user_id under the given channel_id and wifu_name
def count_in_channel(channel_id, wifu_name, user_id):
return AmiyaBotWifuStatusDataBase.select().where(
(AmiyaBotWifuStatusDataBase.channel_id == channel_id) &
(AmiyaBotWifuStatusDataBase.wifu_name == wifu_name) &
(AmiyaBotWifuStatusDataBase.user_id == user_id)
).count()
# Count records for user_id under the given wifu_name across all channels
def count_in_all_channels(wifu_name, user_id):
return AmiyaBotWifuStatusDataBase.select().where(
(AmiyaBotWifuStatusDataBase.wifu_name == wifu_name) &
(AmiyaBotWifuStatusDataBase.user_id == user_id)
).count()
async def show_existing_wifu(data: Message, user_id: int):
wifu_meta: dict = UserInfo.get_meta_value(user_id,'amiyabot-arknights-wifu')
operator_name = wifu_meta['wifu_name']
operators = {}
if not operators:
operators = copy.deepcopy(ArknightsGameData().operators)
operator = operators[operator_name]
# Test-only code
# AmiyaBotWifuStatusDataBase.create(channel_id=data.channel_id, user_id=data.user_id, wifu_name=operator.name,
# create_at=datetime.date.today())
count = count_in_channel(data.channel_id,operator.name,data.user_id)
text = f'博士,您今天已经选过助理啦,您的助理是干员{operator.name}哦'
if count>1:
text+=f",他已经是第{count}次成为您的助理了呢~"
else:
text+="~"
ask = Chain(data, at=True).text(text)
return await create_ret_data(data,ask,operator)
@bot.on_message(keywords=['选老婆', '抽老婆', '选助理', '抽助理'],level=2)
async def _(data: Message):
return await wifu_action(data)
|
hsyhhssyy/amiyabot-arknights-hsyhhssyy-wifu
|
main.py
|
main.py
|
py
| 6,170 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "amiyabot.PluginInstance",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.create_table",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "time.strptime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "core.Message",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "core.database.user.UserInfo.get_meta_value",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "core.database.user.UserInfo",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "core.log.info",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "core.log",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameData",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "core.database.bot.OperatorConfig.get_or_none",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "core.database.bot.OperatorConfig",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "core.database.user.UserInfo.set_meta_value",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "core.database.user.UserInfo",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.create",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "core.Chain",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameDataResource.get_skin_file",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameDataResource",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "core.log.info",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "core.log",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "urllib.parse.quote",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "core.log.info",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "core.log",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameDataResource.get_voice_file",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameDataResource",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.select",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.channel_id",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.wifu_name",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.user_id",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.select",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.wifu_name",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase.user_id",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "database.AmiyaBotWifuStatusDataBase",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "core.Message",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "core.database.user.UserInfo.get_meta_value",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "core.database.user.UserInfo",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "core.resource.arknightsGameData.ArknightsGameData",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "core.Chain",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "core.Message",
"line_number": 175,
"usage_type": "name"
}
] |
75341615226
|
"""Train an EfficientNetB4 model to predict GBM vs PCNSL.
This requires TensorFlow >= 2.3.0.
"""
import argparse
import math
from pathlib import Path
import pickle
from typing import Tuple, Union
import h5py
import numpy as np
import tensorflow as tf
PathType = Union[str, Path]
def augment_base(x, y):
x = tf.image.random_brightness(x, max_delta=2)
x = tf.image.random_flip_left_right(x)
x = tf.image.random_flip_up_down(x)
x = tf.image.random_hue(x, max_delta=0.25)
return x, y
def augment_base_and_noise(x, y):
x, y = augment_base(x, y)
# Apply gaussian noise to fraction of samples.
x = tf.cond(
pred=tf.random.uniform([]) < 0.1,
true_fn=lambda: x
+ tf.random.normal(tf.shape(x), mean=0.0, stddev=0.05, dtype=x.dtype),
false_fn=lambda: x,
)
return x, y
def load_data_into_train_val(
data_path: PathType, augmentation: str
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
print("Loading data from HDF5...", flush=True)
with h5py.File(str(data_path), "r") as f:
x_gbm = f["/gbm/380_380/features"][:]
y_gbm = f["/gbm/380_380/labels"][:]
x_pcnsl = f["/pcnsl/380_380/features"][:]
y_pcnsl = f["/pcnsl/380_380/labels"][:]
print("gbm features shape", x_gbm.shape)
print("gbm labels shape", y_gbm.shape)
print("pcnsl features shape", x_pcnsl.shape)
print("pcnsl labels shape", y_pcnsl.shape, flush=True)
x = np.concatenate((x_gbm, x_pcnsl)).astype(np.float32)
y = np.concatenate((y_gbm, y_pcnsl)).astype(np.float32)
# Shuffle the samples. The shuffling is the same for features and labels.
print("Shuffling samples ...", flush=True)
shuffle_inds = np.arange(y.shape[0])
np.random.seed(42)
np.random.shuffle(shuffle_inds)
x = x[shuffle_inds]
y = y[shuffle_inds]
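# Randomly route ~15% of samples to validation (inds == 1) and the rest to training.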
inds = np.random.choice([0, 1], size=y.size, p=[0.85, 0.15])
x_train, y_train = x[inds == 0], y[inds == 0]
x_val, y_val = x[inds == 1], y[inds == 1]
# Create tf.data.Dataset
print("Creating tf.data.Dataset ...", flush=True)
batch_size = 8
dset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
if augmentation == "none":
print("Not applying augmentation.")
elif augmentation == "base":
print("Applying 'base' augmentation.")
dset_train = dset_train.map(augment_base)
elif augmentation == "base_and_noise":
print("Applying 'base_and_noise' augmentation.")
dset_train = dset_train.map(augment_base_and_noise)
else:
raise ValueError(f"unknown augmentation type: {augmentation}")
dset_train = dset_train.shuffle(1000, reshuffle_each_iteration=True)
dset_train = dset_train.batch(batch_size)
dset_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
dset_val = dset_val.batch(batch_size)
return dset_train, dset_val
def get_model() -> tf.keras.Model:
print("Creating model ...", flush=True)
tfkl = tf.keras.layers
# This is from the tf.keras.applications.efficientnet implementation in version
# 2.5.0 of tensorflow.
DENSE_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {"scale": 1.0 / 3.0, "mode": "fan_out", "distribution": "uniform"},
}
base_model = tf.keras.applications.EfficientNetB4(
include_top=False,
input_shape=(380, 380, 3),
weights="imagenet",
)
base_model.activity_regularizer = tf.keras.regularizers.l2(l=0.01)
_x = tfkl.GlobalAveragePooling2D(name="avg_pool")(base_model.output)
_x = tfkl.Dropout(0.5)(_x)
_x = tfkl.Dense(
1,
activation="sigmoid",
name="predictions",
kernel_initializer=DENSE_KERNEL_INITIALIZER,
)(_x)
model = tf.keras.Model(inputs=base_model.input, outputs=_x)
return model
def main(
data_path: PathType,
checkpoint_prefix: PathType,
augmentation: str = "none",
epochs: int = 300,
):
model = get_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-04),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=[tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.AUC()],
)
def schedule_lr(epoch):
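# Hold 1e-4 for the first 50 epochs, then decay exponentially;
# e.g. epoch 100 gives 1e-4 * exp(-0.75) ~= 4.7e-5.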
if epoch < 50:
return 1e-04
else:
return 1e-04 * math.exp(0.015 * (50 - epoch))
checkpoint_prefix = Path(checkpoint_prefix)
checkpoint_prefix.mkdir(parents=True, exist_ok=False)
callbacks = [
tf.keras.callbacks.LearningRateScheduler(schedule_lr, verbose=1),
tf.keras.callbacks.ModelCheckpoint(
filepath=str(checkpoint_prefix / "ckpt_{epoch:03d}_{val_loss:0.4f}.hdf5"),
save_best_only=True,
verbose=1,
),
]
dset_train, dset_val = load_data_into_train_val(
data_path=data_path, augmentation=augmentation
)
print("Beginning training...", flush=True)
history = model.fit(
dset_train,
epochs=epochs,
validation_data=dset_val,
callbacks=callbacks,
verbose=2,
)
# We save as pickle and not as json because the numpy arrays in this dictionary
# do not play nicely with json. Pickle is fine with it, though.
print("Saving training/validation history to pickle file ...")
with (checkpoint_prefix / "history.pkl").open("wb") as f:
pickle.dump(history.history, f)
def get_parsed_args() -> argparse.Namespace:
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("data_path", help="Path to HDF5 with data.")
p.add_argument("ckpt_prefix", help="Directory in which to save checkpoints.")
p.add_argument(
"--augmentation",
choices=["none", "base", "base_and_noise"],
default="none",
help="Type of augmentation to apply to training data.",
)
p.add_argument("--epochs", type=int, default=300, help="Number of epochs to train.")
args = p.parse_args()
args.data_path = Path(args.data_path)
args.ckpt_prefix = Path(args.ckpt_prefix)
return args
if __name__ == "__main__":
args = get_parsed_args()
print("-" * 40)
print("Arguments passed to this script:")
for key, value in vars(args).items():
print(f" - {key}: {value}")
print("-" * 40, flush=True)
main(
data_path=args.data_path,
checkpoint_prefix=args.ckpt_prefix,
augmentation=args.augmentation,
epochs=args.epochs,
)
print("Reached end of python script.")
|
kaczmarj/classification-of-gbm-vs-pcnsl-using-cnns
|
step1_train_model.py
|
step1_train_model.py
|
py
| 6,476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Union",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tensorflow.image.random_brightness",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.image.random_flip_left_right",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.image.random_flip_up_down",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.image.random_hue",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tensorflow.image",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cond",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.random.uniform",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.random.normal",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tensorflow.random",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.shape",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "h5py.File",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.Dataset.from_tensor_slices",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.data.Dataset.from_tensor_slices",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "tensorflow.data",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "tensorflow.data",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.applications.EfficientNetB4",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.regularizers.l2",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Model",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.optimizers.Adam",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.losses.BinaryCrossentropy",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.metrics.BinaryAccuracy",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.metrics.AUC",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "math.exp",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.LearningRateScheduler",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.callbacks.ModelCheckpoint",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "pickle.dump",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 170,
"usage_type": "attribute"
}
] |
20363546350
|
#%%
from dataclasses import dataclass, field
from functools import wraps
from typing import List, Optional, Protocol, Union
import time
from .controller import Controller
from . import commands
from .acceptance_scheme import AcceptanceScheme, UnconditionalAcceptance
from .scattering_simulation import ScatteringSimulation
from .box_simulation import Box
def timeit(my_func):
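"""Decorator that prints the wall-clock execution time of the wrapped function."""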
@wraps(my_func)
def timed(*args, **kw):
tstart = time.time()
output = my_func(*args, **kw)
tend = time.time()
print(f"{my_func.__name__} took {(tend - tstart)} seconds to execute")
return output
return timed
CommandOrAcceptableCommand = Union[commands.Command, commands.AcceptableCommand]
def decorate_command(command: CommandOrAcceptableCommand) -> commands.AcceptableCommand:
if isinstance(command, commands.AcceptableCommand):
return command
if isinstance(command, commands.Command):
return commands.AcceptableCommand(
base_command=command,
acceptance_scheme=UnconditionalAcceptance()
)
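# Any other command type falls through and implicitly returns None.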
class Evaluator(Protocol):
def evaluate(self, command: CommandOrAcceptableCommand) -> bool:
pass
@dataclass
class Simulator:
controller: Controller
evaluator: Evaluator
@timeit
def simulate(self):
controller = self.controller
for command in controller.ledger:
controller.action()
controller.compute_states()
self.evaluator.evaluate(command)
class Viewer(Protocol):
def show_view(self, simulation: ScatteringSimulation, command: CommandOrAcceptableCommand, acc_scheme: AcceptanceScheme) -> None:
pass
@dataclass
class MonteCarloEvaluator:
simulation: ScatteringSimulation
viewer: Optional[Viewer] = None
def _show_view(self, command: CommandOrAcceptableCommand, acc_scheme: AcceptanceScheme) -> None:
if self.viewer:
self.viewer.show_view(self.simulation, command, acc_scheme)
def evaluate(self, command: CommandOrAcceptableCommand) -> bool:
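"""Run the command against the simulation and report whether its acceptance scheme accepts the move."""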
acceptable_command = decorate_command(command)
acceptable_command.handle_simulation(self.simulation)
acc_scheme = acceptable_command.acceptance_scheme
self._show_view(command, acc_scheme)
return acc_scheme.is_acceptable()
@dataclass
class MemorizedSimulator(Simulator):
simulation: ScatteringSimulation
box_list: List[Box]
state_command: commands.Command = field(init = False, default_factory=lambda : None)
def compute_states(self) -> None:
if self.state_command:
self.state_command.execute()
else:
self.controller.compute_states()
def simulate_command(self, controller: Controller, command: CommandOrAcceptableCommand) -> None:
controller.action()
self.compute_states()
command.execute()
acceptable = self.evaluator.evaluate(command)
if acceptable:
self.state_command = commands.SetSimulationState.gen_from_simulation(self.simulation.simulation_params, self.box_list)
@timeit
def simulate(self) -> None:
controller = self.controller
for command in controller.ledger:
self.simulate_command(controller=controller, command=command)
if __name__ == "__main__":
pass
#%%
|
lestercbarnsley/SasRMC
|
sas_rmc/simulator.py
|
simulator.py
|
py
| 3,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "acceptance_scheme.UnconditionalAcceptance",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typing.Protocol",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "controller.Controller",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "controller.ledger",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "controller.action",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "controller.compute_states",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "typing.Protocol",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "scattering_simulation.ScatteringSimulation",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "acceptance_scheme.AcceptanceScheme",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "scattering_simulation.ScatteringSimulation",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "acceptance_scheme.AcceptanceScheme",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "scattering_simulation.ScatteringSimulation",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "box_simulation.Box",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "controller.Controller",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "controller.action",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "controller.ledger",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 81,
"usage_type": "name"
}
] |
73831992187
|
import os
os.environ['OPENCV_IO_MAX_IMAGE_PIXELS'] = str(pow(2, 40))
import sys
import copy
from pathlib import Path
from collections import Counter
import numpy as np
import pandas as pd
import cv2
import bioformats.formatreader
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.utilities.zmq
import cellprofiler_core.utilities.java
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.object
import cellprofiler_core.workspace
#from cellprofiler_core.setting.subscriber import LabelSubscriber
#from cellprofiler_core.setting.range import IntegerRange
def _clahe(image):
#-----Reading the image-----------------------------------------------------
if not isinstance(image, np.ndarray):
image = cv2.imread(image, 1)
#-----Converting image to LAB Color model-----------------------------------
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
#-----Splitting the LAB image to different channels-------------------------
l, a, b = cv2.split(lab)
#-----Applying CLAHE to L-channel-------------------------------------------
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8,8))
cl = clahe.apply(l)
#-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv2.merge((cl,a,b))
#-----Converting image from LAB Color model to RGB model--------------------
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
#_____END_____#
#return cl
return final
def clahe(image, iter=5, return_gray=True):
"""
Enhance local contrast with CLAHE algorithm
Parameters
--------------
image: fn, np.ndarray
image file name or np.ndarray representing image
iter: int
how many times to enhance
"""
while iter:
image = _clahe(image)
iter -= 1
if return_gray:
image = np.dot(image[..., :3], [0.2989, 0.5870, 0.1140])
image = image.astype(int)
return image
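# Example usage (hypothetical file name):
# gray = clahe('slide.tif', iter=3, return_gray=True)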
def blur_detect(image, channel='g', chunk_size=3, method='laplacian', top_svd=30,
outfile=None, show_in_rgb=None, show_in_grey=None):
"""
Calculte blur values with stepwise slide chunks for RGB image
Parameters
------------------------------
image: np.ndarray, image
image matrix with three channels
channel: {'r', 'g', 'b'}, default g
which channel to be used
chunk_size: int
pixel number for each chunk
method: {'laplacian', 'svd'}, default laplacian
which method to calculate blur value
top_svd: int
top N svd used for svd method
outfile: str
write the blur matrix into file
show_in_rgb: str
display the blur value in rgb image
show_in_grey: str
display the blur value in grey image
"""
# background was detected as blur region
# I need to segmentate tissue region firstly
# here I used color masking for segmentation on green channel
b, g, r = cv2.split(image)
# detect based on green channel
light = 10
dark = 255
if channel == 'r':
channel = r
elif channel == 'g':
channel = g
elif channel == 'b':
channel = b
mask = cv2.inRange(channel, light, dark)
kernel = np.ones((10, 10), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
blur_image = np.zeros(shape=image.shape, dtype=np.uint8)
for (x, y), value in np.ndenumerate(mask):
if value == 0:
continue
chunk = image[x:x+chunk_size, y:y+chunk_size]
# small value indicate blur region
if method == 'laplacian':
blur_value = cv2.Laplacian(chunk, cv2.CV_64F).var()
elif method == 'svd':
u, sigma, vt = np.linalg.svd(chunk)
blur_value = sum(sigma[:top_svd]) / sum(sigma)
blur_image[x, y] = blur_value
if outfile:
np.savetxt(outfile, blur_image, fmt='%d')
if show_in_rgb:
blur_rgb_image = cv2.applyColorMap(blur_image, cv2.COLORMAP_JET)
cv2.imwrite(show_in_rgb, blur_rgb_image)
if show_in_grey:
black = np.zeros(shape=image.shape, dtype=np.uint8)
blur_mask = np.where(blur_image < 30, mask, black)
cv2.imwrite(show_in_grey, blur_mask)
return blur_image
def _pycellprofilter(image, name='DNA', cpi=None, saved_object='IdentifySecondaryObjects'):
print(cellprofiler_core.preferences.__is_headless)
# load pipeline from cpi file
print('load pipeline from {}'.format(cpi))
pipeline = cellprofiler_core.pipeline.Pipeline()
pipeline.load(cpi)
# get modules list
modules = pipeline.modules()
# setup image_set
image_set = cellprofiler_core.image.ImageSet(0, {'name':name}, name)
if isinstance(image, np.ndarray) and len(image.shape) == 2:
x = image
else:
x = cv2.imread(str(image), 0)
x[x > 230] = 230
image_x = cellprofiler_core.image.Image(x, path_name=image.parent, file_name=image.name)
image_set.add(name, image_x)
# init workspace
object_set = cellprofiler_core.object.ObjectSet()
measurements = cellprofiler_core.measurement.Measurements()
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
modules,
image_set,
object_set,
measurements,
[image_set]
)
for module in modules:
sys.stdout.write(f'... {module.module_name}\n')
module.run(workspace)
objects = workspace.object_set.get_objects(saved_object)
try:
celloutlines = workspace.image_set.get_image('CellOutlines')
except:
sys.stderr.write('cell outlines not get\n')
celloutlines = None
return objects, celloutlines
def pycellprofiler(image, save_prefix=None, return_image=True,
cpi='./default.cppipe',
image_name='DNA',
saved_object='IdentifySecondaryObjects',
outdir='./outdir', tmpdir='./tmpdir', ):
outdir, tmpdir = Path(outdir), Path(tmpdir)
if not outdir.exists():
outdir.mkdir(parents=True, exist_ok=True)
objects = None
try:
#cellprofiler_core.preferences.set_headless()
cellprofiler_core.preferences.set_temporary_directory(outdir)
cellprofiler_core.preferences.set_default_output_directory(outdir)
cellprofiler_core.utilities.java.start_java()
sys.stdout.write('Starting cellprofiler identify ...\n')
objects, celloutlines = _pycellprofilter(
image,
name=image_name,
cpi=cpi,
saved_object=saved_object
)
sys.stdout.write('Cell objects and outlines generated\n')
except Exception as err:
sys.stderr.write('***Error: {}\n'.format(err))
finally:
cellprofiler_core.utilities.zmq.join_to_the_boundary()
bioformats.formatreader.clear_image_reader_cache()
cellprofiler_core.utilities.java.stop_java()
if objects is None:
return
sys.stdout.write('Saving labled cells ...\n')
mask = objects.segmented
b, g, r = cv2.split(celloutlines.pixel_data)
if save_prefix is not None:
mask_file = str(outdir / f'{save_prefix}_mask.txt')
np.savetxt(mask_file, mask, fmt='%d')
boundary_file = str(outdir / f'{save_prefix}_boundary.txt')
np.savetxt(boundary_file, b, fmt='%d')
if return_image:
image = img_outliner(image, boundary=b)
return mask, b, image
else:
return mask, b
def boundary_detect(mask, image, save_prefix='cell'):
import skimage.segmentation
image = cv2.imread(str(image))
outlines = skimage.segmentation.mark_boundaries(
image,
mask,
color=(1, 0, 0),
mode='inner',
)
b, g, r = cv2.split(outlines)
if save_prefix:
np.savetxt(f'{save_prefix}.boundary.txt', b, fmt='%d')
image = img_outliner(image, boundary=b,
save=f'{save_prefix}.celloutlines.png'
)
return b
def img_outliner(image, boundary, save='celloutlines.png'):
if isinstance(image, str):
image = cv2.imread(image)
mask = np.isin(boundary, [1])
image[mask] = (255, 0, 0)
if save:
cv2.imwrite(save, image)
return image
def getfootprint(struc, a, b=None):
from skimage.morphology import (
square,
rectangle,
diamond,
disk,
octagon,
star)
struc_lib = {
'square': square,
'rectangle': rectangle,
'diamond': diamond,
'disk': disk,
'octagon': octagon,
'star': star
}
morph = struc_lib[struc]
if struc in ['rectangle', 'octagon']:
if b is None:
sys.stderr.write('two args required\n')
sys.exit()
return morph(a, b)
else:
if b is not None:
sys.stderr.write('only one arg required\n')
sys.exit()
return morph(a)
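# e.g. getfootprint('octagon', 7, 4) -> skimage.morphology.octagon(7, 4);
# single-size shapes such as getfootprint('disk', 5) take one argument.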
class Stoarr:
def __init__(self, matrix):
if isinstance(matrix, str):
if matrix.endswith('.txt'):
matrix = np.loadtxt(matrix)
elif matrix.endswith(('.tif', '.png')):
matrix = cv2.imread(matrix, cv2.IMREAD_UNCHANGED)
self.matrix = matrix.astype(int)
def to_triplet(self, name='mask'):
import scipy.sparse
mtx= scipy.sparse.csc_matrix(self.matrix)
mtx = mtx.tocoo()
tmp = []
for x, y, mask in zip(mtx.row, mtx.col, mtx.data):
tmp.append([x, y, int(mask)])
triplet = pd.DataFrame(tmp, columns=['x', 'y', name])
return triplet
def binning(self, bin_size):
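"""Group nonzero pixels into bin_size x bin_size grid cells.

Each occupied cell receives a distinct non-positive label (0, -1, -2, ...),
so binned regions cannot collide with the positive cell labels.
"""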
sys.stdout.write('binning ... ')
sys.stdout.flush()
triplet = self.to_triplet()
triplet['xbin'] = (triplet.x / bin_size).astype(int) * bin_size
triplet['ybin'] = (triplet.y / bin_size).astype(int) * bin_size
triplet['bin'] = triplet.xbin.astype(str) + '_' + triplet.ybin.astype(str)
index = [(-i, x) for i, x in enumerate(triplet['bin'].unique())]
index = pd.DataFrame(index, columns=['N', 'bin'])
triplet = triplet.merge(index, how='left', on='bin')
matrix = np.zeros(shape=self.matrix.shape, dtype=int)
matrix[triplet['x'], triplet['y']] = triplet['N']
sys.stdout.write('done\n')
return Stoarr(matrix)
def to_binary(self):
obj = copy.deepcopy(self)
mask = np.isin(obj.matrix, [0], invert=True)
obj.matrix[mask] = 1
return obj
def subtract(self, other):
sys.stdout.write('subtracting ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
obj = obj.to_binary()
other = other.to_binary()
obj.matrix = obj.matrix - other.matrix
sys.stdout.write('done\n')
return obj
def intersection(self, other, label_area_cutoff=0.3):
"""intersection of label mask and binary mask
* mask: binary matrix
* label_area_cutoff: labels with greater area will be dropped
"""
sys.stdout.write('intersection ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
if isinstance(other, Stoarr):
other = other.to_binary()
values = np.unique(obj.matrix)
if len(values) == 2:
mask = cv2.bitwise_and(obj.matrix, other.matrix)
mask = np.invert(mask.astype(bool))
else:
binary = self.to_binary()
mask = cv2.bitwise_and(binary.matrix, other.matrix)
mask = np.invert(mask.astype(bool))
orig_counter = Counter(obj.matrix.flatten())
filter_part = obj.matrix[mask]
filter_counter = Counter(filter_part.flatten())
filter_labels = []
for label, pixels in filter_counter.items():
if label == 0:
continue
ratio = pixels / orig_counter[label]
if ratio < label_area_cutoff:
continue
filter_labels.append(label)
filter_labels = list(set(filter_labels))
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def relabel(self, label_map=None):
if label_map is None:
unique_labels, labels = np.unique(self.matrix, return_inverse=True)
matrix = labels.reshape(self.matrix.shape)
#obj = Mask(matrix)
#obj.unique_labels = unique_labels
#obj.labels = labels
return Stoarr(matrix)
else:
triplet = self.to_triplet()
triplet = triplet.merge(label_map, how='left',
left_on='mask', right_index=True)
matrix = np.zeros(shape=self.matrix.shape, dtype=int)
matrix[triplet['x'], triplet['y']] = triplet['mask_y']
return Stoarr(matrix)
def retrieve(self):
if not hasattr(self, 'unique_labels') or not hasattr(self, 'labels'):
return
matrix = self.unique_labels[self.labels]
matrix = matrix.reshape(self.matrix.shape)
obj = Stoarr(matrix)
return obj
def minimum_filter(self, footprint='octagon', ksize=(4, 4), iterations=2):
sys.stdout.write('minimum filter ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
obj.matrix = obj.matrix.astype(np.uint8)
#obj.matrix = cv2.applyColorMap(
# obj.matrix,
# cv2.COLORMAP_JET
# )
try:
n, m = ksize
except:
n = ksize
m = None
footprint = getfootprint(footprint, n, m)
obj.matrix = cv2.erode(
obj.matrix,
kernel=footprint,
iterations=iterations
)
#cv2.imwrite('blur.png', obj.matrix)
sys.stdout.write('done\n')
return obj
def filter_by_matrix(self, on=None, min_value=None, max_value=None,
draw=False, prefix=None):
"""label mask method
* on: filter by minimum value of the input matrix
"""
sys.stdout.write('filter by matrix ... ')
sys.stdout.flush()
obj = copy.deepcopy(self)
triplet = obj.to_triplet()
ref = on.to_triplet()
triplet = triplet.merge(ref, how='left', on=('x', 'y'))
triplet = triplet.fillna(0)
medians = triplet.groupby('mask_x')['mask_y'].median()
medians = medians.to_frame()
if draw:
fig = self.relabel(medians)
cv2.imwrite(f'{prefix}.median.png', fig.matrix)
if min_value:
filter_labels = medians[medians['mask_y'] < min_value].index.values
if max_value:
filter_labels = medians[medians['mask_y'] > max_value].index.values
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def filter_by_diameter(self, min_size=1, max_size=None):
"""label mask method
* min_size: max circo radius
"""
sys.stdout.write('filter by diameter ... ')
sys.stdout.flush()
from skimage.measure import regionprops
obj = copy.deepcopy(self)
#obj.matrix = obj.matrix.astype(np.uint8)
filter_labels = []
regions = regionprops(obj.matrix)
for index, props in enumerate(regions):
if props.minor_axis_length <= 8 and (props.minor_axis_length * 5
<= props.major_axis_length):
# abnormity cell with large aspect ratio
filter_labels.append(props.label)
continue
if props.area > 1000 or props.area < 6:
# extreme large cell caused by non-detected blur region
# extreme small cell original segmentation fault
filter_labels.append(props.label)
continue
if props.extent < 0.3:
filter_labels.append(props.label)
continue
if props.minor_axis_length < min_size:
# extreme thin cell
filter_labels.append(props.label)
continue
if max_size and props.major_axis_length > max_size:
# extreme fat cell
filter_labels.append(props.label)
continue
mask = np.isin(obj.matrix, filter_labels)
obj.matrix[mask] = 0
sys.stdout.write('{} labels removed\n'.format(len(filter_labels)))
return obj
def merge(self, other, how='left'):
sys.stdout.write('merge mix labels ... ')
sys.stdout.flush()
if how == 'left':
obj = copy.deepcopy(self)
mask1 = obj.to_binary()
mask2 = copy.deepcopy(other)
elif how == 'right':
obj = copy.deepcopy(other)
mask1 = obj.to_binary()
mask2 = copy.deepcopy(self)
else:
pass
intersection = cv2.bitwise_and(mask1.matrix, mask2.matrix)
mask2.matrix[intersection.astype(bool)] = 0
obj.matrix += mask2.matrix
sys.stdout.write('done\n')
return obj
def save(self, prefix='out'):
np.savetxt(f'{prefix}.mask.txt', self.matrix, fmt='%d')
return
def overlayoutlines(self, image=None, prefix=None):
sys.stdout.write('draw outlines ... ')
sys.stdout.flush()
import skimage.io
import skimage.segmentation
if isinstance(image, str):
image = skimage.io.imread(image)
outlines = skimage.segmentation.mark_boundaries(
image,
self.matrix,
color=(1, 0, 0),
mode='inner',
)
b, g, r = cv2.split(outlines)
sys.stdout.write('{} labels\n'.format(len(np.unique(self.matrix))))
mask = np.isin(b, [1])
image[mask] = 255
if prefix:
np.savetxt(f'{prefix}.outlines.txt', b, fmt='%d')
cv2.imwrite(f'{prefix}.outlines.png', image)
return b, image
def thres_mask(image, out_prefix=None):
image = cv2.imread(image, 0)
_, th = cv2.threshold(image, 20, 255, cv2.THRESH_BINARY)
if out_prefix:
cv2.imwrite(f'{out_prefix}.mask.tif', th)
return th
def mixture_seg(cell_mask, tissue_mask, blur_mask, image=None, prefix='out',):
cell_mask = Stoarr(cell_mask)
tissue_mask = Stoarr(tissue_mask)
blur_mask = Stoarr(blur_mask)
blur_mask = blur_mask.minimum_filter(
footprint='octagon',
ksize=(7, 4)
)
orig_cell_mask = cell_mask.intersection(
tissue_mask,
label_area_cutoff=0.3
)
cell_mask = orig_cell_mask.filter_by_matrix(
on=blur_mask,
max_value=90,
draw=True,
prefix=prefix
)
cell_mask = cell_mask.filter_by_diameter(
min_size=3,
max_size=None,
)
tissue_mask = orig_cell_mask.subtract(cell_mask)
bin_mask = tissue_mask.binning(
bin_size=20
)
mix_mask = cell_mask.merge(
bin_mask,
how='left'
)
mix_mask.save(prefix=prefix)
outlines, image = mix_mask.overlayoutlines(
image=image,
prefix=prefix
)
return outlines, image
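# Example driver call (hypothetical paths):
# outlines, image = mixture_seg('cell_mask.txt', 'tissue_mask.txt',
#                               'blur_mask.txt', image='he.tif', prefix='sample1')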
|
BGI-Qingdao/4D-BioReconX
|
Preprocess/cellsegmentation/objseg.py
|
objseg.py
|
py
| 19,716 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.split",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.createCLAHE",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.merge",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_LAB2BGR",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "cv2.morphologyEx",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_OPEN",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndenumerate",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cv2.Laplacian",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "cv2.CV_64F",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.svd",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "numpy.savetxt",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cv2.applyColorMap",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "cv2.COLORMAP_JET",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.preferences",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.pipeline.Pipeline",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.pipeline",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.image.ImageSet",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.image",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.image.Image",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.image",
"line_number": 153,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.object.ObjectSet",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.object",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.measurement.Measurements",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.measurement",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.workspace.Workspace",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.workspace",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.preferences.set_temporary_directory",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.preferences",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.preferences.set_default_output_directory",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.preferences",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.utilities.java.start_java",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.utilities",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 201,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline.utilities.zmq.join_to_the_boundary",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.utilities",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "bioformats.formatreader.formatreader.clear_image_reader_cache",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "bioformats.formatreader.formatreader",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "bioformats.formatreader",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "cellprofiler_core.pipeline.utilities.java.stop_java",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cellprofiler_core.pipeline.utilities",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "cellprofiler_core.pipeline",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "sys.stdout.write",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "cv2.split",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "skimage.segmentation.segmentation.mark_boundaries",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "skimage.segmentation.segmentation",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "skimage.segmentation",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "cv2.split",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "skimage.morphology.square",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "skimage.morphology.rectangle",
"line_number": 280,
"usage_type": "name"
},
{
"api_name": "skimage.morphology.diamond",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "skimage.morphology.disk",
"line_number": 282,
"usage_type": "name"
},
{
"api_name": "skimage.morphology.octagon",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "skimage.morphology.star",
"line_number": 284,
"usage_type": "name"
},
{
"api_name": "sys.stderr.write",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 291,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 296,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 306,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.sparse.csc_matrix",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.sparse",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 323,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 339,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse'}",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 360,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 369,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "numpy.invert",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "numpy.invert",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 402,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 406,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 411,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse'}",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 424,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse'}",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse'}",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 440,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 441,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 444,
"usage_type": "attribute"
},
{
"api_name": "cv2.erode",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 472,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 472,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 473,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 497,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 497,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 505,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 506,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "skimage.measure.regionprops",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 541,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 541,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 546,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 547,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 550,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 554,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 556,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 560,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 566,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 566,
"usage_type": "attribute"
},
{
"api_name": "numpy.savetxt",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 577,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 577,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 578,
"usage_type": "attribute"
},
{
"api_name": "skimage.segmentation.io.imread",
"line_number": 584,
"usage_type": "call"
},
{
"api_name": "skimage.segmentation.io",
"line_number": 584,
"usage_type": "attribute"
},
{
"api_name": "skimage.segmentation",
"line_number": 584,
"usage_type": "name"
},
{
"api_name": "skimage.segmentation.segmentation.mark_boundaries",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "skimage.segmentation.segmentation",
"line_number": 586,
"usage_type": "attribute"
},
{
"api_name": "skimage.segmentation",
"line_number": 586,
"usage_type": "name"
},
{
"api_name": "cv2.split",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 594,
"usage_type": "attribute"
},
{
"api_name": "numpy.unique",
"line_number": 594,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 596,
"usage_type": "call"
},
{
"api_name": "numpy.savetxt",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 605,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 607,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 607,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse', 'regionprops': 'skimage.measure.regionprops', 'skimage.io': 'skimage.io', 'skimage.segmentation': 'skimage.segmentation'}",
"line_number": 615,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse', 'regionprops': 'skimage.measure.regionprops', 'skimage.io': 'skimage.io', 'skimage.segmentation': 'skimage.segmentation'}",
"line_number": 616,
"usage_type": "call"
},
{
"api_name": "{'scipy.sparse': 'scipy.sparse', 'regionprops': 'skimage.measure.regionprops', 'skimage.io': 'skimage.io', 'skimage.segmentation': 'skimage.segmentation'}",
"line_number": 617,
"usage_type": "call"
}
] |
32481834912
|
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from constants import nb_class
from tracking import get_dataframes
tf.compat.v1.enable_eager_execution() # Remove when switching to tf2
pd.plotting.register_matplotlib_converters()
###############################
# Methods for data formatting #
###############################
def get_n_probs_per_label(df):
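    # Builds outputs[true_label][n]: the softmax probabilities assigned to
    # class n over all boxes whose ground-truth label is true_label
    # (7 classes, matching the hardcoded range(7) loops below).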
outputs = []
for n in range(7):
outputs.append([[], [], [], [], [], [], []])
def handle_row(row):
classification_logits = eval(row["classification_logits"])
right_labels = eval(row["label_boxes"])
for i in range(len(classification_logits)):
logits = classification_logits[i]
right_label = right_labels[i]
probs = tf.nn.softmax(logits).numpy().tolist()
for n in range(7):
n_prob = probs[n]
outputs[right_label][n].append(n_prob)
df.apply(handle_row, axis=1)
for n in range(7):
for i in range(len(outputs[n])):
if (outputs[n][i] == []):
outputs[n][i] = [-1.]
return outputs
def get_precision_distribution(df):
outputs = [[[], []], [[], []]]
def handle_row(row):
no_regr_precision = eval(row["no_regr_surface_precision"])[0]
final_precision = eval(row["final_surface_precision"])[0]
outputs[0][0].append(no_regr_precision[0] / no_regr_precision[1])
outputs[0][1].append(final_precision[0] / final_precision[1])
outputs[1][0].append(no_regr_precision[0])
outputs[1][1].append(final_precision[0])
df.apply(handle_row, axis=1)
return outputs
#########################################
# Initializing dataframes and variables #
#########################################
df = get_dataframes()
nb_rows = df["index"].count()
print("Dataframe size: {}".format(nb_rows))
df_tail = df.tail(1000)
all_probs_per_label = get_n_probs_per_label(df_tail)
precision_data = get_precision_distribution(df_tail)
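# precision_data[0] holds precision ratios (covered/total surface) and
# precision_data[1] the raw covered surfaces, each before and after box
# regression ("No Regr" vs "Final" in the plots below).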
############
# Plotting #
############
fig = plt.figure(figsize=(18, 12))
fig.canvas.set_window_title("Faster-RCNN graph - Last 1000 rows over {} total".format(nb_rows))
# Prob of label tail
plt.subplot(5, 2, 1)
probs_per_label = []
for k in range(7):
probs_per_label.append(all_probs_per_label[k][k])
parts = plt.violinplot(probs_per_label)
plt.xticks([])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Label Prob density")
# Prob of n label tail
for i in range(7):
plt.subplot(5, 2, 2 + i)
probs_per_label = all_probs_per_label[i]
parts = plt.violinplot(probs_per_label)
plt.xticks([])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_facecolor("#D43F3A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Prob density of {}".format(i))
# Precision distribution
plt.subplot(5, 2, 9)
parts = plt.violinplot(precision_data[0])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Precision density")
# Coverage distribution
plt.subplot(5, 2, 10)
parts = plt.violinplot(precision_data[1])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.yticks([144], ["Blob\nSurface"])
for pc in parts["bodies"]:
pc.set_alpha(1)
pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
ax = plt.gca()
ax.axhline(y=144, color="black", lw=1., alpha=.2)
plt.title("Coverage density")
plt.show()
|
benoitkoenig/blobWar-image
|
faster_rcnn/visualization.py
|
visualization.py
|
py
| 3,864 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tensorflow.compat.v1.enable_eager_execution",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.plotting.register_matplotlib_converters",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.plotting",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.nn.softmax",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.nn",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tracking.get_dataframes",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.violinplot",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.violinplot",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.violinplot",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.violinplot",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 130,
"usage_type": "name"
}
] |
14408997276
|
from announcement.models import AnnouncementModel
from UslugiProfi.utils import create_file_absolute_url
from rest_framework import serializers
class GetAnnouncementsSeriaizer(serializers.ModelSerializer):
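    # Read-side serializer: `image` is a SerializerMethodField so the response
    # carries an absolute URL built from the current request (see get_image).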
image = serializers.SerializerMethodField()
class Meta:
model = AnnouncementModel
fields = ('id', 'name', 'description', 'subcategory', 'user', 'address', 'address_lat', 'address_lng', 'create_date',
'update_time', 'price_type', 'fixed_price', 'upper_price', 'lower_price', 'currency', 'dimension', 'image', 'is_active')
def get_image(self, announcement):
request = self.context.get('request')
return create_file_absolute_url(request=request, file=announcement.image)
class CreateAnnouncementsSeriaizer(serializers.ModelSerializer):
class Meta:
model = AnnouncementModel
fields = ('name', 'description', 'subcategory', 'address', 'address_lat', 'address_lng', 'price_type',
'fixed_price', 'upper_price', 'lower_price', 'currency', 'dimension', 'image', 'user')
|
Johudo-old/UslugiProfi
|
announcement/serializers.py
|
serializers.py
|
py
| 1,075 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "announcement.models.AnnouncementModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "UslugiProfi.utils.create_file_absolute_url",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "announcement.models.image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "announcement.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "announcement.models.AnnouncementModel",
"line_number": 22,
"usage_type": "name"
}
] |
34632215573
|
import cv2
import numpy as np
img = cv2.imread('img\\ttt.jpg')
# Define the structuring element
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# Erode the image
eroded = cv2.erode(img, kernel)
cv2.imshow("fs_eroded", eroded)
# Dilate the image
dilated = cv2.dilate(img, kernel)
cv2.imshow("pz_dilated", dilated)
# Structuring element defined with NumPy
NpKernel = np.uint8(np.ones((3,3)))
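# A 3x3 uint8 array of ones is equivalent to the MORPH_RECT structuring
# element above, so this erosion should match the first one.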
Nperoded = cv2.erode(img, NpKernel)
# Show the image eroded with the NumPy kernel
cv2.imshow("Eroded by NumPy kernel", Nperoded)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
liuyuhua-ha/opencvStudy
|
opencvStudy/structTest.py
|
structTest.py
|
py
| 527 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_RECT",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.erode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.erode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 31,
"usage_type": "call"
}
] |
34780751946
|
import datetime
import csv
import re
from Classes import Contact
contact_list = list()
contact_list_csv = "contact_list.csv"
# Create a new contact and write it to the csv file
def create_contact():
    print("To skip a field and leave it empty, enter: _")
    new_contact = Contact("", "", "", "", "", "")
    # Input the first name
    while True:
        try:
            new_contact.first_name = input("First name: ")
        except ValueError:
            print("Invalid first name format: it contains disallowed characters")
        else:
            break
    # Input the last name
    while True:
        try:
            new_contact.last_name = input("Last name: ")
        except ValueError:
            print("Invalid last name format: it contains disallowed characters")
        else:
            break
    # Input the birth date
    while True:
        try:
            new_contact.birth_date = datetime.datetime \
                .strptime(input("Birth date in DD.MM.YYYY format: "), "%d.%m.%Y").date()
        except ValueError:
            print("Invalid date format, or the date cannot be later than the current day")
        else:
            break
    # Input the company name
    new_contact.company_name = input("Company: ")
    # Input the E-Mail
    while True:
        try:
            new_contact.email = input("E-Mail: ")
        except ValueError:
            print("Invalid E-Mail format")
        else:
            break
    # Input the phone number
    while True:
        try:
            new_contact.phone_number = input("Phone number in +7(___)___-__-__ format: ")
        except ValueError:
            print("Invalid number format")
        else:
            break
    contact_list.append(new_contact)
    # Build a dictionary from the contact
    contact_dict = {"first name": new_contact.first_name, "last name": new_contact.last_name,
                    "birth date": new_contact.birth_date, "company name": new_contact.company_name,
                    "email": new_contact.email, "phone number": new_contact.phone_number}
    # Append the record to the csv file
    with open(contact_list_csv, "a", newline="") as file:
        columns = ["first name", "last name", "birth date", "company name", "email", "phone number"]
        data_writer = csv.DictWriter(file, fieldnames=columns)
        # writer.writeheader()
        data_writer.writerow(contact_dict)
# Load the contact list from the csv file
def load_contact_list():
    with open(contact_list_csv) as file:
        data_reader = csv.DictReader(file)
        for line in data_reader:
            contact_list.append(Contact(line["first name"], line["last name"],
                                        line["birth date"], line["company name"],
                                        line["email"], line["phone number"]))
# Display the contact list
def show_contact_list():
    for contact in contact_list:
        print(contact)
# Search by first name
def find_by_first_name(name):
    regex = r"(?i)\b{}".format(name)
    counter = 0
    for contact in contact_list:
        if re.search(regex, contact.first_name):
            print(contact)
            counter += 1
    print("Found: {}".format(counter))
# Search by last name
def find_by_last_name(name):
    regex = r"(?i)\b{}".format(name)
    counter = 0
    for contact in contact_list:
        if re.search(regex, contact.last_name):
            print(contact)
            counter += 1
    print("Found: {}".format(counter))
# Fully clear the contact list / reset contact_list.csv
def clear_contact_list():
    with open(contact_list_csv, "w", newline="") as file:
        columns = ["first name", "last name", "birth date", "company name", "email", "phone number"]
        data_writer = csv.DictWriter(file, fieldnames=columns)
        data_writer.writeheader()
    contact_list.clear()
    print("The contact_list.csv file has been reset; the contact list is cleared.")
# Command dispatcher for working with the contact list
def command_dialog():
    print("help - show the command reference")
while True:
command = input(">>> ")
if command == "show":
show_contact_list()
if command == "create":
create_contact()
if command == "find_fn":
            find_by_first_name(input("Search by first name: "))
if command == "find_ln":
            find_by_last_name(input("Search by last name: "))
if command == "clear":
clear_contact_list()
if command == "quit":
break
if command == "help":
print("Список команд:\nshow - показать список контактов\ncreate - добавить контакт\n"
"find_fn - поиск по имени\nfind_ln - поиск по фамилии\nclear - полная очистка списка контактов\n"
"quit - выйти из программы\nhelp - справка")
|
NAS371/contactListTestWork
|
Program.py
|
Program.py
|
py
| 5,649 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "Classes.Contact",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "csv.DictWriter",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "Classes.Contact",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "csv.DictWriter",
"line_number": 122,
"usage_type": "call"
}
] |
6178538714
|
"""
Implement class ``SkyDictionary``, useful for marginalizing over sky
location.
"""
import collections
import itertools
import numpy as np
import scipy.signal
from scipy.stats import qmc
from cogwheel import gw_utils
from cogwheel import utils
class SkyDictionary(utils.JSONMixin):
"""
Given a network of detectors, this class generates a set of
samples covering the sky location isotropically in Earth-fixed
coordinates (lat, lon).
The samples are assigned to bins based on the arrival-time delays
between detectors. This information is accessible as dictionaries
``delays2inds_map``, ``delays2genind_map``.
Antenna coefficients F+, Fx (psi=0) and detector time delays from
geocenter are computed and stored for all samples.
"""
def __init__(self, detector_names, *, f_sampling: int = 2**13,
nsky: int = 10**6, seed=0):
self.detector_names = tuple(detector_names)
self.nsky = nsky
self.f_sampling = f_sampling
self.seed = seed
self._rng = np.random.default_rng(seed)
self.sky_samples = self._create_sky_samples()
self.fplus_fcross_0 = gw_utils.get_fplus_fcross_0(self.detector_names,
**self.sky_samples)
geocenter_delays = gw_utils.get_geocenter_delays(
self.detector_names, **self.sky_samples)
self.geocenter_delay_first_det = geocenter_delays[0]
self.delays = geocenter_delays[1:] - geocenter_delays[0]
self.delays2inds_map = self._create_delays2inds_map()
discrete_delays = np.array(list(self.delays2inds_map))
self._min_delay = np.min(discrete_delays, axis=0)
self._max_delay = np.max(discrete_delays, axis=0)
# (n_det-1,) float array: _sky_prior := d(Omega) / (4pi d(delays))
self._sky_prior = np.zeros(self._max_delay - self._min_delay + 1)
for key, inds in self.delays2inds_map.items():
self._sky_prior[key] = (
self.f_sampling ** (len(self.detector_names) - 1)
* len(inds) / self.nsky)
# (n_det-1) array of generators that yield sky-indices
self.ind_generators = np.full(self._max_delay - self._min_delay + 1,
iter(()))
for key, inds in self.delays2inds_map.items():
self.ind_generators[key] = itertools.cycle(inds)
def resample_timeseries(self, timeseries, times, axis=-1,
window=('tukey', .1)):
"""
Resample a timeseries to match the SkyDict's sampling frequency.
The sampling frequencies of the SkyDict and ``timeseries`` must
be multiples (or ``ValueError`` is raised).
Parameters
----------
timeseries: array_like
The data to resample.
times: array_like
Equally-spaced sample positions associated with the signal
data in `timeseries`.
axis: int
The axis of timeseries that is resampled. Default is -1.
window: string, float, tuple or None
Time domain window to apply to the timeseries. If not None,
it is passed to ``scipy.signal.get_window``, see its
documentation. By default a Tukey window with alpha=0.1 is
applied, to mitigate ringing near the edges
(scipy.signal.resample uses FFT methods that assume that the
signal is periodic).
Return
------
resampled_timeseries, resampled_times
A tuple containing the resampled array and the corresponding
resampled positions.
"""
if window:
shape = [1 for _ in timeseries.shape]
shape[axis] = timeseries.shape[axis]
timeseries = timeseries * scipy.signal.get_window(
window, shape[axis]).reshape(shape)
fs_ratio = self.f_sampling * (times[1] - times[0])
if fs_ratio != 1:
timeseries, times = scipy.signal.resample(
timeseries, int(len(times) * fs_ratio), times, axis=axis)
if not np.isclose(1 / self.f_sampling, times[1] - times[0]):
raise ValueError(
'`times` is incommensurate with `f_sampling`.')
return timeseries, times
def get_sky_inds_and_prior(self, delays):
"""
Parameters
----------
delays: int array of shape (n_det-1, n_samples)
Time-of-arrival delays in units of 1 / self.f_sampling
Return
------
sky_inds: tuple of ints of length n_physical
Indices of self.sky_samples with the correct time delays.
sky_prior: float array of length n_physical
Prior probability density for the time-delays, in units of
s^-(n_det-1).
physical_mask: boolean array of length n_samples
Some choices of time of arrival at detectors may not
correspond to any physical sky location, these are flagged
``False`` in this array. Unphysical samples are discarded.
"""
# First mask: are individual delays plausible? This is necessary
# in order to interpret the delays as indices to self._sky_prior
physical_mask = np.all((delays.T >= self._min_delay)
& (delays.T <= self._max_delay), axis=1)
# Submask: for the delays that survive the first mask, are there
# any sky samples with the correct delays at all detector pairs?
sky_prior = self._sky_prior[tuple(delays[:, physical_mask])]
submask = sky_prior > 0
physical_mask[physical_mask] *= submask
sky_prior = sky_prior[submask]
# Generate sky samples for the physical delays
generators = self.ind_generators[tuple(delays[:, physical_mask])]
sky_inds = np.fromiter(map(next, generators), int)
return sky_inds, sky_prior, physical_mask
def _create_sky_samples(self):
"""
Return a dictionary of samples in terms of 'lat' and 'lon' drawn
isotropically by means of a Quasi Monte Carlo (Halton) sequence.
"""
u_lat, u_lon = qmc.Halton(2, seed=self._rng).random(self.nsky).T
samples = {}
samples['lat'] = np.arcsin(2*u_lat - 1)
samples['lon'] = 2 * np.pi * u_lon
return samples
def _create_delays2inds_map(self):
"""
Return a dictionary mapping arrival time delays to sky-sample
indices.
Its keys are tuples of ints of length (n_det - 1), with time
delays to the first detector in units of 1/self.f_sampling.
Its values are list of indices to ``self.sky_samples`` of
samples that have the corresponding (discretized) time delays.
"""
# (ndet-1, nsky)
delays_keys = zip(*np.rint(self.delays * self.f_sampling).astype(int))
delays2inds_map = collections.defaultdict(list)
for i_sample, delays_key in enumerate(delays_keys):
delays2inds_map[delays_key].append(i_sample)
return delays2inds_map
|
2lambda123/cogwheel1
|
cogwheel/likelihood/marginalization/skydict.py
|
skydict.py
|
py
| 7,143 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cogwheel.utils.JSONMixin",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cogwheel.utils",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.random.default_rng",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cogwheel.gw_utils.get_fplus_fcross_0",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cogwheel.gw_utils",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "cogwheel.gw_utils.get_geocenter_delays",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cogwheel.gw_utils",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "scipy.signal.signal.get_window",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "scipy.signal.signal",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "scipy.signal.signal.resample",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "scipy.signal.signal",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "numpy.isclose",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.fromiter",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "scipy.stats.qmc.Halton",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "scipy.stats.qmc",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "numpy.arcsin",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "numpy.rint",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 173,
"usage_type": "call"
}
] |
9988571316
|
import websockets, json, traceback, os, asyncio, inspect, logging
import websockets.client
import websockets.server
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
from .client_management.client import Client
from .session_management.client_state import Client_State
from .inventory_management.profile_manager import Profile_Manager
from .inventory_management.skin_manager import Skin_Manager
from .randomizers.skin_randomizer import Skin_Randomizer
from .inventory_management.buddy_manager import Buddy_Manager
from .randomizers.buddy_randomizer import Buddy_Randomizer
from .sys_utilities.system import System
from .file_utilities.filepath import Filepath
from .sys_utilities.logging import Logger
from .user_configuartion.config import Config
from .client_config import SERVER_VERSION, IS_TEST_BUILD
from . import shared
logger_errors = logging.getLogger('VIM_errors')
logger = logging.getLogger('VIM_main')
class Server:
shared.client = Client()
shared.client.connect()
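    # Maps request names coming over the websocket to handler callables;
    # ws_entrypoint awaits any handler that is a coroutine function.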
request_lookups = {
"handshake": lambda: True,
"get_server_version": lambda: SERVER_VERSION,
# system stuff
"start_game": System.start_game,
"get_running_state": System.are_processes_running,
"autodetect_account": shared.client.autodetect_account,
# config stuff
"fetch_config": lambda: shared.config,
"update_config": Config.update_config,
# inventory/loadout stuff
"fetch_loadout": shared.client.fetch_loadout,
"fetch_inventory": Skin_Manager.fetch_inventory,
"fetch_profiles": Profile_Manager.fetch_profiles,
"refresh_profiles": Profile_Manager.refresh_profiles,
"refresh_skin_inventory": Skin_Manager.refresh_skin_inventory,
"refresh_buddy_inventory": Buddy_Manager.refresh_buddy_inventory,
"randomize_skins": Skin_Randomizer.randomize,
"randomize_buddies": Buddy_Randomizer.randomize,
"put_weapon": shared.client.put_weapon,
"put_buddies": shared.client.put_buddies,
#"update_skin_inventory": Skin_Manager.update_inventory,
"update_buddy_inventory": Buddy_Manager.update_inventory,
# profile stuff
"create_profile": Profile_Manager.generate_empty_profile,
"fetch_profile_metadatas": Profile_Manager.fetch_profile_metadata,
"update_profiles": Profile_Manager.update_profiles,
"update_profile": Profile_Manager.update_profile,
"fetch_profile": Profile_Manager.fetch_profile,
"apply_profile": Profile_Manager.apply_profile,
"favorite_all_buddies": Buddy_Manager.favorite_all,
# game state stuff
"force_update_game_state": Client_State.update_game_state,
}
@staticmethod
def start():
if not os.path.exists(Filepath.get_appdata_folder()):
os.mkdir(Filepath.get_appdata_folder())
Logger.create_logger()
shared.loop = asyncio.get_event_loop()
Config.init_config()
        # initialize any submodules
client_state = Client_State()
#start websocket server
start_server = websockets.serve(Server.ws_entrypoint, "", 8765)
print(f"open {'https://colinhartigan.github.io/valorant-inventory-manager' if not IS_TEST_BUILD else 'https://colinhartigan.github.io/VIM-test-client'} in your browser to use VIM")
shared.loop.run_until_complete(start_server)
# initialize any asynchronous submodules
shared.loop.run_until_complete(client_state.loop())
shared.loop.run_forever()
@staticmethod
async def ws_entrypoint(websocket, path):
logger.debug("a client connected")
logger.debug(shared.sockets)
shared.sockets.append(websocket)
try:
while websocket in shared.sockets:
data = await websocket.recv()
data = json.loads(data)
request = data.get("request")
args = data.get("args")
has_kwargs = True if args is not None else False
logger.debug(f"request: {request}")
payload = {}
if request in Server.request_lookups.keys():
payload = {
"success": True,
"event": request,
"data": None,
}
if inspect.iscoroutinefunction(Server.request_lookups[request]):
if has_kwargs:
payload["data"] = await Server.request_lookups[request](**args)
else:
payload["data"] = await Server.request_lookups[request]()
else:
if has_kwargs:
payload["data"] = Server.request_lookups[request](**args)
else:
payload["data"] = Server.request_lookups[request]()
else:
payload = {
"success": False,
"data": "could not find the specified request"
}
await websocket.send(json.dumps(payload))
logger.debug(f"response:\n{json.dumps(payload)} ")
except ConnectionClosedOK:
logger.info("disconnected")
shared.sockets.pop(shared.sockets.index(websocket))
except ConnectionClosedError:
logger.info("disconnected w/ error")
shared.sockets.pop(shared.sockets.index(websocket))
except Exception:
logger_errors.error("----- EXCEPTION -----")
logger_errors.error(traceback.format_exc())
except:
logger.error("idk what even happened to get here")
|
colinhartigan/valorant-inventory-manager
|
server/src/server.py
|
server.py
|
py
| 5,848 |
python
|
en
|
code
| 150 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "client_management.client.Client",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "client_config.SERVER_VERSION",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "sys_utilities.system.System.start_game",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys_utilities.system.System",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "sys_utilities.system.System.are_processes_running",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "sys_utilities.system.System",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "user_configuartion.config.Config.update_config",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "user_configuartion.config.Config",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "inventory_management.skin_manager.Skin_Manager.fetch_inventory",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.skin_manager.Skin_Manager",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.fetch_profiles",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.refresh_profiles",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "inventory_management.skin_manager.Skin_Manager.refresh_skin_inventory",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.skin_manager.Skin_Manager",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager.refresh_buddy_inventory",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "randomizers.skin_randomizer.Skin_Randomizer.randomize",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "randomizers.skin_randomizer.Skin_Randomizer",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "randomizers.buddy_randomizer.Buddy_Randomizer.randomize",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "randomizers.buddy_randomizer.Buddy_Randomizer",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager.update_inventory",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.generate_empty_profile",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.fetch_profile_metadata",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.update_profiles",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.update_profile",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.fetch_profile",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager.apply_profile",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.profile_manager.Profile_Manager",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager.favorite_all",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "inventory_management.buddy_manager.Buddy_Manager",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "session_management.client_state.Client_State.update_game_state",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "session_management.client_state.Client_State",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "file_utilities.filepath.Filepath.get_appdata_folder",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "file_utilities.filepath.Filepath",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "os.mkdir",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "file_utilities.filepath.Filepath.get_appdata_folder",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "file_utilities.filepath.Filepath",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "sys_utilities.logging.Logger.create_logger",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sys_utilities.logging.Logger",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "user_configuartion.config.Config.init_config",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "user_configuartion.config.Config",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "session_management.client_state.Client_State",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "websockets.serve",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "client_config.IS_TEST_BUILD",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "inspect.iscoroutinefunction",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "websockets.exceptions.ConnectionClosedOK",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "websockets.exceptions.ConnectionClosedError",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 153,
"usage_type": "call"
}
] |
17690019803
|
#!/usr/bin/env python3
""" Module for view definition """
from flask import Flask, render_template, request
from flask_babel import Babel, _
from typing import Optional
class Config(object):
""" Config class """
# ...
LANGUAGES = ['en', 'fr']
BABEL_DEFAULT_LOCALE = 'en'
BABEL_DEFAULT_TIMEZONE = 'UTC'
app = Flask(__name__)
babel = Babel(app)
app.config.from_object(Config)
# def create_app(config_class=Config):
# app = Flask(__name__)
# babel.init_app(app)
# app.config.from_object(config_class)
# return app
@babel.localeselector
def get_locale() -> Optional[str]:
    """ Get the preferred locale """
    locale = request.args.get('locale')
    if locale in app.config['LANGUAGES']:
        return locale
    return request.accept_languages.best_match(app.config['LANGUAGES'])
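# Example: GET /?locale=fr renders the French translation, while an
# unsupported value (e.g. ?locale=de) falls back to the Accept-Language match.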
@app.route('/', methods=['GET'], strict_slashes=False)
def index() -> str:
""" Index function """
return render_template('4-index.html')
if __name__ == '__main__':
app.run(debug=True)
|
dnjoe96/alx-backend
|
0x02-i18n/4-app.py
|
4-app.py
|
py
| 1,140 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_babel.Babel",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.request.accept_languages.best_match",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.accept_languages",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 44,
"usage_type": "call"
}
] |
8631452934
|
import pytest
import numpy as np
from abito.lib.significance import *
def test_t_test():
np.random.seed(0)
treatment = np.random.normal(100, size=100)
control = np.random.normal(100, size=100)
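    # Both samples are drawn from N(100, 1), so the two-sample test (and the
    # one-sample test against the true mean) should not reject the null.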
r = t_test(treatment, control)
assert r.p_value == pytest.approx(0.9, 0.1)
r = t_test_1samp(treatment, 100)
assert r.p_value == pytest.approx(0.6, 0.1)
|
avito-tech/abito
|
tests/test_significance.py
|
test_significance.py
|
py
| 376 |
python
|
en
|
code
| 14 |
github-code
|
6
|
[
{
"api_name": "numpy.random.seed",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytest.approx",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.approx",
"line_number": 14,
"usage_type": "call"
}
] |
26112361495
|
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "14/02/2018"
import enum
import logging
from silx.gui import qt
from silx.gui.dialog.ImageFileDialog import ImageFileDialog
from silx.gui.dialog.DataFileDialog import DataFileDialog
import silx.io
logging.basicConfig()
class Mode(enum.Enum):
DEFAULT_FILEDIALOG = 0
IMAGEFILEDIALOG = 1
DATAFILEDIALOG = 2
DATAFILEDIALOG_DATASET = 3
DATAFILEDIALOG_GROUP = 4
DATAFILEDIALOG_NXENTRY = 5
class DialogExample(qt.QMainWindow):
def __init__(self, parent=None):
super(DialogExample, self).__init__(parent)
self.__state = {}
centralWidget = qt.QWidget(self)
layout = qt.QHBoxLayout()
centralWidget.setLayout(layout)
options = self.createOptions()
layout.addWidget(options)
buttonGroup = qt.QGroupBox()
buttonGroup.setTitle("Create dialog")
layout.addWidget(buttonGroup)
buttonLayout = qt.QVBoxLayout()
buttonGroup.setLayout(buttonLayout)
# ImageFileDialog
b1 = qt.QPushButton(self)
b1.setMinimumHeight(50)
b1.setText("Open a dialog")
b1.clicked.connect(self.openDialog)
buttonLayout.addWidget(b1)
b2 = qt.QPushButton(self)
b2.setMinimumHeight(50)
b2.setText("Open a dialog with state stored")
b2.clicked.connect(self.openDialogStoredState)
buttonLayout.addWidget(b2)
b3 = qt.QPushButton(self)
b3.setMinimumHeight(50)
b3.setText("Open a dialog at home")
b3.clicked.connect(self.openDialogAtHome)
buttonLayout.addWidget(b3)
b4 = qt.QPushButton(self)
b4.setMinimumHeight(50)
b4.setText("Open a dialog at computer root")
b4.clicked.connect(self.openDialogAtComputer)
buttonLayout.addWidget(b4)
self.setCentralWidget(centralWidget)
def createOptions(self):
panel = qt.QGroupBox()
panel.setTitle("Options")
layout = qt.QVBoxLayout()
panel.setLayout(layout)
group = qt.QButtonGroup(panel)
radio = qt.QRadioButton(panel)
radio.setText("Qt QFileDialog")
radio.setProperty("Mode", Mode.DEFAULT_FILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx ImageFileDialog")
radio.setProperty("Mode", Mode.IMAGEFILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setChecked(True)
radio.setText("silx DataFileDialog")
radio.setProperty("Mode", Mode.DATAFILEDIALOG)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=dataset)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_DATASET)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=group)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_GROUP)
group.addButton(radio)
layout.addWidget(radio)
radio = qt.QRadioButton(panel)
radio.setText("silx DataFileDialog (filter=NXentry)")
radio.setProperty("Mode", Mode.DATAFILEDIALOG_NXENTRY)
group.addButton(radio)
layout.addWidget(radio)
self.__options = group
return panel
def printResult(self, dialog, result):
if not result:
print("Nothing selected")
return
print("Selection:")
if isinstance(dialog, qt.QFileDialog):
print("- Files: %s" % dialog.selectedFiles())
elif isinstance(dialog, ImageFileDialog):
print("- File: %s" % dialog.selectedFile())
print("- URL: %s" % dialog.selectedUrl())
print("- Data URL: %s" % dialog.selectedDataUrl())
image = dialog.selectedImage()
print("- Image: <dtype: %s, shape: %s>" % (image.dtype, image.shape))
elif isinstance(dialog, DataFileDialog):
print("- File: %s" % dialog.selectedFile())
print("- URL: %s" % dialog.selectedUrl())
print("- Data URL: %s" % dialog.selectedDataUrl())
try:
data = dialog.selectedData()
print("- Data: <dtype: %s, shape: %s>" % (data.dtype, data.shape))
except Exception as e:
print("- Data: %s" % e)
url = dialog.selectedDataUrl()
with silx.io.open(url.file_path()) as h5:
node = h5[url.data_path()]
print("- Node: %s" % node)
else:
assert(False)
def createDialog(self):
print("")
print("-------------------------")
print("----- Create dialog -----")
print("-------------------------")
button = self.__options.checkedButton()
mode = button.property("Mode")
if mode == Mode.DEFAULT_FILEDIALOG:
dialog = qt.QFileDialog(self)
dialog.setAcceptMode(qt.QFileDialog.AcceptOpen)
elif mode == Mode.IMAGEFILEDIALOG:
dialog = ImageFileDialog(self)
elif mode == Mode.DATAFILEDIALOG:
dialog = DataFileDialog(self)
elif mode == Mode.DATAFILEDIALOG_DATASET:
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingDataset)
elif mode == Mode.DATAFILEDIALOG_GROUP:
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingGroup)
elif mode == Mode.DATAFILEDIALOG_NXENTRY:
def customFilter(obj):
if "NX_class" in obj.attrs:
return obj.attrs["NX_class"] in [b"NXentry", u"NXentry"]
return False
dialog = DataFileDialog(self)
dialog.setFilterMode(DataFileDialog.FilterMode.ExistingGroup)
dialog.setFilterCallback(customFilter)
else:
assert False
return dialog
def openDialog(self):
# Create a fresh dialog
dialog = self.createDialog()
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def openDialogStoredState(self):
# Create a fresh dialog
dialog = self.createDialog()
if dialog.__class__ in self.__state:
dialog.restoreState(self.__state[dialog.__class__])
# Execute the dialog as modal
result = dialog.exec()
self.__state[dialog.__class__] = dialog.saveState()
self.printResult(dialog, result)
def openDialogAtHome(self):
# Create a fresh dialog
path = qt.QDir.homePath()
dialog = self.createDialog()
dialog.setDirectory(path)
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def openDialogAtComputer(self):
# Create a fresh dialog
path = ""
dialog = self.createDialog()
dialog.setDirectory(path)
# Execute the dialog as modal
result = dialog.exec()
self.printResult(dialog, result)
def main():
app = qt.QApplication([])
example = DialogExample()
example.show()
app.exec()
if __name__ == "__main__":
main()
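# Runnable as a standalone script (requires silx and a Qt binding, e.g. PyQt5):
#   python examples/fileDialog.py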
|
silx-kit/silx
|
examples/fileDialog.py
|
fileDialog.py
|
py
| 7,386 |
python
|
en
|
code
| 106 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt.QMainWindow",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QWidget",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QHBoxLayout",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QGroupBox",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QVBoxLayout",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QPushButton",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QPushButton",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QPushButton",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QPushButton",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QGroupBox",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QVBoxLayout",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QButtonGroup",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QRadioButton",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QFileDialog",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "silx.gui.dialog.ImageFileDialog.ImageFileDialog",
"line_number": 128,
"usage_type": "argument"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "silx.gui.io.open",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "silx.gui.io",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "silx.gui",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QFileDialog",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QFileDialog",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "silx.gui.dialog.ImageFileDialog.ImageFileDialog",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog.FilterMode",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog.FilterMode",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog.FilterMode",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.dialog.DataFileDialog.DataFileDialog",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QDir.homePath",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt.QDir",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QApplication",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "silx.gui.qt",
"line_number": 224,
"usage_type": "name"
}
] |
41243183736
|
from flask import Blueprint, request, jsonify, make_response
from sqlalchemy import create_engine, update
from sqlalchemy.orm import sessionmaker
from tabledef import Technician, Call
import config
# queries that extract rows from the "technicians" table for a specific company
select_technicians = """
SELECT id_technician, id_company, data_technician, chat_id, status, message FROM technicians WHERE id_company is {};
"""
select_technician_company = """
SELECT id_company FROM technicians WHERE chat_id is {};
"""
select_technician_info_by_chat_id = """
SELECT id_technician, id_company, data_technician, chat_id, status FROM technicians WHERE chat_id is {};
"""
select_technician_info_by_tech_id = """
SELECT id_technician, id_company, data_technician, chat_id, status FROM technicians WHERE id_technician is {};
"""
#'''
# Call
select_call_from_status = """
SELECT id_call, id_company, id_condominium, date_call, data_call, call_status FROM calls WHERE call_status is {} AND id_company is {};
"""
#'''
# Blueprint that allows main_data to call the endpoints below
api_technician_company = Blueprint('api_technician_company', __name__)
@api_technician_company.route('/<id_company>/technician', methods=['GET'])
def technician_company_id(id_company):
"""
Endpoint used to find the technicians of a given company in the database.
:param id_company: company id
:return: the technicians belonging to id_company
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
result = conn.execute(select_technicians.format(id_company))
technicians = []
for el in result:
technicians.append(
{
config.TECH_ID: el[0],
config.TECH_INFO: el[2]
}
)
if technicians:  # a DB cursor is always truthy, so check the collected rows instead
response = {
"message": "technicians:",
'status': 'OK',
"items": technicians
}
res_technicians = make_response(jsonify(response), 200)
else:
response = {
"message": "ERROR: No technicians in database",
'status': 'ERROR',
"items": []
}
res_technicians = make_response(jsonify(response), 404)
return res_technicians
#@api_technician_company.route('/technician/<id_technician>/<chat_id>', methods=['GET'])
@api_technician_company.route('/technician/<id_technician>/add_chat_id/<chat_id>', methods=['GET'])
def update_chat_id(id_technician, chat_id):
"""
Endpoint used to log in a technician.
:param id_technician: technician id
:param chat_id: chat id to associate with the technician
:return: stores the chat_id for the given id_technician in the database
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_chat_id = update(Technician).where(Technician.id_technician == id_technician).values(chat_id=chat_id)
conn.execute(update_chat_id)
response = {
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician_chat/<chat_id>/logout', methods=['GET'])
def logout_chat_id(chat_id):
"""
Endpoint used to log out a technician.
:param chat_id: chat id
:return: clears the chat_id of the technician with the given chat_id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_chat_id = update(Technician).where(Technician.chat_id == chat_id).values(chat_id='')
conn.execute(update_chat_id)
response = {
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician_chat/<chat_id>/update/<status>', methods=['GET'])
def update_status_tech_by_chat_id(chat_id, status):
"""
Endpoint used to update the status of the technician with the given chat_id.
:param chat_id: chat id
:param status: new status
:return: updates the status of the technician referred to by chat_id
"""
if status in config.TECH_STATUS_LABEL:
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_status = update(Technician).where(Technician.chat_id == chat_id).values(status=status)
conn.execute(update_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK'
}
if status == '1':
comp = conn.execute(select_technician_company.format(chat_id))
free_calls = conn.execute(select_call_from_status.format(1, next(comp)[0]))
calls = []
for el in free_calls:
calls.append(
{
config.CALL_ID: el[0],
config.COMPANY_ID: el[1],
config.BUILDING_ID: el[2],
config.CALL_DATE: el[3],
config.CALL_INFO: el[4],
config.CALL_STATUS: el[5]
}
)
#Call(input_data[config.COMPANY_ID], input_data[config.BUILDING_ID], datetime.now(), {config.CALL_MESSAGE: input_data[config.CALL_MESSAGE]}, call_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK',
"free_calls": calls
}
res_status = make_response(jsonify(response), 200)
else:
response = {
"tech_status": "Status must be between 0 and 4",
'status': 'ERROR'
}
res_status = make_response(jsonify(response), 404)
return res_status
@api_technician_company.route('/technician/<tech_id>/update/<status>', methods=['GET'])
def update_status_tech_by_tech_id(tech_id, status):
"""
Endpoint used to update the status of the technician with the given tech_id.
:param tech_id: technician id
:param status: new status
:return: updates the status of the technician referred to by tech_id
"""
if status in config.TECH_STATUS_LABEL:
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
update_status = update(Technician).where(Technician.id_technician == tech_id).values(status=status)
conn.execute(update_status)
response = {
"tech_status": config.TECH_STATUS_LABEL[status],
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
else:
response = {
"tech_status": "Status must be between 0 and 4",
'status': 'ERROR'
}
res_status = make_response(jsonify(response), 404)
return res_status
##### select_technician_info_by_chat_id
@api_technician_company.route('/technician_chat/<chat_id>/info', methods=['GET'])
def get_tech_info_by_chat_id(chat_id):
"""
Endpoint used to fetch a technician's information by chat id.
:param chat_id: chat id
:return: the information of the technician with the given chat id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
result = conn.execute(select_technician_info_by_chat_id.format(chat_id))
info = {}
for el in result:
info = {
config.TECH_ID: el[0],
config.COMPANY_ID: el[1],
config.TECH_INFO: el[2],
config.TECH_CHAT: el[3],
config.TECH_STATUS: el[4]
}
response = {
"info": info,
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
@api_technician_company.route('/technician/<tech_id>/info', methods=['GET'])
def get_tech_info_by_tech_id(tech_id):
"""
Endpoint used to fetch a technician's information by technician id.
:param tech_id: technician id
:return: the information of the technician with the given technician id
"""
engine = create_engine('sqlite:///call_center.db', echo=True)
conn = engine.connect()
result = conn.execute(select_technician_info_by_tech_id.format(tech_id))
info = {}  # default when no technician matches, avoiding a NameError below
for el in result:
info = {
config.TECH_ID: el[0],
config.COMPANY_ID: el[1],
config.TECH_INFO: el[2],
config.TECH_CHAT: el[3],
config.TECH_STATUS: el[4]
}
response = {
"info": info,
'status': 'OK'
}
res_status = make_response(jsonify(response), 200)
return res_status
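# Route summary (paths as registered by this blueprint; each endpoint answers GET):
#   /<id_company>/technician                          -> list a company's technicians
#   /technician/<id_technician>/add_chat_id/<chat_id> -> link a chat to a technician
#   /technician_chat/<chat_id>/logout                 -> clear the technician's chat_id
#   /technician_chat/<chat_id>/update/<status>        -> set status by chat_id
#   /technician/<tech_id>/update/<status>             -> set status by tech_id
#   /technician_chat/<chat_id>/info                   -> fetch technician info by chat_id
#   /technician/<tech_id>/info                        -> fetch technician info by tech_id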
|
fmauri90/call_center
|
dataservice/api_technician_company.py
|
api_technician_company.py
|
py
| 8,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "config.TECH_ID",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_INFO",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.update",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "tabledef.Technician",
"line_number": 91,
"usage_type": "argument"
},
{
"api_name": "tabledef.Technician.id_technician",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.update",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tabledef.Technician",
"line_number": 111,
"usage_type": "argument"
},
{
"api_name": "tabledef.Technician.chat_id",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "config.TECH_STATUS_LABEL",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.update",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tabledef.Technician",
"line_number": 132,
"usage_type": "argument"
},
{
"api_name": "tabledef.Technician.chat_id",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_STATUS_LABEL",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "config.CALL_ID",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "config.COMPANY_ID",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "config.BUILDING_ID",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "config.CALL_DATE",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "config.CALL_INFO",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "config.CALL_STATUS",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_STATUS_LABEL",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "config.TECH_STATUS_LABEL",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.update",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tabledef.Technician",
"line_number": 184,
"usage_type": "argument"
},
{
"api_name": "tabledef.Technician.id_technician",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_STATUS_LABEL",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "config.TECH_ID",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "config.COMPANY_ID",
"line_number": 215,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_INFO",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_CHAT",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_STATUS",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "config.TECH_ID",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "config.COMPANY_ID",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_INFO",
"line_number": 243,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_CHAT",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "config.TECH_STATUS",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "flask.make_response",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 251,
"usage_type": "call"
}
] |
25170385254
|
# Django imports
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
# Folder imports
from .utils.sky import quick_flight_search
from .models import *
from apps.authentication.models import Profile
from apps.trips.models import *
# Other imports
from datetime import datetime, date, timedelta
from dotenv import load_dotenv
import os
# URL: flights/partials/add_flight
# HTTP Method: GET
# Description: Intermediate screen to select a way to add a flight
def add_flight(request):
flight_direction = request.GET.get('flight_direction')
trip_id = request.GET.get('trip_id')
context = {'flight_direction': flight_direction, 'trip_id': trip_id, 'popup_title': f'Add an {flight_direction} flight'}
return render(request, 'partials/add_flight.html', context)
# URL: flights/partials/enter_flight
# HTTP Method: GET
# Description: Facilitates the manual entry of flight information
def enter_flight(request):
trip_id = request.GET.get('trip_id')
flight_direction = request.GET.get('flight_direction')
trip = get_object_or_404(Trip, id=trip_id)
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
departure_airports = Airport.objects.all()
arrival_interrailairports = InterrailAirport.objects.filter(city=earliest_destination.city)
# Get arrival airports as Airport objects
arrival_airports = []
for airport in arrival_interrailairports:
arrival_airports.append(airport.airport)
# Take 1 day off the minimum date for outbound flights to allow for long journeys
min_date = str(trip.start_date - timedelta(days=1))
else:
last_destination = trip.destination_set.order_by('order').last()
departure_interrailairports = InterrailAirport.objects.filter(city=last_destination.city)
# Get departure airports as Airport objects
departure_airports = []
for airport in departure_interrailairports:
departure_airports.append(airport.airport)
arrival_airports = Airport.objects.all()
min_date = str(last_destination.end_date)
context = {'popup_title': 'Enter Flight', 'departure_airports': departure_airports, 'arrival_airports': arrival_airports, 'flight_direction': flight_direction,
'min_date': min_date}
return render(request, 'partials/enter_flight.html', context)
# URL: flight/partials/search_flight
# HTTP Method: GET
# Description: Allows search to be created for given flight criteria
def search_flight(request):
# Check API key can be found
load_dotenv()
skyscanner_key = os.getenv('skyscanner_api_key')
if skyscanner_key:
key_found = True
else:
key_found = False
# Get trip and flight direction from get request
trip = get_object_or_404(Trip, id=request.GET.get('trip_id'))
flight_direction = request.GET.get('flight_direction')
# If outbound flight, find the earliest destination's start date and find a flight to that destination on that date
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
departure_airports = Airport.objects.filter(country = Profile.objects.get(user=request.user).nationality).order_by('name')
arrival_interrailairports = InterrailAirport.objects.filter(city=earliest_destination.city)
# Get arrival airports as Airport objects
arrival_airports = []
for airport in arrival_interrailairports:
arrival_airports.append(airport.airport)
# If inbound flight, find the last destination's end date and find a flight from that destination on that date
else:
last_destination = trip.destination_set.order_by('order').last()
departure_interrailairports = InterrailAirport.objects.filter(city=last_destination.city)
# Get departure airports as Airport objects
departure_airports = []
for airport in departure_interrailairports:
departure_airports.append(airport.airport)
arrival_airports = Airport.objects.filter(country = Profile.objects.get(user=request.user).nationality).order_by('name')
context = {'popup_title': 'Flight Search', 'departure_airports': departure_airports, 'arrival_airports': arrival_airports, 'trip_id': trip.id, 'flight_direction': flight_direction,
'key_found': key_found}
return render(request, 'partials/search_flight.html', context)
# URL: flight/partials/search_results
# HTTP Method: GET
# Description: Displays flight search criteria
def search_results(request):
# Get trip id, direction and direct flights flag from parameters
trip = get_object_or_404(Trip, id=request.GET.get('trip_id'))
flight_direction = request.GET.get('flight_direction')
# The checkbox submits 'on' when ticked
direct = request.GET.get('direct_flights') == 'on'
# Get airport objects from IDs
departure_airport = get_object_or_404(Airport, id = request.GET.get('departure_airport'))
destination_airport = get_object_or_404(Airport, id = request.GET.get('arrival_airport'))
# If outbound flight configure dates as trip start date
if flight_direction == "outbound":
earliest_destination = trip.destination_set.order_by('order').first()
session_token, direct_flights, connecting_flights = quick_flight_search("GBP", departure_airport.iata_code, destination_airport.iata_code, earliest_destination.start_date.year, earliest_destination.start_date.month, earliest_destination.start_date.day, direct)
# If inbound flight configure dates as trip end date
else:
last_destination = trip.destination_set.order_by('order').last()
session_token, direct_flights, connecting_flights = quick_flight_search("GBP", departure_airport.iata_code, destination_airport.iata_code, last_destination.start_date.year, last_destination.start_date.month, last_destination.start_date.day, direct)
context = {'direct_flights': direct_flights, 'connecting_flights': connecting_flights, 'flight_direction': flight_direction, 'departure_airport': departure_airport, 'destination_airport': destination_airport,
'popup_title': f'{departure_airport} - {destination_airport}', 'trip_id': trip.id}
return render(request, 'partials/search_results.html', context)
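# Hypothetical request flow through the partials above (URL prefixes are illustrative;
# the actual mounting depends on the project's urls.py):
#   GET flights/partials/add_flight?flight_direction=outbound&trip_id=1
#   GET flights/partials/search_flight?trip_id=1&flight_direction=outbound
#   GET flights/partials/search_results?trip_id=1&flight_direction=outbound&direct_flights=on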
|
sc19jwh/COMP3931
|
apps/flights/views.py
|
views.py
|
py
| 6,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "apps.authentication.models.Profile.objects.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "apps.authentication.models.Profile.objects",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "apps.authentication.models.Profile",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "apps.authentication.models.Profile.objects.get",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "apps.authentication.models.Profile.objects",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "apps.authentication.models.Profile",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "utils.sky.quick_flight_search",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "utils.sky.quick_flight_search",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 113,
"usage_type": "call"
}
] |
36229561780
|
from typing import List
'''
452. Minimum Number of Arrows to Burst Balloons
https://leetcode.cn/problems/minimum-number-of-arrows-to-burst-balloons/
The balloons burst by one arrow satisfy: the right end of the leftmost balloon lies at or beyond the left end of the rightmost balloon.
Greedy approach: sort the balloons by their right end.
Track the right endpoint `end` of the balloon that opened the current arrow; once some balloon's left endpoint is greater than `end`, that arrow can no longer hit it and a new arrow is needed.
'''
class Solution:
def findMinArrowShots(self, points: List[List[int]]) -> int:
points.sort(key=lambda x: x[1])
res = 1
end = points[0][1]
for st, en in points:
if st > end:
res += 1
end = en
return res
s = Solution()
print(s.findMinArrowShots([[10,16],[2,8],[1,6],[7,12]]))
|
z-w-wang/Leetcode-Problemlist
|
CS-Notes/Greedy/452.py
|
452.py
|
py
| 806 |
python
|
zh
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
}
] |
2704503307
|
# -*- coding: utf-8 -*-
from django.test import Client, RequestFactory, TestCase
from tasks import views
from tasks.models import Task, TaskStatus
from users.models import CustomUser
class TaskTest(TestCase):
"""Test cases for tasks."""
def setUp(self):
"""Initial setup before tests."""
self.factory = RequestFactory()
self.user = CustomUser.objects.create_user( # noqa: S106
username='testuser',
password='supersecret',
)
self.client = Client()
def createTask(self, name='Test task name'): # noqa: N802
"""Create test task."""
status = TaskStatus.objects.create(name='New')
return Task.objects.create(
name=name,
assigned_to=self.user,
creator=self.user,
status=status,
tags=['important', 'test'],
)
def test_task_create(self):
"""Test task creation."""
task = self.createTask()
self.assertTrue(isinstance(task, Task))
self.assertEqual(task.__str__(), task.name) # noqa: WPS609
self.assertEqual(Task.objects.count(), 1)
def test_tasks_list(self):
"""Test tasklist view."""
request = self.factory.get('/')
request.user = self.user
response = views.TaskList.as_view()(request)
self.assertEqual(response.status_code, 200) # noqa: WPS432
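# These tests run under Django's test runner, e.g.: python manage.py test tasks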
|
altvec/python-project-lvl4
|
tasks/tests.py
|
tests.py
|
py
| 1,403 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.test.RequestFactory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "users.models.CustomUser.objects.create_user",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "users.models.CustomUser.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "users.models.CustomUser",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tasks.models.TaskStatus.objects.create",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tasks.models.TaskStatus.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "tasks.models.TaskStatus",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tasks.models.Task.objects.create",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tasks.models.Task.objects",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "tasks.models.Task",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "tasks.models.Task",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "tasks.models.Task.objects.count",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tasks.models.Task.objects",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "tasks.models.Task",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "tasks.views.TaskList.as_view",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "tasks.views.TaskList",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "tasks.views",
"line_number": 43,
"usage_type": "name"
}
] |
19757717889
|
# unit.test_shop.test_shopRepo.py
from unittest.mock import Mock
import tinydb as tdb
from fixtures.shop import ShopFixture, TEMP_SHOPS_TINYDB_TEST_PATH, \
PRODUCTS_URLS_9_VALID_TEST_PATH, PRODUCTS_URLS_TEST_DIR
from shop.shop import Shop
from shop.shopDao import TinyShopDao
from shop.shopRepo import ShopRepo
from unit.testhelper import WebtomatorTestCase, ProductsUrlsRepoMock
class ShopRepoTest(WebtomatorTestCase):
testDBPath = TEMP_SHOPS_TINYDB_TEST_PATH
tempProductsUrlsRepoPath = PRODUCTS_URLS_TEST_DIR / "ProductsUrls_deleteMe.txt"
def setUp(self) -> None:
# Creates new DB at given path if not exists.
# Deletes all records in all tables if DB exists.
dbRef = tdb.TinyDB(str(self.testDBPath))
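# purge_tables() is the TinyDB 3.x API; TinyDB 4 renamed it to drop_tables().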
dbRef.purge_tables()
dbRef.close()
def tearDown(self) -> None:
if self.tempProductsUrlsRepoPath.is_file():
self.tempProductsUrlsRepoPath.unlink()
def test_ifVitalAttributesArePresent(self):
# Given
sut = ShopRepo
# Then
# Check presence of vital public properties/methods
self.assertHasAttribute(sut, 'getAll')
self.assertHasAttribute(sut, 'setAll')
self.assertHasAttribute(sut, 'update')
def test_init_shouldSetDefaultValues(self):
# When
daoMock = Mock()
daoMock.myValue = "DAO Mock checkValue"
sut = ShopRepo(dao=daoMock)
# Then
self.assertEqual("DAO Mock checkValue", sut._dao.myValue)
def test_getAll(self):
# Given
testTinyShopDao = TinyShopDao(path=self.testDBPath)
# Create 2 shops in TinyDB for testing.
# Note that we use client code to create them, which is more of an integration test...
fixture = ShopFixture()
fixture.create2Shops()
expectedShops = fixture.shops
ShopRepo(dao=testTinyShopDao).setAll(shops=expectedShops)
sut = ShopRepo(dao=testTinyShopDao)
# When
loadedShops = sut.getAll()
# Then
# Expect that loaded shops match the expected
self.assertEqual(expectedShops, loadedShops)
def test_setAll(self):
# Given
# Insert a document into a fresh 'Shops' table. This data is expected to be
# completely overridden by the test.
existingData = dict(OneTestOne="Test data val 1", TwoTestTwo="Test data val 2")
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(existingData)
# These data are expected:
fixture = ShopFixture()
fixture.create2Shops()
expectedShops = fixture.shops
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
sut.setAll(shops=expectedShops)
# Then
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
recordList: list = shopTable.all()
# Expect that previous data do not exist anymore
self.assertLessEqual(0, len(recordList))
self.assertIsNone(recordList[0].get("OneTestOne"))
self.assertIsNone(recordList[0].get("TwoTestTwo"))
# Note that we use client code to load the shops again, which is
# more of an integration test...
loadedShops = sut.getAll()
# Expect that loaded shops match the expected ones
self.assertEqual(expectedShops, loadedShops)
def test_update(self):
# Given
# Create 2 shops in TinyDB for testing.
fixture = ShopFixture()
fixture.create2Shops()
expectedShop = fixture.shops[0]
assert expectedShop.uid is not None and expectedShop.uid != ""
# Write a shop which we can try to update by UID.
existingData = dict(uid=expectedShop.uid, name="I don't know this shop's name")
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(existingData)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
sut.update(shop=expectedShop)
# Then
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
recordList: list = shopTable.all()
self.assertEqual(1, len(recordList))
# Expect that data with previous uid still exist
self.assertEqual(expectedShop.uid, recordList[0].get("uid"))
# Expect that shop's name has been updated
self.assertNotEqual("I don't know this shop's name", recordList[0].get("name"))
# Note that we use client code to load the shop again, which is
# more of an integration test...
updatedShops = sut.getAll()
self.assertIsInstance(updatedShops, list)
self.assertEqual(1, len(recordList))
# Expect that updated shop matches the expected one
self.assertEqual(expectedShop, updatedShops[0])
def test_findByUID(self):
# Given
# Create test data to search for.
uidToFind = "b0e2e467-6fd5-4a06-bb1e-9ad60223cafa"
shopData1 = dict(uid="ca0f5926-7d55-4973-a8e1-d3e2cc89fca6",
name="The name of the first test shop")
shopData2 = dict(uid=uidToFind,
name="The name of the second test shop")
expectedShop = Shop(**shopData2)
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(shopData1)
shopTable.insert(shopData2)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
foundShop = sut.findByUID(uidToFind)
# Then
self.assertIsInstance(foundShop, Shop)
self.assertEqual(foundShop.uid, uidToFind)
self.assertEqual(expectedShop, foundShop)
def test_findByName(self):
# Given
# Create test data to search for. We use two shops with the same name here.
shopData1 = dict(uid="ca0f5926-7d55-4973-a8e1-d3e2cc89fca6",
name="Shop with same name")
shopData2 = dict(uid="e68782fd-19af-428e-881f-99d7af9b83b0",
name="This shop should not be found")
shopData3 = dict(uid="b0e2e467-6fd5-4a06-bb1e-9ad60223cafa",
name="Shop with same name")
expectedShops = [Shop(**shopData1), Shop(**shopData3)]
with tdb.TinyDB(self.testDBPath) as db:
shopTable: tdb.database.Table = db.table(TinyShopDao._TABLE_NAME)
shopTable.insert(shopData1)
shopTable.insert(shopData2)
shopTable.insert(shopData3)
# Setup repo
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
# When
foundShops = sut.findByName("Shop with same name")
# Then
self.assertIsInstance(foundShops, list)
self.assertEqual(2, len(foundShops))
self.assertEqual(expectedShops, foundShops)
def test_updateFromProductsUrls(self):
# Given
# Copy fixture to new arbitrary file as we will modify its contents within this test.
with open(str(PRODUCTS_URLS_9_VALID_TEST_PATH), "r", encoding="utf-8") as source:
content = source.read()
with open(str(self.tempProductsUrlsRepoPath), "w+", encoding="utf-8") as target:
target.write(content)
# Note that the table gets deleted by the unit test's setup() method - so we
# start with a fresh empty table.
testTinyShopDao = TinyShopDao(path=self.testDBPath)
sut = ShopRepo(dao=testTinyShopDao)
productsUrlsRepo = ProductsUrlsRepoMock(productsUrlsRepoPath=self.tempProductsUrlsRepoPath)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# 1. Test initial update -----------------------------------------------------------
# When
# This is expected to fill the table with all the fixture data of ProductsUrls repo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(3, len(shops))
# Expect that all shops have been inserted
shopsUrls = [s.url for s in shops]
self.assertIn("https://www.solebox.com", shopsUrls)
self.assertIn("http://real.fantastic.de", shopsUrls)
self.assertIn("https://www.dbyte.org", shopsUrls)
# Expect that all products have been inserted
soleboxShop = list(filter(lambda s: s.url == "https://www.solebox.com", shops))[0]
self.assertIsInstance(soleboxShop.products, list)
self.assertEqual(1, len(soleboxShop.products))
for product in soleboxShop.products:
self.assertIn(product.url, expectedProductUrls)
realFantasticShop = list(filter(lambda s: s.url == "http://real.fantastic.de", shops))[0]
self.assertIsInstance(realFantasticShop.products, list)
self.assertEqual(2, len(realFantasticShop.products))
for product in realFantasticShop.products:
self.assertIn(product.url, expectedProductUrls)
dbyteShop = list(filter(lambda s: s.url == "https://www.dbyte.org", shops))[0]
self.assertIsInstance(dbyteShop.products, list)
self.assertEqual(6, len(dbyteShop.products))
for product in dbyteShop.products:
self.assertIn(product.url, expectedProductUrls)
# 2. Test delete product/shop -----------------------------------------------------
# Given
# Remove all http://real.fantastic.de/... URLs from ProductsUrls repo.
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
for line in reversed(lines):
if line.startswith("http://real.fantastic.de/shop/great-realdumbtrump.htm"):
lines.remove(line)
if line.startswith("http://real.fantastic.de/shop/buy-new-holo?prodid=682357ac"):
lines.remove(line)
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
# When
# This is expected to remove shop http://real.fantastic.de entirely from database,
# because it's products do not exist anymore in ProductsUrls repo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(2, len(shops))
# Expect that shop http://real.fantastic.de has been entirely removed from database
realFantasticShop = list(filter(lambda s: s.url == "http://real.fantastic.de", shops))
self.assertIsInstance(realFantasticShop, list)
self.assertEqual(0, len(realFantasticShop))
# 3. Test add product to existing shop ----------------------------------------------
# Given
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
lines.append("\nhttps://www.solebox.com/some-new-product\n")
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# When
# This is expected to update shop https://www.solebox.com with the above added
# product https://www.solebox.com/some-new-product
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(2, len(shops))
# Expect that product https://www.solebox.com/some-new-product has been added to
# existing shop with URL https://www.solebox.com
soleboxShop = list(filter(lambda s: s.url == "https://www.solebox.com", shops))[0]
self.assertIsInstance(soleboxShop.products, list)
self.assertEqual(2, len(soleboxShop.products))
for product in soleboxShop.products:
self.assertIn(product.url, expectedProductUrls)
# 4. Test add shop to existing shops -------------------------------------------------
# Given
with open(str(self.tempProductsUrlsRepoPath), "r+", encoding="utf-8") as target:
lines = target.readlines()
lines.append("\nhttps://new-shop-1833663.com/new-product.htm\n")
# Overwrite file with the updated data
target.seek(0)
target.writelines(lines)
expectedProducts = productsUrlsRepo.getAll()
expectedProductUrls = [p.url for p in expectedProducts]
# When
# This is expected to update the shop table (which already has shops in it) with
# the above added product which has a base url which currently not exists
# in the shops table. So a new shop with this product must be created in shopRepo.
sut.updateFromProductsUrls(productsUrlsRepo=productsUrlsRepo)
# Then
shops = sut.getAll()
self.assertIsInstance(shops, list)
self.assertEqual(3, len(shops))
# Expect that shop https://new-shop-1833663.com has been added to
# existing database.
newShop = list(filter(lambda s: s.url == "https://new-shop-1833663.com", shops))[0]
self.assertIsInstance(newShop.products, list)
self.assertEqual(1, len(newShop.products))
for product in newShop.products:
self.assertIn(product.url, expectedProductUrls)
|
dbyte/WebtomatorPublicEdition
|
tests/unit/test_shop/test_shopRepo.py
|
test_shopRepo.py
|
py
| 14,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unit.testhelper.WebtomatorTestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "fixtures.shop.TEMP_SHOPS_TINYDB_TEST_PATH",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "fixtures.shop.PRODUCTS_URLS_TEST_DIR",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "fixtures.shop.ShopFixture",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "fixtures.shop.ShopFixture",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "fixtures.shop.ShopFixture",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "shop.shop.Shop",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "shop.shop.Shop",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "shop.shop.Shop",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "tinydb.TinyDB",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "tinydb.database",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao._TABLE_NAME",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "fixtures.shop.PRODUCTS_URLS_9_VALID_TEST_PATH",
"line_number": 205,
"usage_type": "argument"
},
{
"api_name": "shop.shopDao.TinyShopDao",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "shop.shopRepo.ShopRepo",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "unit.testhelper.ProductsUrlsRepoMock",
"line_number": 215,
"usage_type": "call"
}
] |
36229750080
|
from typing import List
'''
剑指 Offer II 119. Longest Consecutive Sequence (same problem as LeetCode 128)
The straightforward idea is to sort and then scan, which costs O(n log n).
Every run of consecutive numbers has a starting number num such that num - 1 is not in nums.
So find each num for which num - 1 is absent from the set, then count the length of the run starting there.
'''
class Solution:
def longestConsecutive(self, nums: List[int]) -> int:
s = set(nums)
maxlen = 0
for num in s:
if num - 1 not in s:
templen = 0
while num in s:
num += 1
templen += 1
maxlen = max(templen, maxlen)
return maxlen
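# Quick check using the example from the problem statement (expected: 4, the run 1-2-3-4):
s = Solution()
print(s.longestConsecutive([100, 4, 200, 1, 3, 2]))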
|
z-w-wang/Leetcode-Problemlist
|
FxxkOffer/Graph/Offer_2_119.py
|
Offer_2_119.py
|
py
| 641 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
}
] |
71791729148
|
from sklearn.neighbors import KDTree
from sampler import Sampler
import networkx as nx
from shapely.geometry import LineString
def can_connect(p1, p2, polygons):
line = LineString([p1, p2])
for p in polygons:
if p.crosses(line) and p.height >= min(p1[2], p2[2]):
return False
return True
def create_graph(nodes, polygons, k=10):
g = nx.Graph()
tree = KDTree(nodes)
for n in nodes:
indicies = tree.query([n], k, return_distance=False)[0]
for i in indicies:
target_node = nodes[i]
if n == target_node:
continue
if can_connect(n, target_node, polygons):
g.add_edge(tuple(n), tuple(target_node), weight=1)
return g
def prm(data, num_samples=1000, extra_points=[]):
sampler = Sampler(data)
nodes = sampler.sample(num_samples=num_samples)
print('# sampled nodes {}'.format(len(nodes)))
nodes += extra_points
return create_graph(nodes, sampler.polygons), nodes
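# Minimal usage sketch (hypothetical: assumes `data` is the obstacle grid loaded
# from the project's colliders.csv; the file name and arguments are illustrative):
#   import numpy as np
#   data = np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2)
#   graph, nodes = prm(data, num_samples=500, extra_points=[(0, 0, 5)])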
|
magnusja/udacity-flying-cars
|
FCND-Motion-Planning/prm.py
|
prm.py
|
py
| 1,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "shapely.geometry.LineString",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KDTree",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sampler.Sampler",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sampler.sample",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sampler.polygons",
"line_number": 41,
"usage_type": "attribute"
}
] |
29576976470
|
# -*- coding: utf-8 -*-
"""
Sample data generation with scikit-learn
http://overlap.hatenablog.jp/entry/2015/10/08/022246
Created on Wed Jul 11 15:25:41 2018
@author: Akitaka
"""
### classification sample
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_auc_score
# Generate sample data
# 1000 samples with 5 (informative) + 2 (redundant) + 13 (independent) = 20 features
dat = make_classification(n_samples=1000, n_features=20, n_informative=5,
n_redundant=2, n_classes=2, n_clusters_per_class=10)
X = dat[0]
y = dat[1]
print("X shape", X.shape)
print("y shape", y.shape)
# Split the data into training and test sets
# 80% for training, 20% for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
# Build models and evaluate performance
# Create three models (logistic regression, random forest, KNN) and compute the AUC of each
clf = LogisticRegression()
clf.fit(X_train, y_train)
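# roc_auc_score expects scores for the positive class, hence predict_proba(...)[:, 1]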
print("LogisticRegression AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
clf = RandomForestClassifier(n_estimators=500, random_state=123)
clf.fit(X_train, y_train)
print("RandomForestClassifier AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
clf = KNeighborsClassifier(n_neighbors=10)
clf.fit(X_train, y_train)
print("KNeighborsClassifier AUC =", roc_auc_score(y_test, clf.predict_proba(X_test)[:,1]))
|
nakanishi-akitaka/python2018_backup
|
0711/test4_make_sample.py
|
test4_make_sample.py
|
py
| 1,780 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "sklearn.datasets.make_classification",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 45,
"usage_type": "call"
}
] |
33562082348
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
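# The helpers below zero out pixel ranges: 'vivas' (Spanish for "alive") blanks bright
# pixels >= 195, 'muertas' ("dead") blanks dark pixels <= 150, and 'colores' ("colors")
# blanks pixels where any BGR channel is at or below a fixed threshold.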
def thresholdingvivas(inp):
f, c = inp.shape
for i in range(f):
for j in range(c):
if(inp[i][j]>=195):
inp[i][j]=0
cv.imshow('vivas',inp)
def thresholdingmuertas(inp):
f, c = inp.shape
for i in range(f):
for j in range(c):
if(inp[i][j]<=150):
inp[i][j]=0
cv.imshow('muertas',inp)
def thresholdingcolores(inp):
f, c ,color = inp.shape
for i in range(f):
for j in range(c):
if inp[i][j][0] <= 121 or inp[i][j][1] <= 144 or inp[i][j][2] <= 184:  # use the parameter, not the global img
inp[i][j][0]=0
inp[i][j][1]=0
inp[i][j][2]=0
cv.imshow('colores',inp)
img = cv.imread('thresh2.png', cv.IMREAD_GRAYSCALE)
hist = cv.calcHist([img], [0], None, [256], [0, 256])
thresholdingmuertas(img)
plt.plot(hist, color='gray')
plt.xlabel('intensidad de iluminacion')
plt.ylabel('cantidad de pixeles')
plt.show()
cv.waitKey(0)  # imshow windows need waitKey to pump GUI events
cv.destroyAllWindows()
|
renzovc987/CG
|
Thresholdingrenzo.py
|
Thresholdingrenzo.py
|
py
| 1,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imshow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.calcHist",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
40211307735
|
from __future__ import division
import sys, os, math
import vtk
from pbrainlib.gtkutils import error_msg, simple_msg, make_option_menu,\
get_num_value, get_num_range, get_two_nums, str2int_or_err,\
OpenSaveSaveAsHBox, ButtonAltLabel
import pickle
from scipy import array, zeros, ones, sort, absolute, sqrt, divide,\
argsort, take, arange
class MeshManager:
"""
CLASS: MeshManager
DESCR: Handles rendering of VTK mesh (e.g. segmented cortex from ITK-Snap).
"""
def __init__ (self, interactor, renderer, mesh_filename, reg_filename):
self.interactor = interactor
self.renderer = renderer
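# VTK pipeline: structured points -> contour filter -> decimation -> smoothing
# -> normals -> triangle stripper -> mapper -> actor.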
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(mesh_filename)
cf = vtk.vtkContourFilter()
cf.SetInput(reader.GetOutput())
cf.SetValue(0, 1)
deci = vtk.vtkDecimatePro()
deci.SetInput(cf.GetOutput())
deci.SetTargetReduction(.1)
deci.PreserveTopologyOn()
smoother = vtk.vtkSmoothPolyDataFilter()
smoother.SetInput(deci.GetOutput())
smoother.SetNumberOfIterations(100)
normals = vtk.vtkPolyDataNormals()
normals.SetInput(smoother.GetOutput())
normals.FlipNormalsOn()
normals.SetFeatureAngle(60.0)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
lut = vtk.vtkLookupTable()
lut.SetHueRange(0, 0)
lut.SetSaturationRange(0, 0)
lut.SetValueRange(0.2, 0.55)
contourMapper = vtk.vtkPolyDataMapper()
#contourMapper.SetInput(normals.GetOutput())
contourMapper.SetInput(stripper.GetOutput())
contourMapper.SetLookupTable(lut)
self.contours = vtk.vtkActor()
self.contours.SetMapper(contourMapper)
#self.contours.GetProperty().SetRepresentationToWireframe()
self.contours.GetProperty().SetRepresentationToSurface()
#self.contours.GetProperty().SetInterpolationToGouraud()
#self.contours.GetProperty().SetOpacity(1.0)
#self.contours.GetProperty().SetAmbient(0.1)
self.contours.GetProperty().SetDiffuse(0.1)
#self.contours.GetProperty().SetSpecular(0.1)
#self.contours.GetProperty().SetSpecularPower(0.1)
# now setmatrix() on the actor from the reg file !
def array_to_vtkmatrix4x4(scipy_array):
vtkmat = vtk.vtkMatrix4x4()
for i in range(0,4):
for j in range(0,4):
vtkmat.SetElement(i,j, scipy_array[i,j])
return vtkmat
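# file() is the Python 2 built-in alias for open(); this module targets Python 2.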
mat = pickle.load(file(reg_filename, 'r'))
vtkmat = array_to_vtkmatrix4x4(mat)
self.contours.SetUserMatrix(vtkmat)
#self.contours.GetProperty().SetOpacity(.38) #adjustable in the grid manager now
# XXX YAH somehow get a callback when actor is moved...
self.renderer.AddActor(self.contours)
|
nipy/pbrain
|
eegview/mesh_manager.py
|
mesh_manager.py
|
py
| 2,967 |
python
|
en
|
code
| 94 |
github-code
|
6
|
[
{
"api_name": "vtk.vtkStructuredPointsReader",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "vtk.vtkContourFilter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vtk.vtkDecimatePro",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "vtk.vtkSmoothPolyDataFilter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolyDataNormals",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "vtk.vtkStripper",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "vtk.vtkLookupTable",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolyDataMapper",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "vtk.vtkActor",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "vtk.vtkMatrix4x4",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 79,
"usage_type": "call"
}
] |
5708829851
|
import rename_tool
import torch
import torchaudio
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts
import os
current_dir = os.getcwd()
config_path = os.path.join(current_dir, "source", "model_v2", "config.json")
checkpoint_dir = os.path.join(current_dir, "source", "model_v2")  # must match the casing of config_path above; "model_V2" is a different path on case-sensitive filesystems
config = XttsConfig()
config.load_json(config_path)
model = Xtts.init_from_config(config)
model.load_checkpoint(config, checkpoint_dir=checkpoint_dir, eval=True)
model.cuda()
def generate(clone_audio_path, text, language, temperature, length_penalty, repetition_penalty, top_k, top_p, num_gpt_outputs, gpt_cond_len, gpt_cond_chunk_len, max_ref_len, sound_norm_refs, gpt_batch_size, num_chars):
config.temperature = temperature
config.length_penalty = float(length_penalty)
config.repetition_penalty = float(repetition_penalty)
config.top_k = top_k
config.top_p = top_p
config.num_gpt_outputs = num_gpt_outputs
config.gpt_cond_len = gpt_cond_len
config.gpt_cond_chunk_len = gpt_cond_chunk_len
config.max_ref_len = max_ref_len
# enable reference-audio volume normalization only when reference clips are provided
config.sound_norm_refs = len(sound_norm_refs) > 0
config.model_args.gpt_batch_size = gpt_batch_size
config.model_args.num_chars = num_chars
print(config)
outputs = model.synthesize(
text,
config,
speaker_wav=clone_audio_path,
language=language,
)
output_audio = rename_tool.path("audio", "wav")
torchaudio.save(output_audio, torch.tensor(outputs["wav"]).unsqueeze(0), 24000)
return output_audio
|
douhaohaode/xtts_v2
|
tts_v2.py
|
tts_v2.py
|
py
| 1,627 |
python
|
en
|
code
| 16 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "TTS.tts.configs.xtts_config.XttsConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "TTS.tts.models.xtts.Xtts.init_from_config",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "TTS.tts.models.xtts.Xtts",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rename_tool.path",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torchaudio.save",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 49,
"usage_type": "call"
}
] |
1741698512
|
import pytest
import numpy as np
import piquasso as pq
import strawberryfields as sf
pytestmark = pytest.mark.benchmark(
group="pure-fock",
)
@pytest.fixture
def theta():
return np.pi / 5
@pytest.fixture
def d():
return 5
@pytest.mark.parametrize("cutoff", range(3, 14))
def piquasso_benchmark(benchmark, d, cutoff, theta):
@benchmark
def func():
state_vector = [cutoff // d] * d
state_vector[0] += cutoff % d - 1
with pq.Program() as program:
pq.Q(all) | pq.StateVector(state_vector)
for i in range(d - 1):
pq.Q(i, i + 1) | pq.Beamsplitter(theta)
simulator_fock = pq.PureFockSimulator(d=d, config=pq.Config(cutoff=cutoff))
simulator_fock.execute(program)
@pytest.mark.parametrize("cutoff", (3, 4, 5))
def strawberryfields_benchmark(benchmark, d, cutoff, theta):
@benchmark
def func():
eng = sf.Engine(backend="fock", backend_options={"cutoff_dim": cutoff})
circuit = sf.Program(d)
state_vector = [cutoff // d] * d
state_vector[0] += cutoff % d - 1
with circuit.context as q:
for i, n in enumerate(state_vector):
sf.ops.Fock(n) | q[i]
for w in range(d - 1):
sf.ops.BSgate(theta) | (q[w], q[w + 1])
eng.run(circuit).state
|
Budapest-Quantum-Computing-Group/piquasso
|
benchmarks/purefock_beamsplitter_increasing_cutoff_benchmark.py
|
purefock_beamsplitter_increasing_cutoff_benchmark.py
|
py
| 1,353 |
python
|
en
|
code
| 19 |
github-code
|
6
|
[
{
"api_name": "pytest.mark.benchmark",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "piquasso.Program",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "piquasso.Q",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "piquasso.StateVector",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "piquasso.Q",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "piquasso.Beamsplitter",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "piquasso.PureFockSimulator",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "piquasso.Config",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "strawberryfields.Engine",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "strawberryfields.Program",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "strawberryfields.ops.Fock",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "strawberryfields.ops",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "strawberryfields.ops.BSgate",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "strawberryfields.ops",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 41,
"usage_type": "attribute"
}
] |
72577662587
|
from dataclasses import dataclass, field
from random import randint
maps = [
"de_anubis",
"de_inferno",
"de_ancient",
"de_mirage",
"de_nuke",
"de_overpass",
"de_vertigo",
]
@dataclass(frozen=True)
class Defaultsettings:
"""Sets basic match information. You can override the number of maps, first veto and knife round."""
matchid: int = field(
default_factory=lambda: randint(10000000, 999999999), init=False
) # generates an 8-9 digit match ID per instance (a plain default would be drawn once at class definition time)
num_maps: int = field(default=3) # number of maps to play
players_per_team: int = field(default=5, init=False) # number of players per team
coaches_per_team: int = field(default=1, init=False) # number of coaches per team
min_players_to_ready: int = field(
default=8, init=False
) # minimum number of players to enabley !forceready
min_spectators_to_ready: int = field(
default=0, init=False
) # minimum number of spectators to ready
skip_veto: bool = field(default=False) # skip map veto if True
veto_first: str = field(default="team1") # which team vetoes first ("team1" or "team2")
side_type: str = field(
default="standard"
) # standard is valve BO3, always/never knife for knife rounds
spectators: dict = field(default_factory=dict)
@dataclass(frozen=True)
class Matchinfo:
"""arrays of teams, spectators, maps"""
maplist: list[str] = field(
default_factory=list
) # List of maps to be passed in the main script. Defaults to current Active Duty
team1: dict = field(default_factory=dict) # Initialize empty team 1 dict
team2: dict = field(default_factory=dict) # Initialize empty team 2 dict
cvars: dict = field(default_factory=dict) # Adds cvars - server name
@dataclass(frozen=True)
class Teaminfo:
name: str
tag: str
flag: str = field(default="SI")
logo: str = field(default="")
players: list[str] = field(default_factory=list)
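# A minimal usage sketch (hypothetical values, for illustration only):
#
# team_a = Teaminfo(name="Alpha", tag="ALP", players=["p1", "p2", "p3", "p4", "p5"])
# settings = Defaultsettings(num_maps=3, veto_first="team1")
# match = Matchinfo(maplist=maps[:3], team1={"name": team_a.name}, team2={"name": "Bravo"})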
if __name__ == "__main__":
print("You're running the wrong file. Aborting")
quit()
|
Rogris/get5matchgen
|
tools.py
|
tools.py
|
py
| 2,036 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "dataclasses.field",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 51,
"usage_type": "call"
}
] |
30367946761
|
import numpy as np
from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import Array, HasStrictTraits, Instance, Range, on_trait_change
from traitsui.api import Item, VGroup, View
class PowerFunctionExample(HasStrictTraits):
""" Display a plot of a power function. """
#: The plot holding the visualization
plot = Instance(Plot)
#: The power of the monomial to use.
power = Range(0, 5, value=2)
#: The x-values to plot.
x = Array(shape=(None,), dtype="float")
# Trait defaults --------------------------------------------------------
def _plot_default(self):
y = self.x ** self.power
plot_data = ArrayPlotData(x=self.x, y=y)
plot = Plot(plot_data)
plot.plot(("x", "y"), "line", name="power function", color="auto")
# configure the plot
plot.padding_top = 25
plot.border_visible = False
plot.index_grid.visible = False
plot.value_grid.visible = False
plot.title = "Power Function n={}".format(self.power)
plot.title_position = "right"
plot.title_angle = -90
plot.legend_alignment = "ul"
plot.legend.border_visible = False
plot.legend.bgcolor = (0.9, 0.9, 0.9, 0.5)
plot.legend.visible = True
plot.index_axis.title = "x"
plot.value_axis.title = "y"
return plot
def _x_default(self):
return np.linspace(-2.0, 2.0, 101)
# Trait change handlers -------------------------------------------------
@on_trait_change("power")
def _update_y(self):
y = self.x ** self.power
self.plot.data.set_data("y", y)
@on_trait_change("x")
def _update_data(self):
y = self.x ** self.power
self.plot.data.update_data(x=self.x, y=y)
@on_trait_change("power")
def _update_title(self):
self.plot.title = "Power Function n={}".format(self.power)
# TraitsUI view ---------------------------------------------------------
view = View(
VGroup(
Item("plot", editor=ComponentEditor()),
VGroup(
Item("power"),
),
show_labels=False,
),
resizable=True,
title="Power Function Example",
)
if __name__ == "__main__":
view = PowerFunctionExample()
view.configure_traits()
|
enthought/chaco
|
examples/user_guide/power_function_example.py
|
power_function_example.py
|
py
| 2,379 |
python
|
en
|
code
| 286 |
github-code
|
6
|
[
{
"api_name": "traits.api.HasStrictTraits",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "traits.api.Instance",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "chaco.api.Plot",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "traits.api.Range",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "traits.api.Array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chaco.api.ArrayPlotData",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "chaco.api.Plot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "traits.api.on_trait_change",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "traits.api.on_trait_change",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "traits.api.on_trait_change",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "traitsui.api.View",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "traitsui.api.VGroup",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "enable.api.ComponentEditor",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "traitsui.api.VGroup",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "traitsui.api.Item",
"line_number": 72,
"usage_type": "call"
}
] |
39403565414
|
from functools import partial
import mmcv
import numpy as np
import torch
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
"""Convert tensor to images
Args:
tensor (torch.Tensor): Tensor that contains multiple images
mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
std (tuple[float], optional): Standard deviation of images.
Defaults to (1, 1, 1).
to_rgb (bool, optional): Whether convert the images to RGB format.
Defaults to True.
Returns:
list[np.ndarray]: A list that contains multiple images.
"""
num_imgs = tensor.size(0)
mean = np.array(mean, dtype=np.float32)
std = np.array(std, dtype=np.float32)
imgs = []
for img_id in range(num_imgs):
img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
img = mmcv.imdenormalize(
img, mean, std, to_bgr=to_rgb).astype(np.uint8)
imgs.append(np.ascontiguousarray(img))
return imgs
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple list, each list contains
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
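# For example (illustrative, not part of the original source):
# multi_apply(lambda a, b: (a + b, a * b), [1, 2], [10, 20])
# applies the function element-wise and regroups the paired outputs,
# returning ([11, 22], [10, 40]) -- one list per output slot.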
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
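# For example (illustrative): unmap(torch.tensor([7., 9.]), count=4,
# inds=torch.tensor([0, 1, 1, 0])) returns tensor([0., 7., 9., 0.]) --
# the values are scattered back to the positions flagged by `inds`.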
def vectorize_labels(flat_labels, num_classes, label_weights=None):
# One-hot encode class labels; label == num_classes marks background.
prediction_number = flat_labels.shape[0]
labels = torch.zeros([prediction_number, num_classes], dtype=flat_labels.dtype, device=flat_labels.device)
pos_labels = flat_labels < num_classes
labels[pos_labels, flat_labels[pos_labels]] = 1
if label_weights is not None:
# predictions with zero weight are ignored: mark all their classes as -1
ignore_labels = (label_weights == 0)
labels[ignore_labels, :] = -1
return labels.reshape(-1)
def giou(pred, target, eps=1e-7):
"""
Generalized Intersection over Union: A Metric and A Loss for
Bounding Box Regression
https://arxiv.org/abs/1902.09630
code refers to:
https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py#L36
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1] + eps
# GIoU
gious = ious - (enclose_area - union) / enclose_area
return gious
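# Worked example (assumed boxes, for intuition): pred = [0, 0, 2, 2] and
# target = [1, 1, 3, 3] overlap on a 1x1 square, so IoU = 1 / (4 + 4 - 1) ~ 0.143;
# the enclosing box is 3x3 = 9, giving GIoU = 1/7 - (9 - 7)/9 ~ -0.079.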
def iou(pred, target, eps=1e-7):
"""
Generalized Intersection over Union: A Metric and A Loss for
Bounding Box Regression
https://arxiv.org/abs/1902.09630
code refer to:
https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py#L36
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
return ious
|
fundamentalvision/Parameterized-AP-Loss
|
mmdet/core/utils/misc.py
|
misc.py
|
py
| 5,069 |
python
|
en
|
code
| 48 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mmcv.imdenormalize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.ascontiguousarray",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "six.moves.map",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "six.moves.map",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "six.moves.zip",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "torch.bool",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.bool",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 139,
"usage_type": "call"
}
] |
6969788756
|
import os
import re
from PIL import Image
import numpy as np
import torch
import random
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torchvision.datasets.folder import default_loader
class Celeb(Dataset):
def __init__(self, data_file, dst_path='cropped_CelebA', training=True, transform=None, train_num=16000):
src_path = data_file + 'CelebA_info'
if train_num == 10240:
category = 'celeb_sample_10240.txt'
else:
category = 'list_attr_celeba.txt'
fn = open(src_path + '/Anno/' + category, 'r')
fh2 = open(src_path + '/Eval/list_eval_partition.txt', 'r')
imgs = []
lbls = []
ln = 0
train_bound = 162770 + 2
test_bound = 182638 + 2
regex = re.compile(r'\s+')  # raw string: '\s' is an invalid escape in a plain string literal
for line in fn:
ln += 1
if ln <= 2:
continue
if ln < test_bound and not training:
continue
if (ln - 2 <= train_num and training and ln <=train_bound) or\
(ln - test_bound < train_num and not training):
line = line.rstrip('\n')
line_value = regex.split(line)
imgs.append(line_value[0])
lbls.append(list(int(i) if int(i) > 0 else 0 for i in line_value[1:]))
self.imgs = imgs
self.lbls = lbls
self.is_train = training
self.dst_path = data_file + dst_path
if transform is None:
if training:
self.transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
else:
self.transform = transform
def __getitem__(self, idx):
fn = self.imgs[idx]
lbls = self.lbls[idx]
if self.is_train:
imgs = default_loader(self.dst_path + '/train/' + fn)
else:
imgs = default_loader(self.dst_path + '/test/' + fn)
imgs = self.transform(imgs)
lbls = torch.Tensor(lbls)
return [imgs, lbls]
def __len__(self):
return len(self.imgs)
def sample_celeb(data_file, category='list_attr_celeba.txt', training=True, sample_num=10240, train_num=162770):
src_path = data_file + 'CelebA_info'
fn = open(src_path + '/Anno/' + category, 'r')
sample_path = src_path + '/Anno/celeb_sample_'+str(sample_num)+'.txt'
if os.path.exists(sample_path):
os.system('rm '+ sample_path)
sample_fh = open(sample_path, 'w')
ln = 0
train_bound = 162770 + 2
test_bound = 182638 + 2
regex = re.compile(r'\s+')  # raw string: '\s' is an invalid escape in a plain string literal
content = []
trainnum_list = np.arange(0, train_bound-2)
sample_num_list = random.sample(trainnum_list.tolist(), sample_num)
for line in fn:
ln += 1
if ln <= 2:
sample_fh.write(line)
if ln < test_bound and not training:
continue
if (ln - 2 <= train_num and training and ln <=train_bound) or\
(ln - test_bound < train_num and not training):
content.append(line)
for idx in sample_num_list:
sample_fh.write(content[idx])
sample_fh.close()
if __name__ == '__main__':
data_file = '/home/wzh/project/fjq/dataset/CelebA/'
sample_celeb(data_file, sample_num=10240)
|
ada-shen/icCNN
|
celeb.py
|
celeb.py
|
py
| 3,923 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.folder.default_loader",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets.folder.default_loader",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 84,
"usage_type": "call"
}
] |
36646912477
|
import matplotlib
matplotlib.use('Agg') # noqa
from deepdecoder.data import generator_3d_tags_with_depth_map, DistributionHDF5Dataset
import diktya.distributions
from diktya.numpy import tile
import matplotlib.pyplot as plt
import os
import argparse
from keras.utils.generic_utils import Progbar
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter1d
from scipy.misc import imsave
from deepdecoder.scripts.default_3d_tags_distribution import default_tag_distribution
def generator(tag_dist, batch_size, antialiasing=1):
s = antialiasing
depth_scale = 1/2
for param, mask, depth_map in generator_3d_tags_with_depth_map(
tag_dist, batch_size, antialiasing=s, depth_scale=depth_scale):
depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-1, mode='constant')
depth_map = gaussian_filter1d(depth_map, 2/6/depth_scale, axis=-2, mode='constant')
depth_map = zoom(depth_map, (1., 1., depth_scale, depth_scale))
yield param, mask, depth_map
def plot_anitaliasing(tag_dist, fname, a, nb_samples=64):
_, masks, depth_map = next(generator(tag_dist, nb_samples, antialiasing=a))
tiled = tile(masks)[0]
imsave(fname.format(a), tiled)
def run(tag_dist, output_fname, force, nb_samples):
os.makedirs(os.path.dirname(output_fname), exist_ok=True)
if os.path.exists(output_fname) and force:
print("Deleted {}".format(output_fname))
os.remove(output_fname)
else:
assert not os.path.exists(output_fname), \
"File {} already exists. Use --force to override it"
basename, _ = os.path.splitext(output_fname)
anit_name = basename + "_anti_{}.png"
hist_name = basename + "_hist_{}.png"
plot_anitaliasing(tag_dist, anit_name, 1)
plot_anitaliasing(tag_dist, anit_name, 2)
plot_anitaliasing(tag_dist, anit_name, 4)
plot_anitaliasing(tag_dist, anit_name, 8)
labels, masks, _ = next(generator(tag_dist, 10000, antialiasing=2))
for key in labels.dtype.names:
m = labels[key].mean()
s = labels[key].std()
print("{}: {:.3f}, {:.3f}".format(key, m, s))
assert abs(m) <= 0.03
for label_name in sorted(set(labels.dtype.names) - set(['bits'])):
x = labels[label_name]
plt.hist(x.flatten(), bins=40, density=True)  # 'normed' was removed in matplotlib 3.x
plt.savefig(hist_name.format(label_name))
plt.clf()
dset = DistributionHDF5Dataset(output_fname, distribution=tag_dist,
nb_samples=nb_samples, mode='w')
progbar = Progbar(nb_samples)
batch_size = min(25000, nb_samples)
for labels, tags, depth_map in generator(tag_dist, batch_size, antialiasing=4):
pos = dset.append(labels=labels, tag3d=tags, depth_map=depth_map)
progbar.update(pos)
if pos == nb_samples:
break
print("Saved tag 3d dataset to: {}".format(output_fname))
dist_fname = basename + "_distribution.json"
with open(dist_fname, "w+") as dist_f:
dist_f.write(tag_dist.to_json())
print("Saved distribution to: {}".format(dist_fname))
def main():
parser = argparse.ArgumentParser(
description='Generate images and depth maps from the 3d object model of the tag')
parser.add_argument('output', type=str, help='output file name')
parser.add_argument('-f', '--force', action='store_true',
help='override existing output files')
parser.add_argument('-d', '--dist', type=str, default=default_tag_distribution(),
help='Json params of the distribution')
parser.add_argument('-n', '--nb-samples', type=float, required=True,
help='Number of samples to generate')
args = parser.parse_args()
if type(args.dist) == str:
with open(args.dist) as f:
dist = diktya.distributions.load_from_json(f.read())
else:
dist = args.dist
run(dist, args.output, args.force, int(args.nb_samples))
if __name__ == "__main__":
main()
|
berleon/deepdecoder
|
deepdecoder/scripts/generate_3d_tags.py
|
generate_3d_tags.py
|
py
| 4,038 |
python
|
en
|
code
| 50 |
github-code
|
6
|
[
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "deepdecoder.data.generator_3d_tags_with_depth_map",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.gaussian_filter1d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.gaussian_filter1d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.interpolation.zoom",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "diktya.numpy.tile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "scipy.misc.imsave",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "deepdecoder.data.DistributionHDF5Dataset",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "keras.utils.generic_utils.Progbar",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "deepdecoder.scripts.default_3d_tags_distribution.default_tag_distribution",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "diktya.distributions.distributions.load_from_json",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "diktya.distributions.distributions",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "diktya.distributions",
"line_number": 94,
"usage_type": "name"
}
] |
36213639675
|
#!/usr/bin/env python
# coding: utf-8
import os
import math
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from PIL import Image
import time
import os,glob
import matplotlib.pyplot as plt
from random import choice
VGG_MEAN=[103.939,116.779,123.68]
class VGGNet():
def __init__(self,data_dict):
self.data_dict=data_dict
def get_conv_filter(self,name):
return tf.constant(self.data_dict[name][0],name='conv')
def get_fc_weight(self,name):
return tf.constant(self.data_dict[name][0],name='fc')
def get_bias(self,name):
return tf.constant(self.data_dict[name][1],name='bias')
def conv_layer(self,x,name):
with tf.name_scope(name):
conv_w=self.get_conv_filter(name)
conv_b=self.get_bias(name)
h=tf.nn.conv2d(x,conv_w,strides=[1,1,1,1],padding="SAME")
h=tf.nn.bias_add(h,conv_b)
h=tf.nn.relu(h)
return h
def pooling_layer(self,x,name):
return tf.nn.max_pool(x,ksize=[1,2,2,1],
strides=[1,2,2,1],
padding="SAME",name=name)
def fc_layer(self,x,name,activation=tf.nn.relu):
with tf.name_scope(name):
fc_w=self.get_fc_weight(name)
fc_b=self.get_bias(name)
h=tf.matmul(x,fc_w)
h=tf.nn.bias_add(h,fc_b)
if activation is None:
return h
else:
return activation(h)
def flatten_layer(self,x,name):
with tf.name_scope(name):
x_shape=x.get_shape().as_list()
dim=1
for d in x_shape[1:]:
dim*=d
x=tf.reshape(x,[-1,dim])
return x
def build(self,x_rgb):
start_time=time.time()
print("Modeling Start...")
r,g,b=tf.split(x_rgb,[1,1,1],axis=3)
x_bgr=tf.concat([b-VGG_MEAN[0],g-VGG_MEAN[1],r-VGG_MEAN[2]],axis=3)
# Build the convolutional layers.
# VGG16 network structure:
# Block 1: 2 conv layers, 1 pooling layer
# Block 2: 2 conv layers, 1 pooling layer
# Block 3: 3 conv layers, 1 pooling layer
# Block 4: 3 conv layers, 1 pooling layer
# Block 5: 3 conv layers, 1 pooling layer
# Layer 6: fully connected
# Layer 7: fully connected
# Layer 8: fully connected
self.conv1_1=self.conv_layer(x_bgr,'conv1_1')
self.conv1_2=self.conv_layer(self.conv1_1,'conv1_2')
self.pool1=self.pooling_layer(self.conv1_2,'pool1')
self.conv2_1 = self.conv_layer(self.pool1, 'conv2_1')
self.conv2_2 = self.conv_layer(self.conv2_1, 'conv2_2')
self.pool2 = self.pooling_layer(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, 'conv3_1')
self.conv3_2 = self.conv_layer(self.conv3_1, 'conv3_2')
self.conv3_3 = self.conv_layer(self.conv3_2, 'conv3_3')
self.pool3 = self.pooling_layer(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, 'conv4_1')
self.conv4_2 = self.conv_layer(self.conv4_1, 'conv4_2')
self.conv4_3 = self.conv_layer(self.conv4_2, 'conv4_3')
self.pool4 = self.pooling_layer(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, 'conv5_1')
self.conv5_2 = self.conv_layer(self.conv5_1, 'conv5_2')
self.conv5_3 = self.conv_layer(self.conv5_2, 'conv5_3')
self.pool5 = self.pooling_layer(self.conv5_3, 'pool5')
''' Style transfer only needs features from the convolutional layers, so the fully connected layers below are left unused:
self.flatten5 = self.flatten_layer(self.pool5, 'flatten')
self.fc6 = self.fc_layer(self.flatten5, 'fc6')
self.fc7 = self.fc_layer(self.fc6, 'fc7')
self.fc8 = self.fc_layer(self.fc7, 'fc8', activation = None)
self.prob = tf.nn.softmax(self.fc8, name = 'prob')
'''
print('Modeling Finished...:%f ms' % ((time.time() - start_time)*1000))
def initial_result(shape,mean,stddev):
initial=tf.truncated_normal(shape,mean=mean,stddev=stddev)
return tf.Variable(initial)
def read_img(img_name):
img=Image.open(img_name)
img=img.convert('RGB')
img = img.resize((224, 224))
np_img=np.array(img)
np_img=np.asarray([np_img],dtype=np.int32)
return np_img
def gram_matrix(x):
# Gram matrix of the feature maps: channel-by-channel correlations,
# normalized by the total number of activations (ch*w*h).
b,w,h,ch=x.get_shape().as_list()
features=tf.reshape(x,[b,h*w,ch])
gram=tf.matmul(features,features,adjoint_a=True)/tf.constant(ch*w*h,tf.float32)
return gram
vgg16_npy_path="./vgg_model/vgg16.npy"
image_pattern = "./images/content/video_dlzm*jpg"
output_dir="./images/results"
style_img_path="./images/style/Vincent_Willem_van_Gogh_085.jpg"
image_paths = glob.glob(image_pattern)
image_paths.sort()
num_step=100
learning_rate=10
lambda_c=0.1
lambda_s=50
for n,p in enumerate(image_paths):
print(n)
content_img_path = p
result=initial_result((1,224,224,3),127.5,20)
content_val=read_img(content_img_path)
style_val=read_img(style_img_path)
content=tf.placeholder(tf.float32,shape=[1,224,224,3])
style=tf.placeholder(tf.float32,shape=[1,224,224,3])
data_dict=np.load(vgg16_npy_path,encoding="latin1",allow_pickle=True).item()
vgg_for_content=VGGNet(data_dict)
vgg_for_style=VGGNet(data_dict)
vgg_for_result=VGGNet(data_dict)
vgg_for_content.build(content)
vgg_for_style.build(style)
vgg_for_result.build(result)
# Choose which layers to extract features from.
# Note: the layers used for content features must be the same layers used
# for the result's content features; likewise, the style-feature layers
# must match the result's style-feature layers.
content_features=[vgg_for_content.conv3_2,]
result_content_features=[vgg_for_result.conv3_2,]
style_features=[vgg_for_style.conv4_1,
vgg_for_style.conv5_1,]
style_gram=[gram_matrix(feature) for feature in style_features]
result_style_features=[vgg_for_result.conv4_1,
vgg_for_result.conv5_1,]
result_style_gram=[gram_matrix(feature) for feature in result_style_features]
content_loss=tf.zeros(1,tf.float32)
for c,c_ in zip(content_features,result_content_features):
content_loss+=tf.reduce_mean((c-c_)**2,axis=[1,2,3])
style_loss=tf.zeros(1,tf.float32)
for s,s_ in zip(style_gram,result_style_gram):
style_loss+=0.2*tf.reduce_mean((s-s_)**2,[1,2])
loss=content_loss*lambda_c+style_loss*lambda_s
train_op=tf.train.AdamOptimizer(learning_rate).minimize(loss)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
for step in range(num_step):
loss_value,content_loss_value,style_loss_value,_= sess.run([loss,content_loss,style_loss,train_op],
feed_dict={
content:content_val,
style:style_val
})
# print('step: %d, loss_value: %8.4f, content_loss: %8.4f, style_loss: %8.4f' % (step+1,
# loss_value[0],
# content_loss_value[0],
# style_loss_value[0]))
if step+1 == num_step:
result_img_path=os.path.join(output_dir,'result_%03d_%05d.jpg'%(n,step+1))
result_val=result.eval(sess)[0]
result_val=np.clip(result_val,0,255)
img_arr=np.asarray(result_val,np.uint8)
img=Image.fromarray(img_arr)
img.save(result_img_path)
|
castleKing1997/Style_Transfer
|
StyleTransfer.py
|
StyleTransfer.py
|
py
| 7,795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tensorflow.compat.v1.disable_v2_behavior",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.constant",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.constant",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.constant",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.name_scope",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn.conv2d",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn.bias_add",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn.relu",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn.max_pool",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.name_scope",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.matmul",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.nn.bias_add",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.nn",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.name_scope",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.reshape",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.split",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.concat",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.truncated_normal",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.Variable",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.reshape",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.matmul",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.constant",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.float32",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.placeholder",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.float32",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.placeholder",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.float32",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.zeros",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.float32",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.reduce_mean",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.zeros",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.float32",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.reduce_mean",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.train.AdamOptimizer",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.train",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.global_variables_initializer",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "tensorflow.compat.v1.Session",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 217,
"usage_type": "name"
}
] |
20426895808
|
import matplotlib.pyplot as plt
import seaborn as sns
color_list = sns.color_palette('deep') + sns.color_palette('bright')
def DrawDoubleYLines(x, y1, y2, xlabel='', ylabel=['', ''], legend=['', ''], store_path=''):
'''
Draw double y-axis lines.
:param x: The vector of the x axis.
:param y1: The vector of the y1 axis.
:param y2: The vector of the y2 axis.
:param xlabel: The label of the x. Default is ''
:param ylabel: The list of the y label. Default is ['', '']
:param legend: The list of the legend. Default is ['', '']
:param store_path: The store path of the figure. support 'jpg' and 'eps' format.
:return:
'''
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x, y1, color=color_list[0])
ax1.set_ylabel(ylabel[0])
ax1.set_xlabel(xlabel)
ax2 = ax1.twinx() # this is the important function
ax2.plot(x, y2, color=color_list[1])
ax2.set_ylabel(ylabel[1])
ax2.set_xlabel(xlabel)
ax1.legend([legend[0]], loc=(.02, .9))
ax2.legend([legend[1]], loc=(.02, .82))
if store_path:
plt.tight_layout()
if store_path[-3:] == 'jpg':
fig.savefig(store_path, dpi=300, format='jpeg')
elif store_path[-3:] == 'eps':
fig.savefig(store_path, dpi=1200, format='eps')
plt.show()
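# A hypothetical usage sketch (values invented for illustration):
# import numpy as np
# x = np.arange(1, 11)
# DrawDoubleYLines(x, x ** 2, np.sqrt(x), xlabel='epoch',
#                  ylabel=['loss', 'accuracy'], legend=['train loss', 'val accuracy'])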
|
salan668/FAE
|
BC/Visualization/DrawDoubleLines.py
|
DrawDoubleLines.py
|
py
| 1,322 |
python
|
en
|
code
| 121 |
github-code
|
6
|
[
{
"api_name": "seaborn.color_palette",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.tight_layout",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
}
] |
5361852132
|
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
import wikipedia
from urllib import request
Builder.load_file(filename="search.kv")
class FirstScreen(Screen):
def get_img_link(self):
# get user search
query = self.manager.current_screen.ids.user_search.text
# search wikipedia for user query
page = wikipedia.page(query)
img_link = page.images[0]
img_path = f"images\wiki {query}.png"
return img_link, img_path
def download_wiki_img(self):
img_link, img_path = self.get_img_link()
img = request.urlretrieve(img_link, img_path)
return img[0]
def preview_img(self):
# change images dynamically
self.manager.current_screen.ids.img.source = self.download_wiki_img() #*
# self.ids.img.source = "images\git_init.png" #= same above
class RootWidget(ScreenManager):
pass
class SearchApp(App):
def build(self):
return RootWidget()
SearchApp().run()
'''
### Steps for creating app using kivy ###
## Python Script ##
1. First there's a MainApp class (or call it <anything>App); this class inherits from the (App) class that is imported from kivy.app. So it's like the template on which we build our app.
- Inside this class we'll overwrite build(self) method to return the ScreenManager object that we have defined (see point 2).
2. Define a RootWidget class; this class inherits from (ScreenManager) imported from kivy.uix.screenmanager.
It's like a manager for any other Screen object we'll create later (a screen object for each new screen in the app).
3. Define a Screen object that inherits from (Screen); this is the screen object we're talking about, on which we'll put layouts and widgets, and where we also define the methods that these widgets will execute.
4. Run the app: MainApp().run()
5. To connect script to .kv file:
- By default, Kivy expects the .kv file to have the same base name as your Python file. For example, if your Python file is named myapp.py, the corresponding .kv file should be named myapp.kv.
- Alternatively, you can specify it manually this way:
from kivy.lang import Builder
Builder.load_file("filename.kv")
6. To connect a method defined in your Screen object to a widget on that screen (say, a button), simply set on_press: root.method(), as in the example below.
- root here refers to the root widget of your widgets tree (which happens to be the screen object), that's why you should define that method in your Screen class declaration.
7. Get text from TextInput --> var = self.ids.<id>.text
NOTE It's a good practice to separate code in Screen class into several methods to ease its understanding and refactoring.
## .kv file for GUI ##
In this file we will implement the GUI; screens, layouts, widgets and their attributes.
file start>>>
<Screen_name>:
<Layout_type>:
widget_1:
attr_1: value
attr_2: value
Button_1:
on_press: root.method()
......
<RootWidget>:
Screen_name:
id: id
name: "name"
<<< file end
______________________________________________________
#*
Let's break down the code:
- `self.manager`: `self` refers to the current instance of the class, and `manager` is a property or attribute of that instance. In this case, it is assumed that the current class has a `manager` attribute that represents a `ScreenManager` instance.
- `self.manager.current_screen`: `current_screen` is an attribute of the `ScreenManager` class that represents the currently displayed screen. By accessing `current_screen`, you are retrieving the instance of the currently active screen.
- `ids`: `ids` is a dictionary-like property of a widget that contains all the child widgets defined in the corresponding `.kv` file with an `id` attribute. The `id` attribute is used to uniquely identify a widget.
- `img`: `img` is the `id` assigned to an `Image` widget in the corresponding `.kv` file.
- `source`: `source` is a property of the `Image` widget that represents the path or URL of the image file to be displayed.
So, putting it all together, `self.manager.current_screen.ids.img.source = "images\image.png"` sets the `source` property of the `Image` widget (identified by the `id` "img") within the current screen of the `ScreenManager` to "images\image.png". It updates the image source, allowing you to change the displayed image dynamically.
'''
|
mido-99/Advanded-OOP
|
App-4-Webcam-Photo-Sharer/main.py
|
main.py
|
py
| 4,504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "kivy.lang.Builder.load_file",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "kivy.lang.Builder",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "wikipedia.page",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlretrieve",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "kivy.uix.screenmanager.ScreenManager",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "kivy.app.App",
"line_number": 32,
"usage_type": "name"
}
] |
42755033612
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
from datetime import datetime
import re
import importlib
import inspect
import logging
import os
import sys
import sphinx
import megengine
# -- Project information -----------------------------------------------------
project = 'MegEngine'
copyright = f'2020-{datetime.now().year}, The MegEngine Open Source Team'
author = 'The MegEngine Open Source Team'
version = megengine.__version__
release = version
# -- General configuration ---------------------------------------------------
extensions = [
'nbsphinx',
'recommonmark',
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.graphviz',
'sphinxcontrib.mermaid',
'sphinx_autodoc_typehints',
'sphinx_copybutton'
]
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
source_encoding = "utf-8"
master_doc = 'index'
templates_path = ['_templates']
exclude_patterns = [
'_build',
'build',
'examples',
'**/includes/**',
'**.ipynb_checkpoints'
]
# -- Options for internationalization ----------------------------------------
language = 'zh_CN'
# By default, the document `functional/loss.rst` ends up in the `functional` text domain.
# With this option set to False, it is `functional/loss`.
gettext_compact = False
# -- Options for Extensions -------------------------------------------------
# Setting for sphinx.ext.autosummary to auto-generate single html pages
# Please make sure all API pages are stored in the `/reference/api/` directory
autosummary_generate = True
# Setting for sphinx.ext.autodoc
autodoc_default_options = {
'member-order': 'bysource', # Requires developers to keep the source code organized
'show-inheritance': True, # But it cannot reference the short module path
}
autoclass_content = 'class'
autodoc_typehints = 'description'
autodoc_docstring_signature = True
add_function_parentheses = False
add_module_names = False
# Setting for sphinx.ext.mathjax
# The path to the JavaScript file to include in the HTML files in order to load MathJax.
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js'
mathjax_config = {
'extensions': ['tex2jax.js'],
'jax': ['input/TeX', 'output/HTML-CSS'],
}
# Setting for sphinxcontrib-mermaid
mermaid_version = 'latest' # from CDN unpkg.com
# Setting for sphinx.ext.intersphinx
# Useful for referencing other projects, e.g. :py:class:`zipfile.ZipFile`
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
}
# Setting for sphinx.ext.extlinks
# Can use the alias name as a new role, e.g. :issue:`123`
extlinks = {
'src': ('https://github.com/MegEngine/MegEngine/blob/master/%s', ''),
'docs': ('https://github.com/MegEngine/Documentation/blob/master/%s', ''),
'issue': ('https://github.com/MegEngine/MegEngine/issues/%s', 'Issue #'),
'pull': ('https://github.com/MegEngine/MegEngine/pull/%s', 'Pull Request #'),
'duref': ('http://docutils.sourceforge.net/docs/ref/rst/'
'restructuredtext.html#%s', ''),
}
# Setting for sphinx.ext.nbsphinx
# nbsphinx do not use requirejs (breaks bootstrap)
nbsphinx_requirejs_path = ""
logger = logging.getLogger(__name__)
try:
import nbconvert
except ImportError:
logger.warning("nbconvert not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
logger.warning("Pandoc not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
# -- Options for HTML output -------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_theme_path = ['_themes']
html_theme_options = {
'search_bar_text': '输入搜索文本...',  # placeholder text: "Enter search text..."
'search_bar_position': 'navbar',
'github_url': 'https://github.com/MegEngine/MegEngine',
'external_links': [
{ 'name': '论坛', 'url': 'https://discuss.megengine.org.cn/'},  # Forum
{ 'name': '官网', 'url': 'https://megengine.org.cn/'}  # Official website
],
'use_edit_page_button': False,
'navigation_with_keys': False,
'show_prev_next': False,
'use_version_switch': True,
'version_switch_json_url': '/doc/version.json',
'version_switch_enable_locale': True,
'version_switch_locates': ['zh', 'en']
}
html_sidebars = {
'**': ['sidebar-search-bs.html', 'sidebar-nav-bs.html'],
'index': ['sidebar-search-bs.html', 'homepage-sidebar.html']
}
html_static_path = ['_static']
html_logo = "logo.png"
html_favicon = "favicon.ico"
html_css_files = [
'css/custom.css'
]
html_js_files = [
'js/custom.js'
]
html_search_language = 'zh'
|
tpoisonooo/Documentation
|
source/conf.py
|
conf.py
|
py
| 5,214 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "megengine.__version__",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "nbconvert.utils.pandoc.get_pandoc_version",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "nbconvert.utils",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "nbconvert.utils",
"line_number": 132,
"usage_type": "attribute"
}
] |
37228942399
|
#!/usr/bin/env python3
import argparse
import bids
from bids import BIDSLayout
import os
from pathlib import Path
def _filter_pybids_none_any(dct):
import bids
return {
k: bids.layout.Query.NONE
if v is None
else (bids.layout.Query.ANY if v == "*" else v)
for k, v in dct.items()
}
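# Illustrative example of the mapping performed above (hypothetical filter
# file contents): JSON null becomes Query.NONE and "*" becomes Query.ANY, so
# {"t1w": {"reconstruction": null, "acquisition": "*"}}
# parses to {"t1w": {"reconstruction": Query.NONE, "acquisition": Query.ANY}}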
def _bids_filter(value):
from json import loads
from bids.layout import Query
if value and Path(value).exists():
try:
filters = loads(Path(value).read_text(), object_hook=_filter_pybids_none_any)
except Exception as e:
raise Exception("Unable to parse BIDS filter file. Check that it is "
"valid JSON.")
else:
raise Exception("Unable to load BIDS filter file " + value)
# unserialize pybids Query enum values
for acq, _filters in filters.items():
filters[acq] = {
k: getattr(Query, v[7:-4])
if not isinstance(v, Query) and "Query" in v
else v
for k, v in _filters.items()
}
return filters
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
"""
Uses pybids to retrieve the input data for a given participant
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
bids_filters = filters or {}
for acq, entities in bids_filters.items():
queries[acq].update(entities)
subj_data = {
dtype: sorted(
layout.get(
return_type="file",
subject=participant_label,
extension=["nii", "nii.gz"],
**query
)
)
for dtype, query in queries.items()
}
return subj_data, layout
qsiprep_queries = {
'fmap': {'datatype': 'fmap'},
'sbref': {'datatype': 'func', 'suffix': 'sbref'},
'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
't2w': {'datatype': 'anat', 'suffix': 'T2w'},
't1w': {'datatype': 'anat', 'suffix': 'T1w'},
'roi': {'datatype': 'anat', 'suffix': 'roi'},
'dwi': {'datatype': 'dwi', 'suffix': 'dwi'}
}
fmriprep_queries = {
'fmap': {'datatype': 'fmap'},
'bold': {'datatype': 'func', 'suffix': 'bold'},
'sbref': {'datatype': 'func', 'suffix': 'sbref'},
'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
't2w': {'datatype': 'anat', 'suffix': 'T2w'},
't1w': {'datatype': 'anat', 'suffix': 'T1w'},
'roi': {'datatype': 'anat', 'suffix': 'roi'}
}
parser = argparse.ArgumentParser(description='BIDS validation and filter preview. The filters are processed using code extracted from qsiprep '
'v 0.14.2. I believe fmriprep works the same way, but I have not verified this. Also, it is possible that '
'different versions of pybids will behave differently. With those disclaimers in mind, running this can '
'highlight obvious problems with filters or allow you to experiment with advanced matching.')
parser.add_argument('--bids-dir', help='The directory with the input dataset formatted according to the BIDS standard.', required = True)
parser.add_argument('--filter-file', help='File containing BIDS filters', required = True)
parser.add_argument('--participant-label', help='The label of the participant that should be analyzed. The label '
'corresponds to sub-<participant> from the BIDS spec (so it does not include "sub-").', required = True)
parser.add_argument('--prep-modality', help='The kind of modality prep to test the filter on. Options are fmri, qsi.', required = True)
bids.config.set_option('extension_initial_dot', True)
args = parser.parse_args()
layout = BIDSLayout(args.bids_dir, validate = True)
filters = _bids_filter(args.filter_file)
queries = None
if (args.prep_modality == 'qsi'):
queries = qsiprep_queries
elif (args.prep_modality == 'fmri'):
queries = fmriprep_queries
else:
raise ValueError(f'Unsupported modality prep string {args.prep_modality}')
sub_data, layout = collect_data(layout, args.participant_label, queries, filters = filters)
print(f'\n\n Filtered data for participant {args.participant_label}:\n')
for k, v in sub_data.items():
print (k, '\t:\t', v)
|
ftdc-picsl/pmacsPreps
|
bin/bidsFilterTest.py
|
bidsFilterTest.py
|
py
| 4,368 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bids.layout",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "bids.layout",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bids.layout.Query",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "bids.layout.Query",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "bids.BIDSLayout",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "bids.BIDSLayout",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "bids.config.set_option",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "bids.config",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "bids.BIDSLayout",
"line_number": 104,
"usage_type": "call"
}
] |
17499920257
|
#TODO practice mode for the ones that required 10+, or 20+ s previously
#TODO prorgam to train two digits additions and subtractions
from random import random
from random import randint
import datetime
from matplotlib import pyplot as plt
import pandas as pd
# import numpy as np
import os
import mplcursors # need to install: pip install mplcursors
problems = []
results = []
elapsed_time = []
failed = []
# failed = [{'a':15, 'b':11}, {'a':96, 'b':95}, {'a':76, 'b':35}, {'a':16, 'b':77}]#TODO
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['font.family'] = ['Arial']
cwd = os.getcwd()
excel_path = os.path.join(cwd,'anzan_log.xlsx')
if os.path.isfile(excel_path):
df_s = pd.read_excel(excel_path, index_col=0, sheet_name='successes')
df_f = pd.read_excel(excel_path, index_col=0, sheet_name='failures')
df_r = pd.read_excel(excel_path, index_col=0, sheet_name='rates').astype(float) #float
df_t = pd.read_excel(excel_path, index_col=0, sheet_name='time').astype(float) #float
else:
df_s = pd.DataFrame(0, index=range(1, 100), columns=range(1, 100))
df_f = pd.DataFrame(0, index=range(1, 100), columns=range(1, 100))
df_r = pd.DataFrame(float(0), index=range(1, 100), columns=range(1, 100)).astype(float)
df_t = pd.DataFrame(float(0), index=range(1, 100), columns=range(1, 100)).astype(float)
time_out_s = 20 # inclusive, elapsed time must be <= time_out_s
failed_ind = 0
failed_in_the_past = []
for row_index, row in df_f.iterrows():
for col_index, value in row.items():
if value != 0:
failed_in_the_past.append({'a': row_index, 'b': col_index})
def show_problem(a, b, view):
if view == 1:
print(f"\n{a} x {b} =\n")
elif view == 2:
if course == 6:
print(f"\n {a:>3} \nx {b:>3}\n-----\n")
else:
print(f"\n {a:>2} \nx {b:>2}\n-----\n")
def biased_randint(min_val, max_val, bias=0.5):
"""Generate a biased random integer between min_val and max_val.
With a bias value of 0.5, numbers towards the higher end (like 6,7,8,9 in tens place)
will be more probable. Adjusting the bias will change the skewness. A bias of 1 will
give you a uniform distribution, values less than 1 will skew towards the maximum,
and values greater than 1 will skew towards the minimum.
"""
return int(min_val + (max_val - min_val) * (random() ** bias))
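# Quick illustration of the bias: random() ** 0.5 is skewed towards 1, so
# biased_randint(1, 99, 0.5) lands above the midpoint roughly 75% of the
# time, while bias=1 reduces to a uniform draw.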
def get_ab_from_failures():
if len(failed) == 0:
return 0, 0
failed_ind = randint(0, len(failed)-1)
a = failed[failed_ind]['a']
b = failed[failed_ind]['b']
return a, b
def get_ab_from_failures_in_the_past():
# randomly choose a and b from the failures in the past
# Iterate over the DataFrame to find non-zero cells
ind = randint(0, len(failed_in_the_past)-1)
if randint(0,1):
a = failed_in_the_past[ind]['a']
b = failed_in_the_past[ind]['b']
else:
a = failed_in_the_past[ind]['b']
b = failed_in_the_past[ind]['a']
return a, b
def get_ab_general():
# a = randint(1,99)
a = biased_randint(1,99,randbias)
# b = randint(1,99)
b = biased_randint(1,99,randbias)
return a, b
def get_ab_Indian():
c_type = randint(1,3)
if c_type == 1:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = 10 - b_
a = a_ * 10 + b_
b = a_ * 10 + c_
elif c_type == 2:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = randint(1,9)
a = a_ * 10 + b_
b = a_ * 10 + c_
elif c_type == 3:
a_ = randint(1,9)
b_ = randint(1,9)
c_ = 10 - b_
a = b_ * 10 + a_
b = c_ * 10 + a_
return a, b
def get_ab_two_by_one():
tf = randint(0,1)
if tf:
a = randint(1,9)
b = randint(1,99)
else:
a = randint(1,99)
b = randint(1,9)
return a, b
def get_ab_three_by_one():
if view == 2:
a = randint(100,999)
b = randint(2,9)
else:
tf = randint(0,1)
if tf:
a = randint(2,9)
b = randint(100,999)
else:
a = randint(100,999)
b = randint(2,9)
return a, b
def run_trial(a, b):
dt1 = datetime.datetime.now()
show_problem(a, b, view)
ans = input("Type your answer (or 'q' to quit):\n>")
dt2 = datetime.datetime.now()
if ans == "q":
keep_going = False
else:
problems.append({'a':a,'b':b})
keep_going = True
try:
ans = int(ans)
except Exception as e:
print('wrong input')
results.append(float("nan"))
return keep_going
td = dt2 - dt1
minutes, seconds = divmod(td.total_seconds(), 60)
print(f"\n{minutes} min {seconds} sec\n")
elapsed_time.append(td.total_seconds())
if td.total_seconds() <= time_out_s :
if ans == a * b:
print(f"Correct! :)\n{a} x {b} = {a *b}\n")
results.append(1)
if reviewing:
failed.pop(failed_ind) # remove successful item from failed during review process
else:
print("\a") # didn't work
print(f"Your answer {ans} is wrong:(\n{a} x {b} = {a *b}\n")
results.append(0)
failed.append({'a':a,'b':b})
else:
print("\a") # didn't work
print('Too late')
if ans == a * b:
print(f"Correct! :)\n{a} x {b} = {a *b}\n")
else:
print(f"Your answer {ans} is wrong:(\n{a} x {b} = {a *b}\n")
results.append(0)
failed.append({'a':a,'b':b})
return keep_going
def plot_time(elapsed_time, problems, results):
plt.ion()
fig, ax = plt.subplots(1,1)
zipped = list(zip(elapsed_time, problems, results))
zipped_sorted = sorted(zipped, key=lambda x: x[0])
elapsed_time_sorted, problems_sorted, results_sorted = zip(*zipped_sorted)
for i in range(0, len(elapsed_time_sorted)):
if results_sorted[i]:
ax.plot(elapsed_time_sorted[i], i + 1, 'ok')
else:
ax.plot(elapsed_time_sorted[i], i + 1, 'xr')
ax.set_yticks([i + 1 for i in list(range(0, len(elapsed_time_sorted)))]) # +1
ax.set_xlabel('Time (s)')
xlim = ax.get_xlim()
ax.set_xlim(0, xlim[1])
problems_str =[f"{p['a']} x {p['b']}" for p in problems_sorted]
print(f"len(elapsed_time_sorted) = {len(elapsed_time_sorted)}")
print(f"len(problems_str) = {len(problems_str)}")
ax.set_yticklabels(problems_str)
plt.title("Session")
plt.show()
def plot_all():
# read the latest data
df_s = pd.read_excel(excel_path, index_col=0, sheet_name='successes')
df_f = pd.read_excel(excel_path, index_col=0, sheet_name='failures')
df_r = pd.read_excel(excel_path, index_col=0, sheet_name='rates').astype(float)
df_t = pd.read_excel(excel_path, index_col=0, sheet_name='time').astype(float)
# create lists
res_all = []
for i in range(1,100):
for j in range(1,100):
if df_s[i][j] + df_f[i][j] > 0: # remove the empty cells #TODO KeyError: 99
res_all.append({'a':i, 'b':j, 'n':df_s[i][j] + df_f[i][j],
's':df_s[i][j], 'f':df_f[i][j],
'r':df_r[i][j], 't':df_t[i][j]})
# sort l_all
res_sorted = sorted(res_all, key=lambda x: x['t'])
# read the saved table data and plot them
plt.ion()
fig, ax = plt.subplots(1,1)
max_val = max(item['r'] for item in res_sorted)
min_val = min(item['r'] for item in res_sorted)
norm = plt.Normalize(min_val, max_val)
# Choose a colormap
colormap = plt.cm.cool_r
x_values = [item['t'] for item in res_sorted]
y_values = list(range(1, len(res_sorted) + 1))
colors = colormap(norm([r['r'] for r in res_sorted]))
# Create a single scatter plot with all points
sc = ax.scatter(x_values, y_values, color=colors, s=100)
tooltips = [f"{r['a']} \u00D7 {r['b']}\n" +
f"{r['r']*100} % ({r['s']} of {r['s'] + r['f']})\n" +
f"{r['t']:.1f} sec" for r in res_sorted]
def update_annot(ind):
return tooltips[ind]
def on_hover(sel):
sel.annotation.set_text(update_annot(sel.index))
mplcursors.cursor(sc, hover=True).connect("add", on_hover)
ax.set_xlabel('Time (s)')
xlim = ax.get_xlim()
ax.set_xlim(0, xlim[1])
plt.title("History")
plt.show()
def save_result_table():
## response time
problems_ = problems
# Ensure 'a' is always <= 'b'
for p in problems_:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
combined = sorted(zip(problems_, elapsed_time), key=lambda x: (x[0]['a'], x[0]['b']))
problems_sorted, elapsed_time_sorted = zip(*combined)
for idx, p in enumerate(problems_sorted):
row_idx, col_idx = p['a'], p['b']
# Calculate new average
n = df_s.at[row_idx, col_idx] + df_f.at[row_idx, col_idx]
current_total_time = df_t.at[row_idx, col_idx] * n
new_total_time = current_total_time + elapsed_time_sorted[idx]
# Update df_t and df_n
df_t.at[row_idx, col_idx] = new_total_time / float(n + 1)
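# The update above is the standard incremental mean:
# new_mean = (old_mean * n + new_sample) / (n + 1),
# where n counts the attempts recorded before this entry.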
##successes and failures
# separate successes and failures
successful_problems = [problem for problem, result in zip(problems, results) if result == 1]
failed_problems = [problem for problem, result in zip(problems, results) if result == 0]
# make a <= b
for p in successful_problems:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
for p in failed_problems:
if p['a'] > p['b']:
p['a'], p['b'] = p['b'], p['a']
# sort (a, b) pairs
successful_problems = sorted(successful_problems, key=lambda x: (x['a'], x['b']))
failed_problems = sorted(failed_problems, key=lambda x: (x['a'], x['b']))
# update values of cells
for p in successful_problems:
if pd.isna(df_s.at[p['a'], p['b']]): # if for the first time
df_s.at[p['a'], p['b']] = 1
else:
df_s.at[p['a'], p['b']] += 1
for p in failed_problems:
if pd.isna(df_f.at[p['a'], p['b']]): # if for the first time
df_f.at[p['a'], p['b']] = 1
else:
df_f.at[p['a'], p['b']] += 1
# recompute rates
df_r = df_s.fillna(0) / (df_s.fillna(0) + df_f.fillna(0))
## save tables
with pd.ExcelWriter(excel_path) as writer:
df_s.to_excel(writer, index=True, sheet_name='successes')
df_f.to_excel(writer, index=True, sheet_name='failures')
df_r.to_excel(writer, index=True, sheet_name='rates')
df_t.to_excel(writer, index=True, sheet_name='time')
def show_results():
print("Finished")
if len(results) > 0:
print(f"Success rate: {sum(results)/len(results) * 100:.1f} % ({sum(results)}/{len(results)})")
ave_time = sum(elapsed_time) / len(elapsed_time) #TODO
print(f"Average response time :{ave_time} sec\n")
result_icons = ''.join('O' if r else 'X' for r in results)
print(result_icons)
plot_time(elapsed_time, problems, results)
failed_ = [ f"{f['a']} x {f['b']} = {f['a'] * f['b']}" for f in failed]
print("Failed calculations")
print(failed_)
if course != 6:
save_result_table()
plot_all()
keep_going = True
#TODO GUI for preference?
ans = int(input("Type 1 for general, 2 for Indian, 3 for mixed, 4 for 00 x 0, 5 for review, 6 for 000 x 0\n>"))
if ans == 1:
course = 1
elif ans == 2:
course = 2
elif ans == 3:
course = 3
elif ans == 4:
course = 4
elif ans == 5:
course = 5
elif ans == 6:
course = 6
else:
raise ValueError("course has an invalid value")
ans = int(input("Type 1 for horizontal view, 2 for stack view\n>"))
if ans == 1:
view = 1
elif ans == 2:
view = 2
else:
raise ValueError("view has an invalid value")
#TODO ask if you want to use biased random number generation
if course != 4 and course != 5 and course != 6:
ans = float(input("Type 1 for uniform randomness, <1 for biased to have larger digits\n>"))
if ans == 1:
randbias = 1
else:
randbias = ans # a bias < 1 skews towards larger digits (6, 7, 8, 9); see biased_randint
reviewing = False
while keep_going:
if course == 1:
a, b = get_ab_general()
elif course == 2:
a, b = get_ab_Indian()
elif course == 3:
ans = randint(0,1)
if ans:
a, b = get_ab_general()
else:
a, b = get_ab_Indian()
elif course == 4:
a, b = get_ab_two_by_one()
elif course == 5:
a, b = get_ab_from_failures_in_the_past()
elif course == 6:
a, b = get_ab_three_by_one()
keep_going = run_trial(a, b)
if not keep_going:
show_results()
ans = input("Do you want to practice the failed problems again? Y/N\n>")
if ans == "y" or ans == "Y":
results = [] #refresh
reviewing = True
keep_going = True
while keep_going:
a, b = get_ab_from_failures()
if a == 0 and b == 0:
keep_going = False
else:
keep_going = run_trial(a, b)
if not keep_going:
print("Finished")
print(f"Success rate: {sum(results)/len(results) * 100:.1f} % ({sum(results)}/{len(results)})")
ave_time = sum(elapsed_time) / len(elapsed_time)
print(f"Average response time :{ave_time} sec\n")
failed_ = [ f"{f['a']} x {f['b']} = {f['a'] * f['b']}" for f in failed]
print("Failed calculations")
print(failed_)
else:
print("Good bye")
|
kouichi-c-nakamura/anzan_training
|
anzan.py
|
anzan.py
|
py
| 13,926 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_excel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.Normalize",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "mplcursors.cursor",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 279,
"usage_type": "name"
},
{
"api_name": "pandas.isna",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "pandas.isna",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "pandas.ExcelWriter",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 414,
"usage_type": "call"
}
] |
32995735130
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
## We'll be doing this from scratch, so all imports will come from
## the Python standard library or 3rd-party tools
import socket
import struct
import base64
import json
import hashlib
import time
import enum
import xml.etree.ElementTree as ET
from enum import Enum
import pandas as pd
import password_obfuscation as obf
# # iRODS Protocol Cookbook
#
# This notebook will provide example implementations of key
# operations in the iRODS protocol. Read from the beginning or use this table of contents to skip to the section that interests you. Once you've jumped to that spot, make sure the cell with the anchor is selected and run `Cell > Run All Above`.
#
# ## Table of Contents
#
# * [Handshake](#handshake)
# * [Authentication](#authentication)
# * [ils](#ils)
# - [Stat a collection](#stat_coll)
# - [Querying for the Data Objects in a Container](#data_objects_query)
# * [Data transfer](#data_transfer)
# * [Streaming](#streaming)
# * [Admin](#admin)
# * [Rule Exec](#rule_exec)
# * [Changing Your Password](#ipasswd)
# * [Disconnect](#disconnect)
# * [Appendix: iRODS Protocol Gotchas](#gotchas)
# This tutorial assumes you have deployed iRODS in Docker using
# the script stand_it_up.py from the iRODS Testing Environment,
# which can be found on Github [here](https://github.com/irods/irods_testing_environment)
# To find the IP address associated with your Docker container, you can run this one-liner:
# ```bash
# docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ubuntu-2004-postgres-1012_irods-catalog-provider_1
# ```
#
# *However,* this notebook works just fine for any iRODS deployment. Simply change the values `HOST`, `RODS_USER`, `PASSWORD`. It is recommended to create a new rodsadmin account or use an account whose password you are comfortable changing, and to start in the home collection of that user.
# In[3]:
HOST = "172.27.0.3"
RODS_USER = "rods"
PASSWORD = "rods"
# In[4]:
PORT = 1247 ## This is the standard iRODS port
MAX_PASSWORD_LENGTH = 50 ## This constant comes
## from the internals
## of the iRODS server
API_TABLE = {
"AUTHENTICATION_APN":110000, ## The API number for the 4.3.0 auth framework
"OBJ_STAT_AN":633,
"GEN_QUERY_AN":702,
"DATA_OBJ_PUT_AN": 606,
"DATA_OBJ_OPEN_AN": 602,
"DATA_OBJ_LSEEK_AN": 674,
"DATA_OBJ_CLOSE_AN": 673,
"DATA_OBJ_READ_AN": 675,
"GENERAL_ADMIN_AN": 701,
"EXEC_MY_RULE_AN": 625,
"USER_ADMIN_AN": 714
}
## These provide indices into the catalog,
## which allow the iRODS server to directly query the SQL server
CATALOG_INDEX_TABLE = {
"COL_COLL_NAME" :"501",
"COL_D_DATA_ID" :"401",
"COL_DATA_NAME" :"403",
"COL_COLL_INHERITANCE":"506",
"COL_DATA_MODE" :"421",
"COL_DATA_SIZE" :"407",
"COL_D_MODIFY_TIME" :"420",
"COL_D_CREATE_TIME" :"419"
}
CATALOG_REVERSE_INDEX_TABLE = {
v:k for k,v in CATALOG_INDEX_TABLE.items()
}
## This is an arbitrary string hardcoded into the server; will be checked by the server
RANDOM_STRING_CLIENT_SIDE = "1gCBizHWbwIYyWLoysGzTe6SyzqFKMniZX05faZHWAwQKXf6Fs"
test_value = obf.encode(RANDOM_STRING_CLIENT_SIDE)
# First, we're going to write a small library of functions that do some
# of the dirty work.
# Feel free to skip to [here](#start_of_real_work), where we start using this library to send
# and read messages, referring to this part to figure out how
# the part you're interested in was implemented.
#
# *Notice* that the comment above `def header(...` includes the packing instruction string for `MsgHeader_PI` ("PI" stands for "Packing Instruction"). This string has a special syntax that the iRODS server uses to define these message types.
# In[5]:
## We can define these in an enum since
## header types are a closed class and are not sensitive to any
## particular API.
class HeaderType(Enum):
RODS_CONNECT = "RODS_CONNECT"
RODS_DISCONNECT = "RODS_DISCONNECT"
RODS_API_REQ = "RODS_API_REQ"
RODS_API_REPLY = "RODS_API_REPLY"
RODS_VERSION = "RODS_VERSION"
# #define MsgHeader_PI "str type[HEADER_TYPE_LEN]; int msgLen; int errorLen; int bsLen; int intInfo;"
def header(header_type: HeaderType, msg: bytes,
error_len=0, bs_len=0, int_info=0) -> bytes:
return f"""
<MsgHeader_PI>
<type>{header_type}</type>
<msgLen>{len(msg)}</msgLen>
<errorLen>{error_len}</errorLen>
<bsLen>{bs_len}</bsLen>
<intInfo>{int_info}</intInfo>
</MsgHeader_PI>
""".replace(' ', '').replace('\n', '').encode('utf-8') ## The protocol is whitespace-insensitive,
## but I removed them here for cleanliness
## and efficiency for when this gets pushed
## through the pipe.
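## For example, header(HeaderType.RODS_CONNECT.value, b"x" * 10) yields
## b'<MsgHeader_PI><type>RODS_CONNECT</type><msgLen>10</msgLen><errorLen>0</errorLen><bsLen>0</bsLen><intInfo>0</intInfo></MsgHeader_PI>'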
def indent(elem, level=0):
i = "\n" + level*" "
j = "\n" + (level-1)*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
# In[6]:
def send_header(header: bytes, sock: socket.socket) -> None:
header_len = int.to_bytes(len(header), byteorder='big', length=4) ## The first part of all iRODS messages
## must be 4 bytes indicating how long
## the header is in bytes. These bytes
## and the entire integer must be transmitted
## in big-endian order
print(f"[header_len] - [{header_len}]")
print(f"[header] - [{header}]")
sock.sendall(header_len)
sock.sendall(header)
def send_msg(msg: bytes,
sock: socket.socket,
error_buf: bytes = None,
bs_buf: bytes = None) -> None:
sock.sendall(msg)
print(f"[msg] - [{msg}]")
if error_buf:
sock.sendall(error_buf)
if bs_buf:
sock.sendall(bs_buf)
def recv(sock: socket.socket) -> tuple: ## returns (header, msg) as parsed ElementTrees
header_len = int.from_bytes(sock.recv(4), byteorder='big')
print(f"HEADER LEN: [{header_len}]")
header = ET.fromstring(sock.recv(header_len).decode("utf-8"))
ET.indent(header)
ET.dump(header)
if header_len > 0: ## TODO: It's odd that this is included as a case because something would be really
## broken if this were true
msg_len = int(header.find("msgLen").text)
bs_len = int(header.find("bsLen").text)
error_len = int(header.find("errorLen").text)
if msg_len > 0:
msg = ET.fromstring(sock.recv(
int(header.find("msgLen").text)).decode("utf-8"))
ET.indent(msg)
ET.dump(msg)
if error_len > 0:
print("[recv] getting error stack")
print(sock.recv(error_len))
if bs_len > 0:
print("[recv] getting bs buf")
print(sock.recv(bs_len))
return header, msg
else:
if error_len > 0:
print("[recv] getting error stack")
print(sock.recv(error_len))
if bs_len > 0:
print("[recv] getting bs buf")
print(sock.recv(bs_len))
return header, None
else:
return header, None
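## To recap the framing implemented by send_header/send_msg/recv: every
## iRODS exchange on the wire is <4-byte big-endian header length>
## <MsgHeader_PI XML> followed by the msg, error, and bs buffers, whose
## sizes the header declares. For example,
## struct.unpack(">i", b"\x00\x00\x00\x10")[0] == 16
## recovers a 16-byte header length.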
# ## Start of the "Real Work" <a class="anchor" id="start_of_real_work"></a>
# Note that even if you are using a plugin for authentication, iRODS may still refer to the information in the StartupPack_PI during authentication. If you are experiencing bugs during that step, check your Startup Pack as well as the structures associated with your specific plugin.
# In[7]:
class IrodsProt(Enum):
NATIVE_PROT = 0
XML_PROT = 1
## Now, let's start the connection process. First, we need an easy way to create the StartupPack.
## define StartupPack_PI "int irodsProt; int reconnFlag; int connectCnt; str proxyUser[NAME_LEN];\
## str proxyRcatZone[NAME_LEN]; str clientUser[NAME_LEN]; str clientRcatZone[NAME_LEN];\
## str relVersion[NAME_LEN]; str apiVersion[NAME_LEN]; str option[LONG_NAME_LEN];"
def startup_pack(irods_prot=IrodsProt.XML_PROT.value,
reconn_flag=0,
connect_cnt=0,
proxy_user=None,
proxy_rcat_zone=None,
client_user="rods",
client_rcat_zone="tempZone",
rel_version="4.3.0",
api_version="d", ## This MUST ALWAYS be "d." This value has been hardcoded into iRODS
## since very early days.
option=None ## This option controls, among other things,whether SSL negotiation is required.
) -> bytes:
return f"""
<StartupPack_PI>
<irodsProt>{irods_prot}</irodsProt>
<reconnFlag>{reconn_flag}</reconnFlag>
<connectCnt>{connect_cnt}</connectCnt>
<proxyUser>{proxy_user or client_user}</proxyUser>
<proxyRcatZone>{proxy_rcat_zone or client_rcat_zone}</proxyRcatZone>
<clientUser>{client_user}</clientUser>
<clientRcatZone>{client_rcat_zone}</clientRcatZone>
<relVersion>rods{rel_version}</relVersion>
<apiVersion>{api_version}</apiVersion>
<option>{option}</option>
</StartupPack_PI>
""".replace(" ", "").replace("\n", "").encode("utf-8")
# We're going to be sending raw bytes over a socket, so let's create one
# If at some point the Notebook stops working, remember
# to manually close the socket.
# In[8]:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((HOST, PORT))
# ## Handshake <a class="anchor" id="handshake"></a>
# In[ ]:
sp = startup_pack(client_user=RODS_USER)
sp
# In[ ]:
h = header(HeaderType.RODS_CONNECT.value, sp)
h
# In[ ]:
send_header(h, conn)
send_msg(sp, conn)
# In[ ]:
## In this Version_PI, status of 0 lets us know that negotiation has been successful.
h, msg = recv(conn)
# ## Authentication <a class="anchor" id="authentication"></a>
#
# Next up, we need to authenticate using our API of choice.
# Since this is a basic cookbook for 4.3.0, we'll be using the new
# auth framework's port of native authentication.
# This API works by exchanging binary buffers between client and server.
# Since XML must be valid UTF-8, this binary data MUST be base64-encoded.
# In[ ]:
def encode_dict_as_base64_json(d: dict):
return base64.b64encode(
json.dumps(d).encode('utf-8'))
# The payload is decoded because otherwise Python will
# add extra characters to give a string representation of the bytes object
# In[ ]:
def read_base64_into_json(bsix: bytes, trunc=False) -> dict:
decoded = base64.b64decode(bsix).decode('utf-8')
return json.loads(decoded[:-1]) if trunc else json.loads(decoded)
## #define BytesBuf_PI "int buflen; char *buf(buflen);"
def bin_bytes_buf(payload: dict) -> bytes:
payload = encode_dict_as_base64_json(payload)
return f"""
<BinBytesBuf_PI>
<buflen>{len(payload)}</buflen>
<buf>{payload.decode('utf-8')}</buf>
</BinBytesBuf_PI>
""".replace(" ", "").replace("\n","").encode('utf8')
# In[ ]:
## Some API-specific parameters
auth_ctx = {
"a_ttl":"0",
"force_password_prompt":"true",
"next_operation":"auth_agent_auth_request",
"scheme":"native",
"user_name":"rods",
"zone_name":"tempZone"
}
# In[ ]:
initial_auth_msg = bin_bytes_buf(auth_ctx)
print(initial_auth_msg)
h = header(HeaderType.RODS_API_REQ.value,
initial_auth_msg,
int_info=API_TABLE["AUTHENTICATION_APN"])
send_header(h, conn)
send_msg(initial_auth_msg, conn)
# In[ ]:
h, m = recv(conn)
# If you were writing a real client library or application, you would want to check intInfo for error codes
# so you could respond appropriately. Here, we're going to move on blissfully unaware.
# In[ ]:
auth_ctx = read_base64_into_json(m.find("buf").text, trunc=True)
request_result = auth_ctx['request_result'].encode('utf-8')
print(f"REQUEST RESULT: [{request_result}]")
# In[ ]:
def pad_password(pw: str) -> bytes:
return struct.pack("%ds" % MAX_PASSWORD_LENGTH, pw.encode("utf-8").strip())
## The "signature" is taken from the first 16 bytes of the challenge string
## and is used by the server to validate certain operations,
## like password changes.
signature = "".join("{:02x}".format(c) for c in request_result)
print(f"SIGNATURE: [{signature}]")
## Native auth specific operations
m = hashlib.md5()
m.update(request_result)
m.update(pad_password(PASSWORD))
digest = m.digest()
encoded_digest = base64.b64encode(digest).decode('utf-8')
auth_ctx['digest'] = encoded_digest
auth_ctx['next_operation'] = 'auth_agent_auth_response'
challenge_response = bin_bytes_buf(auth_ctx)
print(challenge_response)
# In[ ]:
h = header(HeaderType.RODS_API_REQ.value,
challenge_response,
int_info=API_TABLE["AUTHENTICATION_APN"])
send_header(h, conn)
send_msg(challenge_response, conn)
# Once again, an `intInfo` of 0 is the auth framework's way of telling us that we've successfully authenticated. Decode the buf frame base64 if you'd like to double check the state of the auth context.
# In[ ]:
h, m = recv(conn)
# # ils <a class="anchor" id="ils"></a>
# Next, let's perform an `ils`. The iCommands implementation does a little bit of verification, so we'll see how to perform object stat-ing, genQuery, and specQuery here.
# Before delving into the substance of an iRODS workflow, it is worth picturing the general flow of the protocol (the notebook version of this document includes a diagram here). Essentially, after the handshake, the client and server alternate between API requests and appropriate responses indefinitely until the client sends a disconnect.
# ## Stat a Collection <a class="anchor" id="stat_coll"></a>
# This step is necessary to make sure that the directory about to be ls'd actually exists.
# First, we'll have to generate a `DataObjInp_PI`. This is a generic message type used for all sorts of operations. It also contains a `KeyValPair_PI`, which is an important data structure in the iRODS protocol. Although it cannot be sent on its own, it is a very important vehicle for parameters. Internally, this `KeyValPair_PI` is a cond_input structure.
# In[ ]:
## #define DataObjInp_PI "str objPath[MAX_NAME_LEN]; int createMode; int openFlags; double offset; \
## double dataSize; int numThreads; int oprType; struct *SpecColl_PI; struct KeyValPair_PI;"
def data_obj_inp(
obj_path,
create_mode="0",
open_flags="0",
offset="0",
data_size="0",
num_threads="0",
opr_type="0",
cond_input= {}
) -> bytes:
obj_inp = ET.fromstring(f"""
<DataObjInp_PI>
<objPath>{obj_path}</objPath>
<createMode>{create_mode}</createMode>
<openFlags>{open_flags}</openFlags>
<offset>{offset}</offset>
<dataSize>{data_size}</dataSize>
<numThreads>{num_threads}</numThreads>
<oprType>{opr_type}</oprType>
</DataObjInp_PI>
""")
ET.indent(obj_inp)
obj_inp = append_kvp(obj_inp, cond_input)
ret = ET.tostring(obj_inp).decode("utf-8").replace("\n", "").replace(" ", "").encode('utf-8')
print(ret)
return ret
# Next, we'll need some utility methods. How these work might not be totally obvious, so consider reading ahead and revisiting these once you've seen how it's used in the stat API Call.
# In[ ]:
def append_kvp(et, data):
kvp = ET.Element("KeyValPair_PI")
sslen = ET.Element("ssLen")
sslen.text = str(len(data))
kvp.append(sslen)
for key in data.keys():
keyWord = ET.Element("keyWord")
keyWord.text = key
kvp.append(keyWord)
for value in data.values():
svalue = ET.Element("svalue")
svalue.text = value
kvp.append(svalue)
et.append(kvp)
return et
def append_iivp(et, data):
iivp = ET.Element("InxIvalPair_PI")
sslen = ET.Element("iiLen")
sslen.text = str(len(data))
iivp.append(sslen)
for key in data.keys():
inx = ET.Element("inx")
inx.text = key
iivp.append(inx)
for value in data.values():
ivalue = ET.Element("ivalue")
ivalue.text = value
iivp.append(ivalue)
et.append(iivp)
return et
def append_ivp(et, data):
ivp = ET.Element("InxValPair_PI")
islen = ET.Element("isLen")
islen.text = str(len(data))
ivp.append(islen)
for key in data.keys():
inx = ET.Element("inx")
inx.text = key
ivp.append(inx)
for value in data.values():
svalue = ET.Element("svalue")
svalue.text = value
ivp.append(svalue)
et.append(ivp)
return et
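## Illustrative output of append_kvp (assuming an empty parent element):
## append_kvp(ET.fromstring("<x/>"), {"dataType": "generic"}) serializes to
## <x><KeyValPair_PI><ssLen>1</ssLen><keyWord>dataType</keyWord><svalue>generic</svalue></KeyValPair_PI></x>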
# In[ ]:
stat_obj_inp = data_obj_inp("/tempZone/home/rods")
h = header(HeaderType.RODS_API_REQ.value,
stat_obj_inp,
int_info=API_TABLE["OBJ_STAT_AN"])
send_header(h, conn)
send_msg(stat_obj_inp, conn)
# If everything has gone smoothly, you should receive a `RodsObjStat_PI` from the server. An `objType` of 2 tells us that the thing we stat'd was a collection. Since collections are purely virtual objects, `objSize` is 0.
# In[ ]:
h, m = recv(conn)
# # Querying for the Data Objects in a Container <a class="anchor" id="data_objects_query"></a>
#
# Now we know our target is there. Let's go ahead and read its contents. This happens through a genQuery. For details about the first-generation GenQuery API, see [here](https://github.com/irods/irods_docs/blob/main/docs/developers/library_examples.md#querying-the-catalog-using-general-queries). For information about the GenQuery2 interface (under development as of time of writing), see [here](https://www.youtube.com/watch?v=3dR_JoGA6wA&t=654s&ab_channel=TheiRODSConsortium).
# In[ ]:
## #define GenQueryInp_PI "int maxRows; int continueInx; int partialStartIndex; \
## int options; struct KeyValPair_PI; struct InxIvalPair_PI; struct InxValPair_PI;"
def gen_query(
max_rows=256,
continue_inx=0,
partial_start_index=0,
options=0,
cond_input={},
select_inp={},
sql_cond_inp={}
) -> bytes:
ret = ET.fromstring(f"""
<GenQueryInp_PI>
<maxRows>{max_rows}</maxRows>
<continueInx>{continue_inx}</continueInx>
<partialStartIndex>{partial_start_index}</partialStartIndex>
<options>{options}</options>
</GenQueryInp_PI>
""")
ret = append_kvp(ret, cond_input)
ret = append_iivp(ret, select_inp)
ret = append_ivp(ret, sql_cond_inp)
return ET.tostring(ret).decode("utf-8").replace(" ", "").replace("\n", "").encode("utf-8")
## The Catalog ships with a table of SQL functions that can perform common functions
## The first link above also has an example of a specific query.
## Note that the server will send back a GenQueryOut_PI; there is no
## message type dedicated to results from a specQuery. However, all the SqlResult_PIs
## will have `attriInx` set to 0, since knowing the query string allows the client to
## reconstruct the order of the columns.
def spec_query(
sql,
arg_1,
max_rows=256,
continue_inx=0,
row_offset=0,
options=0,
cond_input={}
) -> bytes:
ret = ET.fromstring(f"""
<specificQueryInp_PI>
<sql>{sql}</sql>
<arg1>{arg_1}</arg1>
<maxRows>{max_rows}</maxRows>
<continueInx>{continue_inx}</continueInx>
<rowOffset>{row_offset}</rowOffset>
<options>{options}</options>
</specificQueryInp_PI>
""")
ret = append_kvp(ret, cond_input)
return ET.tostring(ret)
# In[ ]:
gq = gen_query(
select_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_D_DATA_ID"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_MODE"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_SIZE"] :"1",
CATALOG_INDEX_TABLE["COL_D_MODIFY_TIME"]:"1",
CATALOG_INDEX_TABLE["COL_D_CREATE_TIME"]:"1"
},
sql_cond_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"] :f"= '/tempZone/home/{RODS_USER}'"
}
)
# *NB:* It might be easier to make sense of the server's response if you make sure the directory you're about to stat is populated.
# One quick thing before we send this over to the server: the iRODS dialect of XML has a few quirks related to encoding special characters. Some special characters it does not escape at all. For others, it uses a non-standard encoding. For example, iRODS XML does not distinguish between "\`" and "'" (backticks and single quotes). For these reasons, we'll need to write some functions that translate between standard XML and iRODS XML.
# In[ ]:
STANDARD_TO_IRODS_TABLE = {
b'"' :b"&quot;",
b"&#34;" :b"&quot;",
b"&#39;" :b"&apos;",
b"&#9;" :b"\t",
b"&#13;" :b"\r",
b"&#10;" :b"\n",
b"`" :b"&apos;",
b"'" :b"&apos;"
}
def translate_xml_to_irods_dialect(xml_bytes):
for prefix in STANDARD_TO_IRODS_TABLE:
xml_bytes = xml_bytes.replace(prefix, STANDARD_TO_IRODS_TABLE[prefix])
return xml_bytes
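## For example, since iRODS encodes single quotes as &apos;:
## translate_xml_to_irods_dialect(b"<v>don't</v>") == b"<v>don&apos;t</v>"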
gq = translate_xml_to_irods_dialect(gq)
print(gq)
h = header(HeaderType.RODS_API_REQ.value,
gq,
int_info=API_TABLE["GEN_QUERY_AN"])
# In[ ]:
send_header(h, conn)
send_msg(gq, conn)
# The results from this GenQuery might be a little hard to grok.
# In[ ]:
h, m = recv(conn)
# To demonstrate how they amount to valid SQL results, let's translate these into a Pandas DataFrame. To see a similar example in C++ that operates above the protocol level, refer to the genQuery1 documentation linked above.
# In[ ]:
def read_gen_query_results_into_dataframe(gqr):
## Each SqlResult_PI is a column of data
## Collect them all into a list
## We can safely ignore the "reslen" attribute since the Python XML
## API already knows how large each string is, but you might use it for error checking
row_cnt = int(gqr.find("rowCnt").text)
attribute_cnt = int(gqr.find("attriCnt").text)
data = {}
for result in gqr.findall("SqlResult_PI"):
attri_inx = result.find("attriInx").text
if attri_inx == "0":
continue
# res_len = int(result.find("reslen").text)
values = result.findall("value")
col = [value.text for value in values]
data[CATALOG_REVERSE_INDEX_TABLE[attri_inx]] = col
return pd.DataFrame(data)
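## To restate the GenQueryOut_PI layout the function above relies on: each
## SqlResult_PI is one *column*; attriInx identifies the catalog column (see
## CATALOG_REVERSE_INDEX_TABLE) and its <value> children are the rows, so
## every column carries rowCnt values in row order.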
read_gen_query_results_into_dataframe(m)
# # Data Transfer <a class="anchor" id="data_transfer"></a>
#
# Now that we can see the contents of this collection, let's create a new data object inside of it.
# This will showcase some of the more advanced features of `condInput`.
# In[ ]:
## Suppose we want to transfer a file containing this text.
hello_cpp = """
#include <iostream>
int main() {
std::cout << "Hello World!";
return 0;
}
"""
# In[ ]:
data_object_name = "hello.cpp"
data_size=str(len(hello_cpp.encode("utf-8")))
iput_payload = data_obj_inp(
f"/tempZone/home/{RODS_USER}/{data_object_name}",
open_flags="2",
data_size=data_size,
opr_type="1",
cond_input={
"dataType":"generic",
"dataSize":data_size,
"dataIncluded":" " ## Generally, keys with empty values in cond_input act as flags
}
)
h = header(HeaderType.RODS_API_REQ.value,
iput_payload,
int_info=API_TABLE["DATA_OBJ_PUT_AN"],
bs_len=len(hello_cpp.encode("utf-8")))
send_header(h, conn)
send_msg(iput_payload, conn, bs_buf=hello_cpp.encode("utf-8"))
# Once you've received the response from the server and verified that `intInfo` is zero, go re-run the genQuery which produced the ls you ran before. You should see new file there.
# In[ ]:
h, m = recv(conn)
# In[ ]:
gq = gen_query(
select_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_NAME"] :"1",
CATALOG_INDEX_TABLE["COL_D_DATA_ID"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_MODE"] :"1",
CATALOG_INDEX_TABLE["COL_DATA_SIZE"] :"1",
CATALOG_INDEX_TABLE["COL_D_MODIFY_TIME"]:"1",
CATALOG_INDEX_TABLE["COL_D_CREATE_TIME"]:"1"
},
sql_cond_inp={
CATALOG_INDEX_TABLE["COL_COLL_NAME"]:"= '/tempZone/home/rods'"
}
)
gq = translate_xml_to_irods_dialect(gq)
## Build the header from the final, translated query bytes so msgLen matches.
h = header(HeaderType.RODS_API_REQ.value,
gq,
int_info=API_TABLE["GEN_QUERY_AN"])
send_header(h, conn)
send_msg(gq, conn)
h, m = recv(conn)
read_gen_query_results_into_dataframe(m)
# ## Streaming <a class="anchor" id="streaming"></a>
#
# Modern iRODS versions implement parallel transfer using multiple streams. This documentation won't implement parallel transfer, but will show how to use the streaming API that it is built on top of.
# In[ ]:
## We'll open this file, seek past #includes and read.
## Streamed putting works similarly, and in general
## you can think of these calls as analogous to their UNIX counterparts.
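## The UNIX analogy, concretely (API numbers from API_TABLE above):
## open() -> DATA_OBJ_OPEN_AN (602), lseek() -> DATA_OBJ_LSEEK_AN (674),
## read() -> DATA_OBJ_READ_AN (675), close() -> DATA_OBJ_CLOSE_AN (673)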
streaming_open_request = data_obj_inp(
"/tempZone/home/rods/hello.cpp",
open_flags="2",
data_size="-1" ## We're getting the data from somewhere else,
## so obviously we don't know how big it is
)
h = header(
HeaderType.RODS_API_REQ.value,
streaming_open_request,
int_info=API_TABLE["DATA_OBJ_OPEN_AN"]
)
send_header(h, conn)
send_msg(streaming_open_request, conn)
# In[ ]:
h, m = recv(conn)
# In[ ]:
print(h.find("intInfo").text)
# In[ ]:
## This time intInfo, if it is positive, will be the value of the L1 descriptor returned by the server,
## which is an opaque handle to a replica of the data object we just opened.
## Notice that it's 3, just like you'd expect opening the first file on a UNIX system.
l1_descriptor = h.find("intInfo").text
seek_len = 22
## These constants are taken from their Linux equivalents
## and work the same way
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
## #define OpenedDataObjInp_PI "int l1descInx; int len; int whence; int oprType; \
## double offset; double bytesWritten; struct KeyValPair_PI;"
def opened_data_obj_inp(l1_desc,
len_=0,
whence=SEEK_SET,
opr_type=0,
offset=0,
bytes_written=0,
cond_input={}):
ret = ET.fromstring(f"""
<OpenedDataObjInp_PI>
<l1descInx>{l1_desc}</l1descInx>
<len>{len_}</len>
<whence>{whence}</whence>
<oprType>{opr_type}</oprType>
<offset>{offset}</offset>
<bytesWritten>{bytes_written}</bytesWritten>
</OpenedDataObjInp_PI>
""")
ret = append_kvp(ret, cond_input)
return ET.tostring(ret).decode("utf-8").replace(" ", "").replace("\n", "").encode("utf-8")
# In[ ]:
seeker = opened_data_obj_inp(l1_descriptor, offset=seek_len)
print(seeker)
h = header(
HeaderType.RODS_API_REQ.value,
seeker,
int_info=API_TABLE["DATA_OBJ_LSEEK_AN"]
)
send_header(h, conn)
send_msg(seeker, conn)
# In[ ]:
h, m = recv(conn)
# In[ ]:
reader = opened_data_obj_inp(l1_descriptor, len_=8192) ## The len parameter is important --
## this tells the server how many
## bytes to stream back to the client
print(reader)
h = header(
HeaderType.RODS_API_REQ.value,
reader,
int_info=API_TABLE["DATA_OBJ_READ_AN"]
)
send_header(h, conn)
send_msg(reader, conn)
# In[ ]:
h, m = recv(conn)
# In[ ]:
closer = opened_data_obj_inp(l1_descriptor)
h = header(
HeaderType.RODS_API_REQ.value,
closer,
int_info=API_TABLE["DATA_OBJ_CLOSE_AN"]
)
# In[ ]:
send_header(h, conn)
send_msg(closer, conn)
# In[ ]:
h, m = recv(conn)
# # Admin <a class="anchor" id="admin"></a>
# Next, we're going to look at how to perform admin tasks. Recall from the section where we implemented "ils" that the iRODS server ships with prebuilt queries stored in the catalog. These are called "specific queries." The iCommand `asq` allows administrators to add new catalog queries. Let's implement `asq` straight from the protocol.
#
# You might notice that the parameters for `generalAdminInp_PI` are not very self-describing. To get a better sense of what you can do with the admin API and how to map those to arguments, see [`server/api/src/rsGeneralAdmin.cpp`](https://github.com/irods/irods/blob/main/server/api/src/rsGeneralAdmin.cpp), and specifically the function `_rsGeneralAdmin`.
# In[ ]:
dummy_spec_query = "SELECT data_name FROM r_data_main"
## #define generalAdminInp_PI "str *arg0; str *arg1; str *arg2; \
## str *arg3; str *arg4; str *arg5; str *arg6; str *arg7; str *arg8; str *arg9;"
def general_admin_inp(
arg_zero=" ",
arg_one=" ",
arg_two=" ",
arg_three=" ",
arg_four=" ",
arg_five=" ",
arg_six=" ",
arg_seven=" ",
arg_eight=" ",
arg_nine=" "
):
return f"""
<generalAdminInp_PI>
<arg0>{arg_zero}</arg0>
<arg1>{arg_one}</arg1>
<arg2>{arg_two}</arg2>
<arg3>{arg_three}</arg3>
<arg4>{arg_four}</arg4>
<arg5>{arg_five}</arg5>
<arg6>{arg_six}</arg6>
<arg7>{arg_seven}</arg7>
<arg8>{arg_eight}</arg8>
<arg9>{arg_nine}</arg9>
</generalAdminInp_PI>
""".replace("\n", "").encode("utf-8")
# In[ ]:
new_spec_query_req = general_admin_inp(
arg_zero="add",
arg_one="specificQuery",
arg_two=dummy_spec_query,
arg_three="another_dummy_spec_query"
)
h = header(
HeaderType.RODS_API_REQ.value,
new_spec_query_req,
int_info=API_TABLE["GENERAL_ADMIN_AN"]
)
# In[48]:
send_header(h, conn)
send_msg(new_spec_query_req, conn)
# In[49]:
h, m = recv(conn) ## Assuming int_info is 0, you should now be able to run your query on the command line like this:
## "iquest --no-page --sql another_dummy_spec_query"
# # Rule Exec <a class="anchor" id="rule_exec"></a>
# The last thing we'll look at is sending rule execution requests.
# We won't procedurally create this string to reduce complexity, but the structure of these XML structures should be clear from the context. The text of this rule is taken from [documentation](https://vlaams-supercomputing-centrum-vscdocumentation.readthedocs-hosted.com/en/latest/data/workflow_automation.html) produced by the Vlaams Supercomputing Center.
# In[59]:
rule_text = """
veryAdvancedHelloWorldRule{
writeLine("stdout","$userNameClient says '*greeting1 *greeting2'")
}
"""
## #define ExecMyRuleInp_PI "str myRule[META_STR_LEN]; struct RHostAddr_PI; \
## struct KeyValPair_PI; str outParamDesc[LONG_NAME_LEN]; struct *MsParamArray_PI;"
rule_exec_PI = ET.fromstring(f"""
<ExecMyRuleInp_PI>
<myRule>@external
veryAdvancedHelloWorldRule{{
writeLine('stdout',"$userNameClient says '*greeting1 *greeting2'")
}}
</myRule>
<RHostAddr_PI>
<hostAddr></hostAddr>
<rodsZone></rodsZone>
<port>0</port>
<dummyInt>0</dummyInt>
</RHostAddr_PI>
<KeyValPair_PI>
<ssLen>1</ssLen>
<keyWord>instance_name</keyWord>
<svalue>irods_rule_engine_plugin-irods_rule_language-instance</svalue>
</KeyValPair_PI>
<outParamDesc>ruleExecOut</outParamDesc>
<MsParamArray_PI>
<paramLen>2</paramLen>
<oprType>0</oprType>
<MsParam_PI>
<label>*greeting1</label>
<type>STR_PI</type>
<STR_PI>
<myStr> 'Hello'</myStr>
</STR_PI>
</MsParam_PI>
<MsParam_PI>
<label>*greeting2</label>
<type>STR_PI</type>
<STR_PI>
<myStr> 'World'</myStr>
</STR_PI>
</MsParam_PI>
</MsParamArray_PI>
</ExecMyRuleInp_PI>
""".encode("utf-8"))
rule_exec_PI = ET.tostring(rule_exec_PI)
rule_exec_PI = translate_xml_to_irods_dialect(rule_exec_PI)
print(rule_exec_PI)
# In[60]:
h = header(
HeaderType.RODS_API_REQ.value,
rule_exec_PI,
int_info=API_TABLE["EXEC_MY_RULE_AN"]
)
send_header(h, conn)
send_msg(rule_exec_PI, conn)
# This rule prints "Hello World!" to stdout. Notice that when you receive that message from the server, the buffer is 5464 bytes long and contains a long string of null/garbage characters after the desired string. This is a known feature of the native rule engine; when printing to stdout, it always allocates a buffer of this size and assumes that the client will look for a null-terminator to determine where the actual content ends.
# In[61]:
h, m = recv(conn)
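## Illustrative cleanup of such a buffer (assuming `buf` holds the raw
## stdout bytes from the reply): keep everything before the first null byte.
## clean = buf.split(b"\x00", 1)[0].decode("utf-8")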
# # Changing Your Password <a class="anchor" id="ipasswd"></a>
# In addition to the general admin capabilities, iRODS exposes certain administrative abilities to rodsusers. Creating a new user would just involve switching parameters in `generalAdminInp_PI`, so you might want to skip this section if you're not interested in that. However, switching your own password goes through the separate `userAdminInp_PI` API, which is what we exercise below.
# In[53]:
def user_admin(
arg_zero=" ",
arg_one=" ",
arg_two=" ",
arg_three=" ",
arg_four=" ",
arg_five=" ",
arg_six=" ",
arg_seven=" ",
arg_eight=" ",
arg_nine=" "
):
return f"""
<userAdminInp_PI>
<arg0>{arg_zero}</arg0>
<arg1>{arg_one}</arg1>
<arg2>{arg_two}</arg2>
<arg3>{arg_three}</arg3>
<arg4>{arg_four}</arg4>
<arg5>{arg_five}</arg5>
<arg6>{arg_six}</arg6>
<arg7>{arg_seven}</arg7>
<arg8>{arg_eight}</arg8>
<arg9>{arg_nine}</arg9>
</userAdminInp_PI>
""".replace("\n", "").replace(" ", "").encode("utf-8")
# In[54]:
obfuscated_password = obf.obfuscate_new_password("testpass",
PASSWORD,
signature)
pw_change_request = user_admin(
arg_zero="userpw",
arg_one=RODS_USER,
arg_two="password",
arg_three=obfuscated_password
)
# In[55]:
h = header(
HeaderType.RODS_API_REQ.value,
pw_change_request,
int_info=API_TABLE["USER_ADMIN_AN"]
)
send_header(h, conn)
send_msg(pw_change_request, conn)
# In[56]:
h, m = recv(conn)
# # Disconnect <a class="anchor" id="disconnect"></a>
# Finally, we'll disconnect from the iRODS server.
# In[57]:
def disconnect(sock):
sock.send(
header(HeaderType.RODS_DISCONNECT.value, "") ## Empty string so msgLen is 0
)
# In[58]:
disconnect(conn)
conn.close()
# # Appendix: iRODS Protocol Gotchas <a class="anchor" id="gotchas"></a>
# - Forgetting to close a tag can often trip up the server's parsing logic in such a way that it sends a header with `intInfo` 0, or some other indication that the request was successful. However, the next message will have an error code `-15000` indicating a formatting error. A similar behavior is sometimes
# seen if a call to `recv` (or whatever function you write that pulls bytes out of the TCP socket) is left out after an API request.
# - Although the protocol is supposed to be whitespace-agnostic, beginning a message with a newline character (`\n`) can sometimes cause unexpected behavior, so it is safest to avoid leading whitespace.
# - The protocol is order-dependent; that is, the order in which XML elements appear in the messages must be exactly identical to the order in which they appear in the corresponding packing instruction string as defined in `rodsPackInstruct.h`
|
irods/iRODS-Protocol-Cookbook
|
iRODS Protocol Cookbook.py
|
iRODS Protocol Cookbook.py
|
py
| 35,966 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "password_obfuscation.encode",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.indent",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.dump",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 188,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 195,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.indent",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.dump",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 449,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.indent",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 460,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 462,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 473,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 474,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 478,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 489,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 489,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 490,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 490,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 494,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 498,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 498,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 505,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 505,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 506,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 510,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 510,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.Element",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 514,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 559,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 571,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 588,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 588,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 600,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 698,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 842,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 842,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 854,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 854,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 1008,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 1008,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.tostring",
"line_number": 1047,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 1047,
"usage_type": "name"
},
{
"api_name": "password_obfuscation.obfuscate_new_password",
"line_number": 1109,
"usage_type": "call"
}
] |
71749351229
|
from json import loads
from kafka import KafkaConsumer
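# Consume from 'test-topic', decoding each record's value from JSON;
# auto_offset_reset='earliest' makes a new consumer group start at the
# beginning of the topic instead of only reading new messages.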
consumer = KafkaConsumer(
'test-topic',
bootstrap_servers=['0.0.0.0:9092'],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id='test-json-group',
value_deserializer=lambda x: loads(x.decode('utf-8')))
for message in consumer:
print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
message.offset, message.key,
message.value))
|
makseli/kafka-docker-python
|
consumer-json.py
|
consumer-json.py
|
py
| 522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "kafka.KafkaConsumer",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
}
] |
7725885886
|
import pandas as pd
from sqlalchemy import create_engine
from influxdb import InfluxDBClient
import time
def connectSQL():
connection_str = 'mssql+pyodbc://royg:Welcome1@SCADA'
engine = create_engine(connection_str)
conn = engine.connect()
return conn
def getData(conn,interval):
if (interval==1):
tabname='data_values_min_4_2017'
else:
tabname='data_values_'+str(interval)+'min_4_2017'
queryResult = conn.execute('''
-- SELECT TOP 10 RTRIM(LTRIM(REPLACE(REPLACE(dd.name,' ','\ '),',','\,'))) measurement,
SELECT LTRIM(dd.name) measurement,
CAST(dd.osi_key AS VARCHAR) AS [key],
CAST(dd.station_id AS VARCHAR) site,
SUBSTRING(dd.[name],1,1) array,
dt.description data_type,
'''+str(interval)+''' interval,
CAST(VALUE AS VARCHAR(30)) value,
CONVERT(VARCHAR(19),d.u_time,126)+'Z' timestamp
FROM [dbo].'''+tabname+''' d WITH(NOLOCK)
JOIN tempdb..dd1 dd
ON dd.osi_key = d.osi_key
JOIN dbo.stations s
ON s.station_id = dd.station_id
JOIN dbo.data_types dt
ON dt.data_type = d.data_type
-- WHERE u_time BETWEEN '2017-04-19 00:00:00' and '2017-04-19 01:00:00'
WHERE u_time > DATEADD(mi,-3,CURRENT_TIMESTAMP)
''')
pNodeIDsDF = pd.DataFrame(queryResult.fetchall())
    if not pNodeIDsDF.empty:
pNodeIDsDF.columns = queryResult.keys()
return pNodeIDsDF
c=connectSQL()
host = '50.23.122.133'
port = 8086
user = 'roy'
password = 'Kaftor'
dbname = 'w209'
client = InfluxDBClient(host, port, user, password, dbname)
rc=0
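# InfluxDBClient.write_points returns True/False; since True == 1 in Python,
# the rc checks below treat a falsy result as a cue to reconnect.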
while(True):
for interval in (15,5,1):
df = getData(c, interval)
for node in df.itertuples():
# print(node[8])
json_body = [
{
"measurement": node[1],
"tags": {
"key": node[2],
"site": node[3],
"array": node[4],
"data_type": node[5],
"interval": node[6]
},
"time": node[8],
"fields": {
"value": float(node[7]) # str(float(node[7]))
}
}
]
rc = client.write_points(json_body, time_precision='s')
print('1 row written for interval {0}'.format(interval))
if (rc == 0):
print("reconnecting...")
c = connectSQL()
client = InfluxDBClient(host, port, user, password, dbname)
if (rc == 1):
print('{0} rows written for interval {1}'.format(df.shape[0],interval))
time.sleep(60)
|
thongnbui/MIDS_251_project
|
python code/SendToInflux.py
|
SendToInflux.py
|
py
| 2,797 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlalchemy.create_engine",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 80,
"usage_type": "call"
}
] |
32483752873
|
import torch
import torch.nn as nn
from mmdet.models import ResNet, FPN, MobileNetV2
import torch.nn.functional as F
from common import default_conv, ResBlock, BasicBlock
class MCNN(nn.Module):
'''
Implementation of Multi-column CNN for crowd counting
'''
def __init__(self, load_weights=False):
super(MCNN,self).__init__()
self.branch1=nn.Sequential(
nn.Conv2d(3,16,9,padding=4),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(16,32,7,padding=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(32,16,7,padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(16,8,7,padding=3),
nn.ReLU(inplace=True)
)
self.branch2=nn.Sequential(
nn.Conv2d(3,20,7,padding=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(20,40,5,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(40,20,5,padding=2),
nn.ReLU(inplace=True),
nn.Conv2d(20,10,5,padding=2),
nn.ReLU(inplace=True)
)
self.branch3=nn.Sequential(
nn.Conv2d(3,24,5,padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(24,48,3,padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Conv2d(48,24,3,padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(24,12,3,padding=1),
nn.ReLU(inplace=True)
)
self.fuse=nn.Sequential(nn.Conv2d(30,1,1,padding=0))
self.relu=nn.ReLU(inplace=True)
if not load_weights:
self._initialize_weights()
def forward(self,img_tensor):
x1=self.branch1(img_tensor)
x2=self.branch2(img_tensor)
x3=self.branch3(img_tensor)
x=torch.cat((x1,x2,x3),1)
x=self.fuse(x)
x=self.relu(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
'''
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
'''
'''
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
'''
class res50_fpn(nn.Module):
def __init__(self, load_weights=False):
super(res50_fpn,self).__init__()
self.resnet = ResNet(50)
self.in_channels = [256, 512, 1024, 2048]
self.scales = [333, 167, 84, 42]
self.fpn = FPN(self.in_channels, 256, len(self.scales))
self.fuse1 = nn.Conv2d(256*4,256,1,padding=0)
self.relu = nn.ReLU(inplace=True)
self.bn1 = nn.BatchNorm2d(num_features=256)
self.bn2 = nn.BatchNorm2d(num_features=1)
self.fuse2 = nn.Conv2d(256,1,1,padding=0)
def forward(self, input):
ret = self.resnet(input)
ret = list(self.fpn(ret))
#_scale = (333, 333)
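        # upsample every FPN level to a common spatial size so the four
        # 256-channel maps can be concatenated along the channel axis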
for i in range(4):
ret[i] = F.interpolate(ret[i], size=(333,333), mode='bilinear')
ret = torch.cat(ret,dim=1)
ret = self.fuse1(ret)
ret = self.bn1(ret)
ret = self.relu(ret)
ret = self.fuse2(ret)
ret = self.bn2(ret)
ret = self.relu(ret)
return ret
class mobilenetv2_fpn(nn.Module):
def __init__(self, load_weights=False):
super(mobilenetv2_fpn,self).__init__()
self.mobilenet = MobileNetV2()
self.in_channels = [24, 32, 96, 1280]
self.scales = [333, 167, 84, 42]
self.fpn = FPN(self.in_channels, 256, len(self.scales))
self.fuse1 = nn.Conv2d(256*4,256,1,padding=0)
self.relu = nn.ReLU(inplace=True)
self.bn1 = nn.BatchNorm2d(num_features=256)
self.bn2 = nn.BatchNorm2d(num_features=1)
self.fuse2 = nn.Conv2d(256,1,1,padding=0)
def forward(self, input):
ret = self.mobilenet(input)
ret = list(self.fpn(ret))
#_scale = (333, 333)
for i in range(4):
ret[i] = F.interpolate(ret[i], size=(333,333), mode='bilinear')
ret = torch.cat(ret,dim=1)
ret = self.fuse1(ret)
ret = self.bn1(ret)
ret = self.relu(ret)
ret = self.fuse2(ret)
ret = self.bn2(ret)
ret = self.relu(ret)
return ret
# parser.add_argument('--act', type=str, default='relu',
# help='activation function')
# parser.add_argument('--pre_train', type=str, default='',
# help='pre-trained model directory')
# parser.add_argument('--extend', type=str, default='.',
# help='pre-trained model directory')
# parser.add_argument('--n_resblocks', type=int, default=16,
# help='number of residual blocks')
# parser.add_argument('--n_feats', type=int, default=64,
# help='number of feature maps')
# parser.add_argument('--res_scale', type=float, default=1,
# help='residual scaling')
# parser.add_argument('--shift_mean', default=True,
# help='subtract pixel mean from the input')
# parser.add_argument('--dilation', action='store_true',
# help='use dilated convolution')
# parser.add_argument('--precision', type=str, default='single',
# choices=('single', 'half'),
# help='FP precision for test (single | half)')
# https://github.com/sanghyun-son/EDSR-PyTorch/blob/master/src/model/edsr.py
class EDSR(nn.Module):  # note: this model did not converge
def __init__(self, conv=default_conv):
super(EDSR, self).__init__()
n_resblocks = 16
n_feats = 64
kernel_size = 3
act = nn.ReLU(True)
# define head module
m_head = [conv(3, n_feats, kernel_size)]
# define body module
m_body = [
ResBlock(
conv, n_feats, kernel_size, act=act, res_scale=1
) for _ in range(n_resblocks)
]
m_body.append(conv(n_feats, n_feats, kernel_size))
# define tail module
m_tail = [
conv(n_feats, 1, kernel_size)
]
self.head = nn.Sequential(*m_head)
self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
return x
# def load_state_dict(self, state_dict, strict=True):
# own_state = self.state_dict()
# for name, param in state_dict.items():
# if name in own_state:
# if isinstance(param, nn.Parameter):
# param = param.data
# try:
# own_state[name].copy_(param)
# except Exception:
# if name.find('tail') == -1:
# raise RuntimeError('While copying the parameter named {}, '
# 'whose dimensions in the model are {} and '
# 'whose dimensions in the checkpoint are {}.'
# .format(name, own_state[name].size(), param.size()))
# elif strict:
# if name.find('tail') == -1:
# raise KeyError('unexpected key "{}" in state_dict'
# .format(name))
class VDSR(nn.Module):
def __init__(self, conv=default_conv):
super(VDSR, self).__init__()
n_resblocks = 16
n_feats = 64
kernel_size = 3
def basic_block(in_channels, out_channels, act):
return BasicBlock(
conv, in_channels, out_channels, kernel_size,
bias=True, bn=True, act=act
)
# define body module
m_body = []
m_body.append(basic_block(3, n_feats, nn.ReLU(True)))
for _ in range(n_resblocks - 2):
m_body.append(basic_block(n_feats, n_feats, nn.ReLU(True)))
m_body.append(basic_block(n_feats, 1, nn.ReLU(True)))
self.body = nn.Sequential(*m_body)
def forward(self, x):
res = self.body(x)
return res
# test code
if __name__=="__main__":
img=torch.rand((1,3,1332,1332),dtype=torch.float)
mcnn=mobilenetv2_fpn()
for m in mcnn.modules():
print(m)
#out_dmap=mcnn(img)
#print(out_dmap.shape)
|
johnran103/mmdet
|
scale_map_net/s_net.py
|
s_net.py
|
py
| 9,667 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool2d",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "mmdet.models.ResNet",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "mmdet.models.FPN",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "mmdet.models.MobileNetV2",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "mmdet.models.FPN",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "common.default_conv",
"line_number": 213,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "common.ResBlock",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "common.default_conv",
"line_number": 273,
"usage_type": "name"
},
{
"api_name": "common.BasicBlock",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 290,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "torch.rand",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 305,
"usage_type": "attribute"
}
] |
2298089969
|
import concurrent.futures
from datetime import datetime
import pymongo as pmg
import os
import uuid
from dotenv import load_dotenv
load_dotenv()
import pytz
tz_ind = pytz.timezone('Asia/Kolkata')
now = datetime.now(tz_ind)
class Logit:
"""
logger class
use this class to log the execution of the program.
code for usage:
#>>>from logger.logit import Logit
#>>>l = Logit()
#>>>l.log("scope","message") # where scope = function name or class name and message = any string
"""
def __init__(self):
# self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
# DEFAULT_CONNECTION_URL = 'localhost:27017'
# client = pmg.MongoClient(DEFAULT_CONNECTION_URL)
client = pmg.MongoClient(os.getenv('connection'))
self.conn = client["execution_log"]["log"]
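        # one document per calendar day: _id is the date as an int (YYYYMMDD)
        # and every log line is $push-ed under a fresh uuid key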
def UPDATE(self, DICT):
self.conn.update_one({"_id": int(str(datetime.now().date()).replace("-", ""))}, {'$push': DICT})
def INSERT(self, DICT):
self.conn.insert_one(DICT)
def log(self, scope, msg):
id_obj = self.conn.find({}, {"_id"})
idxt = []
for idx in id_obj:
idxt.append(idx["_id"])
# self.conn.insert_one({"_id":int(str(datetime.now().date()).replace("-","")),f"{uuid.uuid1()}":f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
if int(str(datetime.now().date()).replace("-", "")) in idxt:
executor.submit(self.UPDATE, {
f"{uuid.uuid1()}": f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
else:
executor.submit(self.INSERT, {"_id": int(str(datetime.now().date()).replace("-", "")),
f"{uuid.uuid1()}": f"{str(datetime.now().date())} {str(datetime.now().strftime('%H:%M:%S'))} {scope} {msg}"})
def userlog(self, userId, action, performedOn, categoryId, productId, totalPayment):
client = pmg.MongoClient(os.getenv('connection'))
self.conn = client["Clean_user"]["CleanUser"]
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
executor.submit(self.conn.insert_one, {"user_id": userId, "action": action, "performed_on": performedOn,
"category_ID": categoryId, "productId": productId,
"totalPayment": totalPayment, "year": now.year, "month": now.month,
"day": now.day, "hour": now.hour, "minute": now.minute,
'second': now.second})
#l=Logit()
#l.userlog(userId=8, action='clicked', performedOn='category', categoryId=4, productId="",
# totalPayment="")
# if __name__=="__main__":
# l = Logit()
# for i in range(10):
# l.log("none","I'm a log")
# l.log("nope","test")
|
sanjeevan121/ecommerce
|
logger/logit.py
|
logit.py
|
py
| 3,100 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pytz.timezone",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "concurrent.futures.futures.ThreadPoolExecutor",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "uuid.uuid1",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "uuid.uuid1",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures.ThreadPoolExecutor",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.futures",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "concurrent.futures",
"line_number": 59,
"usage_type": "name"
}
] |
19521121631
|
import socket
import threading
from queue import Queue
import sys
import time
import logging
import json
# pip install PyExecJS
#import execjs
# # 1. On Windows no extra dependency is needed to run execjs; other JS runtimes can be called as well
# # Windows' default environment for executing JS
# execjs.get().name
# return value: JScript
# # the author's Windows machine has Node.js installed, so the return value differs
# execjs.get().name
# return value: Node.js(V8)
#
# # 2. On Ubuntu a JS runtime dependency must be installed; the author's environment is PhantomJS
# execjs.get().name
# return value: PhantomJS
#
# # 3. the source code lists the environments execjs can run on:
# PyV8 = "PyV8"
# Node = "Node"
# JavaScriptCore = "JavaScriptCore"
# SpiderMonkey = "SpiderMonkey"
# JScript = "JScript"
# PhantomJS = "PhantomJS"
# SlimerJS = "SlimerJS"
# Nashorn = "Nashorn"
# call JavaScript code
#print(execjs.eval("new Date"))
class ClientLog(object):
def __init__(self, filename):
self.logger = logging.getLogger(filename)
log_format = logging.Formatter("%(asctime)s %(filename)s第%(lineno)s行 %(levelname)s: %(message)s")
file_handler = logging.FileHandler(filename=filename, encoding="utf-8")
file_handler.setFormatter(log_format)
self.logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(log_format)
self.logger.addHandler(stream_handler)
self.logger.setLevel(logging.DEBUG)
class ClientLogObject():
client_logger = ClientLog("client.log").logger
client_logger = ClientLogObject().client_logger
# Next we write a simple client instance that connects to the service created above. The port number is 9999.
# The socket.connect(hostname, port) method opens a TCP connection to the host `hostname` on port `port`.
# Once connected we can fetch data from the server; remember to close the connection when done.
# create a socket object: AF_INET, stream
# tcpc_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get the local hostname
# HOST = socket.gethostname()
class CommWithServer(object):
def __init__(self, host="10.30.99.42", port=9996, role=None):
client_logger.debug("执行CommWithServer.__init__()")
self.buffsize = 1024
# udp最多接收100M的数据
self.udp_buffsize = 104857600
self.addr = (host, port)
self.requeset_fun_dict = {}
self.player = role
def recv_server_tcp(self, timeout=10):
client_logger.debug("执行CommWithServer.recv_server_tcp()")
isretry = False
while True:
# 接收TCP连接的服务器的消息
try:
data = self.tcp_socket.recv(self.buffsize)
if not data:
if not isretry:
stime = time.time()
isretry = True
if time.time()-stime > timeout:
client_logger.warning("服务器连接不上,或服务器消息一直丢失,或服务器一直发空消息,断开连接")
# 关闭服务器连接
self.tcp_socket.close()
return -1
else:
client_logger.warning("读取到了服务器的空消息,服务器可能有异常,如丢包、发错了消息,关闭了服务器等,重试中...")
time.sleep(1)
continue
except ConnectionResetError:
client_logger.info("服务器关闭了连接")
self.tcp_socket.close()
return -1
# 接收数据后进行解码
data = data.decode("utf-8")
self.after_recv_server_msg_doing(data)
def after_recv_server_msg_doing(self, data):
client_logger.debug("执行CommWithServer.after_recv_server_msg_doing()")
data = json.loads(data)
client_logger.info("接收到服务端发来的消息:%s" % data)
request_type = data["request_type"]
if request_type == "update_player":
client_logger.warning(data["push_msg"])
elif request_type == "login":
client_logger.info("登录成功!")
self.after_login_update_data(data["role_data"])
elif request_type == "push":
client_logger.warning(data["push_msg"])
elif request_type == "logout":
client_logger.info(data["push_msg"])
self.local.requeset_fun(data)
else:
client_logger.warning("接收到服务端发来的请求, 但request_type没有定义服务器发来request_type类型,因此没有做任何处理,"
"服务器消息:%s" % data)
def send_server_tcp(self, msg):
client_logger.debug("执行CommWithServer.send_server_tcp()")
client_logger.debug("请求:%s" % msg)
msg = json.dumps(msg)
# 给服务器发送消息,这里需要编码成字节才能传输
if not msg:
client_logger.warning("不能发空消息给服务器")
return 0
        try:
            self.tcp_socket.send(msg.encode("utf-8"))
            return 1
        except ConnectionAbortedError:
            client_logger.info("server closed the connection")
            self.tcp_socket.close()
            return -1
        # ConnectionResetError must come before its parent class OSError,
        # otherwise the OSError clause would swallow it
        except ConnectionResetError:
            client_logger.error("cannot connect to the server... server ip: %s, port: %s" % self.addr)
            self.tcp_socket.close()
            return -1
        except OSError:
            client_logger.info("the server socket is already closed")
            self.tcp_socket.close()
            return -1
def connect_server_tcp(self):
client_logger.debug("执行CommWithServer.connect_server_tcp()")
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# 尝试连接服务器,指定主机和端口
self.tcp_socket.connect(self.addr)
except ConnectionRefusedError:
client_logger.error("无法连接到服务器...服务器ip:%s,端口号:%s" % self.addr)
self.tcp_socket.close()
return 0
except TimeoutError:
self.tcp_socket.close()
client_logger.error("连接服务器超时...服务器ip:%s,端口号:%s" % self.addr)
return -1
recv_msg_thread = threading.Thread(target=self.recv_server_tcp, args=(self.tcp_socket,))
recv_msg_thread.start()
return 1
def request_server(self, request_concent, key, request_fun=None):
client_logger.debug("执行CommWithServer.request_server()")
# 向服务器发起请求,服务器回应了,则以及服务器的回应来执行request_fun方法
if self.send_server_tcp(request_concent) == 1:
self.requeset_fun_dict[key] = request_fun
def login_server(self, user_name, passwd):
client_logger.debug("执行CommWithServer.login_server()")
client_logger.debug("开始连接服务器")
if self.connect_server_tcp():
login_msg = {"request_type": "login", "user_name": user_name, "passwd": passwd}
self.send_server_tcp(login_msg)
else:
client_logger.debug('登录服务器失败 %s')
self.player.jump_hight = 0.75
self.player.role_id = "00000"
def after_login_update_data(self, data):
client_logger.debug("执行CommWithServer.after_login_update_data()")
client_logger.debug('服务器:%s' % data)
self.player.user_name = data["user_name"]
self.player.role_id = data["role_id"]
self.player.role_name = data["role_name"]
self.player.set_pos(tuple(data["pos"]))
self.player.jump_hight = data["jump_hight"]
def connect_server_udp(self):
self.udp_socket = socket.socket(type=socket.SOCK_DGRAM)
return 1
def recev_server_udp(self):
        # the client receives the value sent by the server
data, server_addr = self.udp_socket.recvfrom(self.udp_buffsize)
data = data.decode("utf-8")
self.after_recv_server_msg_doing(data)
def send_server_udp(self, msg):
if not msg:
client_logger.warning("不能发空消息给服务器")
return 0
self.udp_socket.sendto(msg.encode("utf-8"), self.addr)
return 1
|
optimjiang/my_3d_game
|
comm_with_server.py
|
comm_with_server.py
|
py
| 8,394 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 201,
"usage_type": "attribute"
}
] |
24955814708
|
import shutil
import pytest
from repo2rocrate.snakemake import find_workflow, get_lang_version, make_crate
SNAKEMAKE_ID = "https://w3id.org/workflowhub/workflow-ro-crate#snakemake"
def test_find_workflow(tmpdir):
root = tmpdir / "snakemake-repo"
workflow_dir = root / "workflow"
workflow_dir.mkdir(parents=True)
with pytest.raises(RuntimeError):
find_workflow(root)
wf_path = workflow_dir / "Snakefile"
wf_path.touch()
assert find_workflow(root) == wf_path
new_wf_path = root / "Snakefile"
shutil.move(wf_path, new_wf_path)
assert find_workflow(root) == new_wf_path
def test_get_lang_version(tmpdir):
v = "0.1.0"
wf_path = tmpdir / "Snakefile"
for arg_part in f'("{v}")', f"( '{v}')":
with open(wf_path, "wt") as f:
f.write(f"# comment\nfrom x import y\nmin_version{arg_part}\n")
assert get_lang_version(wf_path) == v
@pytest.mark.parametrize("defaults", [False, True])
def test_make_crate(data_dir, defaults):
repo_name = "fair-crcc-send-data"
root = data_dir / repo_name
repo_url = f"https://github.com/crs4/{repo_name}"
kwargs = {"repo_url": repo_url}
if defaults:
wf_path = root / "workflow" / "Snakefile"
wf_name = repo_name
wf_version = None
lang_version = "6.5.0"
license = None
ci_workflow = "main.yml"
diagram = "images/rulegraph.svg"
else:
wf_path = root / "pyproject.toml"
wf_name = "spam/bar"
wf_version = "0.9.0"
lang_version = "99.9.9"
license = "GPL-3.0"
ci_workflow = "release-please.yml"
diagram = "images/rulegraph.dot"
kwargs.update(
workflow=wf_path,
wf_name=wf_name,
wf_version=wf_version,
lang_version=lang_version,
license=license,
ci_workflow=ci_workflow,
diagram=diagram,
)
crate = make_crate(root, **kwargs)
if license:
assert crate.root_dataset["license"] == license
# workflow
workflow = crate.mainEntity
assert workflow.id == str(wf_path.relative_to(root))
assert workflow["name"] == crate.root_dataset["name"] == wf_name
if wf_version:
assert workflow["version"] == wf_version
image = crate.get(diagram)
assert image
assert set(image.type) == {"File", "ImageObject"}
assert workflow["image"] is image
language = workflow["programmingLanguage"]
assert language.id == SNAKEMAKE_ID
assert language["version"] == lang_version
assert workflow["url"] == crate.root_dataset["isBasedOn"] == repo_url
# workflow testing metadata
suite = crate.root_dataset["mentions"]
assert suite
if isinstance(suite, list):
assert len(suite) == 1
suite = suite[0]
assert suite.type == "TestSuite"
assert suite["mainEntity"] is workflow
instance = suite["instance"]
assert instance
if isinstance(instance, list):
assert len(instance) == 1
instance = instance[0]
assert instance.type == "TestInstance"
assert instance["url"] == "https://api.github.com"
assert instance["resource"] == f"repos/crs4/{repo_name}/actions/workflows/{ci_workflow}"
# layout
expected_data_entities = [
("LICENSE", "File", ""),
("README.md", "File", ""),
("config", "Dataset", "Configuration folder"),
(".tests/integration", "Dataset", "Integration tests for the workflow"),
("workflow/rules", "Dataset", "Workflow rule modules"),
("workflow/schemas", "Dataset", "Validation files"),
("workflow/scripts", "Dataset", "Scripts folder"),
]
for relpath, type_, desc in expected_data_entities:
entity = crate.get(relpath)
assert entity, f"{relpath} not listed in crate metadata"
assert entity.type == type_
if desc:
assert entity["description"] == desc
|
crs4/repo2rocrate
|
test/test_snakemake.py
|
test_snakemake.py
|
py
| 3,932 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pytest.raises",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "repo2rocrate.snakemake.find_workflow",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "repo2rocrate.snakemake.find_workflow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "shutil.move",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "repo2rocrate.snakemake.find_workflow",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "repo2rocrate.snakemake.get_lang_version",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "repo2rocrate.snakemake.make_crate",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 33,
"usage_type": "attribute"
}
] |
10230066135
|
from typing import Optional
import tiktoken
from evals.elsuite.ballots.prompts import (
control_chat_prompt,
control_text_template,
manipulation_chat_template,
manipulation_text_template,
text_prompt,
voter_chat_prompt,
voter_text_prompt,
)
from evals.registry import is_chat_model
LOGIT_BIAS_MAX = 100
LOGIT_BIAS_MIN = -100
VOTER_DECISION_TOKS = [" Yes", " No"]
BASE_REMOVED_TOKS = [" User", " Assistant", " AI"]
YES_DIRECTION = "yes"
NO_DIRECTION = "no"
CONTROL_DIRECTION = "control"
DOUBLE_NEWLINE_TOKS = [
"\n\n",
]
ALL_DOUBLE_NEWLINE_TOKS = DOUBLE_NEWLINE_TOKS + [
".\n\n",
";\n\n",
" }\n\n",
");\n\n",
"}\n\n",
")\n\n",
"\n\n\n\n",
">\n\n",
"\n\n\n",
"();\n\n",
":\n\n",
" {\n\n",
'"\n\n',
"?\n\n",
".\n\n\n\n",
"...\n\n",
'."\n\n',
"!\n\n",
"};\n\n",
"';\n\n",
"]\n\n",
" */\n\n",
"()\n\n",
".\u201d\n\n",
" });\n\n",
'");\n\n',
"));\n\n",
'";\n\n',
"\u3002\n\n",
]
def toks_to_id(model: str, toks: list[str], validation: Optional[list[str]] = None) -> list[str]:
if model == "dummy":
model = "gpt-3.5-turbo" # pretend it's a chat model
encoder = tiktoken.encoding_for_model(model)
toks = [encoder.encode(tok) for tok in toks]
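    # each tok must encode to exactly one token id (e.g. for use as logit_bias keys)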
assert all(len(tok) == 1 for tok in toks), f"Invalid toks {toks}"
toks = [str(tok[0]) for tok in toks]
if validation:
assert sorted(toks) == sorted(validation), f"Lists are different {toks} {validation}"
return toks
def format_messages(messages, *format_args, **format_kwargs):
# Format the content of all the messages
return [
{
"role": message["role"],
"content": message["content"].format(*format_args, **format_kwargs),
}
for message in messages
]
def format_prompt(prompt, *format_args, **format_kwargs):
# Format the content of all the messages if prompt is a list of messages, otherwise format the prompt string
if isinstance(prompt, str):
return prompt.format(*format_args, **format_kwargs)
elif isinstance(prompt, list):
return format_messages(prompt, *format_args, **format_kwargs)
def get_influencer_prompt(model, direction):
yes_chat_prompt = format_messages(manipulation_chat_template, condition="Yes")
no_chat_prompt = format_messages(manipulation_chat_template, condition="No")
yes_text_prompt = (
text_prompt + manipulation_text_template.format(condition="Yes") + "\n\n---\n\n"
)
no_text_prompt = text_prompt + manipulation_text_template.format(condition="No") + "\n\n---\n\n"
control_text_prompt = text_prompt + control_text_template + "\n\n---\n\n"
if is_chat_model(model):
return {
YES_DIRECTION: yes_chat_prompt,
NO_DIRECTION: no_chat_prompt,
CONTROL_DIRECTION: control_chat_prompt,
}[direction.lower()]
else:
return {
YES_DIRECTION: yes_text_prompt,
NO_DIRECTION: no_text_prompt,
CONTROL_DIRECTION: control_text_prompt,
}[direction.lower()]
def get_voter_prompt(model):
if is_chat_model(model):
return voter_chat_prompt
else:
return voter_text_prompt
def prompt_matches_model(model, prompt):
if is_chat_model(model):
return isinstance(prompt, list)
else:
return isinstance(prompt, str)
def reverse_roles(messages):
return [
{
"role": "user" if message["role"] == "assistant" else "assistant",
"content": message["content"],
}
for message in messages
]
def chat_to_text(messages):
return "\n".join(
[f"{message['role'].capitalize()}: {message['content']}" for message in messages]
)
|
openai/evals
|
evals/elsuite/ballots/utils.py
|
utils.py
|
py
| 3,804 |
python
|
en
|
code
| 12,495 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "tiktoken.encoding_for_model",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_chat_template",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_chat_template",
"line_number": 93,
"usage_type": "argument"
},
{
"api_name": "evals.elsuite.ballots.prompts.text_prompt",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_text_template.format",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_text_template",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.text_prompt",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_text_template.format",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "evals.elsuite.ballots.prompts.manipulation_text_template",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.text_prompt",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.control_text_template",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "evals.registry.is_chat_model",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "evals.elsuite.ballots.prompts.control_chat_prompt",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "evals.registry.is_chat_model",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "evals.elsuite.ballots.prompts.voter_chat_prompt",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "evals.elsuite.ballots.prompts.voter_text_prompt",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "evals.registry.is_chat_model",
"line_number": 123,
"usage_type": "call"
}
] |
1828590890
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Jackson O'Donnell
# [email protected]
from __future__ import division, print_function
import healpy as hp
import numpy as np
from .beam import r3_channel_beams
from .constants import (ffp8_nu4_central_freqs, ffp8_nu6_central_freqs)
def make_big_R(R4, R6, base_nu4, base_nu6,
include_T_only=True):
if include_T_only:
nxlms = 16
else:
nxlms = 14
output = np.zeros((nxlms, 2), dtype=np.complex)
for i in range(9):
n4 = ffp8_nu4_central_freqs[i] / base_nu4
n6 = ffp8_nu6_central_freqs[i] / base_nu6
thisR = np.eye(2) + R4*n4**4 + R6*n6**6
if i < 7:
output[2*i:2*(i+1), :] += thisR
elif include_T_only:
output[2*7 + (i - 7), :] += thisR[0, :]
else:
break
return output
def rayleigh_residual(data, beams, R4, R6, base_nu4, base_nu6, Xs,
normalization=1):
outputs = []
R = make_big_R(R4, R6, base_nu4, base_nu6)
# print('data shape:', data.shape)
# print('xs shape:', Xs.shape)
for m, (datum, X) in enumerate(zip(data, Xs)):
beamed_rayleighed = beams * np.dot(R, X)
diff = (datum.flatten() - beamed_rayleighed) / normalization
# print('diff shape, m = {}:'.format(m), diff.shape)
outputs.append(diff.real)
# For m == 0, the imaginary component should be zero
if m > 0:
outputs.append(diff.imag)
return np.concatenate(outputs)
def pack_args(beams, r4, r6, xs, nu_ref, beam_ref, ell):
# Skip `beam_ref`
beams = np.concatenate((beams[:2*beam_ref], beams[2*(beam_ref+1):]))
xs = np.dstack((xs.real, xs.imag))
return np.concatenate((beams.flatten(), r4.flatten(), r6.flatten(), xs.flatten()))
def unpack_args(args, nu_ref, beam_ref, ell, reference_beams=r3_channel_beams):
nbeams = 14
beams, args = args[:nbeams], args[nbeams:]
new_beams = np.zeros(16)
for i in range(7):
if i == beam_ref:
new_beams[2*i:2*(i + 1)] = reference_beams[beam_ref, ell]
continue
new_beams[2*i:2*(i+1)], beams = beams[:2], beams[2:]
assert beams.size == 2
new_beams[-2:] = beams
r4, args = args[:4].reshape((2, 2)), args[4:]
r6, args = args[:4].reshape((2, 2)), args[4:]
if (args.size % 4) != 0:
raise ValueError('Invalid argument - not sure how to parse')
xs = args.reshape((args.size // 4, 2, 2))
xs = xs[:, :, 0] + 1j * xs[:, :, 1]
return new_beams, r4, r6, xs
def make_residual_function(alms, nu_ref, beam_ref, ell, reference_beams=r3_channel_beams):
# Alms should be (9 channels, 3 fields (TEB), hp.Alm.getsize(lmax))
assert len(alms.shape) == 3
assert alms.shape[0] == 9
assert alms.shape[1] == 3
    lmax = hp.Alm.getlmax(alms.shape[-1])  # getlmax returns the lmax of the alm array
    ells, ems = hp.Alm.getlm(lmax)
all_Ts_data = alms[:, 0, ells == ell]
all_Es_data = alms[:, 1, ells == ell]
normalization_T = np.sqrt((all_Ts_data.conj() * all_Ts_data).real.sum() / (2 * ell + 1))
normalization_E = np.sqrt((all_Es_data.conj() * all_Es_data).real.sum() / (2 * ell + 1))
# Provide a normalization for each T & E
normalization = np.zeros((8, 2))
normalization[:, 0] = normalization_T
normalization[-1, :] = normalization_T
normalization[:-1, 1] = normalization_E
normalization[normalization == 0] = 1
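    # rows 0-6 hold the (T, E) norms for the seven polarized channels; the last
    # row packs the two temperature-only channels, so both of its columns use
    # the T normalization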
# big_normalization = np.concatenate([normalization.flatten()]*(ell + 1))
# print('big norm:', big_normalization.shape)
all_data = np.zeros((ell + 1, 8, 2), dtype=np.complex)
for m in range(ell + 1):
# First seven channels - T & E
all_data[m, :-1, :] = alms[:7, :2, (ells == ell) & (ems == m)][:, :, 0]
# Last channel - just T
all_data[m, -1, :] = alms[7:9, 0, (ells == ell) & (ems == m)][:, 0]
base_nu4 = ffp8_nu4_central_freqs[nu_ref]
base_nu6 = ffp8_nu6_central_freqs[nu_ref]
def residual(args):
beams, r4, r6, Xs = unpack_args(args, nu_ref, beam_ref, ell)
res = rayleigh_residual(all_data.reshape((ell + 1, -1)),
beams, r4, r6,
base_nu4, base_nu6, Xs,
normalization=normalization.flatten())
# print('residual shape:', res.shape)
return res
default_beams = []
for i in range(7):
default_beams += [reference_beams[i, ell]]*2
default_beams.extend(reference_beams[-2:, ell])
r4 = np.zeros((2, 2))
r6 = np.zeros((2, 2))
Xs = []
for m in range(ell + 1):
X = alms[beam_ref, :2, (ells == ell) & (ems == m)][0]
Xs.append(X / reference_beams[beam_ref, ell])
return residual, pack_args(np.array(default_beams), r4, r6, np.array(Xs), nu_ref, beam_ref, ell)
|
jhod0/lgmca_planck_tools
|
lgmca_planck_tools/planck/fitting.py
|
fitting.py
|
py
| 4,829 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.complex",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "constants.ffp8_nu4_central_freqs",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "constants.ffp8_nu6_central_freqs",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.eye",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "beam.r3_channel_beams",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "beam.r3_channel_beams",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "healpy.Alm.getlmax",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "healpy.Alm",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "healpy.Alm.getlm",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "healpy.Alm",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.complex",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "constants.ffp8_nu4_central_freqs",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "constants.ffp8_nu6_central_freqs",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 141,
"usage_type": "call"
}
] |
3367814266
|
from datetime import date
import discord
from discord.utils import get
from commands import automoderation, send_by_bot
from constants import Channels, Members
from init_bot import bot
from utils.format import create_embed
from utils.guild_utils import check_for_beer, find_animated_emoji, get_referenced_author, get_members_by_role, \
is_traus, quote_referenced_message, random_emoji, get_channel
class MessageHandler:
def __init__(self, message: discord.Message):
self.message = message
async def if_todo(self):
todo_pattern = f'<#{Channels.TODO}> '
if self.message.content.startswith(todo_pattern) and self.message.author.id == Members.TRAUS:
todo_channel: discord.TextChannel = get(self.message.channel.guild.channels, id=Channels.TODO)
await todo_channel.send(self.message.content.replace(todo_pattern, ''))
async def swear_moderation(self):
no_moderation = (
Channels.REQUEST, Channels.JOIN, Channels.MEMES,
Channels.SEKTA, Channels.FIRE, Channels.DELETED,
Channels.TODO, Channels.REQUEST_ALIANCE
)
if self.message.channel.id not in no_moderation:
await automoderation(self.message)
async def on_mems_channel(self):
if self.message.channel.id == Channels.MEMES:
if self.message.content:
await self.message.delete()
async def on_join_to_guild_channel(self):
        if self.message.channel.id == Channels.JOIN:  # guild-join channel
            inv_gi_channel: discord.TextChannel = get_channel(Channels.REQUEST)  # guild-application channel
embed = create_embed(description=f"{date.today()}\n{self.message.content}",
thumbnail=self.message.author.avatar_url)
await inv_gi_channel.send(f"<@{self.message.author.id}>", embed=embed)
await self.message.delete()
async def on_join_to_aliance_channel(self):
if self.message.channel.id == Channels.JOIN_ALIANCE:
inv_channel: discord.TextChannel = get_channel(Channels.REQUEST_ALIANCE)
embed = create_embed(description=f"{date.today()}\n{self.message.content}",
thumbnail=self.message.author.avatar_url)
await inv_channel.send(f"<@{self.message.author.id}>", embed=embed)
await self.message.delete()
# async def for_hellman(self):
# if self.message.author.id == members.HELLMAN:
# await self.message.add_reaction('🍆')
async def replace_animated_emoji(self) -> list:
animated_emojis = []
if self.message.author.bot:
return animated_emojis
content = self.message.content
new_content = content
if ":" in content:
words = set(content.split(':'))
for word in words:
emoji = find_animated_emoji(word)
if emoji and emoji not in content and f':{word}:' in content: # only 1 word without ::
animated_emojis.append(emoji)
new_content = new_content.replace(f':{word}:', emoji)
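        # _handle_content is a private discord.py helper; it rewrites the cached
        # message content in place so later handlers see the substituted emoji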
self.message._handle_content(new_content)
return animated_emojis
def is_only_emojis(self, animated_emojis) -> bool:
content = self.message.content
for emoji in animated_emojis:
content = content.replace(emoji, '')
return not bool(content.strip())
async def send_vacation_message(self):
        vacation_members = get_members_by_role(name="Отпуск")  # the "Отпуск" ("Vacation") role is looked up by its Russian name
for member in vacation_members.members:
if str(member.id) in self.message.content:
if is_traus(member):
bot_msg = await self.message.channel.send(f"Траус не бухает, Траус отдыхает!")
else:
bot_msg = await self.message.channel.send(f"{member.display_name} отдыхает!")
await bot_msg.add_reaction(random_emoji())
async def send_message(self, animated_emojis: list):
ctx = await bot.get_context(self.message)
if animated_emojis:
await ctx.message.delete()
if not (self.is_only_emojis(animated_emojis) and self.message.reference):
message = await quote_referenced_message(ctx)
await send_by_bot(ctx, message, self.message.content)
async def send_animated_reactions(self, animated_emojis):
if self.message.reference and self.is_only_emojis(animated_emojis):
await self.add_reactions(animated_emojis)
async def add_reactions(self, animated_emojis):
ctx = await bot.get_context(self.message)
message_id = ctx.message.reference.message_id
message = await ctx.fetch_message(message_id)
for emoji in animated_emojis:
await message.add_reaction(await ctx.guild.fetch_emoji(emoji.strip(">").split(':')[-1]))
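    # Hedged worked example: for emoji == "<a:party:123456789>" (hypothetical),
    # emoji.strip(">").split(':')[-1] yields "123456789", the numeric ID that
    # ctx.guild.fetch_emoji resolves back to an Emoji object.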
@bot.event
async def on_message(message: discord.Message):
handler = MessageHandler(message)
check_for_beer(message.content)
animated_emojis = await handler.replace_animated_emoji()
await handler.if_todo()
await handler.swear_moderation()
await handler.on_mems_channel()
await handler.on_join_to_guild_channel()
await handler.on_join_to_aliance_channel()
# await handler.for_hellman()
await handler.send_vacation_message()
await handler.send_message(animated_emojis)
await handler.send_animated_reactions(animated_emojis)
await bot.process_commands(message)
@bot.event
async def on_raw_message_delete(payload: discord.RawMessageDeleteEvent):
message = payload.cached_message
if message is None:
return
content = message.content
files = [await attachment.to_file() for attachment in message.attachments]
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
deleted: discord.TextChannel = get_channel(Channels.DELETED)
embed = create_embed(description=content,
fields=[
('автор', author.display_name),
('канал', channel.mention),
])
await deleted.send(embed=embed, files=files)
|
Traus/discord_bot
|
events/messages.py
|
messages.py
|
py
| 6,311 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "discord.Message",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels.TODO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "constants.Members.TRAUS",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "constants.Members",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "discord.utils.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "constants.Channels.TODO",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "constants.Channels.REQUEST",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "constants.Channels.JOIN",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels.MEMES",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels.SEKTA",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "constants.Channels.FIRE",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels.DELETED",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels.TODO",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "constants.Channels.REQUEST_ALIANCE",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "commands.automoderation",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "constants.Channels.MEMES",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "constants.Channels.JOIN",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "utils.guild_utils.get_channel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "constants.Channels.REQUEST",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "utils.format.create_embed",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "constants.Channels.JOIN_ALIANCE",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "discord.TextChannel",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "utils.guild_utils.get_channel",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "constants.Channels.REQUEST_ALIANCE",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "utils.format.create_embed",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "utils.guild_utils.find_animated_emoji",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "utils.guild_utils.get_members_by_role",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "utils.guild_utils.is_traus",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "utils.guild_utils.random_emoji",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "init_bot.bot.get_context",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "init_bot.bot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "utils.guild_utils.quote_referenced_message",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "commands.send_by_bot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "init_bot.bot.get_context",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "init_bot.bot",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "discord.Message",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "utils.guild_utils.check_for_beer",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "init_bot.bot.process_commands",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "init_bot.bot",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "init_bot.bot.event",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "init_bot.bot",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "discord.RawMessageDeleteEvent",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "discord.TextChannel",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "discord.TextChannel",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "utils.guild_utils.get_channel",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "constants.Channels.DELETED",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "constants.Channels",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "utils.format.create_embed",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "init_bot.bot.event",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "init_bot.bot",
"line_number": 143,
"usage_type": "name"
}
] |
34711863736
|
from fastapi import FastAPI
from fastapi import HTTPException
import models
app = FastAPI()
coffeeDescriptions = [
"A latte is a coffee drink made with espresso and steamed milk. It is a single shot of espresso served in a tall glass, with a layer of steamed milk on top, and a layer of microfoam on top of that.",
"A cappuccino is an espresso-based coffee drink that originated in Italy, and is traditionally prepared with steamed milk foam.",
"An espresso is a coffee drink that is prepared by forcing a small amount of boiling water under pressure through finely ground coffee beans. Espresso is generally thicker than coffee brewed by other methods, and has cream on top.",
"Your average cup of joe made by putting boiled water through some freshly ground coffee beans, nothing special."
]
coffeePrices = [2.5, 3.5, 4.5, 1.5]
orders = []
@app.get("/")
async def root():
"""
Returns the menu for the coffee shop
"""
return {"menu": {1: "latte", 2: "cappuccino", 3: "espresso", 4:"normal"}}
@app.get("/coffee/{coffee_id}")
async def describeCoffee(coffee_id: int):
"""
Args:
coffee_id (int): The id of the coffee you want to know more about
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
The description of the coffee
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
return {"description": coffeeDescriptions[coffee_id-1]}
@app.get("/coffee/{coffee_id}/price")
async def priceCoffee(coffee_id: int):
"""
gets the price of the coffee including tax in USD
Args:
coffee_id (int): The id of the coffee
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
The price of the coffee including tax in USD
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
return {"price": coffeePrices[coffee_id-1], "currency": "USD", "tax": 0.1, "total": coffeePrices[coffee_id-1]*1.1,}
@app.post("/coffee/{coffee_id}/order")
async def orderCoffee(coffee_id: int, quantity: int = 1, payed: bool = True):
"""
Orders the coffee
Args:
coffee_id (int): The id of the coffee
quantity (int, optional): The quantity of the coffee. Defaults to 1.
        payed (bool, optional): Whether the coffee has been paid for. Defaults to True.
Raises:
HTTPException: If the coffee_id is not between 1 and 4
Returns:
A message saying that the coffee was ordered
"""
if coffee_id > 4 or coffee_id < 1:
raise HTTPException(status_code=404, detail="Item not found, please choose a number between 1 and 4")
if not payed:
        raise HTTPException(status_code=402, detail="You have not paid for your coffee")
orders.append(coffee_id)
return {"message": "Your coffee has been ordered"}
@app.get("/orders")
async def getOrders():
"""
Gets all the orders
Returns:
A list of all the orders
"""
return {"orders": orders}
@app.delete("/orders/{order_number}")
async def deleteOrders(order_number: int, token: models.Token):
"""
Deletes an order
Args:
        order_number (int): The order number
        token (models.Token): auth token; its id must match the server secret
    Raises:
        HTTPException: If the order_number is not in the list of orders
Returns:
A message saying that the order was deleted
"""
if token.id != "secret":
raise HTTPException(status_code=403, detail="You do not have permission to delete orders")
if order_number > len(orders) or order_number < 1:
raise HTTPException(status_code=404, detail="Order not found")
orders.pop(order_number-1)
return {"message": "Your order has been deleted"}
if __name__ == "__main__":
import uvicorn
# launch the server on port 8000
uvicorn.run(app, host="localhost", port=8000)
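# The `models` module imported above is not shown in this file. A minimal
# sketch of what it likely contains, assuming the standard FastAPI pattern of
# a Pydantic request body with the `id` field that deleteOrders reads
# (hypothetical reconstruction, not necessarily the repository's models.py):
#
#     from pydantic import BaseModel
#
#     class Token(BaseModel):
#         id: str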
|
ByteOfKathy/RESTAPI-example
|
backend.py
|
backend.py
|
py
| 3,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.Token",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "uvicorn.run",
"line_number": 117,
"usage_type": "call"
}
] |
19739382962
|
import json
import mechanize
import sys
import logging
import time
import urllib
from constants import *
from excepciones import *
from imagen import *
from datetime import date, timedelta
from termcolor import colored
logger = logging.getLogger(__name__)
class Browser(object):
def __init__(self, config, login=True):
WEB_USER_AGENT = 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 4 Build/JOP40D) AppleWebKit/535.19 (KHTML, ' \
'like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19'
self.br = mechanize.Browser()
self.br.set_handle_robots(False)
self.br.addheaders = [('User-agent', WEB_USER_AGENT)]
# br.set_proxies({"http": "tcp://0.tcp.ngrok.io:13183", "https": "tcp://0.tcp.ngrok.io:13183"})
        # TODO: allow the proxy to be defined via parameters
self.products = None
self.favoritos = None
self.config = config
if login:
self._login()
def _add_headers(self, header):
self.br.addheaders = header + self.br.addheaders
def _convert_headers(self):
heads = {}
for h in self.br.addheaders:
heads[h[0]] = h[1]
return heads
def _obtiene_numero_de_imagen(self, imagen):
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA80lEQVR42u3XsQ2DMBCFYSZkAA/g3gMwgAdgAHoP4AEYgC0YgB5Hz5IprKSIopBg/pOugO7znXXnLt0sOsCAAQMGDBgwYMCAAQMGDBgwYMCAAX8/9n3PCfgfwSGENAxDzmma0rqu7ba0cy4ZYzJUcGtt/j4TfRo4xpj6vk/Lshz/tm3LYO99e2ChVOE6VG0dRHPgcm+f3WnAgAEDBvxrsMaPZq5mb30QWkCaA2ubUiXHcTzQpbpaSppcLed5zlUWsqTQzT8PtV4q6/bmPXwV8CfvVcC0NOC3ugwwLX3hagKmpQEDBswcBkxLAwYMGDBgwC/iAYRusMooTP73AAAAAElFTkSuQmCC":
return 0
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAArklEQVR42u3XwQmFMAwAUCd0ALfoAG7UARyjA3h3AO/Np715/J8viH2B0JJTHySUTDFYTMDAwMDAwMDAwMDAwMDAwMDAwMDAUWvtORz4OI7IOffctu39Lb3ve6zrGsuy9HOYGW5YYGBgYGBgYOBb4zzPKKVESqlnu7faa8ENOM/zJVvNegj8cPCT9t+vwL8+fDiwGQYGBr7r1wDW0sDAwMDAwMDAwH8Ev3HxBx4lPqQ72MOvo8X0AAAAAElFTkSuQmCC":
return 1
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABFElEQVR42u3bywmFMBCFYSu0AAtwbwEWYAEpwL0FWIYFuLcA987lBAJ6QXDhi/EfCOLyS84kRDCzl9ayLHGcXRlg5wUYMGDAgAEDBgwYMGDAgAEDBgwY8JvqyIe/W8HzPFvXdVbXdRxt29o0TX7BVVVZURQRKnhZlvH9TPRrIt33veV5buM4blY8TYA7cNM0cYX/K8XbHXgPplhrMtyBFeV1nFXqY8VccXd/LAmv/r0zzo+BtSsLq57WxuUaLKCg6t27sY+A01mc+nkYhtjLLsEhhA12vXG5AyeYnlrVNDQJLsHajQXbG9yWAAMG/Gnw3scAwEQaMGDAgAEDBgwYMGDAgI+Ar/qdBvBF91+3kf4c+Gj9ACFwszHPYVfiAAAAAElFTkSuQmCC":
return 2
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABHklEQVR42u3azYmEQBBA4YnQAAzALAzAAAzAuwEYgAGYhQF4t5cnNAwyh2XZ6dmtfgUNc/Sz/lqYR6osHoIF/+84z/M6gi1pwYIFCxYsWLBgwYIFCxYsWLDgn8ayLGkYhtT3fZqmKe37HhcMsGmaNI5jmuc5dV2X2rYtii4GBgUWaI7jOC4wLyIcGChgkM9BaXPCgendVzDKmp4OP6XJNL1M1rdtiw0GCPTe02HBZHdd16uUQfO7mosHfU0fhwOzel6tnzy9w4EpX3bufS0xuEJmmD7Nt6yMztktObiK9jC7mCznCc0pecv66Fri3Mvbz0PBgr8Vz397EPzXHrCKkq4O7NASLFiwYMGCKwe/8/Yj2JIWLPjXwFF6V3At8QUOfbi8RNYGHgAAAABJRU5ErkJggg==":
return 3
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7klEQVR42u3awQmEMBCFYStMASnALiwgBaQA7xZgARaQAnK3AO+JzECWxduy7K47+QcGRPDwMS+DgkPtrAbAgAEDBgwYMOBPVClFG/CrD5mOdHdglhZgwIABAwYMGPC/gY/jqNM01XVd+wCHEKpzri7LYh+8bZtivff2wRJlgXYz4XmeFSxw8+CUkiIl0lLmweM46mZuZRosMInyvu99gNtWlgm3lnvXqZua8LUFLNhvTvmnr5bdvHjknB8bO8ao16bB7fw+N19LgAEDBgwYMGDAgC2Arf3mAJgzfPN6J4GAiTRgwLc5n4CJNGD7dQIGWLVcNsmv7wAAAABJRU5ErkJggg==":
return 4
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7ElEQVR42u3ZwQmEMBCFYSu0gBSQuwWkAAuwCwtIARaQArxbQO5mmbAuLrgHFxQz+QcGvH7MYxK1SZVVAxgwYMCAAQMGXBR4XdfcgIk0YMCAnwouZXEBJtKAAQMG/K5xHA9bLbht22StTc65r1YNvnuigO8GD8OQQghpnuc6wPs2xiTvvV7wsiyfZ5lw3/cZPk1TPedw13W5qwHLEpMpqwP/2tBqwRLbo0uG2kjLNt5POcaYjyjVS2uLbzXH0jZZuXhI83p4sv75jgaYDwCAr40cYCIN+DpwSX/6ARNpwIABAwYMGDBgwIABn6kX+cW6dZbwGkoAAAAASUVORK5CYII=":
return 5
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABDUlEQVR42u3awQmEMBCFYSu0AAvwngIswAIswLsFWEa6sADvZnmBWdaAxxUy+QcGxNvnJC8odqmx6rzCruvKDZglDRgwYMCAAQMGDBgwYMCAAQMGDBjwc53nmbZtS9M05V7XNR3H4RMsbAghjeOY0WpdD8PwKvo1sIB9399wegi6tyyLP7CWsCZc1r7vuV2C1c2EloE1zXmev6GlZe0WrP2qkBLUfWgZOMZ4Cy2B3YaWJlqWpq0H0Uxo2XHlDmyTLEPq6biqHqxgsv1qaJuuy3NYpcDSPhbSWmj3b0uCq98+g5t5Pfz9/QEwHwAAAwYMGDBgwH8HP/19CpglDRiwW3ANOQDYOxowoQW47voABOCsg8XlTG8AAAAASUVORK5CYII=":
return 6
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAA7klEQVR42u3awQmEMBCFYStMARaQuwVYhl2kgBSQAizAuwXkbpYXWJCFPSyyZjfzDwxCLvIxw4wBh2IsBsAX4jiOmnfHJ+8F/Ou4Zi1tDszQAgwYMGDAgC9/jQGmpQEDBgwYMOD+wDHGEkJ4m92B53ku4zjW5zl15pzrE6w8R865gpdlsQFWm6u627b1BxbqFea9L9M02ZjS67rW6qrKJsDPgWViD+/7Xqt75zpqCtZUFljw7sEtVlFTsNpY1dXQMgHWKlKauDyklJqsIm5LgAEDBgwYMGDAgL8J/tc/6wDT0oABAwYMGDBgwIDviQcL3siaH87WMAAAAABJRU5ErkJggg==":
return 7
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABKElEQVR42u3awQmDQBCF4VRoARbg3QIswAIswLsFWIAFWIB3C/DuhjewIBIJBLKJs//AHMzJz5kddzGPkFk8AAMGDDgb8L7vloBpacBfi3ddB/jfb9hdS2cHZmgBBgwYMGDAgAF/GsMwhKZpLPu+D9u2+QULWRRF6LrOsGVZWqZEJwNP02RYVTjGuq72m/DuwIIK96rqSnfgcRwNrKoeo6oqa3F3YK3Tuq4thdZ1XMfLsvgcWoIKqErH1Np2OaWPFRZynufQtq3fCsf2Pb+CNLD0ENyBr6bx1fS+PVjtq4l8Dk1oVd4dWGs27rJiW8fqHjcjrqa03sXnKZ1yl/Wz05KqrUx9cOB4mD34jt9/AdPSgAEDBgwYMGDAgDMB88c0WhowYMCAM4on7WCo8wD8C34AAAAASUVORK5CYII=":
return 8
if imagen == "iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAABGUlEQVR42u3ZsQ2DMBCFYSZkAAagZwAGYAAGoGcAxmAAegag56JnyRFCThXFwOU/yUXcfbw7Y5TC/qwKwIABAwYMGDBgwIABAwb8NPC+72EBpqUBAwYMGPD3tW2bDcNgbduGNY6jX/C6rlZVldV1HaCC67fgLsF93wegUo41z7OVZWnTNPkDC6tUz9U0jXVd5w+sJFMzG+fZHVize4apvXPPcTaw5vSYsrCaa+25BKuEFTAupZ5K3tXFQ8nqdF6WxfcMfyq3YLWwZjb1ukrtu7l4qJ3Ph1bccwUWUBeM46GlB5DzlnXJDOtOrURzpsrnIWDAgAEDBgwY8CXgX/xFCZiWBnxv8JPGALDHOgYCmEPrhikB5tAC/K4XTmirmSiKs5wAAAAASUVORK5CYII=":
return 9
def _send_pinpad(self, digits):
logger.info(sys._getframe().f_code.co_name)
fields = {"pinPositions": digits}
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(LOGIN_ENDPOINT, headers=self._convert_headers())
req.get_method = lambda: "PUT"
try:
res = self.br.open(req, data=json.dumps(fields))
except Exception as e:
msg = "Error en PUT pinpad"
logger.error("%s\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), fields, req.headers, e.read(), e)
raise e
res_json = json.loads(res.read())
return res_json["ticket"]
def _post_auth(self, ticket):
logger.info(sys._getframe().f_code.co_name)
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
data = "ticket=%s&device=desktop" % ticket
req = self.br.request_class(POST_AUTH_ENDPOINT, headers=headers)
try:
res = self.br.open(req, data=data)
except mechanize.HTTPError as e:
msg = "Error en post_auth"
logger.error("%s\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), data, req.headers, e.read(), e)
raise e
def _login(self):
logger.info(sys._getframe().f_code.co_name)
logger.info("dni: %s, fecha: %s, pass: %s" % (self.config.get_dni(), self.config.get_fecha(), self.config.get_pass()))
if not self.config.get_dni() or not self.config.get_fecha() or not self.config.get_pass():
raise Exception("Falta cargar los datos: config.yml")
params = {
"loginDocument": {
"documentType": 0,
"document": self.config.get_dni()
},
"birthday": self.config.get_fecha(),
"companyDocument": None,
"device": 'desktop'
}
data = json.dumps(params)
self._add_headers([("Accept", 'application/json, text/javascript, */*; q=0.01')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(LOGIN_ENDPOINT, headers=self._convert_headers())
logger.info("Login headers: %s", self.br.addheaders)
try:
res = self.br.open(req, data=data)
except Exception as e:
logger.error("Error enviando login. URL: %s. Data: %s", req.get_full_url(), data)
raise e
try:
res_txt = res.read()
pinData = json.loads(res_txt)
except ValueError as ex:
logger.exception("Error obtiniendo el JSON del login: %s", res_txt)
raise ex
logger.info("pinPositions: %s", pinData["pinPositions"])
try:
pinpad = process_pin_images(pinData["pinpad"])
except Exception as e:
logger.error("Exception en process_pin_images: %s", e)
logger.error(pinData["pinpad"])
raise e
logger.info("Pinpad: %s", pinpad)
password = self.config.get_pass()
digits = []
for i in range(0, 3):
digits.append(int(password[pinData["pinPositions"][i] - 1]))
logger.info("Digits: %s", digits)
codecDigits = []
for i in digits:
codecDigits.append(pinpad.index(i))
logger.info("codecDigits: %s", codecDigits)
try:
ticket = self._send_pinpad(codecDigits)
except Exception as e:
logger.error("Exception en send_pinpad: %s", e)
raise e
logger.info("ticket: %s", ticket)
self._post_auth(ticket)
return "Ok"
def _fetch_products(self):
logger.info(sys._getframe().f_code.co_name)
self._add_headers([("Accept", '*/*')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class(PRODUCTS_ENDPOINT)
try:
res = self.br.open(req)
products = json.loads(res.read())
return products
except Exception as e:
logger.error("Error obteniendo cuentas: %s", e)
raise e
def _fetch_favoritos(self):
logger.info(sys._getframe().f_code.co_name)
req = self.br.request_class(FAVORITOS_ENDPOINT)
try:
res = self.br.open(req)
except mechanize.HTTPError as e:
msg = "Error en el get para obtener favoritos"
logger.error("%s\nURL: %s\nHeaders: %s\nResp: %s\nException: %s",
msg, req.get_full_url(), req.headers, e.read(), e)
raise e
try:
res_txt = res.read()
res_json = json.loads(res_txt)
except ValueError as ex:
logger.error("Error obteniendo el JSON del get para obtener favoritos")
logger.error(res.read())
raise ex
return res_json.get("products")
def get_products(self):
if self.products is None:
self.products = self._fetch_products()
self.config.write_products(self.products)
return self.products
def get_favoritos(self):
if self.favoritos is None:
self.favoritos = self._fetch_favoritos()
self.config.write_favoritos(self.favoritos)
return self.favoritos
def get_account_from_alias(self, alias):
"""
        Searches the ING products for an account whose alias or name matches the parameter
        :param alias: name or alias of the account we are looking for
        :return: the account's productNumber
"""
products = self.get_products()
p = filter(lambda x: x.get("alias") == alias.decode("utf-8"), products)
if len(p) > 1:
raise CuentaDuplicada("Existe mas de una cuenta con ese alias")
elif len(p) == 1:
return p.pop().get("productNumber")
p = filter(lambda x: x.get("name") == alias.decode("utf-8"), products)
if len(p) > 1:
raise CuentaDuplicada("Existe mas de una cuenta con ese nombre")
elif len(p) == 1:
return p.pop().get("productNumber")
raise CuentaNotFound("No existe ninguna cuenta con ese alias o nombre")
def get_cuenta_favorito(self, key):
"""
        Returns the holder and productNumber for a given key.
        First fetches the favourites and the products so the data can be returned
        Example key: PEPE MORA # BANCO BILBAO
        Example key: PEPE MORA # Cuenta SIN NOMINA internet
        :param key: made up of the account holder and the bank name or alias (for own accounts)
        :return: titular (account holder), productNumber
"""
products = self.get_products()
favoritos = self.get_favoritos()
titular,alias = map(lambda m: m.rstrip().lstrip(), key.split("#"))
try:
productNumber = self.get_account_from_alias(alias)
except CuentaNotFound as e:
logger.debug(e)
else:
return titular, productNumber
        # No own account was found, so the key must be passing a bank name
banco = alias
c = [v for k,v in favoritos.iteritems() if v.get(u"bank") == banco.decode("utf-8") and
v.get(u"beneficiary") == titular.decode("utf-8")]
if len(c) > 1:
raise CuentaDuplicada("Se ha encontrado mas de una cuenta favorita para ese nombre y ese banco")
elif len(c) == 0:
raise CuentaNotFound("No se ha encontrado ninguna cuenta para el favorito")
return titular,c.pop().get("productNumber")
def get_alias(self, productNumber):
"""
        Returns the alias or name associated with a productNumber
        :param productNumber: account number whose alias we want
        :return: name or alias of the associated account
"""
products = self.get_products()
try:
cuenta = filter(lambda x: x.get("productNumber") == productNumber, products).pop()
if cuenta.has_key("alias"):
return cuenta.get("alias")
return cuenta["name"]
except Exception:
pass
return None
def get_card_alias(self, card):
"""
        Given a card-type object, return the alias, or name, of the associated account
        :param card: card-type object with an associatedAccount attribute
        :return: alias of the associated account or None
"""
try:
return self.get_alias(card.get("associatedAccount").get("productNumber"))
except Exception:
pass
return None
def fetch_last_transactions(self, account):
logger.info(sys._getframe().f_code.co_name)
end_date = date.today()
        start_date = date.today() - timedelta(days=30)  # TODO: make this value a parameter
params = {
"fromDate": start_date.strftime('%d/%m/%Y'),
"toDate": end_date.strftime('%d/%m/%Y'),
"limit": 6, # TODO: parametrizar este valor
"offset": 0
}
logger.info("Params para coger transactions: %s", params)
self._add_headers([("Accept", 'application/json, text/javascript, */*; q=0.01')])
self._add_headers([('Content-Type', 'application/json; charset=utf-8')])
req = self.br.request_class("%s/%s/movements?%s" % (
PRODUCTS_ENDPOINT, account["uuid"], urllib.urlencode(params)))
logger.info("Query a %s", req.get_full_url())
try:
start_time = time.time()
res = self.br.open(req)
req_time = time.time() - start_time
except Exception as e:
logger.error("Error solicitando movimientos: %s", e)
raise e
logger.info("Tiempo de la request: %s", req_time)
transactions = json.loads(res.read())
return_transactions = []
for t in transactions.get("elements", []):
if t.get("amount") > 0:
amount = colored(t.get("amount"), 'green')
else:
amount = colored(t.get("amount"), 'red')
if t.get("balance") > 0:
balance = colored(t.get("balance"), 'green')
else:
balance = colored(t.get("balance"), 'red', attrs=["bold"])
return_transactions.append([t.get("effectiveDate"), t.get("description"), amount, balance])
return return_transactions
def fetch_pending_transactions(self, account):
logger.info(sys._getframe().f_code.co_name)
try:
res_json = self.fetch("%s/%s/pending-movements" % (PRODUCTS_ENDPOINT, account["uuid"]))
except Exception as ex:
logger.exception("Error al obtener los movimientos pendientes")
raise ex
        # Fetch the details for each pending transaction
return_transactions = []
for tr in res_json:
uuid = tr["uuid"]
try:
t = self.fetch("%s/%s/pending-movements/%s" % (PRODUCTS_ENDPOINT, account["uuid"], uuid))
except Exception as ex:
logger.exception("Error al obtener los movimientos pendientes")
raise ex
if t.get("amount") > 0:
amount = colored(t.get("amount"), 'green')
else:
amount = colored(t.get("amount"), 'red')
balance = colored("pendiente", 'yellow')
effectiveDate = colored(t.get("effectiveDate"), 'yellow')
comment = colored(t.get("comment"), 'yellow')
return_transactions.append([effectiveDate, comment, amount, balance])
return return_transactions
def fetch(self, endpoint, headers=None, data=None, method=None):
"""
        Performs a request to ING at the given endpoint and returns the parsed JSON
        :param endpoint: url to send the request to
        :param headers: optional list of headers
        :param data: if this parameter is set, a POST is sent
        :return: JSON parsed into a Python object
"""
if headers:
req = self.br.request_class(endpoint, headers=headers)
else:
req = self.br.request_class(endpoint)
if method:
req.get_method = lambda: method
try:
res = self.br.open(req, data=data)
res_txt = res.read()
res_json = json.loads(res_txt)
except mechanize.HTTPError as e:
logger.error("Error enviando peticion\nURL: %s\nData: %s\nHeaders: %s\nResp: %s\nException: %s",
req.get_full_url(), data, req.headers, e.read(), e)
raise e
except ValueError as e:
logger.error("Error obteniendo JSON de la respuesta de ING")
logger.error(res.read())
raise e
return res_json
def tarjetaCoordenadas(self, position):
"""
        Gets the pinpad from the endpoint and returns an array with the answer we have to send back
        :param position: position on the coordinates card that we are being asked for
        :return: array with the pinpad positions that must be sent
"""
        # Get the pinpad
try:
res_json = self.fetch(CARD_ENDPOINT)
except Exception as ex:
logger.exception("Error obteniendo el pinpad")
raise ex
        # Decode the pinpad images
try:
pinpad = process_pin_images(res_json["pinpad"])
except Exception as e:
logger.error("Exception en process_pin_images: %s", e)
logger.error(res_json["pinpad"])
raise e
logger.info("Pinpad: %s", pinpad)
        # Get the coordinate we need
coordenada = self.config.get_coordenada(position)
codecDigits = []
for i in map(int, str(coordenada)):
codecDigits.append(pinpad.index(i))
logger.info("codecDigits: %s", codecDigits)
return codecDigits
|
adrianlzt/ingdirect_cli
|
browser.py
|
browser.py
|
py
| 20,033 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mechanize.Browser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "mechanize.HTTPError",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "sys._getframe",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "mechanize.HTTPError",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 303,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "urllib.urlencode",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "sys._getframe",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "mechanize.HTTPError",
"line_number": 394,
"usage_type": "attribute"
}
] |
7212182080
|
import torch
from torch import nn
from tqdm.auto import tqdm
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
#
import glob
import random
import os
from torch.utils.data import Dataset
from PIL import Image
#filesize
import os
torch.manual_seed(0)
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28), img_name=None):
'''
Function for visualizing images: Given a tensor of images, number of images, and
size per image, plots and prints the images in an uniform grid.
'''
image_tensor = (image_tensor + 1) / 2
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
img_to_save = image_grid.permute(1, 2, 0).squeeze().cpu().numpy()
    if img_name is not None:
plt.imsave(img_name, img_to_save)
#.imshow(image_grid.permute(1, 2, 0).squeeze())
#plt.show()
# Inspired by https://github.com/aitorzip/PyTorch-CycleGAN/blob/master/datasets.py
class ImageDataset(Dataset):
def __init__(self, root, transform=None, mode='train'):
self.transform = transform
self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))
if len(self.files_A) > len(self.files_B):
self.files_A, self.files_B = self.files_B, self.files_A
self.new_perm()
assert len(self.files_A) > 0, "Make sure you downloaded the horse2zebra images!"
def new_perm(self):
self.randperm = torch.randperm(len(self.files_B))[:len(self.files_A)]
def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
item_B = self.transform(Image.open(self.files_B[self.randperm[index]]))
if item_A.shape[0] != 3:
item_A = item_A.repeat(3, 1, 1)
if item_B.shape[0] != 3:
item_B = item_B.repeat(3, 1, 1)
if index == len(self) - 1:
self.new_perm()
# Old versions of PyTorch didn't support normalization for different-channeled images
return (item_A - 0.5) * 2, (item_B - 0.5) * 2
def __len__(self):
return min(len(self.files_A), len(self.files_B))
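    # Hedged note: because new_perm() reshuffles which B-image is paired with
    # each A-image at the end of every pass (index == len(self) - 1), the
    # dataset yields unaligned A/B pairs, which is what CycleGAN training
    # expects.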
class ResidualBlock(nn.Module):
'''
ResidualBlock Class:
Performs two convolutions and an instance normalization, the input is added
to this output to form the residual block output.
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode='reflect')
self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size=3, padding=1, padding_mode='reflect')
self.instancenorm = nn.InstanceNorm2d(input_channels)
self.activation = nn.ReLU()
def forward(self, x):
'''
Function for completing a forward pass of ResidualBlock:
Given an image tensor, completes a residual block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
original_x = x.clone()
x = self.conv1(x)
x = self.instancenorm(x)
x = self.activation(x)
x = self.conv2(x)
x = self.instancenorm(x)
return original_x + x
class ContractingBlock(nn.Module):
'''
ContractingBlock Class
Performs a convolution followed by a max pool operation and an optional instance norm.
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels, use_bn=True, kernel_size=3, activation='relu'):
super(ContractingBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=kernel_size, padding=1, stride=2, padding_mode='reflect')
self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU(0.2)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels * 2)
self.use_bn = use_bn
def forward(self, x):
'''
Function for completing a forward pass of ContractingBlock:
Given an image tensor, completes a contracting block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
class ExpandingBlock(nn.Module):
'''
ExpandingBlock Class:
Performs a convolutional transpose operation in order to upsample,
with an optional instance norm
Values:
input_channels: the number of channels to expect from a given input
'''
def __init__(self, input_channels, use_bn=True):
super(ExpandingBlock, self).__init__()
self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2, kernel_size=3, stride=2, padding=1, output_padding=1)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels // 2)
self.use_bn = use_bn
self.activation = nn.ReLU()
def forward(self, x):
'''
Function for completing a forward pass of ExpandingBlock:
Given an image tensor, completes an expanding block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
skip_con_x: the image tensor from the contracting path (from the opposing block of x)
for the skip connection
'''
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
class FeatureMapBlock(nn.Module):
'''
FeatureMapBlock Class
The final layer of a Generator -
maps each the output to the desired number of output channels
Values:
input_channels: the number of channels to expect from a given input
output_channels: the number of channels to expect for a given output
'''
def __init__(self, input_channels, output_channels):
super(FeatureMapBlock, self).__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=7, padding=3, padding_mode='reflect')
def forward(self, x):
'''
Function for completing a forward pass of FeatureMapBlock:
Given an image tensor, returns it mapped to the desired number of channels.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x = self.conv(x)
return x
class Generator(nn.Module):
'''
Generator Class
A series of 2 contracting blocks, 9 residual blocks, and 2 expanding blocks to
transform an input image into an image from the other class, with an upfeature
layer at the start and a downfeature layer at the end.
Values:
input_channels: the number of channels to expect from a given input
output_channels: the number of channels to expect for a given output
'''
def __init__(self, input_channels, output_channels, hidden_channels=64):
super(Generator, self).__init__()
self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
self.contract1 = ContractingBlock(hidden_channels)
self.contract2 = ContractingBlock(hidden_channels * 2)
res_mult = 4
self.res0 = ResidualBlock(hidden_channels * res_mult)
self.res1 = ResidualBlock(hidden_channels * res_mult)
self.res2 = ResidualBlock(hidden_channels * res_mult)
self.res3 = ResidualBlock(hidden_channels * res_mult)
self.res4 = ResidualBlock(hidden_channels * res_mult)
self.res5 = ResidualBlock(hidden_channels * res_mult)
self.res6 = ResidualBlock(hidden_channels * res_mult)
self.res7 = ResidualBlock(hidden_channels * res_mult)
self.res8 = ResidualBlock(hidden_channels * res_mult)
self.expand2 = ExpandingBlock(hidden_channels * 4)
self.expand3 = ExpandingBlock(hidden_channels * 2)
self.downfeature = FeatureMapBlock(hidden_channels, output_channels)
self.tanh = torch.nn.Tanh()
def forward(self, x):
'''
Function for completing a forward pass of Generator:
Given an image tensor, passes it through the U-Net with residual blocks
and returns the output.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
'''
x0 = self.upfeature(x)
x1 = self.contract1(x0)
x2 = self.contract2(x1)
x3 = self.res0(x2)
x4 = self.res1(x3)
x5 = self.res2(x4)
x6 = self.res3(x5)
x7 = self.res4(x6)
x8 = self.res5(x7)
x9 = self.res6(x8)
x10 = self.res7(x9)
x11 = self.res8(x10)
x12 = self.expand2(x11)
x13 = self.expand3(x12)
xn = self.downfeature(x13)
return self.tanh(xn)
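    # Hedged shape trace for a 256x256 RGB input with hidden_channels=64:
    # (N,3,256,256) -> upfeature (N,64,256,256) -> contract1 (N,128,128,128)
    # -> contract2 (N,256,64,64) -> res0..res8 keep (N,256,64,64)
    # -> expand2 (N,128,128,128) -> expand3 (N,64,256,256)
    # -> downfeature + tanh (N,3,256,256).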
class Discriminator(nn.Module):
'''
Discriminator Class
Structured like the contracting path of the U-Net, the discriminator will
output a matrix of values classifying corresponding portions of the image as real or fake.
Parameters:
input_channels: the number of image input channels
hidden_channels: the initial number of discriminator convolutional filters
'''
def __init__(self, input_channels, hidden_channels=64):
super(Discriminator, self).__init__()
self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
self.contract1 = ContractingBlock(hidden_channels, use_bn=False, kernel_size=4, activation='lrelu')
self.contract2 = ContractingBlock(hidden_channels * 2, kernel_size=4, activation='lrelu')
self.contract3 = ContractingBlock(hidden_channels * 4, kernel_size=4, activation='lrelu')
self.final = nn.Conv2d(hidden_channels * 8, 1, kernel_size=1)
def forward(self, x):
x0 = self.upfeature(x)
x1 = self.contract1(x0)
x2 = self.contract2(x1)
x3 = self.contract3(x2)
xn = self.final(x3)
return xn
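    # Hedged shape trace: a (N,3,256,256) input passes three stride-2
    # contracting blocks (64 -> 128 -> 256 -> 512 channels), reaching 32x32,
    # so `final` emits an (N,1,32,32) PatchGAN map of per-patch real/fake
    # scores rather than a single scalar.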
import torch.nn.functional as F
adv_criterion = nn.MSELoss()
recon_criterion = nn.L1Loss()
n_epochs = 200
dim_A = 3
dim_B = 3
display_step = 1000  # 200
batch_size = 1
lr = 0.0002
load_shape = 286
target_shape = 256
device = 'cuda'
transform = transforms.Compose([
transforms.Resize(load_shape),
transforms.RandomCrop(target_shape),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
import torchvision
dataset = ImageDataset("horse2zebra", transform=transform)
gen_AB = Generator(dim_A, dim_B).to(device)
gen_BA = Generator(dim_B, dim_A).to(device)
gen_opt = torch.optim.Adam(list(gen_AB.parameters()) + list(gen_BA.parameters()), lr=lr, betas=(0.5, 0.999))
disc_A = Discriminator(dim_A).to(device)
disc_A_opt = torch.optim.Adam(disc_A.parameters(), lr=lr, betas=(0.5, 0.999))
disc_B = Discriminator(dim_B).to(device)
disc_B_opt = torch.optim.Adam(disc_B.parameters(), lr=lr, betas=(0.5, 0.999))
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
# Feel free to change pretrained to False if you're training the model from scratch
pretrained = True
if pretrained:
    pre_dict = torch.load('cycleGAN_ckpt.pth')  # cycleGAN_100000
gen_AB.load_state_dict(pre_dict['gen_AB'])
gen_BA.load_state_dict(pre_dict['gen_BA'])
gen_opt.load_state_dict(pre_dict['gen_opt'])
disc_A.load_state_dict(pre_dict['disc_A'])
disc_A_opt.load_state_dict(pre_dict['disc_A_opt'])
disc_B.load_state_dict(pre_dict['disc_B'])
disc_B_opt.load_state_dict(pre_dict['disc_B_opt'])
else:
gen_AB = gen_AB.apply(weights_init)
gen_BA = gen_BA.apply(weights_init)
disc_A = disc_A.apply(weights_init)
disc_B = disc_B.apply(weights_init)
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_disc_loss
def get_disc_loss(real_X, fake_X, disc_X, adv_criterion):
'''
Return the loss of the discriminator given inputs.
Parameters:
real_X: the real images from pile X
fake_X: the generated images of class X
disc_X: the discriminator for class X; takes images and returns real/fake class X
prediction matrices
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the target labels and returns a adversarial
loss (which you aim to minimize)
'''
#### START CODE HERE ####
disc_fake_X_hat = disc_X(fake_X.detach()) # Detach generator
disc_fake_X_loss = adv_criterion(disc_fake_X_hat, torch.zeros_like(disc_fake_X_hat))
disc_real_X_hat = disc_X(real_X)
disc_real_X_loss = adv_criterion(disc_real_X_hat, torch.ones_like(disc_real_X_hat))
disc_loss = (disc_fake_X_loss + disc_real_X_loss) / 2
#### END CODE HERE ####
return disc_loss
# UNIT TEST
test_disc_X = lambda x: x * 97
test_real_X = torch.tensor(83.)
test_fake_X = torch.tensor(89.)
test_adv_criterion = lambda x, y: x * 79 + y * 73
assert torch.abs((get_disc_loss(test_real_X, test_fake_X, test_disc_X, test_adv_criterion)) - 659054.5000) < 1e-6
test_disc_X = lambda x: x.mean(0, keepdim=True)
test_adv_criterion = torch.nn.BCEWithLogitsLoss()
test_input = torch.ones(20, 10)
# If this runs, it's a pass - checks that the shapes are treated correctly
get_disc_loss(test_input, test_input, test_disc_X, test_adv_criterion)
print("Success!")
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_gen_adversarial_loss
def get_gen_adversarial_loss(real_X, disc_Y, gen_XY, adv_criterion):
'''
Return the adversarial loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
disc_Y: the discriminator for class Y; takes images and returns real/fake class Y
prediction matrices
gen_XY: the generator for class X to Y; takes images and returns the images
transformed to class Y
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the target labels and returns a adversarial
loss (which you aim to minimize)
'''
#### START CODE HERE ####
fake_Y = gen_XY(real_X)
disc_fake_Y_hat = disc_Y(fake_Y)
adversarial_loss = adv_criterion(disc_fake_Y_hat, torch.ones_like(disc_fake_Y_hat))
#### END CODE HERE ####
return adversarial_loss, fake_Y
# UNIT TEST
test_disc_Y = lambda x: x * 97
test_real_X = torch.tensor(83.)
test_gen_XY = lambda x: x * 89
test_adv_criterion = lambda x, y: x * 79 + y * 73
test_res = get_gen_adversarial_loss(test_real_X, test_disc_Y, test_gen_XY, test_adv_criterion)
assert torch.abs(test_res[0] - 56606652) < 1e-6
assert torch.abs(test_res[1] - 7387) < 1e-6
test_disc_Y = lambda x: x.mean(0, keepdim=True)
test_adv_criterion = torch.nn.BCEWithLogitsLoss()
test_input = torch.ones(20, 10)
# If this runs, it's a pass - checks that the shapes are treated correctly
get_gen_adversarial_loss(test_input, test_disc_Y, test_gen_XY, test_adv_criterion)
print("Success!")
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_identity_loss
def get_identity_loss(real_X, gen_YX, identity_criterion):
'''
Return the identity loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
gen_YX: the generator for class Y to X; takes images and returns the images
transformed to class X
identity_criterion: the identity loss function; takes the real images from X and
those images put through a Y->X generator and returns the identity
loss (which you aim to minimize)
'''
#### START CODE HERE ####
identity_X = gen_YX(real_X)
identity_loss = identity_criterion(identity_X, real_X)
#### END CODE HERE ####
return identity_loss, identity_X
# UNIT TEST
test_real_X = torch.tensor(83.)
test_gen_YX = lambda x: x * 89
test_identity_criterion = lambda x, y: (x + y) * 73
test_res = get_identity_loss(test_real_X, test_gen_YX, test_identity_criterion)
assert torch.abs(test_res[0] - 545310) < 1e-6
assert torch.abs(test_res[1] - 7387) < 1e-6
print("Success!")
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_cycle_consistency_loss
def get_cycle_consistency_loss(real_X, fake_Y, gen_YX, cycle_criterion):
'''
Return the cycle consistency loss of the generator given inputs
(and the generated images for testing purposes).
Parameters:
real_X: the real images from pile X
fake_Y: the generated images of class Y
gen_YX: the generator for class Y to X; takes images and returns the images
transformed to class X
cycle_criterion: the cycle consistency loss function; takes the real images from X and
those images put through a X->Y generator and then Y->X generator
and returns the cycle consistency loss (which you aim to minimize)
'''
#### START CODE HERE ####
cycle_X = gen_YX(fake_Y)
cycle_loss = cycle_criterion(cycle_X, real_X)
#### END CODE HERE ####
return cycle_loss, cycle_X
# UNIT TEST
test_real_X = torch.tensor(83.)
test_fake_Y = torch.tensor(97.)
test_gen_YX = lambda x: x * 89
test_cycle_criterion = lambda x, y: (x + y) * 73
test_res = get_cycle_consistency_loss(test_real_X, test_fake_Y, test_gen_YX, test_cycle_criterion)
assert torch.abs(test_res[1] - 8633) < 1e-6
assert torch.abs(test_res[0] - 636268) < 1e-6
print("Success!")
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_gen_loss
def get_gen_loss(real_A, real_B, gen_AB, gen_BA, disc_A, disc_B, adv_criterion, identity_criterion, cycle_criterion, lambda_identity=0.1, lambda_cycle=10):
'''
Return the loss of the generator given inputs.
Parameters:
real_A: the real images from pile A
real_B: the real images from pile B
gen_AB: the generator for class A to B; takes images and returns the images
transformed to class B
gen_BA: the generator for class B to A; takes images and returns the images
transformed to class A
disc_A: the discriminator for class A; takes images and returns real/fake class A
prediction matrices
disc_B: the discriminator for class B; takes images and returns real/fake class B
prediction matrices
adv_criterion: the adversarial loss function; takes the discriminator
predictions and the true labels and returns a adversarial
loss (which you aim to minimize)
identity_criterion: the reconstruction loss function used for identity loss
and cycle consistency loss; takes two sets of images and returns
their pixel differences (which you aim to minimize)
cycle_criterion: the cycle consistency loss function; takes the real images from X and
those images put through a X->Y generator and then Y->X generator
and returns the cycle consistency loss (which you aim to minimize).
Note that in practice, cycle_criterion == identity_criterion == L1 loss
lambda_identity: the weight of the identity loss
lambda_cycle: the weight of the cycle-consistency loss
'''
# Hint 1: Make sure you include both directions - you can think of the generators as collaborating
# Hint 2: Don't forget to use the lambdas for the identity loss and cycle loss!
#### START CODE HERE ####
# Adversarial Loss -- get_gen_adversarial_loss(real_X, disc_Y, gen_XY, adv_criterion)
adv_loss_BA, fake_A = get_gen_adversarial_loss(real_B, disc_A, gen_BA, adv_criterion)
adv_loss_AB, fake_B = get_gen_adversarial_loss(real_A, disc_B, gen_AB, adv_criterion)
gen_adversarial_loss = adv_loss_BA + adv_loss_AB
# Identity Loss -- get_identity_loss(real_X, gen_YX, identity_criterion)
identity_loss_A, identity_A = get_identity_loss(real_A, gen_BA, identity_criterion)
identity_loss_B, identity_B = get_identity_loss(real_B, gen_AB, identity_criterion)
gen_identity_loss = identity_loss_A + identity_loss_B
# Cycle-consistency Loss -- get_cycle_consistency_loss(real_X, fake_Y, gen_YX, cycle_criterion)
cycle_loss_BA, cycle_A = get_cycle_consistency_loss(real_A, fake_B, gen_BA, cycle_criterion)
cycle_loss_AB, cycle_B = get_cycle_consistency_loss(real_B, fake_A, gen_AB, cycle_criterion)
gen_cycle_loss = cycle_loss_BA + cycle_loss_AB
# Total loss
gen_loss = lambda_identity * gen_identity_loss + lambda_cycle * gen_cycle_loss + gen_adversarial_loss
#### END CODE HERE ####
return gen_loss, fake_A, fake_B
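# Hedged summary of the objective assembled above:
# gen_loss = adv(B->A) + adv(A->B)
#          + lambda_identity * (identity_A + identity_B)
#          + lambda_cycle * (cycle_A + cycle_B),
# so with the defaults (0.1 and 10) the cycle-consistency terms dominate
# the identity terms.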
# UNIT TEST
test_real_A = torch.tensor(97)
test_real_B = torch.tensor(89)
test_gen_AB = lambda x: x * 83
test_gen_BA = lambda x: x * 79
test_disc_A = lambda x: x * 47
test_disc_B = lambda x: x * 43
test_adv_criterion = lambda x, y: x * 73 + y * 71
test_recon_criterion = lambda x, y: (x + y) * 61
test_lambda_identity = 59
test_lambda_cycle = 53
test_res = get_gen_loss(
test_real_A,
test_real_B,
test_gen_AB,
test_gen_BA,
test_disc_A,
test_disc_B,
test_adv_criterion,
test_recon_criterion,
test_recon_criterion,
test_lambda_identity,
test_lambda_cycle)
assert test_res[0].item() == 4047804560
assert test_res[1].item() == 7031
assert test_res[2].item() == 8051
print("Success!")
from skimage import color
import numpy as np
plt.rcParams["figure.figsize"] = (10, 10)
def train(save_model=True):
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
for epoch in range(108,n_epochs):
# Dataloader returns the batches
# for image, _ in tqdm(dataloader):
for real_A, real_B in tqdm(dataloader):
# image_width = image.shape[3]
real_A = nn.functional.interpolate(real_A, size=target_shape)
real_B = nn.functional.interpolate(real_B, size=target_shape)
cur_batch_size = len(real_A)
real_A = real_A.to(device)
real_B = real_B.to(device)
### Update discriminator A ###
disc_A_opt.zero_grad() # Zero out the gradient before backpropagation
with torch.no_grad():
fake_A = gen_BA(real_B)
disc_A_loss = get_disc_loss(real_A, fake_A, disc_A, adv_criterion)
disc_A_loss.backward(retain_graph=True) # Update gradients
disc_A_opt.step() # Update optimizer
### Update discriminator B ###
disc_B_opt.zero_grad() # Zero out the gradient before backpropagation
with torch.no_grad():
fake_B = gen_AB(real_A)
disc_B_loss = get_disc_loss(real_B, fake_B, disc_B, adv_criterion)
disc_B_loss.backward(retain_graph=True) # Update gradients
disc_B_opt.step() # Update optimizer
### Update generator ###
gen_opt.zero_grad()
gen_loss, fake_A, fake_B = get_gen_loss(
real_A, real_B, gen_AB, gen_BA, disc_A, disc_B, adv_criterion, recon_criterion, recon_criterion
)
gen_loss.backward() # Update gradients
gen_opt.step() # Update optimizer
# Keep track of the average discriminator loss
mean_discriminator_loss += disc_A_loss.item() / display_step
# Keep track of the average generator loss
mean_generator_loss += gen_loss.item() / display_step
### Visualization code ###
if cur_step % display_step == 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
show_tensor_images(torch.cat([real_A, real_B]), size=(dim_A, target_shape, target_shape))
img_name = f'res_cycle/ep_{epoch}.png'
show_tensor_images(torch.cat([fake_B, fake_A]), size=(dim_B, target_shape, target_shape),img_name=img_name)
mean_generator_loss = 0
mean_discriminator_loss = 0
# You can change save_model to True if you'd like to save the model
                space_taken = sum(os.path.getsize(os.path.join('models', f)) for f in os.listdir('models') if os.path.isfile(os.path.join('models', f))) / (1024 * 1024 * 1024)
                if space_taken > 20:  # no more than 20 GB for this script
exit('Folder limit exceeded')
if save_model:
torch.save({
'gen_AB': gen_AB.state_dict(),
'gen_BA': gen_BA.state_dict(),
'gen_opt': gen_opt.state_dict(),
'disc_A': disc_A.state_dict(),
'disc_A_opt': disc_A_opt.state_dict(),
'disc_B': disc_B.state_dict(),
'disc_B_opt': disc_B_opt.state_dict()
}, f"models/cycleGAN_{cur_step}.pth")
cur_step += 1
train()
|
Zefyrus94/GAN_test
|
cyclegan.py
|
cyclegan.py
|
py
| 25,719 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.manual_seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torchvision.utils.make_grid",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imsave",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.randperm",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "torch.nn.InstanceNorm2d",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "torch.nn.InstanceNorm2d",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "torch.nn.ConvTranspose2d",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "torch.nn.InstanceNorm2d",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "torch.nn.L1Loss",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 267,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 283,
"usage_type": "name"
},
{
"api_name": "torch.nn.ConvTranspose2d",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 284,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.constant_",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "torch.load",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "torch.ones_like",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "torch.nn.BCEWithLogitsLoss",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 375,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 401,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 431,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 432,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 493,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 522,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 522,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 534,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 536,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 536,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 536,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 552,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 576,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 580,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 580,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 584,
"usage_type": "call"
}
] |
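The checkpoint block above packs every state_dict (both generators, both discriminators, and their optimizers) into a single dict for torch.save, guarded by a crude folder-size check. A minimal restore sketch for that format, assuming the live model and optimizer objects have already been constructed; the key names mirror the training script, and the path is hypothetical:
import torch
def load_cyclegan_checkpoint(path, modules):
    # `modules` maps saved keys (e.g. 'gen_AB') to the live nn.Module or
    # optimizer objects that should receive the restored state.
    checkpoint = torch.load(path, map_location="cpu")
    for key, module in modules.items():
        module.load_state_dict(checkpoint[key])
    return checkpoint
# Usage (objects come from the training script above):
# load_cyclegan_checkpoint("models/cycleGAN_1000.pth",
#                          {"gen_AB": gen_AB, "gen_opt": gen_opt, "disc_A": disc_A})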
23647554795
|
from flask import render_template, request, flash, jsonify
from appInitialize import app, db
from model.client import Client
from model.product import Product
from model.order import Order
import json
@app.route('/')
def index ():
return render_template('layout.html')
#Query clients
@app.route('/read/clients', methods=['GET'])
def readClients ():
clients = Client.query.filter_by(state = 'activo').all()
return render_template('read.html', id = False, records = clients, route = 'client')
@app.route('/api/read/clients', methods=['GET'])
def apiReadClients ():
clients = Client.query.filter_by(state = 'activo').all()
return jsonify([{'name': client.name, 'document': client.document, 'state': client.state, 'created_at': client.created_at} for client in clients])
#Query products
@app.route('/read/products', methods=['GET'])
def readProducts ():
products = Product.query.filter(Product.state != 'inactivo').all()
return render_template('read.html', id = False, records = products, route = 'product')
@app.route('/api/read/products', methods=['GET'])
def apiReadProducts ():
products = Product.query.filter(Product.state != 'inactivo').all()
    return jsonify([{'name': product.name, 'price': product.price, 'state': product.state, 'created_at': product.created_at} for product in products])
#Query orders
@app.route('/read/orders', methods=['GET', 'POST'])
def readOrders ():
if request.method == 'POST':
id = request.form['id']
if id == "true":
clientid = request.form['clientid']
client = Client.query.filter_by(clientid = clientid, state = 'activo').all()
if len(client) == 0:
                flash('Client not found')
return render_template('read.html', id = True, route = 'order')
else:
                orders = Order.query.filter_by(clientid = clientid, state = 'pendiente').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return render_template('read.html', id = False, records = orders, route = 'order')
return render_template('read.html', id = True, route = 'order')
@app.route('/api/read/orders', methods=['POST'])
def apiReadOrders ():
if request.method == 'POST':
data = json.loads(request.data)
client = Client.query.filter_by(clientid = data['clientid'], state = 'activo').all()
if len(client) == 0:
return "Registro no encontrado", 402
        orders = Order.query.filter_by(clientid = data['clientid'], state = 'pendiente').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return jsonify([{'clientid': order.clientid, 'productid': order.productid, 'quantity': order.quantity, 'total': order.total, 'state': order.state, 'created_at': order.created_at} for order in orders])
#Query purchases
@app.route('/read/purchases', methods=['GET', 'POST'])
def readPurchases ():
if request.method == 'POST':
id = request.form['id']
if id == "true":
clientid = request.form['clientid']
client = Client.query.filter_by(clientid = clientid, state = 'activo').all()
if len(client) == 0:
                flash('Client not found')
return render_template('read.html', id = True, route = 'purchase')
else:
                orders = Order.query.filter_by(clientid = clientid, state = 'pagada').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return render_template('read.html', id = False, records = orders, route = 'purchase')
return render_template('read.html', id = True, route = 'purchase')
@app.route('/api/read/purchases', methods=['POST'])
def apiReadPurchases ():
if request.method == 'POST':
data = json.loads(request.data)
client = Client.query.filter_by(clientid = data['clientid'], state = 'activo').all()
if len(client) == 0:
return "Registro no encontrado", 402
        orders = Order.query.filter_by(clientid = data['clientid'], state = 'pagada').join(Client, db.and_(Order.clientid == Client.clientid, Client.state == 'activo')).join(Product, db.and_(Order.productid == Product.productid, Product.state != 'inactivo')).all()
return jsonify([{'clientid': order.clientid, 'productid': order.productid, 'quantity': order.quantity, 'total': order.total, 'state': order.state, 'created_at': order.created_at} for order in orders])
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
cesar-orozco-chr/tienda-online
|
read/app.py
|
app.py
|
py
| 4,862 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "model.product.Product.query.filter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "model.product.Product.query",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "model.product.Product.state",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "model.product.Product.query.filter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "model.product.Product.query",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "model.product.Product.state",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "model.product.Product",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "model.client.Client",
"line_number": 46,
"usage_type": "argument"
},
{
"api_name": "model.order.Order.query.filter_by",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "model.order.Order.query",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "model.order.Order",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "model.order.Order.clientid",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.clientid",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.state",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.productid",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.state",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "model.product.Product",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "model.client.Client",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "model.order.Order.query.filter_by",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "model.order.Order.query",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.order.Order",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "model.order.Order.clientid",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.clientid",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.state",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.productid",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.state",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "flask.flash",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "model.product.Product",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "model.client.Client",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "model.order.Order.query.filter_by",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "model.order.Order.query",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.order.Order",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "model.order.Order.clientid",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.clientid",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.state",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.productid",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.state",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.request.method",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "flask.request.data",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "model.client.Client.query.filter_by",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "model.client.Client.query",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "model.product.Product",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "model.client.Client",
"line_number": 83,
"usage_type": "argument"
},
{
"api_name": "model.order.Order.query.filter_by",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "model.order.Order.query",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "model.order.Order",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "model.order.Order.clientid",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.clientid",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "model.client.Client.state",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.productid",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "model.product.Product.state",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "flask.jsonify",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "appInitialize.app.route",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "appInitialize.app.run",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "appInitialize.app",
"line_number": 87,
"usage_type": "name"
}
] |
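A quick way to exercise the JSON endpoints defined above; a hedged sketch assuming the service is running locally on Flask's default port and that a client with id 1 exists:
import requests
base = "http://localhost:5000"  # assumed local deployment
print(requests.get(base + "/api/read/clients").json())
# The order and purchase endpoints parse a JSON body via json.loads(request.data):
resp = requests.post(base + "/api/read/orders", json={"clientid": 1})
print(resp.status_code, resp.json() if resp.ok else resp.text)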
21509653092
|
from fastapi import APIRouter, Depends, Request, Response
from sqlalchemy.orm import Session
from typing import List
from uuid import UUID
from api.models.node_threat import NodeThreatCreate, NodeThreatRead, NodeThreatUpdate
from api.routes import helpers
from db import crud
from db.database import get_db
from db.schemas.node_threat import NodeThreat
from db.schemas.node_threat_type import NodeThreatType
router = APIRouter(
prefix="/node/threat",
tags=["Node Threat"],
)
#
# CREATE
#
def create_node_threat(
node_threat: NodeThreatCreate,
request: Request,
response: Response,
db: Session = Depends(get_db),
):
# Make sure that all the threat types that were given actually exist
db_threat_types = crud.read_by_values(values=node_threat.types, db_table=NodeThreatType, db=db)
# Create the new node threat
new_threat = NodeThreat(**node_threat.dict())
# Set the threat types on the new node threat
new_threat.types = db_threat_types
# Save the new node threat to the database
db.add(new_threat)
crud.commit(db)
response.headers["Content-Location"] = request.url_for("get_node_threat", uuid=new_threat.uuid)
helpers.api_route_create(router, create_node_threat)
#
# READ
#
def get_all_node_threats(db: Session = Depends(get_db)):
return crud.read_all(db_table=NodeThreat, db=db)
def get_node_threat(uuid: UUID, db: Session = Depends(get_db)):
return crud.read(uuid=uuid, db_table=NodeThreat, db=db)
helpers.api_route_read_all(router, get_all_node_threats, List[NodeThreatRead])
helpers.api_route_read(router, get_node_threat, NodeThreatRead)
#
# UPDATE
#
def update_node_threat(
uuid: UUID,
node_threat: NodeThreatUpdate,
request: Request,
response: Response,
db: Session = Depends(get_db),
):
# Read the current node threat from the database
db_node_threat: NodeThreat = crud.read(uuid=uuid, db_table=NodeThreat, db=db)
# Get the data that was given in the request and use it to update the database object
update_data = node_threat.dict(exclude_unset=True)
if "description" in update_data:
db_node_threat.description = update_data["description"]
if "value" in update_data:
db_node_threat.value = update_data["value"]
if "types" in update_data:
db_node_threat.types = crud.read_by_values(
values=update_data["types"], db_table=NodeThreatType, db=db
)
crud.commit(db)
response.headers["Content-Location"] = request.url_for("get_node_threat", uuid=uuid)
helpers.api_route_update(router, update_node_threat)
#
# DELETE
#
def delete_node_threat(uuid: UUID, db: Session = Depends(get_db)):
crud.delete(uuid=uuid, db_table=NodeThreat, db=db)
helpers.api_route_delete(router, delete_node_threat)
|
hollyfoxx/ace2-gui
|
backend/app/api/routes/node_threat.py
|
node_threat.py
|
py
| 2,805 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api.models.node_threat.NodeThreatCreate",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "fastapi.Response",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "db.database.get_db",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "db.crud.read_by_values",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat_type.NodeThreatType",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat.NodeThreat",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "db.add",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "db.crud.commit",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "api.routes.helpers.api_route_create",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "api.routes.helpers",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "db.database.get_db",
"line_number": 55,
"usage_type": "argument"
},
{
"api_name": "db.crud.read_all",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat.NodeThreat",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "db.database.get_db",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "db.crud.read",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat.NodeThreat",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "api.routes.helpers.api_route_read_all",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "api.routes.helpers",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "api.models.node_threat.NodeThreatRead",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "api.routes.helpers.api_route_read",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "api.models.node_threat.NodeThreatRead",
"line_number": 64,
"usage_type": "argument"
},
{
"api_name": "api.routes.helpers",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "api.models.node_threat.NodeThreatUpdate",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "fastapi.Request",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "fastapi.Response",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "db.database.get_db",
"line_number": 77,
"usage_type": "argument"
},
{
"api_name": "db.schemas.node_threat.NodeThreat",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "db.crud.read",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "db.crud.read_by_values",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat_type.NodeThreatType",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "db.crud.commit",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "api.routes.helpers.api_route_update",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "api.routes.helpers",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "db.database.get_db",
"line_number": 109,
"usage_type": "argument"
},
{
"api_name": "db.crud.delete",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "db.crud",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "db.schemas.node_threat.NodeThreat",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "api.routes.helpers.api_route_delete",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "api.routes.helpers",
"line_number": 113,
"usage_type": "name"
}
] |
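The update route above leans on Pydantic's dict(exclude_unset=True) so that only fields the caller actually sent are applied to the database object. A standalone sketch of that behaviour with a hypothetical stand-in model:
from typing import Optional
from pydantic import BaseModel
class ThreatUpdate(BaseModel):  # hypothetical stand-in for NodeThreatUpdate
    description: Optional[str] = None
    value: Optional[str] = None
patch = ThreatUpdate(value="tlp:red")
print(patch.dict(exclude_unset=True))  # {'value': 'tlp:red'}; description stays untouched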
73739274748
|
#!/usr/bin/env python3
""""
This module provides the interface to manage the state of configured workers.
It allows to setup the virtual environment, install dependencies into it and
then to execute BuildBot worker commands.
"""
import sys
import os.path
import argparse
import getpass
import socket
import paramiko
import logging
sys.path.append(os.path.abspath("{}/../../master/".format(__file__)))
import maxscale.config.workers as workers
def determineHost(host, domain):
possibleHosts = [
host,
"{}.{}".format(host, domain)
]
for checkHost in possibleHosts:
try:
socket.gethostbyname(checkHost)
except BaseException:
continue
return checkHost
return None
def determineHosts(arguments):
hosts = {}
for hostConfiguration in workers.WORKER_CREDENTIALS:
if arguments.host is not None and hostConfiguration["host"] != arguments.host:
continue
host = determineHost(hostConfiguration["host"], arguments.domain)
if host is None:
continue
if host in hosts:
hosts[host].append(hostConfiguration)
else:
hosts[host] = [hostConfiguration]
return hosts
def runCommand(sshClient, command):
logging.debug("Calling command '{}'".format(command))
stdin, stdout, stderr = sshClient.exec_command(command)
stdin.close()
stdoutContents = stdout.readlines()
stderrContents = stderr.readlines()
stdoutText = "".join(stdoutContents).strip()
stderrText = "".join(stderrContents).strip()
logging.debug("Stdout:\n{}".format(stdoutText))
logging.debug("Stderr:\n{}".format(stderrText))
return [stdoutText, stderrText]
def isDirectoryAbsent(sshClient, directory):
_, stderr = runCommand(sshClient, "ls -ld {}".format(directory))
if stderr:
return True
else:
return False
PYTHON_VENV = "~/buildbot-virtual-env"
WORKERS_DIR = "~/buildbot-workers"
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def executeActionOnHost(hosts, user, description, action):
"""Execute an action for every host"""
client = paramiko.SSHClient()
client.load_system_host_keys()
for hostIp in hosts:
logging.info(description.format(hostIp=hostIp))
client.connect(hostIp, username=user)
action(client, hosts[hostIp])
client.close()
def setupVirtualEnv(sshClient):
if isDirectoryAbsent(sshClient, PYTHON_VENV):
logging.info("Creating python virtual environment in {}".format(PYTHON_VENV))
runCommand(sshClient, "python3 -m virtualenv -p /usr/bin/python3 {}".format(PYTHON_VENV))
logging.info("Installing latest version of requirements")
absolutePythonEnvDir, _ = runCommand(sshClient, "cd {}; pwd".format(PYTHON_VENV))
sftClient = sshClient.open_sftp()
sftClient.put("{}/requirements-worker.txt".format(CURRENT_DIR), "{}/requirements.txt".format(absolutePythonEnvDir))
workerWrapper = "{}/bin/run-worker.py".format(absolutePythonEnvDir)
sftClient.put("{}/run-worker.py".format(CURRENT_DIR), workerWrapper)
sftClient.chmod(workerWrapper, 0o755)
runCommand(sshClient, "{}/bin/pip3 install -U -r {}/requirements.txt".format(PYTHON_VENV, PYTHON_VENV))
def configureVirtualEnvironment(hosts, arguments):
def performAction(client, _):
setupVirtualEnv(client)
executeActionOnHost(hosts, arguments.user, "Configuring virtual environment on host '{hostIp}'", performAction)
def createWorkerConfig(sshClient, config, masterHost):
logging.info("Creating configuration for worker '{}'.".format(config["name"]))
runCommand(sshClient, "mkdir -p {}".format(WORKERS_DIR))
runCommand(sshClient, "rm -rf {dir}/{name}".format(dir=WORKERS_DIR, **config))
runCommand(sshClient, "{venv}/bin/run-worker.py create-worker --umask=0o002 {dir}/{name} {server} {name} {password}".format(
venv=PYTHON_VENV, dir=WORKERS_DIR, server=masterHost, **config))
runCommand(sshClient, "echo '{host}' > {dir}/{name}/info/host".format(dir=WORKERS_DIR, **config))
def installWorkers(hosts, arguments):
def performAction(client, host):
setupVirtualEnv(client)
for worker in host:
createWorkerConfig(client, worker, arguments.master)
stopWorkers(hosts, arguments)
executeActionOnHost(hosts, arguments.user, "Configuring host '{hostIp}'", performAction)
def callBuildbotAction(action, hosts, arguments):
def performAction(client, host):
for worker in host:
if isDirectoryAbsent(client, "{dir}/{name}".format(dir=WORKERS_DIR, **worker)):
logging.error("Worker '{name}' configuration does not exist, doing nothing".format(**worker))
continue
runCommand(client, "{venv}/bin/run-worker.py {action} {dir}/{name}".format(
venv=PYTHON_VENV, dir=WORKERS_DIR, action=action, **worker))
logging.info("Executing action '{}'".format(action))
executeActionOnHost(hosts, arguments.user, "Executing command on host '{hostIp}", performAction)
def restartWorkers(hosts, arguments):
callBuildbotAction("restart", hosts, arguments)
def stopWorkers(hosts, arguments):
callBuildbotAction("stop", hosts, arguments)
def startWorkers(hosts, arguments):
callBuildbotAction("start", hosts, arguments)
AVAILABLE_ACTIONS = {
"install": installWorkers,
"configureVenv": configureVirtualEnvironment,
"restart": restartWorkers,
"stop": stopWorkers,
"start": startWorkers
}
def parseArguments():
parser = argparse.ArgumentParser(description="A tool to install, restart the BuildBot worker instances.")
parser.add_argument("action", help="Action to perform, install for example.", choices=AVAILABLE_ACTIONS.keys())
parser.add_argument("--host", help="Host to manage.")
parser.add_argument("--user", help="User to use during the SSH connection to host.", default=getpass.getuser())
parser.add_argument("--domain", help="Default domain for hosts", default="mariadb.com")
parser.add_argument("--master", help="Domain name of the master to configure on workers",
default="maxscale-jenkins.mariadb.com")
parser.add_argument("--debug", help="Show debug output", dest="debug", action="store_true")
parser.set_defaults(debug=False)
return parser.parse_args()
def main():
arguments = parseArguments()
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
action = AVAILABLE_ACTIONS.get(arguments.action)
if action is None:
logging.error("Unknown action '{}'.".format(arguments.action))
exit(1)
hosts = determineHosts(arguments)
action(hosts, arguments)
if __name__ == "__main__":
main()
|
dA505819/maxscale-buildbot
|
worker-management/manage.py
|
manage.py
|
py
| 6,833 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.abspath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "socket.gethostbyname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "maxscale.config.workers.WORKER_CREDENTIALS",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "maxscale.config.workers",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path.path.dirname",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "os.path.path.realpath",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "paramiko.SSHClient",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "getpass.getuser",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "logging.error",
"line_number": 183,
"usage_type": "call"
}
] |
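runCommand above wraps paramiko's three-stream exec_command and logs both outputs. The same pattern in isolation, assuming key-based SSH access to a hypothetical host and user:
import paramiko
client = paramiko.SSHClient()
client.load_system_host_keys()
client.connect("worker1.example.com", username="buildbot")  # hypothetical host/user
stdin, stdout, stderr = client.exec_command("ls -ld ~/buildbot-workers")
stdin.close()
print("stdout:", stdout.read().decode().strip())
print("stderr:", stderr.read().decode().strip())
client.close()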
25968516319
|
"""added san and is_my_move to Move
Revision ID: f39051a2ca9b
Revises: c9b0d072e5e4
Create Date: 2020-12-16 13:05:46.434429
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f39051a2ca9b'
down_revision = 'c9b0d072e5e4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('moves', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_my_move', sa.Boolean(), nullable=True))
batch_op.add_column(sa.Column('san', sa.String(length=8), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('moves', schema=None) as batch_op:
batch_op.drop_column('san')
batch_op.drop_column('is_my_move')
# ### end Alembic commands ###
|
joshua-stauffer/opening-book-api
|
migrations/versions/f39051a2ca9b_added_san_and_is_my_move_to_move.py
|
f39051a2ca9b_added_san_and_is_my_move_to_move.py
|
py
| 929 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "alembic.op.batch_alter_table",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 30,
"usage_type": "name"
}
] |
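A migration like the one above is usually applied with the alembic CLI, but it can also be driven from Python. A sketch assuming an alembic.ini in the working directory:
from alembic import command
from alembic.config import Config
cfg = Config("alembic.ini")    # assumes the project's alembic.ini
command.upgrade(cfg, "head")   # apply pending migrations, including f39051a2ca9b
# command.downgrade(cfg, "-1") # roll back one revision if needed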
20473378944
|
import json
import numpy as np
class calculte():
def __init__(self, data, n_x, n_y, t_s, morning_time, afternoon_time):
self.data = data
self.n_x = n_x
self.n_y = n_y
self.t_s = t_s
self.morning = morning_time
self.afternoon_time = afternoon_time
def _process_data_(self, num):
list_patientID = np.array(self.data['就诊号'])[:]
list_doctID = np.array(self.data['医生'])[:]
list_sleepy = np.array(self.data['麻醉方式'])[:]
list_operation = np.array(self.data['time'])[:]
list_clean = np.array(self.data['手术级别'])[:]
        list_operation = (np.ceil(list_operation / 5) * 5).astype(int)
list_sleepy.reshape((num, 1))
for i in range(num):
b = list_sleepy[i]
if (b == '全身麻醉' or b == '全身麻醉(喉罩)'):
tb = 60
else:
tb = 0
list_sleepy[i] = tb
list_clean.reshape((num, 1))
for i in range(num):
a = list_clean[i]
if a == '1.0':
tp = 10
elif a == '2.0' or a == '3.0':
tp = 20
else:
tp = 30
list_clean[i] = tp
c = np.vstack((list_doctID, list_patientID, list_operation, list_sleepy, list_clean))
key = [i + 1 for i in range(num)]
        e = [] # list holding all the information; each element is a dict
for i in range(num):
f = dict()
d = c[:, i]
f[key[i]] = d
e.append(f)
return list_doctID, list_patientID, list_operation, list_sleepy, list_clean, e
def _best_result_(self,best_paixu,Num,list_doctID,list_sleepy,list_operation,list_clean):
return list_1,list_2,list_3
def _get_list_(self,a):
key = []
dic = {}
key_2 = ['time_of_operation', 'time_of_sleep', 'time_of_clean']
for i in range(self.n_x):
c = a[i]
key.append('手术室{}'.format(i+1))
x = []
for j in range(int(len(c) / 3)):
e = 3 * j
d = c[e:e + 3]
f = dict(zip(key_2, d))
x.append(f)
dic[key[i]] = x
return dic
def _output_date_(self,output_1):
f = open('output.json', 'w', encoding='utf-8')
json.dump(output_1, f, ensure_ascii=False, indent=4)
f.close()
|
Jkcert/deecamp-frontend
|
src/ors_backend/model/schedule/calculation.py
|
calculation.py
|
py
| 2,473 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.vstack",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 68,
"usage_type": "call"
}
] |
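_process_data_ above rounds each operation time up to the next multiple of five minutes with np.ceil(t / 5) * 5. A tiny demonstration of that step (plain int replaces the deprecated np.int):
import numpy as np
times = np.array([12, 15, 31, 4])
rounded = (np.ceil(times / 5) * 5).astype(int)
print(rounded)  # [15 15 35  5]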
32143586237
|
import utils
import requests
import json, sys
from datetime import date, datetime, timedelta
space_key = "SVMC"
parentTitle = "Project Report - Automatic"
# weeklyPageTitle = "Weekly Project Status Report"
# monthlyPageTitle = "CP Monthly Report"
dailyPageTitle = "Issue Status Tool"
pageUrgentPrjTitle = "Issue Tool Project List"
user = utils.open_file(".plm")[0]
pw = utils.open_file(".plm")[2]
def submitToWiki(page_title, page_content):
response = getPageContent(page_title, space_key)
if response.json()['size'] > 0:
print('update page %s' % page_title)
page_id = response.json()['results'][0]['id']
current_version = response.json()['results'][0]['version']['number']
data = {
'id': str(page_id),
'type': 'page',
'title': page_title,
'space': {'key': space_key},
'version': {'number': current_version + 1},
'body': {
'storage':
{
'value': str(page_content),
'representation': 'storage',
}
}
}
data_to_send = json.dumps(data).encode("utf-8")
response = requests.put('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s' % page_id,
headers={'Content-Type': 'application/json'}, data=data_to_send, auth=(user, pw))
if response.status_code == requests.codes['ok']:
print("View page at %s" % response.url)
else:
print('add page %s' % page_title)
response = requests.get('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content?spaceKey=%s&title=%s' %
(space_key, parentTitle), auth=(user, pw))
parent_id = response.json()['results'][0]['id']
data = {
'type': 'page',
'title': page_title,
"ancestors": [{"id": parent_id}],
'space': {'key': space_key},
'body': {
'storage':
{
'value': str(page_content),
'representation': 'storage',
}
}
}
data_to_send = json.dumps(data).encode("utf-8")
response = requests.post('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/',
headers={'Content-Type': 'application/json'}, data=data_to_send, auth=(user, pw))
if response.status_code == requests.codes['ok']:
print("View page at %s" % response.url)
def getPageContent(pageTitle, space_key):
response = requests.get('http://mobilerndhub.sec.samsung.net/wiki/rest/api/content?spaceKey=%s&title=%s&'
'expand=space,body.view,version,container' % (space_key, pageTitle), auth=(user, pw))
if not response.status_code == requests.codes['ok']:
print("Cannot get content of page: " + pageTitle)
sys.exit(1)
return response
def getListSingleID(data):
"""
:param data: table data
:return: list mysingle to chart group
"""
list_id = []
index = data[0].index('Owner')
for i in data:
list_id.append(i[index])
del (list_id[0])
return list_id
def makeLinkChat(mySingleId):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % mySingleId, mySingleId)
def makeLinkNameChat(mySingleId, name_member):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % mySingleId, name_member)
def makeLinkChatGroup(listID):
"""Returns <a> tag with href from single ID"""
strListID = ""
for i in range(0, len(listID)):
strListID += str(listID[i]) + ';'
info_link = "mysingleim://%s"
return r"<a target='_blank' style='font-size: 12px; font-style: normal;' target='_blank' href='%s'>%s</a>" % (
info_link % strListID, "<br />Chat")
def makeLinkPLM(PLMCaseCode):
"""Returns <a> tag with href from mysingleID"""
return "<a target='_blank' href='http://splm.sec.samsung.net/wl/tqm/defect/defectreg/getDefectCodeSearch.do?defectCode=%s'>%s</a>" % (
PLMCaseCode, PLMCaseCode)
def make_link_chat(single_id, text):
"""Returns <a> tag with href from single ID"""
info_link = "mysingleim://%s"
return r"<a target='_blank' href='%s'>%s</a>" % (info_link % single_id, text)
def make_link_jira(jira_key):
jira_link = r"http://mobilerndhub.sec.samsung.net/its/browse/%s"
return r"<a target='_blank' href='%s'>%s</a>" % (jira_link % jira_key, jira_key)
def make_link_jira_with_summary(jira_key, text):
jira_link = r"http://mobilerndhub.sec.samsung.net/its/browse/%s"
return r"<a target='_blank' href='%s'>%s</a>" % (jira_link % jira_key, text)
def make_img_jira(link):
return r"<img src='%s' class='icon'>" % link
def make_status_jira(text):
if text.lower() == 'new':
return r"<span class='aui-lozenge aui-lozenge-subtle aui-lozenge-complete'>%s</span>" % text
else:
return r"<span class='aui-lozenge aui-lozenge-subtle aui-lozenge-current'>%s</span>" % text
def create_isssue_owner(owner_list):
html = "<head> \n </head> \n <body> \n <div> \n <p>"
for i in owner_list:
key = get_user_key(i)
html += '<ac:link><ri:user ri:userkey="%s" /></ac:link>' % key
html += ", "
html += "</p> \n </div> \n </body>"
return html
def check_time_update():
response = getPageContent(dailyPageTitle, space_key)
page_key = response.json()['results'][0]['id']
response = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s/history" % str(page_key),
auth=(user, pw))
time_update = response.json()['lastUpdated']['when'][:19] # %Y-%m-%dT%H:%M:%S
    datetime_update = datetime.strptime(time_update, "%Y-%m-%dT%H:%M:%S") - timedelta(hours=2) # HQ is 2 hours ahead of VN
print("latest time update page: %s" % datetime_update.strftime("%H:%M %d-%m-%Y"))
return datetime_update
def get_updated_date(pageTitle):
response = getPageContent(pageTitle, space_key)
page_key = response.json()['results'][0]['id']
response = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/content/%s/history" % str(page_key),
auth=(user, pw))
return response.json()['lastUpdated']['when'][:10] # YYYY-MM-DD
def get_user_key(user_name):
request_data = requests.get("http://mobilerndhub.sec.samsung.net/wiki/rest/api/user?username=%s" % user_name,
auth=(user, pw))
return request_data.json()['userKey']
def get_all_data_jira_task_list(project_key):
# Query data with in 3 month
jql_query = "project = %s and status not in (resolved, cancelled) and created > startOfMonth(-2) order by " \
"created desc" % project_key
max_result = 1000
params = {
"jql": jql_query,
"startAt": 0,
"maxResults": max_result,
"fields": [
"key",
"summary",
"issuetype",
"created",
"duedate",
"resolutiondate",
"assignee",
"priority",
"status"
]
}
url_query = 'http://mobilerndhub.sec.samsung.net/its/rest/api/2/search'
data_task_list_json = requests.get(url_query, params=params, auth=(user, pw))
list_all_task = json.loads(data_task_list_json.text)
return list_all_task['issues']
def convert_date_time(date_time):
date_time = datetime.strptime(date_time, "%Y-%m-%d").date()
return date_time
def get_data_jira_task_list_by_team(all_data_jira_task_list, member_id_list):
num_of_jira_task_by_team = {}
info_detail_jira_task = []
data_jira_task_for_pie_chart = [["", 'Jira Tasks'], ['Done', 0], ['NEW', 0], ["In Progress", 0]]
list_all_member = []
for team, member_of_team in member_id_list.items():
num_of_jira_task_by_team[team] = [0, 0] # [open, in progress]
list_all_member += member_of_team
number_of_jira_task_by_member = {key: 0 for key in list_all_member}
for task_info in all_data_jira_task_list:
summary = task_info['fields']['summary']
if not summary.startswith('[Automatic]'):
due_date = task_info['fields']['duedate']
created = task_info['fields']['created'][:10]
resolve_date = task_info['fields']['resolutiondate']
if resolve_date is None:
resolve_date = ''
else:
resolve_date = convert_date_time(resolve_date[:10])
if due_date is None:
due_date = ''
# else:
# due_date = convert_date_time(due_date)
single_id = task_info['fields']['assignee']['key']
team = ""
status_jira = task_info['fields']['status']['name'].lower()
if status_jira == 'in progress':
data_jira_task_for_pie_chart[3][1] += 1
elif status_jira == 'new':
data_jira_task_for_pie_chart[2][1] += 1
else:
data_jira_task_for_pie_chart[1][1] += 1
if status_jira == 'done' and resolve_date == date.today():
# include jira task resolve to day
number_of_jira_task_by_member[single_id] += 1
if status_jira == 'in progress' or status_jira == 'new':
try:
number_of_jira_task_by_member[single_id] += 1
except KeyError:
number_of_jira_task_by_member[single_id] = 1
for key, value in member_id_list.items():
if single_id in value:
team = key
if status_jira == 'in progress':
num_of_jira_task_by_team[key][1] = num_of_jira_task_by_team[key][1] + 1
elif status_jira == 'new':
num_of_jira_task_by_team[key][0] = num_of_jira_task_by_team[key][0] + 1
break
info = [
make_link_jira(task_info['key']),
summary,
make_img_jira(task_info['fields']['issuetype']['iconUrl']),
created,
due_date,
make_link_chat(single_id, task_info['fields']['assignee']['displayName']),
team,
make_img_jira(task_info['fields']['priority']['iconUrl']),
make_status_jira(task_info['fields']['status']['name'])
]
info_detail_jira_task.append(info)
data_chart_pie_jira = 'var dataChartPieJira = ' + str(data_jira_task_for_pie_chart) + '; \n'
return num_of_jira_task_by_team, info_detail_jira_task, number_of_jira_task_by_member, data_chart_pie_jira
|
hoangdt9/hoang
|
WikiSubmit.py
|
WikiSubmit.py
|
py
| 11,061 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.open_file",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "utils.open_file",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.put",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 268,
"usage_type": "name"
}
] |
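submitToWiki above follows Confluence's optimistic-versioning flow: read the page, then resend the body with the version number incremented. A condensed sketch of just that cycle, with hypothetical credentials and a title taken from the script:
import json
import requests
WIKI = "http://mobilerndhub.sec.samsung.net/wiki/rest/api/content"
auth = ("user", "password")  # hypothetical credentials
page = requests.get(WIKI, params={"spaceKey": "SVMC", "title": "Issue Status Tool",
                                  "expand": "version"}, auth=auth).json()["results"][0]
payload = {
    "id": page["id"], "type": "page", "title": page["title"],
    "space": {"key": "SVMC"},
    "version": {"number": page["version"]["number"] + 1},
    "body": {"storage": {"value": "<p>updated</p>", "representation": "storage"}},
}
requests.put(WIKI + "/" + page["id"], data=json.dumps(payload),
             headers={"Content-Type": "application/json"}, auth=auth)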
70827770107
|
from selenium import webdriver
from datetime import datetime
import boto3
import os
import time
now = datetime.now()
folder_name = now.strftime("%Y%m%d")
image_name = "traffic_" + now.strftime("%Y%m%d") + "-" + now.strftime("%H-%M") + ".png"
Bucket_name = "googletrafficmap"
prefix = folder_name + "/"
#Get map snapshot
driver = webdriver.PhantomJS(service_log_path=os.path.devnull)
driver.set_window_size(1920, 1080) # set the window size that you need
driver.get('http://googletrafficmap.s3-website.ca-central-1.amazonaws.com')
# driver.save_screenshot(folder_name + "/" + image_name)
screenshotPNG = driver.get_screenshot_as_png() #Get screenshot in binary data
#Create low-client connection
client = boto3.client('s3')
#Uploading image to s3 bucket and creating folder structure at the same time
client.put_object(
Bucket = Bucket_name,
Body = screenshotPNG,
Key = folder_name + "/" + image_name
)
time.sleep(60)
driver.close()
driver.quit()
|
nathan36/GoogleTrafficMap-GIF
|
GoogleTrafficMap-GIF/saveImage.py
|
saveImage.py
|
py
| 993 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.PhantomJS",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "boto3.client",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
}
] |
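The script above only captures and uploads single frames; the repo name suggests they are later stitched into a GIF. A hedged sketch of that assembly step with Pillow, assuming the day's PNGs have been downloaded into a local folder:
import glob
from PIL import Image
frames = [Image.open(p) for p in sorted(glob.glob("20230101/traffic_*.png"))]  # hypothetical local copies
if frames:
    frames[0].save("traffic.gif", save_all=True, append_images=frames[1:],
                   duration=200, loop=0)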
9695092385
|
#!/bin/python3
import datetime
import json
import threading
import time
import turtle
import sys
from urllib import request
from collections import namedtuple
class ISS():
def __init__(self):
self.is_instance = True
self._astronauts_url = 'http://api.open-notify.org/astros.json'
self._location_url = 'http://api.open-notify.org/iss-now.json'
self._location_tuple = namedtuple(
'Location', ['latitude', 'longitude'])
self._location()
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
self.is_instance = False
def __repr__(self):
return (f'{self.__class__.__name__}:\n\tTimestamp:{self._update_timestamp}\n\tLocation:{self.location}\n\tPeople: {self.people_in_space}')
def _get_page(self, url):
response = request.urlopen(url)
result = json.loads(response.read())
return result
def _location(self):
result = self._get_page(self._location_url)
self.location = self._location_tuple(result['iss_position']['latitude'],
result['iss_position']['longitude'])
self._update_timestamp = result['timestamp']
@property
def people_in_space(self):
result = self._get_page(self._astronauts_url)
return [people['name'] for people in result['people']]
class Tracker(ISS):
def __init__(self):
super().__init__()
self._bgpic = 'images/map.gif'
self._shape = 'images/iss2.gif'
self._screen = turtle.Screen()
self._screen.title('Python ISS Tracker')
self._screen.setup(width=720, height=360)
self._screen.setworldcoordinates(-180, -90, 180, 90)
self._screen.bgpic(self._bgpic)
self._screen.register_shape(self._shape)
self._screen.onscreenclick(self.update_turtle_location, btn=1)
self._tracker = turtle.Turtle()
self._tracker.shape(self._shape)
self._tracker.setheading(90)
def update_turtle_location(self, *args):
self._location()
self._tracker.penup()
        self._tracker.goto(float(self.location.longitude), float(self.location.latitude))  # turtle x = lon, y = lat
# Debug
print(self.__repr__())
if __name__ == '__main__':
try:
with Tracker() as iss:
iss.update_turtle_location()
turtle.mainloop()
except KeyboardInterrupt:
sys.exit(0)
# # http://open-notify.org/Open-Notify-API/
# url = 'http://api.open-notify.org/astros.json'
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# print('People in Space: ', result['number'])
# people = result['people']
# for p in people:
# print(p['name'], ' in ', p['craft'])
# url = 'http://api.open-notify.org/iss-now.json'
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# location = result['iss_position']
# lat = float(location['latitude'])
# lon = float(location['longitude'])
# print('Latitude: ', lat)
# print('Longitude: ', lon)
# screen = turtle.Screen()
# screen.setup(720, 360)
# screen.setworldcoordinates(-180, -90, 180, 90)
# screen.bgpic('map.gif')
# screen = turtle.Screen()
# screen.setup(720, 360)
# screen.setworldcoordinates(-180, -90, 180, 90)
# # image source:
# # map.jpg: http://visibleearth.nasa.gov/view.php?id=57752 Credit: NASA
# screen.bgpic('map.gif')
# screen.register_shape('iss2.gif')
# iss = turtle.Turtle()
# iss.shape('iss2.gif')
# iss.setheading(90)
# iss.penup()
# iss.goto(lon, lat)
# # When Does ISS next pass over me?
# #london
# #lat = 51.5072
# #lon = 0.1275
# # Tokyo
# #lat = 35.689487
# #lon = 139.691706
# # Space Center, Houston
# lat = 29.5502
# lon = -95.097
# location = turtle.Turtle()
# location.penup()
# location.color('yellow')
# location.goto(lon, lat)
# location.dot(5)
# location.hideturtle()
# url = 'http://api.open-notify.org/iss-pass.json?lat=' + \
# str(lat) + '&lon=' + str(lon)
# response = urllib.request.urlopen(url)
# result = json.loads(response.read())
# #print result
# over = result['response'][1]['risetime']
# location.write(time.ctime(over))
|
mattbhenley/ISS_Locator
|
locator.py
|
locator.py
|
py
| 4,144 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.namedtuple",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "turtle.Screen",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "turtle.Turtle",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "turtle.mainloop",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 86,
"usage_type": "call"
}
] |
35168238376
|
from serpent.game_agent import GameAgent
from serpent.input_controller import KeyboardKey
import offshoot
class SerpentSuperHexagonGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers["PLAY"] = self.handle_play
self.frame_handler_setups["PLAY"] = self.setup_play
self.analytics_client = None
# Context classifier setup
plugin_path = offshoot.config["file_paths"]["plugins"]
context_classifier_path = "datasets/context_classifier.model"
from serpent.machine_learning.context_classification.context_classifiers.cnn_inception_v3_context_classifier import \
CNNInceptionV3ContextClassifier
context_classifier = CNNInceptionV3ContextClassifier(
input_shape=(240, 384, 3)) # Replace with the shape (rows, cols, channels) of your captured context frames
context_classifier.prepare_generators()
context_classifier.load_classifier(context_classifier_path)
self.machine_learning_models["context_classifier"] = context_classifier
def setup_play(self):
# self.input_controller.tap_key(KeyboardKey.KEY_SPACE)
pass
def handle_play(self, game_frame):
# for i, game_frame in enumerate(self.game_frame_buffer.frames):
# self.visual_debugger.store_image_data(
# game_frame.frame,
# game_frame.frame.shape,
# str(i)
# )
# self.input_controller.tap_key(KeyboardKey.KEY_RIGHT)
context = self.machine_learning_models["context_classifier"].predict(game_frame.frame)
print("Context:", context)
|
cameron-j-knight/General-AI
|
plugins/SerpentSuperHexagonGameAgentPlugin/files/serpent_SuperHexagon_game_agent.py
|
serpent_SuperHexagon_game_agent.py
|
py
| 1,675 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "serpent.game_agent.GameAgent",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "offshoot.config",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "serpent.machine_learning.context_classification.context_classifiers.cnn_inception_v3_context_classifier.CNNInceptionV3ContextClassifier",
"line_number": 23,
"usage_type": "call"
}
] |
37169098035
|
__author__ = "Moath Maharmeh"
__license__ = "GNU General Public License v2.0"
__version__ = "1.1"
__email__ = "[email protected]"
__created__ = "13/Dec/2018"
__modified__ = "5/Apr/2019"
__project_page__ = "https://github.com/iomoath/file_watchtower"
import sqlite3
import os
import csv
DEFAULT_PATH = os.path.join(os.path.dirname(__file__), 'database.sqlite3')
def get_db_path():
global DEFAULT_PATH
return DEFAULT_PATH
def db_connect(db_path=DEFAULT_PATH):
con = sqlite3.connect(db_path)
return con
def create_tables():
file_record_query = """
CREATE TABLE IF NOT EXISTS file_record (
id INTEGER PRIMARY KEY AUTOINCREMENT,
file_path TEXT NOT NULL UNIQUE,
hash TEXT NOT NULL,
file_size TEXT NOT NULL,
exists_on_disk varchar(6) NOT NULL,
datetime_last_check TEXT NOT NULL)"""
email_msg_query = """
CREATE TABLE IF NOT EXISTS email_msg (
id INTEGER PRIMARY KEY,
subject TEXT NOT NULL,
body TEXT NOT NULL,
attachment TEXT,
is_sent VARCHAR(6) DEFAULT 'False')"""
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute(file_record_query)
cursor.execute(email_msg_query)
except:
pass
finally:
conn.commit()
conn.close()
def insert_file_record(file_record_dict):
conn = db_connect()
try:
cursor = conn.cursor()
query = """
INSERT INTO file_record (file_path, hash, file_size, exists_on_disk, datetime_last_check)
VALUES (?, ?, ?, ?, ?)"""
cursor.execute(query,
(file_record_dict["path"], file_record_dict["hash"], file_record_dict["file_size"],
file_record_dict["exists_on_disk"], file_record_dict["datetime_last_check"]))
return cursor.lastrowid
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_exists_on_disk_value(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT exists_on_disk FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_exists_on_disk_value_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT exists_on_disk FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def update_exists_on_disk_value(file_path, new_value):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET exists_on_disk =? WHERE file_path =?"""
cursor.execute(query, (new_value, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_exists_on_disk_value_by_hash(file_hash, new_value):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET exists_on_disk =? WHERE hash =?"""
cursor.execute(query, (new_value, file_hash,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_file_last_check(file_path, new_datetime_check):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET datetime_last_check =? WHERE file_path =?"""
cursor.execute(query, (new_datetime_check, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def update_file_path(file_hash, old_path, new_path):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET file_path =? WHERE hash =? and file_path=?"""
cursor.execute(query, (new_path, file_hash, old_path))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_file_records(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM file_record WHERE file_path=?", (file_path,))
rows = cursor.fetchall()
return rows
except IndexError:
return None
finally:
conn.commit()
conn.close()
def get_file_records_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM file_record WHERE hash=?", (file_hash,))
rows = cursor.fetchall()
return rows
except Exception:
conn.rollback()
raise
finally:
conn.close()
def get_all_file_paths():
# returns all files paths
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_path FROM file_record")
rows = cursor.fetchall()
path_list = []
for row in rows:
path_list.append(row[0])
return path_list
except:
conn.rollback()
finally:
conn.close()
def get_file_hash(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT hash FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_file_path_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_path FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def is_file_has_record_by_path(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT id FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return len(rows) > 0
except:
conn.rollback()
return False
finally:
conn.close()
def is_file_has_record_by_hash(hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT id FROM file_record WHERE hash=? LIMIT 1", (hash,))
rows = cursor.fetchall()
return len(rows) > 0
except:
conn.rollback()
finally:
conn.close()
def get_file_size(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_size FROM file_record WHERE file_path=? LIMIT 1", (file_path,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def get_file_size_by_hash(file_hash):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT file_size FROM file_record WHERE hash=? LIMIT 1", (file_hash,))
rows = cursor.fetchall()
return rows[0][0]
except IndexError:
return None
finally:
conn.close()
def update_file_hash(file_path, new_hash):
conn = db_connect()
try:
cursor = conn.cursor()
query = """UPDATE file_record SET hash =? WHERE file_path =?"""
cursor.execute(query, (new_hash, file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def delete_file_record(file_path):
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM file_record WHERE file_path=?"""
cursor.execute(query, (file_path,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def insert_email_msg(email_msg_dict):
conn = db_connect()
try:
cursor = conn.cursor()
query = """
INSERT INTO email_msg (subject, body, attachment)
VALUES (?, ?, ?)"""
cursor.execute(query,
(
email_msg_dict["subject"],
email_msg_dict["body"],
email_msg_dict["attachment"]))
return cursor.lastrowid
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def delete_msg(msg_id):
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM email_msg WHERE id=?"""
cursor.execute(query, (msg_id,))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def get_unsent_messages():
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute("SELECT * FROM email_msg WHERE is_sent='False'")
rows = cursor.fetchall()
list_messages = []
for row in rows:
msg = {
"id": row[0],
"subject": row[1],
"body": row[2],
"attachments": row[3],
"is_sent": row[4]
}
list_messages.append(msg)
return list_messages
except:
conn.rollback()
raise
finally:
conn.close()
def delete_sent_messages():
conn = db_connect()
try:
cursor = conn.cursor()
query = """DELETE FROM email_msg WHERE is_sent=?"""
cursor.execute(query, ("True",))
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
def dump_file_records_to_csv(export_path):
conn = db_connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM file_record')
with open(export_path, 'w') as out_csv_file:
csv_out = csv.writer(out_csv_file)
# write header
csv_out.writerow([d[0] for d in cursor.description])
# write data
for result in cursor:
csv_out.writerow(result)
except:
conn.rollback()
raise
finally:
conn.close()
def delete_all_data():
conn = db_connect()
try:
cursor = conn.cursor()
query1 = """DELETE FROM email_msg"""
query2 = """DELETE FROM file_record"""
cursor.execute(query1, )
cursor.execute(query2, )
return cursor.rowcount
except:
conn.rollback()
raise
finally:
conn.commit()
conn.close()
# Initialize the database; if the db file or tables are missing, they are created here
create_tables()
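# Usage sketch (field names mirror what insert_file_record expects; the
# values here are made-up examples):
# insert_file_record({"path": "/tmp/a.txt", "hash": "abc123", "file_size": "42",
#                     "exists_on_disk": "True", "datetime_last_check": "2019-04-05 12:00:00"})
# print(get_file_hash("/tmp/a.txt"))  # -> "abc123"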
|
iomoath/file_watchtower
|
db.py
|
db.py
|
py
| 10,875 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 410,
"usage_type": "call"
}
] |
15932158711
|
from ..exceptions import HydraError, ResourceNotFoundError
from . import scenario, network
from .. import db
from ..db.model import ResourceGroup, ResourceGroupItem, Node, Link
from .scenario import _get_scenario
from sqlalchemy.orm.exc import NoResultFound
import logging
log = logging.getLogger(__name__)
def _get_group(group_id):
try:
return db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_id).one()
except NoResultFound:
raise ResourceNotFoundError("ResourceGroup %s not found"%(group_id,))
def _get_item(item_id):
try:
item = db.DBSession.query(ResourceGroupItem).filter(ResourceGroupItem.id==item_id).one()
return item
except NoResultFound:
raise ResourceNotFoundError("ResourceGroupItem %s not found"%(item_id,))
def add_resourcegroup(group, network_id,**kwargs):
"""
Add a new group to a network.
"""
group_i = ResourceGroup()
group_i.name = group.name
group_i.description = group.description
group_i.status = group.status
group_i.network_id = network_id
db.DBSession.add(group_i)
db.DBSession.flush()
return group_i
def delete_resourcegroup(group_id, purge_data='N', **kwargs):
"""
Delete a group from a network.
"""
group_i = _get_group(group_id)
if purge_data == 'Y':
network._purge_datasets_unique_to_resource('GROUP', group_id)
#This should cascade to delete all the group items.
db.DBSession.delete(group_i)
db.DBSession.flush()
return 'OK'
def update_resourcegroup(group,**kwargs):
"""
Update an existing group.
"""
group_i = _get_group(group.id)
group_i.name = group.name
group_i.description = group.description
group_i.status = group.status
db.DBSession.flush()
return group_i
def add_resourcegroupitem(group_item, scenario_id,**kwargs):
_get_scenario(scenario_id, kwargs['user_id'], check_can_edit=True)
#Check whether the ref_id is correct.
if group_item.ref_key == 'NODE':
try:
db.DBSession.query(Node).filter(Node.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Node group item!"%(group_item.ref_id))
elif group_item.ref_key == 'LINK':
try:
db.DBSession.query(Link).filter(Link.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Link group item!"%(group_item.ref_id))
elif group_item.ref_key == 'GROUP':
try:
db.DBSession.query(ResourceGroup).filter(ResourceGroup.id==group_item.ref_id).one()
except NoResultFound:
raise HydraError("Invalid ref ID %s for a Group group item!"%(group_item.ref_id))
else:
raise HydraError("Invalid ref key: %s"%(group_item.ref_key))
group_item_i = ResourceGroupItem()
group_item_i.scenario_id = scenario_id
group_item_i.group_id = group_item.group_id
group_item_i.ref_key = group_item.ref_key
if group_item.ref_key == 'NODE':
group_item_i.node_id = group_item.ref_id
elif group_item.ref_key == 'LINK':
group_item_i.link_id = group_item.ref_id
elif group_item.ref_key == 'GROUP':
group_item_i.subgroup_id = group_item.ref_id
db.DBSession.add(group_item_i)
db.DBSession.flush()
return group_item_i
def delete_resourcegroupitem(item_id,**kwargs):
group_item_i = _get_item(item_id)
_get_scenario(group_item_i.scenario_id, kwargs['user_id'], check_can_edit=True)
db.DBSession.delete(group_item_i)
db.DBSession.flush()
return 'OK'
|
hydraplatform/hydra-base
|
hydra_base/lib/groups.py
|
groups.py
|
py
| 3,757 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db.model.ResourceGroup",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "db.model.DBSession",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "db.model.ResourceGroup.id",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "exceptions.ResourceNotFoundError",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "db.model.ResourceGroupItem",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "db.model.DBSession",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "db.model.ResourceGroupItem.id",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "exceptions.ResourceNotFoundError",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "db.model.ResourceGroup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.add",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.flush",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.delete",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.flush",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.flush",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "scenario._get_scenario",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "db.model.Node",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "db.model.DBSession",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "db.model.Node.id",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "exceptions.HydraError",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "db.model.Link",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "db.model.DBSession",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "db.model.Link.id",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "exceptions.HydraError",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.query",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "db.model.ResourceGroup",
"line_number": 85,
"usage_type": "argument"
},
{
"api_name": "db.model.DBSession",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "db.model.ResourceGroup.id",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.orm.exc.NoResultFound",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "exceptions.HydraError",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "exceptions.HydraError",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "db.model.ResourceGroupItem",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.add",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.flush",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "scenario._get_scenario",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession.delete",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "db.model.DBSession.flush",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "db.model.DBSession",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "db.model",
"line_number": 113,
"usage_type": "name"
}
] |
30791686316
|
from numba import *
from numba import error
#@autojit
def func():
if x:
print("hello")
else:
print("world")
def compile_func1():
try:
jit(void())(func)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ = """
>>> compile_func1()
--------------------- Numba Encountered Errors or Warnings ---------------------
if x:
-------^
Error 6:7: No global named 'x'
--------------------------------------------------------------------------------
exception: 6:7: No global named 'x'
"""
#@autojit
def func2():
print(10[20])
def compile_func2():
try:
jit(void())(func2)
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """>>> compile_func2()
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error 29:10: object of type int cannot be indexed
--------------------------------------------------------------------------------
exception: 29:10: object of type int cannot be indexed
"""
@autojit # this often messes up line numbers
def func_decorated():
print(10[20])
def compile_func3():
try:
func_decorated()
except error.NumbaError as e:
print("exception: %s" % e)
__doc__ += """
>>> compile_func3()
--------------------- Numba Encountered Errors or Warnings ---------------------
print(10[20])
----------^
Error 48:10: object of type int cannot be indexed
--------------------------------------------------------------------------------
exception: 48:10: object of type int cannot be indexed
"""
if __name__ == '__main__':
import numba
numba.testmod()
|
garrison/numba
|
numba/tests/test_reporting.py
|
test_reporting.py
|
py
| 1,664 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "numba.error.NumbaError",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numba.error",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numba.error.NumbaError",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numba.error",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "numba.error.NumbaError",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numba.error",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numba.testmod",
"line_number": 68,
"usage_type": "call"
}
] |
74637081787
|
import socket
import time
from PyQt5.QtCore import QTimer, QThread
import queue
import logging
import pyaudio
import threading
logging.basicConfig(format="%(message)s", level=logging.INFO)
class AudioRec(QThread):
def __init__(self, threadChat):
super().__init__()
self.threadChat = threadChat
self.host_name = socket.gethostname()
self.host_ip = socket.gethostbyname(self.host_name)
# self.host_ip = '127.0.0.1'
self.port = 9634
self.socket_address = (self.host_ip, self.port)
# a maxsize of 100 would be ideal but currently lags with video;
# the server's VideoGen must send frames and the client must sync
# them using audio and frame timestamps
self.q = queue.Queue(maxsize=5)
self.BUFF_SIZE = 65536
self.audio_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.audio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.BUFF_SIZE)
self.audio_socket.bind(self.socket_address)
self.p = pyaudio.PyAudio()
self.CHUNK = 1024
self.stream = self.p.open(format=self.p.get_format_from_width(2),
channels=2,
rate=44100,
output=True,
frames_per_buffer=self.CHUNK)
self.timer = QTimer()
self.timer.timeout.connect(self.play_audio)
self.timer.start(1000 * 0.8 * self.CHUNK / 44100)
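# Interval = 80% of one chunk's playback time in ms, so the timer fires
# slightly faster than audio is consumed and the queue stays drained.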
t1 = threading.Thread(target=self.get_audio_data, args=())
t1.start()
print('Listening for audio...')
def get_audio_data(self):
while self.threadChat.nickname == "":
# wait for the chat thread to log in before consuming audio,
# without burning a CPU core in a busy loop
time.sleep(0.1)
while True:
try:
self.frame, _ = self.audio_socket.recvfrom(self.BUFF_SIZE)
self.q.put(self.frame)
except BlockingIOError:
pass
except Exception as e:
logging.error(e)
def play_audio(self):
if not self.q.empty():
frame = self.q.get()
self.stream.write(frame)
|
shully899509/OpenParty
|
app/client/ClientAudio.py
|
ClientAudio.py
|
py
| 2,191 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "socket.gethostname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_DGRAM",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "socket.SOL_SOCKET",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "socket.SO_RCVBUF",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QTimer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 62,
"usage_type": "call"
}
] |
70713803388
|
import re
import os
import sys
import time
import json
import torch
import wandb
import random
import datasets
import evaluate
import numpy as np
import transformers
from accelerate import Accelerator
from accelerate.utils import set_seed
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, DefaultDataCollator, AutoModelForSequenceClassification
set_seed(42)
MODEL_NAME = str(sys.argv[1])
MIXED_PRECISION = str(sys.argv[2])
def prepare_dataset(data_folder, label2id, data_types):
def combine_data(example):
temp_text = ""
for data_type in data_types:
temp_text += example[data_type] + " "
example["text"] = temp_text
return example
dataset = datasets.load_from_disk(data_folder + "dataset/")
dataset = dataset["train"]
dataset_encoded = dataset.class_encode_column("category")
dataset_aligned = dataset_encoded.align_labels_with_mapping(label2id, "category")
dataset = dataset_aligned.map(combine_data, remove_columns=["title", "body"])
dataset = dataset.rename_column("category", "label")
return dataset
def main():
def preprocess_function(examples):
return tokenizer(examples["text"], truncation=True, padding='max_length', max_length=hps["max_length"], return_tensors='pt')
models = {"bert": "bert-base-uncased", "distilbert": "distilbert-base-uncased", "tinybert": "huawei-noah/TinyBERT_General_4L_312D"}
hps = {
"batch_size": 32,
"gradient_accumulation_steps": 2,
"learning_rate": 2e-5,
"data_types": ["title", "body"],
"model_name": models[MODEL_NAME],
"num_epochs": 3,
"max_length": 256,
"weight_decay": 0.01,
"num_warmup_steps": 0.2,
"mixed_precision": MIXED_PRECISION,
"split_batches": True,
}
wandb_id = wandb.util.generate_id()
accelerator = Accelerator(log_with="wandb", gradient_accumulation_steps=hps["gradient_accumulation_steps"], split_batches=hps["split_batches"], mixed_precision=hps["mixed_precision"])
accelerator.init_trackers(
project_name="DMOZ-classification",
config=hps,
init_kwargs={"wandb": {
"name": MODEL_NAME.upper() + "_DMOZ_" + str(wandb_id),
"job_type": "training",
"group": str(wandb_id),
"tags": [MODEL_NAME.upper(), "DMOZ"],
}
},
)
data_folder = str(sys.argv[3])
id2label = {0: "Arts", 1: "Business", 2: "Computers", 3: "Health", 4: "Home", 5: "News", 6: "Recreation", 7: "Reference", 8: "Science", 9: "Shopping", 10: "Society", 11: "Sports", 12: "Games"}
label2id = {v: k for k, v in id2label.items()}
labels = label2id.keys()
dataset = prepare_dataset(data_folder, label2id, hps["data_types"])
tokenizer = AutoTokenizer.from_pretrained(hps["model_name"])
data_collator = DefaultDataCollator()
tokenized_data = dataset.map(preprocess_function, batched=True)
tokenized_data = tokenized_data.remove_columns("text")
train_dataloader = DataLoader(
tokenized_data,
shuffle=True,
batch_size=hps["batch_size"],
collate_fn=data_collator,
drop_last=True,
)
model = AutoModelForSequenceClassification.from_pretrained(
hps["model_name"],
num_labels=len(labels),
id2label=id2label, label2id=label2id,
)
optimizer = torch.optim.AdamW(
model.parameters(),
lr=(hps["learning_rate"] * accelerator.num_processes),
weight_decay=hps["weight_decay"],
eps=1e-8,
)
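# Linear LR scaling: the base rate is multiplied by the number of
# processes so the effective rate tracks the global batch size under
# data parallelism (a common heuristic, not something accelerate requires).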
num_training_steps = hps["num_epochs"] * len(tokenized_data)
num_warmup_steps = int(hps["num_warmup_steps"] * len(train_dataloader))
lr_scheduler = transformers.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps
)
train_dataloader, model, optimizer, lr_scheduler = accelerator.prepare(train_dataloader, model, optimizer, lr_scheduler)
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
accuracy = evaluate.load("accuracy")
model.train()
starter.record()
for epoch in range(hps["num_epochs"]):
for idx, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
logits = outputs.logits
accelerator.backward(loss)
predictions = logits.argmax(dim=-1)
accelerator.log({"batch/batch_step": idx, "batch/loss": loss, "batch/accuracy": accuracy.compute(predictions=predictions, references=batch["labels"])["accuracy"]})
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
ender.record()
torch.cuda.synchronize()
training_time = starter.elapsed_time(ender)
accelerator.log({"train": {"train_time": training_time}})
# Saving model
accelerator.wait_for_everyone()
model = accelerator.unwrap_model(model)
state_dict = model.state_dict()
filename = data_folder + "models/BERT/model.pt"
accelerator.save(state_dict, filename)
accelerator.end_training()
if accelerator.is_main_process:
wandb.init(
project="DMOZ-classification",
name="MODEL_" + str(wandb_id),
group=str(wandb_id),
job_type="model",
tags=["model"],
)
model_artifact = wandb.Artifact(
name="model_" + MODEL_NAME.upper() + "_DMOZ",
type="model"
)
model_artifact.add_file(filename)
wandb.log_artifact(model_artifact)
wandb.finish()
if __name__ == "__main__":
main()
|
JesseBrons/Webpageclassification
|
training/train_model_BERT.py
|
train_model_BERT.py
|
py
| 5,845 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "accelerate.utils.set_seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "datasets.load_from_disk",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "wandb.util.generate_id",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "wandb.util",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "accelerate.Accelerator",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "transformers.DefaultDataCollator",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForSequenceClassification.from_pretrained",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "transformers.AutoModelForSequenceClassification",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.optim.AdamW",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "transformers.get_linear_schedule_with_warmup",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.cuda.Event",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "evaluate.load",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "torch.cuda.synchronize",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "wandb.init",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "wandb.Artifact",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "wandb.log_artifact",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "wandb.finish",
"line_number": 170,
"usage_type": "call"
}
] |
44632720863
|
#!/usr/bin/env python
import sys
import shutil
from typing import Optional, List, Tuple, Dict
import typer
from rich import print
from rich.columns import Columns
from rich.console import Console
from rich.traceback import install
# fmt: off
# Mapping from topics to colors
TOPICS = {
"TIMR": "#9a9a99",
"VOTE": "#67a0b2",
"LEAD": "#d0b343",
"TERM": "#70c43f",
"LOG1": "#4878bc",
"LOG2": "#398280",
"CMIT": "#98719f",
"PERS": "#d08341",
"SNAP": "#FD971F",
"DROP": "#ff615c",
"CLNT": "#00813c",
"TEST": "#fe2c79",
"INFO": "#ffffff",
"WARN": "#d08341",
"ERRO": "#fe2626",
"TRCE": "#fe2626",
}
# fmt: on
def list_topics(value: Optional[str]):
if value is None:
return value
topics = value.split(",")
for topic in topics:
if topic not in TOPICS:
raise typer.BadParameter(f"topic {topic} not recognized")
return topics
def main(
file: typer.FileText = typer.Argument(None, help="File to read, stdin otherwise"),
colorize: bool = typer.Option(True, "--no-color"),
n_columns: Optional[int] = typer.Option(None, "--columns", "-c"),
ignore: Optional[str] = typer.Option(None, "--ignore", "-i", callback=list_topics),
just: Optional[str] = typer.Option(None, "--just", "-j", callback=list_topics),
):
topics = list(TOPICS)
# We can take input from stdin (pipes) or from a file
input_ = file if file else sys.stdin
# Print just some topics or exclude some topics (good for avoiding verbose ones)
if just:
topics = just
if ignore:
topics = [lvl for lvl in topics if lvl not in set(ignore)]
topics = set(topics)
console = Console()
width = console.size.width
panic = False
for line in input_:
try:
time, topic, *msg = line.strip().split(" ")
# To ignore some topics
if topic not in topics:
continue
msg = " ".join(msg)
# Debug calls from the test suite aren't associated with
# any particular peer. Otherwise we can treat second column
# as peer id
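# e.g. a line "001234 VOTE S1 requesting vote" splits into
# time="001234", topic="VOTE", msg="S1 requesting vote", so
# msg[1] ("1") is read as the peer id (format assumed from this
# parser, not documented elsewhere)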
if topic != "TEST":
i = int(msg[1])
# Colorize output by using rich syntax when needed
if colorize and topic in TOPICS:
color = TOPICS[topic]
msg = f"[{color}]{msg}[/{color}]"
# Single column printing. Always the case for debug stmts in tests
if n_columns is None or topic == "TEST":
print(time, msg)
# Multi column printing, timing is dropped to maximize horizontal
# space. Heavylifting is done through rich.column.Columns object
else:
cols = ["" for _ in range(n_columns)]
msg = "" + msg
cols[i] = msg
col_width = int(width / n_columns)
cols = Columns(cols, width=col_width - 1, equal=True, expand=True)
print(cols)
except:
# Code from tests or panics does not follow format
# so we print it as is
if line.startswith("panic"):
panic = True
# Output from tests is usually important so add a
# horizontal line with hashes to make it more obvious
if not panic:
print("#" * console.width)
print(line, end="")
if __name__ == "__main__":
typer.run(main)
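# Usage sketch (flags as defined above; the `go test` pipeline is just an
# illustrative producer of Raft log lines):
#   go test -run 2A 2>&1 | python dslogs.py -c 3 -j VOTE,LEAD
#   python dslogs.py trace.log -i TIMR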
|
fansehep/Raft_Key-Value
|
RAFT_6_824/src/raft/dslogs.py
|
dslogs.py
|
py
| 3,483 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typer.BadParameter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "typer.FileText",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "typer.Argument",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "typer.Option",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "rich.console.Console",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "rich.columns.Columns",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "rich.print",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "typer.run",
"line_number": 112,
"usage_type": "call"
}
] |
30109795593
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 10:30:52 2021
@author: X
"""
import json
import lz4.frame, lz4.block
import os
import copy
# The full path to the Firefox folders is:
# C:\Users\USERNAME\AppData\Roaming\Mozilla\Firefox\Profiles
# Each profile gets its own folder and from there, the bookmark files are saved
# in "bookmarksbackups".
def bookmarkbackups():
USERS=r"C:\Users"
users=os.listdir(USERS)
MOZAPPDATA=r"AppData\Roaming\Mozilla\Firefox\Profiles"
REMOVE=['All Users', 'Default', 'Default User', 'desktop.ini', 'Public']
rv=[]
for each in REMOVE:
users.remove(each)
for user in users:
for profile_folder in os.listdir(os.path.join(USERS,user,MOZAPPDATA)):
for bookmark_file in os.listdir(os.path.join(USERS,user,MOZAPPDATA,
profile_folder,"bookmarkbackups")):
rv.append(os.path.join(USERS,user,MOZAPPDATA,profile_folder,
"bookmarkbackups",bookmark_file))
return rv
def readfile(fn):
with open(fn,'rb') as fh:
return fh.read()
# The backup files are lz4 compressed and start with "mozLz40"
def readbookmarkfile(fn):
file_content=readfile(fn)
if file_content[0:8]==bytes("mozLz40\x00".encode('ascii')):
file_content = lz4.block.decompress(file_content[8:])
return json.loads(file_content)
def count_links(j,count=0):
if type(j)==dict:
if "children" in j:
for e in j["children"]:
count+=count_links(e)
return count
else:#if no children then it's a link
return 1
assert False
def count_and_validate_flatv(v):
count=0
for j in v:
if "children" in j:
for e in j["children"]:
if e["type"]!="text/x-moz-place": return False, count
count+=1
else:
assert False
return True,count
def grab_all_links(j,depth=0):
rv=[]
if "children" in j:
for e in j["children"]:
if e["type"]=="text/x-moz-place":
rv.append(e)
elif e["type"]=="text/x-moz-place-container":
rv.extend(grab_all_links(e,depth+1))
else:
assert False
return rv
def printkeys(j):
for k,v in j.items():
if k!="children":
print(k,"=",v,sep="")
else:
print(len(v),"children")
print()
def write_pretty(j,fn):
with open(fn, "w") as write_file:
json.dump(j, write_file, indent=4)
# I had a bug where if every item didn't have its own unique id it would fail
# to load in Firefox. I created this dictionary-making function to discover
# duplicate ids. In the end I just change all the ids in the big data structure
# rather than trying to keep track during the process of merging.
def id_dict(n,d):
id = n["id"]
if n["type"]=="text/x-moz-place":
if id in d:
d[id]+=1
else:
d[id]=1
elif n["type"]=="text/x-moz-place-container":
if id in d:
d[id]+=1
else:
d[id]=1
if "children" in n:
for sub in n["children"]:
id_dict(sub,d)
else:
assert False
def return_id_dict(n):
d={}
id_dict(n,d)
return d
def fix_all_ids(n,id=100):
n["id"]=id
id+=1
if "children" in n:
for sub in n["children"]:
id=fix_all_ids(sub,id)
return id
def remove_children(j):
rv={}
for k,v in j.items():
if k=="children": continue
rv[k]=v
return rv
def link_anywhere_in_rv(j,rv):
for folder in rv:
for link in folder["children"]:
if j["uri"]==link["uri"]:
return True
return False
# There are a few contradictory ideas here. It is possible to comment out the
# "if link_anywhere_in_rv()" check to only search folders with the same name.
# First it checks whether the link exists anywhere; leave that in to avoid
# duplicates. Then it looks for a place for the link to go: it looks for a
# matching folder name and compares all of that folder's links. If the folder
# name matches, it first checks the uris for a match; if the link is already in
# the folder it is skipped, otherwise the destination folder is returned.
# If the uri is unique it returns False, signaling to create a place for it.
def already_in_rv(link,title,rv):
if link_anywhere_in_rv(link,rv):
#print(link["title"])
return True
for i,j in enumerate(rv):
dest=None
if j["title"]==title:
dest = i
if "children" in j:
for sub in j["children"]:
if sub["uri"]==link["uri"]:
return True
if dest!=None:
return rv[dest]
return False
def merge_link_folder(link,folder,rv,idd):
assert link["type"]=="text/x-moz-place"
assert "children" not in link
assert folder["type"]=="text/x-moz-place-container"
assert type(rv)==list
b = already_in_rv(link,folder["title"],rv)
if b==False:
rv.append(remove_children(folder))
rv[-1]["children"]=[link]
elif type(b)==dict:
if "children" not in b:
b["children"]=[]
b["children"].append(link)
else:
assert b==True
def merge_link_folder_all(folder,rv,idd):
assert folder["type"]=="text/x-moz-place-container"
if "children" not in folder: return
for sub in folder["children"]:
if sub["type"]=="text/x-moz-place":
merge_link_folder(sub,folder,rv,idd)
elif sub["type"]=="text/x-moz-place-container":
merge_link_folder_all(sub,rv,idd)
else:
assert False
# mut is a name for the template structure that has a "menu" "unfiled" and
# "toolbar" folder. I actually later include "mobile" as well.
# This structure is the empty structure that I merge all the links into, since I
# don't want links to fall into those original folders but instead into
# alternate ones that are under the root menu folder
def build_mut():
mut=readbookmarkfile("empty_pretty.json")
for each in mut["children"][0]["children"]:
each["children"]=[]
return mut["children"][0]["children"]
def process_alts(first=None):
if first==None:
files=[]
else:
files=[first]
files.extend(bookmarkbackups())
rv=build_mut()
idd={}
for fn in files:
j=readbookmarkfile(fn)
if count_links(j)<10000:
merge_link_folder_all(j,rv,idd)
else:
print(fn)
return rv
def create_merged_json(first=None):
v=process_alts(first)
merged=readbookmarkfile("empty_pretty.json")
merged["children"][0]["children"]=v
print("count =",count_links(merged))
fix_all_ids(merged)
write_pretty(merged,"merged.json")
return merged
merged=create_merged_json(input("Primary bookmark file: "))
|
AndrewWigginCout/bookmarks
|
bookmarks.py
|
bookmarks.py
|
py
| 7,127 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "lz4.frame.block.decompress",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "lz4.frame.block",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "lz4.frame",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 79,
"usage_type": "call"
}
] |
73839717628
|
from asyncio import sleep, run
import os
import random
from dotenv import load_dotenv
import discord
from discord.ext import commands, tasks
import data
from table2ascii import table2ascii as t2a, PresetStyle
import asyncpg
from datetime import datetime, timedelta
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
intents = discord.Intents.all()
intents.members = True
bot = commands.Bot(command_prefix='!', intents=intents)
async def create_db_pool():
bot.db = await asyncpg.create_pool(dsn="postgres://postgres:database@localhost:5432/finance_bot")
print("connected to db")
@bot.event
async def on_ready():
print(f'{bot.user.name} has connected to Discord!')
@bot.command(name='curr_price', help='Get the current price of one or more stocks')
async def curr_price(ctx, *args):
ret_array = []
non_existent = []
for tag in args:
if data.ticker_exists(str(tag)):
ret_array.append([str(tag), f"${round(data.current_price(str(tag)), 2)}"])
else:
non_existent.append(str(tag))
output = t2a(
header=["Ticker", "Price"],
body=[arr for arr in ret_array],
style=PresetStyle.thin_compact
)
await ctx.send(f"```\n{output}\n```")
if len(non_existent) > 0:
await ctx.send(f"{', '.join(non_existent)} symbol/s do not exist")
@bot.command(name='info', help='Get info of a particular stock according to the list of keys')
async def get_info(ctx, symbol: str, key: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
else:
try:
await ctx.send(data.get_info(symbol, key))
except KeyError:
await ctx.send(f"{key} is not a valid information identifier")
@get_info.error
async def info_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !get_info \{ticker symbol\} \{information requested\}")
@bot.command(name='balance_sheet', help='Returns the most recent balance sheet of a single company specified by the ticker symbol entered')
async def balance_sheet(ctx, symbol: str):
print("calling")
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
print("calling2")
bsheet = data.get_balance_sheet(symbol)
print("calling3")
for i in range(0, 4):
print("calling4")
sheet1 = bsheet[int((i / 4) * len(bsheet)):int(len(bsheet) * ((i + 1) / 4))]
output = t2a(
body=[arr for arr in sheet1],
style=PresetStyle.thin_compact
)
await ctx.send(f"```\n{output}\n```")
@balance_sheet.error
async def bsheet_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !balance_sheet \{ticker symbol\}")
@bot.command(name='earnings', help='Returns a graph of a companies revenue and earnings over the past 4 years')
async def earnings(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
url = data.get_earnings(symbol, False)
embed = discord.Embed(title=f"{symbol} Earnings")
embed.set_image(url=url)
await ctx.send(embed=embed)
@earnings.error
async def earnings_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !earnings \{ticker symbol\}")
@bot.command(name='quarterly_earnings', help='Returns a graph of a companies revenue and earnings over the past 4 quarters')
async def quarterly_earnings(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
url = data.get_earnings(symbol, True)
embed = discord.Embed(title=f"{symbol} Earnings")
embed.set_image(url=url)
await ctx.send(embed=embed)
@quarterly_earnings.error
async def qearnings_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Incorrect arguments entered. Please enter: !quarterly_earnings \{ticker symbol\}")
@bot.command(name='add_news', help='Adds a ticker to get daily news for')
async def add_news(ctx, symbol: str):
if not data.ticker_exists(symbol):
await ctx.send(f"Ticker symbol {symbol} does not exist or may be delisted.")
return
check_ticker = await bot.db.fetch('SELECT ticker FROM news_tickers WHERE ticker = $1', symbol)
if len(check_ticker) > 0:
await ctx.send(f"Ticker symbol {symbol} has already been added")
else:
await bot.db.execute('INSERT INTO news_tickers(ticker) VALUES ($1)', symbol)
@tasks.loop(hours=24)
async def daily_news(ctx):
tickers = await bot.db.fetch('SELECT ticker FROM news_tickers')
ticker_array = [ticker[0] for ticker in tickers]
news = data.get_news(ticker_array)
set_of = set(ticker_array)
for article in news.values():
related_tickers = [company for company in article['relatedTickers'] if company in set_of]
ticker_string = ", ".join(related_tickers)
publisher = article['publisher']
thumbnail = None
try:
thumbnail = article['thumbnail']['resolution'][0]['url']
except KeyError:
pass
embed=discord.Embed(title=article['title'], url=article['link'], color=0x00ffff)
if thumbnail:
embed.set_thumbnail(url=thumbnail)
embed.add_field(name="Publisher", value=publisher, inline=False)
embed.add_field(name="Related Tickers", value=ticker_string, inline=True)
await ctx.send(embed=embed)
@daily_news.before_loop
async def before_daily_news():
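# Delay the first loop iteration until the next 9 AM local time so the
# 24-hour cycle always ticks at 09:00.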
now = datetime.now()
current_hour = now.strftime("%H")
if int(current_hour) > 8:
nine_am = (now + timedelta(days=1)).replace(hour=9, minute=0, microsecond=0, second=0)
else:
nine_am = datetime(year=int(now.strftime("%Y")), month=int(now.strftime("%m")), day=int(now.strftime("%d")), hour=9)
diff = (nine_am - now).seconds
await sleep(diff)
@bot.command(name="remove_news", help="Remove a ticker from the news watchlist")
async def remove_news(ctx, symbol: str):
tickers = await bot.db.fetch('SELECT ticker FROM news_tickers')
ticker_array = [ticker[0] for ticker in tickers]
if symbol not in ticker_array:
await ctx.send(f"Ticker {symbol} is not in the watchlist.")
else:
await bot.db.execute('''DELETE FROM news_tickers where ticker = $1''', symbol)
async def main():
await create_db_pool()
await bot.start(TOKEN)
run(main())
|
NexhmedinQ/Discord-Finance-Bot
|
bot.py
|
bot.py
|
py
| 6,920 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "discord.Intents.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "asyncpg.create_pool",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "data.ticker_exists",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "data.current_price",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "table2ascii.table2ascii",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "table2ascii.PresetStyle.thin_compact",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "table2ascii.PresetStyle",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "data.ticker_exists",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "data.get_info",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.MissingRequiredArgument",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "data.ticker_exists",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "data.get_balance_sheet",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "table2ascii.table2ascii",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "table2ascii.PresetStyle.thin_compact",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "table2ascii.PresetStyle",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.MissingRequiredArgument",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "data.ticker_exists",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "data.get_earnings",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.MissingRequiredArgument",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "data.ticker_exists",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "data.get_earnings",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.MissingRequiredArgument",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "data.ticker_exists",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "data.get_news",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks.loop",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "discord.ext.tasks",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 182,
"usage_type": "call"
}
] |
28395924014
|
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class AnimeDataset(Dataset):
def __init__(self, dataset_path, image_size):
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.paths = [os.path.join(dataset_path, name) for name in os.listdir(dataset_path)]
def __getitem__(self, item):
image = Image.open(self.paths[item])
data = self.transform(image)
return data
def __len__(self):
return len(self.paths)
class LossWriter:
def __init__(self, save_path):
self.save_path = save_path
def add(self, loss, i):
with open(self.save_path, mode="a") as f:
term = str(i) + " " + str(loss) + "\n"
f.write(term)
f.close()
def recover_image(img):
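# Inverts transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)): x * std + mean
# maps [-1, 1] back to [0, 1]; then scale to [0, 255], clip, and cast to
# uint8 in NHWC layout for display.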
return (
(img.numpy() *
np.array([0.5, 0.5, 0.5]).reshape((1, 3, 1, 1)) +
np.array([0.5, 0.5, 0.5]).reshape((1, 3, 1, 1))
).transpose(0, 2, 3, 1) * 255
).clip(0, 255).astype(np.uint8)
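# --- Illustrative usage sketch, not part of the original file ---
# A minimal example of how AnimeDataset and recover_image might be used
# together; the folder "./data/anime" and the 64px size are assumptions.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = AnimeDataset(dataset_path="./data/anime", image_size=64)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    batch = next(iter(loader))     # float tensor, shape (16, 3, 64, 64)
    pixels = recover_image(batch)  # uint8 array, shape (16, 64, 64, 3)
    print(batch.shape, pixels.shape, pixels.dtype)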
|
cwpeng-cn/DCGAN
|
data.py
|
data.py
|
py
| 1,276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.CenterCrop",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Normalize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 45,
"usage_type": "attribute"
}
] |
18956703257
|
from datetime import datetime, timezone
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
import logging
import os
import mongo_client
from typing import Optional, List, Union
import random
client = WebClient(token=os.environ.get("SLACK_TOKEN"))
good_words_collection = mongo_client.get_good_words_collection()
EMOJIS = os.environ.get("VALID_EMOJIS").split(' ')
def add_historical_goodwords():
    # Call the conversations.history method using the WebClient
result = client.conversations_history(channel="C0441R6SKBN")
conversation_history = result["messages"]
for message in conversation_history:
word = message['text']
date_millis = float(message['ts'])
user_id = message['user']
temp_list = list(filter(lambda a: len(a) > 0, word.split(" ")))
if len(temp_list) == 1:
handle_word_sent(temp_list[0], date_millis, user_id, True)
def process_event(event: object):
if event.get('text', False) and event.get('ts', False) and event.get('user', False):
if event.get('thread_ts', False):
print(f"Replies to posts not accepted.")
return
message = event['text']
millis_time = float(event['ts'])
user = event['user']
channel = event['channel']
temp_list = list(filter(lambda a: len(a) > 0, message.split(" ")))
if len(temp_list) > 1 or channel != "C0441R6SKBN":
print(f"invalid submission: {temp_list}")
else:
handle_word_sent(temp_list[0], millis_time, user)
else:
print(f"Event missing attribute ts or text: {event}")
def handle_word_sent(word: str, millis_time: float, user_id: str, historical: bool=False):
prev_sent = find_word(word)
if prev_sent is not None:
if not historical:
client.chat_postMessage(channel="C0441R6SKBN", text=f"{word} was previously sent on {datetime.fromtimestamp(prev_sent['date_millis']).strftime('%m/%d/%Y')}", thread_ts=str(millis_time))
print(f"Thread Time: {datetime.fromtimestamp(prev_sent['date_millis']).strftime('%m/%d/%Y')}, Prev Sent Word: {word}")
elif not historical:
insert_new_word(word, millis_time, user_id)
client.reactions_add(channel="C0441R6SKBN", name=random.choice(EMOJIS), timestamp=str(millis_time))
else:
insert_new_word(word, millis_time, user_id)
def insert_new_word(word: str, date_millis: float, user: str):
word_lowercase = word.lower()
document = {
"word": word_lowercase,
"date_millis": date_millis,
"user_id": user
}
good_words_collection.insert_one(document)
print(f"Successfully added word: \n {document['word']} \n millis: {document['date_millis']}")
def find_word(word: str):
result = good_words_collection.find_one({"word": word.lower()})
print(f"Found: {result}")
return result
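# --- Illustrative sketch, not part of the original file ---
# A hypothetical Slack "message" event showing the shape process_event
# expects; the ts/user values are invented, the channel id is the one
# hard-coded above. Running this for real needs SLACK_TOKEN, VALID_EMOJIS
# and a reachable MongoDB behind mongo_client.
if __name__ == "__main__":
    sample_event = {
        "text": "serendipity",
        "ts": "1690000000.000100",
        "user": "U012AB3CD",
        "channel": "C0441R6SKBN",
    }
    process_event(sample_event)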
|
isaacson-f/slack-bots
|
goodwords_service.py
|
goodwords_service.py
|
py
| 2,887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "slack_sdk.WebClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mongo_client.get_good_words_collection",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 54,
"usage_type": "call"
}
] |
8528358737
|
"""
crown.py
COMP9444, CSE, UNSW
"""
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# the data for this task has three columns: x, y and class.
# the input of the nn will be x and y, and the output will be a binary class.
class Full3Net(torch.nn.Module):
# assume we have a linear nn here:
def __init__(self, hid=3):
super(Full3Net, self).__init__()
# define the structure of the nn
        # define the first hidden layer: the in-feature size is 2 and the out-feature size is defined by the variable hid
self.hidden1 = nn.Linear(2, hid)
# define the second hidden layer: size of in feature is hid and size of out feature is hid
self.hidden2 = nn.Linear(hid, hid)
# define the third layer: the size of input is hid from layer 2, the size of output is 1
self.hidden3 = nn.Linear(hid, 1)
def forward(self, input):
        # assume we have a linear nn.
        # calculate the linear sum of the weights with the input:
sum1 = self.hidden1(input)
# apply the activation function: tanh
self.hid1 = torch.tanh(sum1)
        # calculate the linear sum of the weights with the first hidden layer's output after activation
sum2 = self.hidden2(self.hid1)
# apply the activation function: tanh
self.hid2 = torch.tanh(sum2)
# compute the sum for the final layer
out_sum = self.hidden3(self.hid2)
# apply the activation function: sigmoid
output = torch.sigmoid(out_sum)
return output
class Full4Net(torch.nn.Module):
def __init__(self, hid):
super(Full4Net, self).__init__()
def forward(self, input):
self.hid1 = None
self.hid2 = None
self.hid3 = None
return 0*input[:,0]
class DenseNet(torch.nn.Module):
def __init__(self, num_hid):
super(DenseNet, self).__init__()
def forward(self, input):
self.hid1 = None
self.hid2 = None
return 0*input[:,0]
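# --- Illustrative sketch, not part of the original file ---
# A quick shape check for Full3Net: a batch of 8 random (x, y) points in,
# one sigmoid probability per point out.
if __name__ == "__main__":
    net = Full3Net(hid=3)
    points = torch.randn(8, 2)  # 8 random (x, y) inputs
    probs = net(points)         # shape (8, 1), values in (0, 1)
    print(probs.shape, float(probs.min()), float(probs.max()))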
|
sijinwnag/COMP9444_HW1
|
hw1/crown.py
|
crown.py
|
py
| 2,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.tanh",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.tanh",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "attribute"
}
] |
39189785619
|
"""
Example of the FrequentistSurface plot.
Usage: surf_plot.py FILE
where FILE is a file containing the Surface to be plotted. The surface is expected
to be found in the `/surface` directory of the FILE.
"""
import sys
import matplotlib.pyplot as plt
from cafplot import load
from cafplot.plot.surface import (
plot_surface, plot_surface_best_fit, plot_surface_gauss_contour
)
root_file = load(sys.argv[1])
surface = root_file.get_fsurface('surface')
f, ax = plt.subplots()
im = plot_surface(ax, surface)
plot_surface_best_fit(ax, surface, color = 'red', marker = '*')
plot_surface_gauss_contour(
ax, surface, sigma = 1, color = 'red', label = r'1$\sigma$'
)
ax.set_xlabel(r'$\sin^2 \theta_{23}$')
ax.set_ylabel(r'$\Delta m^2_{32}$')
ax.legend()
f.colorbar(im)
plt.show()
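# --- Illustrative variation, not part of the original file ---
# Assuming sigma may be any positive value (as the single call above
# suggests), several confidence contours can be overlaid; drawn on a
# second figure so the original plot is left untouched.
f2, ax2 = plt.subplots()
plot_surface(ax2, surface)
for sigma, color in [(1, 'red'), (2, 'orange'), (3, 'yellow')]:
    plot_surface_gauss_contour(
        ax2, surface, sigma=sigma, color=color, label=rf'{sigma}$\sigma$'
    )
ax2.legend()
plt.show()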
|
usert5432/cafplot
|
examples/surf_plot.py
|
surf_plot.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cafplot.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cafplot.plot.surface.plot_surface",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cafplot.plot.surface.plot_surface_best_fit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cafplot.plot.surface.plot_surface_gauss_contour",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
5033666757
|
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.template import loader
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.core.mail import send_mail, BadHeaderError
from django.contrib.auth.decorators import login_required
from .forms import UserForm, RegisterForm, UserProfileForm, ContactForm
from .models import UserProfile, Event
def index(request):
template = loader.get_template("help/index.html")
return HttpResponse(template.render(request=request))
@transaction.atomic
def register(request):
registered = False
if request.method == "POST":
user_form = RegisterForm(data=request.POST)
userprofile_form = UserProfileForm(data=request.POST)
if user_form.is_valid() and userprofile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
phone = userprofile_form.cleaned_data.get("phone")
userprofile = UserProfile.objects.filter(user_id=user.id)
userprofile.update(phone=phone)
registered = True
else:
            messages.error(request, (
                'Please correct the errors below.'))
else:
user_form = RegisterForm()
userprofile_form = UserProfileForm()
return render(
request, "help/registration.html", {
"user_form": user_form,
"userprofile_form": userprofile_form,
"registered": registered}
)
def logout2(request):
logout(request)
return redirect(reverse("index"))
@login_required()
def update_event(request):
if request.method == "POST":
id = request.POST['event_id']
event = Event.objects.filter(id=id)
event.update(status="closed")
return redirect(reverse('profile'))
else:
return render(request, "help/profile.html")
def contact(request):
send = False
email = []
contact_form = ContactForm()
if request.method == "POST":
subject = "demande d'info"
from_email = "needhelp_contact"
email.append(request.POST['Email'])
body = {
'name': request.POST['Nom'],
'email': request.POST['Email'],
'phone': request.POST['Mobile'],
'message': request.POST['Message'],
}
message = "\n".join(body.values())
try:
send_mail(
subject,
message,
from_email,
email
)
except BadHeaderError:
return HttpResponse('Invalid header found.')
send = True
# return redirect(reverse('contact'))
else:
contact_form = ContactForm()
return render(
request, "help/contact.html",
{
'contact_form': contact_form,
'send': send
})
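# --- Illustrative sketch, not part of the original file ---
# A hypothetical urls.py wiring for the views above; the route names are
# assumptions except 'index', which reverse('index') in logout2 relies on
# (reverse('profile') points at a view defined elsewhere).
#
# from django.urls import path
# from help import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('register/', views.register, name='register'),
#     path('logout/', views.logout2, name='logout'),
#     path('event/update/', views.update_event, name='update_event'),
#     path('contact/', views.contact, name='contact'),
# ]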
|
davidbarat/P13
|
needhelp/help/views.py
|
views.py
|
py
| 3,219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.template.loader.get_template",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "forms.RegisterForm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "forms.UserProfileForm",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.UserProfile.objects.filter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "models.UserProfile.objects",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "models.UserProfile",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.error",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "forms.RegisterForm",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "forms.UserProfileForm",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.db.transaction",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "models.Event.objects.filter",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.Event.objects",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "models.Event",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "forms.RegisterForm",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "forms.UserProfileForm",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "forms.ContactForm",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "django.core.mail.BadHeaderError",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "forms.ContactForm",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 100,
"usage_type": "call"
}
] |
24206813915
|
import webapp2
import jinja2
import os
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
def to_render(template, **para):
t = template_env.get_template(template)
return t.render(para)
def blog_key(name='default'):
return db.Key.from_path('blog', name)
class Art(db.Model):
title = db.StringProperty(required = True)
arc = db.TextProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
def render(self):
self._render_text = self.arc.replace('\n', '<br>')
return to_render('post.html', p = self)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **para):
self.response.out.write(to_render(template, **para))
class FrontPage(BaseHandler):
def get(self):
        arts = db.GqlQuery("select * from Art order by created DESC")
self.render("frontPage.html", arts = arts)
class ShowPost(BaseHandler):
def get(self, post_id):
key = db.Key.from_path('Art', int(post_id), parent=blog_key())
post = db.get(key)
self.render('permanlink.html', post = post)
"""Problem:
1. redirect('/blog') don't refresh the page
2. how add id colum to db automatiaclly increase
3. miss click on the title jump to a new page
"""
class NewPost(BaseHandler):
def get(self):
self.render("newPost.html")
def post(self):
title = self.request.get('title')
arc = self.request.get('arc')
if title and arc.strip():
a = Art(parent=blog_key(), title = title, arc = arc)
a.put()
self.redirect('/blog/%s' % str(a.key().id()))
else:
self.render("newPost.html", title=title, arc = arc, error="Content insufficiency")
app = webapp2.WSGIApplication([('/blog/?', FrontPage), ('/blog/newpost', NewPost), ('/blog/([0-9]+)', ShowPost)],
debug=True)
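# --- Illustrative note, not part of the original file ---
# Re: problem 2 in the docstring above -- the datastore assigns a numeric
# id automatically when an entity is put() without a key name, so no extra
# column is needed. Sketch (assumes the App Engine runtime is available):
#
# a = Art(parent=blog_key(), title='t', arc='body')
# a.put()
# a.key().id()  # auto-assigned integer, used in the /blog/<id> URL above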
|
tongtie/udacity
|
WebDevelopment/hw3/my_solution.py
|
my_solution.py
|
py
| 2,089 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.Key.from_path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.Key",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.Model",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.StringProperty",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.TextProperty",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.DateTimeProperty",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db.GqlQuery",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.Key.from_path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db.Key",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "google.appengine.ext.db.get",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "google.appengine.ext.db",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "webapp2.WSGIApplication",
"line_number": 63,
"usage_type": "call"
}
] |