{
"source": "jhoon-oh/FL_BABU",
"score": 3
}
#### File: FL_BABU/utils/options.py
```python
import argparse
def args_parser():
parser = argparse.ArgumentParser()
# federated arguments
parser.add_argument('--epochs', type=int, default=10, help="rounds of training")
parser.add_argument('--num_users', type=int, default=100, help="number of users: K")
parser.add_argument('--shard_per_user', type=int, default=2, help="classes per user")
parser.add_argument('--frac', type=float, default=0.1, help="the fraction of clients: C")
parser.add_argument('--local_ep', type=int, default=5, help="the number of local epochs: E")
parser.add_argument('--local_bs', type=int, default=10, help="local batch size: B")
parser.add_argument('--bs', type=int, default=128, help="test batch size")
parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
parser.add_argument('--momentum', type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument('--wd', type=float, default=0.0, help="weight decay (default: 0.0)")
parser.add_argument('--split', type=str, default='user', help="train-test split type, user or sample")
parser.add_argument('--grad_norm', action='store_true', help='use_gradnorm_avging')
parser.add_argument('--local_ep_pretrain', type=int, default=0, help="the number of pretrain local ep")
parser.add_argument('--lr_decay', type=float, default=1.0, help="learning rate decay per round")
parser.add_argument('--fl_alg', type=str, default='FedAvg', help="federated learning algorithm")
parser.add_argument('--mu', type=float, default=1.0, help="parameter for proximal local SGD")
# model arguments
parser.add_argument('--model', type=str, default='mlp', help='model name')
parser.add_argument('--kernel_num', type=int, default=9, help='number of each kind of kernel')
parser.add_argument('--kernel_sizes', type=str, default='3,4,5',
help='comma-separated kernel size to use for convolution')
parser.add_argument('--norm', type=str, default='batch_norm', help="batch_norm, layer_norm, or None")
parser.add_argument('--num_filters', type=int, default=32, help="number of filters for conv nets")
parser.add_argument('--max_pool', type=str, default='True',
help="Whether use max pooling rather than strided convolutions")
parser.add_argument('--num_layers_keep', type=int, default=1, help='number layers to keep')
# other arguments
parser.add_argument('--dataset', type=str, default='mnist', help="name of dataset")
parser.add_argument('--iid', action='store_true', help='whether i.i.d or not')
parser.add_argument('--num_classes', type=int, default=10, help="number of classes")
parser.add_argument('--num_channels', type=int, default=3, help="number of channels of images")
parser.add_argument('--gpu', type=int, default=0, help="GPU ID, -1 for CPU")
parser.add_argument('--stopping_rounds', type=int, default=10, help='rounds of early stopping')
parser.add_argument('--verbose', action='store_true', help='verbose print')
parser.add_argument('--print_freq', type=int, default=100, help="print loss frequency during training")
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--test_freq', type=int, default=1, help='how often to test on val set')
parser.add_argument('--load_fed', type=str, default='', help='define pretrained federated model path')
parser.add_argument('--results_save', type=str, default='/', help='define fed results save folder')
parser.add_argument('--start_saving', type=int, default=0, help='when to start saving models')
# evaluation arguments
parser.add_argument('--ft_ep', type=int, default=5, help="the number of epochs for fine-tuning")
parser.add_argument('--fine_tuning', action='store_true', help='whether fine-tuning before evaluation')
# additional arguments
parser.add_argument('--local_upt_part', type=str, default=None, help='body, head, or full')
parser.add_argument('--aggr_part', type=str, default=None, help='body, head, or full')
parser.add_argument('--unbalanced', action='store_true', help='unbalanced data size')
parser.add_argument('--num_batch_users', type=int, default=0, help='when unbalanced dataset setting, batch users (same data size)')
parser.add_argument('--moved_data_size', type=int, default=0, help='when unbalanced dataset setting, moved data size')
parser.add_argument('--server_data_ratio', type=float, default=0.0, help='The percentage of data that servers also have across data of all clients.')
# arguments for a single model
parser.add_argument('--opt', type=str, default='SGD', help="optimizer")
parser.add_argument('--body_lr', type=float, default=None, help="learning rate for the body of the model")
parser.add_argument('--head_lr', type=float, default=None, help="learning rate for the head of the model")
parser.add_argument('--body_m', type=float, default=None, help="momentum for the body of the model")
parser.add_argument('--head_m', type=float, default=None, help="momentum for the head of the model")
args = parser.parse_args()
return args
```
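For a quick sanity check, `args_parser()` can be driven with an explicit argument list instead of the real command line. A minimal sketch, assuming the FL_BABU repository root is the working directory (the flag values are only examples):
```python
import sys
from utils.options import args_parser  # assumes FL_BABU/ is on the import path

# args_parser() reads sys.argv, so patch it with the flags to test
sys.argv = ['options.py', '--dataset', 'cifar10', '--model', 'mobile',
            '--frac', '0.1', '--local_ep', '5', '--local_upt_part', 'body']
args = args_parser()
print(args.dataset, args.model, args.frac, args.local_upt_part)  # cifar10 mobile 0.1 body
```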
#### File: FL_BABU/utils/train_utils.py
```python
from torchvision import datasets, transforms
from models.Nets import CNNCifar, CNNMnist, MLP, MobileNetCifar  # CNNMnist and MLP are used by get_model() below
from models.ResNet import ResNet18, ResNet50
from utils.sampling import iid, noniid, iid_unbalanced, noniid_unbalanced
trans_mnist = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
trans_cifar10_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
trans_cifar10_val = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
trans_cifar100_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441],
std=[0.267, 0.256, 0.276])])
trans_cifar100_val = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441],
std=[0.267, 0.256, 0.276])])
def get_data(args, env='fed'):
if env == 'single':
if args.dataset == 'cifar10':
dataset_train = datasets.CIFAR10('data/cifar10', train=True, download=True, transform=trans_cifar10_train)
dataset_test = datasets.CIFAR10('data/cifar10', train=False, download=True, transform=trans_cifar10_val)
elif args.dataset == 'cifar100':
dataset_train = datasets.CIFAR100('data/cifar100', train=True, download=True, transform=trans_cifar100_train)
dataset_test = datasets.CIFAR100('data/cifar100', train=False, download=True, transform=trans_cifar100_val)
return dataset_train, dataset_test
elif env == 'fed':
if args.unbalanced:
if args.dataset == 'cifar10':
dataset_train = datasets.CIFAR10('data/cifar10', train=True, download=True, transform=trans_cifar10_train)
dataset_test = datasets.CIFAR10('data/cifar10', train=False, download=True, transform=trans_cifar10_val)
if args.iid:
dict_users_train = iid_unbalanced(dataset_train, args.num_users, args.num_batch_users, args.moved_data_size)
dict_users_test = iid_unbalanced(dataset_test, args.num_users, args.num_batch_users, args.moved_data_size)
else:
dict_users_train, rand_set_all = noniid_unbalanced(dataset_train, args.num_users, args.num_batch_users, args.moved_data_size, args.shard_per_user)
dict_users_test, rand_set_all = noniid_unbalanced(dataset_test, args.num_users, args.num_batch_users, args.moved_data_size, args.shard_per_user, rand_set_all=rand_set_all)
elif args.dataset == 'cifar100':
dataset_train = datasets.CIFAR100('data/cifar100', train=True, download=True, transform=trans_cifar100_train)
dataset_test = datasets.CIFAR100('data/cifar100', train=False, download=True, transform=trans_cifar100_val)
if args.iid:
dict_users_train = iid_unbalanced(dataset_train, args.num_users, args.num_batch_users, args.moved_data_size)
dict_users_test = iid_unbalanced(dataset_test, args.num_users, args.num_batch_users, args.moved_data_size)
else:
dict_users_train, rand_set_all = noniid_unbalanced(dataset_train, args.num_users, args.num_batch_users, args.moved_data_size, args.shard_per_user)
dict_users_test, rand_set_all = noniid_unbalanced(dataset_test, args.num_users, args.num_batch_users, args.moved_data_size, args.shard_per_user, rand_set_all=rand_set_all)
else:
exit('Error: unrecognized dataset')
else:
if args.dataset == 'mnist':
dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users_train = iid(dataset_train, args.num_users, args.server_data_ratio)
dict_users_test = iid(dataset_test, args.num_users, args.server_data_ratio)
else:
dict_users_train, rand_set_all = noniid(dataset_train, args.num_users, args.shard_per_user, args.server_data_ratio)
dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.server_data_ratio, rand_set_all=rand_set_all)
elif args.dataset == 'cifar10':
dataset_train = datasets.CIFAR10('data/cifar10', train=True, download=True, transform=trans_cifar10_train)
dataset_test = datasets.CIFAR10('data/cifar10', train=False, download=True, transform=trans_cifar10_val)
if args.iid:
dict_users_train = iid(dataset_train, args.num_users, args.server_data_ratio)
dict_users_test = iid(dataset_test, args.num_users, args.server_data_ratio)
else:
dict_users_train, rand_set_all = noniid(dataset_train, args.num_users, args.shard_per_user, args.server_data_ratio)
dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.server_data_ratio, rand_set_all=rand_set_all)
elif args.dataset == 'cifar100':
dataset_train = datasets.CIFAR100('data/cifar100', train=True, download=True, transform=trans_cifar100_train)
dataset_test = datasets.CIFAR100('data/cifar100', train=False, download=True, transform=trans_cifar100_val)
if args.iid:
dict_users_train = iid(dataset_train, args.num_users, args.server_data_ratio)
dict_users_test = iid(dataset_test, args.num_users, args.server_data_ratio)
else:
dict_users_train, rand_set_all = noniid(dataset_train, args.num_users, args.shard_per_user, args.server_data_ratio)
dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.server_data_ratio, rand_set_all=rand_set_all)
else:
exit('Error: unrecognized dataset')
return dataset_train, dataset_test, dict_users_train, dict_users_test
def get_model(args):
if args.model == 'cnn' and args.dataset in ['cifar10', 'cifar100']:
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'mobile' and args.dataset in ['cifar10', 'cifar100']:
net_glob = MobileNetCifar(num_classes=args.num_classes).to(args.device)
elif args.model == 'resnet18' and args.dataset in ['cifar10', 'cifar100']:
net_glob = ResNet18(num_classes=args.num_classes).to(args.device)
elif args.model == 'resnet50' and args.dataset in ['cifar10', 'cifar100']:
net_glob = ResNet50(num_classes=args.num_classes).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp' and args.dataset == 'mnist':
net_glob = MLP(dim_in=784, dim_hidden=256, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
return net_glob
```
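The two helpers above are typically wired together in a training entry point. A minimal sketch of that wiring, under the same package-layout assumption; the `args.device` assignment is not part of this file and is shown only as a plausible default:
```python
import torch
from utils.options import args_parser
from utils.train_utils import get_data, get_model

args = args_parser()
# get_model() expects args.device to exist; fall back to CPU when no GPU is available
args.device = torch.device('cuda:{}'.format(args.gpu)
                           if torch.cuda.is_available() and args.gpu != -1 else 'cpu')

dataset_train, dataset_test, dict_users_train, dict_users_test = get_data(args)
net_glob = get_model(args)
print(len(dict_users_train), 'clients,',
      sum(p.numel() for p in net_glob.parameters()), 'model parameters')
```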
{
"source": "jhoormann/RMCodeDump",
"score": 2
}
#### File: jhoormann/RMCodeDump/RL-fitting.py
```python
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.integrate import fixed_quad
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
import bces.bces
title_font = {'size':'22', 'color':'black', 'weight':'normal', 'verticalalignment':'bottom'}
axis_font = {'size':'22'}
def makeFigSingle(title, xlabel, ylabel, xlim=[0, 0], ylim=[0, 0]):
fig = plt.figure()
fig = plt.gcf()
fig.set_size_inches(12, 9, forward=True)
ax = fig.add_subplot(111)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(25)
ax.set_ylabel(ylabel, **axis_font)
if ylim != [0, 0] and ylim[0] < ylim[1]:
ax.set_ylim(ylim)
ax.set_xlabel(xlabel, **axis_font)
if xlim != [0, 0] and xlim[0] < xlim[1]:
ax.set_xlim(xlim)
ax.set_title(title, **title_font)
return fig, ax
# Read in the data of the lag/luminosity values you want to fit
# Note - luminosities are assumed to be log10(lum)
name_prev, lum_prev, lum_err_prev, lag_prev, lag_min_err_prev, lag_max_err_prev, z_prev = \
np.loadtxt("exisitingLags_all.txt", dtype={'names':('name', 'lum', 'lumerr', 'lag', 'lagerrmin', 'lagerrmax', 'z'),
'formats':('|S100', np.float, np.float, np.float, np.float, np.float,
np.float)},skiprows=1,unpack=True)
lum = lum_prev
lum_err = lum_err_prev
# Normalize by 10^44 which tends to be the value the CIV R-L relationship is normalized by - change as needed for your
# emission line of choice
lum = lum - 44
lum_err = lum*(lum_err_prev/lum_prev)
lag = np.log10(lag_prev)
lag_err_min = np.log10(lag_prev) - np.log10(lag_prev - lag_min_err_prev)
lag_err_max = np.log10(lag_prev+lag_max_err_prev) - np.log10(lag_prev)
# Now I call the bces package, see the documentation (https://github.com/rsnemmen/BCES) for the description of all the
# methods. While this method does allow for inclusion of 2D error bars it does not allow for the use of asymmetric
# ones. I tested the results using both the positive and negative errors and it did not make a difference.
a, b, aerr, berr, covab = bces.bces.bces(lum, lum_err, lag, lag_err_max, np.zeros_like(lag))
print("BCES Results using + errors")
print("y|x")
print("Slope = " + str(round(a[0],3)) + " +/-" + str(round(aerr[0],3)))
print("Intercept = " + str(round(b[0], 3)) + " +/- " + str(round(berr[0],3)))
print("\nbissector")
print("Slope = " + str(round(a[2],3)) + " +/-" + str(round(aerr[2],3)))
print("Intercept = " + str(round(b[2], 3)) + " +/- " + str(round(berr[2],3)))
print("orthogonal") # I am pretty sure this is the one I used, although it doesn't make a huge difference.
# Bear in mind that many other papers use the bisector method, but it has since been suggested that that approach is not self-consistent
print("Slope = " + str(round(a[3],3)) + " +/-" + str(round(aerr[3],3)))
print("Intercept = " + str(round(b[3], 3)) + " +/- " + str(round(berr[3],3)) + "\n")
a, b, aerr, berr, covab = bces.bces.bces(lum, lum_err, lag, lag_err_min, np.zeros_like(lag))
print("BCES Results using - errors")
print("y|x")
print("Slope = " + str(round(a[0],3)) + " +/-" + str(round(aerr[0],3)))
print("Intercept = " + str(round(b[0], 3)) + " +/- " + str(round(berr[0],3)))
print("\nbissector")
print("Slope = " + str(round(a[2],3)) + " +/-" + str(round(aerr[2],3)))
print("Intercept = " + str(round(b[2], 3)) + " +/- " + str(round(berr[2],3)))
print("orthogonal")
print("Slope = " + str(round(a[3],3)) + " +/-" + str(round(aerr[3],3)) + "\n")
# I also tried my own Monte-Carlo-esque method: randomly shuffle the lag/lum points within their uncertainties,
# fit the data, repeat this many times, and take the mean slope/intercept. This gives a consistent value, although
# the uncertainties are a bit small; I think the other method will be viewed as more realistic, particularly since we
# are already showing smaller errors than others.
nAGN = len(lum_prev)
nSim = 1000
slopes = np.zeros(nSim)
intercepts = np.zeros(nSim)
nLum = 100
nLag = 200
lum_array = np.zeros((nAGN, nLum))
lag_array = np.zeros((nAGN, nLag))
index = np.linspace(0,nAGN-1, nAGN).astype(int)
for i in range(nAGN):
lum_array[i,:] = np.linspace(lum[i]-lum_err[i], lum[i]+lum_err[i], nLum)
lag_array[i,:] = np.linspace(lag[i] - lag_err_min[i], lag[i] + lag_err_max[i], nLag)
for i in range(nSim):
lag_index = nLag*np.random.rand(nAGN)
lum_index = nLum*np.random.rand(nAGN)
lag_index = lag_index.astype(int)
lum_index = lum_index.astype(int)
lag_temp = [lag_array[x, y] for x,y in zip(index, lag_index)]
lum_temp = [lum_array[x, y] for x,y in zip(index, lum_index)]
ourFit = np.poly1d(np.polyfit(lum_temp, lag_temp, 1))
intercepts[i] = ourFit[0]
slopes[i] = ourFit[1]
mean_slope = np.mean(slopes)
mean_intercept = np.mean(intercepts)
std_slope = np.std(slopes)
std_intercept = np.std(intercepts)
# Now plot the results
print("Monte Carlo-esque method")
print("Slope = " + str(round(mean_slope,3)) + " +/- " + str(round(std_slope,3)) + "\n")
fig0, ax0 = makeFigSingle("", "Log Luminosity", "Log Lag")
ax0.errorbar(lum, lag, yerr=[lag_err_min, lag_err_max], xerr=lum_err, fmt='o',
color = 'black', markersize = 7)
lum_OzDES = np.linspace(38-44, 48-44, 10)
lag_OzDES = [b[3]+ a[3]*x for x in lum_OzDES]
ax0.plot(lum_OzDES, lag_OzDES, color = 'black')
plt.show()
```
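The resampling loop above is tied to the specific data file; the sketch below isolates the same "Monte Carlo-esque" idea on synthetic data so the mechanics are easy to verify (all numbers are made up and symmetric errors are used for simplicity):
```python
import numpy as np

rng = np.random.default_rng(0)
true_slope, true_intercept = 0.5, 1.0
lum = rng.uniform(-2, 2, size=30)                       # stand-in for log10(L / 10^44)
lag = true_intercept + true_slope * lum + rng.normal(0, 0.05, size=30)
lum_err = np.full_like(lum, 0.1)
lag_err = np.full_like(lag, 0.05)

n_sim = 1000
slopes = np.empty(n_sim)
intercepts = np.empty(n_sim)
for i in range(n_sim):
    # shuffle every point within its uncertainty, then fit a straight line
    lum_i = lum + rng.uniform(-1, 1, size=lum.size) * lum_err
    lag_i = lag + rng.uniform(-1, 1, size=lag.size) * lag_err
    intercepts[i], slopes[i] = np.polynomial.polynomial.polyfit(lum_i, lag_i, 1)

print(f"slope = {slopes.mean():.3f} +/- {slopes.std():.3f}")
print(f"intercept = {intercepts.mean():.3f} +/- {intercepts.std():.3f}")
```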
{
"source": "jhoowy/plt2pix",
"score": 2
}
#### File: plt2pix/loader/dataloader.py
```python
import pickle, random
import math, time, platform
from pathlib import Path
import cv2
import torch
import numpy as np
from PIL import Image
from skimage import color
from torchvision import transforms, datasets
from torchvision.transforms import functional as tvF
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset # For custom datasets
from torchvision.transforms.functional import InterpolationMode as IM
def set_seed(seed, print_log=True):
if seed < 0:
return
if print_log:
print('set random seed: {}'.format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def pseudo_uniform(id, a, b):
return (((id * 1.253 + a * 324.2351 + b * 534.342) * 20147.2312369804) + 0.12949) % (b - a) + a
def real_uniform(id, a, b):
return random.uniform(a, b)
def get_tag_dict(tag_dump_path):
with open(tag_dump_path, 'rb') as f:
pkl = pickle.load(f)
iv_tag_list = pkl['iv_tag_list']
cv_tag_list = pkl['cv_tag_list']
name_to_id = pkl['tag_dict']
iv_dict = {tag_id: i for (i, tag_id) in enumerate(iv_tag_list)}
cv_dict = {tag_id: i for (i, tag_id) in enumerate(cv_tag_list)}
id_to_name = {tag_id: tag_name for (tag_name, tag_id) in name_to_id.items()}
return (iv_dict, cv_dict, id_to_name)
def read_tagline_txt(tag_txt_path, img_dir_path, iv_dict, cv_dict, data_size=0, is_train=True, seed=-1):
iv_class_len = len(iv_dict)
cv_class_len = len(cv_dict)
print("read_tagline_txt! We will use %d, %d tags" % (iv_class_len, cv_class_len))
if not tag_txt_path.exists():
raise Exception(f'tag list text file "{tag_txt_path}" does not exist.')
iv_tag_set = set(iv_dict.keys())
cv_tag_set = set(cv_dict.keys())
iv_class_list = []
cv_class_list = []
file_id_list = []
data_limited = data_size != 0
count = 0
count_all = 0
all_tag_num = 0
awful_tag_num = 0
iv_tag_num = 0
cv_tag_num = 0
tag_lines = []
with tag_txt_path.open('r') as f:
for line in f:
tag_lines.append(line)
random.seed(10)
random.shuffle(tag_lines)
random.seed(time.time() if seed < 0 else seed)
for line in tag_lines:
count_all += 1
tag_str_list = line.split(' ')
tag_list = [int(i) for i in tag_str_list]
file_name = tag_list[0]
tag_list = set(tag_list[1:])
if not (img_dir_path / f'{file_name}.png').exists():
continue
# one girl or one boy / one hair and eye color
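# NOTE: include_tags, hair_tags and eye_tags are tag-id sets defined elsewhere in the
# original loader module; they are not shown in this excerpt.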
person_tag = tag_list.intersection(include_tags)
hair_tag = tag_list.intersection(hair_tags)
eye_tag = tag_list.intersection(eye_tags)
if not (len(hair_tag) == 1 and len(eye_tag) == 1 and len(person_tag) == 1):
awful_tag_num += 1
if is_train:
continue
iv_class = torch.zeros(iv_class_len, dtype=torch.float)
cv_class = torch.zeros(cv_class_len, dtype=torch.float)
tag_exist = False
for tag in tag_list:
if tag in iv_tag_set:
try:
iv_class[iv_dict[tag]] = 1
tag_exist = True
iv_tag_num += 1
except IndexError as e:
print(len(iv_dict), iv_class_len, tag, iv_dict[tag])
raise e
if not tag_exist and is_train:
continue
tag_exist = False
for tag in tag_list:
if tag in cv_tag_set:
try:
cv_class[cv_dict[tag]] = 1
tag_exist = True
cv_tag_num += 1
except IndexError as e:
print(len(cv_dict), cv_class_len, tag, cv_dict[tag])
raise e
if not tag_exist and is_train:
continue
file_id_list.append(file_name)
iv_class_list.append(iv_class)
cv_class_list.append(cv_class)
all_tag_num += len(tag_list)
count += 1
if data_limited and count > data_size:
break
print(f'count_all {count_all}, select_count {count}, awful_count {awful_tag_num}, all_tag_num {all_tag_num}, iv_tag_num {iv_tag_num}, cv_tag_num {cv_tag_num}')
return (file_id_list, iv_class_list, cv_class_list)
def read_palette_dict(palette_path, img_dir_path, color_space='rgb', k=5, data_size=0):
if not palette_path.exists():
raise Exception(f'palette dictionary file "{palette_path}" does not exist.')
with open(palette_path, 'rb') as f:
palettes = pickle.load(f)
file_id_list = []
palette_list = []
data_limited = data_size != 0
count = 0
for file_name in palettes.keys():
if not (img_dir_path / file_name).exists():
continue
p = palettes[file_name]
if len(p) < k:
continue
file_id_list.append(file_name)
palette_list.append(p[:k])
count += 1
if data_limited and count > data_size:
break
palette_list = np.array(palette_list).astype('float32')
palette_list = palette_list.reshape(count, k * 3)
# if color_space == 'rgb':
# palette_list = palette_list * 2 - 1.
# elif color_space == 'lab':
# raise NotImplementedError('lab color space palette is not supported')
# elif color_space == 'hsv':
# raise NotImplementedError('hsv color space palette is not supported')
return (file_id_list, palette_list)
class ColorAndSketchDataset(Dataset):
def __init__(self, rgb_path, mask_path, sketch_path_list, file_id_list, palette_list,
override_len=None, both_transform=None, sketch_transform=None, color_transform=None,
mask_transform=None, palette_transform=None, seed=-1, **kwargs):
self.rgb_path = rgb_path
self.mask_path = mask_path
self.sketch_path_list = sketch_path_list
self.file_id_list = file_id_list # copy
self.palette_list = palette_list
self.both_transform = both_transform
self.color_transform = color_transform
self.sketch_transform = sketch_transform
self.mask_transform = mask_transform
self.palette_transform = palette_transform
self.data_len = len(file_id_list)
if override_len > 0 and self.data_len > override_len:
self.data_len = override_len
self.idx_shuffle = list(range(self.data_len))
random.seed(10)
random.shuffle(self.idx_shuffle)
random.seed(time.time() if seed < 0 else seed)
def __getitem__(self, idx):
index = self.idx_shuffle[idx]
file_id = self.file_id_list[index]
palette = self.palette_list[index]
sketch_path = random.choice(self.sketch_path_list)
color_path = self.rgb_path / file_id
mask_path = self.mask_path / file_id
sketch_path = sketch_path / file_id
color_img = Image.open(color_path).convert('RGB')
mask_img = np.load(mask_path)
sketch_img = Image.open(sketch_path).convert('L') # to [1, H, W]
if self.mask_transform is not None:
mask_img = self.mask_transform(mask_img)
else:
mask_img = torch.FloatTensor(mask_img)
if self.both_transform is not None:
color_img, sketch_img, mask_img = self.both_transform(color_img, sketch_img, mask_img)
if self.color_transform is not None:
color_img = self.color_transform(color_img)
if self.sketch_transform is not None:
sketch_img = self.sketch_transform(sketch_img)
if self.palette_transform is not None:
palette = self.palette_transform(palette)
else:
palette = palette / 255.0
return (color_img, sketch_img, mask_img, palette)
def __len__(self):
return self.data_len
def enhance_brightness(self, input_size):
random_jitter = [transforms.ColorJitter(brightness=[1, 7], contrast=0.2, saturation=0.2)]
data_augmentation = [transforms.Resize((input_size, input_size), interpolation=IM.LANCZOS),
transforms.ToTensor()]
self.sketch_transform = transforms.Compose(random_jitter + data_augmentation)
class RGB2ColorSpace(object):
def __init__(self, color_space):
self.color_space = color_space
def __call__(self, img):
if self.color_space == 'rgb':
return (img * 2 - 1.)
img = img.permute(1, 2, 0) # to [H, W, 3]
if self.color_space == 'lab':
img = color.rgb2lab(img) # [0~100, -128~127, -128~127]
img[:,:,0] = (img[:,:,0] - 50.0) * (1 / 50.)
img[:,:,1] = (img[:,:,1] + 0.5) * (1 / 127.5)
img[:,:,2] = (img[:,:,2] + 0.5) * (1 / 127.5)
elif self.color_space == 'hsv':
img = color.rgb2hsv(img) # [0~1, 0~1, 0~1]
img = (img * 2 - 1)
# to [3, H, W]
return torch.from_numpy(img).float().permute(2, 0, 1) # [-1~1, -1~1, -1~1]
class ColorSpace2RGB(object):
"""
[-1, 1] to [0, 255]
"""
def __init__(self, color_space):
self.color_space = color_space
def __call__(self, img):
"""numpy array [b, [-1~1], [-1~1], [-1~1]] to target space / result rgb[0~255]"""
img = img.data.numpy()
if self.color_space == 'rgb':
img = (img + 1) * 0.5
img = img.transpose(0, 2, 3, 1)
if self.color_space == 'lab': # to [0~100, -128~127, -128~127]
img[:,:,:,0] = (img[:,:,:,0] + 1) * 50
img[:,:,:,1] = (img[:,:,:,1] * 127.5) - 0.5
img[:,:,:,2] = (img[:,:,:,2] * 127.5) - 0.5
img_list = []
for i in img:
img_list.append(color.lab2rgb(i))
img = np.array(img_list)
elif self.color_space == 'hsv': # to [0~1, 0~1, 0~1]
img = (img + 1) * 0.5
img_list = []
for i in img:
img_list.append(color.hsv2rgb(i))
img = np.array(img_list)
img = (img * 255).astype(np.uint8)
return img # [0~255] / [b, h, w, 3]
def rot_crop(x):
"""return maximum width ratio of rotated image without letterbox"""
x = abs(x)
deg45 = math.pi * 0.25
deg135 = math.pi * 0.75
x = x * math.pi / 180
a = (math.sin(deg135 - x) - math.sin(deg45 - x))/(math.cos(deg135-x)-math.cos(deg45-x))
return math.sqrt(2) * (math.sin(deg45-x) - a*math.cos(deg45-x)) / (1-a)
class RandomFRC(transforms.RandomResizedCrop):
"""RandomHorizontalFlip + RandomRotation + RandomResizedCrop 2 images"""
def __call__(self, img1, img2, img3=None):
img1 = tvF.resize(img1, self.size, interpolation=IM.LANCZOS)
img2 = tvF.resize(img2, self.size, interpolation=IM.LANCZOS)
if img3 != None:
img3 = tvF.resize(img3, self.size, interpolation=IM.NEAREST)
if random.random() < 0.5:
img1 = tvF.hflip(img1)
img2 = tvF.hflip(img2)
if img3 != None:
img3 = tvF.hflip(img3)
if random.random() < 0.5:
rot = random.uniform(-10, 10)
crop_ratio = rot_crop(rot)
img1 = tvF.rotate(img1, rot, interpolation=IM.BILINEAR)
img2 = tvF.rotate(img2, rot, interpolation=IM.BILINEAR)
img1 = tvF.center_crop(img1, int(img1.size[0] * crop_ratio))
img2 = tvF.center_crop(img2, int(img2.size[0] * crop_ratio))
if img3 != None:
img3 = tvF.rotate(img3, rot, interpolation=IM.NEAREST)
img3 = tvF.center_crop(img3, int(img3.shape[1] * crop_ratio))
i, j, h, w = self.get_params(img1, self.scale, self.ratio)
# return the image with the same transformation
img1 = tvF.resized_crop(img1, i, j, h, w, self.size, self.interpolation)
img2 = tvF.resized_crop(img2, i, j, h, w, self.size, self.interpolation)
if img3 != None:
img3 = tvF.resized_crop(img3, i, j, h, w, self.size)
return (img1, img2, img3)
def get_train_dataset(args):
set_seed(args.seed)
data_dir_path = Path(args.data_dir)
batch_size = args.batch_size
input_size = args.input_size
data_randomize = RandomFRC(input_size, scale=(0.9, 1.0), ratio=(0.95, 1.05), interpolation=IM.LANCZOS)
color_space = args.color_space
swap_color_space = [RGB2ColorSpace(color_space)]
random_jitter = [transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2)]
data_augmentation = [transforms.Resize((input_size, input_size), interpolation=IM.LANCZOS),
transforms.ToTensor()]
mask_transform = [transforms.ToTensor()]
palette_num = args.palette_num
data_size = args.data_size
palette_path = data_dir_path / args.palette_dict
# Train set
print('making train set...')
rgb_train_path = data_dir_path / "rgb_train"
mask_train_path = data_dir_path / "mask_train"
sketch_dir_path_list = ["sketch_train"]
sketch_dir_path_list = [data_dir_path / p for p in sketch_dir_path_list if (data_dir_path / p).exists()]
(train_id_list, train_palette_list) = read_palette_dict(palette_path, rgb_train_path, color_space=color_space, k=palette_num, data_size=data_size)
if platform.system() == 'Windows':
_init_fn = None
else:
_init_fn = lambda worker_id: set_seed(args.seed, print_log=False)
train = ColorAndSketchDataset(rgb_path=rgb_train_path, mask_path=mask_train_path,
sketch_path_list=sketch_dir_path_list, file_id_list=train_id_list,
palette_list=train_palette_list, override_len=data_size, both_transform=data_randomize,
sketch_transform=transforms.Compose(random_jitter + data_augmentation),
color_transform=transforms.Compose(data_augmentation + swap_color_space),
seed=args.seed)
train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=args.thread, worker_init_fn=_init_fn)
print(f'train: read {sketch_dir_path_list[0]}, id_list len={len(train_id_list)}')
# Test set
print('making test set...')
rgb_test_path = data_dir_path / "rgb_test"
mask_test_path = data_dir_path / "mask_test"
sketch_test_path = data_dir_path / "sketch_test"
(test_id_list, test_palette_list) = read_palette_dict(palette_path, rgb_test_path, color_space=color_space, k=palette_num, data_size=args.test_image_count)
test = ColorAndSketchDataset(rgb_path=rgb_test_path, mask_path=mask_test_path,
sketch_path_list=[sketch_test_path], file_id_list=test_id_list,
palette_list=test_palette_list, override_len=args.test_image_count, both_transform=data_randomize,
sketch_transform=transforms.Compose(data_augmentation),
color_transform=transforms.Compose(data_augmentation + swap_color_space))
test_loader = DataLoader(test, batch_size=batch_size, shuffle=False, num_workers=args.thread, worker_init_fn=_init_fn)
print(f'test: read {sketch_test_path}, id_list len={len(test_id_list)}')
return train_loader, test_loader
# Need to fix
class LinerTestDataset(Dataset):
def __init__(self, sketch_path, file_id_list, iv_class_list, cv_class_list,
override_len=None, sketch_transform=None, **kwargs):
self.sketch_path = sketch_path
self.file_id_list = file_id_list # copy
self.iv_class_list = iv_class_list
self.cv_class_list = cv_class_list
self.sketch_transform = sketch_transform
self.data_len = len(file_id_list)
if override_len > 0 and self.data_len > override_len:
self.data_len = override_len
def __getitem__(self, idx):
file_id = self.file_id_list[idx]
iv_tag_class = self.iv_class_list[idx]
cv_tag_class = self.cv_class_list[idx]
sketch_path = self.sketch_path / f"{file_id}.png"
sketch_img = Image.open(sketch_path).convert('L') # to [1, H, W]
if self.sketch_transform is not None:
sketch_img = self.sketch_transform(sketch_img)
return (sketch_img, file_id, iv_tag_class, cv_tag_class)
def __len__(self):
return self.data_len
# Need to fix
def get_test_dataset(args):
data_dir_path = Path(args.data_dir)
batch_size = args.batch_size
input_size = args.input_size
data_augmentation = [transforms.Resize((input_size, input_size), interpolation=IM.LANCZOS),
transforms.ToTensor()]
data_size = args.data_size
sketch_path = data_dir_path / args.test_dir
tag_path = data_dir_path / args.tag_txt
(test_id_list, test_iv_class_list, test_cv_clas_list) = read_tagline_txt(
tag_path, sketch_path, iv_dict, cv_dict, is_train=False, data_size=data_size)
print('making train set...')
test_dataset = LinerTestDataset(sketch_path=sketch_path, file_id_list=test_id_list,
iv_class_list=test_iv_class_list, cv_class_list=test_cv_clas_list,
override_len=data_size, sketch_transform=transforms.Compose(data_augmentation))
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=args.thread)
print(f'iv_class_len={iv_class_len}, cv_class_len={cv_class_len}')
return test_loader
def get_dataset(args):
if args.test:
return get_test_dataset(args)
else:
return get_train_dataset(args)
```
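As a quick check of the normalization convention used by `RGB2ColorSpace` and `ColorSpace2RGB` (RGB path only; the lab/hsv branches additionally need scikit-image), a small round-trip sketch, assuming the plt2pix package is importable:
```python
import torch
from loader.dataloader import RGB2ColorSpace, ColorSpace2RGB

to_model = RGB2ColorSpace('rgb')            # [0, 1] CHW tensor  -> [-1, 1]
to_image = ColorSpace2RGB('rgb')            # [-1, 1] BCHW tensor -> uint8 BHWC in [0, 255]

img = torch.rand(3, 8, 8)                   # fake image in [0, 1]
normed = to_model(img)
restored = to_image(normed.unsqueeze(0))    # ColorSpace2RGB expects a batch dimension
print(normed.min().item(), normed.max().item(), restored.shape, restored.dtype)
```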
#### File: jhoowy/plt2pix/plt2pix.py
```python
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from loader.dataloader import ColorSpace2RGB
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode as IM
# Only for inference
class plt2pix(object):
def __init__(self, args):
if args.model == 'tag2pix':
from network import Generator
else:
raise Exception('invalid model name: {}'.format(args.model))
self.args = args
self.gpu_mode = not args.cpu
self.input_size = args.input_size
self.color_revert = ColorSpace2RGB(args.color_space)
self.layers = args.layers
self.palette_num = args.palette_num
self.sketch_transform = transforms.Compose([
transforms.Resize((self.input_size, self.input_size), interpolation=IM.LANCZOS),
transforms.ToTensor()])
self.use_crn = (args.load_crn != "")
##### initialize network
self.net_opt = {
'guide': not args.no_guide,
'relu': args.use_relu,
'bn': not args.no_bn,
'cit': False
}
self.G = Generator(input_size=args.input_size, layers=args.layers,
palette_num=self.palette_num, net_opt=self.net_opt)
for param in self.G.parameters():
param.requires_grad = False
if self.gpu_mode:
self.G = nn.DataParallel(self.G)
if self.use_crn:
from network import ColorPredictor
self.CRN = ColorPredictor(palette_num=self.palette_num, net_opt=self.net_opt)
for param in self.CRN.parameters():
param.requires_grad = False
if self.gpu_mode:
self.CRN = nn.DataParallel(self.CRN)
if self.gpu_mode and torch.cuda.is_available():
self.device = torch.device("cuda:0")
else:
self.device = torch.device("cpu")
print("gpu mode: ", self.gpu_mode)
print("device: ", self.device)
if self.gpu_mode:
print(torch.cuda.device_count(), "GPUS!")
if self.gpu_mode:
self.G.to(self.device)
if self.use_crn:
self.CRN.to(self.device)
self.G.eval()
self.load_model(args.load)
if self.use_crn:
self.CRN.eval()
self.load_crn(args.load_crn)
if self.gpu_mode:
self.G.module.net_opt['guide'] = False
else:
self.G.net_opt['guide'] = False
def colorize(self, input_image, palette=None):
'''Colorize input image based on palette
Parameters:
input_image (PIL.Image) -- the input sketch image
palette (np.array) -- RGB-ordered conditional palette (K x 3)
Returns:
G_f (np.array) -- the colorized result of input sketch image
'''
sketch = self.sketch_transform(input_image)
if palette is None:
palette = np.zeros(3 * self.palette_num)
palette = torch.FloatTensor((palette / 255.0))
sketch = sketch.reshape(1, *sketch.shape)
palette = palette.reshape(1, *palette.shape)
if self.gpu_mode:
palette = palette.to(self.device)
sketch = sketch.to(self.device)
G_f, _ = self.G(sketch, palette)
G_f = self.color_revert(G_f.cpu())[0]
return G_f
def recommend_color(self, input_image):
'''Recommend color palette based on sketch image
Parameters:
input_image (PIL.Image) -- the input sketch image
Returns:
palette (np.array) -- RGB-ordered conditional palette (K x 3)
'''
if not self.use_crn:
raise Exception("Color Recommendation Network is not loaded")
sketch = self.sketch_transform(input_image)
sketch = sketch.reshape(1, *sketch.shape)
if self.gpu_mode:
sketch = sketch.to(self.device)
palette = self.CRN(sketch) * 255.
return palette
def load_model(self, checkpoint_path):
checkpoint = torch.load(str(checkpoint_path))
self.G.load_state_dict(checkpoint['G'])
def load_crn(self, checkpoint_path):
checkpoint = torch.load(str(checkpoint_path))
self.CRN.load_state_dict(checkpoint['CRN'])
```
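A hypothetical inference call, showing how the constructor arguments above fit together. Every concrete value here (checkpoint path, sketch file, layer configuration) is a placeholder, so treat this as a shape reference rather than a recipe taken from the repository:
```python
from types import SimpleNamespace
import numpy as np
from PIL import Image
from plt2pix import plt2pix

args = SimpleNamespace(
    model='tag2pix', cpu=True, input_size=256, color_space='rgb',
    layers=[12, 8, 5, 5],              # placeholder layer configuration
    palette_num=5, load_crn='',        # empty string: no color-recommendation network
    no_guide=False, use_relu=False, no_bn=False,
    load='results/checkpoint.pkl',     # placeholder checkpoint path
)

colorizer = plt2pix(args)
sketch = Image.open('sample_sketch.png')        # placeholder sketch image
palette = np.zeros(3 * args.palette_num)        # flattened K x 3 RGB palette in [0, 255]
result = colorizer.colorize(sketch, palette)    # HWC uint8 RGB array
Image.fromarray(result).save('colorized.png')
```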
{
"source": "J-hoplin1/Covid19-Information-bot",
"score": 3
}
#### File: Covid19-Information-bot/Using Public API/CovidData.py
```python
import requests
import json
import re
from pytz import timezone
from typing import Any, MutableSequence
from urllib.parse import urlencode, quote_plus, unquote
from urllib.request import Request, urlopen
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from lxml import html, etree
import xml.etree.ElementTree as et
from xml.dom import minidom
class openDataAPICall(object):
def __init__(self,apiKey) -> None:
self.apiKey = apiKey
self.apiURL = 'http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19InfStateJson'
def buildRequests(self) -> bool:
# Time at which this code is executed
executedPoint = datetime.now(timezone('Asia/Seoul'))
endDate = executedPoint + timedelta(days = 1) # one day after the execution time
executedPoint = executedPoint + timedelta(days = -1)
# Start of the search range
searchStart = executedPoint.strftime("%Y%m%d") # strftime formats the date as YYYYMMDD
# End of the search range
searchEnd = endDate.strftime("%Y%m%d") # set the end of the range to tomorrow so that only today's data is returned
# Build the request query
queryParameter = '?' + urlencode({
quote_plus('serviceKey') : self.apiKey,
quote_plus('pageNo') : 1,
quote_plus('numOfRows') : 10,
quote_plus('startCreateDt') : searchStart,
quote_plus('endCreateDt') : searchEnd
})
response = requests.get(self.apiURL + queryParameter) # by default the raw response body from requests is a byte string
responseXML = response.text
responseCode = response.status_code
if 200 <= responseCode < 300:
res = BeautifulSoup(responseXML, 'lxml-xml')
return self.reProcessXML(res)
else:
return False
def addMainNews(self) -> MutableSequence:
covidSite = "http://ncov.mohw.go.kr/index.jsp"
covidNotice = "http://ncov.mohw.go.kr"
html = urlopen(covidSite)
bs = BeautifulSoup(html, 'html.parser')
# Bug fix 2021-03-02 : narrowed the parsing region to avoid interference from the card-news section
bs = bs.find('div',{'class' : 'm_news'})
sounds = []
briefTasks = dict()
hotIssues = dict()
mainbrief = bs.findAll('a',{'href' : re.compile('\/tcmBoardView\.do\?contSeq=[0-9]*')})
for brf in mainbrief:
briefTasks[brf.text] = covidNotice + brf['href']
sounds.append(briefTasks)
hotIssue = bs.findAll('a',{'href' : re.compile('https\:\/\/www\.korea\.kr\/[\w?=]+')})
for u in hotIssue:
hotIssues[u.text] = u['href']
sounds.append(hotIssues)
return sounds
def reProcessXML(self,BSXML : BeautifulSoup) -> bool:
# If XML parsing fails -> the API is under maintenance or the data has not been published yet
try:
res = BSXML # lxml-xml is a very fast, well-supported XML parser
item = res.findAll('item')
dayBefore = item[1]
today = item[0]
news = self.addMainNews()
except BaseException:
return False
# Briefing-related data
briefings = news[0]
briefTopics = list(briefings.keys())
# Hot-issue-related data
hotIssues = news[1]
issueTopics = list(hotIssues.keys())
dataDictionary = {
'dataDate' : datetime.strptime(today.find('stateDt').text,"%Y%m%d").date().strftime("%Y-%m-%d"),
'data' : {
'totalDecidedPatient' : today.find('decideCnt').text,
'todayDecidedPatient' : str(int(today.find('decideCnt').text) - int(dayBefore.find('decideCnt').text)),
'clearedPatient' : today.find('clearCnt').text,
'totalDeath' : today.find('deathCnt').text,
'increasedDeath' : str(int(today.find('deathCnt').text) - int(dayBefore.find('deathCnt').text)),
'CumulatedConfirmPercentage' : today.find('accDefRate').text
},
'briefing' : {},
'hotIssue' : {}
}
for i,o in enumerate(briefTopics, start = 1):
dataDictionary['briefing']['briefTopics{}'.format(i)] = [o , briefings[o]]
for i,o in enumerate(issueTopics, start = 1):
dataDictionary['hotIssue']['issueTopics{}'.format(i)] = [o , hotIssues[o]]
return dataDictionary
if __name__=="__main__":
p = openDataAPICall()
print(p.buildRequests())
```
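Note that the `__main__` block above constructs `openDataAPICall()` without the `apiKey` argument its constructor requires, so it fails as written. A working invocation would look roughly like this (the service key is a placeholder issued by the Korean public data portal):
```python
from CovidData import openDataAPICall   # assumes the file above is saved as CovidData.py

API_KEY = "YOUR_DATA_GO_KR_SERVICE_KEY"   # placeholder service key from data.go.kr
caller = openDataAPICall(API_KEY)
result = caller.buildRequests()           # dict on success, False on failure
if result:
    print(result['dataDate'], result['data']['todayDecidedPatient'])
```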
#### File: Covid19-Information-bot/Using WebScraping/Covid19KoreaStatusBot.py
```python
import discord
import asyncio
from discord.ext import commands
import urllib
from urllib.request import URLError
from urllib.request import HTTPError
from urllib.request import urlopen
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
from urllib.parse import quote
import re # Regex for youtube link
import warnings
import requests
import time
client = discord.Client() # Create Instance of Client. This Client is discord server's connection to Discord Room
bottoken = ""
@client.event # Use these decorator to register an event.
async def on_ready(): # on_ready() event : when the bot has finished logging in and setting things up
await client.change_presence(status=discord.Status.online, activity=discord.Game("Type !help or !๋์๋ง for help"))
print("New log in as {0.user}".format(client))
@client.event
async def on_message(message): # on_message() event : when the bot has received a message
#To user who sent message
# await message.author.send(msg)
print(message.content)
if message.author == client.user:
return
if message.content.startswith("!์ฝ๋ก๋"):
# ๋ณด๊ฑด๋ณต์ง๋ถ ์ฝ๋ก๋ ๋ฐ์ด๋ฌ์ค ์ ๋ณด์ฌ์ดํธ"
covidSite = "http://ncov.mohw.go.kr/index.jsp"
covidNotice = "http://ncov.mohw.go.kr"
html = urlopen(covidSite)
bs = BeautifulSoup(html, 'html.parser')
latestupdateTime = bs.find('span', {'class': "livedate"}).text.split(',')[0][1:].split('.')
statisticalNumbers = bs.findAll('span', {'class': 'num'})
beforedayNumbers = bs.findAll('span', {'class': 'before'})
# Main briefings and news links
briefTasks = []
mainbrief = bs.findAll('a',{'href' : re.compile('\/tcmBoardView\.do\?contSeq=[0-9]*')})
for brf in mainbrief:
container = []
container.append(brf.text)
container.append(covidNotice + brf['href'])
briefTasks.append(container)
print(briefTasks)
# Statistics
statNum = []
# Change from the previous day
beforeNum = []
for num in range(7):
statNum.append(statisticalNumbers[num].text)
for num in range(4):
beforeNum.append(beforedayNumbers[num].text.split('(')[-1].split(')')[0])
totalPeopletoInt = statNum[0].split(')')[-1].split(',')
tpInt = ''.join(totalPeopletoInt)
lethatRate = round((int(''.join(''.join(statNum[3].split(',')).lstrip("'").rstrip("'"))) / int(tpInt)) * 100, 2)
embed = discord.Embed(title="Covid-19 Virus Korea Status", description="",color=0x5CD1E5)
embed.add_field(name="Data source : Ministry of Health and Welfare of Korea", value="http://ncov.mohw.go.kr/index.jsp", inline=False)
embed.add_field(name="Latest data refred time",value="ํด๋น ์๋ฃ๋ " + latestupdateTime[0] + "์ " + latestupdateTime[1] + "์ผ "+latestupdateTime[2] +" ์๋ฃ์
๋๋ค.", inline=False)
embed.add_field(name="ํ์งํ์(๋์ )", value=statNum[0].split(')')[-1]+"("+beforeNum[0]+")",inline=True)
embed.add_field(name="์์นํ์(๊ฒฉ๋ฆฌํด์ )", value=statNum[1] + "(" + beforeNum[1] + ")", inline=True)
embed.add_field(name="์น๋ฃ์ค(๊ฒฉ๋ฆฌ ์ค)", value=statNum[2] + "(" + beforeNum[2] + ")", inline=True)
embed.add_field(name="์ฌ๋ง", value=statNum[3] + "(" + beforeNum[3] + ")", inline=True)
embed.add_field(name="๋์ ํ์ง๋ฅ ", value=statNum[6], inline=True)
embed.add_field(name="์น์ฌ์จ", value=str(lethatRate) + " %",inline=True)
embed.add_field(name="- ์ต์ ๋ธ๋ฆฌํ 1 : " + briefTasks[0][0],value="Link : " + briefTasks[0][1],inline=False)
embed.add_field(name="- ์ต์ ๋ธ๋ฆฌํ 2 : " + briefTasks[1][0], value="Link : " + briefTasks[1][1], inline=False)
embed.set_thumbnail(url="https://wikis.krsocsci.org/images/7/79/%EB%8C%80%ED%95%9C%EC%99%95%EA%B5%AD_%ED%83%9C%EA%B7%B9%EA%B8%B0.jpg")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Covid-19 Virus Korea Status", embed=embed)
client.run(bottoken)
```
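The fatality-rate arithmetic in the handler above is easy to test in isolation. A small sketch on stand-in strings (the figures are made up; on the live page they come from the scraped `<span class="num">` tags, and the quote-stripping in the original line is omitted here for brevity):
```python
# made-up figures standing in for the scraped statistics
statNum = ["(Cumulative) 100,000", "90,000", "8,500", "1,500"]

total = int(''.join(statNum[0].split(')')[-1].split(',')))   # "(Cumulative) 100,000" -> 100000
deaths = int(''.join(statNum[3].split(',')))                 # "1,500" -> 1500
lethal_rate = round(deaths / total * 100, 2)
print(lethal_rate)   # 1.5
```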
{
"source": "J-hoplin1/Design_Using_Riot_API",
"score": 3
}
#### File: Design_Using_Riot_API/functionModules/smtpConnector.py
```python
import re
import sys,os
import smtplib as smt
from email.mime.multipart import MIMEMultipart # container object that can nest multiple formats (text, img, audio)
from email import encoders # message contents to binary
from email.mime.text import MIMEText # plain-text format
# MIME: Multipurpose Internet Mail Extensions, an Internet standard format for e-mail.
from datetime import datetime
from .patternChecker import patternChecker
from pytz import timezone
from .textMaker import makeText
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
# Pattern Checker Instance
checker = patternChecker()
def mailSend(smtpReqDatas, message,receiver):
with smt.SMTP(smtpReqDatas["server"], smtpReqDatas["SMTPPort"]) as server:
server.starttls() # Transport Layer Security Connection
server.login(smtpReqDatas["hostersEmail"], smtpReqDatas["hostersEmailPW"]) # login to smpt server
responseSignal = server.sendmail(message['From'], message['To'], message.as_string())
if not responseSignal:
print("MessageSend Completed to {}".format(receiver))
else:
print('{}'.format(responseSignal))
def generateTextMime(receiver,mail,pw):
textMakerInstance = makeText()
if not checker.checkEmailPattern(receiver):
print("Fatal Error : Wrong email Pattern Please Check Again")
return
else:
text = textMakerInstance.makeText()
title = "{} ์ฝ๋ก๋ 19 ๋ฐ์ดํฐ".format(datetime.now(timezone('Asia/Seoul')).strftime('%Y๋
%m์ %d์ผ'))
sendMail(receiver, text, title,mail,pw)
def sendMail(receiver,text,title,mail,pw):
smtpReqDatas = {
"server" : 'smtp.naver.com',
"hostersEmail" : mail, # Hoster's E-mail Address here (Naver Mail)
"hostersEmailPW" : pw, # Hoster's E-mail PW here
"SMTPPort" : 587
}
title = title
paragraph = text
hoster = mail # Hoster's E-mail Address
receive = receiver
message = MIMEText(_text = paragraph, _charset = "utf-8")
message['Subject'] = title
message['From'] = hoster
message['To'] = receive
mailSend(smtpReqDatas, message,receiver)
```
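The `mailSend()` flow above reduces to a standard STARTTLS + login + sendmail sequence. A standalone sketch with placeholder credentials and addresses (any STARTTLS-capable SMTP server behaves the same way):
```python
import smtplib
from email.mime.text import MIMEText

msg = MIMEText("Today's COVID-19 summary goes here.", _charset="utf-8")
msg['Subject'] = "COVID-19 daily report"
msg['From'] = "sender@example.com"
msg['To'] = "subscriber@example.com"

with smtplib.SMTP("smtp.example.com", 587) as server:   # placeholder SMTP host
    server.starttls()                                   # upgrade the connection to TLS
    server.login("sender@example.com", "app-password")  # placeholder credentials
    failed = server.sendmail(msg['From'], msg['To'], msg.as_string())
    print("sent" if not failed else failed)
```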
#### File: Design_Using_Riot_API/functionModules/textMaker.py
```python
import json
class makeText(object):
"""
์ด ํด๋์ค๋ apiCaller.py์ API ํธ์ถ๊ฐ XML์ ์ ์ฒ๋ฆฌํ์ฌ ๋ง๋ JSONํ์ผ๋ก ํ
์คํธ๋ฅผ ๋ง๋ค์ด ๋ฐํํฉ๋๋ค.
1) makeText(self)
ํ
์คํธ ๋ง๋ค์ด ๋ฐํํ๋ ๋ฉ์๋์
๋๋ค.
"""
def __init__(self):
with open('../Datas/smtpSendDatas.json') as w:
self.data = json.load(w)
def makeText(self):
InfoText = "{} ์ฝ๋ก๋19 ์ ๋ณด\n\n\n์ด ํ์ง์ ์ : {}๋ช
\n์ ์ฒด ์ฌ๋ง์ ์ : {}๋ช
\n์ ์ผ๋๋น ํ์ง์ ์ฆ๊ฐ ์ :{}๋ช
\n์ ์ผ๋๋น ์ฌ๋ง์ ์ฆ๊ฐ์ : {}๋ช
\n๋์ ํ์ง๋ฅ : {}%\n\n\n".format(self.data["dataDate"],self.data["data"]["totalDecidedPatient"],self.data["data"]["totalDeath"],self.data["data"]["todayDecidedPatient"],self.data["data"]["increasedDeath"],self.data["data"]["CumulatedConfirmPercentage"])
breifText = "<์ต์ ๋ณด๊ฑด๋ณต์ง๋ถ ๋ธ๋ฆฌํ>\n"
for e,p in self.data['briefing'].items():
text = f"{p[0]} : {p[1]}\n"
breifText = breifText + text
issuesText = "<์ต์ ํซ์ด์>\n"
for e,p in self.data['hotIssue'].items():
text = f"{p[0]} : {p[1]}\n"
issuesText = issuesText + text
lastText = '\n\n\n\nData (API) provided by : Public Data Portal, Ministry of the Interior and Safety of Korea\nOfficial COVID-19 information site of Korea : http://ncov.mohw.go.kr/ \nService provided by : Hoplin (https://github.com/J-hoplin1)'
completedText = InfoText + breifText + issuesText + lastText
return completedText
```
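`makeText()` assumes `../Datas/smtpSendDatas.json` follows a specific schema. The sketch below reconstructs that shape from the keys the method accesses; the values are illustrative only:
```python
# Shape of ../Datas/smtpSendDatas.json expected by makeText(), inferred from the code above
example = {
    "dataDate": "2021-03-02",
    "data": {
        "totalDecidedPatient": "90000",
        "todayDecidedPatient": "350",
        "clearedPatient": "81000",
        "totalDeath": "1600",
        "increasedDeath": "5",
        "CumulatedConfirmPercentage": "1.4",
    },
    "briefing": {"briefTopics1": ["Regular briefing", "http://ncov.mohw.go.kr/..."]},
    "hotIssue": {"issueTopics1": ["Vaccination plan", "https://www.korea.kr/..."]},
}
```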
#### File: Design_Using_Riot_API/Service Tool/DBManager.py
```python
import subprocess
from enum import Enum
import json
import re
import yaml
import pymysql as sql
class dataBaseInitiator(object):
'''
DataBaseInitiator Document
Classify : System tool - Database
Using SQL Type : RDBMS - MySQL, Query Lang
What for? : This code is Database structure initiator before service start
Exception Class
1. connectionExceptions : Return error Message if connection fails or connection not maintained
Methods
1. __init__ : Initiate Essential Variables for methods. Make basic connection.
2. getConnectionAndCursor : Make a Connection with MySQL Server and Initiate MySQL Remote Cursor
3. initiateEssentialDatabase : Have variable "essentialDBList". This variables contains essential database name and it's 'tablename' and 'tableQuery'. Just revise this dictionary if you want to change service Database structure
4. initateServiceDatas : Initiate Essential Values for this service
- Public API Key (for data.go.kr)
- Public API URL
- Bitly API Key
- Hoster Email
- Hoster PW
'''
class connectionExceptions(Exception):
pass
def __init__(self) -> None:
self.sqlConnection = None # Variable : Save Connection Instance
self.cursor = None # Variable : Save SQL Cursor
self.ymlIns = None
'''
Save data about required Keys
'''
self.requiredKeys = ['Public API Key','API URL','Hostermail','HosteremailPW','Bitlykey']
self.keyBox = dict()
with open('../config.yml','r') as f:
#This yaml file includes connection information
self.ymlIns = yaml.load(f,yaml.FullLoader)
'''
Make a connection with keys written in yaml file
'''
self.ymlIns['sqlConnection']['db'] = 'covid19MailServiceData'
self.getConnectionAndCursor()
with open('../config.yml','w') as f:
yaml.dump(self.ymlIns,f,default_flow_style=False)
def getConnectionAndCursor(self) -> None:
self.sqlConnection = sql.connect(
user=f"{self.ymlIns['sqlConnection']['user']}",
password=f"{self.ymlIns['sqlConnection']['password']}",
host=f"{self.ymlIns['sqlConnection']['host']}"
)
self.cursor = self.sqlConnection.cursor(sql.cursors.DictCursor)
def initiateEssentialDatabase(self) -> None:
essentialDBList = {
'covid19MailServiceData' : {
"subsList" : """
CREATE TABLE subslist(
ID INT PRIMARY KEY NOT NULL AUTO_INCREMENT,
email VARCHAR(70) NOT NULL
);
""",
"serviceExecuteDatas" : """
CREATE TABLE adminDatas(
APIKEY VARCHAR(150),
APIURL VARCHAR(300),
HOSTERMAIL VARCHAR(100),
HOSTERMAILPW VARCHAR(100),
BITLYKEY VARCHAR(100)
);
"""
}
}
print(f"\nInitiating Database 'covid19MailServiceData'\n")
#Create database
self.cursor.execute(f"CREATE DATABASE covid19MailServiceData")
#Make Cursor to use new generated Database : To initiate tables
self.cursor.execute(f"USE covid19MailServiceData")
print("=" * 50)
for up in list(essentialDBList['covid19MailServiceData']):
print(f"Initiating table covid19MailServiceData - {up}")
self.cursor.execute(essentialDBList['covid19MailServiceData'][up])
print("=" * 50)
self.initateServiceDatas()
def initateServiceDatas(self) -> None:
print("\nInitiating Service Datas.")
self.cursor.execute("USE covid19MailServiceData")
stateBox = []
for i in self.requiredKeys:
self.keyBox[i] = None
for i in self.requiredKeys:
value = input(f"{i}๊ฐ ์
๋ ฅํ๊ธฐ : ")
self.keyBox[i] = value
stateBox.append(f"\'{self.keyBox[i]}\'")
self.keyBox = None
sqlState = f"""
INSERT INTO adminDatas (APIKEY,APIURL,HOSTERMAIL,HOSTERMAILPW,BITLYKEY)
VALUES ({','.join(stateBox)});
"""
self.cursor.execute(sqlState)
self.sqlConnection.commit()
print("DataBase ์ด๊ธฐํ ์๋ฃ!")
def changeValues(self,opts) -> None:
selected = {
keys.Public_API_Key : 'APIKEY',
keys.Public_API_EndPoint : 'APIURL',
keys.HosterMail : 'HOSTERMAIL',
keys.HosterMailPW : 'HOSTERMAILPW',
keys.BitlyKey : 'BITLYKEY'
}
def executeState(slt):
newValue = input(f"๋ณ๊ฒฝํ {slt}์
๋ ฅํ๊ธฐ : ")
self.cursor.execute(f"USE covid19MailServiceData")
self.cursor.execute(f"UPDATE adminDatas SET {slt}=\'{newValue}\'")
self.sqlConnection.commit()
if opts in list(selected.keys()):
executeState(selected[opts])
else:
pass
def checkConnectionStatus(self):
if self.sqlConnection:
print("Still Connected!")
else:
print("Connection Failed. Reconnect to MySQL")
self.getConnectionAndCursor()
def deleteDatabase(self):
self.cursor.execute("DROP DATABASE covid19MailServiceData")
option = Enum('option',["Initiate_Database_Structure",
"Delete_Database",
"Edit_Keys",
"Close"])
keys = Enum('keys',["Public_API_Key",
"Public_API_EndPoint",
"HosterMail",
"HosterMailPW",
"BitlyKey",
"Close"])
def selectOpt(enums) -> Enum:
options = [f'{p.value}. {p.name}' for p in enums]
while True:
print('-' * 20)
for i in options:
print(i)
print('-' * 20)
try:
select = int(input(">> "))
if 1 <= select <= len(options):
return enums(select)
except ValueError:
pass
except KeyboardInterrupt:
print("๋น์ ์์ ์ธ ์ข
๋ฃ์
๋๋ค\n")
pass
def loop() -> None:
initiator = dataBaseInitiator()
while True:
opt = selectOpt(option)
if opt == option.Initiate_Database_Structure:
initiator.initiateEssentialDatabase()
elif opt == option.Edit_Keys:
opts = selectOpt(keys)
initiator.changeValues(opts)
elif opt == option.Delete_Database:
initiator.deleteDatabase()
else:
print("Initiator Close")
break
if __name__ == "__main__":
try:
loop()
except sql.err.OperationalError as e:
subprocess.call("service mysql start", shell=True)
loop()
except sql.err.ProgrammingError as e:
print("Database already exist! Please check again.")
```
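`dataBaseInitiator` reads its MySQL credentials from `../config.yml`. The sketch below shows the layout it expects, inferred from the keys it reads; all credentials are placeholders, and the file path is relative to the `Service Tool` directory:
```python
import yaml

config = {
    "sqlConnection": {
        "user": "root",                      # placeholder MySQL user
        "password": "your-mysql-password",   # placeholder password
        "host": "127.0.0.1",
        # 'db' is filled in by dataBaseInitiator itself ('covid19MailServiceData')
    }
}
with open("config.yml", "w") as f:           # write next to the scripts that read ../config.yml
    yaml.dump(config, f, default_flow_style=False)
```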
{
"source": "J-hoplin1/Rainbow-Six-Siege-Search-bot",
"score": 3
}
#### File: J-hoplin1/Rainbow-Six-Siege-Search-bot/RainbowSixSIegeSearchBot.py
```python
import discord
import asyncio
import os
from discord.ext import commands
import urllib
from urllib.request import URLError
from urllib.request import HTTPError
from urllib.request import urlopen
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
from urllib.parse import quote
import re # Regex for youtube link
import warnings
import requests
import unicodedata
from tqdm import tqdm
operatoriconURLDict = dict()
# Scrape Rainbow Six Siege's Operator's icon before start
unisoftURL = "https://www.ubisoft.com"
rainbowSixSiegeOperatorIconURL = "https://www.ubisoft.com/en-gb/game/rainbow-six/siege/game-info/operators"
html = requests.get(rainbowSixSiegeOperatorIconURL).text
bs = BeautifulSoup(html,'html.parser')
#Get oprators' pages with ccid
operatorListDiv = bs.findAll('div',{'ccid' : re.compile('[0-9A-Za-z]*')})
print("Initiating Rainbow Six Siege Operators' Information....")
for ind in tqdm(range(0,len(operatorListDiv))):
operatormainURL = operatorListDiv[ind].a['href']
#Get Operator's name
operatorname = operatormainURL.split('/')[-1]
#Open URL : each operator's pages
html2 = requests.get(unisoftURL + operatormainURL).text
bs2 = BeautifulSoup(html2, 'html.parser')
operatoriconURL = bs2.find('div',{'class' : "operator__header__icons__names"}).img['src']
operatoriconURLDict[operatorname] = operatoriconURL
token = ''
client = discord.Client() # Create Instance of Client. This Client is discord server's connection to Discord Room
def deleteTags(htmls):
for a in range(len(htmls)):
htmls[a] = re.sub('<.+?>','',str(htmls[a]),0).strip()
return htmls
#Strip accents in english : Like a in jรคger
def convertToNormalEnglish(text):
return ''.join(char for char in unicodedata.normalize('NFKD', text) if unicodedata.category(char) != 'Mn')
# r6stats appears to block crawling when run from a server
r6URL = "https://r6stats.com"
playerSite = 'https://www.r6stats.com/search/'
@client.event # Use these decorator to register an event.
async def on_ready(): # on_ready() event : when the bot has finished logging in and setting things up
await client.change_presence(status=discord.Status.online, activity=discord.Game("Type !help or !๋์๋ง for help"))
print("New log in as {0.user}".format(client))
@client.event
async def on_message(message): # on_message() event : when the bot has received a message
#To user who sent message
# await message.author.send(msg)
print(message.content)
if message.author == client.user:
return
if message.content.startswith("!๋ ์์ ์ "):
# Get player nickname and parse page
playerNickname = ''.join((message.content).split(' ')[1:])
html = requests.get(playerSite + playerNickname + '/pc/').text
bs = BeautifulSoup(html, 'html.parser')
# If the search does not resolve directly, a results list of non-zero length exists -> bs.find('div',{'class' : 'results'})
if bs.find('div', {'class': 'results'}) == None:
# Get latest season's Rank information
latestSeason = bs.find('div', {'class': re.compile('season\-rank operation\_[A-Za-z_]*')})
# if player nickname not entered
if len(message.content.split(" ")) == 1:
embed = discord.Embed(title="ํ๋ ์ด์ด ์ด๋ฆ์ด ์
๋ ฅ๋์ง ์์์ต๋๋ค", description="", color=0x5CD1E5)
embed.add_field(name="Error : Player name not entered" + playerNickname,
value="To use command : !๋ ์์ ์ (nickname)")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Player name not entered ", embed=embed)
# search if it's empty page
elif latestSeason == None:
embed = discord.Embed(title="ํด๋น ์ด๋ฆ์ ๊ฐ์ง ํ๋ ์ด์ด๊ฐ ์กด์ฌํ์ง์์ต๋๋ค.", description="", color=0x5CD1E5)
embed.add_field(name="Error : Can't find player name " + playerNickname,
value="Please check player's nickname")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Can't find player name " + playerNickname, embed=embed)
# Command entered well
else:
# r6stats profile image
r6Profile = bs.find('div', {'class': 'main-logo'}).img['src']
# player level
playerLevel = bs.find('span', {'class': 'quick-info__value'}).text.strip()
RankStats = bs.find('div', {'class': 'card stat-card block__ranked horizontal'}).findAll('span', {
'class': 'stat-count'})
# Get text from <span> values
for info in range(len(RankStats)):
RankStats[info] = RankStats[info].text.strip()
# value of variable RankStats : [Timeplayed, Match Played,kills per matchm, kills,death, KDA Rate,Wins,Losses,W/L Rate]
# latest season tier medal
lastestSeasonRankMedalLocation = latestSeason.div.img['src']
# latest Season tier
lastestSeasonRankTier = latestSeason.div.img['alt']
# latest season operation name
OperationName = latestSeason.find('div', {'class': 'meta-wrapper'}).find('div', {
'class': 'operation-title'}).text.strip()
# latest season Ranking
latestSeasonRanking = latestSeason.find('div', {'class': 'rankings-wrapper'}).find('span', {
'class': 'ranking'})
# if the player is not ranked the span has class 'not-ranked'; if ranked, it has class 'ranking'
if latestSeasonRanking == None:
latestSeasonRanking = bs.find('span', {'class': 'not-ranked'}).text.upper()
else:
latestSeasonRanking = latestSeasonRanking.text
# Add player's MMR Rank MMR Information
playerInfoMenus = bs.find('a', {'class': 'player-tabs__season_stats'})['href']
mmrMenu = r6URL + playerInfoMenus
html = requests.get(mmrMenu).text
bs = BeautifulSoup(html, 'html.parser')
# recent season rank box
# Ranks are shown in the order America - Europe - Asia. This code only supports the Asia server's MMR
getElements = bs.find('div', {'class': 'card__content'}) # the first element with class 'card__content' is the latest season content box
for ckAsia in getElements.findAll('div', {'class': 'season-stat--region'}):
checkRegion = ckAsia.find('div',{'class' : 'season-stat--region-title'}).text
if checkRegion == "Asia":
getElements = ckAsia
break
else:
pass
# Player's Tier Information
latestSeasonTier = getElements.find('img')['alt']
# MMR data -> [Wins, Losses, Abandons, Max, W/L, MMR]
mmrDatas = []
for dt in getElements.findAll('span', {'class': 'season-stat--region-stats__stat'}):
mmrDatas.append(dt.text)
embed = discord.Embed(title="Rainbow Six Siege player search from r6stats", description="",
color=0x5CD1E5)
embed.add_field(name="Player search from r6stats", value=playerSite + playerNickname + '/pc/',
inline=False)
embed.add_field(name="Player's basic information",
value="Ranking : #" + latestSeasonRanking + " | " + "Level : " + playerLevel,
inline=False)
embed.add_field(name="Latest season information | Operation : " + OperationName,
value=
"Tier(Asia) : " + latestSeasonTier + " | W/L : " + mmrDatas[0] + "/" + mmrDatas[
1] + " | " + "MMR(Asia) : " + mmrDatas[-1],
inline=False)
embed.add_field(name="Total Play Time", value=RankStats[0], inline=True)
embed.add_field(name="Match Played", value=RankStats[1], inline=True)
embed.add_field(name="Kills per match", value=RankStats[2], inline=True)
embed.add_field(name="Total Kills", value=RankStats[3], inline=True)
embed.add_field(name="Total Deaths", value=RankStats[4], inline=True)
embed.add_field(name="K/D Ratio", value=RankStats[5], inline=True)
embed.add_field(name="Wins", value=RankStats[6], inline=True)
embed.add_field(name="Losses", value=RankStats[7], inline=True)
embed.add_field(name="W/L Ratio", value=RankStats[8], inline=True)
embed.set_thumbnail(url=r6URL + r6Profile)
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Player " + playerNickname + "'s stats search", embed=embed)
else:
searchLink = bs.find('a', {'class': 'result'})
if searchLink == None:
embed = discord.Embed(title="No player with that name exists.", description="", color=0x5CD1E5)
embed.add_field(name="Error : Can't find player name " + playerNickname,
value="Please check player's nickname")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Can't find player name " + playerNickname, embed=embed)
else:
searchLink = r6URL + searchLink['href']
html = requests.get(searchLink).text
bs = BeautifulSoup(html, 'html.parser')
# Get latest season's Rank information
latestSeason = bs.findAll('div', {'class': re.compile(r'season-rank operation_[A-Za-z_]*')})[0]
# if player nickname not entered
if len(message.content.split(" ")) == 1:
embed = discord.Embed(title="Player name was not entered", description="", color=0x5CD1E5)
embed.add_field(name="Error : Player name not entered" + playerNickname,
value="To use command : !레식전적 (nickname)")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Player name not entered ", embed=embed)
# check whether the search page came back empty
elif latestSeason == None:
embed = discord.Embed(title="No player with that name exists.", description="", color=0x5CD1E5)
embed.add_field(name="Error : Can't find player name " + playerNickname,
value="Please check player's nickname")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Can't find player name " + playerNickname, embed=embed)
# Command entered well
else:
# r6stats profile image
r6Profile = bs.find('div', {'class': 'main-logo'}).img['src']
# player level
playerLevel = bs.find('span', {'class': 'quick-info__value'}).text.strip()
RankStats = bs.find('div', {'class': 'card stat-card block__ranked horizontal'}).findAll('span', {
'class': 'stat-count'})
# Get text from <span> values
for info in range(len(RankStats)):
RankStats[info] = RankStats[info].text.strip()
# value of variable RankStats : [Time played, Matches played, Kills per match, Kills, Deaths, K/D Ratio, Wins, Losses, W/L Ratio]
# latest season tier medal
lastestSeasonRankMedalLocation = latestSeason.div.img['src']
# latest Season tier
lastestSeasonRankTier = latestSeason.div.img['alt']
# latest season operation name
OperationName = latestSeason.find('div', {'class': 'meta-wrapper'}).find('div', {
'class': 'operation-title'}).text.strip()
# latest season Ranking
latestSeasonRanking = latestSeason.find('div', {'class': 'rankings-wrapper'}).find('span', {
'class': 'ranking'})
# if the player is not ranked the span has class 'not-ranked'; if ranked it has class 'ranking'
if latestSeasonRanking == None:
latestSeasonRanking = bs.find('span', {'class': 'not-ranked'}).text.upper()
else:
latestSeasonRanking = latestSeasonRanking.text
#Add player's MMR Rank MMR Information
playerInfoMenus = bs.find('a', {'class' : 'player-tabs__season_stats'})['href']
mmrMenu = r6URL + playerInfoMenus
html = requests.get(mmrMenu).text
bs = BeautifulSoup(html, 'html.parser')
#recent season rank box
# Ranks are shown in the order America - Europe - Asia. This code only supports the Asia server's MMR
getElements = bs.find('div', {'class': 'card__content'}) # the first element with class 'card__content' is the latest season content box
for ckAsia in getElements.findAll('div', {'class': 'season-stat--region'}):
checkRegion = ckAsia.find('div', {'class': 'season-stat--region-title'}).text
if checkRegion == "Asia":
getElements = ckAsia
break
else:
pass
# Player's Tier Information
latestSeasonTier = getElements.find('img')['alt']
# MMR data -> [Wins, Losses, Abandons, Max, W/L, MMR]
mmrDatas = []
for dt in getElements.findAll('span', {'class': 'season-stat--region-stats__stat'}):
mmrDatas.append(dt.text)
embed = discord.Embed(title="Rainbow Six Siege player search from r6stats", description="",
color=0x5CD1E5)
embed.add_field(name="Player search from r6stats", value=searchLink,
inline=False)
embed.add_field(name="Player's basic information",value= "Ranking : #" + latestSeasonRanking + " | " + "Level : " + playerLevel,inline=False)
embed.add_field(name="Latest season information | Operation : " + OperationName,
value=
"Tier(Asia) : " + latestSeasonTier + " | W/L : " + mmrDatas[0] + "/"+mmrDatas[1] + " | " + "MMR(Asia) : " + mmrDatas[-1],
inline=False)
embed.add_field(name="Total Play Time", value=RankStats[0], inline=True)
embed.add_field(name="Match Played", value=RankStats[1], inline=True)
embed.add_field(name="Kills per match", value=RankStats[2], inline=True)
embed.add_field(name="Total Kills", value=RankStats[3], inline=True)
embed.add_field(name="Total Deaths", value=RankStats[4], inline=True)
embed.add_field(name="K/D Ratio", value=RankStats[5], inline=True)
embed.add_field(name="Wins", value=RankStats[6], inline=True)
embed.add_field(name="Losses", value=RankStats[7], inline=True)
embed.add_field(name="W/L Ratio", value=RankStats[8], inline=True)
embed.set_thumbnail(url=r6URL + r6Profile)
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Player " + playerNickname + "'s stats search", embed=embed)
if message.content.startswith("!레식오퍼"):
# operator image dictionary key is lowercase
# for player's operator information
useroperatorInformation = dict()
playerNickname = ''.join((message.content).split(' ')[1:])
html = requests.get(playerSite + playerNickname + '/pc/').text
bs = BeautifulSoup(html, 'html.parser')
if bs.find('div', {'class': 'results'}) == None:
# Scrape menu hyperlink : to operator menu
playerOperator = bs.find('a', {'class': 'player-tabs__operators'})
playerOperatorMenu = r6URL + playerOperator['href']
print(playerOperatorMenu)
# Reopen page
html = requests.get(playerOperatorMenu).text
bs = BeautifulSoup(html, 'html.parser')
embed = discord.Embed(title="Stats by operator", description="Arrange in order of high-play operator",
color=0x5CD1E5)
embed.add_field(name="To see more stats by operator click link here", value=playerOperatorMenu,
inline=False)
operatorStats = bs.findAll('tr', {'class': 'operator'})
mostOperator = None
indNumS = 0
# statlist -> [operator,kills,deaths,K/D,Wins,Losses,W/L,HeadShots,Melee Kills,DBNO,Playtime]
for op in operatorStats:
# discord can show maximum 8 fields
if indNumS == 7:
break
count = 0
statlist = []
if op.td.span.text.split(" ")[-1] == "Recruit":
pass
else:
for b in op:
statlist.append(b.text)
if indNumS == 0:
mostOperator = convertToNormalEnglish(statlist[0].lower())
embed.add_field(name="Operator Name", value=statlist[0], inline=True)
embed.add_field(name="Kills / Deaths", value=statlist[1] + "K / " + statlist[2] + "D", inline=True)
embed.add_field(name="Wins / Losses", value=statlist[4] + "W / " + statlist[5] + "L", inline=True)
indNumS += 1
embed.set_thumbnail(url=operatoriconURLDict[mostOperator])
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Player " + playerNickname + "'s stats search", embed=embed)
else:
searchLink = bs.find('a', {'class': 'result'})
if searchLink == None:
embed = discord.Embed(title="No player with that name exists.", description="", color=0x5CD1E5)
embed.add_field(name="Error : Can't find player name " + playerNickname,
value="Please check player's nickname")
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Error : Can't find player name " + playerNickname, embed=embed)
else:
searchLink = bs.find('a', {'class': 'result'})['href']
searchLink = r6URL + searchLink
html = requests.get(searchLink).text
bs = BeautifulSoup(html, 'html.parser')
# Scrape menu hyperlink : to operator menu
playerOperator = bs.find('a', {'class': 'player-tabs__operators'})
playerOperatorMenu = r6URL + playerOperator['href']
print(playerOperatorMenu)
# Reopen page
html = requests.get(playerOperatorMenu).text
bs = BeautifulSoup(html, 'html.parser')
embed = discord.Embed(title="Stats by operator", description="Arrange in order of high-play operator",
color=0x5CD1E5)
embed.add_field(name="To see more stats by operator click link here", value=playerOperatorMenu,
inline=False)
operatorStats = bs.findAll('tr', {'class': 'operator'})
mostOperator = None
indNumS = 0
# statlist -> [operator,kills,deaths,K/D,Wins,Losses,W/L,HeadShots,Melee Kills,DBNO,Playtime]
for op in operatorStats:
# discord can show maximum 8 fields
if indNumS == 7:
break
count = 0
statlist = []
if op.td.span.text.split(" ")[-1] == "Recruit":
pass
else:
for b in op:
statlist.append(b.text)
if indNumS == 0:
mostOperator = convertToNormalEnglish(statlist[0].lower())
embed.add_field(name="Operator Name", value=statlist[0], inline=True)
embed.add_field(name="Kills / Deaths", value=statlist[1] + "K / " + statlist[2] + "D",
inline=True)
embed.add_field(name="Wins / Losses", value=statlist[4] + "W / " + statlist[5] + "L",
inline=True)
indNumS += 1
embed.set_thumbnail(url=operatoriconURLDict[mostOperator])
embed.set_footer(text='Service provided by Hoplin.',
icon_url='https://avatars2.githubusercontent.com/u/45956041?s=460&u=1caf3b112111cbd9849a2b95a88c3a8f3a15ecfa&v=4')
await message.channel.send("Player " + playerNickname + "'s stats search", embed=embed)
client.run(token)
``` |
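The handlers above reference a `client` object, a bot `token`, an `operatoriconURLDict`, and a `convertToNormalEnglish` helper that are defined earlier in the same file. A minimal sketch of that setup, with assumed names and a placeholder token rather than the original code:

```python
# Hypothetical setup sketch for the handlers above -- not the original file's code.
import re

import discord
import requests
from bs4 import BeautifulSoup

token = "YOUR_BOT_TOKEN"   # placeholder; load the real token from a config file or environment variable
client = discord.Client()  # recent discord.py releases also require an `intents` argument

# operatoriconURLDict (operator name -> icon URL) and convertToNormalEnglish()
# are assumed to be defined alongside this setup; they are used by the operator-stats handler.
```

With discord.py 2.x the message-content intent also has to be enabled for `message.content` to be readable by the handlers.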
{
"source": "jhoralek/pyinels3",
"score": 3
} |
#### File: pyinels/device/pyBase.py
```python
from pyinels.const import (
ATTR_SWITCH_ON,
ATTR_SWITCH_OFF
)
class pyBase:
"""Inels base class."""
def __init__(self, device):
"""Initialize object."""
self._device = device
@property
def state(self):
"""Return the state of the switch."""
return int(self._device.value[self._device.id]) == ATTR_SWITCH_ON
@property
def name(self):
"""Name of the device."""
return self._device.title
@property
def unique_id(self):
"""Unique id of the device."""
return self._device.id
@property
def up(self):
"""Value of shutter for up."""
return self._device.value[self._device.up if self._device.up
is not None else self._device.id]
@property
def down(self):
"""Value of shutter for down."""
return self._device.value[self._device.down if self._device.down
is not None else self._device.id]
@property
def value(self):
"""Value of the device."""
val = self._device.value[self._device.id]
if isinstance(val, str):
if val.isdigit():
return int(val)
elif val.replace('.', '', 1).isdigit() and val.count('.') < 2:
return float(val)
else:
return val
else:
return val
def turn_off(self):
"""Turn the switch off."""
self._device.write_value(ATTR_SWITCH_OFF)
def turn_on(self):
"""Turn the switch on."""
self._device.write_value(ATTR_SWITCH_ON)
def update(self):
"""Update data on the device."""
return self._device.get_value()
def __repr__(self):
"""Object representation."""
state = "on" if self.state else "off"
return "<{} #{} - " \
"title: {}, " \
"state: {}" \
">".format(self._device.type, self._device.id,
self._device.title, state)
```
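A small sketch of how `pyBase` behaves, using a stand-in device object. `FakeDevice` is invented for illustration (real code passes an `ApiResource` from the API layer), and the comments assume `ATTR_SWITCH_ON`/`ATTR_SWITCH_OFF` are `1`/`0`:

```python
from pyinels.device.pyBase import pyBase

class FakeDevice:
    """Minimal stand-in exposing only the attributes pyBase touches."""
    def __init__(self):
        self.id = "ZAS_1B_Pokoj_dole"   # id borrowed from the production tests
        self.title = "Socket downstairs"
        self.type = "switch"
        self.up = None
        self.down = None
        self.value = {self.id: "1"}
    def write_value(self, value):
        self.value = {self.id: str(value)}
    def get_value(self):
        return self.value

dev = pyBase(FakeDevice())
print(dev.state)   # True, assuming ATTR_SWITCH_ON == 1
print(dev.value)   # 1 -- digit strings are coerced to int by the value property
dev.turn_off()
print(dev)         # <switch #ZAS_1B_Pokoj_dole - title: Socket downstairs, state: off>
```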
#### File: pyinels3/tests/production_test.py
```python
import asyncio
from pyinels.api import Api
from pyinels.device.pyLight import pyLight
from pyinels.device.pySwitch import pySwitch
from pyinels.device.pyDoor import pyDoor
from pyinels.device.pyShutter import pyShutter
# from unittest import async_case
MMS_IP_ADDRESS = "192.168.2.102"
PLC_IP_ADDRESS = "192.168.2.101"
# class ProductionTest(async_case.IsolatedAsyncioTestCase):
class ProductionTest():
"""Library used against a production server."""
def setUp(self):
"""Set up all necessary instances and mocks."""
self.api = Api(f'http://{MMS_IP_ADDRESS}', 8001, "CU3")
async def asyncSetUp(self):
"""Set up all necessary async stuff."""
devices = await self.api.getAllDevices()
self.api.set_devices(devices)
def tearDown(self):
"""Remove all attached properties."""
self.api = None
async def test_ping_success(self):
"""Ping test."""
ping = await self.api.ping()
self.assertEqual(ping, True)
async def test_plcIp_address(self):
"""Get Ip address of the PLC."""
ip = await self.api.getPlcIp()
self.assertEqual(PLC_IP_ADDRESS, ip)
def test_loaded_devices(self):
"""Are devices from api loaded?"""
self.assertGreater(len(self.api.devices), 0)
async def test_create_light(self):
"""create and test light."""
devices = [
x for x in self.api.devices
if x.id == "SV_7_Pokoj_dole"]
light = await pyLight(devices[0])
self.assertEqual(light.state, False)
await light.turn_on()
self.assertEqual(light.state, True)
await asyncio.sleep(4)
await light.turn_off()
self.assertEqual(light.state, False)
await asyncio.sleep(4)
await light.set_brightness(50)
self.assertEqual(light.state, True)
await asyncio.sleep(4)
await light.set_brightness(100)
self.assertEqual(light.state, True)
await asyncio.sleep(4)
await light.set_brightness(0)
self.assertEqual(light.state, False)
async def test_create_switch(self):
"""create and test switch."""
devices = [
x for x in self.api.devices if x.id
== "ZAS_1B_Pokoj_dole"]
switch = await pySwitch(devices[0])
await switch.turn_off()
self.assertEqual(switch.state, False)
await asyncio.sleep(4)
await switch.turn_on()
self.assertEqual(switch.state, True)
async def test_create_door(self):
"""create and test door."""
devices = [
x for x in self.api.devices if x.id
== "Vrata_Garaz"]
door = await pyDoor(devices[0])
await door.turn_on()
await asyncio.sleep(30)
await door.turn_off()
async def test_create_shutter(self):
"""create and test shutter."""
devices = [
x for x in self.api.devices
if x.id == "ROL_Pokoj_host_nahoru_ROL_Pokoj_host_dolu"]
shutter = await pyShutter(devices[0])
await shutter.pull_up()
await asyncio.sleep(20)
await shutter.stop()
await shutter.pull_down()
await asyncio.sleep(10)
await shutter.stop()
```
#### File: pyinels3/tests/resource_test.py
```python
from unittest.mock import patch
from unittest import TestCase
from pyinels.api import Api
from pyinels.api.resources import ApiResource
from tests.const_test import (
TEST_API_CLASS_NAMESPACE,
TEST_API_NAMESPACE,
TEST_API_ROOM_DEVICES,
TEST_API_READ_DATA,
TEST_HOST,
TEST_PORT,
TEST_RAW_GARAGE_DOOR,
TEST_RESOURCE_SWITCH,
TEST_VERSION
)
GARAGE_ID = "Vrata_Garaz"
GARAGE_NAME = "Vrata"
GARAGE_CLOSE = {GARAGE_ID: 0}
GARAGE_OPEN = {GARAGE_ID: 1}
class ResourceTest(TestCase):
"""Class to test resource of api iNels BUS."""
def setUp(self):
self.api = Api(TEST_HOST, TEST_PORT, TEST_VERSION)
self.patches = [
patch(f'{TEST_API_CLASS_NAMESPACE}.ping', return_value=True),
patch(f'{TEST_API_CLASS_NAMESPACE}.{TEST_API_ROOM_DEVICES}',
return_value=TEST_RAW_GARAGE_DOOR),
patch(f'{TEST_API_CLASS_NAMESPACE}.{TEST_API_READ_DATA}',
return_value=GARAGE_CLOSE)
]
for p in self.patches:
p.start()
self.res_list = self.api.getRoomDevices('garage')
self.garage_door = self.res_list[0]
def tearDown(self):
"""Destroy all instances and mocks."""
self.api = None
self.res_list = None
self.garage_door = None
patch.stopall()
self.patches = None
@patch(f'{TEST_API_NAMESPACE}.resources.ApiResource')
def test_load_resource_object(self, mock_class):
"""Test to load resource object."""
mock_class(TEST_RESOURCE_SWITCH, self.api)
mock_class.assert_called()
mock_class.assert_called_once()
self.assertEqual(mock_class.call_count, 1)
res = ApiResource(TEST_RESOURCE_SWITCH, self.api)
self.assertIsInstance(res, ApiResource)
self.assertEqual(TEST_RESOURCE_SWITCH['type'], res.type)
# inels is raw data id from iNels BUS
self.assertEqual(TEST_RESOURCE_SWITCH['inels'], res.id)
self.assertEqual(TEST_RESOURCE_SWITCH['name'], res.title)
self.assertEqual(TEST_RESOURCE_SWITCH['read_only'], res.read_only)
# should be None, because get_value has not been called yet
self.assertIsNone(res.value)
@patch(f'{TEST_API_CLASS_NAMESPACE}.read')
def test_get_value(self, mock_room_devices):
"""Test the get_value method of the Api resources. It should touch
the iNels BUS."""
mock_room_devices.return_value = GARAGE_CLOSE
self.assertEqual(len(self.res_list), 1)
self.assertEqual(self.garage_door.title, GARAGE_NAME)
door = self.garage_door.get_value()
value = door[self.garage_door.id]
self.assertEqual(value, 0)
self.assertIsNotNone(self.garage_door.value)
self.assertEqual(self.garage_door.value[self.garage_door.id], 0)
def test_write_value(self):
"""Test set value to the iNels BUS."""
with patch.object(self.api, '_Api__writeValues', return_value=None):
# set int
self.garage_door.write_value(1)
self.assertEqual(self.garage_door.value[self.garage_door.id], '1')
# change int to another value
self.garage_door.write_value(0)
self.assertEqual(self.garage_door.value[self.garage_door.id], '0')
# change to float with different value
self.garage_door.write_value(25.0)
self.assertEqual(
self.garage_door.value[self.garage_door.id], '25.0')
# change to int with same value
self.garage_door.write_value(25)
self.assertEqual(self.garage_door.value[self.garage_door.id], '25')
@patch(f'{TEST_API_CLASS_NAMESPACE}.{TEST_API_READ_DATA}')
def test_is_available(self, mock_garage_object):
"""Test when the resource object is available."""
mock_garage_object.return_value = None
with patch.object(self.api, '_Api__writeValues', return_value=None):
# set the value of the ApiResource then it should be available
self.garage_door.write_value(1)
self.assertTrue(self.garage_door.is_available)
``` |
{
"source": "JHorcasitas/cnn_document_binarization",
"score": 3
} |
#### File: data_ingestion/dataloader/dataloader_factory.py
```python
import torch
from data_ingestion.dataloader.dataloader import BinaryDataLoader
from utils.weights import get_weights
Dataset = torch.utils.data.Dataset
def get_dataloader(dataset: Dataset,
num_workers: int = 2,
batch_size: int = 256) -> BinaryDataLoader:
dataloader = BinaryDataLoader(dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
weights=get_weights(dataset))
return dataloader
```
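A minimal usage sketch. It assumes the `BinaryDataset(kind='train')` constructor used in the repository's tests and that `BinaryDataLoader` iterates like a standard PyTorch loader, yielding batches of windows and labels:

```python
from data_ingestion.dataset import BinaryDataset
from data_ingestion.dataloader.dataloader_factory import get_dataloader

dataset = BinaryDataset(kind='train')
loader = get_dataloader(dataset, num_workers=2, batch_size=256)

for windows, labels in loader:   # assumed batch structure
    print(windows.shape, labels.shape)
    break
```

`get_weights(dataset)` is expected to return one sampling weight per example, mirroring the weights list the tests build with the same length as the dataset.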
#### File: cnn_document_binarization/inference/binarizer.py
```python
import configparser
import torch
import numpy as np
from tqdm import tqdm
from PIL import ImageOps
from torchvision import transforms
from torchvision.transforms import Compose
from models.model_factory import get_model
config = configparser.ConfigParser()
config.read('config.ini')
class Binarizer:
def __init__(self, kind, device=None):
"""
args:
kind (str): one of {'sk', 'sw'}
device (torch.device): device to use for inference
"""
self._kind = kind
self._transform = Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.732],
std=[0.129])])
if device:
self._device = device
else:
self._device = torch.device('cpu')
self._model = get_model(kind=kind,
device=self._device,
cache=True)
self._model.eval()
self._radius = config['DATA INGESTION'].getint('radius')
def binarize(self, img):
"""
args:
img (PIL Image): image to binarize
returns:
binarized image
"""
img = self._process_image(img)
if self._kind == 'sw':
return self._sw_binarization(img)
elif self._kind == 'sk':
pass
else:
raise ValueError(f'Unrecognized kind: "{self._kind}"')
def _process_image(self, img):
"""
Get img ready to be processed by the model
"""
img = ImageOps.expand(img, border=self._radius, fill=255)
img = self._transform(img)
img = img[None, ...]
return img
def _sw_binarization(self, img):
"""
Binarize the padded image with the sliding-window model, one pixel per forward pass.
"""
with torch.no_grad():
rows = img.shape[2] - self._radius * 2
cols = img.shape[3] - self._radius * 2
output = np.empty((rows, cols)).astype(np.uint8)
for row in tqdm(range(rows)):
for col in range(cols):
window = img[0,
0,
row:(row + 2 * self._radius + 1),
col:(col + 2 * self._radius + 1)]
window = window[None, None, ...]
win_out = self._model(window)
win_out = torch.round(torch.sigmoid(win_out)) * 255
win_out = int(win_out.item())
output[row, col] = win_out
return output
```
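A usage sketch for the sliding-window path; the module path and file names are assumptions, and it presumes `get_model(kind='sw', ..., cache=True)` can find trained weights:

```python
from PIL import Image
from inference.binarizer import Binarizer   # module path inferred from the file layout

binarizer = Binarizer(kind='sw')                   # runs on CPU unless a torch.device is passed
img = Image.open('scanned_page.png').convert('L')  # single-channel input to match Normalize(mean=[0.732], std=[0.129])
binary = binarizer.binarize(img)                   # numpy uint8 array of 0/255 values
Image.fromarray(binary).save('scanned_page_bin.png')
```

As the nested loop in `_sw_binarization` shows, this path runs one forward pass per output pixel, which is the motivation for the fully convolutional `StridedNet` in the next file.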
#### File: models/model_definition/sk_network.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class StridedNet(nn.Module):
def __init__(self):
super(StridedNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1,
out_channels=10,
kernel_size=6,
stride=1,
dilation=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=1)
self.conv2 = nn.Conv2d(in_channels=10,
out_channels=20,
kernel_size=4,
stride=1,
dilation=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=1, dilation=2)
self.fc1 = nn.Conv2d(in_channels=20,
out_channels=32,
kernel_size=2,
stride=1,
dilation=4)
self.fc2 = nn.Conv2d(in_channels=32,
out_channels=16,
kernel_size=1,
stride=1,
dilation=1)
self.fc3 = nn.Conv2d(in_channels=16,
out_channels=1,
kernel_size=1,
stride=1,
dilation=1)
def forward(self, x):
out = F.gelu(self.pool1(self.conv1(x)))
out = F.gelu(self.pool2(self.conv2(out)))
out = F.gelu(self.fc1(out))
out = F.gelu(self.fc2(out))
out = self.fc3(out)
return out
```
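Since every layer of `StridedNet` is a convolution, it can score an entire padded image in a single forward pass instead of one window per pixel. A shape-check sketch (the 100×100 input size is arbitrary, and the import path is inferred from the file layout):

```python
import torch
from models.model_definition.sk_network import StridedNet   # import path inferred from the file layout

net = StridedNet()
net.eval()
with torch.no_grad():
    x = torch.randn(1, 1, 100, 100)   # one grayscale crop, size chosen only for illustration
    logits = net(x)
print(logits.shape)  # torch.Size([1, 1, 82, 82])
```

The 82×82 output equals the number of positions a 19×19 window can take over a 100×100 input, consistent with the receptive field implied by the layer arithmetic.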
#### File: models/model_definition/sw_network.py
```python
import torch.nn as nn
import torch.nn.functional as F
class SlideNet(nn.Module):
"""
Slided window network
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=6)
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=4)
self.fc1 = nn.Linear(in_features=80, out_features=32)
self.fc2 = nn.Linear(in_features=32, out_features=16)
self.fc3 = nn.Linear(in_features=16, out_features=1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.conv1(x), kernel_size=2))
out = F.relu(F.max_pool2d(self.conv2(out), kernel_size=2))
out = out.view(-1, 80)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
return self.fc3(out)
```
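`SlideNet`, by contrast, classifies one window at a time. Working backwards from the `view(-1, 80)` flatten, a 19×19 window yields exactly the 20×2×2 feature map it expects; the real window size comes from `radius` in config.ini, so 19 here is an inferred value, not a confirmed one:

```python
import torch
from models.model_definition.sw_network import SlideNet   # import path inferred from the file layout

net = SlideNet()
net.eval()
with torch.no_grad():
    windows = torch.randn(8, 1, 19, 19)   # batch of 8 grayscale windows
    logits = net(windows)
print(logits.shape)  # torch.Size([8, 1]) -- one binarization logit per window
```

If the configured radius implies a different window size, the `view(-1, 80)` call is the first place the shapes stop lining up.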
#### File: cnn_document_binarization/test/test_dataloader.py
```python
import unittest
from random import random
from functools import reduce
from data_ingestion.dataset import BinaryDataset
from data_ingestion.dataloader import BinaryDataLoader, MAX_DATASET_SIZE
class TestDataloader(unittest.TestCase):
def setUp(self):
self.dataset = BinaryDataset(kind='train')
self.weights = [random() for _ in range(len(self.dataset))]
self.loader = BinaryDataLoader(dataset=self.dataset,
weights=self.weights)
def test_split_dataset(self):
# Check that the length of the original dataset is bigger than what
# PyTorch 1.3.0 allows
self.assertTrue(len(self.dataset) > MAX_DATASET_SIZE)
# The sum of the lengths of the split datasets must be equal to the
# length of the original dataset
cum_dataset_length = sum([len(d) for d in self.loader._datasets])
self.assertEqual(len(self.dataset), cum_dataset_length)
# Check that the length of each dataset is smaller than what PyTorch
# allows
for d in self.loader._datasets:
self.assertTrue(len(d) <= MAX_DATASET_SIZE)
# Check that indices of all datasets are different
indices = [d.indices for d in self.loader._datasets]
n = len(self.dataset) - 1
self.assertEqual(sum([sum(i) for i in indices]), ((n * (n + 1)) / 2))
# Check that no index is bigger than the original dataset length
max_index = max([max(d.indices) for d in self.loader._datasets])
self.assertTrue(max_index == (len(self.dataset) - 1))
def test_split_weights(self):
# The concatenation of the splitted weights must be equal to the
# original weights
concat_weights = reduce(lambda acc, v: acc + v,
self.loader._weights,
[])
self.assertEqual(concat_weights, self.weights)
``` |
{
"source": "jhorchler/udacity-machine-learning-engineer",
"score": 3
} |
#### File: 07 Capstone - Detect breast cancer metastases/gcnn/helpers.py
```python
from torch import device
from typing import Dict, Tuple
from tqdm import tqdm
import torch.nn as nn
import torch
import time
import copy
def train_one_epoch(model: nn.Module, loaders: Dict, dataset_sizes: Dict,
criterion, optimizer, device: device) -> Tuple:
"""Trains the given model exactly one epoch.
Args:
model (nn.Module): a nn.Module network
loaders (Dict): dictionary holding dataloaders for training dataset
dataset_sizes (Dict): dictionary holding the sizes of the datasets
loaded by `dataloaders`
criterion: an instance of a torch.nn loss function
optimizer: an instance of a torch.optim optimizer
device (torch.device): the device to run on
Returns:
A tuple holding
epoch_acc: accuracy of the epoch
epoch_loss: the loss of the epoch
"""
# switch to train mode
model.train()
# reset statistics
running_loss = 0.0
running_corrects = 0
# loop over data ..
for x, y in tqdm(loaders['train'], desc=' Training', unit='batches'):
# send data to GPU
x = x.to(device)
y = y.long().reshape(1, -1).squeeze().to(device)
# zero the gradients
optimizer.zero_grad()
# forward through network
with torch.set_grad_enabled(True):
# compute output
logits = model(x)
# get predictions
_, preds = torch.max(logits, 1)
# compute loss
loss = criterion(logits, y)
# compute gradients and optimize learnable parameters
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * x.size(0)
running_corrects += torch.sum(preds == y.data)
# epoch stats
epoch_acc = running_corrects.double() / dataset_sizes['train']
epoch_loss = running_loss / dataset_sizes['train']
print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc*100))
return epoch_acc.item(), epoch_loss
def validate_model(model: nn.Module, loaders: Dict, dataset_sizes: Dict,
criterion, device: device) -> Tuple:
"""Validates the given model using a validation set exactly one epoch.
Args:
model (nn.Module): a nn.Module network
loaders (Dict): dictionary holding dataloaders for validation dataset
dataset_sizes (Dict): dictionary holding the sizes of the datasets
loaded by `dataloaders`
criterion: an instance of a torch.nn loss function
device (torch.device): the device to run on
Returns:
A tuple holding
epoch_acc: accuracy of the validation epoch
epoch_loss: the loss of the validation epoch
"""
# switch to eval mode
model.eval()
# reset statistics
running_loss = 0.0
running_corrects = 0
# loop over data ..
for x, y in tqdm(loaders['valid'], desc='Validation', unit='batches'):
# send data to GPU
x = x.to(device)
y = y.long().reshape(1, -1).squeeze().to(device)
# forward without tracking gradient history
with torch.set_grad_enabled(False):
# compute output
logits = model(x)
# get predictions
_, preds = torch.max(logits, 1)
# get loss
loss = criterion(logits, y)
# statistics
running_loss += loss.item() * x.size(0)
running_corrects += torch.sum(preds == y.data)
# epoch stats
epoch_acc = running_corrects.double() / dataset_sizes['valid']
epoch_loss = running_loss / dataset_sizes['valid']
print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc*100))
return epoch_acc.item(), epoch_loss
def train_and_validate(model: nn.Module, loaders: Dict, dataset_sizes: Dict,
criterion, optimizer, scheduler, num_epochs: int = 25,
device: device = 'cpu') -> Tuple:
"""Trains and validates a given model over the number of epochs given.
Args:
model (nn.Module): a nn.Module network
loaders (Dict): dictionary holding dataloaders for training and
validation datasets
dataset_sizes (Dict): dictionary holding the sizes of the datasets
loaded by `dataloaders`
criterion: an instance of a torch.nn loss function
optimizer: an instance of a torch.optim optimizer
scheduler: an instance of a torch.optim.lr_scheduler
num_epochs (int): how many epochs to train (default: 25)
device (torch.device): the device to run on (default: cpu)
Returns:
A tuple holding
model: the model having the best validation accuracy
val_acc_hist: a list of all accuracies in validation
val_loss_hist: a list of all losses in validation
train_acc_hist: a list of all accuracies in training
train_loss_hist: a list of all losses in validation
"""
since = time.time()
# track validation performance
val_acc_hist = []
val_loss_hist = []
# track training performance
train_acc_hist = []
train_loss_hist = []
# track best validation performance
best_acc = 0.0
best_model = copy.deepcopy(model.state_dict())
for epoch in range(num_epochs):
epoch_start = time.time()
print('-' * 15)
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
print('-' * 15)
# first train and then validate for each epoch
for phase in ['train', 'valid']:
if phase == 'train':
model.train()
else:
model.eval()
# reset statistics
running_loss = 0.0
running_corrects = 0
for x, y in tqdm(loaders[phase], desc=phase, unit='batches'):
# copy data to GPU, if GPU is used. For CPU does nothing.
x = x.to(device, non_blocking=True)
y = y.long().to(device, non_blocking=True)
# zero the gradients
optimizer.zero_grad()
# forward
with torch.set_grad_enabled(phase == 'train'):
# get network output
logits = model(x)
# calculate the loss
loss = criterion(logits, y)
# get predictions
_, preds = torch.max(logits, 1)
# update parameters if in train mode
if phase == 'train':
# calculate gradients
loss.backward()
# update weights
optimizer.step()
# update learning rate
scheduler.step()
# update statistics
running_loss += loss.item()
running_corrects += torch.sum(preds == y.data).float()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc*100))
# save statistic history
if phase == 'valid':
val_acc_hist.append(epoch_acc.item())
val_loss_hist.append(epoch_loss)
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model = copy.deepcopy(model.state_dict())
else:
train_acc_hist.append(epoch_acc.item())
train_loss_hist.append(epoch_loss)
time_elapsed = time.time() - epoch_start
print('Epoch {} completed: {:.0f}m {:.0f}s'.format(epoch + 1,
time_elapsed // 60,
time_elapsed % 60))
# print summary
time_elapsed = time.time() - since
print('-' * 15)
print('Training completed in {:.0f}m {:.0f}s'.format(time_elapsed // 60,
time_elapsed % 60))
print('Best validation Accuracy: {:4f}'.format(best_acc))
# load best weights
model.load_state_dict(best_model)
return model, val_acc_hist, val_loss_hist, train_acc_hist, train_loss_hist
``` |
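A sketch of wiring `train_and_validate` together. Only the argument structure (loaders and sizes keyed by 'train'/'valid', a loss, an optimizer, a scheduler) comes from the code above; the dataset paths, the ResNet-18 backbone, and the hyperparameters are placeholders:

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms

from gcnn.helpers import train_and_validate   # import path inferred from the file layout

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tfm = transforms.Compose([transforms.Resize(96), transforms.ToTensor()])

image_sets = {split: datasets.ImageFolder(f'tiles/{split}', transform=tfm)   # placeholder paths
              for split in ('train', 'valid')}
loaders = {split: DataLoader(ds, batch_size=64, shuffle=(split == 'train'), num_workers=4)
           for split, ds in image_sets.items()}
dataset_sizes = {split: len(ds) for split, ds in image_sets.items()}

model = models.resnet18(num_classes=2).to(device)   # any nn.Module producing class logits works
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.5)

model, val_acc, val_loss, train_acc, train_loss = train_and_validate(
    model, loaders, dataset_sizes, criterion, optimizer, scheduler,
    num_epochs=5, device=device)
```

Note that `train_and_validate` calls `scheduler.step()` after every training batch, so a schedule with a large `step_size` (or a batch-based schedule) fits it better than a per-epoch one.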
{
"source": "jhorelrevilla/SententreeProyectos1",
"score": 3
} |
#### File: archive/preprocess/growphrase.py
```python
import bisect, csv, json, nltk, sys, time
#tweet token patterns
tokenPattern = r'''(?x) # set flag to allow verbose regexps
http://t\.co/\w+ # urls
|http://t\.co\w+ # urls
|http://t\.\w+ # urls
|http://\w+ # urls
| \@\w+ # Twitter handles
| \#\w+ # hashtags
| \d+(,\d+)+ # digits with internal commas
| \w+(-\w+)* # words with optional internal hyphens
| \$?\d+(\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| ([A-Z]\.)+ # abbreviations, e.g. U.S.A
'''
stopwords = nltk.corpus.stopwords.words('english') + ['rt', 'via', 'amp', 'http', 'https']
class Timer(object):
def __init__(self):
self.s = time.time()
self.e = None
self.elapsed = None
def start(self):
self.s = time.time()
def end(self):
self.e = time.time()
if self.s:
self.elapsed = self.e - self.s
def printElapsed(self):
self.end()
if self.elapsed:
print( "Elapsed time = " + str(self.elapsed) + " sec.")
class FDist(object):
def __init__(self):
self.hashtable = {}
def add(self, item, n = 1):
h = self.hashtable
if item in h:
h[item] += n
else:
h[item] = n
def freq(self, item):
h = self.hashtable
if item in h:
return h[item]
else:
return 0
def items(self):
h = self.hashtable
items = sorted(h.keys(), key = lambda i: h[i], reverse = True)
return items
class HashTable(object):
def __init__(self):
self.hashtable = {}
def add(self, key, value):
h = self.hashtable
if key in h:
h[key].append(value)
else:
h[key] = [value]
def remove(self, key, value):
h = self.hashtable
if key in h and value in h[key]:
h[key].remove(value)
if len(h[key]) == 0:
del h[key]
def replace(self, key, values):
if len(values) > 0:
self.hashtable[key] = values
elif key in self.hashtable:
del self.hashtable[key]
def pop(self, key):
h = self.hashtable
r = None
if key in h:
r = h[key]
del h[key]
return r
def get(self, key):
h = self.hashtable
if key in h and len(h[key]) > 0:
return h[key]
else:
return None
def getAll(self):
return self.hashtable
def displayAll(self):
ks = sorted(self.hashtable.keys(), reverse = True)
for k in ks:
print( str(k) + " > " + str( [ (v.Ids, v.s) for v in self.hashtable[k]] ))
class Corpus(object):
def __init__(self, dbfile, colText, colCnt, min_support = .01):
timer = Timer()
self.min_support = min_support
dbSize = 0
## load data and tokenize the text
f = open(dbfile, 'rU')
rdr = csv.reader(f, delimiter = '\t')
fdist = nltk.probability.FreqDist()
for r in rdr:
text = unicode(r[colText], 'utf-8')
tokens = nltk.regexp_tokenize(text, tokenPattern)
if colCnt < 0:
num = 1
else:
num = int(r[colCnt])
for t in tokens:
if t not in stopwords:
fdist.inc(t, num)
dbSize += num
self.dbSize = dbSize
self.fdist = fdist
## turn text into itemset numberings
itemset = []
for w in self.fdist.keys():
if not self._checkMinSupport(self.fdist[w]):
break
if w not in stopwords:
itemset.append(w)
self.itemset = itemset
texts = []
f.seek(0)
for r in rdr:
text = unicode(r[colText], 'utf-8')
tokens = nltk.regexp_tokenize(text, tokenPattern)
if colCnt < 0:
num = 1
else:
num = int(r[colCnt])
text = []
for t in tokens:
try:
i = itemset.index(t)
text.append(i)
except ValueError:
pass
if len(text) > 0:
texts.append((text, num))
self.texts = texts
f.close()
timer.printElapsed()
def growSets(self):
timer = Timer()
groups = []
nodes = []
links = {} #adjacency list
texts = self.texts
#init sets
g0 = {'seq':[], 'newItemPos':None, 'size':self.dbSize, 'DBs': [ {'text':t[0], 'count':t[1], 'seqIndices':[]} for t in texts ] }
groups.append(g0)
#growSets
while groups:
g = groups.pop()
#print 'grow: ' + str(g['seq']) + ' ' + str(g['size'])
pos = -1
word = None
cnt = 0
for s in range(len(g['seq']) + 1):
#print "s = " + str(s);
fdist = nltk.probability.FreqDist()
for t in g['DBs']:
if s == 0:
l = 0
else:
l = t['seqIndices'][s-1] + 1
if s == len(g['seq']):
r = len(t['text'])
else:
r = t['seqIndices'][s]
for w in t['text'][l:r]:
fdist.inc(w, t['count'])
#print self.printSeq(t['text'][l:r])
if fdist.N() > 0 and fdist[ fdist.max() ] > cnt:
pos = s
word = fdist.max()
cnt = fdist[word]
if not self._checkMinSupport(cnt): #could not find new item with enough support, discard branch
continue
#print str(pos) + " : " + self.itemset[word] + " : " + str(cnt)
if cnt == g['DBs'][0]['count']:
#take the entirety of the top tweet
t = g['DBs'][0]
tnodes = []
for i in range(0, len(t['text'])):
try:
j = t['seqIndices'].index(i)
tnodes.append(g['seq'][j])
except ValueError:
newWord = {'entity':self.itemset[t['text'][i]], 'freq':cnt, 'id':len(nodes)}
nodes.append(newWord)
tnodes.append(newWord)
for l in range(0, len(t['text'])-1):
if not l in t['seqIndices'] or not (l+1) in t['seqIndices']:
if not tnodes[l]['id'] in links:
links[ tnodes[l]['id'] ] = {}
links[ tnodes[l]['id'] ][ tnodes[l+1]['id'] ] = cnt
for l in range(0, len(t['seqIndices'])-1):
if t['seqIndices'][l+1] - t['seqIndices'][l] > 1:
links[tnodes[t['seqIndices'][l]]['id']][tnodes[t['seqIndices'][l+1]]['id']] -= cnt
if self._checkMinSupport(g['size']-cnt):
g0 = {'seq': g['seq'], 'newItemPos':None, 'size': g['size']-cnt, 'DBs': g['DBs'][1:]}
self._insertIntoSortedList(groups, g0)
else:
g0 = {'seq': g['seq'], 'newItemPos':None, 'size': g['size']-cnt, 'DBs': []}
#add new node
newWord = {'entity':self.itemset[word], 'freq':cnt, 'id':len(nodes)}
nodes.append(newWord)
newseq = list(g['seq'])
newseq.insert(pos, newWord)
g1 = {'seq': newseq, 'newItemPos': pos, 'size':cnt, 'DBs': []}
#add new links
if pos <= 0:
if g['seq']:
links[newWord['id']] = {g['seq'][0]['id']:cnt}
elif pos >= len(g['seq']):
if not g['seq'][-1]['id'] in links:
links[g['seq'][-1]['id']] = {}
links[g['seq'][-1]['id']][newWord['id']] = cnt
else:
links[g['seq'][pos-1]['id']][g['seq'][pos]['id']] -= cnt #?
links[g['seq'][pos-1]['id']][newWord['id']] = cnt
links[newWord['id']]={g['seq'][pos]['id']:cnt}
for t in g['DBs']:
if pos == 0:
l = 0
else:
l = t['seqIndices'][pos-1] + 1
if pos == len(g['seq']):
r = len(t['text'])
else:
r = t['seqIndices'][pos]
try:
i = l + t['text'][l:r].index(word)
t['seqIndices'].insert(pos, i)
g1['DBs'].append(t)
except ValueError:
g0['DBs'].append(t)
#print 'g0: ' + str(g0['seq']) + ' ' + str(g0['newItemPos']) + ' ' + str(g0['size']) + ' DBs: ' + str(g0['DBs'][:3])
#print 'g1: ' + str(g1['seq']) + ' ' + str(g1['newItemPos']) + ' ' + str(g1['size']) + ' DBs: ' + str(g1['DBs'][:3])
self._insertIntoSortedList(groups, g1)
if self._checkMinSupport( g0['size'] ):
self._insertIntoSortedList(groups, g0)
# for g in groups:
# print ' ' + self.printSeq(g['seq']) + ' ' + str(g['size'])
self.nodes = nodes
self.links = []
for l in links.keys():
for r in links[l].keys():
self.links.append({'source':l, 'target':r, 'freq':links[l][r]})
results = {'entities':self.nodes, 'links':self.links}
f = open('growPhraseResults.json', 'w')
f.write((json.dumps(results, ensure_ascii=False)))
f.close()
timer.printElapsed()
def _checkMinSupport(self, cnt):
if not hasattr(self, 'dbSize'):
raise NameError("dbSize is not defined.")
if not hasattr(self, 'min_support'):
raise NameError("min_support is not defined.")
if cnt >= self.dbSize * self.min_support:
return True
else:
return False
def printSeq(self, s):
return ' '.join([self.itemset[i] for i in s])
def _insertIntoSortedList(self, sortedlist, item):
i = bisect.bisect_left([l['size'] for l in sortedlist], item['size'])
sortedlist.insert(i, item)
return sortedlist
def main(argv):
c = Corpus('../data/raw/goal1.tsv', 1, 2)
c.growSets()
if __name__ == "__main__":
main(sys.argv[1:])
``` |
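A quick check of what `tokenPattern` keeps. The module itself targets the Python 2 era NLTK API (`unicode`, `FreqDist.inc`), so this sketch pulls full matches with `re.finditer` instead of `nltk.regexp_tokenize`; the import path is inferred from the file name:

```python
import re
from growphrase import tokenPattern   # importing also needs the NLTK stopwords corpus installed

tweet = "RT @FIFAWorldCup: GOAL! #BRA 3-1 #CRO http://t.co/abc123"
tokens = [m.group(0) for m in re.finditer(tokenPattern, tweet)]
print(tokens)
# ['RT', '@FIFAWorldCup', 'GOAL', '#BRA', '3-1', '#CRO', 'http://t.co/abc123']
```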
{
"source": "jhorey/ferry",
"score": 2
} |
#### File: ferry/cli/cli.py
```python
import errno
import ferry
import grp
import json
import logging
import logging.config
import os
import os.path
import pwd
import requests
import re
import sets
import StringIO
import sys
from termcolor import colored
import yaml
from requests.exceptions import ConnectionError
from ferry.table.prettytable import *
from ferry.options import CmdHelp
from ferry.install import Installer, FERRY_HOME, GUEST_DOCKER_REPO, DEFAULT_FERRY_APPS
class CLI(object):
def __init__(self):
self.cmds = CmdHelp()
self.cmds.description = "Development environment for big data applications"
self.cmds.version = ferry.__version__
self.cmds.usage = "ferry COMMAND [arg...]"
self.cmds.add_option("-b", "--build", "Build Ferry default images")
self.cmds.add_option("-c", "--conf", "Deployment configuration")
self.cmds.add_option("-d", "--dns", "Specify DNS servers")
self.cmds.add_option("-k", "--key", "Specify key directory")
self.cmds.add_option("-l", "--log", "Log configuration file")
self.cmds.add_option("-m", "--mode", "Deployment mode")
self.cmds.add_option("-n", "--naked", "Start in naked mode")
self.cmds.add_option("-r", "--retry", "Retry action on failure")
self.cmds.add_option("-t", "--net", "Use host network device")
self.cmds.add_option("-u", "--upgrade", "Upgrade Ferry")
self.cmds.add_cmd("build", "Build a Dockerfile")
self.cmds.add_cmd("clean", "Clean zombie Ferry processes")
self.cmds.add_cmd("help", "Print this help message")
self.cmds.add_cmd("info", "Print version information")
self.cmds.add_cmd("inspect", "Return low-level information on a service")
self.cmds.add_cmd("install", "Install the Ferry images")
self.cmds.add_cmd("login", "Login to Ferry servers")
self.cmds.add_cmd("logs", "Copy over the logs to the host")
self.cmds.add_cmd("ls", "View installed applications")
self.cmds.add_cmd("ls-images", "View installed images")
self.cmds.add_cmd("ps", "List deployed and running services")
self.cmds.add_cmd("pull", "Pull a remote image")
self.cmds.add_cmd("push", "Push an image to a remote registry")
self.cmds.add_cmd("rm", "Remove a service or snapshot")
self.cmds.add_cmd("server", "Start all the servers")
self.cmds.add_cmd("snapshot", "Take a snapshot")
self.cmds.add_cmd("snapshots", "List all snapshots")
self.cmds.add_cmd("ssh", "Connect to a client/connector")
self.cmds.add_cmd("start", "Start a new service or snapshot")
self.cmds.add_cmd("stop", "Stop a running service")
self.cmds.add_cmd("quit", "Stop the Ferry servers")
self.ferry_server = 'http://127.0.0.1:4000'
self.default_user = 'root'
self.installer = Installer(self)
def _pull_image(self, image):
"""
Pull a remote image to the local registry.
"""
try:
payload = { 'image' : image }
res = requests.get(self.ferry_server + '/image', params=payload)
return str(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _pull_app(self, app):
"""
Pull an application from the Ferry servers.
"""
# Now download the application description
# from the Ferry servers.
account, key, server = self.installer.get_ferry_account()
if account:
# Read in the contents of the application and
# generate the API key.
req = { 'action' : 'fetch',
'app' : app,
'account' : account }
sig = self.installer.create_signature(json.dumps(req), key)
try:
payload = { 'id' : account,
'app' : app,
'sig' : sig }
res = requests.get(server + '/app', params=payload)
status = json.loads(res.text)
file_name = self.installer.store_app(app, status['ext'], status['content'])
if file_name:
content = self._read_app_content(file_name)
images = self._get_user_images(content)
for i in images:
self._pull_image(i)
return app
else:
return "failed"
except ConnectionError:
logging.error("could not connect to application server")
return "failed"
def _pull(self, image):
"""
Pull a remote application/image to the local registry.
"""
logging.warning("pulling " + image)
# Figure out if we're pushing a Ferry application or
# plain Docker image.
s = image.split("://")
if len(s) > 1:
proto = s[0]
image = s[1]
else:
proto = "image"
image = s[0]
if proto == "image":
return self._pull_image(image)
else:
return self._pull_app(image)
def _push_image(self, image, registry):
"""
Push a local image to a remote registry.
"""
try:
payload = { 'image' : image,
'server' : registry }
res = requests.post(self.ferry_server + '/image', data=payload)
return str(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _builtin_image(self, image):
"""
Indicates whether the image is a pre-built Ferry image.
Right now we only verify the client images.
"""
return image in ["ferry/hadoop-client",
"ferry/spark-client",
"ferry/cassandra-client",
"ferry/openmpi-client"]
def _get_user_images(self, content):
"""
Get the user-defined images.
"""
images = set()
for c in content['connectors']:
p = c['personality']
if not self._builtin_image(p):
images.add(p)
return images
def _read_app_content(self, file_path):
"""
Read the content of the application.
"""
json_arg = None
with open(file_path, "r") as f:
n, e = os.path.splitext(file_path)
if e == '.json':
json_string = self._read_file_arg(file_path)
json_arg = json.loads(json_string)
elif e == '.yaml' or e == '.yml':
yaml_file = open(file_path, 'r')
json_arg = yaml.load(yaml_file)
return json_arg
def _push_app(self, app, registry):
"""
Push a local application to Ferry servers.
"""
# First find all the images that need to
# be pushed to the Docker registry.
content = self._read_app_content(app)
if content:
images = self._get_user_images(content)
for i in images:
self._push_image(i, registry)
# Register the application in the Ferry database.
account, key, server = self.installer.get_ferry_account()
if account:
# Read in the contents of the application and
# generate the API key.
with open(app, "r") as f:
name = account + '/' + os.path.basename(app)
name, ext = os.path.splitext(name)
content = f.read()
req = { 'action' : 'register',
'app' : name,
'account' : account }
sig = self.installer.create_signature(json.dumps(req), key)
try:
payload = { 'id' : account,
'app' : name,
'ext' : ext,
'content' : content,
'sig' : sig }
res = requests.post(server + '/app', data=payload)
status = json.loads(res.text)
if status['status'] == 'fail':
logging.error("failed to register app " + app)
return "Failed to register app " + app
else:
return status['name']
except ConnectionError:
logging.error("could not connect to application server")
return "Could not register the application."
except ValueError as e:
logging.error(str(e))
return "Registration server sent back unknown reply"
else:
logging.error("could not read account information")
return "Could not read account information."
def _push(self, image, registry):
"""
Push a local application/image to a remote registry.
"""
logging.warning("pushing " + image)
# Figure out if we're pushing a Ferry application or
# plain Docker image.
s = image.split("://")
if len(s) > 1:
proto = s[0]
image = s[1]
else:
proto = "image"
image = s[0]
if proto == "image":
return self._push_image(image, registry)
else:
return self._push_app(image, registry)
def _build(self, dockerfile):
"""
Build a new local image.
"""
logging.warning("building " + dockerfile)
names = self.installer._get_image(dockerfile)
name = names.pop().split("/")
if len(name) == 1:
repo = GUEST_DOCKER_REPO
image = name[0]
else:
repo = name[0]
image = name[1]
build_dir = os.path.dirname(dockerfile)
self.installer._compile_image(image, repo, build_dir, build=True)
if len(names) > 0:
self.installer._tag_images(image, repo, names)
def _login(self):
"""
Login to a remote registry
"""
try:
res = requests.post(self.ferry_server + '/login')
return str(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _create_stack(self, stack_description, args, private_key):
"""
Create a new stack.
"""
payload = { 'payload' : json.dumps(stack_description),
'key' : private_key }
try:
res = requests.post(self.ferry_server + '/create', data=payload)
return True, str(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
return False, "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _ask_question(self, question_text):
question = colored(question_text, 'red')
prompt = colored(' >> ', 'green')
return raw_input(question + prompt)
def _format_images_query(self, json_data):
t = PrettyTable()
unique = sets.Set()
for j in json_data:
unique.add(j)
t.add_column("Image", list(unique))
return t.get_string(sortby="Image",
padding_width=2)
def _format_apps_query(self, json_data):
authors = []
versions = []
descriptions = []
for app in json_data.keys():
authors.append(json_data[app]['author'])
versions.append(json_data[app]['version'])
descriptions.append(json_data[app]['short'])
t = PrettyTable()
t.add_column("App", json_data.keys())
t.add_column("Author", authors)
t.add_column("Version", versions)
t.add_column("Description", descriptions)
return t.get_string(sortby="App",
padding_width=2)
def _format_snapshots_query(self, json_data):
bases = []
date = []
for uuid in json_data.keys():
bases.append(json_data[uuid]['base'])
if 'snapshot_ts' in json_data[uuid]:
date.append(json_data[uuid]['snapshot_ts'])
else:
date.append(' ')
t = PrettyTable()
t.add_column("UUID", json_data.keys())
t.add_column("Base", bases)
t.add_column("Date", date)
return t.get_string(sortby="Date",
padding_width=2)
def _format_table_query(self, json_data):
storage = []
compute = []
connectors = []
status = []
base = []
time = []
# Each additional row should include the actual data.
for uuid in json_data.keys():
csstore = []
bstore = []
cstore = []
for c in json_data[uuid]['connectors']:
csstore.append(c)
backends = json_data[uuid]['backends']
for b in backends:
if b['storage']:
bstore.append(b['storage'])
else:
bstore.append(' ')
if b['compute']:
cstore.append(b['compute'])
else:
cstore.append(' ')
storage.append(bstore)
compute.append(cstore)
connectors.append(csstore)
status.append(json_data[uuid]['status'])
base.append(json_data[uuid]['base'])
time.append(json_data[uuid]['ts'])
t = PrettyTable()
t.add_column("UUID", json_data.keys())
t.add_column("Storage", storage)
t.add_column("Compute", compute)
t.add_column("Connectors", connectors)
t.add_column("Status", status)
t.add_column("Base", base)
t.add_column("Time", time)
return t.get_string(sortby="UUID",
padding_width=2)
def _stop_all(self, private_key):
try:
# We used to stop all the services when quitting
# Ferry, but this does not seem safe since a user
# may want to keep running a Hadoop cluster even if
# Ferry is shutdown.
# constraints = { 'status' : 'running' }
# payload = { 'constraints' : json.dumps(constraints) }
# res = requests.get(self.ferry_server + '/query', params=payload)
# stacks = json.loads(res.text)
# for uuid in stacks.keys():
# self._manage_stacks({'uuid' : uuid,
# 'key' : private_key,
# 'action' : 'stop'})
# Shutdown any backend services that may have
# been started (e.g., OpenStack Heat server, etc.).
res = requests.post(self.ferry_server + '/quit')
logging.info(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
def _read_stacks(self, show_all=False, args=None):
try:
res = requests.get(self.ferry_server + '/query')
query_reply = json.loads(res.text)
deployed_reply = {}
if show_all:
mode = self._parse_deploy_arg('mode', args, default='local')
conf = self._parse_deploy_arg('conf', args, default='default')
payload = { 'mode' : mode,
'conf' : conf }
res = requests.get(self.ferry_server + '/deployed', params=payload)
deployed_reply = json.loads(res.text)
# Merge the replies and format.
return self._format_table_query(dict(query_reply.items() + deployed_reply.items()))
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _list_apps(self):
"""
List all installed applications including the built-in applications.
"""
try:
res = requests.get(self.ferry_server + '/apps')
json_reply = json.loads(res.text)
return self._format_apps_query(json_reply)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _list_images(self):
"""
List all installed Docker images.
"""
try:
res = requests.get(self.ferry_server + '/images')
json_reply = json.loads(res.text)
return self._format_images_query(json_reply)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _list_snapshots(self):
"""
List all snapshots.
"""
try:
res = requests.get(self.ferry_server + '/snapshots')
json_reply = json.loads(res.text)
return self._format_snapshots_query(json_reply)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _format_stack_inspect(self, json_data):
return json.dumps(json_data,
sort_keys=True,
indent=2,
separators=(',',':'))
def _inspect_stack(self, stack_id):
"""
Inspect a specific stack.
"""
payload = { 'uuid':stack_id }
try:
res = requests.get(self.ferry_server + '/stack', params=payload)
return res.text
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _copy_logs(self, stack_id, to_dir):
"""
Copy over the logs.
"""
payload = {'uuid':stack_id,
'dir':to_dir}
try:
res = requests.get(self.ferry_server + '/logs', params=payload)
json_value = json.loads(str(res.text))
return self._format_stack_inspect(json_value)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _connect_stack(self, stack_id, connector_id, options):
"""
Connect to a specific client/connector via ssh.
"""
# Get the IP and default user information for this connector.
payload = {'uuid':stack_id}
try:
res = requests.get(self.ferry_server + '/stack', params=payload)
json_value = json.loads(str(res.text))
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
except ValueError:
json_value = None
connector_ip = None
if json_value and 'connectors' in json_value:
for cg in json_value['connectors']:
if not connector_id and 'ip' in cg['entry']:
connector_ip = cg['entry']['ip']
break
elif connector_id == cg['uniq'] and 'ip' in cg['entry']:
connector_ip = cg['entry']['ip']
break
# Now form the ssh command. This just executes in the same shell.
if connector_ip:
private_key = self._get_ssh_key(options=options)
key_opt = '-o StrictHostKeyChecking=no'
host_opt = '-o UserKnownHostsFile=/dev/null'
ident = '-i %s' % private_key
dest = '%s@%s' % (self.default_user, connector_ip)
cmd = "ssh %s %s %s %s" % (key_opt, host_opt, ident, dest)
logging.warning(cmd)
os.execv('/usr/bin/ssh', cmd.split())
else:
logging.warning("could not find connector %s" % connector_id)
return "could not connect to " + str(stack_id)
def _parse_deploy_arg(self, param, args, default):
pattern = re.compile('--%s=(\w+)' % param)
for a in args:
m = pattern.match(a)
if m and m.group(0) != '':
return m.group(1)
return default
def _manage_stacks(self, stack_info):
"""
Manage the stack.
"""
try:
res = requests.post(self.ferry_server + '/manage/stack', data=stack_info)
return str(res.text)
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _print_help(self):
"""
Output the help message.
"""
return self.cmds.print_help()
def _print_info(self):
"""
Output version information.
"""
try:
res = requests.get(self.ferry_server + '/version')
s = self.cmds.description + '\n'
s += "Version: %s\n" % self.cmds.version
s += "Docker: %s\n" % res.text.strip()
return s
except ConnectionError:
logging.error("could not connect to ferry server")
return "It appears Ferry servers are not running.\nType sudo ferry server and try again."
def _read_file_arg(self, file_name):
"""
Helper method to read a file.
"""
json_file = open(os.path.abspath(file_name), 'r')
json_text = ''
for line in json_file:
json_text += line.strip()
return json_text
def _get_ssh_key(self, options=None):
if options and '-k' in options:
return options['-k'][0]
else:
return ferry.install.DEFAULT_SSH_KEY
def _find_installed_app(self, app):
"""
Help find the path to the application. Check both the built-in
global directory and the user-installed directory.
"""
file_path = None
for item in os.listdir(FERRY_HOME + '/data/plans/'):
if app == os.path.splitext(item)[0]:
return FERRY_HOME + '/data/plans/' + item
if not file_path:
for user in os.listdir(DEFAULT_FERRY_APPS):
if os.path.isdir(DEFAULT_FERRY_APPS + '/' + user):
for item in os.listdir(DEFAULT_FERRY_APPS + '/' + user):
if app == user + '/' + os.path.splitext(item)[0]:
return DEFAULT_FERRY_APPS + '/' + user + '/' + item
def _format_output(self, reply):
output = reply['text'] + "\n"
if 'msgs' in reply:
for c in reply['msgs'].keys():
output += "%s: %s\n" % (c, reply['msgs'][c])
return output
def _start_stack(self, options, args):
private_key = self._get_ssh_key(options=options)
# Check if we need to build the image before running.
if '-b' in options:
build_dir = options['-b'][0]
self._build(build_dir + '/Dockerfile')
        # Try to figure out what application the user is starting.
# This could be a new stack or an existing stopped stack.
arg = args.pop(0)
json_arg = {}
if not os.path.exists(arg):
file_path = self._find_installed_app(arg)
else:
file_path = arg
# Looks like the user is trying to start a brand
# new application (specified by a filename).
if file_path and os.path.exists(file_path):
file_path = os.path.abspath(file_path)
json_arg = self._read_app_content(file_path)
if not json_arg:
logging.error("could not load file " + file_path)
exit(1)
else:
# Check if there are any questions associated with
# this application stack. If so, we should prompt the
# user and include the answers.
if 'questions' in json_arg:
for q in json_arg['questions']:
question = q['question']
q['_answer'] = self._ask_question(question)
json_arg['_file_path'] = file_path
json_arg['_file'] = arg
# Create the application stack and print
# the status message.
posted, reply = self._create_stack(json_arg, args, private_key)
if posted:
try:
reply = json.loads(reply)
if reply['status'] == 'failed':
return 'could not create application'
else:
return self._format_output(reply)
except ValueError as e:
logging.error(reply)
def dispatch_cmd(self, cmd, args, options):
"""
This is the command dispatch table.
"""
if(cmd == 'start'):
return self._start_stack(options, args)
elif(cmd == 'ps'):
if len(args) > 0 and args[0] == '-a':
opt = args.pop(0)
return self._read_stacks(show_all=True, args = args)
else:
return self._read_stacks(show_all=False, args = args)
elif(cmd == 'snapshots'):
return self._list_snapshots()
elif(cmd == 'install'):
msg = self.installer.install(args, options)
self.installer._stop_docker_daemon()
return msg
elif(cmd == 'clean'):
self.installer._force_stop_web()
self.installer._stop_docker_daemon(force=True)
return 'cleaned ferry'
elif(cmd == 'inspect'):
return self._inspect_stack(args[0])
elif(cmd == 'logs'):
return self._copy_logs(args[0], args[1])
elif(cmd == 'server'):
self.installer.start_web(options)
return 'started ferry'
elif(cmd == 'ssh'):
stack_id = args[0]
connector_id = None
if len(args) > 1:
connector_id = args[1]
return self._connect_stack(stack_id, connector_id, options)
elif(cmd == 'quit'):
private_key = self._get_ssh_key(options=options)
self._stop_all(private_key)
self.installer.stop_web(private_key)
self.installer._stop_docker_daemon()
return 'stopped ferry'
elif(cmd == 'ls'):
return self._list_apps()
elif(cmd == 'ls-images'):
return self._list_images()
elif(cmd == 'info'):
return self._print_info()
elif(cmd == 'build'):
return self._build(args[0])
elif(cmd == 'pull'):
return self._pull(args[0])
elif(cmd == 'push'):
image = args.pop(0)
if len(args) > 0:
registry = args.pop(0)
else:
registry = None
return self._push(image, registry)
elif(cmd == 'login'):
return self._login()
elif(cmd == 'help'):
return self._print_help()
else:
# The user wants to perform some management function
# over the stack.
private_key = self._get_ssh_key(options=options)
stack_info = {'uuid' : args[0],
'key' : private_key,
'action' : cmd}
return self._manage_stacks(stack_info)
def main(argv=None):
# Set up the various logging facilities
logging.config.fileConfig(FERRY_HOME + "/logging.conf")
cli = CLI()
if(sys.argv):
if len(sys.argv) > 1:
cli.cmds.parse_args(sys.argv)
# Initialize the cli
options = cli.cmds.get_options()
if '-l' in options:
logging.config.fileConfig(options['-l'][0])
# Execute the commands
all_cmds = cli.cmds.get_cmds()
if len(all_cmds) > 0:
for c in all_cmds.keys():
msg = cli.dispatch_cmd(c, all_cmds[c], options)
print msg
exit(0)
print cli._print_help()
```
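A quick way to see what `_parse_deploy_arg` accepts: `re.match` anchors at the start of each argument and the captured `\w+` value stops at the first non-word character, so a value like `my.yaml` comes back truncated to `my`. A standalone sketch of the same helper, with made-up flag names and values:

```python
import re

def parse_deploy_arg(param, args, default):
    # Same pattern as the CLI helper above, written as a raw string.
    pattern = re.compile(r'--%s=(\w+)' % param)
    for a in args:
        m = pattern.match(a)
        if m and m.group(0) != '':
            return m.group(1)
    return default

print(parse_deploy_arg('mode', ['--mode=local'], 'embedded'))  # -> 'local'
print(parse_deploy_arg('conf', ['--conf=my.yaml'], 'none'))    # -> 'my' (\w+ stops at '.')
print(parse_deploy_arg('size', ['--mode=local'], '1'))         # -> '1' (no match, default used)
```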
#### File: config/cassandra/cassandraclientconfig.py
```python
import sys
import sh
from string import Template
class CassandraClientInitializer(object):
"""
Create a new initializer
Param user The user login for the git repo
"""
def __init__(self, system):
self.template_dir = None
self.template_repo = None
self.container_data_dir = CassandraClientConfig.data_directory
self.container_log_dir = CassandraClientConfig.log_directory
"""
Generate a new hostname
"""
def new_host_name(self, instance_id):
return 'cassandra_client' + str(instance_id)
"""
Start the service on the containers.
"""
def _execute_service(self, containers, entry_point, fabric, cmd):
return fabric.cmd(containers,
'/service/sbin/startnode %s %s' % (cmd, entry_point['cassandra_url']))
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
return 'cassandra_client' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return []
def get_total_instances(self, num_instances, layers):
"""
Get total number of instances.
"""
instances = []
for i in range(num_instances):
instances.append('cassandra-client')
return instances
"""
Generate a new configuration
"""
def generate(self, num):
return CassandraClientConfig(num)
def _apply_cassandra(self, host_dir, entry_point, config, container):
yaml_in_file = open(self.template_dir + '/cassandra.yaml.template', 'r')
yaml_out_file = open(host_dir + '/cassandra.yaml', 'w+')
# Now make the changes to the template file.
changes = { "LOCAL_ADDRESS":container['data_ip'],
"DATA_DIR":config.data_directory,
"CACHE_DIR":config.cache_directory,
"COMMIT_DIR":config.commit_directory,
"SEEDS":entry_point['cassandra_url']}
for line in yaml_in_file:
s = Template(line).substitute(changes)
yaml_out_file.write(s)
yaml_out_file.close()
yaml_in_file.close()
def _apply_titan(self, host_dir, storage_entry, container):
in_file = open(self.template_dir + '/titan.properties', 'r')
out_file = open(host_dir + '/titan.properties', 'w+')
changes = { "BACKEND":"cassandrathrift",
"DB":container['args']['db'],
"IP":storage_entry['seed']}
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
out_file.close()
in_file.close()
def _find_cassandra_storage(self, containers):
"""
Find a Cassandra compatible storage entry.
"""
for c in containers:
for s in c['storage']:
if s['type'] == 'cassandra':
return s
"""
Apply the configuration to the instances
"""
def apply(self, config, containers):
entry_point = { 'type' : 'cassandra-client' }
entry_point['ip'] = containers[0]['manage_ip']
# Get the storage information.
storage_entry = self._find_cassandra_storage(containers)
if not storage_entry:
# The Cassandra client is currently only compatible with a
# Cassandra backend. So just return an error.
return None, None
# Otherwise record the storage type and get the seed node.
entry_point['cassandra_url'] = storage_entry['seed']
# Create a new configuration directory, and place
# into the template directory.
config_dirs = []
try:
host_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', host_dir)
except:
sys.stderr.write('could not create config dir ' + host_dir)
self._apply_cassandra(host_dir, entry_point, config, containers[0])
# See if we need to apply
if 'titan' in storage_entry:
self._apply_titan(host_dir, storage_entry, containers[0])
out_file = open(host_dir + '/servers', 'w+')
out_file.write("%s %s" % (storage_entry['titan']['ip'], 'rexserver'))
                out_file.close()
# The config dirs specifies what to transfer over. We want to
# transfer over specific files into a directory.
for c in containers:
config_dirs.append([c['container'],
host_dir + '/*',
config.config_directory])
except IOError as err:
            sys.stderr.write(str(err))
return config_dirs, entry_point
class CassandraClientConfig(object):
data_directory = '/service/data/main/'
log_directory = '/service/data/logs/'
commit_directory = '/service/data/commits/'
cache_directory = '/service/data/cache/'
config_directory = '/service/conf/cassandra/'
def __init__(self, num):
self.num = num
self.data_directory = CassandraClientConfig.data_directory
self.commit_directory = CassandraClientConfig.commit_directory
self.cache_directory = CassandraClientConfig.cache_directory
self.log_directory = CassandraClientConfig.log_directory
self.config_directory = CassandraClientConfig.config_directory
```
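The `_apply_cassandra` and `_apply_titan` helpers above both lean on `string.Template`, substituting one line of the template file at a time. A self-contained sketch of that mechanic; the template line here is invented, and only the `$LOCAL_ADDRESS` key mirrors the real `changes` dict:

```python
from string import Template

# Invented stand-in for one line of cassandra.yaml.template.
line = "listen_address: $LOCAL_ADDRESS\n"
changes = {"LOCAL_ADDRESS": "10.1.0.12"}

print(Template(line).substitute(changes))  # -> listen_address: 10.1.0.12

# Note: substitute() raises KeyError for any placeholder missing from
# 'changes', which is why the initializer builds the full dict up front.
```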
#### File: config/hadoop/hadoopclientconfig.py
```python
import os
import sys
import sh
from string import Template
from ferry.install import FERRY_HOME
from ferry.config.hadoop.hiveconfig import *
class HadoopClientInitializer(object):
"""
Create a new initializer
Param user The user login for the git repo
"""
def __init__(self, system):
self.system = system
self.template_dir = None
self.template_repo = None
self.hive_client = HiveClientInitializer(system)
self.hive_client.template_dir = FERRY_HOME + '/data/templates/hive-metastore/'
self.container_data_dir = None
self.container_log_dir = HadoopClientConfig.log_directory
"""
Generate a new hostname
"""
def new_host_name(self, instance_id):
return 'hadoop-client' + str(instance_id)
"""
Start the service on the containers.
"""
def _execute_service(self, containers, entry_point, fabric, cmd):
# We need to know what sort of storage backend we are
# using, since this will help set up everything.
if entry_point['hdfs_type'] == 'hadoop':
output = fabric.cmd(containers, '/service/sbin/startnode %s hadoop' % cmd)
elif entry_point['hdfs_type'] == 'gluster':
mount_url = entry_point['gluster_url']
output = fabric.cmd(containers,
'/service/sbin/startnode %s gluster %s' % (cmd, mount_url))
return output
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
"""
Generate a new configuration.
"""
return 'hadoop_client_' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return []
"""
Generate a new configuration
"""
def generate(self, num):
return HadoopClientConfig(num)
"""
Generate the core-site configuration for a local filesystem.
"""
def _generate_gluster_core_site(self, mount_point, new_config_dir):
core_in_file = open(self.template_dir + '/core-site.xml.template', 'r')
core_out_file = open(new_config_dir + '/core-site.xml', 'w+')
changes = { "DEFAULT_NAME":"file:///",
"DATA_TMP":"/service/data/client/tmp" }
for line in core_in_file:
s = Template(line).substitute(changes)
core_out_file.write(s)
core_in_file.close()
core_out_file.close()
def _generate_log4j(self, new_config_dir):
in_file = open(self.template_dir + '/log4j.properties', 'r')
out_file = open(new_config_dir + '/log4j.properties', 'w+')
for line in in_file:
out_file.write(line)
in_file.close()
out_file.close()
def _generate_core_site(self, hdfs_master, new_config_dir):
"""
Generate the core-site configuration.
"""
core_in_file = open(self.template_dir + '/core-site.xml.template', 'r')
core_out_file = open(new_config_dir + '/core-site.xml', 'w+')
default_name = "%s://%s:%s" % ("hdfs",
hdfs_master,
HadoopClientConfig.HDFS_MASTER)
changes = { "DEFAULT_NAME":default_name,
"DATA_TMP":"/service/data/client/tmp" }
for line in core_in_file:
s = Template(line).substitute(changes)
core_out_file.write(s)
core_in_file.close()
core_out_file.close()
"""
Generate the yarn-site configuration.
"""
def _generate_yarn_site(self, yarn_master, new_config_dir):
yarn_in_file = open(self.template_dir + '/yarn-site.xml.template', 'r')
yarn_out_file = open(new_config_dir + '/yarn-site.xml', 'w+')
changes = { "YARN_MASTER":yarn_master,
"DATA_STAGING":"/service/data/client/staging" }
# Get memory information.
mem = self.system.get_total_memory()
if mem < 1024:
mem = 1024
changes['MEM'] = mem
changes['CMEM'] = max(mem / 8, 512)
changes['RMEM'] = 2 * changes['CMEM']
changes['ROPTS'] = '-Xmx' + str(int(0.8 * changes['RMEM'])) + 'm'
cores = self.system.get_num_cores() / 2
if cores < 1:
cores = 1
changes['CORES'] = cores
for line in yarn_in_file:
s = Template(line).substitute(changes)
yarn_out_file.write(s)
yarn_in_file.close()
yarn_out_file.close()
"""
Generate the mapred-site configuration.
"""
def _generate_mapred_site(self, config, containers, new_config_dir):
mapred_in_file = open(self.template_dir + '/mapred-site.xml.template', 'r')
mapred_out_file = open(new_config_dir + '/mapred-site.xml', 'w+')
# Most of these values aren't applicable for the client,
# so just make up fake numbers.
changes = { "NODE_REDUCES":1,
"NODE_MAPS":1,
"JOB_REDUCES":1,
"JOB_MAPS":1,
"HISTORY_SERVER":config.yarn_master,
"DATA_TMP":"/service/data/client/tmp" }
# Get memory information.
mem = self.system.get_total_memory()
if mem < 1024:
mem = 1024
changes['MMEM'] = max(mem / 8, 512)
changes['RMEM'] = 2 * changes['MMEM']
changes['MOPTS'] = '-Xmx' + str(int(0.8 * changes['MMEM'])) + 'm'
changes['ROPTS'] = '-Xmx' + str(int(0.8 * changes['RMEM'])) + 'm'
for line in mapred_in_file:
s = Template(line).substitute(changes)
mapred_out_file.write(s)
mapred_in_file.close()
mapred_out_file.close()
"""
Apply the Hive client configuration
"""
def _apply_hive_client(self, config, containers):
return self.hive_client.apply(config, containers)
"""
Apply the configuration to the instances
"""
def apply(self, config, containers):
entry_point = { 'type' : 'hadoop-client' }
entry_point['ip'] = containers[0]['manage_ip']
# Create a new configuration directory, and place
# into the template directory.
new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', new_config_dir)
except:
sys.stderr.write('could not create config dir ' + new_config_dir)
# Check if there is an explicit compute cluster. If there
# is, then we use that for YARN information.
storage = containers[0]['storage'][0]
compute = None
if 'compute' in containers[0] and len(containers[0]['compute']) > 0:
compute = containers[0]['compute'][0]
if compute and 'yarn' in compute:
config.yarn_master = compute['yarn']
if 'db' in compute:
config.hive_meta = compute['db']
else:
# Use the storage backend for the YARN info. However, first
# check if the storage is compatible.
if 'yarn' in storage:
config.yarn_master = storage['yarn']
if 'db' in storage:
config.hive_meta = storage['db']
# Check what sort of storage we are using.
entry_point['hdfs_type'] = storage['type']
if storage['type'] == 'hadoop':
config.hdfs_master = storage['hdfs']
self._generate_core_site(config.hdfs_master, new_config_dir)
elif storage['type'] == 'gluster':
mount_url = "%s:/%s" % (storage['gluster'], storage['volume'])
entry_point['gluster_url'] = mount_url
self._generate_gluster_core_site('/data', new_config_dir)
# Generate the Hadoop conf files.
if config.yarn_master:
self._generate_log4j(new_config_dir)
self._generate_mapred_site(config, containers, new_config_dir)
self._generate_yarn_site(config.yarn_master, new_config_dir)
# Each container needs to point to a new config dir.
config_dirs = []
for c in containers:
config_dirs.append([c['container'],
new_config_dir + '/*',
config.config_directory])
# Now configure the Hive client.
if config.hive_meta:
hive_config = HiveClientConfig(1)
hive_config.uuid = config.uuid
hive_config.hadoop_config_dir = config.config_directory
hive_config.metastore = config.hive_meta
hive_dirs, hive_entry = self._apply_hive_client(hive_config, containers)
config_dirs.extend(hive_dirs)
return config_dirs, entry_point
class HadoopClientConfig(object):
log_directory = '/service/data/logs/'
config_directory = '/service/conf/hadoop/'
HDFS_MASTER = 9000
def __init__(self, num):
self.num = num
self.config_directory = HadoopClientConfig.config_directory
self.system_info = None
self.yarn_master = None
self.hdfs_master = None
self.hive_meta = None
```
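The sizing logic in `_generate_yarn_site` and `_generate_mapred_site` is a fixed heuristic: clamp total memory to at least 1024 MB, give each container an eighth of it (minimum 512 MB), double that for the reducer/resource side, set the JVM heap to 80% of the container size, and use half the cores (minimum 1). A worked sketch for an assumed 8 GB, 4-core host; `//` keeps the Python 2 integer-division behaviour of the original:

```python
mem = 8192   # assumed total memory in MB
cores = 4    # assumed core count

mem = max(mem, 1024)
cmem = max(mem // 8, 512)                    # per-container memory -> 1024
rmem = 2 * cmem                              # reducer/RM memory    -> 2048
ropts = '-Xmx' + str(int(0.8 * rmem)) + 'm'  # JVM heap option      -> -Xmx1638m
cores = max(cores // 2, 1)                   # YARN vcores          -> 2

print(cmem, rmem, ropts, cores)
```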
#### File: config/mongo/mongoconfig.py
```python
import json
import logging
import os
import sh
import sys
import time
from string import Template
class MongoInitializer(object):
def __init__(self, system):
"""
Create a new initializer
Param user The user login for the git repo
"""
self.template_dir = None
self.template_repo = None
self.fabric = None
self.container_data_dir = MongoConfig.data_directory
self.container_log_dir = MongoConfig.log_directory
def new_host_name(self, instance_id):
"""
Generate a new hostname
"""
return 'mongo' + str(instance_id)
def _execute_service(self, containers, entry_point, fabric, cmd):
"""
Start the service on the containers.
"""
all_output = {}
for c in containers:
if c.args:
args = c.args
else:
args = 'notrust'
output = fabric.cmd([c], '/service/sbin/startnode %s %s' % (cmd, args))
all_output = dict(all_output.items() + output.items())
# Now wait a couple seconds to make sure
# everything has started.
time.sleep(2)
return all_output
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
"""
Generate a new configuration.
"""
return 'mongo_' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return [MongoConfig.MONGO_PORT]
def get_total_instances(self, num_instances, layers):
instances = []
for i in range(num_instances):
instances.append('mongodb')
return instances
def generate(self, num):
"""
Generate a new configuration
Param num Number of instances that need to be configured
Param image Image type of the instances
"""
return MongoConfig(num)
def _generate_mongo_config(self, host_dir, config, arg):
"""
Generate the MongoDB configuration file.
"""
if arg == "trust":
conf_file = "trusted.conf"
else:
conf_file = "mongodb.conf"
in_file = open(self.template_dir + '/%s.template' % conf_file, 'r')
out_file = open(host_dir + '/%s' % conf_file, 'w+')
changes = { "MONGO_LOG":config.log_directory,
"MONGO_DATA":config.data_directory }
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
out_file.close()
in_file.close()
def apply(self, config, containers):
"""
Apply the configuration to the instances
"""
entry_point = { 'type' : 'mongodb' }
config_dirs = []
# Keep track of the MongoDB IP address.
entry_point['mongo'] = containers[0]['data_ip']
entry_point['ip'] = containers[0]['manage_ip']
new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', new_config_dir)
except:
sys.stderr.write('could not create config dir ' + new_config_dir)
# This file records all instances so that we can
# generate the hosts file.
entry_point['instances'] = []
for server in containers:
entry_point['instances'].append([server['data_ip'], server['host_name']])
if not 'storage' in containers[0]:
# This is being called as a storage service.
# The client service doesn't do anything right now.
if 'args' in containers[0] and containers[0]['args']:
self._generate_mongo_config(new_config_dir, config, containers[0]['args'])
else:
self._generate_mongo_config(new_config_dir, config, 'notrust')
# Expose the login info.
output = self.fabric.cmd_raw(key = containers[0]['container'].privatekey,
ip = entry_point['mongo'],
cmd = '/service/sbin/startnode login',
user = self.fabric.docker_user)
logging.warning(str(output))
login_info = json.loads(str(output))
entry_point['mongo_user'] = login_info['user']
entry_point['mongo_pass'] = login_info['pass']
# Transfer the configuration.
for c in containers:
config_files = new_config_dir + '/*'
config_dirs.append([c['container'],
config_files,
config.config_directory])
return config_dirs, entry_point
class MongoConfig(object):
log_directory = '/service/logs/'
config_directory = '/service/conf/mongodb/'
data_directory = '/service/data/'
MONGO_PORT = '27017'
def __init__(self, num):
self.num = num
self.mongo_port = MongoConfig.MONGO_PORT
self.config_directory = MongoConfig.config_directory
self.log_directory = MongoConfig.log_directory
self.data_directory = MongoConfig.data_directory
```
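The MongoDB credentials in `apply` come back from the container as a JSON string printed by `/service/sbin/startnode login`. A minimal sketch of that parsing step; the output string below is fabricated, and only the `user`/`pass` keys match what the code above reads:

```python
import json

# Fabricated stand-in for the string returned by fabric.cmd_raw(...).
output = '{"user": "mongodb", "pass": "example-password"}'

login_info = json.loads(output)
entry_point = {'type': 'mongodb'}
entry_point['mongo_user'] = login_info['user']
entry_point['mongo_pass'] = login_info['pass']
print(entry_point)
```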
#### File: config/openmpi/mpiconfig.py
```python
import logging
import sh
import sys
from string import Template
class OpenMPIInitializer(object):
def __init__(self, system):
self.template_dir = None
self.template_repo = None
self.container_data_dir = None
self.container_log_dir = MPIConfig.log_directory
def new_host_name(self, instance_id):
"""
Generate a new hostname
"""
return 'openmpi' + str(instance_id)
def _execute_service(self, containers, entry_point, fabric, cmd):
"""
Start the service on the containers.
"""
master_output = fabric.cmd(containers[:1], '/service/sbin/startnode %s %s %s' % (cmd, entry_point['mount'], 'glustermaster'))
        slave_output = fabric.cmd(containers[1:], '/service/sbin/startnode %s %s %s' % (cmd, entry_point['mount'], 'glusterslave'))
return dict(master_output.items() + slave_output.items())
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
"""
Generate a new configuration.
"""
return 'openmpi_' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return ["0-65535"]
def get_working_ports(self, num_instances):
"""
Get the internal ports.
"""
BTL_PORT_END = MPIConfig.BTL_PORT_MIN + (MPIConfig.PORT_RANGE * num_instances)
OOB_PORT_END = MPIConfig.OOB_PORT_MIN + (MPIConfig.PORT_RANGE * num_instances)
BTL_PORTS = '%s-%s' % (MPIConfig.BTL_PORT_MIN, BTL_PORT_END)
OOB_PORTS = '%s-%s' % (MPIConfig.OOB_PORT_MIN, OOB_PORT_END)
return [BTL_PORTS, OOB_PORTS]
def get_total_instances(self, num_instances, layers):
instances = []
for i in range(num_instances):
instances.append('openmpi')
return instances
def generate(self, num):
"""
Generate a new configuration
Param num Number of instances that need to be configured
Param image Image type of the instances
"""
config = MPIConfig(num)
config.btl_port_min = MPIConfig.BTL_PORT_MIN
config.oob_port_min = MPIConfig.OOB_PORT_MIN
config.btl_port_range = MPIConfig.PORT_RANGE * num
config.oob_port_range = MPIConfig.PORT_RANGE * num
return config
def _generate_mca_params(self, config, new_config_dir):
"""
Generate the mca-params configuration.
"""
in_file = open(self.template_dir + '/openmpi-mca-params.conf', 'r')
out_file = open(new_config_dir + '/openmpi-mca-params.conf', 'w+')
changes = { "BTL_PORT_MIN": config.btl_port_min,
"BTL_PORT_RANGE": config.btl_port_range,
"OOB_PORT_MIN": config.oob_port_min,
"OOB_PORT_RANGE": config.oob_port_range }
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
in_file.close()
out_file.close()
def _find_mpi_storage(self, containers):
"""
Find a MPI compatible storage entry.
"""
for c in containers:
for s in c['storage']:
if s['type'] == 'gluster':
return s
def _find_mpi_compute(self, containers):
"""
Find a MPI compatible compute entry.
"""
for c in containers:
for s in c['compute']:
if s['type'] == 'openmpi':
return s
def apply(self, config, containers):
"""
Apply the configuration to the instances
"""
entry_point = { 'type' : 'openmpi' }
config_dirs = []
new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', new_config_dir)
except:
sys.stderr.write('could not create config dir ' + new_config_dir)
# For now the MPI client assumes there is only one storage and that it is
# a Gluster end point.
storage = self._find_mpi_storage(containers)
if storage:
mount_ip = storage['gluster']
mount_dir = storage['volume']
entry_point['mount'] = "%s:/%s" % (mount_ip, mount_dir)
# Check if we are being called as a compute instance or client.
if not 'compute' in containers[0]:
entry_point['hosts'] = []
entry_point['instances'] = []
for server in containers:
entry_point['instances'].append([server['data_ip'], server['host_name']])
entry_point['hosts'].append([server['data_ip'], server['host_name']])
else:
# This is the MPI client. First check if there are any compute
# nodes (the MPI client can be used with just a raw GlusterFS
# configuration). If it does have a compute, create a "hosts" file that contains the
# IP addresses of the compute nodes.
entry_point['ip'] = containers[0]['manage_ip']
compute = self._find_mpi_compute(containers)
if compute and 'hosts' in compute:
with open(new_config_dir + '/hosts', 'w+') as hosts_file:
for c in compute['hosts']:
hosts_file.write(c[0] + "\n")
self._generate_mca_params(config, new_config_dir)
for c in containers:
config_files = new_config_dir + '/*'
config_dirs.append([c['container'],
config_files,
config.config_directory])
return config_dirs, entry_point
class MPIConfig(object):
log_directory = '/service/logs/'
config_directory = '/service/conf/openmpi/'
BTL_PORT_MIN = 2000
OOB_PORT_MIN = 6000
PORT_RANGE = 4
def __init__(self, num):
self.num = num
self.btl_port_min = 0
self.btl_port_range = 0
self.oob_port_min = 0
self.oob_port_range = 0
self.config_directory = MPIConfig.config_directory
self.log_directory = MPIConfig.log_directory
```
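`get_working_ports` derives the BTL and OOB ranges from fixed minimums plus four ports per instance. The arithmetic for, say, three instances:

```python
BTL_PORT_MIN = 2000
OOB_PORT_MIN = 6000
PORT_RANGE = 4

def working_ports(num_instances):
    btl_end = BTL_PORT_MIN + PORT_RANGE * num_instances
    oob_end = OOB_PORT_MIN + PORT_RANGE * num_instances
    return ['%s-%s' % (BTL_PORT_MIN, btl_end),
            '%s-%s' % (OOB_PORT_MIN, oob_end)]

print(working_ports(3))  # -> ['2000-2012', '6000-6012']
```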
#### File: ferry/http/httpapi.py
```python
import json
import logging
from flask import Flask, request
import ferry.install
from ferry.install import Installer
from ferry.docker.manager import DockerManager
from ferry.docker.docker import DockerInstance
import os
import Queue
import sys
import threading2
import time
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# Initialize Flask
app = Flask(__name__)
# Initialize the storage driver
installer = Installer()
docker = DockerManager()
def _stack_worker():
"""
Worker thread.
"""
while(True):
payload = _new_queue.get()
if payload["_action"] == "new":
_allocate_new_worker(payload["_uuid"], payload)
elif payload["_action"] == "stopped":
_allocate_stopped_worker(payload)
elif payload["_action"] == "snapshotted":
_allocate_snapshot_worker(payload["_uuid"], payload)
elif payload["_action"] == "manage":
_manage_stack_worker(payload["_uuid"], payload["_manage"], payload["_key"])
time.sleep(2)
_new_queue = Queue.Queue()
_new_stack_worker = threading2.Thread(target=_stack_worker)
_new_stack_worker.daemon = True
_new_stack_worker.start()
def _allocate_backend_from_snapshot(cluster_uuid, payload, key_name):
"""
Allocate the backend from a snapshot.
"""
snapshot_uuid = payload['_file']
backends = docker.fetch_snapshot_backend(snapshot_uuid)
if backends:
return _allocate_backend(cluster_uuid = cluster_uuid,
payload = None,
key_name = key_name,
backends = backends,
new_stack = True)
def _allocate_backend_from_stopped(payload):
"""
Allocate the backend from a stopped service.
"""
app_uuid = payload['_file']
backends, key_name = docker.fetch_stopped_backend(app_uuid)
if backends:
backend_info, backend_plan = _allocate_backend(cluster_uuid = app_uuid,
payload = None,
key_name = key_name,
backends = backends,
uuid = app_uuid,
new_stack = False)
return backend_info, backend_plan, key_name
def _fetch_num_instances(instance_arg):
reply = {}
try:
reply['num'] = int(instance_arg)
except ValueError:
# This is an error so don't allocate anything.
reply['num'] = 0
return reply
def _allocate_compute(cluster_uuid, computes, key_name, storage_uuid):
"""
Allocate a new compute backend. This method assumes that every
compute backend already has a specific instance count associated
with it. After creating the compute backend, it sends back a list
of all UUIDs that were created in the process.
"""
uuids = []
compute_plan = []
for c in computes:
compute_type = c['personality']
reply = _fetch_num_instances(c['instances'])
num_instances = reply['num']
c['instances'] = num_instances
args = {}
if 'args' in c:
args = c['args']
layers = []
if 'layers' in c:
layers = c['layers']
compute_uuid, compute_containers = docker.allocate_compute(cluster_uuid = cluster_uuid,
compute_type = compute_type,
key_name = key_name,
storage_uuid = storage_uuid,
args = args,
num_instances = num_instances,
layers = layers)
if compute_uuid:
compute_plan.append( { 'uuid' : compute_uuid,
'containers' : compute_containers,
'type' : compute_type,
'start' : 'start' } )
uuids.append( compute_uuid )
else:
# The manager could not allocate the compute backend
# properly. Return a failure so that we can update the
# status properly and cancel the stack.
return None, None
return uuids, compute_plan
def _restart_compute(cluster_uuid, computes):
uuids = []
compute_plan = []
for c in computes:
service_uuid = c['uuid']
compute_type = c['type']
# Transform the containers into proper container objects.
compute_containers = c['containers']
containers = [DockerInstance(j) for j in compute_containers]
uuids.append(service_uuid)
compute_plan.append( { 'uuid' : service_uuid,
'containers' : containers,
'type' : compute_type,
'start' : 'restart' } )
docker.restart_containers(cluster_uuid, service_uuid, containers)
return uuids, compute_plan
def _allocate_backend(cluster_uuid,
payload,
key_name,
backends=None,
replace=False,
uuid=None,
new_stack = True):
"""
Allocate a brand new backend
"""
if not backends:
# We should find the backend information in the payload.
if 'backend' in payload:
backends = payload['backend']
else:
backends = []
# This is the reply we send back. The 'status' denotes whether
# everything was created/started fine. The UUIDs are a list of
# tuples (storage, compute) IDs. The 'backends' just keeps track of
# the backends we used for allocation purposes.
backend_info = { 'status' : 'ok',
'uuids' : [],
'backend' : backends }
storage_plan = []
compute_plan = []
compute_uuids = []
# Go ahead and create the actual backend stack. If the user has passed in
# an existing backend UUID, that means we should restart that backend. Otherwise
# we create a fresh backend.
for b in backends:
storage = b['storage']
if new_stack:
args = None
if 'args' in storage:
args = storage['args']
storage_type = storage['personality']
reply = _fetch_num_instances(storage['instances'])
num_instances = reply['num']
storage['instances'] = num_instances
layers = []
if 'layers' in storage:
layers = storage['layers']
storage_uuid, storage_containers = docker.allocate_storage(cluster_uuid = cluster_uuid,
storage_type = storage_type,
key_name = key_name,
num_instances = num_instances,
layers = layers,
args = args,
replace = replace)
if storage_uuid:
storage_plan.append( { 'uuid' : storage_uuid,
'containers' : storage_containers,
'type' : storage_type,
'start' : 'start' } )
else:
# The storage was not allocated properly. Change
# the status so that the stack can be properly cancelled.
backend_info["status"] = 'failed'
return backend_info, None
else:
storage_uuid = storage['uuid']
storage_type = storage['type']
storage_containers = storage['containers']
# Transform the containers into proper container objects.
containers = [DockerInstance(j) for j in storage_containers]
storage_plan.append( { 'uuid' : storage_uuid,
'containers' : containers,
'type' : storage_type,
'start' : 'restart' } )
docker.restart_containers(cluster_uuid, storage_uuid, containers)
# Now allocate the compute backend. The compute is optional so
# we should check if it even exists first.
compute_uuid = []
if 'compute' in b:
if not uuid:
compute_uuid, plan = _allocate_compute(cluster_uuid = cluster_uuid,
computes = b['compute'],
key_name = key_name,
storage_uuid = storage_uuid)
if compute_uuid:
compute_uuids += compute_uuid
compute_plan += plan
else:
# The storage was not allocated properly. Change
# the status so that the stack can be properly cancelled.
backend_info["status"] = 'failed'
return backend_info, None
else:
compute_uuid, plan = _restart_compute(cluster_uuid, b['compute'])
compute_uuids += compute_uuid
compute_plan += plan
backend_info['uuids'].append( {'storage':storage_uuid,
'compute':compute_uuid} )
return backend_info, { 'storage' : storage_plan,
'compute' : compute_plan }
def _allocate_connectors(cluster_uuid, payload, key_name, backend_info):
connector_info = []
connector_plan = []
if 'connectors' in payload:
connectors = payload['connectors']
for c in connectors:
# Check number of instances.
num_instances = 1
if 'instances' in c:
num_instances = int(c['instances'])
# Check if this connector type has already been pulled
# into the local index. If not, manually pull it.
connector_type = c['personality']
if not installer._check_and_pull_image(connector_type):
                # We could not fetch this connector. Instead of
# finishing, just return an error.
return False, connector_info, None
for i in range(num_instances):
# Connector names are created by the user
# to help identify particular instances.
if 'name' in c:
connector_name = c['name']
if num_instances > 1:
connector_name = connector_name + "-" + str(i)
else:
connector_name = None
# Arguments are optional parameters defined by
# the user and passed to the connectors.
if 'args' in c:
args = c['args']
else:
args = {}
# The user can choose to expose ports on the connectors.
if 'ports' in c:
ports = c['ports']
else:
ports = []
# Now allocate the connector.
uuid, containers = docker.allocate_connector(cluster_uuid = cluster_uuid,
connector_type = connector_type,
key_name = key_name,
backend = backend_info,
name = connector_name,
args = args,
ports = ports)
if uuid:
connector_plan.append( { 'uuid' : uuid,
'containers' : containers,
'type' : connector_type,
'start' : 'start' } )
connector_info.append(uuid)
else:
# The manager could not allocate the connectors
# properly. Return a failure so that we can update the
# status properly and cancel the stack.
return False, [], connector_plan
return True, connector_info, connector_plan
def _allocate_connectors_from_snapshot(cluster_uuid, payload, key_name, backend_info):
"""
Allocate the connectors from a snapshot.
"""
snapshot_uuid = payload['_file']
return docker.allocate_snapshot_connectors(cluster_uuid,
snapshot_uuid,
key_name,
backend_info)
def _allocate_connectors_from_stopped(payload, backend_info, params=None):
"""
Allocate the connectors from a stopped application.
"""
app_uuid = payload['_file']
return docker.allocate_stopped_connectors(app_uuid,
backend_info,
params)
def _register_ip_addresses(backend_plan, connector_plan):
"""
Helper function to register the hostname/IP addresses
of all the containers.
"""
ips = []
private_key = None
for s in backend_plan['storage']:
for c in s['containers']:
if isinstance(c, dict):
ips.append( [c['internal_ip'], c['external_ip'], c['hostname']] )
private_key = c['privatekey']
else:
ips.append( [c.internal_ip, c.external_ip, c.host_name] )
private_key = c.privatekey
for s in backend_plan['compute']:
for c in s['containers']:
if isinstance(c, dict):
ips.append( [c['internal_ip'], c['external_ip'], c['hostname']] )
else:
ips.append( [c.internal_ip, c.external_ip, c.host_name] )
for s in connector_plan:
for c in s['containers']:
# This is slightly awkward. It is because when starting
# a new stack, we get proper "container" objects. However,
# when restarting we get dictionary descriptions. Should just
# fix at the restart level!
if isinstance(c, dict):
ips.append( [c['internal_ip'], c['external_ip'], c['hostname']] )
else:
ips.append( [c.internal_ip, c.external_ip, c.host_name] )
# It's possible that the storage wasn't allocated
# properly and so there's nothing to transfer.
if private_key:
docker._transfer_ip(private_key, ips)
def _start_all_services(backend_plan, connector_plan):
"""
Helper function to start both the backend and
frontend. Depending on the plan, this will either
do a fresh start or a restart on an existing cluster.
"""
# Make sure that all the hosts have the current set
# of IP addresses.
_register_ip_addresses(backend_plan, connector_plan)
# Now we need to start/restart all the services.
for s in backend_plan['storage']:
if s['start'] == 'start':
docker.start_service(s['uuid'],
s['containers'])
else:
docker._restart_service(s['uuid'], s['containers'], s['type'])
for c in backend_plan['compute']:
if c['start'] == 'start':
docker.start_service(c['uuid'], c['containers'])
else:
docker._restart_service(c['uuid'], c['containers'], c['type'])
# The connectors can optionally output msgs for the user.
# Collect them so that we can display them later.
all_output = {}
for c in connector_plan:
if c['start'] == 'start':
output = docker.start_service(c['uuid'], c['containers'])
all_output = dict(all_output.items() + output.items())
else:
output = docker._restart_connectors(c['uuid'], c['containers'], c['backend'])
all_output = dict(all_output.items() + output.items())
return all_output
def _allocate_new(payload, key_name):
"""
Helper function to allocate and start a new stack.
"""
# Check if there are any questions/answers in
# this payload. If so, go ahead and resolve the answers.
if 'questions' in payload:
values = docker.resolver.resolve(payload['questions'])
payload = docker.resolver.replace(payload, values)
# Now allocate the backend. This includes both storage and compute.
reply = {}
uuid = docker.reserve_stack()
payload["_action"] = "new"
payload["_uuid"] = str(uuid)
payload["_key"] = key_name
_new_queue.put(payload)
docker.register_stack(backends = { 'uuids':[] },
connectors = [],
base = payload['_file'],
cluster_uuid = uuid,
status='building',
key = key_name,
new_stack=True)
return json.dumps({ 'text' : str(uuid),
'status' : 'building' })
def _cancel_stack(uuid, backend_info, connector_info, base):
logging.info("canceling stack...")
docker.cancel_stack(uuid, backend_info, connector_info)
docker.register_stack(backends = { 'uuids':[] },
connectors = [],
base = base,
cluster_uuid = uuid,
status='failed',
new_stack=False)
def _allocate_new_worker(uuid, payload):
"""
Helper function to allocate and start a new stack.
"""
reply = {}
key_name = payload['_key']
logging.info("creating backend...")
backend_info, backend_plan = _allocate_backend(cluster_uuid = uuid,
payload = payload,
key_name = key_name,
replace=True,
new_stack=True)
# Check if the backend status was ok, and if so,
# go ahead and allocate the connectors.
reply['status'] = backend_info['status']
if backend_info['status'] == 'ok':
logging.info("creating connectors...")
success, connector_info, connector_plan = _allocate_connectors(cluster_uuid = uuid,
payload = payload,
key_name = key_name,
backend_info = backend_info['uuids'])
if success:
logging.info("starting services...")
output = _start_all_services(backend_plan, connector_plan)
docker.register_stack(backends = backend_info,
connectors = connector_info,
base = payload['_file'],
key = key_name,
cluster_uuid = uuid,
status='running',
output = output,
new_stack=False)
reply['text'] = str(uuid)
reply['msgs'] = output
else:
# One or more connectors was not instantiated properly.
logging.info("cancelling services...")
_cancel_stack(uuid, backend_info, connector_info, payload['_file'])
reply['status'] = 'failed'
else:
_cancel_stack(uuid, backend_info, [], payload['_file'])
reply['status'] = 'failed'
return json.dumps(reply)
def _allocate_stopped(payload):
uuid = payload['_file']
stack = docker.get_stack(uuid)
payload["_action"] = "stopped"
payload["_key"] = stack['key']
_new_queue.put(payload)
docker.register_stack(backends = stack['backends'],
connectors = stack['connectors'],
base = stack['base'],
cluster_uuid = uuid,
status='restarting',
key = stack['key'],
new_stack = False)
return json.dumps({'status' : 'building',
'text' : str(uuid)})
def _allocate_stopped_worker(payload):
"""
Helper function to allocate and start a stopped stack.
"""
uuid = payload['_file']
stack = docker.get_stack(uuid)
logging.info("creating backend...")
backend_info, backend_plan, key_name = _allocate_backend_from_stopped(payload = payload)
if backend_info['status'] == 'ok':
logging.info("creating connectors...")
connector_info, connector_plan = _allocate_connectors_from_stopped(payload = payload,
backend_info = backend_info['uuids'])
logging.info("starting services...")
output = _start_all_services(backend_plan, connector_plan)
docker.register_stack(backends = backend_info,
connectors = connector_info,
base = stack['base'],
cluster_uuid = uuid,
status='running',
output = output,
key = stack['key'],
new_stack = False)
return json.dumps({'status' : 'ok',
'text' : str(uuid),
'msgs' : output})
else:
return json.dumps({'status' : 'failed'})
def _allocate_snapshot(payload, key_name):
"""
Helper function to allocate and start a snapshot.
"""
uuid = docker.reserve_stack()
payload["_action"] = "snapshotted"
payload["_uuid"] = str(uuid)
payload["_key"] = key_name
_new_queue.put(payload)
docker.register_stack(backends = { 'uuids':[] },
connectors = [],
base = payload['_file'],
cluster_uuid = uuid,
status='building',
key = key_name,
new_stack=True)
return json.dumps({ 'text' : str(uuid),
'status' : 'building' })
def _allocate_snapshot_worker(uuid, payload):
"""
Helper function to allocate and start a snapshot.
"""
key_name = payload['_key']
backend_info, backend_plan = _allocate_backend_from_snapshot(cluster_uuid = uuid,
payload = payload,
key_name = key_name)
if backend_info['status'] == 'ok':
connector_info, connector_plan = _allocate_connectors_from_snapshot(cluster_uuid = uuid,
payload = payload,
key_name = key_name,
backend_info = backend_info['uuids'])
output = _start_all_services(backend_plan, connector_plan)
docker.register_stack(backends = backend_info,
connectors = connector_info,
base = payload['_file'],
cluster_uuid = uuid,
status='running',
output = output,
key = key_name,
new_stack = True)
return json.dumps({'status' : 'ok',
'text' : str(uuid),
'msgs' : output })
else:
return json.dumps({'status' : 'failed'})
@app.route('/storage', methods=['GET'])
def query_storage():
"""
Fetch the current information for a particular filesystem.
"""
status = AllocationResponse()
status.uuid = request.args['uuid']
status.status = status.NOT_EXISTS
# Get the time the storage cluster was created,
# along with basic usage information.
info = storage.query_storage(status.uuid)
if info != None:
status.status = info
# Return the JSON reply.
return status.json()
@app.route('/version', methods=['GET'])
def get_version():
"""
Fetch the current docker version
"""
return docker.version()
@app.route('/login', methods=['POST'])
def login_registry():
"""
Login to a remote registry.
"""
if docker.login_registry():
return "success"
else:
return "fail"
@app.route('/image', methods=['POST'])
def push_image():
"""
Push a local image to a remote registry.
"""
image = request.form['image']
if 'server' in request.form:
registry = request.form['server']
else:
registry = None
if docker.push_image(image, registry):
return "success"
else:
return "fail"
@app.route('/image', methods=['GET'])
def pull_image():
"""
Pull a remote image to the local registry.
"""
image = request.args['image']
if docker.pull_image(image):
return "success"
else:
return "fail"
@app.route('/create', methods=['POST'])
def allocate_stack():
"""
Create some new storage infrastructure
"""
payload = json.loads(request.form['payload'])
key_name = request.form['key']
# Check whether the user wants to start from fresh or
# start with a snapshot.
if docker.is_stopped(payload['_file']):
return _allocate_stopped(payload)
elif docker.is_snapshot(payload['_file']):
return _allocate_snapshot(payload, key_name)
elif '_file_path' in payload:
return _allocate_new(payload, key_name)
else:
return "Could not start " + payload['_file']
@app.route('/quit', methods=['POST'])
def quit():
"""
Quit any backend services that may be running.
"""
docker.quit()
return ""
@app.route('/query', methods=['GET'])
def query_stacks():
"""
Query the stacks.
"""
if 'constraints' in request.args:
constraints = json.loads(request.args['constraints'])
return docker.query_stacks(constraints)
else:
return docker.query_stacks()
@app.route('/snapshots', methods=['GET'])
def snapshots():
"""
Query the snapshots
"""
return docker.query_snapshots()
@app.route('/apps', methods=['GET'])
def apps():
"""
Get list of installed applications.
"""
if 'app' in request.args:
app = request.args['app']
else:
app = None
return docker.query_applications(app)
@app.route('/images', methods=['GET'])
def images():
"""
Get list of installed Docker images.
"""
return docker.query_images()
@app.route('/stack', methods=['GET'])
def inspect():
"""
Inspect a particular stack.
"""
uuid = request.args['uuid']
resp = docker.inspect_stack(uuid)
if resp:
return resp
elif docker.is_installed(uuid):
return docker.inspect_installed(uuid)
else:
# Try to see if this is an active service.
service = docker._get_service_configuration(uuid, detailed=True)
if service:
service_info = docker._get_inspect_info(uuid)
return json.dumps(service_info,
sort_keys=True,
indent=2,
separators=(',',':'))
return "could not inspect " + str(uuid)
@app.route('/logs', methods=['GET'])
def logs():
"""
Copy over logs
"""
stack_uuid = request.args['uuid']
to_dir = request.args['dir']
return docker.copy_logs(stack_uuid, to_dir)
@app.route('/manage/stack', methods=['POST'])
def manage_stack():
"""
Manage the stacks.
"""
payload = { "_uuid" : request.form['uuid'],
"_manage" : request.form['action'],
"_key" : request.form['key'],
"_action" : "manage" }
_new_queue.put(payload)
return ""
def _manage_stack_worker(uuid, action, private_key):
"""
Manage the stacks.
"""
reply = docker.manage_stack(stack_uuid = uuid,
private_key = private_key,
action = action)
@app.before_request
def before_request():
context = {
'url': request.path,
'method': request.method,
'ip': request.environ.get("REMOTE_ADDR")
}
logging.debug("%(method)s from %(ip)s for %(url)s", context)
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
if __name__ == '__main__':
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(port=int(sys.argv[2]),
address=sys.argv[1])
IOLoop.instance().start()
```
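Stack creation is asynchronous: handlers push a payload onto `_new_queue` and return a 'building' status right away, while a single daemon thread drains the queue. A stripped-down sketch of that producer/consumer shape, using the same Python 2 standard-library names as the module above; the payload fields are illustrative:

```python
import Queue
import threading
import time

work_queue = Queue.Queue()

def worker():
    while True:
        payload = work_queue.get()
        # Dispatch on the action tag, as _stack_worker does.
        print("handling %s for %s" % (payload["_action"], payload["_uuid"]))
        time.sleep(2)

t = threading.Thread(target=worker)
t.daemon = True
t.start()

# An HTTP handler would enqueue and answer immediately.
work_queue.put({"_action": "new", "_uuid": "stack-123"})
time.sleep(1)  # give the daemon thread a moment before the script exits
```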
#### File: ferry/ip/nat.py
```python
import logging
import os
from pymongo import MongoClient
from subprocess import Popen, PIPE
class NAT(object):
def __init__(self):
self._current_port = 999
self.reserved_ports = [4000, 5000]
self._init_state_db()
self._clear_nat()
self._init_nat()
self._repop_nat()
def _init_state_db(self):
self.mongo = MongoClient(os.environ['MONGODB'], 27017, connectTimeoutMS=6000)
self.nat_collection = self.mongo['network']['nat']
def _clear_nat(self):
logging.warning("clearing nat")
cmds = ['iptables -t nat -D PREROUTING -m addrtype --dst-type LOCAL -j FERRY_CHAIN',
'iptables -t nat -D OUTPUT -m addrtype --dst-type LOCAL ! --dst 127.0.0.0/8 -j FERRY_CHAIN',
'iptables -t nat -D OUTPUT -m addrtype --dst-type LOCAL -j FERRY_CHAIN',
'iptables -t nat -D OUTPUT -j FERRY_CHAIN',
'iptables -t nat -F FERRY_CHAIN',
'iptables -t nat -D PREROUTING -j FERRY_CHAIN',
'iptables -t nat -X FERRY_CHAIN']
for c in cmds:
logging.warning(c)
Popen(c, shell=True)
def _init_nat(self):
logging.warning("init nat")
cmds = ['iptables -t nat -N FERRY_CHAIN',
'iptables -t nat -A OUTPUT -m addrtype --dst-type LOCAL ! --dst 127.0.0.0/8 -j FERRY_CHAIN',
'iptables -t nat -A PREROUTING -m addrtype --dst-type LOCAL -j FERRY_CHAIN']
for c in cmds:
logging.warning(c)
Popen(c, shell=True)
def _repop_nat(self):
rules = self.nat_collection.find()
for r in rules:
self._save_nat(r['src_ip'], r['src_port'],r['ip'], r['port'])
def _save_nat(self, source_ip, source_port, dest_ip, dest_port):
cmds = ['iptables -I FORWARD 1 ! -i ferry0 -o ferry0 -p tcp --dport %s -d %s -j ACCEPT' % (str(dest_port), dest_ip),
'iptables -t nat -A FERRY_CHAIN -d %s -p tcp --dport %s -j DNAT --to-destination %s:%s' % (source_ip, str(source_port), dest_ip, str(dest_port))]
for c in cmds:
logging.warning(c)
Popen(c, shell=True)
def _delete_nat(self, source_ip, source_port, dest_ip, dest_port):
cmds = ['iptables -D FORWARD ! -i ferry0 -o ferry0 -p tcp --dport %s -d %s -j ACCEPT' % (str(dest_port), dest_ip),
'iptables -t nat -D FERRY_CHAIN -d %s -p tcp --dport %s -j DNAT --to-destination %s:%s' % (source_ip, str(source_port), dest_ip, str(dest_port))]
for c in cmds:
logging.warning(c)
Popen(c, shell=True)
def _save_forwarding_rule(self, source_ip, source_port, dest_ip, dest_port):
self.nat_collection.insert({ 'ip' : dest_ip,
'port' : dest_port,
'src_ip' : source_ip,
'src_port' : source_port })
def _delete_forwarding_rule(self, dest_ip, dest_port):
self.nat_collection.remove( { 'ip' : dest_ip,
'port' : dest_port } )
def random_port(self):
while True:
port = self._current_port
self._current_port += 1
if not port in self.reserved_ports:
return str(port)
def has_rule(self, dest_ip, dest_port):
rule = self.nat_collection.find_one( { 'ip' : dest_ip,
'port' : dest_port } )
if rule:
return rule['src_ip'], rule['src_port']
else:
return None, None
def delete_rule(self, dest_ip, dest_port):
"""
Delete the forwarding rule.
"""
src_ip, src_port = self.has_rule(dest_ip, dest_port)
if src_ip:
self._delete_forwarding_rule(dest_ip, dest_port)
self._delete_nat(src_ip, src_port, dest_ip, dest_port)
else:
logging.warning("no such dest %s:%s" % (dest_ip, dest_port))
def forward_rule(self, source_ip, source_port, dest_ip, dest_port):
"""
Add a new forwarding rule.
"""
if source_port in self.reserved_ports:
logging.warning("cannot use reserved port " + source_port)
return False
src_ip, src_port = self.has_rule(dest_ip, dest_port)
if not src_ip:
self._save_forwarding_rule(source_ip, source_port, dest_ip, dest_port)
self._save_nat(source_ip, source_port, dest_ip, dest_port)
return True
else:
logging.warning("port " + source_port + " already reserved")
return False
```
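Each forwarding rule renders into two iptables commands: an ACCEPT in the FORWARD chain and a DNAT in the custom FERRY_CHAIN. A dry-run sketch that only prints the commands `_save_nat` would execute, with made-up addresses and ports:

```python
def render_nat_cmds(source_ip, source_port, dest_ip, dest_port):
    # Same command templates as NAT._save_nat, without running them.
    return [
        'iptables -I FORWARD 1 ! -i ferry0 -o ferry0 -p tcp --dport %s -d %s -j ACCEPT'
        % (str(dest_port), dest_ip),
        'iptables -t nat -A FERRY_CHAIN -d %s -p tcp --dport %s -j DNAT --to-destination %s:%s'
        % (source_ip, str(source_port), dest_ip, str(dest_port)),
    ]

for cmd in render_nat_cmds('10.0.0.5', 8080, '172.17.0.2', 27017):
    print(cmd)
```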
#### File: ferry/ferry/options.py
```python
class CmdHelp(object):
def __init__(self):
self.options = {}
self.cmds = {}
self.usage = ''
self.description = ''
def add_option(self, short_flag, long_flag, help):
self.options[short_flag] = { 'short' : short_flag,
'long' : long_flag,
'help' : help,
'args' : [] }
def add_cmd(self, cmd, help):
self.cmds[cmd] = { 'cmd' : cmd,
'help' : help,
'args' : [] }
def _parse_values(self, i, args):
values = []
if i == len(args):
return i - 1, values
elif args[i] in self.options or args[i] in self.cmds:
return i - 1, values
elif i < len(args):
values.append(args[i])
j, v = self._parse_values(i + 1, args)
if i + 1 == j:
i = j
values += v
return i, values
def _is_option(self, flag):
if flag in self.options:
return True
else:
for f in self.options.keys():
if self.options[f]['long'] == flag:
return True
return False
def _get_canonical_option(self, flag):
if flag in self.options:
return flag
else:
for f in self.options.keys():
if self.options[f]['long'] == flag:
return f
def parse_args(self, args):
i = 0
while i < len(args):
s = args[i].strip()
if self._is_option(s):
j, values = self._parse_values(i + 1, args)
s = self._get_canonical_option(s)
if len(values) > 0:
i = j
self.options[s]['args'] += values
else:
i += 1
self.options[s]['args'].append(True)
elif s in self.cmds:
j, values = self._parse_values(i + 1, args)
if len(values) > 0:
i = j
self.cmds[s]['args'] += values
else:
i += 1
self.cmds[s]['args'].append(True)
else:
i += 1
def get_cmds(self):
ac = {}
for c in self.cmds:
a = self.cmds[c]['args']
if len(a) > 0:
ac[c] = a
return ac
def get_options(self):
ac = {}
for c in self.options:
a = self.options[c]['args']
if len(a) > 0:
ac[c] = a
return ac
def print_help(self):
help_string = 'Usage: ' + self.usage + '\n'
help_string += '\n'
help_string += self.description + '\n'
help_string += '\n'
help_string += 'Options:\n'
for k in sorted(self.options.iterkeys()):
cmd_string = ' {:10s} {:13s} {:10s}'.format(k, self.options[k]['long'], self.options[k]['help'])
help_string += cmd_string + '\n'
help_string += '\n'
help_string += 'Commands:\n'
for k in sorted(self.cmds.iterkeys()):
cmd_string = ' {:10s} {:10s}'.format(k, self.cmds[k]['help'])
help_string += cmd_string + '\n'
return help_string
``` |
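`parse_args` treats everything after a registered flag or command as its arguments until the next registered token; flags given without values get a single `True`. A usage sketch, meant to run next to the `CmdHelp` class above, with an invented option and command:

```python
help_menu = CmdHelp()
help_menu.add_option('-k', '--key', 'Specify the ssh key')
help_menu.add_cmd('start', 'Start a new application')

# argv[0] is the program name, as in the CLI's main() above.
help_menu.parse_args(['ferry', 'start', 'hadoop', '-k', '/keys/id_rsa'])
print(help_menu.get_cmds())     # -> {'start': ['hadoop']}
print(help_menu.get_options())  # -> {'-k': ['/keys/id_rsa']}
```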
{
"source": "JHORIZ-RODEL-AQUINO/OOP-1-2",
"score": 4
} |
#### File: JHORIZ-RODEL-AQUINO/OOP-1-2/Midterm Exam b.py
```python
from tkinter import *
window = Tk()
window.title("Special Midterm Exam in OOP")
window.geometry("500x400+20+10")
# Creating a function code for changing color
i = 1
def change_color():
global i
i += 1
if i % 2 == 0:
btn.configure(bg="yellow")
else:
btn.configure(bg="white")
# Insert a Button Widget
btn = Button(window, text="Click to Change color", bg="white", height=10, width=30, command=change_color)
btn.place(relx=.5, rely=.5, anchor="center")
window.mainloop()
``` |
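The toggle above tracks a global click counter and checks its parity. The same behaviour can be had by flipping a boolean instead; a small alternative sketch, not part of the original exam file:

```python
from tkinter import *

window = Tk()
is_yellow = False

def change_color():
    global is_yellow
    is_yellow = not is_yellow
    btn.configure(bg="yellow" if is_yellow else "white")

btn = Button(window, text="Click to Change color", bg="white",
             height=10, width=30, command=change_color)
btn.place(relx=.5, rely=.5, anchor="center")
window.mainloop()
```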
{
"source": "jhorvat/uogmobile-server",
"score": 3
} |
#### File: uogmobile-server/app/api_error.py
```python
class ApiError(Exception):
def __init__(self, message, status_code=400, cause=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.cause = cause
def to_dict(self):
return {
"message": "{0} {1}".format(self.message, "Cause: {0}".format(repr(self.cause)) if self.cause else "")
}
``` |
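`to_dict` folds the optional cause into the message, which makes the class convenient for JSON error responses. A usage sketch, meant to run next to the `ApiError` class above; the wrapped `KeyError` is invented for illustration:

```python
try:
    raise KeyError("session token")
except KeyError as err:
    api_err = ApiError("Login failed.", status_code=401, cause=err)
    print(api_err.status_code)  # -> 401
    print(api_err.to_dict())    # -> {'message': "Login failed. Cause: KeyError('session token'...)"}
```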
{
"source": "JHorwitz1011/DFA2020-2021-UCP",
"score": 3
} |
#### File: 2048/gui/Wrapper.py
```python
import queue
from gui.AppGui import AppGui
from tracking.WebcamThread import WebcamThread
import shelve
import os
import vars.constants as c
import vars.config as cfg
import logic
import cv2
class Wrapper:
def __init__(self):
        #Initialize data storage first so the GUI is created with the correct values
self.init_data()
self.app_gui = AppGui()
#create a Video camera instance
#self.camera = VideoCamera()
#intialize variable to hold current webcam video frame
self.current_frame = None
#create a queue to fetch and execute callbacks passed
#from background thread
self.callback_queue = queue.Queue()
#create a thread to fetch webcam feed video
self.webcam_thread = WebcamThread(self.app_gui, self.callback_queue)
#save attempts made to fetch webcam video in case of failure
self.webcam_attempts = 0
#register callback for being called when GUI window is closed
self.app_gui.root.protocol("WM_DELETE_WINDOW", self.on_gui_closing)
#start webcam
self.start_video()
#start fetching video
self.fetch_webcam_video()
def init_data(self):
if(not os.path.exists(c.folderPath)):
os.makedirs(c.folderPath)
if (not len(os.listdir(c.folderPath)) == 0):
with shelve.open(c.filePath, 'c') as dataFile:
if not dataFile.keys().__contains__('bounds'):
dataFile['bounds'] = (0,0)
if not dataFile.keys().__contains__('threshold'): #TODO rename to sensitivity or similar
dataFile['threshold'] = cfg.threshold
cfg.colorLower, cfg.colorUpper = dataFile['bounds']
cfg.threshold = dataFile['threshold']
def on_gui_closing(self):
#saving
logic.save_game(self.app_gui.left_view.matrix, cfg.highScore, cfg.currentScore)
self.webcam_attempts = 51
self.webcam_thread.stop()
self.webcam_thread.join()
self.webcam_thread.release_resources()
self.app_gui.root.destroy()
def start_video(self):
self.webcam_thread.start()
def fetch_webcam_video(self):
try:
#while True:
#try to get a callback put by webcam_thread
#if there is no callback and call_queue is empty
#then this function will throw a Queue.Empty exception
callback = self.callback_queue.get_nowait()
callback()
self.webcam_attempts = 0
#self.app_gui.root.update_idletasks()
self.app_gui.root.after(7, self.fetch_webcam_video)
except queue.Empty:
if (self.webcam_attempts <= 500):
self.webcam_attempts = self.webcam_attempts + 1
self.app_gui.root.after(10, self.fetch_webcam_video)
def test_gui(self):
#test images update
#read the images using OpenCV, later this will be replaced
#by live video feed
image, gray = self.read_images()
self.app_gui.update_webcam_output(image)
self.app_gui.update_neural_network_output(gray)
#test chat view update
self.app_gui.update_chat_view("4 + 4 = ? ", "number")
#test emotion state update
self.app_gui.update_emotion_state("neutral")
def read_images(self):
image = cv2.imread('data/test1.jpg')
#conver to RGB space and to gray scale
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image, gray
def launch(self):
self.app_gui.launch()
def __del__(self):
print('Done')#EDIT self.webcam_thread.stop()
```
#### File: 2048/misc/GameBoard.py
```python
from tkinter import *
from constants import *
from PIL import ImageTk, Image
from threading import Thread
import time
class GameBoard(Canvas):
def __init__(self, parent, **kwargs):
Canvas.__init__(self, parent, **kwargs)
self.height = self.winfo_reqheight()
self.width = self.winfo_reqwidth()
self.pieces = []
self.bind("<Configure>", self.on_resize)
#self.bind("<<Test>>",self.test)
# config settings
self['bg'] = BACKGROUND_COLOR_GAME #background color
self['bd'] = 0 #border width pixels
# c = self.create_rectangle(, 10, 100, 100,fill=BACKGROUND_COLOR_CELL_EMPTY,width=GRID_PADDING/2)
#c['bg'] = BACKGROUND_COLOR_CELL_EMPTY
self.draw_background()
l = Label(self, bg="green",fg="black",text="2",font=("Calibri",15))
a = self.create_window(10,10,width=100,height=100,window=l,anchor='nw')
self.pieces.append(a)
self.update_pieces()
#self.pieces.append(self.create_rectangle(30, 10, 120, 80, outline="#fb0", fill="#fb0"))
#self.pieces[0].label
#self.pieces.append(ImageTk.PhotoImage(Image.open("test.png")))
#self.create_image(0,0,anchor=NW,image=self.pieces[0])
#self.tag_lower()
def test(self):
print("threading, works!")
def draw_background(self):
for i in range(GRID_LEN):
for j in range(GRID_LEN):
pad_offset = GRID_PADDING
x0 = i*SQUARE_SIDE + pad_offset*(1+i)
y0 = j*SQUARE_SIDE + pad_offset*(j+1)
x1 = (i + 1)*SQUARE_SIDE + pad_offset*(1+i)
y1 = (j + 1)*SQUARE_SIDE + pad_offset*(j+1)
c = self.create_rectangle(x0, y0, x1, y1,
fill=BACKGROUND_COLOR_CELL_EMPTY,width=0)
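    # Worked example of the grid arithmetic above (constants are assumed here,
    # e.g. SQUARE_SIDE = 100 and GRID_PADDING = 10; the real values live in the
    # repo's constants module): cell (i=0, j=0) spans (10, 10)-(110, 110) and
    # cell (i=1, j=0) spans (120, 10)-(220, 110), so neighbouring cells are
    # separated by exactly one padding gap.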
def update_pieces(self):
        print(self.coords(self.pieces[0]))  # canvas items are integer ids; query their position via coords()
self.move(self.pieces[0], 0.1, 0.1)
self.after(10, self.update_pieces)
def on_resize(self,event):
# determine the ratio of old width/height to new width/height
wscale = float(event.width)/self.width
hscale = float(event.height)/self.height
self.width = event.width
self.height = event.height
# resize the canvas
#self.config(width=self.width, height=self.height)
# rescale all the objects tagged with the "all" tag
self.scale("all",0,0,wscale,hscale)
print(self.height, self.width)
#root.event_generate("<<Test>>")
if __name__ == '__main__':
root = Tk()
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
#root.resizable(height=False,width=False)
#root.bind("<<Test>>",threadingtest)
side = GRID_LEN *( SQUARE_SIDE + GRID_PADDING ) + GRID_PADDING
canvas = GameBoard(root,width=side,height=side)
canvas.pack(fill=BOTH, expand=YES)
canvas.addtag_all('all')
#label_frame = Frame(root,width=700,height=20,bg="white")
#label_frame.pack() # Stops child widgets of label_frame from resizing it
# label_frame.pack()
#widget = Frame(root, width=SQUARE_SIDE, height=SQUARE_SIDE,bg='red',borderwidth=4)
#widget.pack()
#text = Label(widget, text ="test",fg='blue')
#text.pack()
#looping(root)
root.mainloop()
# loop_thread = Thread(target=looping,args=(root,))
# loop_thread.start()
# for x in range(10):
# time.sleep(1)
#print('testing thread')
# loop_thread.join()
```
#### File: 2048/tracking/Tracking.py
```python
import vars.config as cfg
import vars.constants as c
import cv2
import imutils
import numpy as np
import keyboard
import time
from imutils.video import VideoStream
def press(input, key='a'):
if input and not cfg.last_input:
keyboard.press_and_release(key)
cfg.cooldown = 50
cfg.last_input = input
# EDIT
# function to interactively select a tracking color range from the webcam feed with OpenCV
def auto_range():
initialBoundingBox = None
vs = VideoStream(src=0).start()
time.sleep(1.0)
while True:
frame = vs.read()
frame = imutils.resize(frame, width = c.WIN_SIZE)
#display frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("s"):
lowerBound, upperBound, success = roi_range(frame)
vs.stop()
return lowerBound, upperBound
# Expects frame to be in BGR color space (the selected ROI is converted to HSV internally)
def roi_range(frame):
initialBoundingBox = cv2.selectROI("Frame", frame, fromCenter = False, showCrosshair = True)
# crop original frame
roi = frame[int(initialBoundingBox[1]):int(initialBoundingBox[1]+initialBoundingBox[3]), int(initialBoundingBox[0]):int(initialBoundingBox[0]+initialBoundingBox[2])]
if len(roi) != 0:
# convert to hsv
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
print(type(roi))
# get a height and width
(height, width, channels) = roi.shape
hue = []
sat = []
val = []
#parse hsv values
for y in range(0, height, 4):
for x in range(0, width, 4):
(h,s,v) = hsv[y,x]
hue.append(h)
sat.append(s)
val.append(v)
# determine max
hMaxValue = max(hue, key = hue.count)
sMaxValue = max(sat, key = sat.count)
vMaxValue = max(val, key = val.count)
# calculate upper/lower bounds
upperBound = (int(hMaxValue + c.PLUS_MINUS), int(sMaxValue + 2*c.PLUS_MINUS), int(vMaxValue + 3*c.PLUS_MINUS))
lowerBound = (int(hMaxValue - c.PLUS_MINUS), int(sMaxValue - 2*c.PLUS_MINUS), int(vMaxValue - 3*c.PLUS_MINUS))
print("BOUNDS:", lowerBound," ", upperBound)
cv2.destroyAllWindows()
return lowerBound, upperBound, True
else:
cv2.destroyAllWindows()
return 0, 0, False
def detect_color(img, points):
#img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# if cfg.cooldown > 0:
# print(cfg.cooldown)
# resize the img, blur it, and convert it to the HSV
# color space
img = imutils.resize(img, width=600)
blurred = cv2.GaussianBlur(img, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_RGB2HSV) #TODO why is this a RGB conversion
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, cfg.colorLower, cfg.colorUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
centroid = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(centroid)
M = cv2.moments(centroid)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the img,
# then update the list of tracked points
#cv2.circle(img, (int(x), int(y)), int(radius),
# (0, 255, 255), 2)
#cv2.circle(img, center, 5, (0, 0, 255), -1)
pass
# update the points queue
cfg.pts.appendleft(center)
checkKeyPress()
drawLine(img)
img = cv2.flip(img, 1)
return img
def checkKeyPress():
if((len(cfg.pts) == c.maxlen) and cfg.pts[(c.maxlen) - 1] is not None and cfg.pts[0] is not None):
xdif = cfg.pts[c.maxlen-1][0] - cfg.pts[0][0]
ydif = cfg.pts[c.maxlen-1][1] - cfg.pts[0][1]
if xdif > cfg.threshold:
press(True, key='d')
elif xdif < -1 * cfg.threshold:
press(True, key='a')
elif ydif > cfg.threshold:
press(True, key='w')
elif ydif < -1 * cfg.threshold:
press(True, key='s')
else:
press(False)
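# Worked example of the swipe logic above (threshold value assumed for
# illustration): with cfg.threshold == 50, a horizontal drift of 120 px between
# the oldest and newest tracked point gives abs(xdif) == 120 > 50, so press()
# fires with 'd' or 'a' depending on the sign; press(False) in the else branch
# resets cfg.last_input so the next swipe can trigger a fresh key press.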
def drawLine(img):
# loop over the set of tracked points
for i in range(1, len(cfg.pts)):
# if either of the tracked points are None, ignore
# them
if cfg.pts[i - 1] is None or cfg.pts[i] is None:
continue
# otherwise, compute the thickness of the line and
# draw the connecting lines
thickness = int(np.sqrt(c.LINE_THICKNESS / float(i + 1)) * 2.5)
if cfg.last_input or cfg.cooldown > 0:
cv2.line(img, cfg.pts[i - 1], cfg.pts[i], c.LINE_GREEN, thickness)
if cfg.cooldown > 0:
cfg.cooldown -= 1
else:
cv2.line(img, cfg.pts[i - 1], cfg.pts[i], c.LINE_RED, thickness)
return img
def detect_aruco(img, points):
global up_frames, down_frames, left_frames, right_frames
arucoDict = cv2.aruco.Dictionary_get(c.ARUCO_DICT[c.TAG_TYPE])
arucoParams = cv2.aruco.DetectorParameters_create()
(corners, ids, rejected) = cv2.aruco.detectMarkers(img, arucoDict, parameters=arucoParams)
cv2.aruco.drawDetectedMarkers(img, corners, ids, (0, 255, 0))
if len(corners) > 0:
ids = ids.flatten()
for (markerCorner, markerID) in zip(corners, ids):
corners = markerCorner.reshape((4, 2))
(topLeft, topRight, bottomRight, bottomLeft) = corners
topRight = (int(topRight[0]), int(topRight[1]))
bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
topLeft = (int(topLeft[0]), int(topLeft[1]))
# compute and draw the center (x, y)-coordinates of the ArUco marker
cX = int((topLeft[0] + bottomRight[0]) / 2.0)
cY = int((topLeft[1] + bottomRight[1]) / 2.0)
center = (cX, cY)
cfg.pts.appendleft(center)
checkKeyPress()
drawLine(img)
img = cv2.flip(img, 1)
return img
```
#### File: DFA2020-2021-UCP/Experimental Input/PongInput.py
```python
import turtle
import os
wn = turtle.Screen()
wn.title("Pong by @TokyoEdTech")
wn.bgcolor("black")
wn.setup(width=800, height=600)
wn.tracer(0)
# Score
score_a = 0
score_b = 0
# Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid=5, stretch_len=1)
paddle_a.penup()
paddle_a.goto(-350, 0)
# Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid=5, stretch_len=1)
paddle_b.penup()
paddle_b.goto(350, 0)
# Ball
ball1 = turtle.Turtle()
ball1.speed(0)
ball1.shape("square")
ball1.color("green")
ball1.penup()
ball1.goto(0, 0)
ball1.dx = 0.5
ball1.dy = -0.5
# Ball
ball2 = turtle.Turtle()
ball2.speed(0)
ball2.shape("square")
ball2.color("blue")
ball2.penup()
ball2.goto(0, 0)
ball2.dx = -2
ball2.dy = -2
# Ball
ball3 = turtle.Turtle()
ball3.speed(0)
ball3.shape("square")
ball3.color("yellow")
ball3.penup()
ball3.goto(0, 0)
ball3.dx = 1
ball3.dy = 1
# Ball
ball4 = turtle.Turtle()
ball4.speed(0)
ball4.shape("square")
ball4.color("red")
ball4.penup()
ball4.goto(0, 0)
ball4.dx = -1
ball4.dy = 1
balls = [ball1] # balls = [ball1, ball2, ball3, ball4]
# Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Player A: 0 Player B: 0", align="center",
font=("Courier", 24, "normal"))
# Function
def paddle_a_up():
y = paddle_a.ycor()
y += 20
paddle_a.sety(y)
def paddle_a_down():
y = paddle_a.ycor()
y -= 20
paddle_a.sety(y)
def paddle_b_up():
y = paddle_b.ycor()
y += 20
paddle_b.sety(y)
def paddle_b_down():
y = paddle_b.ycor()
y -= 20
paddle_b.sety(y)
# Keyboard binding
wn.listen()
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
# Main game loop
while True:
wn.update()
for ball in balls:
# Move the ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
# Border checking
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
os.system("afplay bounce.wav&")
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
os.system("afplay bounce.wav&")
if ball.xcor() > 390:
ball.goto(0, 0)
ball.dx *= -1
score_a += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(
score_a, score_b), align="center", font=("Courier", 24, "normal"))
if ball.xcor() < -390:
ball.goto(0, 0)
ball.dx *= -1
score_b += 1
pen.clear()
pen.write("Player A: {} Player B: {}".format(
score_a, score_b), align="center", font=("Courier", 24, "normal"))
# Paddle and ball collisions
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 40 and ball.ycor() > paddle_b.ycor() - 40):
ball.setx(340)
ball.dx *= -1
#os.system("afplay bounce.wav&")
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 40 and ball.ycor() > paddle_a.ycor() - 40):
ball.setx(-340)
ball.dx *= -1
#os.system("afplay bounce.wav&")
```
#### File: DFA2020-2021-UCP/Inputs/Input.py
```python
class input():
#Not quite sure what is fully needed here
    # HIERARCHY
# input --------------------------------\
# / \-----\ \
# joystick trackpad button
# (velocity) (position)---\ (on/off) ---------\
# / / \ / \ \ / | \ \
# blobs aruco | | blobs aruco aruco | color keyboard
# keyboard mouse GPIO pin
# / \
# asdf arrows
#
"""
initializes an input
"""
def __init__(self):
pass
"""
calibrates the input if necessary
"""
def calibrate(self):
pass
"""
Boolean to ensure that sensor is working properly
"""
def is_working(self):
return True
"""
prints the input type to the console. to be overridden
"""
def __str__(self):
return "generic input"
``` |
{
"source": "Jhoselyn-Carballo/computacion_para_ingenieria",
"score": 4
} |
#### File: examen_2/p1/p1.py
```python
from tkinter import *
def invertirCadena(palabra):
res = ''
index = len(palabra)
while index >0:
ultimo_caracter = palabra[index-1]
res = res + ultimo_caracter
index=index-1
return res
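# Quick illustration of the reversal helper:
# invertirCadena("oso") -> "oso", invertirCadena("abc") -> "cba"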
palabra = input("ingrese una palabra:")
if palabra == invertirCadena(palabra):
print(f" la cadena {palabra} es Palindroma")
else:
print(f" la cadena {palabra} No es Palindroma")
# crear la ventana
window = Tk()
window.geometry('400x400')
label_text=Label(window, text="Enter a word:")
input_text=Entry(window)
label_result=Label(window, text="<<result>>")
button_res=Button(window, text="Validate", command=lambda: invertirCadena())
# coordenada de los components
label_text.place(x=10, y=10)
input_text.place(x=100, y=10)
label_result.place(x=100, y=50)
button_res.place(x=100, y=70)
window.mainloop()
```
#### File: examen_2/p2/p2.py
```python
from tkinter import *
def contador(accion, contador):
    # return the updated counter value according to the requested action
    if accion == 'countUp':
        contador = contador + 1
    elif accion == 'countDown':
        contador = contador - 1
    elif accion == 'reset':
        contador = 0
    return contador
``` |
{
"source": "jhoskin/psychic-broccoli",
"score": 2
} |
#### File: jhoskin/psychic-broccoli/model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
class TrumpBSModel(object):
def __init__(self, vocabularySize, config_param):
self.vocabularySize = vocabularySize
self.config = config_param
self._inputX = tf.placeholder(tf.int32, [self.config.batch_size, self.config.sequence_size], "InputsX")
self._inputTargetsY = tf.placeholder(tf.int32, [self.config.batch_size, self.config.sequence_size], "InputTargetsY")
#Converting Input in an Embedded form
with tf.device("/cpu:0"): #Tells Tensorflow what GPU to use specifically
embedding = tf.get_variable("embedding", [self.vocabularySize, self.config.embeddingSize])
embeddingLookedUp = tf.nn.embedding_lookup(embedding, self._inputX)
inputs = tf.split(1, self.config.sequence_size, embeddingLookedUp)
inputTensorsAsList = [tf.squeeze(input_, [1]) for input_ in inputs]
#Define Tensor RNN
singleRNNCell = rnn_cell.BasicRNNCell(self.config.hidden_size)
self.multilayerRNN = rnn_cell.MultiRNNCell([singleRNNCell] * self.config.num_layers)
self._initial_state = self.multilayerRNN.zero_state(self.config.batch_size, tf.float32)
#Defining Logits
hidden_layer_output, last_state = rnn.rnn(self.multilayerRNN, inputTensorsAsList, initial_state=self._initial_state)
hidden_layer_output = tf.reshape(tf.concat(1, hidden_layer_output), [-1, self.config.hidden_size])
self._logits = tf.nn.xw_plus_b(hidden_layer_output, tf.get_variable("softmax_w", [self.config.hidden_size, self.vocabularySize]), tf.get_variable("softmax_b", [self.vocabularySize]))
self._predictionSoftmax = tf.nn.softmax(self._logits)
#Define the loss
loss = seq2seq.sequence_loss_by_example([self._logits], [tf.reshape(self._inputTargetsY, [-1])], [tf.ones([self.config.batch_size * self.config.sequence_size])], self.vocabularySize)
self._cost = tf.div(tf.reduce_sum(loss), self.config.batch_size)
self._final_state = last_state
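    # Shape walkthrough (derived from the config fields used above, not stated
    # in the original source): _inputX holds [batch_size, sequence_size] token
    # ids; the embedding lookup yields [batch_size, sequence_size, embeddingSize],
    # which is split into sequence_size tensors of [batch_size, embeddingSize]
    # for the RNN; the concatenated hidden states are reshaped to
    # [batch_size * sequence_size, hidden_size] before the projection to
    # vocabularySize logits.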
def defineTensorGradientDescent(self):
self._learningRate = tf.Variable(0.0, trainable=False)
trainingVars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, trainingVars),self.config.max_grad_norm)
optimizer = tf.train.AdamOptimizer(self.learningRate)
self._tensorGradientDescentTrainingOperation = optimizer.apply_gradients(zip(grads, trainingVars))
def assign_learningRate(self, session, lr_value):
session.run(tf.assign(self.learningRate, lr_value))
@property
def inputX(self):
return self._inputX
@property
def inputTargetsY(self):
return self._inputTargetsY
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def learningRate(self):
return self._learningRate
@property
def gradient_desc_training_op(self):
return self._tensorGradientDescentTrainingOperation
@property
def predictionSoftmax(self):
return self._predictionSoftmax
@property
def logits(self):
return self._logits
```
#### File: jhoskin/psychic-broccoli/run.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model import TrumpBSModel
from reader import BSReader
import numpy as np
import tensorflow as tf
tf.flags.DEFINE_string("dp", "quote.txt", "The path point to the training and testing data")
tf.flags.DEFINE_integer("ckpt", 1, "Checkpoint after this many steps (default: 100)")
def main(unused_args):
if not tf.flags.FLAGS.dp:
raise ValueError("Must set --data_path to PTB data directory")
bs_reader = BSReader(tf.flags.FLAGS.dp,5)
bs_reader.print_data_info()
with tf.Graph().as_default(), tf.Session() as session:
config = HyperParameterConfig()
initializer = tf.random_uniform_initializer(-config.init_scale,config.init_scale)
with tf.variable_scope("TrumpBSQuoteModel", reuse=None, initializer=initializer):
training_model = TrumpBSModel(bs_reader.vocabularySize,config_param=config)
training_model.defineTensorGradientDescent()
with tf.variable_scope("TrumpBSQuoteModel", reuse=True, initializer=initializer):
eval_config = HyperParameterConfig()
#We only want to input one token at a time (not as batches) and get out the next token only
eval_config.batch_size = 1
eval_config.num_time_steps = 1
prediction_model = TrumpBSModel(bs_reader.vocabularySize, config_param=eval_config)
tf.initialize_all_variables().run()
for epochCount in range(config.total_max_epoch):
accumulated_costs = 0.0
accumulated_seq_count = 0
#current_model_state = training_model.initial_state.eval()
current_model_state = session.run(training_model.initial_state)
#current_model_state = tf.convert_to_tensor(training_model.initial_state) state = training_model.initial_state.eval()
#This can be removed and replaced by assigning just the initial learning rate
learning_rate_decay = config.lr_decay ** max(epochCount - config.initialLearningRate_max_epoch, 0.0)
training_model.assign_learningRate(session, config.learning_rate * learning_rate_decay)
lowest_perplexity = 2000
for sequence_counter, (x, y) in enumerate(bs_reader.generateXYPairs(bs_reader.get_training_data(), training_model.config.batch_size, training_model.config.sequence_size)):
feed_dict = {training_model._inputX: x, training_model._inputTargetsY: y, training_model.initial_state: current_model_state}
cost, current_model_state, _ = session.run([training_model.cost, training_model.final_state, training_model.gradient_desc_training_op], feed_dict)
accumulated_costs += cost
accumulated_seq_count += training_model.config.sequence_size
perplexity = np.exp(accumulated_costs / accumulated_seq_count)
if sequence_counter != 0 and sequence_counter % tf.flags.FLAGS.ckpt == 0:
print("Epoch %d, Perplexity: %.3f" % (epochCount, perplexity))
if perplexity < lowest_perplexity:
lowest_perplexity = perplexity
get_prediction(prediction_model, bs_reader, session, 500, ['T','h','e',' '])
session.close()
def get_prediction(model, bs_Reader, session, total_tokens, output_tokens = [' ']):
state = session.run(model.multilayerRNN.zero_state(1, tf.float32))
for token_count in range(total_tokens):
next_token = output_tokens[token_count]
input = np.full((model.config.batch_size, model.config.sequence_size), bs_Reader.token_to_id[next_token], dtype=np.int32)
feed = {model._inputX: input, model._initial_state:state}
[predictionSoftmax, state] = session.run([model._predictionSoftmax, model._final_state], feed)
if (len(output_tokens) -1) <= token_count:
accumulated_sum = np.cumsum(predictionSoftmax[0])
currentTokenId = (int(np.searchsorted(accumulated_sum, np.random.rand(1))))
next_token = bs_Reader.unique_tokens[currentTokenId]
output_tokens.append(next_token)
output_sentence = " "
for token in output_tokens:
output_sentence+=token
print('---- Prediction: \n %s \n----' % (output_sentence))
class HyperParameterConfig(object):
init_scale = 0.1
learning_rate = 0.002
max_grad_norm = 5
num_layers = 2
sequence_size = 50
batch_size = 50
hidden_size = 128
embeddingSize = 100
initialLearningRate_max_epoch = 1
total_max_epoch = 10000
keep_prob = 1.0
lr_decay = 0.97
if __name__ == "__main__":
tf.app.run()
``` |
{
"source": "jhosoume/pymfe",
"score": 3
} |
#### File: source/pages/run.py
```python
import os
import pandas as pd
from pymfe.mfe import MFE
AUTO_PAGES_PATH = "auto_pages"
TABLE_CONF = """
.. csv-table:: Meta-feature description
:file: meta_features_description.csv
:header-rows: 1
"""
NOTE_REL_SUB = """
.. note::
    Relative and Subsampling Landmarking are subcases of Landmarking. Thus, the
    Landmarking description is the same for the Relative and Subsampling groups."""
NOTE_OTHER_INFO = """
.. note::
More info about implementation can be found in API Documentation.
See :ref:`sphx_api`.
"""
TITLE = """
Meta-feature Description Table
==============================
The table shows, for each meta-feature, its group, a brief description, and a
paper reference. See examples of how to compute the meta-features in
:ref:`sphx_glr_auto_examples`.
"""
def meta_features_description():
"""Automatically create the meta-feature description file."""
data, _ = MFE.metafeature_description(sort_by_group=True,
sort_by_mtf=True,
print_table=False,
include_references=True)
if not os.path.exists(AUTO_PAGES_PATH):
os.makedirs(AUTO_PAGES_PATH)
col = data[0]
del data[0]
df = pd.DataFrame(data, columns=col)
df.to_csv(AUTO_PAGES_PATH+"/meta_features_description.csv", index=False)
notes = NOTE_REL_SUB + "\n" + NOTE_OTHER_INFO
table_str = TABLE_CONF
f = open(AUTO_PAGES_PATH+"/meta_features_description.rst", "w")
f.write(TITLE + '\n' + table_str + '\n' + notes)
f.close()
def main():
"""Main function of Run script."""
meta_features_description()
if __name__ == "__main__":
main()
```
#### File: pymfe/tests/test_architecture.py
```python
import pytest
import typing as t
import numpy as np
import sklearn.tree
from pymfe import _internal
from pymfe.mfe import MFE
from . import utils
GNAME = "framework-testing"
def summary_exception(values: np.ndarray,
raise_exception: bool = False) -> int:
"""Returns the length of ``values`` or raise a ValueError exception."""
if raise_exception:
raise ValueError("Summary exception raised.")
return len(values)
def summary_memory_error(values: np.ndarray,
raise_mem_err: bool = False) -> int:
"""Returns the length of ``values`` or raise a MemoryError exception."""
if raise_mem_err:
utils.raise_memory_error()
return len(values)
class MFETestClass:
"""Some generic methods for testing the MFE Framework."""
@classmethod
def postprocess_return_none(cls, **kwargs) -> None:
"""Postprocess: return None."""
return None
@classmethod
def postprocess_return_new_feature(
cls,
number_of_lists: int = 3,
**kwargs) -> t.Tuple[t.List, t.List, t.List]:
"""Postprocess: return Tuple of lists."""
return tuple(["test_value"] for _ in range(number_of_lists))
@classmethod
def postprocess_raise_exception(cls,
raise_exception: bool = False,
**kwargs) -> None:
"""Posprocess: raise exception."""
if raise_exception:
raise ValueError("Expected exception (postprocess).")
return None
@classmethod
def postprocess_memory_error(cls,
raise_mem_err: bool = False,
**kwargs) -> t.Optional[np.ndarray]:
"""Posprocess: memory error."""
if raise_mem_err:
return utils.raise_memory_error()
@classmethod
def precompute_return_empty(cls, **kwargs) -> t.Dict[str, t.Any]:
"""Precompute: return empty dictionary."""
precomp_vals = {}
return precomp_vals
@classmethod
def precompute_return_something(cls, **kwargs) -> t.Dict[str, t.Any]:
"""Precompute: return empty dictionary."""
precomp_vals = {
"test_param_1": 0,
"test_param_2": "euclidean",
"test_param_3": list,
"test_param_4": abs,
}
return precomp_vals
@classmethod
def precompute_raise_exception(cls,
raise_exception: bool = False,
**kwargs) -> t.Dict[str, t.Any]:
"""Precompute: raise exception."""
precomp_vals = {}
if raise_exception:
raise ValueError("Expected exception (precompute).")
return precomp_vals
@classmethod
def precompute_memory_error(cls,
raise_mem_err: bool = False,
**kwargs) -> None:
"""Precompute: memory error."""
precomp_vals = {}
if raise_mem_err:
precomp_vals["huge_array"] = utils.raise_memory_error()
return precomp_vals
@classmethod
def ft_valid_number(cls, X: np.ndarray, y: np.ndarray) -> float:
"""Metafeature: float type."""
return 0.0
@classmethod
def ft_valid_array(cls, X: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Metafeature: float type."""
return np.zeros(5)
@classmethod
def ft_raise_exception(cls, X: np.ndarray, y: np.ndarray,
                           raise_exception: bool = False) -> float:
"""Metafeature: float type."""
if raise_exception:
raise ValueError("Expected exception (feature).")
return -1.0
@classmethod
def ft_memory_error(cls,
raise_mem_err: bool = False,
**kwargs) -> np.ndarray:
"""Metafeature: memory error."""
if raise_mem_err:
return utils.raise_memory_error()
return np.array([1, 2, 3])
class TestArchitecture:
"""Tests for the framework architecture."""
def test_summary_valid1(self):
vals = np.arange(5)
res = _internal.summarize(features=vals,
callable_sum=summary_exception)
assert res == len(vals)
def test_summary_valid2(self):
vals = np.arange(5)
res = _internal.summarize(features=vals,
callable_sum=summary_memory_error)
assert res == len(vals)
def test_summary_invalid1(self):
res = _internal.summarize(features=np.arange(5),
callable_sum=summary_exception,
callable_args={"raise_exception": True})
assert np.isnan(res)
def test_summary_invalid2(self):
res = _internal.summarize(features=np.arange(5),
callable_sum=summary_memory_error,
callable_args={"raise_mem_err": True})
assert np.isnan(res)
def test_postprocessing_valid(self):
"""Test valid postprocessing and its automatic detection."""
results = [], [], []
_internal.post_processing(results=results,
groups=tuple(),
custom_class_=MFETestClass)
assert all(map(lambda l: len(l) > 0, results))
def test_preprocessing_valid(self):
"""Test valid precomputation and its automatic detection."""
precomp_args = _internal.process_precomp_groups(
precomp_groups=tuple(), groups=tuple(), custom_class_=MFETestClass)
assert len(precomp_args) > 0
def test_feature_detection(self):
"""Test automatic dectection of metafeature extraction method."""
name, mtd, groups = _internal.process_features(
features="all",
groups=tuple(),
suppress_warnings=True,
custom_class_=MFETestClass)
assert len(name) == 4 and len(mtd) == 4 and len(groups) == 1
def test_get_groups(self):
model = MFE()
res = model.valid_groups()
assert (len(res) == len(_internal.VALID_GROUPS)
and not set(res).symmetric_difference(_internal.VALID_GROUPS))
def test_metafeature_description(self):
desc, _ = MFE.metafeature_description(print_table=False)
groups = [d[0] for d in desc]
assert len(set(groups)) == len(_internal.VALID_GROUPS)
desc, _ = MFE.metafeature_description(sort_by_group=True,
sort_by_mtf=True,
print_table=False,
include_references=True)
mtf = [d[1] for d in desc]
assert mtf[1][0] < mtf[-1][0]
desc = MFE.metafeature_description()
assert desc is None
def test_metafeature_description_exceptions(self):
"""Test metafeature description exceptions"""
with pytest.raises(TypeError):
MFE.metafeature_description(print_table="False")
with pytest.raises(TypeError):
MFE.metafeature_description(sort_by_mtf=1)
with pytest.raises(TypeError):
MFE.metafeature_description(sort_by_group=[True])
def test_default_alias_groups(self):
model = MFE(groups="default")
res = model.valid_groups()
assert (len(res) == len(_internal.VALID_GROUPS)
and not set(res).symmetric_difference(_internal.VALID_GROUPS))
model = MFE(groups=["default"])
res = model.valid_groups()
assert (len(res) == len(_internal.VALID_GROUPS)
and not set(res).symmetric_difference(_internal.VALID_GROUPS))
model = MFE(groups=["general", "default"])
res = model.valid_groups()
assert (len(res) == len(_internal.VALID_GROUPS)
and not set(res).symmetric_difference(_internal.VALID_GROUPS))
@pytest.mark.parametrize("groups, summary", [
("statistical", "all"),
("general", "all"),
("landmarking", "all"),
("relative", "all"),
("model-based", "all"),
("info-theory", "all"),
("statistical", ("mean", "sd")),
("general", ("mean", "sd")),
("landmarking", ("mean", "sd")),
("model-based", ("mean", "sd")),
("general", ("mean", "histogram")),
("landmarking", ("mean", "histogram")),
("model-based", ("mean", "histogram")),
("general", ("quantiles", "histogram")),
("landmarking", ("quantiles", "histogram")),
("model-based", ("quantiles", "histogram")),
(["general", "relative"], ("mean", "sd")),
(["general", "relative"], ("quantiles", "histogram")),
(["landmarking", "relative"], ("mean", "sd")),
(["landmarking", "relative"], ("quantiles", "histogram")),
(["statistical", "landmarking", "relative"], ("mean", "sd")),
("all", "all"),
])
def test_extract_metafeature_names_supervised(self, groups, summary):
"""Test .extract_metafeature_names method."""
X, y = utils.load_xy(0)
mfe = MFE(groups=groups, summary=summary)
mtf_names_1 = mfe.extract_metafeature_names(supervised=True)
mtf_names_2 = mfe.fit(X.values, y.values).extract(suppress_warnings=True)[0]
assert mtf_names_1 == tuple(mtf_names_2)
@pytest.mark.parametrize("groups, summary", [
("statistical", "all"),
("general", "all"),
("landmarking", "all"),
("relative", "all"),
("model-based", "all"),
("info-theory", "all"),
("statistical", ("mean", "sd")),
("general", ("mean", "sd")),
("landmarking", ("mean", "sd")),
("model-based", ("mean", "sd")),
("general", ("mean", "histogram")),
("landmarking", ("mean", "histogram")),
("model-based", ("mean", "histogram")),
("general", ("quantiles", "histogram")),
("landmarking", ("quantiles", "histogram")),
("model-based", ("quantiles", "histogram")),
(["general", "relative"], ("mean", "sd")),
(["general", "relative"], ("quantiles", "histogram")),
(["landmarking", "relative"], ("mean", "sd")),
(["landmarking", "relative"], ("quantiles", "histogram")),
(["statistical", "landmarking", "relative"], ("mean", "sd")),
("all", "all"),
])
def test_extract_metafeature_names_unsupervised_01(self, groups, summary):
"""Test .extract_metafeature_names method."""
X, _ = utils.load_xy(0)
mfe = MFE(groups=groups, summary=summary)
mtf_names_1 = mfe.extract_metafeature_names(supervised=False)
mtf_names_2 = mfe.fit(X.values).extract(suppress_warnings=True)[0]
assert mtf_names_1 == tuple(mtf_names_2)
@pytest.mark.parametrize("groups, summary", [
("general", "all"),
("statistical", ("mean", "sd")),
(["general", "relative"], ("mean", "sd")),
(["general", "relative"], ("quantiles", "histogram")),
(["landmarking", "relative"], ("mean", "sd")),
(["landmarking", "relative"], ("quantiles", "histogram")),
(["statistical", "landmarking", "relative"], ("mean", "sd")),
("all", "all"),
])
def test_extract_metafeature_names_unsupervised_02(self, groups, summary):
"""Test .extract_metafeature_names method."""
X, _ = utils.load_xy(0)
mfe = MFE(groups=groups, summary=summary)
mtf_names_1 = mfe.fit(X.values).extract(suppress_warnings=True)[0]
        # Note: by default, .extract_metafeature_names should check whether
# 'y' was fitted or not if .fit was called before. Therefore, here,
# supervised=True is expected to be ignored and behave like
# supervised=False.
mtf_names_2 = mfe.extract_metafeature_names(supervised=True)
mtf_names_3 = mfe.extract_metafeature_names(supervised=False)
assert tuple(mtf_names_1) == mtf_names_2 == mtf_names_3
@pytest.mark.parametrize("groups", [
"statistical",
"general",
"landmarking",
"relative",
"model-based",
"info-theory",
("statistical", "landmarking"),
("landmarking", "relative"),
("general", "model-based", "statistical"),
("statistical", "statistical"),
])
def test_parse_valid_metafeatures(self, groups):
"""Check the length of valid metafeatures per group."""
X, y = utils.load_xy(0)
mfe = MFE(groups="all",
summary=None,
lm_sample_frac=0.5,
random_state=1234)
mfe.fit(X.values, y.values)
res = mfe.extract()
target_mtf = mfe.valid_metafeatures(groups=groups)
names, _ = mfe.parse_by_group(groups, res)
assert not set(names).symmetric_difference(target_mtf)
def test_no_cat_transformation(self):
X, y = utils.load_xy(1)
mfe = MFE()
mfe.fit(X.values, y.values, transform_cat=None)
assert mfe._custom_args_ft["N"].size == 0
def test_gray_encoding_missing_value(self):
X, y = utils.load_xy(1)
mfe = MFE()
X = np.copy(X.values)
y = y.values
X[5, 0] = np.nan
with pytest.raises(ValueError):
mfe.fit(X, y, transform_cat="gray")
def test_one_hot_encoding_01(self):
X, y = utils.load_xy(1)
mfe = MFE()
mfe.fit(X.values, y.values, transform_cat="one-hot")
exp_value = np.sum([np.unique(attr).size - 1 for attr in X.values.T])
assert mfe._custom_args_ft["N"].shape[1] == exp_value
def test_one_hot_encoding_02(self):
X, y = utils.load_xy(1)
mfe = MFE()
mfe.fit(X.values, y.values, transform_cat="one-hot-full")
exp_value = np.sum([np.unique(attr).size for attr in X.values.T])
assert mfe._custom_args_ft["N"].shape[1] == exp_value
def test_one_hot_encoding_03(self):
X, y = utils.load_xy(2)
mfe = MFE()
mfe.fit(X.values, y.values, transform_cat="one-hot")
exp_value = X.values.shape[1]
assert mfe._custom_args_ft["N"].shape[1] == exp_value
def test_one_hot_encoding_04(self):
X, y = utils.load_xy(2)
mfe = MFE()
X = np.hstack((X.values, np.ones((y.size, 1), dtype=str)))
y = y.values
with pytest.raises(ValueError):
mfe.fit(X=X, y=y, transform_cat="one-hot")
@pytest.mark.parametrize("confidence", (0.95, 0.99))
def test_extract_with_confidence(self, confidence):
X, y = utils.load_xy(2)
mtf_names, mtf_vals, mtf_conf_int = MFE(
groups="all",
features=["mean", "best_node", "sil"],
random_state=1234).fit(
X=X.values, y=y.values, precomp_groups=None).extract_with_confidence(
sample_num=64,
return_avg_val=False,
confidence=confidence,
verbose=0)
in_range_prop = np.zeros(len(mtf_names), dtype=float)
for mtf_ind, cur_mtf_vals in enumerate(mtf_vals):
int_low, int_high = mtf_conf_int[mtf_ind, :]
in_range_prop[mtf_ind] = np.sum(np.logical_and(
int_low <= cur_mtf_vals, cur_mtf_vals <= int_high)) / len(cur_mtf_vals)
assert np.all(confidence - 0.05 <= in_range_prop)
def test_extract_with_confidence_invalid1(self):
with pytest.raises(TypeError):
MFE().extract_with_confidence()
def test_extract_with_confidence_invalid2(self):
X, y = utils.load_xy(2)
with pytest.raises(ValueError):
MFE().fit(
X.values, y.values).extract_with_confidence(confidence=-0.0001)
def test_extract_with_confidence_invalid3(self):
X, y = utils.load_xy(2)
with pytest.raises(ValueError):
MFE().fit(
X.values, y.values).extract_with_confidence(confidence=1.0001)
@pytest.mark.parametrize("return_avg_val", (True, False))
def test_extract_with_confidence_time(self, return_avg_val):
X, y = utils.load_xy(2)
res = MFE(
features=["mean", "nr_inst", "unknown"],
measure_time="avg").fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3,
return_avg_val=return_avg_val)
mtf_names, mtf_vals, mtf_time, mtf_conf_int = res
assert (len(mtf_names) == len(mtf_vals) == len(mtf_time) == len(mtf_conf_int))
def test_extract_with_confidence_multiple_conf_level(self):
X, y = utils.load_xy(2)
confidence = [0.8, 0.9, 0.7]
mtf_conf_int = MFE(
features=["mean", "nr_inst", "unknown"]).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=2,
confidence=confidence)[2]
assert 2 * len(confidence) == mtf_conf_int.shape[1]
def test_extract_with_confidence_random_state1(self):
X, y = utils.load_xy(2)
_, mtf_vals_1, mtf_conf_int_1 = MFE(
features=["mean", "sd"], random_state=16).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
_, mtf_vals_2, mtf_conf_int_2 = MFE(
features=["mean", "sd"], random_state=16).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
assert (np.allclose(mtf_vals_1, mtf_vals_2) and
np.allclose(mtf_conf_int_1, mtf_conf_int_2))
def test_extract_with_confidence_random_state2(self):
X, y = utils.load_xy(2)
_, mtf_vals_1, mtf_conf_int_1 = MFE(
features=["mean", "sd"], random_state=16).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
_, mtf_vals_2, mtf_conf_int_2 = MFE(
features=["mean", "sd"], random_state=17).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
assert (np.any(~np.isclose(mtf_vals_1, mtf_vals_2)) and
np.any(~np.isclose(mtf_conf_int_1, mtf_conf_int_2)))
def test_extract_with_confidence_random_state3(self):
X, y = utils.load_xy(2)
np.random.seed(1234)
_, mtf_vals_1, mtf_conf_int_1 = MFE(
features=["mean", "sd"]).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
np.random.seed(1234)
_, mtf_vals_2, mtf_conf_int_2 = MFE(
features=["mean", "sd"]).fit(
X=X.values, y=y.values).extract_with_confidence(
sample_num=3)
assert (np.any(~np.isclose(mtf_vals_1, mtf_vals_2)) and
np.any(~np.isclose(mtf_conf_int_1, mtf_conf_int_2)))
def test_extract_from_model(self):
X, y = utils.load_xy(2)
model = sklearn.tree.DecisionTreeClassifier(random_state=1234).fit(
X.values, y.values)
mtf_name, mtf_vals = MFE(random_state=1234).extract_from_model(model)
extractor = MFE(groups="model-based", random_state=1234)
extractor.fit(X=X.values, y=y.values, transform_num=False)
mtf_name2, mtf_vals2 = extractor.extract()
assert (np.all(mtf_name == mtf_name2)
and np.allclose(mtf_vals, mtf_vals2))
def test_extract_from_model_invalid1(self):
X, y = utils.load_xy(2)
model = sklearn.tree.DecisionTreeRegressor().fit(X.values, y.values)
with pytest.raises(TypeError):
MFE().extract_from_model(model)
def test_extract_from_model_invalid2(self):
X, y = utils.load_xy(2)
model = sklearn.tree.DecisionTreeClassifier(random_state=1234).fit(
X.values, y.values)
with pytest.raises(KeyError):
MFE().extract_from_model(model, arguments_fit={"dt_model": model})
def test_extract_from_model_invalid3(self):
model = sklearn.tree.DecisionTreeClassifier()
with pytest.raises(RuntimeError):
MFE().extract_from_model(model)
def test_extract_from_model_invalid4(self):
X, y = utils.load_xy(2)
model = sklearn.tree.DecisionTreeClassifier().fit(X, y)
with pytest.raises(ValueError):
MFE(groups="general").extract_from_model(model)
class TestArchitectureWarnings:
def test_feature_warning1(self):
"""Test exception handling of feature extraction."""
name, mtd, groups = map(
np.asarray,
_internal.process_features(features="raise_exception",
groups=tuple(),
suppress_warnings=True,
custom_class_=MFETestClass))
with pytest.warns(RuntimeWarning):
_internal.get_feat_value(mtd_name=name[0],
mtd_args={
"X": np.array([]),
"y": np.ndarray([]),
"raise_exception": True
},
mtd_callable=mtd[0][1],
suppress_warnings=False)
def test_feature_warning2(self):
"""Test memory error handling of feature extraction."""
name, mtd, groups = map(
np.asarray,
_internal.process_features(features="memory_error",
groups=tuple(),
suppress_warnings=True,
custom_class_=MFETestClass))
with pytest.warns(RuntimeWarning):
_internal.get_feat_value(mtd_name=name[0],
mtd_args={
"X": np.array([]),
"y": np.ndarray([]),
"raise_mem_err": True
},
mtd_callable=mtd[0][1],
suppress_warnings=False)
def test_mem_err_precompute(self):
with pytest.warns(UserWarning):
_internal.process_precomp_groups(precomp_groups=tuple(),
groups=tuple(),
custom_class_=MFETestClass,
raise_mem_err=True)
def test_mem_err_postprocess(self):
"""Test memory error in postprocessing methods."""
results = [], [], []
with pytest.warns(UserWarning):
_internal.post_processing(results=results,
groups=tuple(),
custom_class_=MFETestClass,
raise_mem_err=True)
def test_postprocessing_invalid1(self):
"""Test exception handling in invalid postprocessing."""
results = [], [], []
with pytest.warns(UserWarning):
_internal.post_processing(results=results,
groups=tuple(),
custom_class_=MFETestClass,
raise_exception=True)
def test_postprocessing_invalid2(self):
"""Test incorrect return value in postprocessing methods."""
results = [], [], []
with pytest.warns(UserWarning):
_internal.post_processing(results=results,
groups=tuple(),
custom_class_=MFETestClass,
number_of_lists=2)
def test_preprocessing_invalid(self):
"""Test exception handling of precomputation."""
with pytest.warns(UserWarning):
_internal.process_precomp_groups(precomp_groups=tuple(),
groups=tuple(),
custom_class_=MFETestClass,
raise_exception=True)
```
#### File: pymfe/tests/test_model_based.py
```python
import pytest
from pymfe.mfe import MFE
from tests.utils import load_xy
import numpy as np
GNAME = "model-based"
class TestModelBased:
"""TestClass dedicated to test model-based metafeatures."""
@pytest.mark.parametrize(
"dt_id, ft_name, exp_value, precompute",
[
###################
# Mixed data
###################
(0, "leaves", 13, True),
(0, "leaves_branch", [4.6153846, 1.4455945], True),
(0, "leaves_corrob", [0.07692308, 0.058791243], True),
(0, "leaves_homo", [84.933334, 41.648125], True),
(0, "leaves_per_class", [0.5, 0.05439285], True),
(0, "nodes", 12, True),
(0, "nodes_per_attr", 1.0909090909090908, True),
(0, "nodes_per_inst", 0.24, True),
(0, "nodes_per_level", [2.0, 0.8944272], True),
(0, "nodes_repeated", [3.0, 2.828427], True),
(0, "tree_depth", [3.84, 1.6753109], True),
(0, "tree_imbalance", [0.16146065, 0.113601856], True),
(0, "tree_shape", [0.20192307, 0.1227767], True),
(0, "var_importance", [0.09090909, 0.1993217], True),
(0, "leaves", 13, False),
(0, "leaves_branch", [4.6153846, 1.4455945], False),
(0, "leaves_corrob", [0.07692308, 0.058791243], False),
(0, "leaves_homo", [84.933334, 41.648125], False),
(0, "leaves_per_class", [0.5, 0.05439285], False),
(0, "nodes", 12, False),
(0, "nodes_per_attr", 1.0909090909090908, False),
(0, "nodes_per_inst", 0.24, False),
(0, "nodes_per_level", [2.0, 0.8944272], False),
(0, "nodes_repeated", [3.0, 2.828427], False),
(0, "tree_depth", [3.84, 1.6753109], False),
(0, "tree_imbalance", [0.16146065, 0.113601856], False),
(0, "tree_shape", [0.20192307, 0.1227767], False),
(0, "var_importance", [0.09090909, 0.1993217], False),
###################
# Categorical data
###################
(1, "leaves", 57, True),
(1, "leaves_branch", [9.140351, 3.136414], True),
(1, "leaves_corrob", [0.01754386, 0.04135247], True),
(1, "leaves_homo", [18342.629, 45953.414], True),
(1, "leaves_per_class", [0.5, 0.11164843], True),
(1, "nodes", 56, True),
(1, "nodes_per_attr", 1.4736842105263157, True),
(1, "nodes_per_inst", 0.017521902377972465, True),
(1, "nodes_per_level", [3.5, 2.4221203], True),
(1, "nodes_repeated", [1.6969697, 0.88334763], True),
(1, "tree_depth", [8.230088, 3.305863], True),
(1, "tree_imbalance", [0.05483275, 0.092559], True),
(1, "tree_shape", [0.052245557, 0.09386974], True),
(1, "var_importance", [0.02631579, 0.06340529], True),
(1, "leaves", 57, False),
(1, "leaves_branch", [9.140351, 3.136414], False),
(1, "leaves_corrob", [0.01754386, 0.04135247], False),
(1, "leaves_homo", [18342.629, 45953.414], False),
(1, "leaves_per_class", [0.5, 0.11164843], False),
(1, "nodes", 56, False),
(1, "nodes_per_attr", 1.4736842105263157, False),
(1, "nodes_per_inst", 0.017521902377972465, False),
(1, "nodes_per_level", [3.5, 2.4221203], False),
(1, "nodes_repeated", [1.6969697, 0.88334763], False),
(1, "tree_depth", [8.230088, 3.305863], False),
(1, "tree_imbalance", [0.05483275, 0.092559], False),
(1, "tree_shape", [0.052245557, 0.09386974], False),
(1, "var_importance", [0.02631579, 0.06340529], False),
###################
# Numerical data
###################
(2, "leaves", 9, True),
(2, "leaves_branch", [3.7777777, 1.2018504], True),
(2, "leaves_corrob", [0.11111111, 0.15051763], True),
(2, "leaves_homo", [37.466667, 13.142298], True),
(2, "leaves_per_class", [0.33333334, 0.22222224], True),
(2, "nodes", 8, True),
(2, "nodes_per_attr", 2.0, True),
(2, "nodes_per_inst", 0.05333333333333334, True),
(2, "nodes_per_level", [1.6, 0.8944272], True),
(2, "nodes_repeated", [2.0, 1.1547005], True),
(2, "tree_depth", [3.0588236, 1.4348601], True),
(2, "tree_imbalance", [0.19491705, 0.1330071], True),
(2, "tree_shape", [0.27083334, 0.107119605], True),
(2, "var_importance", [0.24999999, 0.27823895], True),
(2, "leaves", 9, False),
(2, "leaves_branch", [3.7777777, 1.2018504], False),
(2, "leaves_corrob", [0.11111111, 0.15051763], False),
(2, "leaves_homo", [37.466667, 13.142298], False),
(2, "leaves_per_class", [0.33333334, 0.22222224], False),
(2, "nodes", 8, False),
(2, "nodes_per_attr", 2.0, False),
(2, "nodes_per_inst", 0.05333333333333334, False),
(2, "nodes_per_level", [1.6, 0.8944272], False),
(2, "nodes_repeated", [2.0, 1.1547005], False),
(2, "tree_depth", [3.0588236, 1.4348601], False),
(2, "tree_imbalance", [0.19491705, 0.1330071], False),
(2, "tree_shape", [0.27083334, 0.107119605], False),
(2, "var_importance", [0.24999999, 0.27823895], False),
])
def test_ft_methods_model_based_01(self, dt_id, ft_name, exp_value,
precompute):
"""Function to test each meta-feature belongs to model-based group.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], features=[ft_name], random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
if exp_value is np.nan:
assert value[0] is exp_value
else:
assert np.allclose(value, exp_value)
@pytest.mark.parametrize(
"dt_id, ft_name, exp_value, precompute",
[
###################
# Mixed data
###################
(0, "leaves", 7, True),
(0, "leaves_branch", [3.7142856, 1.7043363], True),
(0, "leaves_corrob", [0.14285713, 0.06575568], True),
(0, "leaves_homo", [32.266666, 15.709021], True),
(0, "leaves_per_class", [0.5, 0.30304578], True),
(0, "nodes", 6, True),
(0, "nodes_per_attr", 0.5454545454545454, True),
(0, "nodes_per_inst", 0.12, True),
(0, "nodes_per_level", [1.2, 0.4472136], True),
(0, "nodes_repeated", [3.0, 1.4142135], True),
(0, "tree_depth", [3.0769231, 1.7541162], True),
(0, "tree_imbalance", [0.19825712, 0.11291388], True),
(0, "tree_shape", [0.2857143, 0.16675964], True),
(0, "var_importance", [0.09090909, 0.2417293], True),
(0, "leaves", 7, False),
(0, "leaves_branch", [3.7142856, 1.7043363], False),
(0, "leaves_corrob", [0.14285713, 0.06575568], False),
(0, "leaves_homo", [32.266666, 15.709021], False),
(0, "leaves_per_class", [0.5, 0.30304578], False),
(0, "nodes", 6, False),
(0, "nodes_per_attr", 0.5454545454545454, False),
(0, "nodes_per_inst", 0.12, False),
(0, "nodes_per_level", [1.2, 0.4472136], False),
(0, "nodes_repeated", [3.0, 1.4142135], False),
(0, "tree_depth", [3.0769231, 1.7541162], False),
(0, "tree_imbalance", [0.19825712, 0.11291388], False),
(0, "tree_shape", [0.2857143, 0.16675964], False),
(0, "var_importance", [0.09090909, 0.2417293], False),
###################
# Categorical data
###################
(1, "leaves", 10, True),
(1, "leaves_branch", [4.3, 1.4944341], True),
(1, "leaves_corrob", [0.1, 0.08727827], True),
(1, "leaves_homo", [55.2, 18.552029], True),
(1, "leaves_per_class", [0.5, 0.2828427], True),
(1, "nodes", 9, True),
(1, "nodes_per_attr", 0.23684210526315788, True),
(1, "nodes_per_inst", 0.002816020025031289, True),
(1, "nodes_per_level", [1.8, 1.3038405], True),
(1, "nodes_repeated", [1.125, 0.35355338], True),
(1, "tree_depth", [3.5789473, 1.6437014], True),
(1, "tree_imbalance", [0.25800052, 0.0827512], True),
(1, "tree_shape", [0.225, 0.14493772], True),
(1, "var_importance", [0.02631579, 0.07277515], True),
(1, "leaves", 10, False),
(1, "leaves_branch", [4.3, 1.4944341], False),
(1, "leaves_corrob", [0.1, 0.08727827], False),
(1, "leaves_homo", [55.2, 18.552029], False),
(1, "leaves_per_class", [0.5, 0.2828427], False),
(1, "nodes", 9, False),
(1, "nodes_per_attr", 0.23684210526315788, False),
(1, "nodes_per_inst", 0.002816020025031289, False),
(1, "nodes_per_level", [1.8, 1.3038405], False),
(1, "nodes_repeated", [1.125, 0.35355338], False),
(1, "tree_depth", [3.5789473, 1.6437014], False),
(1, "tree_imbalance", [0.25800052, 0.0827512], False),
(1, "tree_shape", [0.225, 0.14493772], False),
(1, "var_importance", [0.02631579, 0.07277515], False),
###################
# Numerical data
###################
(2, "leaves", 6, True),
(2, "leaves_branch", [3.0, 1.0954452], True),
(2, "leaves_corrob", [0.16666667, 0.15927614], True),
(2, "leaves_homo", [18.0, 4.8989797], True),
(2, "leaves_per_class", [0.33333334, 0.28867516], True),
(2, "nodes", 5, True),
(2, "nodes_per_attr", 1.25, True),
(2, "nodes_per_inst", 0.03333333333333333, True),
(2, "nodes_per_level", [1.25, 0.5], True),
(2, "nodes_repeated", [2.5, 0.70710677], True),
(2, "tree_depth", [2.3636363, 1.2862914], True),
(2, "tree_imbalance", [0.2524478, 0.1236233], True),
(2, "tree_shape", [0.35416666, 0.094096586], True),
(2, "var_importance", [0.25, 0.31985083], True),
(2, "leaves", 6, False),
(2, "leaves_branch", [3.0, 1.0954452], False),
(2, "leaves_corrob", [0.16666667, 0.15927614], False),
(2, "leaves_homo", [18.0, 4.8989797], False),
(2, "leaves_per_class", [0.33333334, 0.28867516], False),
(2, "nodes", 5, False),
(2, "nodes_per_attr", 1.25, False),
(2, "nodes_per_inst", 0.03333333333333333, False),
(2, "nodes_per_level", [1.25, 0.5], False),
(2, "nodes_repeated", [2.5, 0.70710677], False),
(2, "tree_depth", [2.3636363, 1.2862914], False),
(2, "tree_imbalance", [0.2524478, 0.1236233], False),
(2, "tree_shape", [0.35416666, 0.094096586], False),
(2, "var_importance", [0.25, 0.31985083], False),
])
def test_ft_methods_model_based_02(self, dt_id, ft_name, exp_value,
precompute):
"""Function to test each meta-feature belongs to model-based group.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(
groups=[GNAME],
features=[ft_name],
hypparam_model_dt={
"max_depth": 5,
"min_samples_split": 10,
"criterion": "entropy",
},
random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
if precomp_group is None:
# Note: the precomputation of 'model-based' group is always
# forced due to the need of the 'dt_model' value
mfe._precomp_args_ft = {
"dt_model": mfe._precomp_args_ft.get("dt_model")
}
value = mfe.extract()[1]
if exp_value is np.nan:
assert value[0] is exp_value
else:
assert np.allclose(value, exp_value)
@pytest.mark.parametrize(
"dt_id, exp_value, precompute",
[
###################
# Mixed data
###################
(0, [
13, 4.6153846, 0.07692308, 84.933334, 0.5, 12,
1.0909090909090908, 0.24, 2.0, 3.0, 3.84, 0.16146065,
0.20192307, 0.09090909
], False),
(0, [
13, 4.6153846, 0.07692308, 84.933334, 0.5, 12,
1.0909090909090908, 0.24, 2.0, 3.0, 3.84, 0.16146065,
0.20192307, 0.09090909
], True),
###################
# Numerical data
###################
(2, [
9, 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0,
0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705,
0.27083334, 0.24999999
], False),
(2, [
9, 3.7777777, 0.11111111, 37.466667, 0.33333334, 8, 2.0,
0.05333333333333334, 1.6, 2.0, 3.0588236, 0.19491705,
0.27083334, 0.24999999
], True),
])
def test_integration_model_based(self, dt_id, exp_value, precompute):
"""Function to test all model-based meta-features.
"""
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], summary="mean", random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
assert np.allclose(value, exp_value, equal_nan=True)
```
#### File: pymfe/tests/test_output.py
```python
import pytest
import sklearn.tree
import pymfe._internal as _internal
from pymfe.mfe import MFE
from tests.utils import load_xy
GNAME = "mfe-output-details"
class TestOutput:
"""TestClass dedicated to test MFE output details."""
def test_output_lengths_1(self):
X, y = load_xy(0)
res = MFE().fit(X=X.values, y=y.values).extract()
vals, names = res
assert len(vals) == len(names)
@pytest.mark.parametrize("dt_id, measure_time", [
(0, "total"),
(0, "total_summ"),
(0, "avg"),
(0, "avg_summ"),
(2, "total"),
(2, "total_summ"),
(2, "avg"),
(2, "avg_summ"),
])
def test_output_lengths_2(self, dt_id, measure_time):
X, y = load_xy(dt_id)
res = MFE(measure_time=measure_time).fit(
X=X.values, y=y.values).extract()
vals, names, time = res
assert len(vals) == len(names) == len(time)
def test_output_lengths_3(self):
X, y = load_xy(0)
res = MFE(summary=None).fit(X=X.values, y=y.values).extract()
vals, names = res
assert len(vals) == len(names)
@pytest.mark.parametrize("dt_id, measure_time", [
(0, "total"),
(0, "total_summ"),
(0, "avg"),
(0, "avg_summ"),
(2, "total"),
(2, "total_summ"),
(2, "avg"),
(2, "avg_summ"),
])
def test_output_lengths_4(self, dt_id, measure_time):
X, y = load_xy(dt_id)
res = MFE(
summary=None, measure_time=measure_time).fit(
X=X.values, y=y.values).extract()
vals, names, time = res
assert len(vals) == len(names) == len(time)
def test_verbosity_2(self, capsys):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values).extract(verbose=0)
captured = capsys.readouterr().out
assert not captured
@pytest.mark.parametrize("verbosity, msg_expected", [
(0, False),
(1, True),
(2, True),
])
def test_verbosity_3(self, verbosity, msg_expected, capsys):
X, y = load_xy(0)
MFE().fit(X=X.values, y=y.values).extract(verbose=verbosity)
captured = capsys.readouterr().out
assert (not msg_expected) or captured
@pytest.mark.parametrize("verbosity, msg_expected", [
(0, False),
(1, True),
])
def test_verbosity_with_confidence(self, verbosity, msg_expected, capsys):
X, y = load_xy(2)
MFE().fit(X.values, y.values).extract_with_confidence(verbose=verbosity)
captured = capsys.readouterr().out
assert ((not msg_expected) and (not captured)) or (msg_expected and captured)
@pytest.mark.parametrize("verbosity, msg_expected", [
(0, False),
(1, True),
])
def test_verbosity_from_model(self, verbosity, msg_expected, capsys):
X, y = load_xy(2)
model = sklearn.tree.DecisionTreeClassifier().fit(X.values, y.values)
MFE().extract_from_model(model, verbose=verbosity)
captured = capsys.readouterr().out
assert ((not msg_expected) and (not captured)) or (msg_expected and captured)
```
#### File: pymfe/tests/utils.py
```python
import typing as t
import arff
import pandas as pd
import numpy as np
DATA_ID = [
"tests/test_datasets/mix_aids.arff",
"tests/test_datasets/cat_kr-vs-kp.arff",
"tests/test_datasets/num_Iris.arff",
]
DATA_ = [
None,
None,
None,
]
def load_xy(dt_id: int):
"""Returns a dataset loaded from arff file."""
if DATA_[dt_id] is None:
with open(DATA_ID[dt_id], "r") as data_file:
data = arff.load(data_file)
df = pd.DataFrame(data["data"])
y = df.iloc[:, -1]
X = df.iloc[:, :-1]
DATA_[dt_id] = (X, y)
return DATA_[dt_id]
def raise_memory_error(
size: t.Union[int, float] = 1e+20) -> np.ndarray:
"""Try to create a huge array, raising a MemoryError."""
return np.zeros(int(size), dtype=np.float64)
``` |
{
"source": "jhosteny/iglu-python-client",
"score": 3
} |
#### File: iglu-python-client/iglu_client/core.py
```python
import re
import jsonschema
# Regular expression to extract metadata from self-describing JSON
URI_REGEX = "^iglu:([a-zA-Z0-9\\-_.]+)/([a-zA-Z0-9\\-_]+)/([a-zA-Z0-9\\-_]+)/([1-9][0-9]*(?:-(?:0|[1-9][0-9]*)){2})$"
# Regular expression to extract all parts of SchemaVer: MODEL, REVISION,
# ADDITION
SCHEMAVER_REGEX = "^([1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)$"
# Let jsonschema know about the self-describing meta-schema
jsonschema.validators.meta_schemas[
"http://iglucentral.com/schemas/com.snowplowanalytics.self-desc/schema/jsonschema/1-0-0"
] = jsonschema.validators.Draft4Validator
class SchemaVer(object):
def __init__(self, model: int, revision: int, addition: int):
self.model = model
self.revision = revision
self.addition = addition
def as_string(self) -> str:
return "%d-%d-%d" % (self.model, self.revision, self.addition)
@staticmethod
def parse_schemaver(version: str):
m = re.match(SCHEMAVER_REGEX, version)
if not m:
raise IgluError(
"Schema version {version} is not a valid Iglu SchemaVer".format(
version=version
)
)
else:
model, revision, addition = m.groups()
return SchemaVer(int(model), int(revision), int(addition))
class SchemaKey(object):
def __init__(self, vendor: str, name: str, format: str, version: SchemaVer):
self.vendor = vendor
self.name = name
self.format = format
self.version = version
def as_uri(self) -> str:
return "iglu:{path}".format(path=self.as_path())
def as_path(self) -> str:
return "{vendor}/{name}/{format}/{version}".format(
vendor=self.vendor,
name=self.name,
format=self.format,
version=self.version.as_string(),
)
# Construct SchemaKey from URI
@staticmethod
def parse_key(key):
m = re.match(URI_REGEX, key)
if not m:
raise IgluError("Schema key [{key}] is not valid Iglu URI".format(key=key))
else:
vendor, name, format, version = m.groups()
schema_ver = SchemaVer.parse_schemaver(version)
return SchemaKey(vendor, name, format, schema_ver)
# Common Iglu error
class IgluError(Exception):
def __init__(self, message):
self.message = message
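# Minimal usage sketch (illustrative only; the URI below is a made-up example
# that merely matches URI_REGEX):
if __name__ == "__main__":
    example_uri = "iglu:com.acme/example_event/jsonschema/1-0-2"
    key = SchemaKey.parse_key(example_uri)
    print(key.as_path())            # com.acme/example_event/jsonschema/1-0-2
    print(key.version.as_string())  # 1-0-2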
```
#### File: iglu-python-client/iglu_client/self_describing_json.py
```python
import json
import jsonschema
from .core import IgluError, SchemaKey
from .resolver import Resolver
class SelfDescribingJson(object):
    # Constructor. To initialize from a JSON string, use the classmethod parse().
def __init__(self, schema_key: SchemaKey, data: dict):
self.schema_key = schema_key
self.data = data
self.isValid = False
    def to_json(self) -> str:
return json.dumps({"schema": self.schema_key.as_uri(), "data": self.data})
    def validate(self, resolver: Resolver) -> bool:
schema = resolver.lookup_schema(self.schema_key)
jsonschema.validate(instance=self.data, schema=schema)
self.isValid = True
return self.isValid
def valid(self, resolver: Resolver) -> bool:
try:
return self.isValid or self.validate(resolver)
except Exception:
return False
@classmethod
def parse(cls, payload_json: str):
payload = json.loads(payload_json)
schema_uri = payload.get("schema")
if not schema_uri:
raise IgluError(
"JSON instance is not self-describing (schema property is absent):\n {json}".format(
                    json=payload_json
)
)
data = payload.get("data")
if not data:
raise IgluError(
"JSON instance is not self-describing (data proprty is absent):\n {json}".format(
json=json.to_json()
)
)
schema_key = SchemaKey.parse_key(schema_uri)
return cls(schema_key, data)
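# Minimal usage sketch (illustrative only; the schema URI below is a made-up
# example, and validation is skipped because it requires a configured Resolver):
if __name__ == "__main__":
    payload = json.dumps({
        "schema": "iglu:com.acme/example_event/jsonschema/1-0-0",
        "data": {"value": 42},
    })
    sdj = SelfDescribingJson.parse(payload)
    print(sdj.schema_key.as_uri())  # iglu:com.acme/example_event/jsonschema/1-0-0
    print(sdj.to_json())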
``` |
{
"source": "jhostyk/atav",
"score": 3
} |
#### File: atav/utils/convert_bins.py
```python
import sys
import os
import logging
## This is a script to convert dpbin mysql table dump from 10k -> 1k block size
## Should be possible to change the new block size to any proper divisor of 10000,
## e.g. 1, 500, 1000, 2000, 2500, 5000, etc., in the collapse_bins() function
## Only tested with 1000 though
## Define some globals storing bin value info
current_bin_vals = {'a':0,'b':3,'c':10,'d':20,'e':30,'f':50,'g':60,'h':80,
'i':100,'j':150,'k':200,'l':250,'m':300,'n':400,'o':500,'p':600,'q':800,'r':1000}
new_bins = ['a','b','c','d','e','f','g']
def return_new_bin(val):
"""
return new bin value
"""
if val < 3:
return 'a'
elif val < 10:
return 'b'
elif val < 20:
return 'c'
elif val < 30:
return 'd'
elif val < 50:
return 'e'
elif val < 200:
return 'f'
else:
return 'g'
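# e.g. return_new_bin(0) -> 'a', return_new_bin(150) -> 'f', return_new_bin(1000) -> 'g'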
def expand_bins(bin_string):
"""
bin_string : a base36 encoded(for bin lengths) dp string
"""
char_bin = ''
expanded_string = ''
for i in range(0,len(bin_string)):
if bin_string[i] in current_bin_vals:
bin_length = convert_to_int(char_bin)
bin_val = bin_string[i]
new_bin_val = return_new_bin(current_bin_vals[bin_val])
expanded_string+=new_bin_val*bin_length
char_bin = ''
else:
char_bin+=bin_string[i]
return expanded_string
def collapse_bins(expanded_bin_string, new_block_size, block_id, sample_id):
""" Collapse an expanded bin string into individal blocks of a new size
expanded_bin_string : The original 10k bin_string with expanded bin lengths
i.e. each base has a bin value
new_block_size : The new block size to convert to
block_id : The original 10K block id for this bin string
sample_id : The sample id for this bin string
"""
out_bin = ''
total_bin_length = 0
prev_bin = expanded_bin_string[0]
counter = 0
bin_counter = 0
new_block_id = int(block_id) * 10
exclusion_str = radix36_encoding(new_block_size)+'a'
n = len(expanded_bin_string)
for i in range(0,n):
if bin_counter == new_block_size:
out_bin+=radix36_encoding(counter)+prev_bin
if out_bin != exclusion_str:
                print(sample_id+'\t'+str(new_block_id)+'\t'+out_bin)
out_bin = ''
total_bin_length += counter
counter = 0
bin_counter = 0
new_block_id += 1
if expanded_bin_string[i] != prev_bin:
out_bin+=radix36_encoding(counter)+prev_bin
total_bin_length += counter
counter = 0
counter+=1
prev_bin = expanded_bin_string[i]
bin_counter+=1
out_bin+=radix36_encoding(counter)+prev_bin
total_bin_length += counter
if total_bin_length != 10000:
logging.info(block_id+'\t'+sample_id+'\tfailed the conversion process,still present in output ')
if out_bin != exclusion_str:
        print(sample_id+'\t'+str(new_block_id)+'\t'+out_bin)
def return_bins(chrom):
""" Query db1 and return dp bins
chrom : chromosome to query
"""
    import MySQLdb  # local import: only needed when querying the database directly
    db = MySQLdb.connect(
        read_default_file="/home/rp2801/.my.cnf",
        read_default_group="clientdb1", database="WalDB")
    cur = db.cursor()
    cur.execute("SELECT * FROM DP_bins_%s limit 10" % chrom)
while True:
res = cur.fetchone()
yield res[0][0]
def convert_to_int(bin_len):
""" convert base36 back to int
"""
return int(bin_len,36)
def radix36_encoding(number):
""" returns base36 encoded value for a given decimal number
Refer to : http://stackoverflow.com/questions/1181919/python-base-36-encoding
number => numerical coverage value in decimal
"""
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
raise Exception("Negative bin length encoutered, bug in your code !")
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number,i = divmod(number,len(alphabet))
base36 = alphabet[i] + base36
return str(sign + base36)
def read_file(bin_dump):
"""
bin_dump : str ; path to the dp bin file
"""
with open(bin_dump,"r") as IN:
for line in IN:
sample_id,block_id,block_str = line.strip('\n').split('\t')
expanded_string = expand_bins(block_str)
if len(expanded_string) != 10000:
logging.info(sample_id+'\t'+block_id+'\tfailed_the_expansion(expanded bin string is not 10000 bases will not be in output)')
continue
collapse_bins(expanded_string,1000,block_id,sample_id)
def main():
""" Main Function
"""
if len(sys.argv) != 3:
print "RUN as : python/pypy convert_bins.py <bin_dump_file> <log_file> > <converted_bin_file>\n"
sys.exit(1)
## Command line arguments
bin_dump = sys.argv[1]
log_file = sys.argv[2]
## Set logging
logging.basicConfig(filename=log_file,level=logging.DEBUG,
filemode='w')
read_file(bin_dump)
if __name__ == '__main__':
main()
```
#### File: utils/log_analysis/log_parsing.py
```python
from pyspark import SparkContext
from pyspark.sql import SQLContext
import sys
import os
from subprocess import Popen
from dateutil import parser
sc = SparkContext()
sqlContext = SQLContext(sc)
def help():
print("Usage: spark-submit (...) log_parsing.py"+\
"\n\t --in <input-path>\n\t\t# Directory containing input files"+\
"\n\t --out <output-path>\n\t\t# Directory to output csv files"+\
"\n\t --intervals <intervals>\n\t\t# Comma-separated date list"+\
"\n\t[--input-s3] [--output-s3]"+\
"\n\t[--aws-s3-bucket <bucket-name>]"+\
"\n\t[--aws-access-key <key>]"+\
"\n\t[--aws-secret-key <key>]" , file=sys.stderr)
exit(-1)
inputDir = None
outputDir = None
intervals = None
s3Bucket = None
useS3 = False
inputS3 = False
outputS3 = False
awsParamCount = 0
i = 1
while i < len(sys.argv):
if sys.argv[i] == "--in":
inputDir = sys.argv[i+1]
i=i+1
elif sys.argv[i] == "--out":
outputDir = sys.argv[i+1]
i=i+1
elif sys.argv[i] == "--intervals":
intervalList = sys.argv[i+1].split(',')
intervals=[]
for j in range(0,len(intervalList),2):
intervals.append([intervalList[j],intervalList[j+1]])
i=i+1
elif sys.argv[i] == "--aws-s3-bucket":
s3Bucket = sys.argv[i+1]
awsParamCount=awsParamCount+1
i=i+1
elif sys.argv[i] == "--aws-access-key":
awsParamCount=awsParamCount+1
sc._jsc.hadoopConfiguration().set("fs.s3a.access.key", sys.argv[i+1])
i=i+1
elif sys.argv[i] == "--aws-secret-key":
awsParamCount=awsParamCount+1
sc._jsc.hadoopConfiguration().set("fs.s3a.secret.key", sys.argv[i+1])
i=i+1
elif sys.argv[i] == "--input-s3":
inputS3=useS3=True
elif sys.argv[i] == "--output-s3":
outputS3=useS3=True
else:
print("Invalid argument: "+sys.argv[i],file=sys.stderr)
help()
i=i+1
if inputDir == None or outputDir == None or intervals == None:
help()
if useS3 and awsParamCount != 3:
print("Missing some AWS S3 parameter!",file=sys.stderr)
help()
if useS3:
awsBucket = 's3a://'+s3Bucket
if inputS3:
if inputDir[0] == '/':
inputDir=inputDir[1:]
inputDir = os.path.join(awsBucket,inputDir)
else:
inputDir = 'file://'+inputDir
if outputS3:
if outputDir[0] == '/':
outputDir=outputDir[1:]
outputDir = os.path.join(awsBucket,outputDir)
else:
outputDir = 'file://'+outputDir
def save(data, name):
if isinstance(data, list):
df = sqlContext.createDataFrame(data)
else:
df = data
print(">>> Saving "+name)
outputFile = os.path.join(outputDir,name)
df.write \
.format('csv').mode('overwrite') \
.options(header='true') \
.save(outputFile)
if not outputS3:
Popen('hadoop fs -getmerge '+outputFile+' '+outputFile+'.csv && hadoop fs -rm -f -r '+outputFile,shell=True)
from pyspark.sql.types import *
logSchema = StructType([ \
StructField("user", StringType(), False), \
StructField("datetime", StringType(), False), \
StructField("db", StringType(), False), \
StructField("server", StringType(), False), \
StructField("command", StringType(), False), \
StructField("time", StringType(), False), \
StructField("size", StringType(), False) ])
functionsSchema = StructType([ \
StructField("function", StringType(), False), \
StructField("type", StringType(), False) ])
# Load command log file
awsLogFilepath = os.path.join(inputDir,'users.command.log')
df = sqlContext.read \
.format('csv') \
.options(header='false',delimiter='\t',mode='DROPMALFORMED') \
.load(awsLogFilepath, schema = logSchema).cache()
df.registerTempTable('aws_command_log')
# Load functions
awsLogFilepath = os.path.join(inputDir,'functions.txt')
df = sqlContext.read \
.format('csv') \
.options(header='false',delimiter=' ') \
.load(awsLogFilepath, schema = functionsSchema).cache()
df.registerTempTable('aws_function_types')
# Load user groups
awsLogFilepath = os.path.join(inputDir,'atav_users.csv')
df = sqlContext.read \
.format('csv') \
.options(header='true') \
.load(awsLogFilepath,inferSchema='true').cache()
df.registerTempTable('aws_user_groups')
# COMMAND ----------
# --- Auxiliary function declarations ---
from pyspark.sql import Row
# get functions to build dict() (python equivalent HashMap)
functionType = dict()
functions = sqlContext.sql("SELECT * FROM aws_function_types").collect()
for f in functions:
functionType[f.function] = f.type
# get user groups to build dict() (python equivalent HashMap)
userGroup = dict()
users = sqlContext.sql("SELECT * FROM aws_user_groups").collect()
for u in users:
userGroup[str(u.uni)] = str(u.group)
def get_user_group(user):
if user in userGroup:
return userGroup[user]
else:
return "n/a"
# extracts the function string from the whole command string
def extractFunction(x):
try:
words = str(x).split(" ")
for word in words:
if word in functionType:
return word
return "notfound"
except:
return "invalid"
# register function to be used in SparkSQL
from pyspark.sql.types import StringType
sqlContext.registerFunction("extract_function",extractFunction,StringType())
# checks if a given datetime string is inside a given interval
def date_in_interval(datetimestr,start,end):
try:
dateObj = parser.parse(str(datetimestr))
date = str(dateObj.date())
return (start <= date and date <= end)
except:
return False #probably an invalid format, so just return false
# register function to be used in SparkSQL
from pyspark.sql.types import BooleanType
sqlContext.registerFunction("date_in_interval",date_in_interval,BooleanType())
# extracts time in seconds from time string
def get_time(timestr):
try:
return int(timestr.split(" ")[0])
except:
return 0
# register function to be used in SparkSQL
from pyspark.sql.types import IntegerType
sqlContext.registerFunction("get_time",get_time,IntegerType())
timeframes = ["[0,10min[","[10min,1h[","[1h,6h[","[6h,12h[","[12h,1d[","[1d,2d[","[2d+"]
def get_timeframe(time):
if time < 10*60:
return 0
elif time < 3600:
return 1
elif time < 6*3600:
return 2
elif time < 12*3600:
return 3
elif time < 24*3600:
return 4
elif time < 2*24*3600:
return 5
else:
return 6
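# e.g. get_timeframe(3 * 3600) -> 2, which indexes timeframes[2] == "[1h,6h["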
# Workaround to allow enum usage
def enum(l):
return zip(range(len(l)),l)
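# (For a list this yields the same (index, item) pairs as the built-in enumerate(l).)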
### Usage and time analysis
print(">>> Running function usage/runtime analysis")
# textFile = sc.textFile("/FileStore/tables/x27jm5321465339702253/users_command-ae26f.log")
functionUsageData = []
functionTimeData = sc.emptyRDD().cache()
functionCountData = [] # Used at further analysis
# functionCountData[<interval-id>][<function-name>] has the number of times such function was used in such interval
for inx,interval in enum(intervals):
#get DataFrame for the interval being analyzed
df = sqlContext.sql("SELECT user, extract_function(command) fun, get_time(time) time FROM aws_command_log") \
.where("date_in_interval(datetime,'"+interval[0]+"','"+interval[1]+"')") \
.drop("datetime")
# Workaround for Spark 2.0 preview
# Switch back to udf ASAP
# df = sqlContext.sql("SELECT * FROM aws_command_log")
# r3 = df.rdd.filter(lambda r: r.user not in userGroup)
# r = df.rdd.filter(lambda r: date_in_interval(r.datetime,interval[0],interval[1]))\
# .map(lambda r: Row(user=r.user,fun=extractFunction(r.command),time=get_time(r.time))).cache()
r = df.rdd
intervalstr = interval[0]+" to "+interval[1]
# Function Usage Analysis
functionUsagePairs = r.map(lambda l: (l.fun,1)).reduceByKey(lambda x,y : x+y)
functionCountData.append(functionUsagePairs.collectAsMap())
newRows = functionUsagePairs.map(lambda t: Row( series=intervalstr, function=t[0], value=t[1])).collect()
functionUsageData.extend(newRows)
# Timeframe Analysis
funTimeframePairs = r.map(lambda l: ((l.fun,get_timeframe(l.time),get_user_group(l.user)),(1,l.time))).reduceByKey(lambda x,y: (x[0]+y[0],x[1]+y[1]) )
newRows = funTimeframePairs.map(lambda t: \
Row(interval=intervalstr,interval_id=inx,timeframe_id=t[0][1],timeframe=timeframes[t[0][1]], \
function=t[0][0], count=t[1][0],cpu_time=t[1][1],group=t[0][2]))
functionTimeData = functionTimeData.union(newRows)
# CPU Time Analysis
cpuTimePairs = r.map(lambda l: (get_user_group(l.user),l.time)).reduceByKey(lambda x,y : x+y)
# Create DataFrame for easy ad-hoc analysis
functionTimeDF = functionTimeData.toDF()
save(functionUsageData,'function_usage_data')
# Check function timeframes for a specific interval
save(functionTimeDF.orderBy("timeframe_id"),'function_time_data')
l=cpuTimePairs.map(lambda t: (t[0],t[1]/3600)).collect()
l.sort()
print(l)  # total CPU hours per user group
### Options analysis
print(">>> Running options analysis")
def createFunOptPairs(command):
try:
fun = extractFunction(command)
words = str(command).split(" ")
l = []
for word in words:
if word[:2] == "--" and word != fun:
l.append((fun,word))
return l
except:
return []
# for each interval ...
funOptData = sc.emptyRDD().cache()
for inx,interval in enum(intervals):
# ... map-reduce function-options pairs
df = sqlContext.sql("SELECT * FROM aws_command_log")
r = df.rdd.filter(lambda r: date_in_interval(r.datetime,interval[0],interval[1])) \
.flatMap(lambda r : createFunOptPairs(r.command)) \
.map(lambda t : (t,1)).reduceByKey(lambda x,y : x+y) \
.sortBy(lambda tt : (tt[0][0],-tt[1])).cache()
intervalstr = interval[0]+" to "+interval[1]
# ... then generate rows for DF
r1 = r.map(lambda t: \
Row(interval=intervalstr, interval_id=inx, function_name=t[0][0], function_type=functionType[t[0][0]], \
option=t[0][1], value=(t[1]*100.0)/functionCountData[inx][t[0][0]]))
funOptData = funOptData.union(r1)
# Create DataFrame for easy ad-hoc analysis
funOptDF = funOptData.toDF()
save(funOptDF,'function_option_data')
``` |
{
"source": "jhouck/mne-python",
"score": 3
} |
#### File: tutorials/forward/20_source_alignment.py
```python
import os.path as op
import numpy as np
import nibabel as nib
from scipy import linalg
import mne
from mne.io.constants import FIFF
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
raw = mne.io.read_raw_fif(raw_fname)
trans = mne.read_trans(trans_fname)
src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',
'sample-oct-6-src.fif'))
# Load the T1 file and change the header information to the correct units
t1w = nib.load(op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz'))
t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)
t1w.header['xyzt_units'] = np.array(10, dtype='uint8')
t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)
# %%
# .. raw:: html
#
# <style>
# .pink {color:DarkSalmon; font-weight:bold}
# .blue {color:DeepSkyBlue; font-weight:bold}
# .gray {color:Gray; font-weight:bold}
# .magenta {color:Magenta; font-weight:bold}
# .purple {color:Indigo; font-weight:bold}
# .green {color:LimeGreen; font-weight:bold}
# .red {color:Red; font-weight:bold}
# </style>
#
# .. role:: pink
# .. role:: blue
# .. role:: gray
# .. role:: magenta
# .. role:: purple
# .. role:: green
# .. role:: red
#
#
# Understanding coordinate frames
# -------------------------------
# For M/EEG source imaging, there are three **coordinate frames** that must be
# brought into alignment using two 3D `transformation matrices <wiki_xform_>`_
# that define how to rotate and translate points in one coordinate frame
# to their equivalent locations in another. The three main coordinate frames
# are:
#
# * :blue:`"meg"`: the coordinate frame for the physical locations of MEG
# sensors
# * :gray:`"mri"`: the coordinate frame for MRI images, and scalp/skull/brain
# surfaces derived from the MRI images
# * :pink:`"head"`: the coordinate frame for digitized sensor locations and
# scalp landmarks ("fiducials")
#
#
# Each of these are described in more detail in the next section.
#
# A good way to start visualizing these coordinate frames is to use the
# `mne.viz.plot_alignment` function, which is used for creating or inspecting
# the transformations that bring these coordinate frames into alignment, and
# displaying the resulting alignment of EEG sensors, MEG sensors, brain
# sources, and conductor models. If you provide ``subjects_dir`` and
# ``subject`` parameters, the function automatically loads the subject's
# Freesurfer MRI surfaces. Important for our purposes, passing
# ``show_axes=True`` to `~mne.viz.plot_alignment` will draw the origin of each
# coordinate frame in a different color, with axes indicated by different sized
# arrows:
#
# * shortest arrow: (**R**)ight / X
# * medium arrow: forward / (**A**)nterior / Y
# * longest arrow: up / (**S**)uperior / Z
#
# Note that all three coordinate systems are **RAS** coordinate frames and
# hence are also `right-handed`_ coordinate systems. Finally, note that the
# ``coord_frame`` parameter sets which coordinate frame the camera
# should initially be aligned with. Let's have a look:
fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
subjects_dir=subjects_dir, surfaces='head-dense',
show_axes=True, dig=True, eeg=[], meg='sensors',
coord_frame='meg', mri_fiducials='estimated')
mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.))
print('Distance from head origin to MEG origin: %0.1f mm'
% (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))
print('Distance from head origin to MRI origin: %0.1f mm'
% (1000 * np.linalg.norm(trans['trans'][:3, 3])))
dists = mne.dig_mri_distances(raw.info, trans, 'sample',
subjects_dir=subjects_dir)
print('Distance from %s digitized points to head surface: %0.1f mm'
% (len(dists), 1000 * np.mean(dists)))
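# %%
# As a quick illustration (a minimal sketch, not needed for the rest of the
# tutorial): both transforms above are 4x4 homogeneous matrices, so chaining
# device -> head -> MRI is just a matrix product.
dev_head_t = raw.info['dev_head_t']['trans']   # MEG device -> head
head_mri_t = trans['trans']                    # head -> MRI
dev_mri_t = head_mri_t @ dev_head_t            # MEG device -> MRI
print('MEG device origin in MRI coordinates (m): %s' % dev_mri_t[:3, 3])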
# %%
# Coordinate frame definitions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# 1. Neuromag/Elekta/MEGIN head coordinate frame ("head", :pink:`pink axes`)
# The head coordinate frame is defined through the coordinates of
# anatomical landmarks on the subject's head: usually the Nasion (`NAS`_),
# and the left and right preauricular points (`LPA`_ and `RPA`_).
# Different MEG manufacturers may have different definitions of the head
# coordinate frame. A good overview can be seen in the
# `FieldTrip FAQ on coordinate systems`_.
#
# For Neuromag/Elekta/MEGIN, the head coordinate frame is defined by the
# intersection of
#
# 1. the line between the LPA (:red:`red sphere`) and RPA
# (:purple:`purple sphere`), and
#    2. the line perpendicular to this LPA-RPA line that goes through
# the Nasion (:green:`green sphere`).
#
# The axes are oriented as **X** originโRPA, **Y** originโNAS,
# **Z** originโupward (orthogonal to X and Y).
#
# .. note:: The required 3D coordinates for defining the head coordinate
# frame (NAS, LPA, RPA) are measured at a stage separate from
# the MEG data recording. There exist numerous devices to
# perform such measurements, usually called "digitizers". For
# example, see the devices by the company `Polhemus`_.
#
# 2. MEG device coordinate frame ("meg", :blue:`blue axes`)
# The MEG device coordinate frame is defined by the respective MEG
# manufacturers. All MEG data is acquired with respect to this coordinate
# frame. To account for the anatomy and position of the subject's head, we
# use so-called head position indicator (HPI) coils. The HPI coils are
# placed at known locations on the scalp of the subject and emit
# high-frequency magnetic fields used to coregister the head coordinate
# frame with the device coordinate frame.
#
# From the Neuromag/Elekta/MEGIN user manual:
#
# The origin of the device coordinate system is located at the center
# of the posterior spherical section of the helmet with X axis going
# from left to right and Y axis pointing front. The Z axis is, again
# normal to the plane with positive direction up.
#
# .. note:: The HPI coils are shown as :magenta:`magenta spheres`.
# Coregistration happens at the beginning of the recording and
# the headโmeg transformation matrix is stored in
# ``raw.info['dev_head_t']``.
#
# 3. MRI coordinate frame ("mri", :gray:`gray axes`)
# Defined by Freesurfer, the "MRI surface RAS" coordinate frame has its
# origin at the center of a 256ร256ร256 1mm anisotropic volume (though the
# center may not correspond to the anatomical center of the subject's
# head).
#
# .. note:: We typically align the MRI coordinate frame to the head
# coordinate frame through a
# `rotation and translation matrix <wiki_xform_>`_,
# that we refer to in MNE as ``trans``.
#
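# As a brief aside (a minimal sketch): the ``trans`` loaded above is an MNE
# ``Transform`` object wrapping a 4x4 matrix (accessible as ``trans['trans']``);
# printing it shows its source and destination coordinate frames:
print(trans)

# %%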
# A bad example
# ^^^^^^^^^^^^^
# Let's try using `~mne.viz.plot_alignment` with ``trans=None``, which
# (incorrectly!) equates the MRI and head coordinate frames.
mne.viz.plot_alignment(raw.info, trans=None, subject='sample', src=src,
subjects_dir=subjects_dir, dig=True,
surfaces=['head-dense', 'white'], coord_frame='meg')
# %%
# A good example
# ^^^^^^^^^^^^^^
# Here is the same plot, this time with the ``trans`` properly defined
# (using a precomputed transformation matrix).
mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',
src=src, subjects_dir=subjects_dir, dig=True,
surfaces=['head-dense', 'white'], coord_frame='meg')
# %%
# Visualizing the transformations
# -------------------------------
# Let's visualize these coordinate frames using just the scalp surface; this
# will make it easier to see their relative orientations. To do this we'll
# first load the Freesurfer scalp surface, then apply a few different
# transforms to it. In addition to the three coordinate frames discussed above,
# we'll also show the "mri_voxel" coordinate frame. Unlike MRI Surface RAS,
# "mri_voxel" has its origin in the corner of the volume (the left-most,
# posterior-most coordinate on the inferior-most MRI slice) instead of at the
# center of the volume. "mri_voxel" is also **not** an RAS coordinate system:
# rather, its XYZ directions are based on the acquisition order of the T1 image
# slices.
# The head surface is stored in "mri" coordinate frame
# (origin at center of volume, units=mm)
seghead_rr, seghead_tri = mne.read_surface(
op.join(subjects_dir, 'sample', 'surf', 'lh.seghead'))
# To put the scalp in the "head" coordinate frame, we apply the inverse of
# the precomputed `trans` (which maps head โ mri)
mri_to_head = linalg.inv(trans['trans'])
scalp_pts_in_head_coord = mne.transforms.apply_trans(
mri_to_head, seghead_rr, move=True)
# To put the scalp in the "meg" coordinate frame, we use the inverse of
# raw.info['dev_head_t']
head_to_meg = linalg.inv(raw.info['dev_head_t']['trans'])
scalp_pts_in_meg_coord = mne.transforms.apply_trans(
head_to_meg, scalp_pts_in_head_coord, move=True)
# The "mri_voxel"โ"mri" transform is embedded in the header of the T1 image
# file. We'll invert it and then apply it to the original `seghead_rr` points.
# No unit conversion necessary: this transform expects mm and the scalp surface
# is defined in mm.
vox_to_mri = t1_mgh.header.get_vox2ras_tkr()
mri_to_vox = linalg.inv(vox_to_mri)
scalp_points_in_vox = mne.transforms.apply_trans(
mri_to_vox, seghead_rr, move=True)
# %%
# Now that we've transformed all the points, let's plot them. We'll use the
# same colors used by `~mne.viz.plot_alignment` and use :green:`green` for the
# "mri_voxel" coordinate frame:
def add_head(renderer, points, color, opacity=0.95):
renderer.mesh(*points.T, triangles=seghead_tri, color=color,
opacity=opacity)
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(600, 600), bgcolor='w', scene=False)
add_head(renderer, seghead_rr, 'gray')
add_head(renderer, scalp_pts_in_meg_coord, 'blue')
add_head(renderer, scalp_pts_in_head_coord, 'pink')
add_head(renderer, scalp_points_in_vox, 'green')
mne.viz.set_3d_view(figure=renderer.figure, distance=800,
focalpoint=(0., 30., 30.), elevation=105, azimuth=180)
renderer.show()
# %%
# The relative orientations of the coordinate frames can be inferred by
# observing the direction of the subject's nose. Notice also how the origin of
# the :green:`mri_voxel` coordinate frame is in the corner of the volume
# (above, behind, and to the left of the subject), whereas the other three
# coordinate frames have their origin roughly in the center of the head.
#
# Example: MRI defacing
# ^^^^^^^^^^^^^^^^^^^^^
# For a real-world example of using these transforms, consider the task of
# defacing the MRI to preserve subject anonymity. If you know the points in
# the "head" coordinate frame (as you might if you're basing the defacing on
# digitized points) you would need to transform them into "mri" or "mri_voxel"
# in order to apply the blurring or smoothing operations to the MRI surfaces or
# images. Here's what that would look like (we'll use the nasion landmark as a
# representative example):
# Get the nasion:
nasion = [p for p in raw.info['dig'] if
p['kind'] == FIFF.FIFFV_POINT_CARDINAL and
p['ident'] == FIFF.FIFFV_POINT_NASION][0]
assert nasion['coord_frame'] == FIFF.FIFFV_COORD_HEAD
nasion = nasion['r'] # get just the XYZ values
# Transform it from head to MRI space (recall that `trans` is head โ mri)
nasion_mri = mne.transforms.apply_trans(trans, nasion, move=True)
# Then transform to voxel space, after converting from meters to millimeters
nasion_vox = mne.transforms.apply_trans(
mri_to_vox, nasion_mri * 1e3, move=True)
# Plot it to make sure the transforms worked
renderer = mne.viz.backends.renderer.create_3d_figure(
size=(400, 400), bgcolor='w', scene=False)
add_head(renderer, scalp_points_in_vox, 'green', opacity=1)
renderer.sphere(center=nasion_vox, color='orange', scale=10)
mne.viz.set_3d_view(figure=renderer.figure, distance=600.,
focalpoint=(0., 125., 250.), elevation=45, azimuth=180)
renderer.show()
# %%
# Defining the headโMRI ``trans`` using the GUI
# ---------------------------------------------
# You can try creating the headโMRI transform yourself using
# :func:`mne.gui.coregistration`.
#
# * First you must load the digitization data from the raw file
# (``Head Shape Source``). The MRI data is already loaded if you provide the
# ``subject`` and ``subjects_dir``. Toggle ``Always Show Head Points`` to see
# the digitization points.
# * To set the landmarks, toggle ``Edit`` radio button in ``MRI Fiducials``.
# * Set the landmarks by clicking the radio button (LPA, Nasion, RPA) and then
# clicking the corresponding point in the image.
# * After doing this for all the landmarks, toggle ``Lock`` radio button. You
# can omit outlier points, so that they don't interfere with the finetuning.
#
# .. note:: You can save the fiducials to a file and pass
# ``mri_fiducials=True`` to plot them in
# :func:`mne.viz.plot_alignment`. The fiducials are saved to the
# subject's bem folder by default.
# * Click ``Fit Head Shape``. This will align the digitization points to the
# head surface. Sometimes the fitting algorithm doesn't find the correct
# alignment immediately. You can try first fitting using LPA/RPA or fiducials
# and then align according to the digitization. You can also finetune
# manually with the controls on the right side of the panel.
# * Click ``Save As...`` (lower right corner of the panel), set the filename
# and read it with :func:`mne.read_trans`.
#
# For more information, see step by step instructions
# `in these slides
# <https://www.slideshare.net/mne-python/mnepython-coregistration>`_.
# Uncomment the following line to align the data yourself.
# mne.gui.coregistration(subject='sample', subjects_dir=subjects_dir)
# %%
# .. _plot_source_alignment_without_mri:
#
# Alignment without MRI
# ---------------------
# The surface alignments above are possible if you have the surfaces available
# from Freesurfer. :func:`mne.viz.plot_alignment` automatically searches for
# the correct surfaces from the provided ``subjects_dir``. Another option is
# to use a :ref:`spherical conductor model <eeg_sphere_model>`. It is
# passed through ``bem`` parameter.
sphere = mne.make_sphere_model(info=raw.info, r0='auto', head_radius='auto')
src = mne.setup_volume_source_space(sphere=sphere, pos=10.)
mne.viz.plot_alignment(
raw.info, eeg='projected', bem=sphere, src=src, dig=True,
surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'],
coord_frame='meg', show_axes=True)
# %%
# It is also possible to use :func:`mne.gui.coregistration`
# to warp a subject (usually ``fsaverage``) to subject digitization data, see
# `these slides
# <https://www.slideshare.net/mne-python/mnepython-scale-mri>`_.
#
# .. _right-handed: https://en.wikipedia.org/wiki/Right-hand_rule
# .. _wiki_xform: https://en.wikipedia.org/wiki/Transformation_matrix
# .. _NAS: https://en.wikipedia.org/wiki/Nasion
# .. _LPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _RPA: http://www.fieldtriptoolbox.org/faq/how_are_the_lpa_and_rpa_points_defined/ # noqa:E501
# .. _Polhemus: https://polhemus.com/scanning-digitizing/digitizing-products/
# .. _FieldTrip FAQ on coordinate systems: http://www.fieldtriptoolbox.org/faq/how_are_the_different_head_and_mri_coordinate_systems_defined/ # noqa:E501
``` |
{
"source": "jhoupt/adaptiveSFT",
"score": 2
} |
#### File: jhoupt/adaptiveSFT/adaptive_sft2.py
```python
import numpy as np
import pystan
import os.path
import pickle
from scipy.stats import lognorm
#########################
### Helper functions ###
#########################
def get_pr(intensity, target, range, posterior_samples, log=None) :
x = ((posterior_samples['intensity'] * posterior_samples['alpha']
        + posterior_samples['intensity']**2 * posterior_samples['alpha2'])
/ posterior_samples['mu'])
pr = np.mean(np.logical_and(x < target+range, x > target-range))
if log is None or log is False :
return(pr)
else :
return(np.log(pr))
######
def lognormalrace_pdf(x, m, psi, mu, sigmasq, log=None) :
sigma = np.sqrt(sigmasq)
g = lognorm.logpdf(x-psi, loc=mu[m], s=sigma[m], scale=np.exp(mu[m]))
G = 0
    for i in range(len(mu)) :
if i == m :
continue
else :
G = G + lognorm.logsf(x-psi, loc=mu[i], s=sigma[i],
scale=np.exp(mu[i]))
if log is None or log is False :
rval = np.exp(g + G)
rval[x<psi] = 0
else :
rval = g + G
rval[x < psi] = -np.inf
return(rval)
#######
#plognormalrace <- function(x, m, psi, mu, sigmasq) {
# px <- rep(NA, length(x))
# for(j in 1:length(x)) {
# px[j] <- lnrm_adjusted_integral(x[j], m, psi, mu, sigmasq, x[j+1]-x[j])
# }
# return(px)
#}
#######
#lnrm_adjusted_integral <- function(x, m, psi, mu, sigmasq, stepsize) {
# tryCatch({
# f <- integrate(dlognormalrace, lower=0, upper=x, m=m, psi=psi,
# mu=mu, sigmasq=sigmasq)$value
# }, error = function(e1) {
# tryCatch({
# f <- integrate(dlognormalrace, lower=0, upper=x+stepsize/2, m=m,
# psi=psi, mu=mu, sigmasq=sigmasqx)$value
# }, error = function(e2) {
# if (dlognormalrace(x-stepsize, m, psi, mu, sigmasq) == 0) {
# ff <- integrate(dlognormalrace, lower=0, upper=x+stepsize, m=2,
# psi=psi, mu=mu, sigmasq=sigmasq)$value
# ff <- ff/2
# } else {
# ff <- NaN
# }
# return(ff)
# })
# })
# return(f)
#}
#######
#dfp_ddm <- function(N, drift.1, drift.2, a, ter, sdv, architecture,
# stopping.rule, pmix=.5) {
## Function to generate rt and accuracy from DDM in DFP
#
# if (architecture == "COA") {
# channel12 <- simdiffT(N,a,drift.1+drift.2,sdv,ter)
# rt <- channel12$rt
# cr <- channel12$x
# } else {
# channel1 <- simdiffT(N,a,drift.1,sdv,ter)
# channel2 <- simdiffT(N,a,drift.2,sdv,ter)
# if (architecture == "PAR") {
# if (stopping.rule == "OR") {
# rt <- pmin(channel1$rt, channel2$rt)
# cr <- channel2$x
# cr[channel1$rt < channel2$rt] <-
# channel1$x[channel1$rt < channel2$rt]
# } else if (stopping.rule == "AND") {
# rt <- pmax(channel1$rt, channel2$rt)
# cr <- channel1$x & channel2$x
# }
# } else if (architecture == "SER") {
# if (stopping.rule == "OR") {
# channel.samp <- runif(N) < pmix
# rt <- channel2$rt
# rt[channel.samp] <- channel1$rt[channel.samp]
# cr <- channel2$x
# cr[channel.samp] <- channel1$x[channel.samp]
# } else if (stopping.rule == "AND") {
# rt <- channel1$rt + channel2$rt
# cr <- channel1$x & channel2$x
# }
# }
# }
# return(list(rt=rt, x=1*cr))
#}
######
#moc_ddm <- function(N, a, v, ter, sdv, intensity_levels) {
## Function to generate method of constant stimuli data from DDM
# intensity <- c()
# correct <- c()
# rt <- c()
# for ( i in intensity_levels ) {
# x <- simdiffT(N,a,i*v,sdv,ter)
# intensity <- c(intensity, rep(i, N))
# correct <- c(correct, x$x)
# rt <- c(rt, x$rt)
# }
# return(data.frame(intensity=intensity, rt=rt, correct=correct))
#}
######
#dataframe2stan <- function(dat) {
## Reformat data for Stan
# standat <- with(dat, list(N=dim(dat)[1], intensity=intensity,
# correct=correct, minRT=min(rt), rt=rt) )
# return(standat)
#}
#import pickle
#with open("temp_data.p", "rb") as f:
# mydata = pickle.load(f)
#
#####
def find_salience(dat, h_targ, l_targ, fit_model = None):
# dat is a dictionary with:
# 'N': total number of observations
# intensity[]: length N array-like containing stimulus intensity on
# each trial
# correct[]: length N array-like containing indicator of correct
# on each trial
# minRT: smallest observed RT
# rt[]: length N array-like containing response time on each trial
#
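    # A hypothetical minimal example of that layout (illustrative values only):
    #   dat = {'N': 3, 'intensity': [0.2, 0.5, 0.8], 'correct': [1, 0, 1],
    #          'rt': [0.61, 0.55, 0.48], 'minRT': 0.48}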
from scipy.special import logit
ML = False
if fit_model is None :
# Uncomment for quadratic
#init_dict = {'alpha': -.1, 'alpha2': 0, 'mu': 1.5,
# 'psi': .1*dat['minRT'], 'varZ': 1}
init_dict = {'slope': .1, 'midpoint': .5, 'mu': 1.5,
'psi': .1*dat['minRT'], 'varZ': 1}
if os.path.isfile("compiled_model.p"):
with open("compiled_model.p", "rb") as f:
sm = pickle.load(f)
else:
sm = pystan.StanModel(file="lnrm2a.stan")
#sm = pystan.StanModel(file="lnrm2.stan")
with open("compiled_model.p", "wb") as f:
pickle.dump(sm, f)
if not ML :
fit_model = sm.sampling(data=dat, init=[init_dict, init_dict,
init_dict, init_dict])
if not ML:
post_diff = fit_model.extract(pars=["mu", "slope", "midpoint", "psi",
"varZ"])
else :
post_diff = sm.optimizing(data=dat, init=init_dict)
slope = post_diff['slope']
midpoint = post_diff['midpoint']
l_targ_dist = logit(l_targ / 10.) / slope + midpoint
h_targ_dist = logit(h_targ / 10.) / slope + midpoint
rval = {}
if not ML:
rval['high'] = np.nanmean(h_targ_dist)
rval['high_var'] = np.var(h_targ_dist)
rval['low'] = np.nanmean(l_targ_dist)
rval['low_var'] = np.var(l_targ_dist)
rval['fit'] = fit_model
else :
rval['high'] = h_targ_dist
rval['low'] = l_targ_dist
rval['fit'] = post_diff
return(rval)
``` |
{
"source": "jhouser/houseoffun",
"score": 2
} |
#### File: app/migrations/0022_plugin_default_enabled.py
```python
from django.db import migrations, models
from api.app.models import Plugin
def update_threads_plugin(apps, schema_editor):
"""
"""
plugin = Plugin.objects.get(name="Threads")
plugin.default_enabled = True
plugin.save()
class Migration(migrations.Migration):
dependencies = [
('app', '0021_markdown_fiels_20180208_1947'),
]
operations = [
migrations.AddField(
model_name='plugin',
name='default_enabled',
field=models.BooleanField(default=False),
),
migrations.RunPython(update_threads_plugin),
]
```
#### File: unit/games/TestGames.py
```python
from django.test import TestCase
from api.app.models.games import *
class GamesTest(TestCase):
"""
Test model functionality for the Games model
"""
def setUp(self):
self.user = User.objects.create_user(username='test_user', email='<EMAIL>', password='<PASSWORD>')
self.game = Game.objects.create(
name='Sample of Fun',
abbreviation='SoF',
description='This is a sample game!',
game_master=self.user
)
def test_advance_to_registration(self):
# Draft progresses to registration, but has no other effects
self.assertEqual(Game.DRAFT, self.game.status)
self.game.next_status()
self.assertEqual(Game.REGISTRATION, self.game.status)
def test_revert_draft_fails(self):
# Draft cannot be reverted to a previous state
self.assertEqual(Game.DRAFT, self.game.status)
self.game.previous_status()
self.assertEqual(Game.DRAFT, self.game.status)
def test_revert_to_draft(self):
# Reverting should reset to DRAFT status and destroy all signups
self.game.status = Game.REGISTRATION
# This is technically invalid, a user shouldn't be able to sign up for their own game
signup = GameSignup.objects.create(user=self.user, game=self.game)
self.game.previous_status()
self.assertEqual(Game.DRAFT, self.game.status)
signup = GameSignup.objects.filter(pk=signup.id).first()
self.assertIsNone(signup)
def test_advance_to_pending(self):
# Advance to pending should only work if all registrations have been handled and should create characters
self.game.status = Game.REGISTRATION
signup = GameSignup.objects.create(user=self.user, game=self.game)
with self.assertRaises(ValidationError):
self.game.next_status()
signup.status = GameSignup.ACCEPTED
signup.save()
self.game.next_status()
self.assertEqual(Game.PENDING, self.game.status)
character = Character.objects.filter(game=self.game, owner=self.user).first()
self.assertIsNotNone(character)
def test_revert_to_registration(self):
# Reverting to registration should mark all characters deleted
self.game.status = Game.PENDING
character = Character.objects.create(game=self.game, owner=self.user)
self.game.previous_status()
character.refresh_from_db()
self.assertEqual(Game.REGISTRATION, self.game.status)
self.assertEqual(Character.DELETED, character.status)
def test_advance_to_running(self):
# Advancing to running should only work if all player characters have been approved
self.game.status = Game.PENDING
signup = GameSignup.objects.create(user=self.user, game=self.game)
signup.status = GameSignup.ACCEPTED
signup.save()
character = Character.objects.create(game=self.game, owner=self.user)
with self.assertRaises(ValidationError):
self.game.next_status()
character.status = Character.FINISHED
character.save()
self.game.next_status()
self.assertEqual(Game.RUNNING, self.game.status)
def test_get_players(self):
# Get players should return any user who signed up and was accepted
self.assertFalse(self.game.get_players())
signup = GameSignup.objects.create(user=self.user, game=self.game)
signup.status = GameSignup.REJECTED
signup.save()
self.assertFalse(self.game.get_players())
signup.status = GameSignup.ACCEPTED
signup.save()
self.assertIn(signup.user.id, self.game.get_players())
``` |
{
"source": "jhover/cafa4",
"score": 2
} |
#### File: cafa4/cafa4/quickgo.py
```python
import configparser
import logging
import requests
import sys
from io import StringIO
import pandas as pd
import pdpipe as pdp
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import subprocess
from bioservices.uniprot import QuickGo # to query online REST interface
from Bio import SeqIO # to parse uniprot.dat
class QuickGOPlugin(object):
'''
GENEPRODUCTDB GENEPRODUCTID SYMBOL QUALIFIER GOTERM GOASPECT ECO ID GOEVCODE REFERENCE WITH/FROM TAXONID ASSIGNEDBY ANNOTATIONEXTENSIONDATE
UniProtKB A4K2U9 YWHAB involved_in GO:0045744 P ECO:0000250 ISS GO_REF:0000024 UniProtKB:P31946 9601 UniProt 20160330
db proteinid gene goqual goterm goaspect ecoid goevidence goref withfrom taxonid assignby extdate
'''
def __init__(self, config):
self.log = logging.getLogger('QuickGOPlugin')
self.requestbase = "https://www.ebi.ac.uk/QuickGO/services/annotation"
self.qg = QuickGo()
self.config = config
    def get_df(self, dataframe):
        '''
        Takes a DataFrame with a 'tacc' column of UniProt accessions.
        Returns a Pandas DataFrame of QuickGO annotations for those entries.
        '''
        entries = dataframe['tacc'].unique().tolist()
        txt = self._query_entries(entries)
        sio = StringIO(txt)
        df = pd.read_table(sio,
                           names=['db', 'proteinid', 'gene', 'goqual', 'goterm', 'goaspect', 'ecoid',
                                  'goevidence', 'goref', 'withfrom', 'taxonid', 'assignby', 'extdate'],
                           comment='#', skiprows=1, index_col=False)
        return df
def _query_entries(self, entrylist):
self.log.debug("querying entry list: %s" % entrylist)
entrystr=','.join(entrylist)
self.log.debug("querying entry string: %s" % entrystr)
requestURL = "%s/downloadSearch?geneProductId=%s" % (self.requestbase, entrystr )
self.log.debug("RequestURL=%s"% requestURL )
r = requests.get(requestURL, headers={ "Accept" : "text/tsv"})
if not r.ok:
r.raise_for_status()
#sys.exit()
response = r.text
self.log.debug("response=%s" % response)
return response
def _query_taxon(self, taxon):
self.log.debug("querying taxon: %s" % taxon)
requestURL = "%s/downloadSearch?taxonId=%s" % (self.requestbase, taxon )
self.log.debug("RequestURL=%s"% requestURL )
r = requests.get(requestURL, headers={ "Accept" : "text/tsv"})
if not r.ok:
r.raise_for_status()
#sys.exit()
response = r.text
self.log.debug("response=%s" % response)
return response
def _query_bioservices_taxon(self, taxon):
pass
if __name__ == '__main__':
config = configparser.ConfigParser()
qg = QuickGOPlugin(config)
taxon='4577'
# entrylist = ['Q9CQV8', 'P35213', 'A4K2U9', 'P31946', 'Q4R572', 'P68250']
# out = qg._query_entries(entrylist)
out = qg._query_taxon(taxon)
sio = StringIO(out)
df = pd.read_table(sio,
names=['db','proteinid','gene','goqual','goterm','goaspect','ecoid',
'goevidence','goref','withfrom','taxonid','assignby','extdate' ],
skip_blank_lines=True,
comment='#',
skiprows=1,
index_col=False,
)
df.to_csv('%s.quickgo.csv' % taxon)
print(df)
``` |
{
"source": "jhover/cshl-work",
"score": 3
} |
#### File: cshl-work/cshlwork/genlatinlist.py
```python
import argparse
import logging
import traceback
import sys
def parse_latinnames(infile):
    filehandle = None
    try:
filehandle = open(infile, 'r')
for line in filehandle:
logging.debug(f"handling line {line}")
flist = line.split(",")
idx = flist[0]
tcode = flist[1]
kingdom = flist[2]
taxid = flist[3]
rawname = flist[4]
commonname = flist[5]
sflist = rawname.split()
if len(sflist) < 2:
pass
elif len(sflist) > 2:
pass
else:
fixed = rawname
logging.debug(f"raw: {rawname}")
logging.debug(f"fixed: {fixed} ")
print(fixed)
except Exception as e:
traceback.print_exc(file=sys.stdout)
finally:
if filehandle is not None:
filehandle.close()
logging.debug("done")
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('infile',
metavar='infile',
type=str,
help='Pandas .CSV')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
logging.info("dendrotool...")
parse_latinnames(args.infile)
```
#### File: cshl-work/cshlwork/gotool.py
```python
import argparse
from configparser import ConfigParser
import logging
import os
import sys
gitpath=os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
from cshlwork.ontology import GeneOntology, GOMatrix
OBOFILE=os.path.expanduser('~/data/go/go.obo')
GAFFILE=os.path.expanduser('~/data/go/zfin.gaf')
CONFFILE=os.path.expanduser('~/git/cshl-work/etc/gotool.conf')
def gaf_to_df():
"""
db dbobji dbobjsym goterm dbref goevidence withfrom goaspect dbobjnam dbobjtype taxonid date assignedby annotext geneprodid
ZFIN ZDB-GENE-070410-141 zgc:163098 GO:0003723 ZFIN:ZDB-PUB-170525-1 IEA UniRule:UR000414619 F zgc:163098 protein taxon:7955 20190914 UniProt UniProtKB:A0A2R8QLY
"""
pass
def gocat(gotermlist):
    filehandle = open(OBOFILE, 'r')
lines = filehandle.readlines()
print("read in %d lines" % len(lines))
print("got arg %s" % gotermlist)
for gt in gotermlist:
found = False
for line in lines:
if line.startswith("id: %s" % gt):
found = True
print('[Term]')
#print(line.strip())
if line.startswith("[Term]"):
found = False
if found and not line.startswith("[Term]"):
print(line.strip())
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('-b', '--obofile',
action="store",
dest='obofile',
default=OBOFILE,
help='Gene ontology OBO file.')
parser.add_argument('-g', '--gaffile',
action="store",
dest='gaffile',
default=GAFFILE,
help='GAF file. ')
parser.add_argument('-o', '--outfile',
action="store",
dest='outfile',
default='genegomatrix.csv',
help='Binary matrix file. ')
parser.add_argument('-c', '--config',
action="store",
dest='conffile',
default=CONFFILE,
help='Config file path [~/etc/gotool.conf]')
parser.add_argument('goterms',
metavar='goterms',
type=str,
help='one or more space-separated goterms GO:XXXXXXXX' ,
nargs='*'
)
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
cp = ConfigParser()
cp.read(args.conffile)
#gocat(args.goterms)
```
#### File: cshl-work/cshlwork/phylotool.py
```python
import argparse
import itertools
import logging
import os
from collections import OrderedDict
from Bio import Phylo
import numpy as np
import pandas as pd
#pd.set_option("display.max_rows", 15)
#pd.set_option("display.max_columns", 15)
class Phylogeny(object):
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
self.tree = None
self.distmatrix = None
self.df = None
self.filepath = None
def __repr__(self):
pass
def parsefile(self, filepath):
"""
Reads NHX format file,...
"""
self.log.debug("Reading file %s" % filepath)
self.filepath = filepath
self.tree = Phylo.read(filepath, 'newick')
self.log.debug("tree is %s" % self.tree )
#print(tree)
def get_distance_matrix(self):
"""
        Returns a tuple (colnames, distmatrix): colnames is the list of terminal
        (leaf) names, and distmatrix is an OrderedDict of OrderedDicts of pairwise
        patristic distances, e.g.
        colnames = ['75743','137246','7950']
        distmatrix['75743']['137246']  ->  distance between those two terminals
Note: Takes about ~2 seconds with 119 terminals on a 2019 Macbook Pro
Takes about ~38 seconds with 459 terminals on a 2019 Macbook Pro
Takes about ~7 minutes with 876 terminals on a 2019 Macbook Pro
"""
self.distmatrix = OrderedDict()
terminals = self.tree.get_terminals()
terminals.sort(key=lambda x: x.name, reverse=True)
mdim = len(terminals)
self.log.debug("%d terminals.." % mdim)
i = 0
for x,y in itertools.combinations_with_replacement(terminals, 2):
if i % 1000 == 0:
self.log.debug("Handling combination %d" % i)
v = self.tree.distance(x, y)
self.distmatrix[x.name] = self.distmatrix.get(x.name, OrderedDict())
self.distmatrix[x.name][y.name] = v
self.distmatrix[y.name] = self.distmatrix.get(y.name, OrderedDict())
self.distmatrix[y.name][x.name] = v
i += 1
self.log.debug("Done computing distances. Filling diagonal...")
for x in terminals:
self.distmatrix[x.name][x.name] = 0
self.log.debug(self.distmatrix)
colnames = list(self.distmatrix.keys())
return ( colnames, self.distmatrix )
def to_df(self):
#(allclades, distmatrix) = self.to_distance_matrix()
#df = pd.DataFrame(data = distmatrix,
# index = allclades,
# columns = allclades)
csvpath = "%s.csv" % self.filepath
if os.path.exists(csvpath):
self.df = pd.read_csv(csvpath, sep='\t', index_col=0)
else:
if self.distmatrix is not None:
self.log.debug("Found completed distmatrix. Converting...")
else:
self.log.debug("No distmatrix found. Computing...")
self.get_distance_matrix()
self.df = pd.DataFrame(self.distmatrix, columns = self.distmatrix.keys())
return self.df
def to_csv(self):
if self.df is not None:
self.log.debug("DataFrame found. Using...")
else:
self.log.debug("No DataFrame found. Computing...")
self.to_df()
self.df.to_csv("%s.csv" % self.filepath)
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('infile',
metavar='infile',
type=str,
help='a phylegeny file NHX')
parser.add_argument('-c', '--config',
action="store",
dest='conffile',
default='~/etc/phylo.conf',
help='Config file path [~/etc/phylo.conf]')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
#cp = ConfigParser()
#cp.read(args.conffile)
p = Phylogeny()
p.parsefile(args.infile)
#(terminals, matrix) = p.get_distance_matrix()
#print(terminals)
df = p.to_df()
print(df)
p.to_csv()
```
#### File: cshl-work/project/make_genename_uniprotid_lookup.py
```python
import argparse
import logging
import os
import sys
import pandas as pd
'''
Take in trembl fasta file and create Pandas-compatible tsv of accession, uniprotid, genename
'''
def parse_uniprot_fasta(filename):
f = open(filename, 'r')
lod = []
nlines = 0
for line in f:
nlines += 1
if nlines % 10000 == 0:
logging.debug(f"processed {nlines} lines...")
if line.startswith('>'):
d = {}
# >tr|A0A1S3RID5|A0A1S3RID5_SALSA ras and Rab interactor 2-like OS=Salmo salar OX=8030 GN=LOC106602976 PE=4 SV=1
fields = line.split()
(db, acc, uid) = fields[0][1:].split('|')
d['db'] = db
d['acc'] = acc
d['uid'] = uid
#logging.debug(f"db={db} acc={acc} uid={uid}")
for t in fields[1:]:
if '=' in t:
(key,val) = t.split('=')
#logging.debug(f"key={key} val={val}")
if key == 'GN':
d['gn'] = val
lod.append(d)
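            # e.g. the example header shown above parses to
            # {'db': 'tr', 'acc': 'A0A1S3RID5', 'uid': 'A0A1S3RID5_SALSA', 'gn': 'LOC106602976'}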
logging.info(f"Processed {nlines} lines. Done.")
df = pd.DataFrame(lod)
return df
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('infile',
metavar='infile',
type=str,
help='a .fasta sequence file')
parser.add_argument('outfile',
metavar='outfile',
type=str,
help='a DF .csv prediction file')
args= parser.parse_args()
logging.debug(f"infile={args.infile} outfile={args.outfile}")
tdf = parse_uniprot_fasta(args.infile)
logging.debug(f"tdf=\n{tdf}")
tdf.to_csv(args.outfile, sep='\t' )
logging.debug(f"Wrote to {args.outfile}")
```
#### File: cshl-work/project/make_paircomb.py
```python
import argparse
import os
import sys
import logging
from itertools import combinations
gitpath=os.path.expanduser("~/git/cshl-work")
sys.path.append(gitpath)
def do_comb(infile, outfile=None):
plist = []
logging.debug(f"Opening infile {infile}")
i = 0
with open(infile) as f:
for i, l in enumerate(f):
plist.append(l.strip())
f.close()
logging.debug(f"Handled {i} items...")
comb = combinations(plist, 2)
comblist = list(comb)
logging.debug(f"Generated {len(comblist)} combinations...")
if outfile is None:
outfile = f'{infile}.pairwise.tsv'
o = open(outfile,'w')
logging.debug(f"Opened outfile {outfile}")
for (p1, p2 ) in comblist:
o.write(f"{p1}\t{p2}\n")
o.close()
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('infile',
metavar='infile',
type=str,
help='input file')
#parser.add_argument('outfile',
# metavar='outfile',
# type=str,
# default=None,
# required=False,
# help='pairwise info. ')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
do_comb(args.infile, None)
```
#### File: cshl-work/project/splitfile.py
```python
import argparse
import os
import sys
import logging
import traceback
gitpath=os.path.expanduser("~/git/cshl-work")
sys.path.append(gitpath)
def file_len(filename):
i = -1
with open(filename) as f:
for i, l in enumerate(f):
pass
f.close()
return i + 1
#
# i=0 line
# i=1
#
def do_split(filename, nfiles):
logging.debug(f"splitting {filename} into {nfiles} pieces...")
nlines = file_len(filename)
logging.debug(f"{filename} has {nlines} lines.")
lpf = int(nlines / nfiles)
logging.debug(f"will put {lpf} lines per file...")
    fnum = 0  # incremented to 1 before the first output file is opened
of = None
with open(filename) as f:
for i, l in enumerate(f):
if i % lpf == 0:
if of is not None:
logging.debug("closing file...")
of.close()
fnum += 1
logging.debug(f"opening new file= {filename}.{fnum} ")
of = open(f"{filename}.{fnum}", 'w')
of.write(l)
of.close()
f.close()
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('infile',
metavar='infile',
type=str,
help='a .fasta sequence file')
parser.add_argument('nfiles',
metavar='nfiles',
type=int,
help='Number of pieces.')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
do_split(args.infile, args.nfiles)
``` |
{
"source": "jhover/cshlwork",
"score": 2
} |
#### File: cshlwork/protlib/uniprot.py
```python
import argparse
import logging
import os
import pickle
import sys
import traceback
from collections import defaultdict
from configparser import ConfigParser
import pandas as pd
gitpath = os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
ASPECTMAP = { 'C': 'cc',
'F': 'mf',
'P': 'bp'
}
def get_default_config():
cp = ConfigParser()
cp.read(os.path.expanduser("~/git/cshlwork/etc/uniprot.conf"))
return cp
def parse_uniprot_dat(config):
"""
Parses uniprot/sprot DAT file, returns dictionary of dicts
using primary and secondary accession codes as keys.
{ 'Q84P23' :
{ 'proteinid': '4CLL9_ARATH',
'protein': '4CLL9',
'species': 'ARATH',
'proteinacc': 'Q84P23',
'taxonid': '3702',
'goterms': {'GO:0005777': ['cc', 'IDA'],
'GO:0005524': ['mf', 'IEA'],
'GO:0004321': ['mf', 'IDA'],
'GO:0016874': ['mf', 'IEA'],
'GO:0009695': ['bp', 'IDA'],
'GO:0031408': ['bp', 'IEA']}
},
.
.
.
"""
allentries = None
paccidx = {}
cachedir = os.path.expanduser(config.get('uniprot','cachedir'))
filepath = os.path.expanduser(config.get('uniprot','datfile'))
filebase = os.path.basename(filepath)
(filebase, e) = os.path.splitext(filebase)
cachefile =f"{cachedir}/{filebase}.allbypacc.pickle"
if os.path.exists(cachefile):
logging.info("Cache hit. Using existing data...")
f = open(cachefile, 'rb')
paccidx = pickle.load(f)
logging.info("Loaded from cache...")
f.close()
else:
logging.debug(f"opening datfile={filepath}")
try:
logging.debug(f" attempting to open '{filepath}'")
filehandle = open(filepath, 'r')
except FileNotFoundError:
logging.error(f"No such file {filepath}")
allentries = []
current = None
sumreport = 1
suminterval = 10000
repthresh = sumreport * suminterval
try:
while True:
line = filehandle.readline()
if line == '':
break
if line.startswith("ID "):
# ID 001R_FRG3G Reviewed; 256 AA.
# <prot_name>_<prot_spec>
val = line[5:]
fields = val.split()
proteinid = fields[0].strip().replace(';','')
current = defaultdict(dict)
current['proteinid'] = proteinid
(protein, species) = proteinid.split('_')
current['protein'] = protein
current['species'] = species
elif line.startswith("AC "):
# AC Q6GZX4;
# AC A0A023GPJ0;
# AC Q91896; O57469;
rest = line[5:]
acclist = rest.split(';')
current['proteinacc'] = acclist[0].strip()
for c in acclist:
if len(c) > 2:
c = c.strip()
paccidx[c] = current
elif line.startswith("OX "):
#OX NCBI_TaxID=654924;
taxonid = ""
val = line[5:]
fields = val.split('=')
if fields[0] == 'NCBI_TaxID':
tfields = fields[1].split()
taxonid = tfields[0].strip().replace(';','')
#taxonid = fields[1].strip().replace(';','')
current['taxonid'] = taxonid
elif line.startswith("DR GO;"):
# DR GO; GO:0046782; P:regulation of viral transcription; IEA:InterPro.
# P biological process, C cellular component, F molecular function.
fields = line.split(';')
goterm = fields[1].strip()
goinfo = fields[2]
aspcode = goinfo.split(':')[0].strip()
goaspect = ASPECTMAP[aspcode]
goevsrc = fields[3]
(goevidence, evsrc) = goevsrc.split(':')
goevidence = goevidence.strip()
current['goterms'][goterm] = [ goaspect, goevidence ]
elif line.startswith("SQ SEQUENCE"):
current['seqlength'] = int(line.split()[2])
current['sequence'] = ""
seqlen = current['seqlength']
aaread = 0
while aaread < seqlen:
line = filehandle.readline()
lineseq = line.strip().replace(" ","")
current['sequence'] = "%s%s" % (current['sequence'], lineseq)
aaread += len(lineseq)
elif line.startswith("GN "):
# Examples:
# standard example
# GN Name=GRF1;
# w/ extra info, but all on one line per protein
# GN Name=BRCA1; Synonyms=RNF53;
# GN Name=GRF10; OrderedLocusNames=At1g22300; ORFNames=T16E15.8;
# GN Name=GRF2; Synonyms=GF14; OrderedLocusNames=At1g78300; ORFNames=F3F9.16;
# non Name= info only
# GN ORFNames=OsI_006296;
# multiple lines in one protein (with Name=)
# GN Name=GRF12 {ECO:0000303|PubMed:11553742};
# GN OrderedLocusNames=At1g26480 {ECO:0000312|Araport:AT1G26480};
# GN ORFNames=T1K7.15 {ECO:0000312|EMBL:AAF98570.1};
# multi-line, no key(s)
# GN Name=matK {ECO:0000256|HAMAP-Rule:MF_01390,
# GN ECO:0000313|EMBL:ACK76147.1};
# multiple lines in one protein (no Name=)
# GN OrderedLocusNames=Os02g0224200, LOC_Os02g13110;
# GN ORFNames=OsJ_005772, P0470A03.14;
val = line[5:]
if val.startswith("Name="):
fields = val.split() # by whitespace
(n, gname) = fields[0].split("=")
gname = gname.upper()
current['gene'] = gname.replace(';','')
#current['gene'] = gname.replace(';','')
#elif val.startswith("")
                    else:
                        # Continuation GN lines without Name= must not clobber a gene set earlier.
                        if 'gene' not in current:
                            current['gene'] = ''
elif line.startswith("//"):
allentries.append(current)
if len(allentries) >= repthresh:
logging.info(f"Processed {len(allentries)} entries... ")
sumreport +=1
repthresh = sumreport * suminterval
current = None
except Exception as e:
traceback.print_exc(file=sys.stdout)
if filehandle is not None:
filehandle.close()
logging.info(f"Parsed file with {len(allentries)} entries" )
logging.info(f"Created prim/sec index. {len(paccidx)} entries ")
logging.debug(f"Some entries: {allentries[10:15]}")
logging.info(f"Caching paccindex to {cachefile}...")
f = open(cachefile, 'wb')
pickle.dump(paccidx, f )
f.close()
logging.debug("Done.")
return paccidx
def uniprot_to_df(cp):
"""
"""
COLUMNS = ['pacc', 'proteinid', 'protein', 'species', 'proteinacc', 'gene', 'taxonid']
XCOLS = ['proteinid', 'protein', 'species', 'proteinacc', 'gene', 'taxonid']
pidx = parse_uniprot_dat(cp)
logging.debug('Done parsing uniprot .dat file. Building LOL.')
lol = []
for k in pidx.keys():
e = pidx[k]
flist = [k]
for col in XCOLS:
v = e[col]
if len(v) == 0:
v = ''
flist.append(v)
lol.append(flist)
logging.debug(f'made lol with {len(lol)} entries. Making DF...')
df = pd.DataFrame(lol, columns=COLUMNS)
logging.debug(f'completed DF.')
return df
def write_df_tsv(dataframe, filepath):
"""
"""
dataframe.to_csv(filepath, sep='\t', index=False)
def index_by_acc(dictlist):
accidx = {}
n = 0
for p in dictlist:
acc = p['proteinacc']
accidx[acc] = p
n += 1
logging.debug(f"indexed by acc {n} entries...")
return accidx
def write_tfa_fromlist(pacclist, paccidx, outfile):
'''
writes only select protein sequences to tfa outfile.
fixes proteinacc inside in case the list entry is an alternate code.
'''
newdlist = []
total = 0
found = 0
missing = 0
missinglist =[]
for pacc in pacclist:
total += 1
try:
pdict = paccidx[pacc]
pdict['proteinacc'] = pacc
newdlist.append(pdict)
found += 1
except KeyError:
missing += 1
missinglist.append(pacc)
logging.info(f"total={total} found={found} missing={missing}")
logging.info(f"missing list: {missinglist}")
logging.debug(f"Made shorter dictlist length={len(newdlist)} writing to file={outfile}.")
write_tfa_file(newdlist, outfile)
def write_tfa_file(dictlist, outfile):
'''
defaultdict(<class 'dict'>,
{'proteinid': '1433Z_XENTR',
'protein': '1433Z',
'species': 'XENTR',
'proteinacc': 'Q6P4Z5',
'gene': 'YWHAZ',
'taxonid': '8364',
'goterms': {'GO:0005737': ['cc', 'IEA'],
'GO:0070372': ['bp', 'ISS']},
'seqlength': 245,
'sequence': 'MDKNELVQKAKL... ...EGGEN'})
'''
s=""
snum = 1
header=""
x = 60
for p in dictlist:
header = f">{p['proteinacc']}\t{p['protein']}\t{p['species']}\t{p['gene']}"
header = header.replace('{}','') # remove missing values.
sequence = p['sequence']
s += f"{header}\n"
chunklist = [ sequence[y-x:y] for y in range(x, len(sequence)+x, x) ]
for c in chunklist:
s += f"{c}\n"
snum += 1
logging.debug(s)
    try:
        with open(outfile, 'w') as f:
            f.write(s)
        logging.debug(f"Wrote TFA sequence to file {outfile}")
    except IOError:
        logging.error(f"could not write to file {outfile}")
        traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.WARN)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('infile',
metavar='infile',
type=str,
nargs='?',
default=os.path.expanduser("~/data/uniprot/uniprot_all_vertebrates.dat"),
help='Uniprot .dat file')
parser.add_argument('-t','--tfafile',
metavar='tfafile',
type=str,
required=False,
default=None,
help='Fasta .tfa output file')
    args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
config = get_default_config()
logging.info(f"Requesting info from {args.infile} ...")
    # parse_uniprot_dat() returns a dict keyed by accession; the helpers below expect
    # entry dicts, so take the values (an entry with several accessions appears once per accession).
    uplist = list(parse_uniprot_dat(config, args.infile).values())
print(f"outlist is length = {len(uplist)}")
if args.tfafile is not None:
write_tfa_file(uplist, os.path.expanduser(args.tfafile))
logging.info("Generating acc index..")
idx = index_by_acc(uplist)
logging.info("done.")
``` |
{
"source": "jhover/cshl-work",
"score": 2
} |
#### File: cshl-work/scripts/idmapper.py
```python
import argparse
import logging
import os
import pickle
import requests
import sys
import time
import traceback
from collections import defaultdict
from configparser import ConfigParser
import pandas as pd
gitpath = os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
from protlib.utils import *
from protlib.uniprot import *
ASPECTMAP = { 'C': 'cc',
'F': 'mf',
'P': 'bp'
}
KEYMAP = { 'OrderedLocusNames' : 'locus' ,
'Name' : 'gene',
'ORFNames' : 'orfname',
'Synonyms' : 'synonym'
}
VALID_IDS = ['accession', 'proteinid','locus', 'gene']
def process_file(infile, ididx, outid):
with open(infile) as f:
for line in f:
fields = line.split('\t')
newline = []
for i, field in enumerate(fields):
field = field.strip()
logging.debug(f'i={i} field={field}')
try:
edict = ididx[field]
newid = edict[outid]
newline.append(newid)
logging.debug(f'found {newid} to replace {field}')
except KeyError:
logging.debug(f'no entry found for {field}')
newline.append(field)
print('\t'.join(newline))
if __name__ == '__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.WARN)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('-u', '--uniprot',
metavar='uniprot',
type=str,
nargs='?',
default=os.path.expanduser("~/data/uniprot/uniprot_all_vertebrates.dat"),
help='A Uniprot .dat file')
parser.add_argument('-i','--inid',
metavar='inid',
type=str,
required=False,
default='accession',
                        help='Identifier type [ accession | proteinid | locus ] (B4FCB1 | B4FCB1_MAIZE)')
parser.add_argument('-o','--outid',
metavar='outid',
type=str,
required=False,
default='accession',
                        help='Identifier type [ accession | proteinid | locus ] (B4FCB1 | B4FCB1_MAIZE)')
parser.add_argument('infile' ,
metavar='infile',
type=str,
nargs='?',
default=None,
help='UPID [UPID UPID ...] ')
    args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
# Prepare for commands that require parsing DAT file.
logging.info(f"Parsing uniprot .dat file={args.uniprot} ...")
config = get_default_config()
entries = parse_uniprot_dat(config, datfile=args.uniprot)
idx = index_by(entries, args.inid)
#(entries, pididx, accidx)
logging.debug(f"uniprot length = {len(entries)}")
logging.debug(f'infile={args.infile}')
process_file(args.infile, idx, args.outid)
``` |
{
"source": "jhoward/code_shop",
"score": 4
} |
#### File: site_problems/euler/10.py
```python
import math
primeList = [2]
def isPrime(number):
sN = int(math.sqrt(number)) + 1
for n in primeList:
if n > sN:
return True
if number % n == 0:
return False
return True
num = 3
while True:
if isPrime(num):
primeList.append(num)
        print(num)
num += 2
if num >= 2000000:
break
print(sum(primeList))
```
#### File: site_problems/euler/5.py
```python
start = 19*17*13*11*7*5*3
num = start
def check(num, limit = 20):
for i in range(2, limit):
if num%i != 0:
return False
return True
attempts = 0
while True:
    print(num)
attempts += 1
if check(num, 20):
break
num += start
print(attempts)
``` |
{
"source": "JHowell45/advent-of-code",
"score": 2
} |
#### File: advent-of-code/_2019/__init__.py
```python
try:
from _2019.src.day_1 import day_1_puzzle_1_solution, day_1_puzzle_2_solution
except ImportError:
from src.day_1 import day_1_puzzle_1_solution, day_1_puzzle_2_solution
try:
from _2019.src.day_2 import day_2_puzzle_1_solution, day_2_puzzle_2_solution
except ImportError:
from src.day_2 import day_2_puzzle_1_solution, day_2_puzzle_2_solution
def main():
print(f"Day 1, Puzzle 1: {day_1_puzzle_1_solution()}")
print(f"Day 1, Puzzle 2: {day_1_puzzle_2_solution()}")
print(f"Day 2, Puzzle 1: {day_2_puzzle_1_solution()}")
print(f"Day 2, Puzzle 2: {day_2_puzzle_2_solution()}")
if __name__ == "__main__":
main()
``` |
{
"source": "JHowell45/financial-application",
"score": 5
} |
#### File: app/tax_calculations/calculate_tax.py
```python
from typing import List
def calculate_tax(
income: float,
tax_brackets: List[int],
next_brackets: List[int],
tax_rates: List[float],
):
total_tax = 0
for bracket, next_bracket, rate in generate_tax_sections(
tax_brackets, next_brackets, tax_rates
):
temp_tax, income = calculate_tax_for_bracket(
income, bracket, next_bracket, rate
)
total_tax += temp_tax
return round(total_tax, 2)
def calculate_tax_for_bracket(
income: float, tax_bracket: int, next_bracket: int, tax_rate: float
):
"""Use this function to calculate the tax amount for a specific bracket.
This function is used for calculating the tax amount for a specific tax bracket
given the income.
:param income: the income to tax.
:param tax_bracket: the tax bracket being checked.
:param next_bracket: the next bracket to only tax the specific amount.
:param tax_rate: the tax rate for the given bracket.
:return: the taxed amount and the new income amount to pass to following function.
"""
if income >= tax_bracket:
taxable_income = income - next_bracket
taxed_amount = taxable_income * tax_rate
return taxed_amount, next_bracket
else:
return 0, income
def generate_tax_sections(tax_brackets: list, next_brackets: list, tax_rates: list):
    """Use this function to calculate the rates and brackets for the tax sections.
    This function is used for returning the tax bracket, the following bracket to do
    subtractions to get the taxable amount and the tax rate for the bracket.
    :return: the tax bracket, the next bracket and the tax percentage.
    """
    for bracket, next_bracket, rate in zip(tax_brackets, next_brackets, tax_rates):
        yield bracket, next_bracket, rate
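if __name__ == "__main__":
    # Minimal sanity check with made-up brackets and rates (hypothetical numbers, not real
    # tax bands): 40% above 50k, 20% between 10k and 50k, nothing below 10k.
    brackets = [50_000, 10_000, 0]
    next_brackets = [50_000, 10_000, 0]
    rates = [0.4, 0.2, 0.0]
    # 40% of the 10k above 50k plus 20% of the 40k between 10k and 50k -> 12000.0
    print(calculate_tax(60_000, brackets, next_brackets, rates))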
``` |
{
"source": "JHowell45/flask-vue-docker-template",
"score": 3
} |
#### File: app/routes/test_routes.py
```python
from flask_restplus import Namespace, Resource
api = Namespace(
"tests",
description=(
"test endpoint for checking the application works and can be connected to."
),
)
@api.route("/1")
class Test1(Resource):
def get(self):
return {"test": "all good!"}
@api.route("/2")
class Test2(Resource):
def get(self):
return {"test": 3}
``` |
{
"source": "JHowell45/Forests",
"score": 3
} |
#### File: Forests/tests/test_tree_classes.py
```python
import pytest
from woodland.tree_classes import TreeNode
class TestTreeNodeGetterFunctions:
"""Use this class to test the 'TreeNode' class to test the getters made.
This function is used for testing the getters for the 'TreeNode' class. This
tests to make sure the getters return the correct values and types.
"""
def test_id_type(self, tree_node_instance):
"""Use this function to test the type of the ID for the 'TreeNode' instance.
This function is used for testing the type for the ID value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert isinstance(getattr(tree_node_instance, "id"), int)
def test_id_value(self, tree_node_instance, tree_node_instance_data):
"""Use this function to test the value of the ID for the 'TreeNode' instance.
This function is used for testing the value for the ID value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
:param tree_node_instance_data: data used for generating
:type tree_node_instance_data: dict
"""
assert getattr(tree_node_instance, "id") == tree_node_instance_data["node_id"]
def test_payload_type(self, tree_node_instance):
"""Use this function to test the type of the payload for 'TreeNode'.
This function is used for testing the type for the payload value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert isinstance(getattr(tree_node_instance, "payload"), (int, float, str))
def test_payload_value(self, tree_node_instance, tree_node_instance_data):
"""Use this function to test the value of the payload.
This function is used for testing the value for payload for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
:param tree_node_instance_data: data used for generating
:type tree_node_instance_data: dict
"""
assert (
getattr(tree_node_instance, "payload") == tree_node_instance_data["payload"]
)
def test_children_type(self, tree_node_instance):
"""Use this function to test the type of the children for 'TreeNode'.
This function is used for testing the type for the children value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert isinstance(getattr(tree_node_instance, "children"), list)
def test_children_value(self, tree_node_instance):
"""Use this function to test the value of the children for 'TreeNode'.
This function is used for testing the value for the children value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert getattr(tree_node_instance, "children") == list()
def test_parent_type(self, tree_node_instance):
"""Use this function to test the type of the parent for 'TreeNode'.
This function is used for testing the type for the parent value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert (
isinstance(getattr(tree_node_instance, "parent"), TreeNode)
or getattr(tree_node_instance, "parent") is None
)
def test_parent_value(self, tree_node_instance, tree_node_instance_data):
"""Use this function to test the value of the parent for 'TreeNode'.
This function is used for testing the value for the parent value for the test
'TreeNode' instance.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
:param tree_node_instance_data: data used for generating
:type tree_node_instance_data: dict
"""
assert (
getattr(tree_node_instance, "parent") == tree_node_instance_data["parent"]
)
def test_repr_type(self, tree_node_instance):
"""Use this function to test the type of the __repr__ function.
This function is used for testing the type for the 'TreeNode' test instance
__repr__ function.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert isinstance(repr(tree_node_instance), str)
def test_repr_value(self, tree_node_instance):
"""Use this function to test the value of the __repr__ function.
This function is used for testing the value for the 'TreeNode' test instance
__repr__ function.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
assert repr(
tree_node_instance
) == "<TreeNode ID: {}, Payload: {}, Parent: {}, Child: {}>".format(
tree_node_instance.id,
tree_node_instance.payload,
tree_node_instance.parent,
tree_node_instance.children,
)
class TestTreeNodeSetterFunctions:
"""Use this class to test the 'TreeNode' class to test the setters made.
This function is used for testing the setters for the 'TreeNode' class. This
tests to make sure the setters return the correct values and types.
"""
test_node_id_options = [10, 2.0]
test_payload_choices = [1, 1.0]
test_payload_types = [type(payload) for payload in test_payload_choices]
test_children_options = [[], {}, None]
test_parent_options = [
None,
TreeNode(1, 1, None, None),
{"node_id": 1, "payload": 1, "children": None, "parent": None},
]
test_parent_expected = [
None,
TreeNode(1, 1, None, None),
TreeNode(1, 1, None, None),
]
@pytest.mark.parametrize("node_id", test_node_id_options)
def test_id_type(self, node_id, tree_node_instance):
"""Use this function to test the type of the ID for the 'TreeNode' instance.
This function is used for testing the type for the ID value for the test
'TreeNode' instance.
:param node_id: the test Node ID.
:type node_id: int/float
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.id = node_id
assert isinstance(getattr(tree_node_instance, "id"), int)
@pytest.mark.parametrize("node_id", test_node_id_options)
def test_id_value(self, node_id, tree_node_instance):
"""Use this function to test the value of the ID for the 'TreeNode' instance.
This function is used for testing the value for the ID value for the test
'TreeNode' instance.
:param node_id: the test Node ID.
:type node_id: int/float
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.id = node_id
assert getattr(tree_node_instance, "id") == node_id
@pytest.mark.parametrize(
"payload,expected", zip(test_payload_choices, test_payload_types)
)
def test_payload_type(self, payload, expected, tree_node_instance):
"""Use this function to test the type of the payload for 'TreeNode'.
This function is used for testing the type for the payload value for the test
'TreeNode' instance.
:param payload: the test payload to use.
:type payload: any type.
:param expected: the type for the payload.
:type expected: the types for payload.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.payload = payload
assert isinstance(getattr(tree_node_instance, "payload"), expected)
@pytest.mark.parametrize("payload", test_payload_choices)
def test_payload_value(self, payload, tree_node_instance):
"""Use this function to test the value of the payload.
This function is used for testing the value for payload for the test
'TreeNode' instance.
:param payload: the test payload to use.
:type payload: any type.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.payload = payload
assert getattr(tree_node_instance, "payload") == payload
@pytest.mark.parametrize("children", test_children_options)
def test_children_type(self, children, tree_node_instance):
"""Use this function to test the type of the children for 'TreeNode'.
This function is used for testing the type for the children value for the test
'TreeNode' instance.
:param children: the test children to be added.
:type children: set/dict/None
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.children = children
assert isinstance(getattr(tree_node_instance, "children"), list)
@pytest.mark.parametrize("children", test_children_options)
def test_children_value(self, children, tree_node_instance):
"""Use this function to test the value of the children for 'TreeNode'.
This function is used for testing the value for the children value for the test
'TreeNode' instance.
:param children: the test children to be added.
:type children: set/dict/None
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.children = children
assert getattr(tree_node_instance, "children") == list()
@pytest.mark.parametrize("parent", test_parent_options)
def test_parent_type(self, parent, tree_node_instance):
"""Use this function to test the type of the parent for 'TreeNode'.
This function is used for testing the type for the parent value for the test
'TreeNode' instance.
:param parent: the test parent to be added to the test TreeNode instance.
:type parent: None/TreeNode/dict
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.parent = parent
assert (
isinstance(getattr(tree_node_instance, "parent"), TreeNode)
or getattr(tree_node_instance, "parent") is None
)
@pytest.mark.parametrize(
"parent,expected", zip(test_parent_options, test_parent_expected)
)
def test_parent_value(self, parent, expected, tree_node_instance):
"""Use this function to test the value of the parent for 'TreeNode'.
This function is used for testing the value for the parent value for the test
'TreeNode' instance.
:param parent: the test parent to be added to the test TreeNode instance.
:type parent: None/TreeNode/dict
:param expected: the expected parent value for the 'TreeNode' test instance.
:type expected: None/TreeNode.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.parent = parent
assert getattr(tree_node_instance, "parent") == expected
class TestTreeNodeMethods:
"""Use this class to test the 'TreeNode' class to test the methods made.
This function is used for testing the methods for the 'TreeNode' class. This
tests to make sure the methods return the correct values and types.
"""
test_child = TreeNode(1, 1, None, None)
def test_add_child_type(self, tree_node_instance):
"""Use this function to test the 'add_child' function.
This function is used for testing the type for the 'add_child' function to
check that a 'TreeNode' has been successfully added to the list of children.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.add_child(self.test_child)
assert isinstance(tree_node_instance.children, list)
for child_node in tree_node_instance.children:
assert isinstance(child_node, TreeNode)
def test_add_child_value(self, tree_node_instance):
"""Use this function to test the 'add_child' function.
This function is used for testing the value for the 'add_child' function to
check that a 'TreeNode' has been successfully added to the list of children.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
tree_node_instance.add_child(self.test_child)
assert isinstance(tree_node_instance.children, list)
for child_node in tree_node_instance.children:
assert child_node == self.test_child
class TestTreeNodeErrors:
"""Use this class to test the 'TreeNode' class to test the errors made.
This function is used for testing the errors for the 'TreeNode' class. This
tests to make sure the errors return the correct values and types.
"""
def test_set_id_raises_error(self, tree_node_instance):
"""Use this function to check an error is successfully raised.
This function is used for testing the function raises the correct error code
when passed an incorrect value.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
with pytest.raises(TypeError):
tree_node_instance.id = "hello world"
def test_set_payload_raises_error(self, tree_node_instance):
"""Use this function to check an error is successfully raised.
This function is used for testing the function raises the correct error code
when passed an incorrect value.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
with pytest.raises(TypeError):
tree_node_instance.payload = TreeNode(1, 1, None, None)
def test_set_children_raises_error(self, tree_node_instance):
"""Use this function to check an error is successfully raised.
This function is used for testing the function raises the correct error code
when passed an incorrect value.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
with pytest.raises(TypeError):
tree_node_instance.children = TreeNode(1, 1, None, None)
def test_set_parent_raises_error(self, tree_node_instance):
"""Use this function to check an error is successfully raised.
This function is used for testing the function raises the correct error code
when passed an incorrect value.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
with pytest.raises(TypeError):
tree_node_instance.parent = "Not a TreeNode"
def test_add_child_raise_error(self, tree_node_instance):
"""Use this function to check an error is successfully raised.
This function is used for testing the function raises the correct error code
when passed an incorrect value.
:param tree_node_instance: the test 'TreeNode' instance.
:type tree_node_instance: TreeNode
"""
with pytest.raises(TypeError):
tree_node_instance.add_child(1)
``` |
{
"source": "JHowell45/machine-learning-theory",
"score": 3
} |
#### File: src/linear_regression/models.py
```python
from dataclasses import dataclass, field
from pandas import DataFrame, Series
@dataclass
class UnivariateLinearRegressionModel:
theta_0: float
theta_1: float
def __post_init__(self):
self.params = Series([self.theta_0, self.theta_1])
def predict(self, feature: float) -> float:
return (self.theta_1 * feature) + self.theta_0
def multiple_predictions(self, features: Series) -> Series:
"""Use this function to quickly predict the values for multiple features.
This function is used for running the linear regression to get the predictions for several features at once.
:param features: the vector of feature values.
:return: the vector of feature predictions.
"""
features_dataframe = DataFrame(
[Series(1 for _ in range(len(features))), features]
).transpose()
return features_dataframe.dot(self.params)
@dataclass
class MultivariateLinearRegression:
theta_0: float
gradients: Series = field(default_factory=Series)
@property
def params(self):
return Series([self.theta_0]).append(self.gradients, ignore_index=True)
def predict(self, features: Series) -> float:
if len(self.gradients) != len(features):
raise ValueError(
f"Features not the same length as gradients! Features: "
f"{len(features)}, Gradients: {len(self.gradients)}"
)
return self.gradients.multiply(features).sum() + self.theta_0
def multiple_predictions(self, features: DataFrame) -> Series:
rows, columns = features.shape
if len(self.gradients) != columns:
raise ValueError(
f"Features not the same length as gradients! Features: {columns}, Gradients: {len(self.gradients)}"
)
return features.mul(self.gradients).sum(1).add(self.theta_0)
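if __name__ == "__main__":
    # Quick illustrative check with hypothetical parameters (not part of the original module);
    # Series/DataFrame are already imported at the top of this file.
    uni = UnivariateLinearRegressionModel(theta_0=1.0, theta_1=2.0)
    print(uni.predict(3.0))  # 2.0 * 3.0 + 1.0 = 7.0
    print(uni.multiple_predictions(Series([1.0, 2.0, 3.0])).tolist())  # [3.0, 5.0, 7.0]
    multi = MultivariateLinearRegression(theta_0=0.5, gradients=Series([1.0, 2.0]))
    print(multi.predict(Series([2.0, 3.0])))  # 1*2 + 2*3 + 0.5 = 8.5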
```
#### File: linear_regression/parameter_optimisations/univariate.py
```python
from math import inf
from pandas import Series
from tqdm import tqdm
from src.linear_regression.cost_functions import mean_squared_error
from src.linear_regression.models import UnivariateLinearRegressionModel
def batch_gradient_descent(
features: Series,
labels: Series,
current_theta_0: int = 0,
current_theta_1: int = 0,
learning_rate: float = 0.0001,
epochs: int = None,
):
previous_mse_score = inf
current_mse_score = 0
rounds = 0
start = True
if epochs is None:
while previous_mse_score > current_mse_score:
if start:
current_mse_score = inf
start = False
previous_mse_score = current_mse_score
(
current_theta_0,
current_theta_1,
current_mse_score,
) = single_gradient_descent(
features, labels, current_theta_0, current_theta_1, learning_rate
)
rounds += 1
print(
{
"current_theta_0": round(current_theta_0, 2),
"current_theta_1": round(current_theta_1, 2),
"current_mse_score": round(current_mse_score, 4),
"epochs": rounds,
}
)
else:
for _ in tqdm(range(epochs)):
if previous_mse_score > current_mse_score:
if start:
current_mse_score = inf
start = False
previous_mse_score = current_mse_score
(
current_theta_0,
current_theta_1,
current_mse_score,
) = single_gradient_descent(
features, labels, current_theta_0, current_theta_1, learning_rate
)
rounds += 1
else:
break
return {
"current_theta_0": round(current_theta_0, 2),
"current_theta_1": round(current_theta_1, 2),
"current_mse_score": round(current_mse_score, 4),
"epochs": rounds,
}
def single_gradient_descent(
features: Series,
labels: Series,
current_theta_0: float,
current_theta_1: float,
learning_rate: float,
):
m = len(features)
model = UnivariateLinearRegressionModel(current_theta_0, current_theta_1)
# predicted_labels = model.multiple_predictions(features) # somehow slower??
predicted_labels = Series(model.predict(feature) for feature in features)
theta_0_derivative = theta_0_partial_derivative(
predictions=predicted_labels, actual_labels=labels, m=m
)
theta_1_derivative = theta_1_partial_derivative(
predictions=predicted_labels, actual_labels=labels, features=features, m=m
)
current_theta_0 -= learning_rate * theta_0_derivative
current_theta_1 -= learning_rate * theta_1_derivative
cost_function_score = mean_squared_error(labels, predicted_labels)
return current_theta_0, current_theta_1, cost_function_score
def theta_0_partial_derivative(
predictions: Series, actual_labels: Series, m: int
) -> float:
return (1 / m) * sum(predictions.subtract(actual_labels))
def theta_1_partial_derivative(
predictions: Series, actual_labels: Series, features: Series, m: int
) -> float:
return (1 / m) * sum(predictions.subtract(actual_labels).multiply(features))
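# Hypothetical usage sketch (not part of the original module):
#     from pandas import Series
#     features = Series([1.0, 2.0, 3.0, 4.0])
#     labels = Series([3.0, 5.0, 7.0, 9.0])      # generated from y = 2x + 1
#     result = batch_gradient_descent(features, labels, learning_rate=0.01, epochs=10000)
#     # result reports the fitted theta_0 / theta_1 and the final MSE after the given epochs.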
```
#### File: probability/bayes_theorem/__init__.py
```python
from dataclasses import dataclass
@dataclass
class BayesTheorem:
probability_hypothesis: float
probability_evidence_given_hypothesis: float
probability_not_hypothesis: float
probability_evidence_given_not_hypothesis: float
@property
def probability_evidence(self) -> float:
return (
self.probability_hypothesis * self.probability_evidence_given_hypothesis
) + (
self.probability_not_hypothesis
* self.probability_evidence_given_not_hypothesis
)
def calculate(self) -> float:
return (
self.probability_hypothesis * self.probability_evidence_given_hypothesis
) / self.probability_evidence
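if __name__ == "__main__":
    # Illustrative check with hypothetical numbers (not part of the original module):
    # 1% prevalence, 90% true-positive rate, 5% false-positive rate.
    bt = BayesTheorem(
        probability_hypothesis=0.01,
        probability_evidence_given_hypothesis=0.9,
        probability_not_hypothesis=0.99,
        probability_evidence_given_not_hypothesis=0.05,
    )
    print(round(bt.calculate(), 3))  # 0.009 / (0.009 + 0.0495) -> 0.154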
```
#### File: tests/test_linear_regression/test_parameter_learning.py
```python
from pandas import DataFrame, Series
from src.linear_regression.parameter_optimisations import normal_equation
label_values = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
features = DataFrame(
{
"theta_zero": [1 for _ in label_values],
"feature_1": [x / 10 for x in label_values],
}
)
labels = Series(label_values)
class TestNormalEquation:
results = normal_equation(features, labels)
def test_type(self):
assert isinstance(self.results, Series)
def test_length(self):
assert len(self.results) == features.shape[1]
def test_value(self):
results = [round(x, 2) for x in self.results]
assert results == [0, 10]
``` |
{
"source": "JHowell45/Pupper",
"score": 4
} |
#### File: puppo/decorator_functions/display_decorators.py
```python
from functools import wraps
from shutil import get_terminal_size
import click
def command_handler(command_title, colour='green'):
"""Use this decorator for surrounding the functions with banners."""
def decorator(function):
"""Nested decorator function."""
terminal_width = int(get_terminal_size()[0])
title = ' {} '.format(command_title)
banner_length = int((terminal_width - len(title)) / 2)
banner = '-' * banner_length
command_banner = '|{0}{1}{0}|'.format(
banner, title.title())
lower_banner = '|{}|'.format('-' * int(len(command_banner) - 2))
@wraps(function)
def wrapper(*args, **kwargs):
"""Nested wrapper function."""
click.secho(command_banner, fg=colour)
result = function(*args, **kwargs)
click.secho(lower_banner, fg=colour)
return result
return wrapper
return decorator
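# Hypothetical usage sketch (not part of the original module): a wrapped function prints an
# upper banner, then its own output, then a matching lower banner.
#
#     @command_handler('install dependencies', colour='blue')
#     def install():
#         click.echo('running pip install ...')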
```
#### File: Pupper/puppo/__init__.py
```python
import click
@click.group()
def cli():
"""Scripts and commands for making Python easier.
    Puppo is a collection of commands and scripts designed to make building Python
    packages, applications, webpages and anything else you can think of easier.
"""
pass
``` |
{
"source": "jhowl01/straxen",
"score": 2
} |
#### File: straxen/plugins/event_processing.py
```python
import strax
import numpy as np
from straxen.common import pax_file, get_resource, get_elife, first_sr1_run
from straxen.itp_map import InterpolatingMap
export, __all__ = strax.exporter()
@export
@strax.takes_config(
strax.Option('trigger_min_area', default=100,
help='Peaks must have more area (PE) than this to '
'cause events'),
strax.Option('trigger_max_competing', default=7,
help='Peaks must have FEWER nearby larger or slightly smaller'
' peaks to cause events'),
strax.Option('left_event_extension', default=int(1e6),
help='Extend events this many ns to the left from each '
'triggering peak'),
strax.Option('right_event_extension', default=int(1e6),
help='Extend events this many ns to the right from each '
'triggering peak'),
)
class Events(strax.OverlapWindowPlugin):
depends_on = ['peak_basics', 'peak_proximity']
data_kind = 'events'
dtype = [
('event_number', np.int64, 'Event number in this dataset'),
('time', np.int64, 'Event start time in ns since the unix epoch'),
('endtime', np.int64, 'Event end time in ns since the unix epoch')]
events_seen = 0
def get_window_size(self):
return (2 * self.config['left_event_extension'] +
self.config['right_event_extension'])
def compute(self, peaks):
le = self.config['left_event_extension']
re = self.config['right_event_extension']
triggers = peaks[
(peaks['area'] > self.config['trigger_min_area'])
& (peaks['n_competing'] <= self.config['trigger_max_competing'])]
# Join nearby triggers
t0, t1 = strax.find_peak_groups(
triggers,
gap_threshold=le + re + 1,
left_extension=le,
right_extension=re)
result = np.zeros(len(t0), self.dtype)
result['time'] = t0
result['endtime'] = t1
result['event_number'] = np.arange(len(result)) + self.events_seen
if not result.size > 0:
print("Found chunk without events?!")
self.events_seen += len(result)
return result
# TODO: someday investigate if/why loopplugin doesn't give
# anything if events do not contain peaks..
# Likely this has been resolved in 6a2cc6c
@export
class EventBasics(strax.LoopPlugin):
__version__ = '0.1.2'
depends_on = ('events',
'peak_basics',
'peak_positions',
'peak_proximity')
def infer_dtype(self):
dtype = [(('Number of peaks in the event',
'n_peaks'), np.int32),
(('Drift time between main S1 and S2 in ns',
'drift_time'), np.int64)]
for i in [1, 2]:
dtype += [
((f'Main S{i} peak index',
f's{i}_index'), np.int32),
((f'Main S{i} time since unix epoch [ns]',
f's{i}_time'), np.int64),
((f'Main S{i} weighted center time since unix epoch [ns]',
f's{i}_center_time'), np.int64),
((f'Alternate S{i} time since unix epoch [ns]',
f'alt_s{i}_time'), np.int64),
((f'Alternate S{i} weighted center time since unix epoch [ns]',
f'alt_s{i}_center_time'), np.int64),
((f'Main S{i} area, uncorrected [PE]',
f's{i}_area'), np.float32),
((f'Main S{i} area fraction top',
f's{i}_area_fraction_top'), np.float32),
((f'Main S{i} width, 50% area [ns]',
f's{i}_range_50p_area'), np.float32),
((f'Main S{i} number of competing peaks',
f's{i}_n_competing'), np.int32),
((f'Area of alternate S{i} in event [PE]',
f'alt_s{i}_area'), np.float32),
((f'Drift time using alternate S{i} [ns]',
f'alt_s{i}_interaction_drift_time'), np.float32)]
dtype += [('x_s2', np.float32,
'Main S2 reconstructed X position, uncorrected [cm]',),
('y_s2', np.float32,
'Main S2 reconstructed Y position, uncorrected [cm]',)]
return dtype
def compute_loop(self, event, peaks):
result = dict(n_peaks=len(peaks))
if not len(peaks):
return result
main_s = dict()
secondary_s = dict()
for s_i in [2, 1]:
s_mask = peaks['type'] == s_i
# For determining the main / alternate S1s,
# remove all peaks after the main S2 (if there was one)
# since these cannot be related to the main S2.
# This is why S2 finding happened first.
if s_i == 1 and result[f's2_index'] != -1:
s_mask &= peaks['time'] < main_s[2]['time']
ss = peaks[s_mask]
s_indices = np.arange(len(peaks))[s_mask]
if not len(ss):
result[f's{s_i}_index'] = -1
continue
main_i = np.argmax(ss['area'])
result[f's{s_i}_index'] = s_indices[main_i]
if ss['n_competing'][main_i] > 0 and len(ss['area']) > 1:
# Find second largest S..
secondary_s[s_i] = x = ss[np.argsort(ss['area'])[-2]]
for prop in ['area', 'time', 'center_time']:
result[f'alt_s{s_i}_{prop}'] = x[prop]
s = main_s[s_i] = ss[main_i]
for prop in ['area', 'area_fraction_top', 'time', 'center_time',
'range_50p_area', 'n_competing']:
result[f's{s_i}_{prop}'] = s[prop]
if s_i == 2:
for q in 'xy':
result[f'{q}_s2'] = s[q]
# Compute a drift time only if we have a valid S1-S2 pairs
if len(main_s) == 2:
result['drift_time'] = \
main_s[2]['center_time'] - main_s[1]['center_time']
if 1 in secondary_s:
result['alt_s1_interaction_drift_time'] = \
main_s[2]['center_time'] - secondary_s[1]['center_time']
if 2 in secondary_s:
result['alt_s2_interaction_drift_time'] = \
secondary_s[2]['center_time'] - main_s[1]['center_time']
return result
@export
@strax.takes_config(
strax.Option(
name='electron_drift_velocity',
help='Vertical electron drift velocity in cm/ns (1e4 m/ms)',
default=1.3325e-4
),
strax.Option(
'fdc_map',
help='3D field distortion correction map path',
default_by_run=[
(0, pax_file('XENON1T_FDC_SR0_data_driven_3d_correction_tf_nn_v0.json.gz')), # noqa
(first_sr1_run, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part1_v1.json.gz')), # noqa
(170411_0611, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part2_v1.json.gz')), # noqa
(170704_0556, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part3_v1.json.gz')), # noqa
(170925_0622, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part4_v1.json.gz'))]), # noqa
)
class EventPositions(strax.Plugin):
depends_on = ('event_basics',)
dtype = [
('x', np.float32,
'Interaction x-position, field-distortion corrected (cm)'),
('y', np.float32,
'Interaction y-position, field-distortion corrected (cm)'),
('z', np.float32,
'Interaction z-position, field-distortion corrected (cm)'),
('r', np.float32,
'Interaction radial position, field-distortion corrected (cm)'),
('z_naive', np.float32,
'Interaction z-position using mean drift velocity only (cm)'),
('r_naive', np.float32,
'Interaction r-position using observed S2 positions directly (cm)'),
('r_field_distortion_correction', np.float32,
'Correction added to r_naive for field distortion (cm)'),
('theta', np.float32,
'Interaction angular position (radians)')]
def setup(self):
self.map = InterpolatingMap(
get_resource(self.config['fdc_map'], fmt='binary'))
def compute(self, events):
z_obs = - self.config['electron_drift_velocity'] * events['drift_time']
orig_pos = np.vstack([events['x_s2'], events['y_s2'], z_obs]).T
r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
delta_r = self.map(orig_pos)
with np.errstate(invalid='ignore', divide='ignore'):
r_cor = r_obs + delta_r
scale = r_cor / r_obs
result = dict(x=orig_pos[:, 0] * scale,
y=orig_pos[:, 1] * scale,
r=r_cor,
z_naive=z_obs,
r_naive=r_obs,
r_field_distortion_correction=delta_r,
theta=np.arctan2(orig_pos[:, 1], orig_pos[:, 0]))
with np.errstate(invalid='ignore'):
z_cor = -(z_obs ** 2 - delta_r ** 2) ** 0.5
invalid = np.abs(z_obs) < np.abs(delta_r) # Why??
z_cor[invalid] = z_obs[invalid]
result['z'] = z_cor
return result
@strax.takes_config(
strax.Option(
's1_relative_lce_map',
help="S1 relative LCE(x,y,z) map",
default_by_run=[
(0, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR0_pax-680_fdc-3d_v0.json')), # noqa
(first_sr1_run, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR1_pax-680_fdc-3d_v0.json'))]), # noqa
strax.Option(
's2_relative_lce_map',
help="S2 relative LCE(x, y) map",
default_by_run=[
(0, pax_file('XENON1T_s2_xy_ly_SR0_24Feb2017.json')),
(170118_1327, pax_file('XENON1T_s2_xy_ly_SR1_v2.2.json'))]),
strax.Option(
'elife_file',
default='https://raw.githubusercontent.com/XENONnT/strax_auxiliary_files/master/elife.npy',
help='link to the electron lifetime'))
class CorrectedAreas(strax.Plugin):
depends_on = ['event_basics', 'event_positions']
dtype = [('cs1', np.float32, 'Corrected S1 area (PE)'),
('cs2', np.float32, 'Corrected S2 area (PE)')]
def setup(self):
self.s1_map = InterpolatingMap(
get_resource(self.config['s1_relative_lce_map']))
self.s2_map = InterpolatingMap(
get_resource(self.config['s2_relative_lce_map']))
        self.elife = get_elife(self.run_id, self.config['elife_file'])
def compute(self, events):
event_positions = np.vstack([events['x'], events['y'], events['z']]).T
s2_positions = np.vstack([events['x_s2'], events['y_s2']]).T
lifetime_corr = np.exp(
events['drift_time'] / self.elife)
return dict(
cs1=events['s1_area'] / self.s1_map(event_positions),
cs2=events['s2_area'] * lifetime_corr / self.s2_map(s2_positions))
@strax.takes_config(
strax.Option(
'g1',
help="S1 gain in PE / photons produced",
default_by_run=[(0, 0.1442),
(first_sr1_run, 0.1426)]),
strax.Option(
'g2',
help="S2 gain in PE / electrons produced",
default_by_run=[(0, 11.52/(1 - 0.63)),
(first_sr1_run, 11.55/(1 - 0.63))]),
strax.Option(
'lxe_w',
help="LXe work function in quanta/keV",
default=13.7e-3),
)
class EnergyEstimates(strax.Plugin):
__version__ = '0.0.1'
depends_on = ['corrected_areas']
dtype = [
('e_light', np.float32, 'Energy in light signal [keVee]'),
('e_charge', np.float32, 'Energy in charge signal [keVee]'),
('e_ces', np.float32, 'Energy estimate [keVee]')]
def compute(self, events):
el = self.cs1_to_e(events['cs1'])
ec = self.cs2_to_e(events['cs2'])
return dict(e_light=el,
e_charge=ec,
e_ces=el + ec)
def cs1_to_e(self, x):
return self.config['lxe_w'] * x / self.config['g1']
def cs2_to_e(self, x):
return self.config['lxe_w'] * x / self.config['g2']
class EventInfo(strax.MergeOnlyPlugin):
depends_on = ['events',
'event_basics', 'event_positions', 'corrected_areas',
'energy_estimates']
save_when = strax.SaveWhen.ALWAYS
``` |
{
"source": "jhowl01/strax",
"score": 2
} |
#### File: strax/processing/general.py
```python
import strax
import numba
import numpy as np
export, __all__ = strax.exporter()
# (5-10x) faster than np.sort(order=...), as np.sort looks at all fields
# TODO: maybe this should be a factory?
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def sort_by_time(x):
"""Sort pulses by time, then channel.
Assumes you have no more than 10k channels, and records don't span
more than 100 days. TODO: FIX this
"""
if len(x) == 0:
# Nothing to do, and .min() on empty array doesn't work, so:
return x
# I couldn't get fast argsort on multiple keys to work in numba
# So, let's make a single key...
sort_key = (x['time'] - x['time'].min()) * 10000 + x['channel']
sort_i = np.argsort(sort_key)
return x[sort_i]
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def first_index_not_below(arr, t):
"""Return first index of array >= t, or len(arr) if no such found"""
for i, x in enumerate(arr):
if x >= t:
return i
return len(arr)
@export
def endtime(x):
"""Return endtime of intervals x"""
try:
return x['endtime']
except (KeyError, ValueError, IndexError):
return x['time'] + x['length'] * x['dt']
@export
# TODO: somehow numba compilation hangs on this one? reproduce / file issue?
# numba.jit(nopython=True, nogil=True, cache=True)
def from_break(x, safe_break=10000, left=True, tolerant=False):
"""Return records on side of a break at least safe_break long
If there is no such break, return the best break found.
"""
# TODO: This is extremely rough. Better to find proper gaps, and if we
# know the timing of the readers, consider breaks at end and start too.
break_i = find_break_i(x, safe_break=safe_break, tolerant=tolerant)
if left:
return x[:break_i]
else:
return x[break_i:]
@export
class NoBreakFound(Exception):
pass
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def find_break_i(x, safe_break, tolerant=True):
"""Returns LAST index of x whose time is more than safe_break away
from the x before
:param tolerant: if no break found, yield an as good as possible break
anyway.
"""
max_gap = 0
max_gap_i = -1
for _i in range(len(x) - 1):
i = len(x) - 1 - _i
gap = x[i]['time'] - x[i - 1]['time']
if gap >= safe_break:
return i
if gap > max_gap:
max_gap_i = i
max_gap = gap
if not tolerant:
raise NoBreakFound
print("\t\tDid not find safe break, using largest available break: ",
max_gap,
" ns")
return max_gap_i
@export
def fully_contained_in(things, containers):
"""Return array of len(things) with index of interval in containers
for which things are fully contained in a container, or -1 if no such
exists.
We assume all intervals are sorted by time, and b_intervals
nonoverlapping.
"""
result = np.ones(len(things), dtype=np.int32) * -1
a_starts = things['time']
b_starts = containers['time']
a_ends = strax.endtime(things)
b_ends = strax.endtime(containers)
_fc_in(a_starts, b_starts, a_ends, b_ends, result)
return result
@numba.jit(nopython=True, nogil=True, cache=True)
def _fc_in(a_starts, b_starts, a_ends, b_ends, result):
b_i = 0
for a_i in range(len(a_starts)):
# Skip ahead one or more b's if we're beyond them
# Note <= in second condition: end is an exclusive bound
while b_i < len(b_starts) and b_ends[b_i] <= a_starts[a_i]:
b_i += 1
if b_i == len(b_starts):
break
# Check for containment. We only need to check one b, since bs
# are nonoverlapping
if b_starts[b_i] <= a_starts[a_i] and a_ends[a_i] <= b_ends[b_i]:
result[a_i] = b_i
@export
def split_by_containment(things, containers):
"""Return list of thing-arrays contained in each container
Assumes everything is sorted, and containers are nonoverlapping
"""
if not len(containers):
return []
# Index of which container each thing belongs to, or -1
which_container = fully_contained_in(things, containers)
# Restrict to things in containers
mask = which_container != -1
things = things[mask]
which_container = which_container[mask]
if not len(things):
# np.split has confusing behaviour for empty arrays
return [things[:0] for _ in range(len(containers))]
# Split things up by container
split_indices = np.where(np.diff(which_container))[0] + 1
things_split = np.split(things, split_indices)
# Insert empty arrays for empty containers
empty_containers = np.setdiff1d(np.arange(len(containers)),
np.unique(which_container))
for c_i in empty_containers:
things_split.insert(c_i, things[:0])
return things_split
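# Illustrative note (not in the original module): for things starting at times 1, 2 and 10
# (each of length 1, dt 1) and containers covering [0, 5) and [9, 12), split_by_containment
# returns two arrays: the first holds the things at 1 and 2, the second the thing at 10.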
@export
@numba.jit(nopython=True, nogil=True, cache=True)
def overlap_indices(a1, n_a, b1, n_b):
"""Given interval [a1, a1 + n_a), and [b1, b1 + n_b) of integers,
return indices [a_start, a_end), [b_start, b_end) of overlapping region.
"""
if n_a < 0 or n_b < 0:
raise ValueError("Negative interval length passed to overlap test")
if n_a == 0 or n_b == 0:
return (0, 0), (0, 0)
# a: p, b: r
s = a1 - b1
if s <= -n_a:
# B is completely right of a
return (0, 0), (0, 0)
# Range in b that overlaps with a
b_start = max(0, s)
b_end = min(n_b, s + n_a)
if b_start >= b_end:
return (0, 0), (0, 0)
# Range of a that overlaps with b
a_start = max(0, -s)
a_end = min(n_a, -s + n_b)
return (a_start, a_end), (b_start, b_end)
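# Worked example (not in the original module): the integer intervals [5, 15) and [10, 30)
# overlap on [10, 15), which corresponds to indices 5..9 of the first interval and 0..4 of
# the second, so overlap_indices(5, 10, 10, 20) returns ((5, 10), (0, 5)).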
```
#### File: strax/tests/test_general_processing.py
```python
from hypothesis import given, example
from hypothesis.strategies import integers
from .helpers import sorted_intervals, disjoint_sorted_intervals
from .helpers import several_fake_records
import numpy as np
import strax
@given(sorted_intervals, disjoint_sorted_intervals)
# Tricky example: uncontained interval precedes contained interval
# (this did not produce an issue, but good to show this is handled)
@example(things=np.array([(0, 1, 0, 1),
(0, 1, 1, 5),
(0, 1, 2, 1)],
dtype=strax.interval_dtype),
containers=np.array([(0, 1, 0, 4)],
dtype=strax.interval_dtype))
def test_fully_contained_in(things, containers):
result = strax.fully_contained_in(things, containers)
assert len(result) == len(things)
if len(result):
assert result.max() < len(containers)
for i, thing in enumerate(things):
if result[i] == -1:
# Check for false negative
for c in containers:
assert not _is_contained(thing, c)
else:
# Check for false positives
assert _is_contained(thing, containers[result[i]])
@given(sorted_intervals, disjoint_sorted_intervals)
# Specific example to trigger issue #37
@example(
things=np.array([(0, 1, 2, 1)],
dtype=strax.interval_dtype),
containers=np.array([(0, 1, 0, 1), (0, 1, 2, 1)],
dtype=strax.interval_dtype))
def test_split_by_containment(things, containers):
result = strax.split_by_containment(things, containers)
assert len(result) == len(containers)
for container_i, things_in in enumerate(result):
for t in things:
assert ((t in things_in)
== _is_contained(t, containers[container_i]))
if len(result) and len(np.concatenate(result)) > 1:
assert np.diff(np.concatenate(result)['time']).min() >= 0, "Sorting bug"
def _is_contained(_thing, _container):
# Assumes dt = 1
return _container['time'] \
<= _thing['time'] \
<= _thing['time'] + _thing['length'] \
<= _container['time'] + _container['length']
@given(several_fake_records)
def test_from_break(records):
window = 5
def has_break(x):
if len(x) < 2:
return False
return np.diff(x['time']).max() > window
try:
left = strax.from_break(records, safe_break=window,
left=True, tolerant=False)
right = strax.from_break(records, safe_break=window,
left=False, tolerant=False)
except strax.NoBreakFound:
assert not has_break(records)
else:
assert len(left) + len(right) == len(records)
if len(records) > 0:
np.testing.assert_equal(np.concatenate([left, right]),
records)
if len(left) and len(right):
assert left[-1]['time'] <= right[0]['time'] - window
assert not has_break(right)
@given(integers(0, 100), integers(0, 100), integers(0, 100), integers(0, 100))
def test_overlap_indices(a1, n_a, b1, n_b):
a2 = a1 + n_a
b2 = b1 + n_b
(a_start, a_end), (b_start, b_end) = strax.overlap_indices(a1, n_a, b1, n_b)
assert a_end - a_start == b_end - b_start, "Overlap must be equal length"
assert a_end >= a_start, "Overlap must be nonnegative"
if n_a == 0 or n_b == 0:
assert a_start == a_end == b_start == b_end == 0
return
a_filled = np.arange(a1, a2)
b_filled = np.arange(b1, b2)
true_overlap = np.intersect1d(a_filled, b_filled)
if not len(true_overlap):
assert a_start == a_end == b_start == b_end == 0
return
true_a_inds = np.searchsorted(a_filled, true_overlap)
true_b_inds = np.searchsorted(b_filled, true_overlap)
found = (a_start, a_end), (b_start, b_end)
expected = (
(true_a_inds[0], true_a_inds[-1] + 1),
(true_b_inds[0], true_b_inds[-1] + 1))
assert found == expected
``` |
{
"source": "jhowl01/WFSim",
"score": 2
} |
#### File: wfsim/core/afterpulse.py
```python
import logging
import numpy as np
from strax import exporter
from .pulse import Pulse
from .s2 import S2
export, __all__ = exporter()
logging.basicConfig(handlers=[logging.StreamHandler()])
log = logging.getLogger('wfsim.core')
log.setLevel('WARNING')
@export
class PhotoIonization_Electron(S2):
"""
Produce electron after pulse simulation, using already built cdfs
The cdfs follow distribution parameters extracted from data.
"""
def __init__(self, config):
super().__init__(config)
self._photon_timings = []
def generate_instruction(self, signal_pulse, signal_pulse_instruction):
if len(signal_pulse._photon_timings) == 0:
return []
return self.electron_afterpulse(signal_pulse, signal_pulse_instruction)
def electron_afterpulse(self, signal_pulse, signal_pulse_instruction):
"""
For electron afterpulses we assume a uniform x, y
"""
delaytime_pmf_hist = self.resource.uniform_to_ele_ap
        # To save calculation we first find out how many photons will give rise to afterpulses
n_electron = np.random.poisson(delaytime_pmf_hist.n
* len(signal_pulse._photon_timings)
* self.config['photoionization_modifier'])
ap_delay = delaytime_pmf_hist.get_random(n_electron)
# Reasonably bin delay time that would be diffuse out together
ap_delay_i, n_electron_i = self._reduce_instruction_timing(
ap_delay,
delaytime_pmf_hist)
n_instruction = len(ap_delay_i)
# Randomly select original photon as time zeros
t_zeros = signal_pulse._photon_timings[np.random.randint(
low=0, high=len(signal_pulse._photon_timings),
size=n_instruction)]
instruction = np.repeat(signal_pulse_instruction[0], n_instruction)
instruction['type'] = 4 # pi_el
instruction['time'] = t_zeros - self.config['drift_time_gate']
instruction['x'], instruction['y'] = self._rand_position(n_instruction)
instruction['z'] = - ap_delay_i * self.config['drift_velocity_liquid']
instruction['amp'] = n_electron_i
return instruction
def _reduce_instruction_timing(self, ap_delay, delaytime_pmf_hist):
# Binning the delay time, so electron timing within each
# will be diffused to fill the whole bin
delaytime_spread = np.sqrt(2 * self.config['diffusion_constant_longitudinal']\
* delaytime_pmf_hist.bin_centers)
delaytime_spread /= self.config['drift_velocity_liquid']
        coarse_time, coarse_time_i = [], 100  # Start at 100ns, as it's smaller than the single-electron width
while coarse_time_i < delaytime_pmf_hist.bin_centers[-1]:
coarse_time.append(coarse_time_i)
coarse_time_i += delaytime_spread[np.argmin(np.abs(coarse_time_i - delaytime_pmf_hist.bin_centers))]
coarse_time = np.array(coarse_time)
idx = np.digitize(ap_delay[ap_delay < coarse_time[-1]], coarse_time)
idxs, n = np.unique(idx, return_counts=True)
_ap_delay = coarse_time[idxs]
return _ap_delay, n
def _rand_position(self, n):
Rupper = self.config['tpc_radius']
r = np.sqrt(np.random.uniform(0, Rupper*Rupper, n))
angle = np.random.uniform(-np.pi, np.pi, n)
return r * np.cos(angle), r * np.sin(angle)
@export
class PhotoElectric_Electron(S2):
"""
Produce electron after S2 pulse simulation, using a gaussian distribution
"""
def __init__(self, config):
super().__init__(config)
self._photon_timings = []
def generate_instruction(self, signal_pulse, signal_pulse_instruction):
if len(signal_pulse._photon_timings) == 0:
return []
return self.electron_afterpulse(signal_pulse, signal_pulse_instruction)
def electron_afterpulse(self, signal_pulse, signal_pulse_instruction):
n_electron = np.random.poisson(self.config['photoelectric_p']
* len(signal_pulse._photon_timings)
* self.config['photoelectric_modifier'])
ap_delay = np.clip(
np.random.normal(self.config['photoelectric_t_center'] + self.config['drift_time_gate'],
self.config['photoelectric_t_spread'],
n_electron), 0, None)
# Randomly select original photon as time zeros
t_zeros = signal_pulse._photon_timings[np.random.randint(
low=0,
high=len(signal_pulse._photon_timings),
size=n_electron)]
instruction = np.repeat(signal_pulse_instruction[0], n_electron)
instruction['type'] = 6 # pe_el
instruction['time'] = t_zeros + self.config['drift_time_gate']
instruction['x'], instruction['y'] = self._rand_position(n_electron)
instruction['z'] = - ap_delay * self.config['drift_velocity_liquid']
instruction['amp'] = 1
return instruction
def _rand_position(self, n):
Rupper = self.config['tpc_radius']
r = np.sqrt(np.random.uniform(0, Rupper*Rupper, n))
angle = np.random.uniform(-np.pi, np.pi, n)
return r * np.cos(angle), r * np.sin(angle)
@export
class PMT_Afterpulse(Pulse):
"""
    Produce PMT afterpulse simulation, using already-built CDFs.
    The CDFs follow distribution parameters extracted from data.
"""
def __init__(self, config):
if not config['enable_pmt_afterpulses']:
return
super().__init__(config)
        # Convert lists back to ndarrays, as ndarray is not supported by JSON
for k in self.resource.uniform_to_pmt_ap.keys():
for q in self.resource.uniform_to_pmt_ap[k].keys():
if isinstance(self.resource.uniform_to_pmt_ap[k][q], list):
self.resource.uniform_to_pmt_ap[k][q] = np.array(self.resource.uniform_to_pmt_ap[k][q])
def __call__(self, signal_pulse):
if len(signal_pulse._photon_timings) == 0:
self.clear_pulse_cache()
return
self._photon_timings, self._photon_channels, self._photon_gains = \
self.photon_afterpulse(signal_pulse, self.resource, self.config)
super().__call__()
@staticmethod
def photon_afterpulse(signal_pulse, resource, config):
"""
        For PMT afterpulses, gain and DPE generation are a bit different from standard photons
"""
element_list = resource.uniform_to_pmt_ap.keys()
_photon_timings = []
_photon_channels = []
_photon_amplitude = []
for element in element_list:
delaytime_cdf = resource.uniform_to_pmt_ap[element]['delaytime_cdf']
amplitude_cdf = resource.uniform_to_pmt_ap[element]['amplitude_cdf']
delaytime_bin_size = resource.uniform_to_pmt_ap[element]['delaytime_bin_size']
amplitude_bin_size = resource.uniform_to_pmt_ap[element]['amplitude_bin_size']
            # Assign each photon a FIRST random uniform number rU0 from (0, 1] for timing
rU0 = 1 - np.random.rand(len(signal_pulse._photon_timings))
# Select those photons with U <= max of cdf of specific channel
cdf_max = delaytime_cdf[signal_pulse._photon_channels, -1]
sel_photon_id = np.where(rU0 <= cdf_max * config['pmt_ap_modifier'])[0]
if len(sel_photon_id) == 0:
continue
sel_photon_channel = signal_pulse._photon_channels[sel_photon_id]
            # Assign each selected photon a SECOND random uniform number rU1 from (0, 1] for amplitude
rU1 = 1 - np.random.rand(len(sel_photon_channel))
# The map is made so that the indices are delay time in unit of ns
if 'Uniform' in element:
ap_delay = (np.random.uniform(delaytime_cdf[sel_photon_channel, 0],
delaytime_cdf[sel_photon_channel, 1])
* delaytime_bin_size)
ap_amplitude = np.ones_like(ap_delay)
else:
ap_delay = (np.argmin(
np.abs(
delaytime_cdf[sel_photon_channel]
- rU0[sel_photon_id][:, None]), axis=-1) * delaytime_bin_size
- config['pmt_ap_t_modifier'])
if len(amplitude_cdf.shape) == 2:
ap_amplitude = np.argmin(
np.abs(
amplitude_cdf[sel_photon_channel]
- rU1[:, None]), axis=-1) * amplitude_bin_size
else:
ap_amplitude = np.argmin(
np.abs(
amplitude_cdf[None, :]
- rU1[:, None]), axis=-1) * amplitude_bin_size
_photon_timings.append(signal_pulse._photon_timings[sel_photon_id] + ap_delay)
_photon_channels.append(signal_pulse._photon_channels[sel_photon_id])
_photon_amplitude.append(np.atleast_1d(ap_amplitude))
if len(_photon_timings) > 0:
_photon_timings = np.hstack(_photon_timings)
_photon_channels = np.hstack(_photon_channels).astype(np.int64)
_photon_amplitude = np.hstack(_photon_amplitude)
_photon_gains = np.array(config['gains'])[_photon_channels] * _photon_amplitude
return _photon_timings, _photon_channels, _photon_gains
else:
return np.zeros(0, np.int64), np.zeros(0, np.int64), np.zeros(0)
```
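Both afterpulse generators above place electrons uniformly over the TPC cross-section via `_rand_position`, which draws the radius as `sqrt(U(0, R^2))` so the points are uniform in area rather than bunched at the centre. A minimal standalone sketch of that sampling (the radius value is illustrative, not a real detector parameter):
```python
import numpy as np

def uniform_disk(n, radius):
    """Sample n (x, y) points uniformly over a disk of the given radius.

    Drawing r from sqrt(U(0, radius**2)) compensates for the area of a thin
    annulus growing linearly with r; drawing r from U(0, radius) directly
    would over-populate the centre.
    """
    r = np.sqrt(np.random.uniform(0, radius * radius, n))
    angle = np.random.uniform(-np.pi, np.pi, n)
    return r * np.cos(angle), r * np.sin(angle)

# Illustrative radius only.
x, y = uniform_disk(1000, radius=60.0)
assert np.all(x ** 2 + y ** 2 <= 60.0 ** 2 + 1e-9)
```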
#### File: wfsim/core/s1.py
```python
import logging
from numba import njit
import numpy as np
from strax import exporter
from .pulse import Pulse
export, __all__ = exporter()
logging.basicConfig(handlers=[logging.StreamHandler()])
log = logging.getLogger('wfsim.core')
log.setLevel('WARNING')
try:
import nestpy
except (ModuleNotFoundError, ImportError):
log.warning('Nestpy is not found, + nest mode will not work!')
@export
class NestId:
"""
Nest ids for referring to different scintillation models, only ER is actually validated
See github.com/NESTCollaboration/nestpy/blob/8eb79414e5f834eb6cf6ddba5d6c433c6b0cbc70/src/nestpy/helpers.py#L27
"""
NR = [0]
ALPHA = [6]
ER = [7, 8, 11, 12]
LED = [20]
_ALL = NR + ALPHA + ER + LED
@export
class S1(Pulse):
"""
    Given temporal inputs as well as the number of photons,
    randomly generate the photon timing and channel distribution.
"""
nestpy_calc = None
def __init__(self, config):
super().__init__(config)
self.phase = 'liquid' # To distinguish singlet/triplet time delay.
if 'nest' in self.config['s1_model_type'] and (self.nestpy_calc is None):
log.info('Using NEST for scintillation time without set calculator\n'
'Creating new nestpy calculator')
self.nestpy_calc = nestpy.NESTcalc(nestpy.DetectorExample_XENON10())
# Check if user specified s1 model type exist
S1VALIDTYPE = ['', 'simple', 'custom', 'optical_propagation', 'nest']
def s1_valid_type(s, c='+ ,'):
if len(c) > 0:
for k in s.split(c[0]):
s1_valid_type(k, c[1:])
else:
assert s in S1VALIDTYPE, f'Model type "{s}" not in {S1VALIDTYPE}'
s1_valid_type(self.config['s1_model_type'])
def __call__(self, instruction):
"""Main s1 simulation function. Called by RawData for s1 simulation.
Generates first number of photons in the s1, then timings and channels.
These arrays are fed to Pulse to generate the data.
        :param instruction: Array with dtype wfsim.instruction_dtype """
if len(instruction.shape) < 1:
# shape of recarr is a bit strange
instruction = np.array([instruction])
# _, _, t, x, y, z, n_photons, recoil_type, *rest = [
# np.array(v).reshape(-1) for v in zip(*instruction)]
t = instruction['time']
x = instruction['x']
y = instruction['y']
z = instruction['z']
n_photons = instruction['amp']
recoil_type = instruction['recoil']
positions = np.array([x, y, z]).T # For map interpolation
n_photon_hits = self.get_n_photons(n_photons=n_photons,
positions=positions,
s1_lce_correction_map=self.resource.s1_lce_correction_map,
config=self.config)
        # The new way interpolation is written always requires a list
self._photon_channels = self.photon_channels(positions=positions,
n_photon_hits=n_photon_hits,
config=self.config,
s1_pattern_map=self.resource.s1_pattern_map)
extra_targs = {}
if 'nest' in self.config['s1_model_type']:
extra_targs['n_photons_emitted'] = n_photons
extra_targs['n_excitons'] = instruction['n_excitons']
extra_targs['local_field'] = instruction['local_field']
extra_targs['e_dep'] = instruction['e_dep']
extra_targs['nestpy_calc'] = self.nestpy_calc
self._photon_timings = self.photon_timings(t=t,
n_photon_hits=n_photon_hits,
recoil_type=recoil_type,
config=self.config,
phase=self.phase,
channels=self._photon_channels,
positions=positions,
resource=self.resource,
**extra_targs)
# Sorting times according to the channel, as non-explicit sorting
# is performed later and this breaks timing of individual channels/arrays
sortind = np.argsort(self._photon_channels)
self._photon_channels = self._photon_channels[sortind]
self._photon_timings = self._photon_timings[sortind]
super().__call__()
@staticmethod
def get_n_photons(n_photons, positions, s1_lce_correction_map, config):
"""Calculates number of detected photons based on number of photons in total and the positions
        :param n_photons: 1d array of ints with number of emitted S1 photons
:param positions: 2d array with xyz positions of interactions
:param s1_lce_correction_map: interpolator instance of s1 light yield map
:param config: dict wfsim config
        returns array with the number of detected photons"""
ly = s1_lce_correction_map(positions)
        # Depending on whether you use the data-driven or MC pattern map for light yield,
        # the shape of n_photon_hits will change. MC needs a squeeze
if len(ly.shape)!=1:
ly=np.squeeze(ly,axis=-1)
ly /= 1 + config['p_double_pe_emision']
ly *= config['s1_detection_efficiency']
n_photon_hits = np.random.binomial(n=n_photons, p=ly)
return n_photon_hits
@staticmethod
def photon_channels(positions, n_photon_hits, config, s1_pattern_map):
"""Calculate photon arrival channels
:params positions: 2d array with xy positions of interactions
:params n_photon_hits: 1d array of ints with number of photon hits to simulate
:params config: dict wfsim config
:params s1_pattern_map: interpolator instance of the s1 pattern map
returns nested array with photon channels
"""
channels = np.arange(config['n_tpc_pmts']) # +1 for the channel map
p_per_channel = s1_pattern_map(positions)
p_per_channel[:, np.in1d(channels, config['turned_off_pmts'])] = 0
_photon_channels = np.array([]).astype(np.int64)
for ppc, n in zip(p_per_channel, n_photon_hits):
_photon_channels = np.append(_photon_channels,
np.random.choice(
channels,
size=n,
p=ppc / np.sum(ppc),
replace=True))
return _photon_channels
@staticmethod
def photon_timings(t, n_photon_hits, recoil_type, config, phase,
channels=None, positions=None, e_dep=None,
n_photons_emitted=None, n_excitons=None,
                       local_field=None, resource=None, nestpy_calc=None):
        """Calculate the distribution of photon arrival timings
:param t: 1d array of ints
:param n_photon_hits: number of photon hits, 1d array of ints
:param recoil_type: 1d array of ints
:param config: dict wfsim config
:param phase: str "liquid"
:param channels: list of photon hit channels
:param positions: nx3 array of true XYZ positions from instruction
:param e_dep: energy of the deposit, 1d float array
        :param n_photons_emitted: number of originally emitted photons/quanta, 1d int array
        :param n_excitons: number of excitons in the deposit, 1d int array
:param local_field: local field in the point of the deposit, 1d array of floats
:param resource: pointer to resources class of wfsim that contains s1 timing splines
returns photon timing array"""
_photon_timings = np.repeat(t, n_photon_hits)
_n_hits_total = len(_photon_timings)
if len(_photon_timings) == 0:
return _photon_timings.astype(np.int64)
if 'optical_propagation' in config['s1_model_type']:
z_positions = np.repeat(positions[:, 2], n_photon_hits)
_photon_timings += S1.optical_propagation(channels, z_positions, config,
spline=resource.s1_optical_propagation_spline).astype(np.int64)
if 'simple' in config['s1_model_type']:
# Simple S1 model enabled: use it for ER and NR.
_photon_timings += np.random.exponential(config['s1_decay_time'], _n_hits_total).astype(np.int64)
_photon_timings += np.random.normal(0, config['s1_decay_spread'], _n_hits_total).astype(np.int64)
if 'nest' in config['s1_model_type'] or 'custom' in config['s1_model_type']:
# Pulse model depends on recoil type
counts_start = 0
for i, counts in enumerate(n_photon_hits):
if 'custom' in config['s1_model_type']:
for k in vars(NestId):
if k.startswith('_'):
continue
if recoil_type[i] in getattr(NestId, k):
str_recoil_type = k
try:
_photon_timings[counts_start: counts_start + counts] += \
getattr(S1, str_recoil_type.lower())(
size=counts,
config=config,
phase=phase).astype(np.int64)
except AttributeError:
raise AttributeError(f"Recoil type must be ER, NR, alpha or LED, "
f"not {recoil_type}. Check nest ids")
if 'nest' in config['s1_model_type']:
scint_time = nestpy_calc.GetPhotonTimes(
nestpy.INTERACTION_TYPE(recoil_type[i]),
n_photons_emitted[i],
n_excitons[i],
local_field[i],
e_dep[i])
scint_time = np.clip(scint_time, 0, config.get('maximum_recombination_time', 10000))
_photon_timings[counts_start: counts_start + counts] += np.array(scint_time[:counts], np.int64)
counts_start += counts
return _photon_timings
@staticmethod
    def optical_propagation(channels, z_positions, config, spline):
        """Function getting times from s1 timing splines:
:param channels: The channels of all s1 photon
:param z_positions: The Z positions of all s1 photon
:param config: current configuration of wfsim
:param spline: pointer to s1 optical propagation splines from resources
"""
assert len(z_positions) == len(channels), 'Give each photon a z position'
prop_time = np.zeros_like(channels)
z_rand = np.array([z_positions, np.random.rand(len(channels))]).T
is_top = channels < config['n_top_pmts']
prop_time[is_top] = spline(z_rand[is_top], map_name='top')
is_bottom = channels >= config['n_top_pmts']
prop_time[is_bottom] = spline(z_rand[is_bottom], map_name='bottom')
return prop_time
@staticmethod
    def alpha(size, config, phase):
        """ Calculate S1 photon timings for an alpha decay. Negligible recombination time, not validated
:param size: 1d array of ints, number of photons
:param config: dict wfsim config
:param phase: str "liquid"
return 1d array of photon timings"""
return Pulse.singlet_triplet_delays(size, config['s1_ER_alpha_singlet_fraction'], config, phase)
@staticmethod
def led(size, config, **kwargs):
""" distribute photons uniformly within the LED pulse length, not validated
:param size: 1d array of ints, number of photons
:param config: dict wfsim config
return 1d array of photon timings"""
return np.random.uniform(0, config['led_pulse_length'], size)
@staticmethod
def er(size, config, phase):
"""Complex ER model, not validated
:param size: 1d array of ints, number of photons
:param config: dict wfsim config
:param phase: str "liquid"
return 1d array of photon timings
"""
# How many of these are primary excimers? Others arise through recombination.
# This config is not set for the nT fax config todo
config.setdefault('liquid_density', 1.872452802978054e+30)
density = config['liquid_density'] / (units.g / units.cm ** 3)
excfrac = 0.4 - 0.11131 * density - 0.0026651 * density ** 2 # primary / secondary excimers
excfrac = 1 / (1 + excfrac) # primary / all excimers
# primary / all excimers that produce a photon:
excfrac /= 1 - (1 - excfrac) * (1 - config['s1_ER_recombination_fraction'])
config['s1_ER_primary_excimer_fraction'] = excfrac
log.debug('Inferred s1_ER_primary_excimer_fraction %s' % excfrac)
# Recombination time from NEST 2014
# 3.5 seems fishy, they fit an exponential to data, but in the code they use a non-exponential distribution...
efield = (config['drift_field'] / (units.V / units.cm))
reco_time = 3.5 / \
0.18 * (1 / 20 + 0.41) * np.exp(-0.009 * efield)
config['s1_ER_recombination_time'] = reco_time
log.debug('Inferred s1_ER_recombination_time %s' % reco_time)
timings = np.random.choice([0, reco_time], size, replace=True,
p=[excfrac, 1 - excfrac])
primary = timings == 0
size_primary = len(timings[primary])
timings[primary] += Pulse.singlet_triplet_delays(
size_primary, config['s1_ER_primary_singlet_fraction'], config, phase)
# Correct for the recombination time
# For the non-exponential distribution: see Kubota 1979, solve eqn 2 for n/n0.
# Alternatively, see Nest V098 source code G4S1Light.cc line 948
timings[~primary] *= 1 / (-1 + 1 / np.random.uniform(0, 1, size - size_primary))
# Update max recombine time in the nT fax config
config['maximum_recombination_time'] = 1000
timings[~primary] = np.clip(timings[~primary], 0, config['maximum_recombination_time'])
timings[~primary] += Pulse.singlet_triplet_delays(
size - size_primary, config['s1_ER_secondary_singlet_fraction'], config, phase)
return timings
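    # Note on the recombination-time sampling above (a sketch, not part of
    # wfsim itself): `timings[~primary] *= 1 / (-1 + 1 / np.random.uniform(...))`
    # draws t = reco_time * (1/u - 1) with u ~ U(0, 1), whose survival function
    # is P(T > t) = 1 / (1 + t / reco_time), the Kubota 1979 form quoted above.
    # Quick numerical check:
    #
    #     tau = 3.5
    #     t = tau * (1.0 / np.random.uniform(0, 1, 1_000_000) - 1.0)
    #     print((t > tau).mean())   # ~0.5, since P(T > tau) = 1/2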
@staticmethod
    def nr(size, config, phase):
        """NR model, not validated
:param size: 1d array of ints, number of photons
:param config: dict wfsim config
:param phase: str "liquid"
return 1d array of photon timings
"""
return Pulse.singlet_triplet_delays(size, config['s1_NR_singlet_fraction'], config, phase)
``` |
{
"source": "Jhow-Rambo/itdt_backend_fastAPI",
"score": 2
} |
#### File: routes/toten/TotenRoute.py
```python
from ..index import *
from fastapi import HTTPException
from src.database.models.totens import models, schemas
@router.get("/toten/{id}", response_model=schemas.Toten)
async def read_one_toten(id: int, db: Session = Depends(get_db)):
return TotenService.get_totens_by_id(db, id)
@router.get("/toten", response_model=List[schemas.Toten])
async def read_totens(db: Session = Depends(get_db)):
allTotens = TotenService.get_totens(db)
return allTotens
@router.post("/toten", response_model=schemas.TotenCreate, status_code=201)
async def create_totens(toten: schemas.TotenCreate, db: Session = Depends(get_db)):
return TotenService.create_toten(db, toten)
@router.delete("/toten/{id}")
async def delete_totens(id: int, db: Session = Depends(get_db)):
return TotenService.delete_totens(db, id)
```
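These routes are thin wrappers around the service layer, so they can be exercised end-to-end with FastAPI's `TestClient`. A minimal sketch, assuming the router above is included in an `app` object exposed by a hypothetical `src.main` module (not shown in this dump):
```python
# Sketch only: `src.main` and its `app` are assumed, not part of this repo dump.
from fastapi.testclient import TestClient
from src.main import app

client = TestClient(app)

def test_read_totens():
    response = client.get("/toten")
    assert response.status_code == 200
    assert isinstance(response.json(), list)
```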
#### File: services/inference/InferenceService.py
```python
from ..index import *
from src.database.models.inference import models, schemas
def create_inference(db: Session, inference: schemas.InferenceCreate, toten_id: int):
db_inference = models.Inference(**inference.dict(), toten_id=toten_id)
db.add(db_inference)
db.commit()
db.refresh(db_inference)
return inference
def get_inferences(db: Session):
db_inferences = db.query(models.Inference).all()
return db_inferences
``` |
{
"source": "Jhow-Rambo/itdt-microservices",
"score": 3
} |
#### File: Jhow-Rambo/itdt-microservices/trt_micro_service.py
```python
from icecream import ic
import os
import time
import uuid
from numpy.lib.npyio import save
import pika
import base64
import numpy as np
from imageio import imread
import io
import json
import ast
from threading import Thread
import queue
import datetime
q=queue.Queue()
import cv2
import pycuda.autoinit # This is needed for initializing CUDA driver
import RPi.GPIO as GPIO  # the original second GPIO import shadowed the first; one binding is enough (Jetson.GPIO provides this RPi.GPIO-compatible interface)
from obj_tracking import CentroidTracker
from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO
from micro_services.request import addNewInference
def __init__(self, cls_dict):
self.cls_dict = cls_dict
WINDOW_NAME = 'TrtYOLODemo'
# def rabbitMQ(data):
# connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
# channel = connection.channel()
# channel.queue_declare(queue='in')
# # Send the image tho rabbitMQ
# channel.basic_publish(exchange='',
# routing_key='in',
# body=json.dumps(data))
def show_image(cam):
ret, frame = cam.read()
cv2.imshow('frame', frame)
cv2.waitKey(25)
def loop_and_detect(cam, trt_yolo, conf_th, vis):
"""Continuously capture images from camera and do object detection.
# Arguments
cam: the camera instance (video source).
trt_yolo: the TRT YOLO object detector instance.
conf_th: confidence/score threshold for object detection.
vis: for visualization.
"""
full_scrn = False
fps = 0.0
tic = time.time()
ct = CentroidTracker()
auxId = -1
save_frames = []
n = 0
port = 33
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(port, GPIO.IN)
controll = True
while True:
ret, frame = cam.read()
if frame is None or ret is not True:
continue
try:
save_frames.append(frame)
result = GPIO.input(port)
aux = True
# cv2.imshow('frame', frame)
show_image(cam)
while not result:
show_image(cam)
result = GPIO.input(port)
if controll:
boxes, confs, clss = trt_yolo.detect(save_frames[0], conf_th)
results = vis.draw_bboxes(save_frames[0], boxes, confs, clss)
frame = results[0]
# frame = show_fps(frame, fps)
cls_image = results[1]
newBoxes = results[2]
print(newBoxes)
print(cls_image)
# Convert captured image to JPG
ret, buffer = cv2.imencode('.jpg', frame)
# Convert to base64 encoding and show start of data
base = base64.b64encode(buffer).decode('utf-8')
data = {
0: frame,
1: cls_image,
2: confs.tolist()
}
Thread(target=addNewInference, args=(data,)).start()
controll = False
save_frames = []
controll = True
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
except Exception as e:
print(str(e), 'loop error')
continue
def main():
# yolo configs
category_num = 80
model = 'yolov4-tiny-416'
cls_dict = get_cls_dict(category_num)
vis = BBoxVisualization(cls_dict)
trt_yolo = TrtYOLO(model, category_num)
# Connect to camera
cam = cv2.VideoCapture()
cam.open("rtsp://admin:[email protected]:554/h264/ch1/sub")
# cam.open("rtsp://admin:[email protected]:554/h264/ch1/sub")
if not cam.isOpened():
raise SystemExit('ERROR: failed to open camera!')
# Thread(target = loop_and_detect(cam, trt_yolo, 0.3, vis)).start()
# Loop to detect
loop_and_detect(cam, trt_yolo, conf_th=0.3, vis=vis)
# Close all
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
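# Note on the frame hand-off above (a sketch, not part of this service): the
# loop JPEG-encodes the annotated frame with cv2.imencode and base64-encodes
# the bytes so they could travel inside a JSON payload (see the commented-out
# RabbitMQ publisher).  The round trip looks like this:
#
#     ok, buffer = cv2.imencode('.jpg', frame)
#     payload = base64.b64encode(buffer).decode('utf-8')
#     restored = cv2.imdecode(
#         np.frombuffer(base64.b64decode(payload), dtype=np.uint8),
#         cv2.IMREAD_COLOR)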
``` |
{
"source": "jhoyland/MooDown",
"score": 3
} |
#### File: jhoyland/MooDown/MooDown.py
```python
from markdown import markdown
from lxml import etree as et
from pathlib import Path
import re
import base64
import os
import argparse
# Puts the text in a div element - thought I would use it more, didn't really need a function
def wrapDiv(tx):
return "<div>" + tx + "</div>"
def wrapSpan(tx):
return "<span>" + tx + "</span>"
def ElementWithText(name,text,textname='text',CDATA=True):
el = et.Element(name)
elText = et.SubElement(el,textname)
if CDATA:
elText.text = et.CDATA(text)
else:
elText.text = text
return el
def SubElementWithText(parent,name,text,textname='text',CDATA=True):
el = ElementWithText(name,text,textname,CDATA)
parent.append(el)
return el
# Extracts mark fraction from end of answer lines
# Returns None if no fraction found.
def getFraction(txt):
match = re.findall(r'\#(\*|\d{1,3})$',txt)
frac = 0
if len(match) > 0:
if match[0] == '*':
frac = 100
else:
frac = max(min(100,int(match[0])),0)
else:
return None
return frac
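# For example (illustrative only):
#   getFraction("42 #*")  -> 100   (the '*' shorthand means full marks)
#   getFraction("42 #50") -> 50
#   getFraction("42")     -> None  (no mark fraction present)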
# Extracts feedback text.
# Returns None if none found
def getFeedback(txt):
match = re.findall(r'"([^"]*)"',txt)
if len(match) > 0:
return match[0]
else:
return None
# Extracts tolerance in numeric questions.
# Returns None if none found or tolerance was invalid (not convertable to float)
def getTolerance(txt):
match = re.findall(r'\{(.*?)\}',txt)
if len(match) > 0:
try:
float(match[0])
return match[0]
except ValueError:
return None
else:
return None
# Parses the ordered and unordered lists as answers to numerical, multiple choice, true-false or short answer questions
def parseListAnswers(listElement):
qtype = 'unknown'
fracset = False
# List to store multiple answers
answers = []
# Ordered list defaults to mc and unordered to numerical
if listElement.tag == 'ol':
qtype = 'multichoice'
else:
qtype = 'numerical'
# Step through the list item elements - each one represents one answer
for answer in listElement.iter('li'):
atext = re.split('[\{"#]',answer.text)[0].strip()
fb = getFeedback(answer.text)
        # if this is the first answer, check to see if it is true / false
if len(answers) == 0:
firstWord = atext.split(None,1)[0].lower()
if firstWord in 'truefalse':
qtype = 'truefalse'
# Create the true and false answers - this is really a special case of multichoice
answers.append({'ans':firstWord,'tol':None,'fbk':fb,'frc':'100'})
answers.append({'ans':qtype.replace(firstWord,''),'tol':None,'fbk':None,'frc':'0'})
fracset = True
continue
        # If we've already established this is true/false, the second answer only provides feedback for an incorrect response
if qtype == 'truefalse':
answers[1]['fbk']=fb
break
tolerance = None
        # If this is a numerical question try to convert the answer to a number - if this fails the question switches to short answer
if qtype == 'numerical':
try:
a = float(atext)
tolerance = getTolerance(answer.text)
except ValueError:
qtype = 'shortanswer'
# Get the score for this question. Set to zero if none found.
frac = getFraction(answer.text)
if frac is not None:
fracset = True
else:
frac = '0'
# Append the answer to the list
answers.append({'ans':atext,'tol':tolerance,'fbk':fb,'frc':frac})
# If no fraction was specified for any answer, set the first answer to 100% correct
if not fracset and len(answers)>0:
answers[0]['frc']=100
return (answers,qtype)
# Generate an XML answer. Non-numerical answers need their answers wrapped in CDATA section
def createAnswerXML(answer,isNumeric=True):
answerElement = ElementWithText('answer',answer['ans'],CDATA = not isNumeric)
answerElement.set('fraction',str(answer['frc']))
if 'tol' in answer:
if answer['tol'] is not None:
SubElementWithText(answerElement, 'tolerance', answer['tol'], CDATA = False)
if answer['fbk'] is not None:
SubElementWithText(answerElement,'feedback',answer['fbk'])
return answerElement
# Generate a cloze format numeric answer
def createAnswerCloze(answer):
answerString = "%{}%{}".format(answer['frc'],answer['ans'])
if answer['tol'] is not None:
answerString += ":{}".format(answer['tol'])
if answer['fbk'] is not None:
answerString += "#{}".format(answer['fbk'])
return answerString
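# For example (illustrative only), an answer dict such as
#   {'ans': '9.81', 'tol': '0.1', 'fbk': 'Good', 'frc': 100}
# becomes the Moodle embedded-answer (Cloze) fragment '%100%9.81:0.1#Good'.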
# Question text must be wrapped in CDATA tags. This function does that and also base64 converts
# image tag contents. Yes, yes I know functions should just do one thing - I'll fix it later
def generateCDATA(qtextElement):
# Locate image tags in the XML
images = qtextElement.findall('.//img')
# Iterate over the image tags
for image in images:
# Get the image filename and extension
src = image.get('src')
extension = os.path.splitext(src)[1][1:]
        # For jpegs and pngs, open the image file and base64 encode it, replacing the file link
        # with the encoded image in the HTML. Change tag attributes accordingly
if extension in "jpeg jpg png":
with open(src,'rb') as img_file:
image64 = base64.b64encode(img_file.read()).decode('utf-8')
imageString = "data:image/" + extension + ";base64," + image64
image.set('src',imageString)
# TO DO: Warn could not open img_file
# TO DO: for SVG load and integrate the SVG into the HTML directly
# TO DO: skip Base 64 encoding for web images
    # TO DO: raise all heading levels by 2 to allow H1 / H2 in question text
# Convert the HTML back to a string and embed it in a CDATA section
return et.CDATA(et.tostring(qtextElement,pretty_print=True).decode('utf-8'))
# Parse arguments for input filename
argparser = argparse.ArgumentParser()
argparser.add_argument('fn',type=str,help='Markdown format source file')
argparser.add_argument('-html',action='store_true',help='Output intermediate HTML (mostly for debugging)')
argparser.add_argument('-echo',action='store_true',help='Print final XML to console')
args = argparser.parse_args()
# Read file
contents = Path(args.fn).read_text()
fnroot = os.path.splitext(args.fn)[0]
save_html = args.html
echo_to_console = args.echo
# Parse markdown into HTML
htmlString = markdown(contents)
# Create a div to act as the root node so we have a well formed doc
htmlTree = et.fromstring(wrapDiv(htmlString))
#print(et.tostring(htmlTree,pretty_print=True).decode('utf-8'))
tree = et.ElementTree(htmlTree)
if save_html:
tree.write(fnroot+'.html', pretty_print=True, xml_declaration=True, encoding="utf-8")
quiz = et.Element('quiz')
# Step through sibling elements
mode = 'start'
qtype = 'none'
answercount = 0
questioncount = 0
# Find the top level headings - these define the questions
for h1 in tree.iter('h1'):
# question type is initially unknown
mode = 'question'
qtype = 'unknown'
questioncount = questioncount + 1
    # Create an empty question
question = et.SubElement(quiz,'question')
qName = et.SubElement(question,'name')
qNameText = et.SubElement(qName,'text')
qNameText.text = h1.text
questionText = et.SubElement(question,'questiontext')
questionText.set("format","html")
questionTextText = et.SubElement(questionText,'text')
qtextContents = et.Element('div')
for su in h1.itersiblings():
skipTag = False
        if su.tag == 'h1': # Found another question - TO DO: wrap up to determine if the previous question was valid
break
if su.tag == 'h2' and su.text.lower().startswith('cloze'):
qtype = 'cloze'
mode = 'cloze'
question.set("type",qtype)
continue
# An H2 tag starting 'ans' (case insensitive) starts the answer section of the question
if su.tag == 'h2' and su.text.lower().startswith('ans') and qtype == 'unknown':
# Finish the question text by converting to CDATA
questionTextText.text = generateCDATA(qtextContents)
# Create an empty div for the next question's text
qtextContents = et.Element('div')
mode = 'answer'
continue
if mode == 'answer':
if su.tag in 'ol ul':
answers,qtype = parseListAnswers(su)
question.set("type",qtype)
for answer in answers:
answerXML = createAnswerXML(answer,qtype=='numerical')
question.append(answerXML)
continue
if mode == 'cloze':
if su.tag in 'ol ul':
answers,qtype = parseListAnswers(su)
clzqtext ="{{1:{}:".format(qtype.upper())
first = True
for answer in answers:
if first:
first = False
else:
clzqtext = clzqtext + '~'
answerStr = createAnswerCloze(answer)
clzqtext = clzqtext + answerStr
clzqtext = clzqtext + '}'
clzqel = et.Element('span')
clzqel.text = clzqtext
qtextContents.append(clzqel)
continue
qtextContents.append(su)
if not mode=='answer': # No answer section was found
questionTextText.text = generateCDATA(qtextContents)
# TO DO: Check for Cloze
qtextContents = et.Element('div')
answer = et.SubElement(question,'answer')
answerText = et.SubElement(answer,'text')
answer.set('fraction','0')
if qtype=='unknown':
question.set("type","essay")
if args.echo:
print(et.tostring(quiz,pretty_print=True).decode('utf-8'))
print("\nFound {} questions in file {}. Outputting to {}.xml".format(questioncount,args.fn,fnroot))
quizoutput = et.ElementTree(quiz)
quizoutput.write(fnroot+'.xml', pretty_print=True, xml_declaration=True, encoding="utf-8")
``` |
{
"source": "jhp038/fashion_project",
"score": 2
} |
#### File: jhp038/fashion_project/training_MLP2_PJH.py
```python
import pickle
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torchvision.transforms as transforms
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch import nn
import torch.nn.functional as Fun
import random
print(torch.cuda.current_device())
print(torch.cuda.device_count())
print(torch.cuda.is_available())
USE_GPU=False
class FeatureDataset(Dataset):
    """Per-user image feature dataset loaded from a pickled list of feature matrices."""
def __init__(self, filename, K=20, is_train=True, transform=None):
pickle_in = open(filename,"rb")
ps = pickle.load(pickle_in)
image_mats = list()
for p in ps:
image_mats.append(np.asarray(p, dtype=np.float32))
self.transform = transform
self.is_train = is_train
self.N = len(image_mats)
self.K = K
self.image_feats_train = list()
self.image_feat_imagepool = list()
for i in range(len(image_mats)):
# self.image_feats_train.append(torch.from_numpy(image_mats[i][0:-1,:]).cuda())
self.image_feats_train.append((image_mats[i][0:-1,:]))
self.image_feat_imagepool.append(image_mats[i][-1,:].tolist())
self.user_num = len(self.image_feat_imagepool)
self.image_dim = image_mats[0].shape[1]
# self.image_feat_imagepool = torch.from_numpy(np.asarray(self.image_feat_imagepool, dtype=np.float32)).cuda()
self.image_feat_imagepool = (np.asarray(self.image_feat_imagepool, dtype=np.float32))
def __len__(self):
return self.N
def __getitem__(self, idx):
permute_list = np.random.permutation(self.image_feats_train[idx].shape[0])
input_feat_idx = permute_list[0:(self.K)] #np.random.randint(self.image_feats_train[idx].shape[0], size=self.K+1)
pos_feat_idx = permute_list[self.K+1]
neg_idx = random.choice(list(range(0,idx))+list(range(idx+1,self.N)))
neg_feat_idx = random.choice(list(range(self.image_feats_train[neg_idx].shape[0])))
# print(idx, pos_feat_idx, input_feat_idx)
# print(neg_idx, neg_feat_idx)
Xs = self.image_feats_train[idx][input_feat_idx,:]
x_pos = self.image_feats_train[idx][pos_feat_idx,:]
x_neg = self.image_feats_train[neg_idx][neg_feat_idx,:]
return Xs, x_pos, x_neg
class Net1(nn.Module):
def __init__(self, image_dim):
super(Net1, self).__init__()
self.embedding_dim = 128
self.embedding_mlp = nn.Sequential(
nn.Linear(image_dim, 256),
nn.LeakyReLU(inplace=True),
nn.Linear(256, self.embedding_dim),
nn.LeakyReLU(inplace=True)
)
# self.mlp2 = nn.Sequential(
# nn.Linear(image_dim, 256),
# nn.LeakyReLU(inplace=True),
# nn.Linear(256, 128),
# nn.LeakyReLU(inplace=True)
# )
self.attention = nn.Sequential(
nn.Linear(self.embedding_dim*3, 1),
nn.LeakyReLU(inplace=True),
)
self.ATTENTION = True
def forward(self, x, x_p=None, x_n=None, is_train=True, test_mode='single'):
if is_train:
# get embedding
N, K, F = x.size()
# x
x = x.view(N*K, F)
x = self.embedding_mlp(x)
x = x.view(N, K, 128)
# x_p
x_p = self.embedding_mlp(x_p.view(N,1,F))
# x_n
x_n = self.embedding_mlp(x_n.view(N,1,F))
### averaging x
if not self.ATTENTION:
x = torch.mean(x, 1, True)
x_aggregated = x.view(N,1,128)
else:
x_max = x.max(1)[0].view(N,1,self.embedding_dim).expand_as(x)
x_avg = torch.mean(x,1,True).expand_as(x)
# print(x.size(), x_max.size(), x_avg.size())
x_all = torch.cat((x,x_max,x_avg), 1)
x_all = x_all.view(N*K, 3*self.embedding_dim)
x_attention = self.attention(x_all).view(N,K,1)
x_attention = Fun.softmax(x_attention, dim=1)
x_aggregated = x*x_attention
x_aggregated = x_aggregated.sum(1,keepdim=True)
            ### loss1: contrastive loss
# positive_distance
dist_p = torch.sum((x_aggregated-x_p)*(x_aggregated-x_p), 2)
            # negative distance
dist_n = torch.sum((x_aggregated-x_n)*(x_aggregated-x_n), 2)
# loss
margin=0.5
loss = dist_p + torch.clamp(margin-dist_n, min=0.0, max=10000.0)
loss = torch.mean(loss)
return loss
else:
if test_mode=='average':
K,F = x.size()
x = x.view(1,K,F)
x = self.embedding_mlp(x)
if not self.ATTENTION:
x = torch.mean(x, 1, True)
x = x.view(1,128)
else:
N=1
x_max = x.max(1)[0].view(N,1,self.embedding_dim).expand_as(x)
x_avg = torch.mean(x,1,True).expand_as(x)
# print(x.size(), x_max.size(), x_avg.size())
x_all = torch.cat((x,x_max,x_avg), 1)
x_all = x_all.view(N*K, 3*self.embedding_dim)
x_attention = self.attention(x_all).view(N,K,1)
x_attention = Fun.softmax(x_attention, dim=1)
x = x*x_attention
x = x.sum(1,keepdim=True)
x = x.view(1,128)
return x
elif test_mode=='single':
x = self.embedding_mlp(x)
return x
# class PCA(nn.Module):
# def __init__(self, image_dim):
# super(PCA, self).__init__()
# self.fc1 = nn.Linear(image_dim, 256)
# self.fc2 = nn.Linear(256, image_dim)
# def forward(self, x):
# N, K, F = x.size()
# x = x.view(N*K, F)
# coding = F.ReLU(self.fc1(x), inplace=True)
# x_hat = self.fc2(x)
# x = x.view(N,K,F)
# return x
def nearest_search(imagepool, feat, K=1):
assert K==1
nearest_idx = -1
nearest_dist = 10000000
    for gt_idx, gt_feat in enumerate(imagepool):  # iterate over the passed-in pool rather than the global dataset
dist = np.mean(np.abs(gt_feat-feat))
if dist<nearest_dist:
nearest_dist = dist
nearest_idx = gt_idx
return nearest_idx, nearest_dist
def nearest_search_matrix(imagepool, feat, K=1):
assert K==1
dist = np.mean(np.abs(imagepool-feat), axis=1)
nearest_idx = np.argmin(dist)
nearest_dist = dist[nearest_idx]
return nearest_idx, nearest_dist
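# Quick illustration of the lookup above (a sketch, not part of the training run):
#
#     pool = np.array([[0., 0.], [1., 1.], [2., 2.]], dtype=np.float32)
#     idx, dist = nearest_search_matrix(pool, np.array([0.9, 1.2], dtype=np.float32))
#     # idx == 1, dist == mean(|[1, 1] - [0.9, 1.2]|) ~= 0.15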
transform = transforms.Compose([transforms.ToTensor()])
dataset = FeatureDataset("final_gather_with_title_hashtag.pkl",K=10, is_train=True, transform=transform)
# print(dataset[0][0].shape, dataset[0][1].shape, dataset[0][2].shape)
trainloader = DataLoader(dataset, batch_size=16,shuffle=True, num_workers=0)
trainloader_test = DataLoader(dataset, batch_size=1,shuffle=False, num_workers=0)
net = Net1(image_dim=dataset.image_dim)
if USE_GPU:
net = net.cuda()
# net_pca = PCA(image_dim=dataset.image_dim).cuda()
# criterion = torch.nn.MSELoss(reduction='sum')
criterion = torch.nn.L1Loss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.95)
scheduler = StepLR(optimizer, step_size = 100, gamma = 0.1)
loss_list = []
acc_list = []
# PCA = False
# if PCA:
# for epoch in range(200): # loop over the dataset multiple times
# running_loss = 0.0
# ## training
# for i, data in enumerate(trainloader, 0):
# # get the inputs; data is a list of [inputs, labels]
# inputs, gt = data
# inputs, gt = inputs.cuda(), gt.cuda()
# # zero the parameter gradients
# optimizer.zero_grad()
# # forward + backward + optimize
# outputs = net(inputs)
# loss = criterion(outputs, gt)
# loss.backward()
# optimizer.step()
# # print statistics
# running_loss += loss.item()
for epoch in range(500): # loop over the dataset multiple times
running_loss = 0.0
scheduler.step()
## training
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
input_feat, pos_feat, neg_feat = data
if USE_GPU:
input_feat, pos_feat, neg_feat = input_feat.cuda(), pos_feat.cuda(), neg_feat.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
loss = net(input_feat,pos_feat,neg_feat)
# loss = criterion(outputs, gt)
# loss = torch.sum(outputs)
# print(loss)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
## print training result
    if epoch % 1 == 0:  # print the running loss every epoch
print('epoch %d, loss: %.6f , LR: %.10f' % (epoch + 1, running_loss / (len(trainloader)), scheduler.get_lr()[0]))
loss_list.append(running_loss / (len(trainloader)))
# test accuracy:
TEST_EPOCH = 1
    if epoch % TEST_EPOCH == TEST_EPOCH-1:  # evaluate accuracy every TEST_EPOCH epochs
with torch.no_grad():
image_feat_imagepool = torch.from_numpy(dataset.image_feat_imagepool)
if USE_GPU:
image_feat_imagepool = image_feat_imagepool.cuda()
# print(type(image_feat_imagepool), image_feat_imagepool.size())
image_feat_mlp_imagepool = net(image_feat_imagepool, is_train=False, test_mode='single').cpu().detach().numpy()
# print(image_feat_mlp_imagepool.shape)
# exit()
TEST_NUM = 1
oo=0.
total=0.
with torch.no_grad():
for _ in range(TEST_NUM):
# for sample_idx, data in enumerate(trainloader_test, 0):
for sample_idx, sample in enumerate(dataset):
input_feat, pos_feat, neg_feat = sample
input_feat, pos_feat, neg_feat = torch.from_numpy(input_feat), torch.from_numpy(pos_feat), torch.from_numpy(neg_feat)
if USE_GPU:
input_feat, pos_feat, neg_feat = input_feat.cuda(), pos_feat.cuda(), neg_feat.cuda()
output = net(input_feat, is_train=False, test_mode='average').cpu().detach().numpy()
# print(output.shape, image_feat_mlp_imagepool.shape)
# do KNN
nearest_index, nearest_dist = nearest_search_matrix(image_feat_mlp_imagepool, output)
# print(sample_idx, nearest_index, nearest_dist)
oo += 1 if (nearest_index==sample_idx) else 0
total += 1
# print statistics
# print(oo, total, 1, float(dataset.user_num))
print('epoch %d, acc: %.6f, random guess: %.6f' % (epoch + 1, oo / total, 1/float(dataset.user_num)))
acc_list.append(oo / total)
print('Finished Training')
``` |
{
"source": "JHP4911/bert_paper_classification",
"score": 3
} |
#### File: JHP4911/bert_paper_classification/evaluate_new_data.py
```python
import os
import argparse
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--tsv", required=True, help="path to training format TSV file")
ap.add_argument("--results", required=True, help="path to prediction results")
args = vars(ap.parse_args())
test_tsv_file = args["tsv"]
results_file = args["results"]
ids = []
results = []
i = 0
with open(test_tsv_file, 'r') as f:
for line in f:
if i == 0:
i += 1
continue
fields = line.rstrip().split("\t")
ids.append(fields[0])
with open(results_file, 'r') as f:
for line in f:
fields = line.rstrip().split("\t")
neg_prob = float(fields[0])
pos_prob = float(fields[1])
result = 0
if pos_prob > neg_prob:
result = 1
results.append(result)
# merge the list of ids with the list of results - only output the positive hits
for i in range(len(ids)):
if results[i] == 1:
print(ids[i])
if __name__ == "__main__":
main()
``` |
{
"source": "JHP4911/bisenetv2-tensorflow",
"score": 2
} |
#### File: bisenetv2-tensorflow/tools/train_bisenet_cityscapes.py
```python
from trainner import cityscapes_bisenet_trainner as trainner
from local_utils.log_util import init_logger
LOG = init_logger.INIT_LOG
def train_model():
"""
:return:
"""
worker = trainner.BiseNetCityScapesTrainer()
worker.train()
return
if __name__ == '__main__':
"""
main function
"""
train_model()
``` |
{
"source": "JHP4911/django-inviting",
"score": 2
} |
#### File: django-inviting/invitation/admin.py
```python
from django.contrib import admin
from models import Invitation, InvitationStats
class InvitationAdmin(admin.ModelAdmin):
list_display = ('user', 'email', 'expiration_date')
admin.site.register(Invitation, InvitationAdmin)
class InvitationStatsAdmin(admin.ModelAdmin):
list_display = ('user', 'available', 'sent', 'accepted', 'performance')
def performance(self, obj):
return '%0.2f' % obj.performance
admin.site.register(InvitationStats, InvitationStatsAdmin)
``` |
{
"source": "JHP4911/e-learning_app",
"score": 2
} |
#### File: e-learning_app/forum/tests.py
```python
from django.test import TestCase, RequestFactory
from users.models import UserProfile
from forum.views import forum
class TestCalls(TestCase):
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
self.user = UserProfile.objects.create_user(
username='Hodor', email='<EMAIL>', password='<PASSWORD>')
def test_call_view_denies_anonymous(self):
response = self.client.get('forum', follow=True)
self.assertEqual(response.status_code, 404)
# Test forum view after login
def test_call_view_loads(self):
# Create an instance of a GET request.
request = self.factory.get('forum')
request.user = self.user
response = forum(request)
self.assertEqual(response.status_code, 200)
``` |
{
"source": "JHP4911/fderyckel-ifitwala_ed",
"score": 2
} |
#### File: ifitwala_ed/controllers/queries.py
```python
from __future__ import unicode_literals
import frappe
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.utils import nowdate, getdate
from collections import defaultdict
from frappe.utils import unique
import ifitwala_ed
# searches for active employees
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def employee_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
fields = get_fields("Employee", ["name", "employee_full_name"])
return frappe.db.sql("""select {fields} from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_full_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_full_name), locate(%(_txt)s, employee_full_name), 99999),
idx desc,
name, employee_full_name
limit %(start)s, %(page_len)s""".format(**{
'fields': ", ".join(fields),
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def get_fields(doctype, fields=[]):
meta = frappe.get_meta(doctype)
fields.extend(meta.get_search_fields())
if meta.title_field and not meta.title_field.strip() in fields:
fields.insert(1, meta.title_field.strip())
return unique(fields)
```
#### File: ifitwala_ed/setup/utils.py
```python
from __future__ import unicode_literals
import frappe
def insert_record(records):
for r in records:
doc = frappe.new_doc(r.get("doctype"))
doc.update(r)
try:
doc.insert(ignore_permissions=True)
except frappe.DuplicateEntryError as e:
# pass DuplicateEntryError and continue
if e.args and e.args[0]==doc.doctype and e.args[1]==doc.name:
# make sure DuplicateEntryError is for the exact same doc and not a related doc
pass
else:
raise
```
#### File: fderyckel-ifitwala_ed/ifitwala_ed/utils.py
```python
from __future__ import unicode_literals, division
import frappe
from frappe import _
class OverlapError(frappe.ValidationError): pass
def validate_overlap_for(doc, doctype, fieldname, value=None):
"""Checks overlap for specified field.
:param fieldname: Checks Overlap for this field
"""
existing = get_overlap_for(doc, doctype, fieldname, value)
if existing:
frappe.throw(_("This {0} conflicts with {1} for {2} {3}").format(doc.doctype, existing.name,
doc.meta.get_label(fieldname) if not value else fieldname , value or doc.get(fieldname)), OverlapError)
def get_overlap_for(doc, doctype, fieldname, value=None):
"""Returns overlaping document for specified field.
:param fieldname: Checks Overlap for this field
"""
existing = frappe.db.sql("""select name, from_time, to_time from `tab{0}`
where `{1}`=%(val)s and schedule_date = %(schedule_date)s and
(
(from_time > %(from_time)s and from_time < %(to_time)s) or
(to_time > %(from_time)s and to_time < %(to_time)s) or
(%(from_time)s > from_time and %(from_time)s < to_time) or
(%(from_time)s = from_time and %(to_time)s = to_time))
and name!=%(name)s and docstatus!=2""".format(doctype, fieldname),
{
"schedule_date": doc.schedule_date,
"val": value or doc.get(fieldname),
"from_time": doc.from_time,
"to_time": doc.to_time,
"name": doc.name or "No Name"
}, as_dict=True)
return existing[0] if existing else None
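# The OR-ed conditions above flag a conflict when the existing slot's start or
# end falls strictly inside the new slot, when the new slot starts strictly
# inside the existing one, or when the two slots match exactly.  Because the
# comparisons are strict, slots that merely touch at an endpoint
# (e.g. 09:00-10:00 followed by 10:00-11:00) are not treated as a conflict.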
def validate_duplicate_student(students):
unique_students = []
for stud in students:
if stud.student in unique_students:
            frappe.throw(_("Student {0} - {1} appears multiple times in rows {2} & {3}")
.format(stud.student, stud.student_name, unique_students.index(stud.student)+1, stud.idx))
else:
unique_students.append(stud.student)
return None
# CMS - Course Management
# to return a list of all programs that can be displayed on the portal.
def get_portal_programs():
published_programs = frappe.get_all("Program", filters = {"is_published": True})
if not published_programs:
return None
program_list = [frappe.get_doc("Program", program) for program in published_programs]
portal_programs = [{"program":program, 'has_access':allowed_program_access(program.name)} for program in program_list if allowed_program_access(program.name)]
return portal_programs
# deciding if the current user is a student who has access to program or if it is a super-user
def allowed_program_access(program, student=None):
if has_super_access():
return True
if not student:
student = get_current_student()
if student and get_enrollment('program', program, student.name):
return True
else:
return False
# will display all programs and courses for users with certain roles.
def has_super_access():
current_user = frappe.get_doc("User", frappe.session.user)
roles = set([role.role for role in current_user.roles])
return bool(roles & {"Administrator", "Instructor", "Curriculum Coordinator", "System Manager", "Academic Admin", "Schedule Maker", "School IT"})
# to get the name of the student who is currently logged-in
def get_current_student():
email = frappe.session.user
if email in ('Administrator', 'Guest'):
return None
try:
student_id = frappe.get_all("Student", {"student_email": email}, ["name"])[0].name
return frappe.get_doc("Student", student_id)
except (IndexError, frappe.DoesNotExistError):
return None
# for CMS: get a list of all enrolled programs and/or courses for the current academic year.
def get_enrollment(master, document, student):
current_year = frappe.get_single("Education Settings").get("current_academic_year")
if master == 'program':
enrollments = frappe.get_all("Program Enrollment", filters={'student':student, 'program': document, 'docstatus': 1})
#enrollments = frappe.get_all("Program Enrollment", filters={'student':student, 'program': document, 'docstatus': 1, 'academic_year': current_year})
if master == 'course':
enrollments = frappe.get_all("Course Enrollment", filters={'student':student, 'course': document, 'academic_year': current_year})
if enrollments:
return enrollments[0].name
else:
return None
```
#### File: www/kms/index.py
```python
from __future__ import unicode_literals
import frappe
import ifitwala_ed.utils as utils
no_cache = 1
#always get context first for portal
def get_context(context):
context.education_settings = frappe.get_single("Education Settings")
if not context.education_settings.enable_cms:
frappe.local.flags.redirect_location = '/'
raise frappe.Redirect
context.featured_programs = get_featured_programs()
def get_featured_programs():
return utils.get_portal_programs()
``` |
{
"source": "JHP4911/Gesture-Triggered-Alarm-on-Pi-or-Jetson-Nano",
"score": 2
} |
#### File: Gesture-Triggered-Alarm-on-Pi-or-Jetson-Nano/gesture_recognition_demo/tracker.py
```python
import numpy as np
from scipy.optimize import linear_sum_assignment
class Detection:
"""Class that stores detected object"""
def __init__(self, obj_id, roi, conf, waiting=0, duration=1):
"""Constructor"""
self.id = obj_id
self.roi = roi
self.conf = conf
self.waiting = waiting
self.duration = duration
@property
def roi(self):
"""Returns ROI of detected object"""
return self._roi
@roi.setter
def roi(self, roi):
"""Sets ROI of detected object"""
self._roi = np.copy(roi.reshape(1, -1))
class Tracker: # pylint: disable=too-few-public-methods
"""Class that carries out tracking of persons using Hungarian algorithm"""
def __init__(self, detector, score_threshold, iou_threshold, smooth_weight=0.5, max_waiting=5):
"""Constructor"""
self._detector = detector
self._score_threshold = score_threshold
self._iou_threshold = iou_threshold
self._smooth_weight = smooth_weight
self._max_waiting = max_waiting
self._last_detections = []
self._cur_req_id, self._next_req_id = 0, 1
self._last_id = 0
@staticmethod
def _matrix_iou(set_a, set_b):
"""Computes IoU metric for the two sets of vectors"""
intersect_ymin = np.maximum(set_a[:, 0].reshape([-1, 1]), set_b[:, 0].reshape([1, -1]))
intersect_xmin = np.maximum(set_a[:, 1].reshape([-1, 1]), set_b[:, 1].reshape([1, -1]))
intersect_ymax = np.minimum(set_a[:, 2].reshape([-1, 1]), set_b[:, 2].reshape([1, -1]))
intersect_xmax = np.minimum(set_a[:, 3].reshape([-1, 1]), set_b[:, 3].reshape([1, -1]))
intersect_heights = np.maximum(0.0, intersect_ymax - intersect_ymin)
intersect_widths = np.maximum(0.0, intersect_xmax - intersect_xmin)
intersect_areas = intersect_heights * intersect_widths
areas_set_a = ((set_a[:, 2] - set_a[:, 0]) * (set_a[:, 3] - set_a[:, 1])).reshape([-1, 1])
areas_set_b = ((set_b[:, 2] - set_b[:, 0]) * (set_b[:, 3] - set_b[:, 1])).reshape([1, -1])
union_areas = areas_set_a + areas_set_b - intersect_areas
return intersect_areas / union_areas
@staticmethod
def filter_rois(new_rois, score_threshold):
"""Filters input ROIs by valid height/width and score threshold values"""
heights = new_rois[:, 2] - new_rois[:, 0]
widths = new_rois[:, 3] - new_rois[:, 1]
valid_sizes_mask = np.logical_and(heights > 0.0, widths > 0.0)
valid_conf_mask = new_rois[:, 4] > score_threshold
valid_roi_ids = np.where(np.logical_and(valid_sizes_mask, valid_conf_mask))[0]
filtered_rois = new_rois[valid_roi_ids, :4]
filtered_conf = new_rois[valid_roi_ids, 4]
return filtered_rois, filtered_conf
    def _track(self, last_detections, new_rois):
        """Updates current tracks according to new observations"""
filtered_rois, filtered_conf = self.filter_rois(new_rois, self._score_threshold)
if filtered_rois.shape[0] == 0:
out_detections = []
for det in last_detections:
det.waiting = 1
det.duration = 0
out_detections.append(det)
return out_detections
if last_detections is None or len(last_detections) == 0:
out_detections = []
for roi, conf in zip(filtered_rois, filtered_conf):
out_detections.append(Detection(self._last_id, roi.reshape(1, -1), conf))
self._last_id += 1
return out_detections
last_rois = np.concatenate([det.roi for det in last_detections], axis=0)
affinity_matrix = self._matrix_iou(last_rois, filtered_rois)
cost_matrix = 1.0 - affinity_matrix
row_ind, col_ind = linear_sum_assignment(cost_matrix)
affinity_values = 1.0 - cost_matrix[row_ind, col_ind]
valid_matches = affinity_values > self._iou_threshold
row_ind = row_ind[valid_matches]
col_ind = col_ind[valid_matches]
out_detections = []
for src_id, trg_id in zip(row_ind, col_ind):
det = last_detections[src_id]
det.waiting = 0
det.duration += 1
new_roi = filtered_rois[trg_id]
det.roi = self._smooth_roi(det.roi, new_roi.reshape(1, -1), self._smooth_weight)
det.conf = filtered_conf[trg_id]
out_detections.append(det)
unmatched_src_ind = set(range(len(last_detections))) - set(row_ind.tolist())
for src_id in unmatched_src_ind:
det = last_detections[src_id]
det.waiting += 1
det.duration = 0
if det.waiting < self._max_waiting:
out_detections.append(det)
unmatched_trg_ind = set(range(len(filtered_rois))) - set(col_ind.tolist())
for trg_id in unmatched_trg_ind:
new_roi = filtered_rois[trg_id]
new_roi_conf = filtered_conf[trg_id]
out_detections.append(Detection(self._last_id, new_roi.reshape(1, -1), new_roi_conf))
self._last_id += 1
return out_detections
@staticmethod
def _smooth_roi(prev_roi, new_roi, weight):
"""Smooths tracking ROI"""
if prev_roi is None:
return new_roi
return weight * prev_roi + (1.0 - weight) * new_roi
@staticmethod
    def _clip_roi(roi, frame_size):
        """Clips ROI limits according to the frame size"""
frame_height, frame_width = frame_size
old_roi = roi.reshape(-1)
new_roi = [np.maximum(0, int(old_roi[0])),
np.maximum(0, int(old_roi[1])),
np.minimum(frame_width, int(old_roi[2])),
np.minimum(frame_height, int(old_roi[3]))]
return np.array(new_roi)
def _get_last_detections(self, frame_size, max_num_detections, labels_map):
"""Returns active detections"""
if self._last_detections is None or len(self._last_detections) == 0:
return [], {}
out_detections = []
for det in self._last_detections:
if det.waiting > 0 or det.duration <= 1:
continue
clipped_roi = self._clip_roi(det.roi, frame_size)
out_det = Detection(det.id, clipped_roi, det.conf, det.waiting, det.duration)
out_detections.append(out_det)
if len(out_detections) > max_num_detections:
out_detections.sort(key=lambda x: x.conf, reverse=True)
out_detections = out_detections[:max_num_detections]
matched_det_ids = {det.id for det in out_detections} & labels_map.keys()
unused_det_ids = sorted(set(range(max_num_detections)) - matched_det_ids)
out_labels_map = {}
for det in out_detections:
if det.id in matched_det_ids:
out_labels_map[det.id] = labels_map[det.id]
else:
new_local_det_id = unused_det_ids[0]
unused_det_ids = unused_det_ids[1:]
out_labels_map[det.id] = new_local_det_id
det.id = new_local_det_id
return out_detections, labels_map
def add_frame(self, frame, max_num_detections, labels_map):
"""Adds new detections and returns active tracks"""
self._detector.async_infer(frame, self._next_req_id)
new_rois = self._detector.wait_request(self._cur_req_id)
self._cur_req_id, self._next_req_id = self._next_req_id, self._cur_req_id
if new_rois is not None:
self._last_detections = self._track(self._last_detections, new_rois)
frame_size = frame.shape[:2]
out_detections, out_labels_map = self._get_last_detections(
frame_size, max_num_detections, labels_map)
return out_detections, out_labels_map
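# Sanity check for the vectorised IoU used above (a sketch, not part of the demo):
#
#     a = np.array([[0., 0., 10., 10.]])              # ymin, xmin, ymax, xmax
#     b = np.array([[0., 0., 10., 10.],
#                   [0., 0., 5., 10.]])
#     Tracker._matrix_iou(a, b)                       # -> [[1.0, 0.5]]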
``` |
{
"source": "JHP4911/JINA",
"score": 2
} |
#### File: executors/indexers/keyvalue.py
```python
import mmap
from typing import Iterator, Optional
import numpy as np
from . import BaseKVIndexer
from ..compound import CompoundExecutor
HEADER_NONE_ENTRY = (-1, -1, -1)
class BinaryPbIndexer(BaseKVIndexer):
class WriteHandler:
def __init__(self, path, mode):
self.body = open(path, mode)
self.header = open(path + '.head', mode)
def close(self):
self.body.close()
self.header.close()
def flush(self):
self.body.flush()
self.header.flush()
class ReadHandler:
def __init__(self, path):
with open(path + '.head', 'rb') as fp:
tmp = np.frombuffer(fp.read(), dtype=np.int64).reshape([-1, 4])
self.header = {r[0]: None if np.array_equal(r[1:], HEADER_NONE_ENTRY) else r[1:] for r in tmp}
self._body = open(path, 'r+b')
self.body = self._body.fileno()
def close(self):
self._body.close()
def get_add_handler(self):
# keep _start position as in pickle serialization
return self.WriteHandler(self.index_abspath, 'ab')
def get_create_handler(self):
self._start = 0 # override _start position
return self.WriteHandler(self.index_abspath, 'wb')
def get_query_handler(self):
return self.ReadHandler(self.index_abspath)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._total_byte_len = 0
self._start = 0
self._page_size = mmap.ALLOCATIONGRANULARITY
def add(self, keys: Iterator[int], values: Iterator[bytes], *args, **kwargs):
        keys, values = list(keys), list(values)  # materialize once so the length check does not exhaust the iterators
        if len(keys) != len(values):
            raise ValueError(f'Len of keys {len(keys)} did not match len of values {len(values)}')
for key, value in zip(keys, values):
l = len(value) #: the length
p = int(self._start / self._page_size) * self._page_size #: offset of the page
r = self._start % self._page_size #: the remainder, i.e. the start position given the offset
self.write_handler.header.write(
np.array(
(key, p, r, r + l),
dtype=np.int64
).tobytes()
)
self._start += l
self.write_handler.body.write(value)
self._size += 1
self.write_handler.flush()
def query(self, key: int) -> Optional[bytes]:
pos_info = self.query_handler.header.get(key, None)
if pos_info is not None:
p, r, l = pos_info
with mmap.mmap(self.query_handler.body, offset=p, length=l) as m:
return m[r:]
def update(self, keys: Iterator[int], values: Iterator[bytes], *args, **kwargs):
keys, values = self._filter_nonexistent_keys_values(keys, values, self.query_handler.header.keys(), self.save_abspath)
self._delete(keys)
self.add(keys, values)
return
def _delete(self, keys: Iterator[int]):
self.query_handler.close()
self.handler_mutex = False
for key in keys:
self.write_handler.header.write(
np.array(
np.concatenate([[key], HEADER_NONE_ENTRY]),
dtype=np.int64
).tobytes()
)
if self.query_handler:
del self.query_handler.header[key]
self._size -= 1
def delete(self, keys: Iterator[int], *args, **kwargs):
keys = self._filter_nonexistent_keys(keys, self.query_handler.header.keys(), self.save_abspath)
self._delete(keys)
class DataURIPbIndexer(BinaryPbIndexer):
"""Shortcut for :class:`DocPbIndexer` equipped with ``requests.on`` for storing doc-level protobuf and data uri info,
differ with :class:`ChunkPbIndexer` only in ``requests.on`` """
class UniquePbIndexer(CompoundExecutor):
"""A frequently used pattern for combining a :class:`BaseKVIndexer` and a :class:`DocIDCache` """
```
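For orientation, here is a minimal standalone sketch (not jina code; the file paths and keys are made up) of the header/body layout that `BinaryPbIndexer` relies on: each header record packs four int64 values `(key, page_offset, start_in_page, end_in_page)`, and a query memory-maps the body file at the page boundary and slices out the value.
```python
import mmap
import os
import tempfile

import numpy as np

PAGE = mmap.ALLOCATIONGRANULARITY

workdir = tempfile.mkdtemp()
body_path = os.path.join(workdir, 'index.bin')
head_path = body_path + '.head'

# write phase: append values to the body file and record (key, page, start, end) in the header
start = 0
with open(body_path, 'wb') as body, open(head_path, 'wb') as head:
    for key, value in [(1, b'hello'), (2, b'world!')]:
        p = (start // PAGE) * PAGE   # page-aligned offset, required by mmap
        r = start % PAGE             # where the value starts within that page
        head.write(np.array((key, p, r, r + len(value)), dtype=np.int64).tobytes())
        body.write(value)
        start += len(value)

# read phase: rebuild the header lookup and fetch one value through mmap
with open(head_path, 'rb') as fp:
    rows = np.frombuffer(fp.read(), dtype=np.int64).reshape([-1, 4])
header = {int(row[0]): [int(v) for v in row[1:]] for row in rows}

with open(body_path, 'r+b') as body:
    p, r, end = header[2]
    with mmap.mmap(body.fileno(), length=end, offset=p) as m:
        print(m[r:])   # b'world!'
```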
#### File: unit/flow/test_flow.py
```python
import os
import numpy as np
import pytest
from jina import Flow
from jina.enums import SocketType, FlowBuildLevel
from jina.excepts import RuntimeFailToStart
from jina.executors import BaseExecutor
from jina.helper import random_identity
from jina.proto.jina_pb2 import DocumentProto
from jina.types.request import Response
from tests import random_docs, rm_files
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_with_jump():
def _validate(f):
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r4']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r5']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r6']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r8']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r9']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r10']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
f = (Flow().add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8']))
with f:
_validate(f)
f.save_config('tmp.yml')
Flow.load_config('tmp.yml')
with Flow.load_config('tmp.yml') as f:
_validate(f)
rm_files(['tmp.yml'])
@pytest.mark.parametrize('restful', [False, True])
def test_simple_flow(restful):
bytes_gen = (b'aaa' for _ in range(10))
def bytes_fn():
for _ in range(100):
yield b'aaa'
f = Flow(restful=restful).add()
with f:
f.index(input_fn=bytes_gen)
with f:
f.index(input_fn=bytes_fn)
with f:
f.index(input_fn=bytes_fn)
f.index(input_fn=bytes_fn)
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['pod0']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_flow_identical():
with open(os.path.join(cur_dir, '../yaml/test-flow.yml')) as fp:
a = Flow.load_config(fp)
b = (Flow()
.add(name='chunk_seg', parallel=3)
.add(name='wqncode1', parallel=2)
.add(name='encode2', parallel=2, needs='chunk_seg')
.join(['wqncode1', 'encode2']))
a.save_config('test2.yml')
c = Flow.load_config('test2.yml')
assert a == b
assert a == c
with a as f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['chunk_seg']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['wqncode1']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['encode2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.head_args.socket_out == SocketType.ROUTER_BIND
for arg in node.peas_args['peas']:
assert arg.socket_in == SocketType.DEALER_CONNECT
assert arg.socket_out == SocketType.PUSH_CONNECT
assert node.tail_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
rm_files(['test2.yml'])
@pytest.mark.parametrize('restful', [False, True])
def test_flow_no_container(restful):
f = (Flow(restful=restful)
.add(name='dummyEncoder', uses=os.path.join(cur_dir, '../mwu-encoder/mwu_encoder.yml')))
with f:
f.index(input_fn=random_docs(10))
def test_shards():
f = Flow().add(name='doc_pb', uses=os.path.join(cur_dir, '../yaml/test-docpb.yml'), parallel=3)
with f:
f.index(input_fn=random_docs(1000), random_doc_id=False)
with f:
pass
rm_files(['test-docshard-tmp'])
def test_py_client():
f = (Flow().add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.add(name='r4', needs='r2')
.add(name='r5', needs='r3')
.add(name='r6', needs='r4')
.add(name='r8', needs='r6')
.add(name='r9', needs='r5')
.add(name='r10', needs=['r9', 'r8']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r4']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r5']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r6']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r8']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r9']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r10']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUSH_BIND
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_dry_run_with_two_pathways_diverging_at_gateway():
f = (Flow()
.add(name='r2')
.add(name='r3', needs='gateway')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_dry_run_with_two_pathways_diverging_at_non_gateway():
f = (Flow()
.add(name='r1')
.add(name='r2')
.add(name='r3', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_refactor_num_part():
f = (Flow()
.add(name='r1', uses='_logforward', needs='gateway')
.add(name='r2', uses='_logforward', needs='gateway')
.join(['r1', 'r2']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
def test_refactor_num_part_proxy():
f = (Flow()
.add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1')
.add(name='r3', uses='_logforward', needs='r1')
.join(['r2', 'r3']))
with f:
node = f._pod_nodes['gateway']
assert node.head_args.socket_in == SocketType.PULL_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r1']
assert node.head_args.socket_in == SocketType.PULL_BIND
assert node.tail_args.socket_out == SocketType.PUB_BIND
node = f._pod_nodes['r2']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
node = f._pod_nodes['r3']
assert node.head_args.socket_in == SocketType.SUB_CONNECT
assert node.tail_args.socket_out == SocketType.PUSH_CONNECT
for name, node in f._pod_nodes.items():
assert node.peas_args['peas'][0] == node.head_args
assert node.peas_args['peas'][0] == node.tail_args
@pytest.mark.parametrize('restful', [False, True])
def test_refactor_num_part_proxy_2(restful):
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward')
.add(name='r2', uses='_logforward', needs='r1', parallel=2)
.add(name='r3', uses='_logforward', needs='r1', parallel=3, polling='ALL')
.needs(['r2', 'r3']))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
@pytest.mark.parametrize('restful', [False, True])
def test_refactor_num_part_2(restful):
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward', needs='gateway', parallel=3, polling='ALL'))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
f = (Flow(restful=restful)
.add(name='r1', uses='_logforward', needs='gateway', parallel=3))
with f:
f.index_lines(lines=['abbcs', 'efgh'])
@pytest.mark.parametrize('restful', [False, True])
def test_index_text_files(mocker, restful):
def validate(req):
assert len(req.docs) > 0
for d in req.docs:
assert d.text
response_mock = mocker.Mock(wrap=validate)
f = (Flow(restful=restful, read_only=True)
.add(uses=os.path.join(cur_dir, '../yaml/datauriindex.yml'), timeout_ready=-1))
with f:
f.index_files('*.py', on_done=response_mock, callback_on='body')
rm_files(['doc.gzip'])
response_mock.assert_called()
# TODO(Deepankar): Gets stuck when `restful: True` - issues with `needs='gateway'`
@pytest.mark.parametrize('restful', [False])
def test_flow_with_publish_driver(mocker, restful):
def validate(req):
for d in req.docs:
assert d.embedding is not None
response_mock = mocker.Mock(wrap=validate)
f = (Flow(restful=restful)
.add(name='r2', uses='!OneHotTextEncoder')
.add(name='r3', uses='!OneHotTextEncoder', needs='gateway')
.join(needs=['r2', 'r3']))
with f:
f.index_lines(lines=['text_1', 'text_2'], on_done=response_mock)
response_mock.assert_called()
@pytest.mark.parametrize('restful', [False, True])
def test_flow_with_modalitys_simple(mocker, restful):
def validate(req):
for d in req.index.docs:
assert d.modality in ['mode1', 'mode2']
def input_fn():
doc1 = DocumentProto()
doc1.modality = 'mode1'
doc2 = DocumentProto()
doc2.modality = 'mode2'
doc3 = DocumentProto()
doc3.modality = 'mode1'
return [doc1, doc2, doc3]
response_mock = mocker.Mock(wrap=validate)
flow = (Flow(restful=restful)
.add(name='chunk_seg', parallel=3)
.add(name='encoder12', parallel=2,
uses='- !FilterQL | {lookups: {modality__in: [mode1, mode2]}, traversal_paths: [c]}'))
with flow:
flow.index(input_fn=input_fn, on_done=response_mock)
response_mock.assert_called()
def test_flow_arguments_priorities():
f = Flow(port_expose=12345).add(name='test', port_expose=23456)
assert f._pod_nodes['test'].args.port_expose == 23456
f = Flow(port_expose=12345).add(name='test')
assert f._pod_nodes['test'].args.port_expose == 12345
@pytest.mark.parametrize('restful', [False])
def test_flow_arbitrary_needs(restful):
f = (Flow(restful=restful)
.add(name='p1').add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.add(name='p4', needs='gateway')
.add(name='p5', needs='gateway')
.needs(['p2', 'p4'], name='r1')
.needs(['p3', 'p5'], name='r2')
.needs(['p1', 'r1'], name='r3')
.needs(['r2', 'r3'], name='r4'))
with f:
f.index_lines(['abc', 'def'])
@pytest.mark.parametrize('restful', [False])
def test_flow_needs_all(restful):
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.needs_all(name='r1'))
assert f._pod_nodes['r1'].needs == {'p1'}
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2'))
assert f._pod_nodes['r2'].needs == {'p3', 'r1'}
with f:
f.index_ndarray(np.random.random([10, 10]))
f = (Flow(restful=restful)
.add(name='p1', needs='gateway')
.add(name='p2', needs='gateway')
.add(name='p3', needs='gateway')
.needs(needs=['p1', 'p2'], name='r1')
.needs_all(name='r2')
.add(name='p4', needs='r2'))
assert f._pod_nodes['r2'].needs == {'p3', 'r1'}
assert f._pod_nodes['p4'].needs == {'r2'}
with f:
f.index_ndarray(np.random.random([10, 10]))
def test_flow_with_pod_envs():
f = Flow.load_config('yaml/flow-with-envs.yml')
class EnvChecker1(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
class EnvChecker2(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert 'key1' not in os.environ
assert 'key2' not in os.environ
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
with f:
pass
@pytest.mark.parametrize('return_results', [False, True])
@pytest.mark.parametrize('restful', [False, True])
def test_return_results_sync_flow(return_results, restful):
with Flow(restful=restful, return_results=return_results).add() as f:
r = f.index_ndarray(np.random.random([10, 2]))
if return_results:
assert isinstance(r, list)
assert isinstance(r[0], Response)
else:
assert r is None
@pytest.mark.parametrize('input, expect_host, expect_port',
[('0.0.0.0', '0.0.0.0', None),
('0.0.0.0:12345', '0.0.0.0', 12345),
('123.456.789.0:45678', '123.456.789.0', 45678),
('api.jina.ai:45678', 'api.jina.ai', 45678)])
def test_flow_host_expose_shortcut(input, expect_host, expect_port):
f = Flow().add(host=input).build()
assert f['pod0'].args.host == expect_host
if expect_port is not None:
assert f['pod0'].args.port_expose == expect_port
def test_flow_workspace_id():
f = Flow().add().add().add().build()
assert len(f.workspace_id) == 3
assert len(set(f.workspace_id.values())) == 3
with pytest.raises(ValueError):
f.workspace_id = 'hello'
new_id = random_identity()
f.workspace_id = new_id
assert len(set(f.workspace_id.values())) == 1
assert list(f.workspace_id.values())[0] == new_id
def test_flow_identity():
f = Flow().add().add().add().build()
assert len(f.identity) == 4
assert len(set(f.identity.values())) == 4
with pytest.raises(ValueError):
f.identity = 'hello'
new_id = random_identity()
f.identity = new_id
assert len(set(f.identity.values())) == 1
assert list(f.identity.values())[0] == new_id
assert f.args.identity == new_id
def test_flow_identity_override():
f = Flow().add().add(parallel=2).add(parallel=2)
with f:
assert len(set(p.args.identity for _, p in f)) == f.num_pods
f = Flow(identity='123456').add().add(parallel=2).add(parallel=2)
with f:
assert len(set(p.args.identity for _, p in f)) == 1
y = '''
!Flow
version: '1.0'
pods:
- uses: _pass
- uses: _pass
parallel: 3
'''
f = Flow.load_config(y)
for _, p in f:
p.args.identity = '1234'
with f:
assert len(set(p.args.identity for _, p in f)) == 2
for _, p in f:
if p.args.identity != '1234':
assert p.name == 'gateway'
def test_bad_pod_graceful_termination():
def asset_bad_flow(f):
with pytest.raises(RuntimeFailToStart):
with f:
assert f._build_level == FlowBuildLevel.EMPTY
# bad remote pod
asset_bad_flow(Flow().add(host='hello-there'))
# bad local pod
asset_bad_flow(Flow().add(uses='hello-there'))
# bad local pod at second
asset_bad_flow(Flow().add().add(uses='hello-there'))
# bad remote pod at second
asset_bad_flow(Flow().add().add(host='hello-there'))
# bad local pod at second, with correct pod at last
asset_bad_flow(Flow().add().add(uses='hello-there').add())
# bad remote pod at second, with correct pod at last
asset_bad_flow(Flow().add().add(host='hello-there').add())
``` |
{
"source": "JHP4911/JioTC",
"score": 3
} |
#### File: jiotc/models/bilstm_attention_model.py
```python
import os
import pdb
import copy
import logging
from typing import Union, Optional, Dict, Any, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from jiotc.embeddings.base_embedding import BaseEmbedding
from .base_model import BaseModel
# Bidirectional LSTM neural network (many-to-one)
class BiLSTMAttentionModel(BaseModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'hidden_size': 128,
'num_layers': 1,
                'dropout': 0.2,  # has no effect when num_layers == 1
'bidirectional': True
},
'layer_dense': {
'activation': 'softmax'
}
}
def __init__(self, embed_model: Optional[BaseEmbedding] = None,
                 device: Optional[str] = None,  # 'cuda' or 'cpu'
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
'''
self.device
self.embedding_layer
self.embedding
self.embedding_size
self.num_classes
        these attributes are already known here and can be used directly
'''
super(BiLSTMAttentionModel, self).__init__(embed_model, device=device)
self.hidden_size = hyper_parameters['layer_bi_lstm']['hidden_size']
self.num_layers = hyper_parameters['layer_bi_lstm']['num_layers']
self.dropout = hyper_parameters['layer_bi_lstm']['dropout']
self.lstm = nn.LSTM(
self.embedding_size, self.hidden_size, self.num_layers,
batch_first=True, bidirectional=True)
        # the LSTM dropout argument has no effect when there is only one layer
self.w_attention = nn.Parameter(torch.Tensor(
self.hidden_size * 2, self.hidden_size * 2))
self.query_attention = nn.Parameter(torch.Tensor(self.hidden_size * 2, 1))
self.fc = nn.Linear(self.hidden_size * 2,
self.num_classes) # 2 for bidirection
nn.init.uniform_(self.w_attention, -0.1, 0.1)
nn.init.uniform_(self.query_attention, -0.1, 0.1)
def forward(self, samples):
masks = samples.gt(0)
embeds = self._compute_embedding(samples)
lstm_out = self._compute_lstm(embeds, masks)
att_lstm_out_sum = self._compute_attention(lstm_out, masks)
output = self.fc(att_lstm_out_sum)
return output
def _compute_embedding(self, samples):
        ''' Look up the embeddings for samples, randomly masking out some vocabulary entries in the embedding matrix '''
if self.training:
            # apply dropout to the embedding weight matrix
complete_embedding_weight = copy.deepcopy(self.embedding_layer.weight)
embedding_weight = F.dropout2d(
torch.unsqueeze(self.embedding_layer.weight, 0),
p=self.dropout, training=self.training)[0]
self.embedding_layer.weight.data.copy_(embedding_weight)
embeds = self.embedding_layer(samples)
if self.training:
            # restore the original (pre-dropout) embedding weights
self.embedding_layer.weight.data.copy_(complete_embedding_weight)
#pdb.set_trace()
return embeds
def _compute_lstm(self, embeds, masks):
seq_length = masks.sum(1)
sorted_seq_length, perm_idx = seq_length.sort(descending=True)
        embeds = embeds[perm_idx, :]  # reorder by descending sequence length
pack_sequence = pack_padded_sequence(
embeds, lengths=sorted_seq_length, batch_first=True)
# Forward propagate LSTM
packed_output, _ = self.lstm(pack_sequence)
# out: tensor of shape (batch_size, seq_length, hidden_size * 2)
lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)
_, unperm_idx = perm_idx.sort()
lstm_out = lstm_out[unperm_idx, :]
return lstm_out
def _compute_attention(self, lstm_out, masks):
        ''' Compute attention weights and take the weighted sum of the features across time steps '''
# dropout_layer
lstm_out = lstm_out.permute(1, 0, 2)
# [batch_size, seq_len, hidden_size] => [seq_len, batch_size, hidden_size * 2]
#lstm_out = F.dropout2d(lstm_out, p=self.dropout, training=self.training)
# attention_layer
u = torch.tanh(torch.matmul(lstm_out, self.w_attention))
# [seq_len, batch_size, hidden_size] => [seq_len, batch_size, 1]
att = torch.matmul(u, self.query_attention) # [seq_len, batch_size, 1]
att = torch.squeeze(att, dim=-1) # [seq_len, batch_size, 1] => [seq_len, batch_size]
        # the entries of att_score sum to 1 per sequence, keeping the output distribution consistent
cur_seq_len, batch_size = att.shape
        cur_seq_masks = masks[:, :cur_seq_len].T  # the current mask may be shorter than the model's maximum length, so align it
att_without_pad = torch.where(cur_seq_masks == False, torch.ones_like(att) * (- float('inf')), att)
        att_score = F.softmax(att_without_pad, dim=0)  # [seq_len, batch_size]; each sentence is truncated first, then the attention weights are computed
if not self.training:
            # inspect individual samples (debugging only)
#print(samples[:,0])
#print([float(item) for item in att_score[:,0].cpu().detach().numpy()][:30])
#pdb.set_trace()
pass
seq_len, batch_size = att_score.shape
att_score = torch.unsqueeze(att_score, 2).expand(seq_len, batch_size, self.hidden_size * 2)
# [seq_len, batch_size] => [seq_len, batch_size, hidden_size * 2]
        att_lstm_out = lstm_out * att_score  # element-wise product, [seq_len, batch_size, hidden_size * 2]
att_lstm_out = att_lstm_out.permute(1, 0, 2)
# [seq_len, batch_size, hidden_size * 2] => [batch_size, seq_len, hidden_size * 2]
att_lstm_out_sum = torch.sum(att_lstm_out, dim=1)
#pdb.set_trace()
return att_lstm_out_sum
```
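As a side note, the masked softmax at the heart of `_compute_attention` can be illustrated with a small self-contained sketch (not part of the library; the tensor names and sizes below are invented for the example): padded time steps are filled with `-inf` before the softmax so they receive zero attention weight, and the weights of the real time steps sum to 1 before the weighted sum over the LSTM outputs.
```python
import torch
import torch.nn.functional as F

seq_len, batch_size, hidden = 4, 2, 6
lstm_out = torch.randn(seq_len, batch_size, hidden)      # stands in for [seq_len, batch, hidden_size * 2]
masks = torch.tensor([[1, 1, 1, 0],                      # sample 0 has 3 real tokens, 1 pad
                      [1, 1, 0, 0]], dtype=torch.bool)   # sample 1 has 2 real tokens, 2 pads

scores = torch.randn(seq_len, batch_size)                # unnormalised attention logits
scores = scores.masked_fill(~masks.T, float('-inf'))     # padded steps get zero weight after softmax
att = F.softmax(scores, dim=0)                           # normalise over the time dimension

weighted = (lstm_out * att.unsqueeze(-1)).sum(dim=0)     # [batch, hidden]: weighted sum over time
print(att.sum(dim=0))                                    # each sample's weights sum to 1
print(weighted.shape)                                    # torch.Size([2, 6])
```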
#### File: jiotc/models/bilstm_model.py
```python
import os
import pdb
import logging
from typing import Union, Optional, Dict, Any, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.dropout2d in forward()
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from jiotc.embeddings.base_embedding import BaseEmbedding
from .base_model import BaseModel
# Bidirectional LSTM neural network (many-to-one)
class BiLSTMModel(BaseModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'hidden_size': 128,
'num_layers': 1,
                'dropout': 0.2,  # has no effect when num_layers == 1
'bidirectional': True
},
'layer_dense': {
'activation': 'softmax'
}
}
def __init__(self, embed_model: Optional[BaseEmbedding] = None,
                 device: Optional[str] = None,  # 'cuda' or 'cpu'
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
'''
self.device
self.embedding_layer
self.embedding
self.embedding_size
self.num_classes
        these attributes are already known here and can be used directly
'''
super(BiLSTMModel, self).__init__(embed_model, device=device)
self.hidden_size = hyper_parameters['layer_bi_lstm']['hidden_size']
self.num_layers = hyper_parameters['layer_bi_lstm']['num_layers']
self.dropout = hyper_parameters['layer_bi_lstm']['dropout']
self.lstm = nn.LSTM(
self.embedding_size, self.hidden_size, self.num_layers,
batch_first=True, bidirectional=True)
self.fc = nn.Linear(self.hidden_size * 2,
self.num_classes) # 2 for bidirection
def forward(self, samples):
masks = samples.gt(0)
embeds = self.embedding_layer(samples) #.to(self.device)
        # reorder the samples by sequence length (longest first)
seq_length = masks.sum(1)
sorted_seq_length, perm_idx = seq_length.sort(descending=True)
        embeds = embeds[perm_idx, :]  # reorder by descending sequence length
pack_sequence = pack_padded_sequence(
embeds, lengths=sorted_seq_length, batch_first=True)
# Set initial states, involved with batch_size
'''
h0 = torch.autograd.Variable(torch.randn(
self.num_layers * 2, embeds.shape[0],
self.hidden_size)).to(self.device) # 2 for bidirection
c0 = torch.autograd.Variable(torch.randn(
self.num_layers * 2, embeds.shape[0],
self.hidden_size)).to(self.device)
#'''
# Forward propagate LSTM
packed_output, _ = self.lstm(pack_sequence) #, (h0, c0))
# out: tensor of shape (batch_size, seq_length, hidden_size * 2)
lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)
_, unperm_idx = perm_idx.sort()
lstm_out = lstm_out[unperm_idx, :]
# dropout_layer
lstm_out = lstm_out.permute(1, 0, 2) # [batch_size, seq_len, hidden_size * 2] => [seq_len, batch_size, hidden_size * 2]
# disabled when not training
lstm_out = F.dropout2d(lstm_out, p=self.dropout, training=self.training)
lstm_out = lstm_out.permute(1, 0, 2) # [seq_len, batch_size, hidden_size * 2] => [batch_size, seq_len, hidden_size * 2]
lstm_out_sum = torch.mean(lstm_out, dim=1)
output = self.fc(lstm_out_sum)
return output
``` |
{
"source": "JHP4911/library-of-congress-classifier-hack",
"score": 2
} |
#### File: JHP4911/library-of-congress-classifier-hack/test_lcc_classifier.py
```python
from lcc_classifier import find_classification_strings, lcc_to_classification
test_cases = {
"DP402.C8 O46 1995": [
"World History and History of Europe, Asia, Africa, Australia, New Zealand, Etc.",
"History of Spain",
"Local history and description",
"Other cities, towns, etc., A-Z",
],
"DR1313.3 .U54 1993": [
"World History and History of Europe, Asia, Africa, Australia, New Zealand, Etc.",
"History of Spain",
"Local history and description",
"Other cities, towns, etc., A-Z",
],
"DR82 .G46 1993": [
"World History and History of Europe, Asia, Africa, Australia, New Zealand, Etc.",
"History of Spain",
"Local history and description",
"Other cities, towns, etc., A-Z",
],
"DS557.8.M9 B55 1992B": [
"World History and History of Europe, Asia, Africa, Australia, New Zealand, Etc.",
"History of Spain",
"Local history and description",
"Other cities, towns, etc., A-Z",
],
"HM216 .G44 1993": [
"Social sciences",
"Sociology",
"These are obsolete numbers no longer used by the Library of Congress",
],
"HM261 .H47 1993": [
"Social sciences",
"Sociology",
"These are obsolete numbers no longer used by the Library of Congress",
],
"HN530.2.A85 I86 1992": [
"Social sciences",
"Social history and conditions. Social problems. Social Reform",
"By region or country",
],
"HQ755.8 .T63 1995": [
"Social sciences",
"The Family. Marriage. Women",
"The family. Marriage. Home",
"Parents. Parenthood",
],
"KF27 .A3 1992H": [
"Law",
"Law of the United States",
"Federal law. Common and collective state law: Individual states",
],
"KF3613.4 .C34": [
"Law",
"Law of the United States",
"Federal law. Common and collective state law: Individual states",
],
"KHA878 .G37 1996": ["Law", "South America (General)", "Argentina"],
"KHH3003 .Q57 1995": ["Law", "South America (General)", "Argentina"],
"KK2222 .L36 1993": ["Law", "Europe", "Germany", "Germany and West Germany"],
"KLA940 .K65 1990": [
"Law",
"Asia and Eurasia, Africa, Pacific Area, and Antarctica",
"Eurasia",
"Russia. Soviet Union",
],
"SD418 .A38 1990": [
"Agriculture",
"Forestry",
(
"Conservation and protection: Including forest influences, damage by "
"elements, fires, forest reserves"
),
],
"TK5105.5 .O653 1993": [
"Technology",
"Electrical engineering. Electronics. Nuclear engineering",
"Telecommunication: Including telegraphy, telephone, radio, radar, television",
],
}
def test_function():
"""
    >>> for i, (key, value) in enumerate(test_cases.items()):
    ...     lcc_to_classification(key) == value
>>> lcc_to_classification("MLCM 95/14118 (P)")
['Medium size', '1995', 'Language and Literature']
"""
pass
if __name__ == "__main__":
import doctest
# find_classification_strings()
errors = 0
for key, value in test_cases.items():
classification = lcc_to_classification(key)
try:
assert classification == value, f"{key}: Ours vs. Theirs...\n{classification} != \n{value}"
except AssertionError as e:
errors += 1
print(e)
# find_classification_strings(key, value)
if errors:
exit(errors)
doctest.testmod()
``` |
{
"source": "JHP4911/mapswipe_convnet",
"score": 3
} |
#### File: mapswipe_convnet/experiment_1/mapswipe_apply_convnet.py
```python
import os, shutil
import argparse
import numpy as np
import keras
from keras.preprocessing import image
from keras import layers
from keras import models
from keras.models import load_model
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description="Apply a trained Convnet to a set of MapSwipe image tiles")
parser.add_argument('--dataset', '-d', metavar='<path to input image tiles>', required=True,
help='Directory containing image tiles for evaluation by the CNN')
parser.add_argument('--model', '-m', metavar='<model>', required=True,
help='HD5 format file containing the trained CNN model')
args = parser.parse_args()
image_size = 128
#image_size = 256
model = load_model(args.model)
image_dir = args.dataset
i = 0
for img_file in os.listdir(image_dir):
tile_id = img_file.replace(".jpg", "")
img_path = os.path.join(image_dir, img_file)
img = image.load_img(img_path, target_size=(image_size, image_size))
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.
result = model.predict(img_tensor)[0][0]
print("{}, {:5.2f}".format(tile_id, result))
main()
```
#### File: mapswipe_convnet/experiment_2/mapswipe_train_convnet_expt_2.py
```python
import os, shutil
import datetime
import re
import argparse
import numpy as np
import cv2
import keras
from keras.preprocessing import image
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
def setup_model(image_size, n_epochs):
model = models.Sequential()
    # model is VGG16 based on the Keras implementation
# https://github.com/keras-team/keras/blob/master/keras/applications/vgg16.py
# Block 1
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', input_shape=(image_size, image_size, 3)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
# Block 2
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1'))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
# Block 3
model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1'))
model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2'))
model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
# Block 4
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
# Block 5
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
model.add(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(4096, activation='relu', name='fc1'))
model.add(layers.Dense(4096, activation='relu', name='fc2'))
model.add(layers.Dense(1, activation='sigmoid', name='predictions'))
return(model)
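# Note: this hand-built stack mirrors the VGG16 architecture; depending on the installed Keras
# version, keras.applications.VGG16(include_top=False, weights=None,
#                                   input_shape=(image_size, image_size, 3)) should provide an
# equivalent convolutional base that could replace blocks 1-5 above.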
# Plot the results and save to file
def plot_accuracy_and_loss(run_id, run_dir, history):
train_acc = history.history['acc']
validation_acc = history.history['val_acc']
train_loss = history.history['loss']
validation_loss = history.history['val_loss']
epochs = range(len(train_acc))
plt.ylim(0.0, 1.0)
major_y_ticks = [ 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
plt.grid(True, 'major', 'y')
plt.tick_params(axis='both', which='both',
bottom='on', top='off',
left='on', right='off',
labelbottom='on', labelleft='on')
plt.gca().set_yticks(major_y_ticks)
plt.plot(epochs, train_acc, 'r', label='Training accuracy')
plt.plot(epochs, validation_acc, 'b', label='Validation accuracy')
plt.plot(epochs, train_loss, 'm', label='Training loss')
plt.plot(epochs, validation_loss, 'c', label='Validation loss')
plt.title(run_id)
plt.legend()
# filename = os.path.join(run_dir, "plot_{}.png".format(run_id))
filename = os.path.join(run_dir, "plot.png")
plt.savefig(filename)
# write this to a csv file...
def save_accuracy_and_loss_to_csv_file(run_dir, history):
n_epochs = len(history.history['acc'])
csv_file = os.path.join(run_dir, 'history.csv')
with open(csv_file, 'wt') as f:
f.write("Epoch,Train Acc,Val Acc,Train Loss, Val Loss\n")
for i in range(n_epochs):
f.write("{:d}, {:4.3f}, {:4.3f}, {:4.3f}, {:4.3f}\n".format( i+1,
history.history['acc'][i],
history.history['val_acc'][i],
history.history['loss'][i],
history.history['val_loss'][i]))
# List the subdirectories and get the run serial from a subdir with the current date
# Generate run id - 20171108_1, 20171108_2, etc
def generate_run_id(output_dir):
date = datetime.datetime.now().strftime("%Y%m%d")
pattern = re.compile(date)
run_serial = 0
sub_dirs = os.listdir(output_dir)
for subdir in os.listdir(output_dir):
subdir_strings = subdir.split("_")
if subdir_strings[0] == date:
serial = int(subdir_strings[1])
if serial > run_serial:
run_serial = serial
run_serial += 1
run_id = "{}_{}".format(date, str(run_serial))
run_dir = os.path.join(output_dir, run_id)
os.makedirs(run_dir)
return run_id, run_dir
def output_model_summary_to_file(run_dir, model):
model_summary_file = os.path.join(run_dir, 'model_summary.txt')
with open(model_summary_file, 'wt') as f:
        # this hack is necessary to get the summary written to a file
model.summary(print_fn=lambda x: f.write(x + '\n'))
# Write the run_id and user message into a README file
def write_readme_file(run_dir, run_id, message, parameters):
readme_file = os.path.join(run_dir, 'README')
with open(readme_file, 'wt') as f:
f.write("Run {}\n\n{}\n\n".format(run_id, message))
for key in sorted(parameters):
f.write("{}: {}\n".format(key, parameters[key]))
def copy_this_script(run_dir):
filename = os.path.basename(__file__)
src = __file__
dst = os.path.join(run_dir, filename)
shutil.copy(src, dst)
def subtract_mean(image):
r_mean = 125.7
b_mean = 93.1
g_mean = 121.4
(B, G, R) = cv2.split(image.astype("float32"))
R -= r_mean
G -= g_mean
B -= b_mean
return cv2.merge([B, G, R])
def main():
parser = argparse.ArgumentParser(description="Train a Convnet to distinguish between positive and negative MapSwipe image tiles")
parser.add_argument('--project', '-p', metavar='<project_dir>', required=True,
help='Directory containing image tiles in train, validation and test subdirectories')
parser.add_argument('--output', '-o', metavar='<output_dir>', required=True,
help='Output Directory')
parser.add_argument('--n_epochs', '-n', metavar='<n_epochs>', type=int, required=True,
help='Number of epochs to run the training - e.g. 50')
parser.add_argument('--message', '-m', metavar='<message>',
help='Brief Message to write to a README file', default="")
args = parser.parse_args()
project_dir = args.project
output_dir = args.output
n_epochs = int(args.n_epochs)
message = args.message
# Bing Maps image tiles are 256x256 but reducing to 128x128 does not make a big difference
image_size = 224
#image_size = 150
#image_size = 128
#image_size = 256
parameters = {}
parameters['keras_version'] = keras.__version__
parameters['n_epochs'] = n_epochs
parameters['image_size'] = image_size
# Create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
run_id, run_dir = generate_run_id(output_dir)
print("\nThis is Run {}\n\n".format(run_id))
write_readme_file(run_dir, run_id, message, parameters)
copy_this_script(run_dir)
train_dir = os.path.join(project_dir, 'train')
validation_dir = os.path.join(project_dir, 'validation')
test_dir = os.path.join(project_dir, 'test')
model_checkpoint_hdf_file = os.path.join(run_dir, "mapswipe_model_checkpoint.h5")
model_final_hdf_file = os.path.join(run_dir, "mapswipe_model_final.h5")
model = setup_model(parameters['image_size'], parameters['n_epochs'])
# previous best
learning_rate = 0.0001
decay_rate = 1e-6
optimizer = optimizers.Adam(lr=learning_rate, decay=decay_rate)
# try with SGD with momentum
#learning_rate = 0.1
#decay_rate = learning_rate / n_epochs
#momentum = 0.8
#optimizer=optimizers.SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['acc'])
print(model.summary())
output_model_summary_to_file(run_dir, model)
# Use data augmentation to increase the effective size of the training dataset
# Not using shear as we should not find that in real images
train_datagen = ImageDataGenerator(
rescale=1./255,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='wrap',
samplewise_center = True
)
# rotation_range=40,
# featurewise_center=True
# zoom_range=0.2,
# preprocessing_function=subtract_mean
validation_datagen = ImageDataGenerator(
rescale=1./255,
samplewise_center = True
)
# preprocessing_function=subtract_mean
# batch size 64 gives slight improvement over 32 but takes longer
#batch_size = 32
batch_size = 64
#batch_size = 128
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(image_size, image_size),
batch_size=batch_size,
class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(image_size, image_size),
batch_size=batch_size,
class_mode='binary')
callbacks_list = [
# keras.callbacks.ReduceLROnPlateau(
# # This callback will monitor the validation loss of the model
# monitor='val_loss',
# # It will divide the learning by 10 when it gets triggered
# factor=0.5,
# # factor=0.1,
# # It will get triggered after the validation loss has stopped improving for at least 10 epochs
# patience=10,
# ),
# This callback will save the current weights after every epoch if the validation loss has improved
keras.callbacks.ModelCheckpoint(
filepath=model_checkpoint_hdf_file, # Path to the destination model file
monitor='val_loss',
save_best_only=True,
)
]
steps_per_epoch = 2800
validation_steps = 820
#steps_per_epoch = 500
#validation_steps = 100
history = model.fit_generator(
train_generator,
callbacks=callbacks_list,
steps_per_epoch=steps_per_epoch,
epochs=n_epochs,
validation_data=validation_generator,
validation_steps=validation_steps)
# Save our model after training and validation
model.save(model_final_hdf_file)
save_accuracy_and_loss_to_csv_file(run_dir, history)
plot_accuracy_and_loss(run_id, run_dir, history)
main()
``` |
{
"source": "JHP4911/mapswipe_utils",
"score": 3
} |
#### File: JHP4911/mapswipe_utils/mapswipe_display_grid_random_tiles.py
```python
import argparse
import sys
import os
import json
import random
from PIL import Image
def main():
parser = argparse.ArgumentParser(description="Display a grid of image tiles")
parser.add_argument('--tilelist', '-f', metavar='<tile_list_file>', required=True,
help='MapSwipe Project Tile List file')
parser.add_argument('--tiledir', '-d', metavar='<tile_directory>', required=True,
help='Directory of tile images')
parser.add_argument('--nx', '-x', metavar='<nx>', type=int, required=True,
help='Number of tiles in X dimension')
parser.add_argument('--ny', '-y', metavar='<ny>', type=int, required=True,
help='Number of tiles in Y dimension')
args = parser.parse_args()
nx = int(args.nx)
ny = int(args.ny)
tile_list_file = args.tilelist
tile_dir = args.tiledir
# create the base image
# Bing maps tiles are 256 x 256 pixels
tile_size = 256
image_width = nx * (tile_size + 1) + 1
image_height = ny * (tile_size + 1) + 1
base_image = Image.new('RGBA', (image_width, image_height), (0,0,0))
selected_tiles = {}
# Load the tile_ids into a list
with open(tile_list_file, 'rt') as f:
tile_ids = f.read().splitlines()
used_ints = {}
n_tiles = len(tile_ids)
print(n_tiles)
x = 0
y = 1
for i in range(ny):
x = 1
for j in range(nx):
k = random.randint(0, n_tiles-1)
# keep looking if this tile has already been seen
while k in used_ints:
k = random.randint(0, n_tiles-1)
used_ints[k] = 1
tile_id = tile_ids[k]
print(tile_id)
img_path = os.path.join(tile_dir, "{}.jpg".format(tile_id))
img = Image.open(img_path)
img = img.resize((tile_size, tile_size), Image.ANTIALIAS)
base_image.paste(img, (x, y))
x += tile_size + 1
print('')
y += tile_size + 1
base_image.show()
main()
```
#### File: JHP4911/mapswipe_utils/mapswipe_fetch_tile_block.py
```python
import argparse
import sys
import os
import urllib.request
import datetime
import time
import math
# Convert Tile X and Y to a Bing Maps Quadkey which is used to retrieve a tile
def tile_coords_and_zoom_to_quadkey(x, y, zoom):
quadkey = ''
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if(x & mask) != 0:
digit += 1
if(y & mask) != 0:
digit += 2
quadkey += str(digit)
return quadkey
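# Worked example: tile (x=3, y=5) at zoom 3 -> x = 011 binary, y = 101 binary.
# bit 3: x=0, y=1 -> digit 2; bit 2: x=1, y=0 -> digit 1; bit 1: x=1, y=1 -> digit 3,
# so tile_coords_and_zoom_to_quadkey(3, 5, 3) returns '213'.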
def main():
parser = argparse.ArgumentParser(description="Fetch a block of Bing Maps image tiles")
parser.add_argument('--outdir', '-o', metavar='<output_directory>', default='.',
help='Output directory to download to. Default: "."')
parser.add_argument('--keyfile', '-k', metavar='<bing maps key file>', required=True,
help='File containing the Bing maps API key')
parser.add_argument('--x', metavar='<x dimension low bound tile id>', required=True,
help='X dimension lower bound')
parser.add_argument('--y', metavar='<y dimension low bound tile id>', required=True,
help='Y dimension lower bound')
parser.add_argument('--nx', metavar='<number of tiles in x dimension>', required=True,
help='number of tiles in X dimension')
parser.add_argument('--ny', metavar='<number of tiles in y dimension>', required=True,
help='number of tiles in Y dimension')
parser.add_argument('--zoom', metavar='<bing maps zoom level>', default=18, type=int,
help='Bing Maps zoom level - default 18')
args = parser.parse_args()
output_dir = args.outdir
# tile id bounds are inclusive
x_lo = int(args.x)
y_lo = int(args.y)
nx = int(args.nx)
ny = int(args.ny)
zoom = int(args.zoom)
# get the bing maps api key
    try:
        with open(args.keyfile) as f:
            bing_maps_api_key = f.read().strip()
    except OSError:
        sys.exit("Problem reading Bing Maps API key")
# Bing Maps limits access to 50,000 records per day so spread them out
# Not necessary for this script
request_delay = math.ceil((24.0 * 60.0 * 60.0) / 50000)
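    # 24 * 60 * 60 / 50000 = 1.728 seconds per request, so math.ceil gives a 2 second delay between requests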
#create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Generate a list of the tile ids
tile_ids = []
for y in range(y_lo, y_lo+ny):
for x in range(x_lo, x_lo+nx):
tile_id = "{}-{}-{}".format(zoom, x, y)
tile_ids.append(tile_id)
# print(tile_id)
# Fetch the tiles
i = 0
for tile_id in tile_ids:
a = tile_id.split('-')
zoom = int(a[0])
tile_x = int(a[1])
tile_y = int(a[2])
quadkey = tile_coords_and_zoom_to_quadkey(tile_x, tile_y, zoom)
# construct the Bing Maps URL
tile_url = "http://t0.tiles.virtualearth.net/tiles/a{}.jpeg?g=854&mkt=en-US&token={}".format(quadkey, bing_maps_api_key)
output_file_path = os.path.join(output_dir, "{}.jpg".format(tile_id))
# Skip this tile if we already downloaded it
if os.path.exists(output_file_path):
print("skip {}".format(tile_id))
continue
local_filename, headers = urllib.request.urlretrieve(tile_url, output_file_path)
# # report progress every 25 tiles
# i += 1
# if i % 25 == 0:
# print("{} tiles".format(i))
# pause before getting the next one
time.sleep(request_delay)
main()
```
#### File: JHP4911/mapswipe_utils/mapswipe_find_negative_neighbors.py
```python
import argparse
import sys
import os
import json
import random
# Given a tile ID find a neighbor that is not in the dict
# This searches systematically for an immediate neighbor
# if it can't find one then it searches systematically 2 tiles away
# if it can't find one then it looks at random tiles up to 5 tiles away
# if it can't find one then it looks at random tiles up to 50 tiles away
#
# The idea is that we want to select negative tiles that are likely to have
# similar terrain to the positives
#
# This approach seems to work well in practice
def find_neighbor(all_tile_ids, tile_id):
# split the id
fields = tile_id.split('-')
zoom = int(fields[0])
tile_x = int(fields[1])
tile_y = int(fields[2])
# explore 1 unit away in clockwise order N, NE, E, SE etc
x_steps_1 = [ 0, 1, 1, 1, 0, -1, -1, -1 ]
y_steps_1 = [ -1, -1, 0, 1, 1, 1, 0, -1 ]
new_id = ''
neighbors = []
flag = 0
for i in range(len(x_steps_1)):
new_x = tile_x + x_steps_1[i]
new_y = tile_y + y_steps_1[i]
new_id = "{}-{}-{}".format(zoom, str(new_x), str(new_y))
if new_id not in all_tile_ids:
neighbors.append(new_id)
# if one or more neighbors exists then pick one at random
if len(neighbors) == 1:
new_id = neighbors[0]
all_tile_ids[new_id] = 1
flag = 1
elif len(neighbors) > 1:
new_id = random.choice(neighbors)
all_tile_ids[new_id] = 1
flag = 1
# if not then look for one 2 steps away
if flag == 0:
x_steps_2 = [ 0, 1, 2, 2, 2, 2, 2, 1, 0, -1, -2, -2, -2, -2, -2, -1 ]
y_steps_2 = [ -2, -2, -2, -1, 0, 1, 2, 2, 2, 2, 2, 1, 0, -1, -2, -2 ]
for i in range(len(x_steps_2)):
new_x = tile_x + x_steps_2[i]
new_y = tile_y + y_steps_2[i]
new_id = "{}-{}-{}".format(zoom, str(new_x), str(new_y))
if new_id not in all_tile_ids:
neighbors.append(new_id)
# if one or more neighbors exists then pick one at random
if len(neighbors) == 1:
new_id = neighbors[0]
all_tile_ids[new_id] = 1
flag = 1
elif len(neighbors) > 1:
new_id = random.choice(neighbors)
all_tile_ids[new_id] = 1
flag = 1
# generate random x and y within 5 tiles...
if flag == 0:
radius = 5
for i in range(200):
new_x = tile_x + (random.randint(-radius, radius))
new_y = tile_y + (random.randint(-radius, radius))
new_id = "{}-{}-{}".format(zoom, str(new_x), str(new_y))
# pick the first one
if new_id not in all_tile_ids:
all_tile_ids[new_id] = 1
flag = 1
break
if flag == 0:
radius = 50
for i in range(200):
new_x = tile_x + (random.randint(-radius, radius))
new_y = tile_y + (random.randint(-radius, radius))
new_id = "{}-{}-{}".format(zoom, str(new_x), str(new_y))
# pick the first one
if new_id not in all_tile_ids:
all_tile_ids[new_id] = 1
flag = 1
break
# report if we can't find a neighbor
if flag == 0:
print("{} has no neighbor".format(tile_id), file=sys.stderr)
return new_id
def main():
parser = argparse.ArgumentParser(description="Identify negative MapSwipe tiles near to positive tiles")
parser.add_argument('--jsonfile', '-f', metavar='<json_file>', required=True,
help='MapSwipe Project JSON file')
parser.add_argument('--tilelist', '-p', metavar='<tile_id_file>', required=True,
help='File of positive tile IDs')
args = parser.parse_args()
json_file = args.jsonfile
tile_list_file = args.tilelist
all_tile_ids = {}
positive_tile_ids = []
# Load all the IDs from the JSON file
with open(json_file, 'rt') as f:
json_text = f.read()
tiles = json.loads(json_text)
for tile in tiles:
all_tile_ids[tile['id']] = 1
# Load the positive IDs
with open(tile_list_file, 'rt') as f:
positive_tile_ids = f.read().splitlines()
# For each positive, calculate neighbors and pick one at random
for tile_id in positive_tile_ids:
# calculate a neighbor
new_id = find_neighbor(all_tile_ids, tile_id)
print(new_id)
main()
```
#### File: JHP4911/mapswipe_utils/mapswipe_select_tile_subset.py
```python
import argparse
import sys
import os
import shutil
import re
def main():
parser = argparse.ArgumentParser(description="Select a subset of map tiles based on a file of tile IDs")
parser.add_argument('--tilelist', '-t', metavar='<tile_list_file>', required=True,
help='File of tile IDs')
parser.add_argument('--indir', '-i', metavar='<input_directory>', required=True,
help='Input Directory')
parser.add_argument('--outdir', '-o', metavar='<output_directory>', required=True,
help='Output Directory')
parser.add_argument('--action', '-a', metavar='<action>', default='include',
help='action is to include (default) or exclude the supplied tile IDs')
args = parser.parse_args()
tile_list_file = args.tilelist
input_dir = args.indir
output_dir = args.outdir
action = args.action
# Load the tile IDs
# tile ID is the first field
tile_ids = []
lines = []
with open(tile_list_file, 'rt') as f:
lines = f.read().splitlines()
# Handle various inputs - extract IDs like 18-147209-144812
tile_id_pattern = re.compile(r'^(\d+-\d+-\d+)')
for line in lines:
m = tile_id_pattern.search(line)
if m:
tile_ids.append(m.group(0))
tile_hash = {}
for tile_id in tile_ids:
tile_hash[tile_id] = 1
# create output directory if it doesn't exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# List the input directory and copy files if appropriate
for filename in os.listdir(input_dir):
if filename.endswith(".jpg"):
input_id = filename.replace('.jpg', '')
if input_id in tile_hash:
if action == 'include':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
else:
if action == 'exclude':
src = os.path.join(input_dir, filename)
dst = os.path.join(output_dir, filename)
shutil.copyfile(src, dst)
main()
``` |
{
"source": "JHP4911/pi-Projects",
"score": 4
} |
#### File: JHP4911/pi-Projects/Birthday_Wisher.py
```python
import pandas as pd
import datetime
import smtplib
from email.message import EmailMessage
import os
def sendEmail(to, sub, msg):
print(f"email to {to} \nsend with subject: {sub}\n message: {msg}")
email = EmailMessage()
email['from'] = '<NAME>'
email['to'] = f"{to}"
email['subject'] = f'{sub}'
email.set_content(f'{msg}')
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.login('Email','password')
smtp.send_message(email)
print("Email send")
pass
if __name__ == "__main__":
df = pd.read_excel("data.xlsx")
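    # data.xlsx is expected to contain the columns used below: Name, Email, Birthday, Year and message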
print(df)
today = datetime.datetime.now().strftime("%d-%m")
#print(type(today))
update = []
yearnow = datetime.datetime.now().strftime("%Y")
#print(yearnow)
for index, item in df.iterrows():
#print(index,item['birthday'])
bday = item['Birthday'].strftime("%d-%m")
#print(type(bday))
if(bday == today) and yearnow not in str(item["Year"]):
sendEmail(item['Email'] ,"Happy BIrthday "+item["Name"], item['message'])
update.append(index)
for i in update:
yr = df.loc[i, 'Year']
#print(yr)
df.loc[i,'Year'] = f"{yr}, {yearnow}"
#print((df.loc[i, 'Year'])
#print(df)
df.to_excel("data.xlsx", index=False)
```
#### File: JHP4911/pi-Projects/Wikipedia_Article_Summarizer.py
```python
from bs4 import BeautifulSoup
import re
import requests
import heapq
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.corpus import stopwords
url = str(input("Paste the url\n"))
num = int(input("Enter the Number of Sentence you want in the summary"))
num = int(num)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
#url = str(input("Paste the url......."))
res = requests.get(url,headers=headers)
summary = ""
soup = BeautifulSoup(res.text,'html.parser')
content = soup.findAll("p")
for text in content:
summary +=text.text
def clean(text):
text = re.sub(r"\[[0-9]*\]"," ",text)
text = text.lower()
text = re.sub(r'\s+'," ",text)
text = re.sub(r","," ",text)
return text
summary = clean(summary)
print("Getting the data......\n")
## Tokenizing
sent_tokens = sent_tokenize(summary)
summary = re.sub(r"[^a-zA-z]"," ",summary)
word_tokens = word_tokenize(summary)
## Removing Stop words
word_frequency = {}
stopwords = set(stopwords.words("english"))
for word in word_tokens:
if word not in stopwords:
if word not in word_frequency.keys():
word_frequency[word]=1
else:
word_frequency[word] +=1
maximum_frequency = max(word_frequency.values())
print(maximum_frequency)
for word in word_frequency.keys():
word_frequency[word] = (word_frequency[word]/maximum_frequency)
print(word_frequency)
sentences_score = {}
for sentence in sent_tokens:
for word in word_tokenize(sentence):
if word in word_frequency.keys():
if (len(sentence.split(" "))) <30:
if sentence not in sentences_score.keys():
sentences_score[sentence] = word_frequency[word]
else:
sentences_score[sentence] += word_frequency[word]
print(max(sentences_score.values()))
def get_key(val):
for key, value in sentences_score.items():
if val == value:
return key
key = get_key(max(sentences_score.values()))
print(key+"\n")
print(sentences_score)
summary = heapq.nlargest(num,sentences_score,key=sentences_score.get)
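# heapq.nlargest picks the `num` sentences with the highest scores to form the summary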
print(" ".join(summary))
summary = " ".join(summary)
``` |
{
"source": "JHP4911/PyDoc",
"score": 2
} |
#### File: PyDoc/Managers/CheatsheetManager.py
```python
import requests
import re
import json
import ast
import os
import ui
import threading
import tarfile
import math
import time
import plistlib
import console
import shutil
import sqlite3
from Managers import DBManager, TypeManager
from Utilities import LogThread
class Cheatsheet (object):
def __init__(self):
self.__version = ''
self.__globalversion = ''
self.__name = ''
self.__aliases = []
self.__icon = None
self.__id = ''
self.__path = None
self.__status = ''
self.__stats = ''
self.__onlineid = ''
@property
def onlineid(self):
return self.__onlineid
@onlineid.setter
def onlineid(self, id):
self.__onlineid = id
@property
def version(self):
return self.__version
@version.setter
def version(self, version):
self.__version = version
@property
def globalversion(self):
return self.__globalversion
@globalversion.setter
def globalversion(self, globalversion):
self.__globalversion = globalversion
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
@property
def aliases(self):
return self.__aliases
@aliases.setter
def aliases(self, aliases):
self.__aliases = aliases
@property
def image(self):
return self.__icon
@image.setter
def image(self, icon):
self.__icon = icon
@property
def id(self):
return self.__id
@id.setter
def id(self, id):
self.__id = id
@property
def path(self):
return self.__path
@path.setter
def path(self, path):
self.__path = path
@property
def status(self):
return self.__status
@status.setter
def status(self, status):
self.__status = status
@property
def stats(self):
return self.__stats
@stats.setter
def stats(self, stats):
self.__stats = stats
class CheatsheetManager (object):
def __init__(self, serverManager, iconPath, typeIconPath):
self.typeManager = TypeManager.TypeManager(typeIconPath)
self.serverManager = serverManager
self.iconPath = iconPath
self.typeIconPath = typeIconPath
self.localServer = None
self.jsonServerLocation = 'zzz/cheatsheets/cheat.json'
self.downloadServerLocation = 'zzz/cheatsheets/%@.tgz'
self.plistPath = 'Contents/Info.plist'
self.indexPath = 'Contents/Resources/docSet.dsidx'
self.cheatsheetFolder = 'Docsets/Cheatsheets'
self.headers = {'User-Agent': 'PyDoc-Pythonista'}
self.cheatsheets = None
self.downloading = []
self.workThreads = []
self.downloadThreads = []
self.uiUpdateThreads = []
self.__createCheatsheetFolder()
def getAvailableCheatsheets(self):
cheatsheets = self.__getOnlineCheatsheets()
for d in self.__getDownloadedCheatsheets():
for c in cheatsheets:
if c.name == d.name:
c.status = 'installed'
c.path = d.path
c.id = d.id
for d in self.__getDownloadingCheatsheets():
for c in cheatsheets:
if c.name == d.name:
c.status = d.status
try:
c.stats = d.stats
except KeyError:
c.stats = 'downloading'
return cheatsheets
def __getOnlineCheatsheets(self):
if self.cheatsheets == None:
self.cheatsheets = self.__getCheatsheets()
return self.cheatsheets
def __getDownloadedCheatsheets(self):
ds = []
dbManager = DBManager.DBManager()
t = dbManager.InstalledDocsetsByType('cheatsheet')
ds = []
for d in t:
aa = Cheatsheet()
aa.name = d[1]
aa.id = d[0]
aa.path = os.path.join(os.path.abspath('.'),d[2])
aa.image = self.__getIconWithName(d[4])
ds.append(aa)
return ds
def __getDownloadingCheatsheets(self):
return self.downloading
def getDownloadedCheatsheets(self):
return self.__getDownloadedCheatsheets()
def __getCheatsheets(self):
server = self.serverManager.getDownloadServer(self.localServer)
url = server.url
if not url[-1] == '/':
url = url + '/'
url = url + self.jsonServerLocation
data = requests.get(url).text
data = ast.literal_eval(data)
cheatsheets = []
icon = self.__getIconWithName('cheatsheet')
for k,d in data['cheatsheets'].items():
c = Cheatsheet()
c.name = d['name']
c.aliases = d['aliases']
c.globalversion = data['global_version']
c.version = d['version']
c.image = icon
c.onlineid = k
c.status = 'online'
cheatsheets.append(c)
return sorted(cheatsheets, key=lambda x: x.name.lower())
def __getIconWithName(self, name):
imgPath = os.path.join(os.path.abspath('.'), self.iconPath, name+'.png')
if not os.path.exists(imgPath):
imgPath = os.path.join(os.path.abspath('.'), self.iconPath, 'Other.png')
return ui.Image.named(imgPath)
def __createCheatsheetFolder(self):
if not os.path.exists(self.cheatsheetFolder):
os.mkdir(self.cheatsheetFolder)
def downloadCheatsheet(self, cheatsheet, action, refresh_main_view):
if not cheatsheet in self.downloading:
cheatsheet.status = 'downloading'
self.downloading.append(cheatsheet)
action()
workThread = LogThread.LogThread(target=self.__determineUrlAndDownload, args=(cheatsheet,action,refresh_main_view,))
self.workThreads.append(workThread)
workThread.start()
def __determineUrlAndDownload(self, cheatsheet, action, refresh_main_view):
cheatsheet.stats = 'getting download link'
action()
downloadLink = self.__getDownloadLink(cheatsheet.onlineid)
downloadThread = LogThread.LogThread(target=self.downloadFile, args=(downloadLink,cheatsheet,refresh_main_view,))
self.downloadThreads.append(downloadThread)
downloadThread.start()
updateThread = LogThread.LogThread(target=self.updateUi, args=(action,downloadThread,))
self.uiUpdateThreads.append(updateThread)
updateThread.start()
def updateUi(self, action, t):
while t.is_alive():
action()
time.sleep(0.5)
action()
def __getDownloadLink(self, id):
server = self.serverManager.getDownloadServer(self.localServer)
url = server.url
if not url[-1] == '/':
url = url + '/'
url = url + self.downloadServerLocation
url = url.replace('%@', id)
return url
def downloadFile(self, url, cheatsheet, refresh_main_view):
local_filename = self.__downloadFile(url, cheatsheet)
#self.__downloadFile(url+'.tarix', cheatsheet)
cheatsheet.status = 'waiting for install'
self.installCheatsheet(local_filename, cheatsheet, refresh_main_view)
def __downloadFile(self, url, cheatsheet):
local_filename = self.cheatsheetFolder+'/'+url.split('/')[-1]
r = requests.get(url, headers = self.headers, stream=True)
ret = None
if r.status_code == 200:
ret = local_filename
total_length = r.headers.get('content-length')
dl = 0
last = 0
if os.path.exists(local_filename):
os.remove(local_filename)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
dl += len(chunk)
f.write(chunk)
if total_length is not None:
done = 100 * dl / int(total_length)
cheatsheet.stats = str(round(done,2)) + '% ' + str(self.convertSize(dl)) + ' / '+ str(self.convertSize(float(total_length)))
else:
cheatsheet.stats = str(self.convertSize(dl))
r.close()
return ret
def installCheatsheet(self, filename, cheatsheet, refresh_main_view):
extract_location = self.cheatsheetFolder
cheatsheet.status = 'Preparing to install: This might take a while.'
tar = tarfile.open(filename, 'r:gz')
n = [name for name in tar.getnames() if '/' not in name][0]
m = os.path.join(self.cheatsheetFolder, n)
tar.extractall(path=extract_location, members = self.track_progress(tar, cheatsheet, len(tar.getmembers())))
tar.close()
os.remove(filename)
dbManager = DBManager.DBManager()
dbManager.DocsetInstalled(cheatsheet.name, m, 'cheatsheet', 'cheatsheet', cheatsheet.version)
if cheatsheet in self.downloading:
self.downloading.remove(cheatsheet)
self.indexCheatsheet(cheatsheet, refresh_main_view, m)
def track_progress(self, members, cheatsheet, totalFiles):
i = 0
for member in members:
i = i + 1
done = 100 * i / totalFiles
cheatsheet.status = 'installing: ' + str(round(done,2)) + '% ' + str(i) + ' / '+ str(totalFiles)
yield member
def indexCheatsheet(self, cheatsheet, refresh_main_view, path):
cheatsheet.status = 'indexing'
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT count(*) FROM sqlite_master WHERE type = \'table\' AND name = \'searchIndex\''
c = conn.execute(sql)
data = c.fetchone()
if int(data[0]) == 0:
sql = 'CREATE TABLE searchIndex(rowid INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT)'
c = conn.execute(sql)
conn.commit()
sql = 'SELECT f.ZPATH, m.ZANCHOR, t.ZTOKENNAME, ty.ZTYPENAME, t.rowid FROM ZTOKEN t, ZTOKENTYPE ty, ZFILEPATH f, ZTOKENMETAINFORMATION m WHERE ty.Z_PK = t.ZTOKENTYPE AND f.Z_PK = m.ZFILE AND m.ZTOKEN = t.Z_PK ORDER BY t.ZTOKENNAME'
c = conn.execute(sql)
data = c.fetchall()
for t in data:
conn.execute("insert into searchIndex values (?, ?, ?, ?)", (t[4], t[2], self.typeManager.getTypeForName(t[3]).name, t[0] ))
conn.commit()
else:
sql = 'SELECT rowid, type FROM searchIndex'
c = conn.execute(sql)
data = c.fetchall()
for t in data:
newType = self.typeManager.getTypeForName(t[1])
if newType is not None and newType.name != t[1]:
conn.execute("UPDATE searchIndex SET type=(?) WHERE rowid = (?)", (newType.name, t[0] ))
conn.commit()
conn.close()
self.postProcess(cheatsheet, refresh_main_view)
def postProcess(self, cheatsheet, refresh_main_view):
cheatsheet.status = 'installed'
refresh_main_view()
def convertSize(self, size):
if (size == 0):
return '0B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return '%s %s' % (s,size_name[i])
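# Hedged worked example (illustrative only): convertSize(1536) returns '1.5 KB',
# since floor(log(1536, 1024)) == 1 and round(1536 / 1024, 2) == 1.5.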
def deleteCheatsheet(self, cheatsheet, post_action):
but = console.alert('Are you sure?', 'Would you like to delete the cheatsheet, ' + cheatsheet.name, 'Ok')
if but == 1:
dbmanager = DBManager.DBManager()
dbmanager.DocsetRemoved(cheatsheet.id)
shutil.rmtree(cheatsheet.path)
cheatsheet.status = 'online'
post_action()
cheatsheet.path = None
def getTypesForCheatsheet(self, cheatsheet):
types = []
path = cheatsheet.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type FROM searchIndex GROUP BY type ORDER BY type COLLATE NOCASE'
c = conn.execute(sql)
data = c.fetchall()
conn.close()
for t in data:
types.append(self.typeManager.getTypeForName(t[0]))
return types
def getIndexesbyTypeForCheatsheet(self, cheatsheet, type):
indexes = []
path = cheatsheet.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex WHERE type = (?) ORDER BY name COLLATE NOCASE'
c = conn.execute(sql, (type.name,))
data = c.fetchall()
conn.close()
for t in data:
indexes.append({'type':self.typeManager.getTypeForName(t[0]), 'name':t[1],'path':t[2]})
return indexes
def getIndexesForCheatsheet(self, cheatsheet):
indexes = []
path = cheatsheet.path
indexPath = os.path.join(path, self.indexPath)
conn = sqlite3.connect(indexPath)
sql = 'SELECT type, name, path FROM searchIndex ORDER BY name COLLATE NOCASE'
c = conn.execute(sql)
data = c.fetchall()
conn.close()
for t in data:
indexes.append({'type':self.typeManager.getTypeForName(t[0]), 'name':t[1],'path':t[2]})
return indexes
if __name__ == '__main__':
import ServerManager
c = CheatsheetManager(ServerManager.ServerManager(), '../Images/icons', '../Images/types')  # the constructor requires a typeIconPath; '../Images/types' is an assumed placeholder
```
#### File: PyDoc/Managers/DBManager.py
```python
import sqlite3
class DBManager (object):
def __init__(self):
self.docsetDBLocation = 'Docsets/docsets.db'
self.connection = None
self.SetupDocsetDB()
def SetupDocsetDB(self):
self.connection = sqlite3.connect(self.docsetDBLocation)
c = self.connection.cursor()
c.execute('CREATE TABLE IF NOT EXISTS docsets(ID INTEGER PRIMARY KEY AUTOINCREMENT, Name TEXT NOT NULL, Path TEXT NOT NULL, Type TEXT NOT NULL, Icon TEXT NOT NULL, Version REAL NULL, OtherData TEXT NOT NULL);')
self.connection.commit()
def DocsetInstalled(self, name, path, type, icon, version, otherdata = ''):
c = self.connection.cursor()
c.execute('INSERT INTO docsets (Name, Path, Type, Icon, Version, OtherData) VALUES (?,?,?,?,?,?)',(name,path,type,icon,version,otherdata,))
self.connection.commit()
def DocsetRemoved(self, id):
c = self.connection.cursor()
c.execute('DELETE FROM docsets WHERE ID = (?)',(id,))
self.connection.commit()
def InstalledDocsets(self):
return self.connection.execute('SELECT * FROM docsets').fetchall()
def InstalledDocsetsByType(self, type):
return self.connection.execute('SELECT * FROM docsets WHERE type = (?) ORDER BY name COLLATE NOCASE', (type,)).fetchall()
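# Hedged usage sketch (paths and values below are assumptions, not from the original repo):
# db = DBManager()
# db.DocsetInstalled('Python 3', 'Docsets/Python3.docset', 'docset', 'python', 1.0)
# print(db.InstalledDocsetsByType('docset'))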
```
#### File: PyDoc/Views/SettingsView.py
```python
import ui
import console
import threading
import time
from objc_util import ObjCClass, NSURL, ns
class SettingsView (object):
def __init__(self, show_docset_management_view, show_cheatsheet_management_view, show_usercontributed_management_view):
self.data = ['Standard Docsets', 'Cheat Sheets', 'User Contributed Docsets']
self.ack_data = [{'text':'Dash','url':'https://kapeli.com/dash'}]
self.manage_docset_row = 0
self.manage_cheatsheet_row = 1
self.manage_usercontributed_row = 2
self.show_docset_management_view = show_docset_management_view
self.show_cheatsheet_management_view = show_cheatsheet_management_view
self.show_usercontributed_management_view = show_usercontributed_management_view
self.docset_section_number = 0
self.ack_section_number = 1
def tableview_did_select(self, tableview, section, row):
if self.docset_section_number == section:
if self.manage_docset_row == row:
self.show_docset_management_view()
elif self.manage_cheatsheet_row == row:
console.show_activity('Loading Cheat Sheets...')
uiThread = threading.Thread(target=self.show_cheatsheet_management_view)
uiThread.start()
elif self.manage_usercontributed_row == row:
console.show_activity('Loading User Contributed Docsets...')
uiThread = threading.Thread(target=self.show_usercontributed_management_view)
uiThread.start()
if self.ack_section_number == section:
if row == 0:
self.open_url(self.ack_data[row]['url'])
def tableview_number_of_sections(self, tableview):
return 2
def tableview_number_of_rows(self, tableview, section):
if section == self.docset_section_number:
return len(self.data)
if section == self.ack_section_number:
return len(self.ack_data)
def tableview_cell_for_row(self, tableview, section, row):
cell = ui.TableViewCell()
if section == self.docset_section_number:
cell.text_label.text = self.data[row]
cell.accessory_type = 'disclosure_indicator'
elif section == self.ack_section_number:
cell.text_label.text = self.ack_data[row]['text']
return cell
def tableview_title_for_header(self, tableview, section):
if section == self.docset_section_number:
return 'Manage'
if section == self.ack_section_number:
return 'Docsets are provided by Dash, the macOS docset browser. Please check out Dash by clicking the link below.'
def open_url(self, url):
UIApplication = ObjCClass('UIApplication')
sharedApplication = UIApplication.sharedApplication()
internalurl = NSURL.URLWithString_(ns(url))
sharedApplication.openURL_(internalurl)
tv = ui.TableView('grouped')
def get_view(show_docset_management_view, show_cheatsheet_management_view, show_usercontributed_management_view):
w,h = ui.get_screen_size()
tv.width = w
tv.height = h
tv.flex = 'WH'
tv.name = 'Settings'
data = SettingsView(show_docset_management_view, show_cheatsheet_management_view, show_usercontributed_management_view)
tv.delegate = data
tv.data_source = data
return tv
``` |
{
"source": "JHP4911/SLAM-on-Raspberry-Pi",
"score": 3
} |
#### File: JHP4911/SLAM-on-Raspberry-Pi/mqtt-slamviz.py
```python
import time
from sys import exit
from roboviz import MapVisualizer
import numpy as np
try:
import paho.mqtt.client as mqtt
except ImportError:
exit("This example requires the paho-mqtt module\nInstall with: sudo pip install paho-mqtt")
MQTT_SERVER = "test.mosquitto.org"
MQTT_PORT = 1883
MQTT_TOPIC = "safetycam/topic/slamviz"
# Set these to use authorisation
MQTT_USER = None
MQTT_PASS = None
MAP_SIZE_PIXELS = 200
MAP_SIZE_METERS = 10
# Set up a SLAM display
viz = MapVisualizer(MAP_SIZE_PIXELS, MAP_SIZE_METERS, 'SLAM', show_trajectory=True)
print("""
Subscribing to public MQTT messages from {server} on port {port} to visualize SLAM!
It will monitor the {topic} topic by default and decode the bytearray payload.
""".format(
server=MQTT_SERVER,
port=MQTT_PORT,
topic=MQTT_TOPIC
))
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
client.subscribe(MQTT_TOPIC)
def on_message(client, userdata, msg):
robotPos_bytes = msg.payload[:24]
map_bytes = msg.payload[24:]
robotPos = np.frombuffer(robotPos_bytes, dtype='float64')
robotPos = np.array(robotPos)
x, y, theta = robotPos
viz.display(x / 1000., y / 1000., theta, map_bytes)
return
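# Hedged publisher-side sketch (assumed, not part of this script): the payload this
# callback expects is 3 float64 values (x, y in mm, theta) followed by the occupancy
# map bytes, e.g.:
# pose = np.array([1000.0, 2000.0, 45.0], dtype='float64')   # 24 bytes
# mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
# client.publish(MQTT_TOPIC, pose.tobytes() + bytes(mapbytes))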
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
if MQTT_USER is not None and MQTT_PASS is not None:
print("Using username: {un} and password: {pw}".format(un=MQTT_USER, pw="*" * len(MQTT_PASS)))
client.username_pw_set(username=MQTT_USER, password=MQTT_PASS)
client.connect(MQTT_SERVER, MQTT_PORT, 60)
client.loop_forever()
```
#### File: JHP4911/SLAM-on-Raspberry-Pi/rpslam-thread.py
```python
import os
import time
from math import cos, sin, pi, floor
# import pygame
from adafruit_rplidar import RPLidar, RPLidarException
import numpy as np
import matplotlib.pyplot as plt
import paho.mqtt.client as mqtt
from threading import Thread
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import RPLidarA1 as LaserModel
# from rplidar import RPLidar as Lidar
# from adafruit_rplidar import RPLidar as Lidar
from roboviz import MapVisualizer
# Screen width & height
W = 640
H = 480
MAP_SIZE_PIXELS = 250
MAP_SIZE_METERS = 15
MIN_SAMPLES = 150
SCAN_BYTE = b'\x20'
SCAN_TYPE = 129
slamData = []
# Setup the RPLidar
PORT_NAME = '/dev/ttyUSB0'
lidar = RPLidar(None, PORT_NAME)
# Create an RMHC SLAM object with a laser model and optional robot model
slam = RMHC_SLAM(LaserModel(), MAP_SIZE_PIXELS, MAP_SIZE_METERS)
# # Set up a SLAM display
viz = MapVisualizer(MAP_SIZE_PIXELS, MAP_SIZE_METERS, 'SLAM', show_trajectory=True)
# Initialize an empty trajectory
trajectory = []
# To exit lidar scan thread gracefully
runThread = True
# Initialize empty map
mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
# used to scale data to fit on the screen
max_distance = 0
# x, y, theta = 0, 0, 0
# Pose will be modified in our threaded code
pose = [0, 0, 0]
scan_data = [0]*360
def _process_scan(raw):
'''Processes input raw data and returns measurment data'''
new_scan = bool(raw[0] & 0b1)
inversed_new_scan = bool((raw[0] >> 1) & 0b1)
quality = raw[0] >> 2
if new_scan == inversed_new_scan:
raise RPLidarException('New scan flags mismatch')
check_bit = raw[1] & 0b1
if check_bit != 1:
raise RPLidarException('Check bit not equal to 1')
angle = ((raw[1] >> 1) + (raw[2] << 7)) / 64.
distance = (raw[3] + (raw[4] << 8)) / 4.
return new_scan, quality, angle, distance
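# Hedged worked example (illustrative bytes): _process_scan(bytes([0x3e, 0x81, 0x20, 0xd0, 0x07]))
# returns (False, 15, 65.0, 500.0): quality 15, angle 65 degrees, distance 500 mm.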
def lidar_measurments(self, max_buf_meas=500):
lidar.set_pwm(800)
status, error_code = self.health
cmd = SCAN_BYTE
self._send_cmd(cmd)
dsize, is_single, dtype = self._read_descriptor()
if dsize != 5:
raise RPLidarException('Wrong info reply length')
if is_single:
raise RPLidarException('Not a multiple response mode')
if dtype != SCAN_TYPE:
raise RPLidarException('Wrong response data type')
while True:
raw = self._read_response(dsize)
self.log_bytes('debug', 'Received scan response: ', raw)
if max_buf_meas:
data_in_buf = self._serial_port.in_waiting
if data_in_buf > max_buf_meas*dsize:
self.log('warning',
'Too many measurments in the input buffer: %d/%d. '
'Clearing buffer...' %
(data_in_buf//dsize, max_buf_meas))
self._serial_port.read(data_in_buf//dsize*dsize)
yield _process_scan(raw)
def lidar_scans(self, max_buf_meas=800, min_len=100):
scan = []
iterator = lidar_measurments(lidar,max_buf_meas)
for new_scan, quality, angle, distance in iterator:
if new_scan:
if len(scan) > min_len:
yield scan
scan = []
if quality > 0 and distance > 0:
scan.append((quality, angle, distance))
def slam_compute(pose, mapbytes):
try:
# We will use these to store previous scan in case current scan is inadequate
previous_distances = None
previous_angles = None
scan_count = 0
for scan in lidar_scans(lidar):
# To stop the thread
if not runThread:
break
scan_count += 1
# Extract (quality, angle, distance) triples from current scan
items = [item for item in scan]
# Extract distances and angles from triples
distances = [item[2] for item in items]
angles = [item[1] for item in items]
# Update SLAM with current Lidar scan and scan angles if adequate
if len(distances) > MIN_SAMPLES:
slam.update(distances, scan_angles_degrees=angles)
previous_distances = distances.copy()
previous_angles = angles.copy()
# If not adequate, use previous
elif previous_distances is not None:
slam.update(previous_distances, scan_angles_degrees=previous_angles)
# Get new position
pose[0], pose[1], pose[2] = slam.getpos()
# Get current map bytes as grayscale
slam.getmap(mapbytes)
except KeyboardInterrupt:
lidar.stop()
lidar.disconnect()
raise
# Launch the slam computation thread
thread = Thread(target=slam_compute,
args=(pose, mapbytes))
thread.daemon = True
thread.start()
try:
# Loop forever,displaying current map and pose
while True:
#print("x = " + str(pose[0]) + " y = " + str(pose[1]) + "theta = " + str(pose[2]))
if not viz.display(pose[0]/1000., pose[1]/1000., pose[2], mapbytes):
raise KeyboardInterrupt
except KeyboardInterrupt:
runThread = False
thread.join()
lidar.stop()
lidar.disconnect()
exit(0)
``` |
{
"source": "jhpark428/studio",
"score": 2
} |
#### File: function/classification/svm_classification.py
```python
import pandas as pd
import numpy as np
from sklearn import svm
from brightics.function.utils import _model_dict
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, dict2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than, greater_than_or_equal_to, less_than, \
over_to, less_than_or_equal_to, raise_runtime_error, greater_than_or_equal_to_or_equal_to
import sklearn.utils as sklearn_utils
from brightics.common.classify_input_type import check_col_type
from brightics.common.exception import BrighticsFunctionException as BFE
def svm_classification_train(table, group_by=None, **params):
check_required_parameters(_svm_classification_train, params, ['table','gamma_val'])
params = get_default_from_parameters_if_required(params, _svm_classification_train)
if params['gamma'] == 'other':
if 'gamma_val' not in params:
raise BFE.from_errors([{'0100': 'Gamma value is mandatory when gamma is other'}])
if params['gamma_val'] <= 0:
raise BFE.from_errors([{'0100': 'Gamma value must be greater than 0'}])
else:
params['gamma_val'] = None
param_validation_check = [over_to(params, 0.0, 1.0, 'c'),
greater_than_or_equal_to(params, 0, 'degree'),
greater_than(params, 0.0, 'tol'),
greater_than_or_equal_to_or_equal_to(params, 1, -1, 'max_iter')]
validate(*param_validation_check)
if group_by is not None:
grouped_model = _function_by_group(_svm_classification_train, table, group_by=group_by, **params)
return grouped_model
else:
return _svm_classification_train(table, **params)
def _svm_classification_train(table, feature_cols, label_col, gamma_val, c=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
shrinking=True, probability=True, tol=1e-3, max_iter=-1, random_state=None, class_weight=None):
_table = table.copy()
feature_names, features = check_col_type(table, feature_cols)
_label_col = _table[label_col]
if(sklearn_utils.multiclass.type_of_target(_label_col) == 'continuous'):
raise_runtime_error('''Label Column should not be continuous.''')
class_labels = sorted(set(_label_col))
if class_weight is not None:
if len(class_weight) != len(class_labels):
raise ValueError("Number of class weights should match number of labels.")
else:
class_weight = {class_labels[i] : class_weight[i] for i in range(len(class_labels))}
if gamma == 'other':
_gamma = gamma_val
else:
_gamma = gamma
_svc = svm.SVC(C=c, kernel=kernel, degree=degree, gamma=_gamma, coef0=coef0, shrinking=shrinking,
probability=probability, tol=tol, max_iter=max_iter, random_state=random_state, class_weight=class_weight)
_svc_model = _svc.fit(features, _label_col)
get_param = _svc.get_params()
get_param['feature_cols'] = feature_names
get_param['label_col'] = label_col
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## SVM Classification Result
| ### Parameters
| {table_parameter}
""".format(table_parameter=dict2MD(get_param))))
_model = _model_dict('svc_model')
_model['svc_model'] = _svc_model
_model['features'] = feature_cols
_model['_repr_brtc_'] = rb.get()
return {'model':_model}
def svm_classification_predict(table, model, **params):
check_required_parameters(_svm_classification_predict, params, ['table', 'model'])
if '_grouped_data' in model:
return _function_by_group(_svm_classification_predict, table, model, **params)
else:
return _svm_classification_predict(table, model, **params)
def _svm_classification_predict(table, model, prediction_col='prediction', prob_prefix='probability',
display_log_prob=True, log_prob_prefix='log_probability',
thresholds=None, probability_col='probability', log_probability_col='log_probability',
suffix='index'):
# # migration for 3.6.0.4 <- studio
if (probability_col != 'probability'):
prob_prefix = probability_col
if (log_probability_col != 'log_probability'):
log_prob_prefix = log_probability_col
# # migration for 3.6.0.4 <- studio
_table = table.copy()
if 'features' in model:
feature_cols = model['features']
feature_names, features = check_col_type(table, feature_cols)
else:
feature_cols = model['feature_cols']
features = table[feature_cols]
if 'svc_model' in model:
svc_model = model['svc_model']
classes = svc_model.classes_
len_classes = len(classes)
is_binary = len_classes == 2
else:
classes = np.array([0, 1])
len_classes = 2
is_binary = True
if thresholds is None:
thresholds = np.array([1 / len_classes for _ in classes])
elif isinstance(thresholds, list):
if len(thresholds) == 1 and is_binary and 0 < thresholds[0] < 1:
thresholds = np.array([thresholds[0], 1 - thresholds[0]])
else:
thresholds = np.array(thresholds)
# validation: the lengths of classes and thresholds must be equal.
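# Hedged worked example of the branch above (assumed values): for a binary model,
# passing thresholds=[0.3] expands it to np.array([0.3, 0.7]); the prediction below
# then takes classes[np.argmax(prob / thresholds, axis=1)] row by row.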
if suffix == 'index':
suffixes = [i for i, _ in enumerate(classes)]
else:
suffixes = classes
if 'svc_model' in model:
prob = svc_model.predict_proba(features)
else:
coef = model['table_1'].values[0][:-2]
prob = 1 / (np.exp((np.sum(features.values * coef, axis=1) + model['table_1']['intercept'][0]) / np.sum(coef * coef) ** 0.5) + 1)
prob = np.c_[prob, 1 - prob]
prob_cols = ['{probability_col}_{suffix}'.format(probability_col=prob_prefix, suffix=suffix) for suffix in suffixes]
prob_df = pd.DataFrame(data=prob, columns=prob_cols)
prediction = classes[np.argmax(prob / thresholds, axis=1)]
out_table = table.copy()
out_table[prediction_col] = prediction
if display_log_prob:
log_prob = np.log(prob)
logprob_cols = ['{log_probability_col}_{suffix}'.format(log_probability_col=log_prob_prefix, suffix=suffix)
for suffix in suffixes]
logprob_df = pd.DataFrame(data=log_prob, columns=logprob_cols)
out_table = pd.concat([out_table, prob_df, logprob_df], axis=1)
else:
out_table = pd.concat([out_table, prob_df], axis=1)
return {'out_table' : out_table}
```
#### File: function/evaluation/__init__.py
```python
import pandas as pd
import numpy as np
import itertools
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, plt2MD
import matplotlib.pyplot as plt
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, median_absolute_error, r2_score
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score
from inspect import signature
from brightics.common.groupby import _function_by_group
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.utils import check_required_parameters
from brightics.common.validation import validate, greater_than, greater_than_or_equal_to, less_than, \
less_than_or_equal_to, raise_runtime_error
from sklearn import preprocessing
from brightics.common.validation import raise_runtime_error
def evaluate_regression(table, group_by=None, **params):
check_required_parameters(_evaluate_regression, params, ['table'])
if group_by is not None:
return _function_by_group(_evaluate_regression, table, group_by=group_by, **params)
else:
return _evaluate_regression(table, **params)
def _evaluate_regression(table, label_col, prediction_col):
label = table[label_col]
predict = table[prediction_col]
# compute metrics
evs = explained_variance_score(label, predict)
mse = mean_squared_error(label, predict)
rmse = np.sqrt(mse)
mae = mean_absolute_error(label, predict)
mape = _mean_absolute_percentage_error(label, predict)
mdae = median_absolute_error(label, predict)
r2 = r2_score(label, predict)
# json
summary = dict()
summary['label_col'] = label_col
summary['prediction_col'] = prediction_col
summary['r2_score'] = r2
summary['mean_squared_error'] = mse
summary['root_mean_squared_error'] = rmse
summary['mean_absolute_error'] = mae
summary['median_absolute_error'] = mdae
summary['explained_variance_score'] = evs
# report
all_dict_list = [{'r2_score': r2, 'mean_squared_error': mse, 'root_mean_squared_error': rmse, 'mean_absolute_error': mae, 'mean_absolute_percentage_error': mape, 'median_absolute_error': mdae, 'explained_variance_score': evs}]
all_df = pd.DataFrame(all_dict_list)
all_df = all_df[['r2_score', 'mean_squared_error', 'root_mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error', 'median_absolute_error', 'explained_variance_score']]
summary['metrics'] = all_df
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Evaluate Regression Result
| ### Metrics
| {table1}
|
|
""".format(table1=pandasDF2MD(all_df)
)))
summary['_repr_brtc_'] = rb.get()
return {'result' : summary}
def _mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((np.array(y_true) - np.array(y_pred)) / np.array(y_true))) * 100
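# Hedged worked example: _mean_absolute_percentage_error([100, 200], [110, 180]) -> 10.0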
def evaluate_classification(table, group_by=None, **params):
check_required_parameters(_evaluate_classification, params, ['table'])
if group_by is not None:
return _function_by_group(_evaluate_classification, table, group_by=group_by, **params)
else:
return _evaluate_classification(table, **params)
def _evaluate_classification(table, label_col, prediction_col, average="weighted"):
if average == 'None':
average = None
label = table[label_col]
predict = table[prediction_col]
# compute metrics
accuracy = accuracy_score(label, predict)
f1 = f1_score(label, predict, average=average)
precision = precision_score(label, predict, average=average)
recall = recall_score(label, predict, average=average)
class_names = np.unique(np.union1d(label.values, predict.values))
# Plot non-normalized confusion matrix
plt.figure()
_plot_confusion_matrix(label, predict, classes=class_names,
title='Confusion matrix, without normalization')
fig_cnf_matrix = plt2MD(plt)
# Plot normalized confusion matrix
plt.figure()
_plot_confusion_matrix(label, predict, classes=class_names, normalize=True,
title='Normalized confusion matrix')
fig_cnf_matrix_normalized = plt2MD(plt)
plt.clf()
# json
summary = dict()
summary['label_col'] = label_col
summary['prediction_col'] = prediction_col
summary['f1_score'] = f1
summary['accuracy_score'] = accuracy
summary['precision_score'] = precision
summary['recall_score'] = recall
# report
if average == 'weighted' or average == 'macro':
all_dict_list = [{'f1': f1, 'precision': precision, 'recall': recall}]
all_df = pd.DataFrame(all_dict_list)
all_df = all_df[['f1', 'precision', 'recall']]
else:
all_dict_list = [f1, precision, recall]
all_df = pd.DataFrame(all_dict_list)
all_df = all_df.transpose()
all_df.columns = ['f1', 'precision', 'recall']
all_df['label'] = class_names
all_df = all_df[['label'] + all_df.columns[:-1].tolist()]
summary['metrics'] = all_df
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Evaluate Classification Result
|
| ### Accuracy : {accuracy}
|
| ### Metrics
| {table1}
|
| ### Confusion matrix
| {fig_confusion_matrix}
|
| {fig_confusion_matrix_normalized}
|
""".format(accuracy=accuracy, table1=pandasDF2MD(all_df),
fig_confusion_matrix=fig_cnf_matrix,
fig_confusion_matrix_normalized=fig_cnf_matrix_normalized
)))
summary['_repr_brtc_'] = rb.get()
return {'result' : summary}
def _plot_confusion_matrix(label, predict, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# #This code is from http://scikit-learn.org/stable/auto_examples/model_selection/_plot_confusion_matrix.html
cnf_matrix = confusion_matrix(label, predict)
if normalize:
cnf_matrix = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
# print(cnf_matrix)
plt.imshow(cnf_matrix, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cnf_matrix.max() / 2.
for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
plt.text(j, i, format(cnf_matrix[i, j], fmt),
horizontalalignment="center",
color="white" if cnf_matrix[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
def _plot_binary(label, probability, threshold=None, fig_size=(6.4, 4.8), pos_label=None):
fpr, tpr, threshold_roc = roc_curve(label, probability, pos_label=pos_label)
# tpf 1-fpr
if threshold is None:
argmin = np.argmin(np.abs(tpr + fpr - 1))
threshold = threshold_roc[argmin]
fpr_prop = fpr[argmin]
tpr_prop = tpr[argmin]
plt.plot(threshold_roc, tpr, color='blue',
label='TPR')
plt.plot(threshold_roc, 1 - fpr, color='red',
label='1-FPR')
plt.xlabel('Threshold')
plt.ylabel('TPR or 1-FPR')
plt.legend(loc="lower center")
plt.axvline(threshold, linestyle='--')
plt.text(threshold + 0.02, 0.5, 'threshold: %0.2f' % threshold, rotation=90, verticalalignment='center')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
fig_tpr_fpr = plt2MD(plt)
plt.clf()
# roc
auc_score = auc(fpr, tpr)
plt.figure(figsize=fig_size)
plt.plot(fpr, tpr, color='darkorange',
label='ROC curve (area = %0.2f)' % auc_score)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.plot(fpr_prop, tpr_prop, 'g*', markersize=10, color="red", label='threshold: %0.2f' % threshold)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
fig_roc = plt2MD(plt)
plt.clf()
# pr
precision, recall, threshold_pr = precision_recall_curve(label, probability, pos_label=pos_label)
precision_prop = precision[argmin]
recall_prop = recall[argmin]
step_kwargs = ({'step': 'post'}
if 'step' in signature(plt.fill_between).parameters
else {})
plt.step(recall, precision, color='b', alpha=0.2, where='post')
plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.plot(recall_prop, precision_prop, 'g*', markersize=10, color="red", label='threshold: %0.2f' % threshold)
plt.title('Precision-Recall curve') # TODO Average precision score
plt.legend()
fig_pr = plt2MD(plt)
plt.clf()
threshold_pr = np.append(threshold_pr, 1)
plt.plot(threshold_pr, precision, color='blue',
label='Precision')
plt.plot(threshold_pr, recall, color='red',
label='Recall')
plt.xlabel('Threshold')
plt.ylabel('Precision or Recall')
plt.legend(loc="lower center")
plt.axvline(threshold, linestyle='--')
plt.text(threshold + 0.02, 0.5, 'threshold: %0.2f' % threshold, rotation=90, verticalalignment='center')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
fig_precision_recall = plt2MD(plt)
plt.clf()
classes = label.unique()
neg_label = [cls for cls in classes if cls != pos_label][0]
predict = probability.apply(lambda x: pos_label if x >= threshold else neg_label)
_plot_confusion_matrix(label, predict, [pos_label, neg_label],
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues)
fig_confusion = plt2MD(plt)
plt.clf()
return threshold, fig_tpr_fpr, fig_roc, fig_precision_recall, fig_pr, fig_confusion
def plot_roc_pr_curve(table, group_by=None, **params):
check_required_parameters(_plot_roc_pr_curve, params, ['table'])
params = get_default_from_parameters_if_required(params, _plot_roc_pr_curve)
param_validation_check = [greater_than_or_equal_to(params, 0.0, 'fig_w'),
greater_than_or_equal_to(params, 0.0, 'fig_h')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_plot_roc_pr_curve, table, group_by=group_by, **params)
else:
return _plot_roc_pr_curve(table, **params)
def _plot_roc_pr_curve(table, label_col, probability_col, fig_w=6.4, fig_h=4.8, pos_label=None):
label = table[label_col]
probability = table[probability_col]
threshold, fig_tpr_fpr, fig_roc, fig_precision_recall, fig_pr, fig_confusion = \
_plot_binary(label, probability, fig_size=(fig_w, fig_h), pos_label=pos_label)
summary = dict()
summary['threshold'] = threshold
summary['label_col'] = label_col
summary['probability_col'] = probability_col
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Plot ROC Curve and PR Curve Result
|
| ### ROC Curve
| {fig_tpr_fpr}
| {fig_roc}
|
| ### PR Curve
| {fig_precision_recall}
| {fig_pr}
|
| ### Confusion Matrix
| {fig_confusion}
""".format(fig_roc=fig_roc,
fig_tpr_fpr=fig_tpr_fpr,
fig_pr=fig_pr,
fig_precision_recall=fig_precision_recall,
fig_confusion=fig_confusion
)))
summary['_repr_brtc_'] = rb.get()
return {'result': summary}
def _rel(documents, element):
return element in documents
def _precision_k(k_value, num_users, documents, recommend_table):
return np.mean([np.sum([_rel(documents[i], recommend_table[i][j]) for j in range(np.min([k_value, len(recommend_table[i])]))]) for i in range(num_users)]) / k_value
def _map(num_users, documents, recommend_table):
result = []
for i in range(num_users):
number = 0
count = 0
for j in range(len(recommend_table[i])):
if _rel(documents[i], recommend_table[i][j]) == 1:
count += 1
number += count / (j + 1) / (len(documents[i]))
result.append(number)
return np.mean(result)
def _ndcg_k(k_value, num_users, documents, recommend_table):
return np.mean([np.sum([_rel(documents[i], recommend_table[i][j]) / (np.log(j + 2)) for j in range(np.min([k_value, np.max([len(documents[i]), len(recommend_table[i])])]))]) / (np.sum([1 / np.log(j + 2) for j in range(np.min([k_value, len(documents[i])]))])) for i in range(num_users)])
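# Hedged worked example (illustrative only): one user whose relevant items are {1, 2} and
# whose top-2 recommendations are [1, 3] gives precision@2 of 0.5:
# _precision_k(2, 1, {0: [1, 2]}, [[1, 3]])  # -> 0.5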
def evaluate_ranking_algorithm(table1, table2, user_col, item_col, evaluation_measure, rating_col=None, rating_edge=None, k_values=None):
none_str = 'None'
item_encoder = preprocessing.LabelEncoder()
tmp_table_item_col = table1[item_col].values.tolist()
tmp_table_item_col.append(none_str)
item_encoder.fit(tmp_table_item_col)
if table2.columns[0] != 'user_name' and table2.columns[0] != 'user':
raise_runtime_error("topN-list data schema should consist of [user_name, item_top1, rating_top1, .... item_topN, rating_topN]")
user_encoder = preprocessing.LabelEncoder()
user_encoder.fit(table2[table2.columns[0]])
if rating_col is not None and rating_edge is not None:
table = table1[table1[rating_col] > rating_edge]
else:
table = table1
table = table[table[user_col].isin(user_encoder.classes_)]
table_user_col = table[user_col]
table_item_col = table[item_col]
user_correspond = user_encoder.transform(table_user_col)
item_correspond = item_encoder.transform(table_item_col)
documents = dict()
for i in range(len(user_encoder.classes_)):
documents[i] = []
for i in range(len(user_correspond)):
documents[user_correspond[i]].append(item_correspond[i])
columns = []
for i in range(int(len(table2.columns) / 2)):
if table2.columns[2 * i + 1] != 'item_%d' % (i + 1) and table2.columns[2 * i + 2] != 'rating_%d' % (i + 1) and table2.columns[2 * i + 1] != 'item_top%d' % (i + 1) and table2.columns[2 * i + 2] != 'rating_top%d' % (i + 1):
raise_runtime_error("topN-list data schema should consist of [user_name, item_top1, rating_top1, .... item_topN, rating_topN]")
columns.append(table2.columns[2 * i + 1])
recommend_table = table2[columns].replace('', none_str).fillna(none_str).values
for i in range(len(recommend_table)):
recommend_table[i] = item_encoder.transform(recommend_table[i])
result = []
num_users = len(user_encoder.classes_)
if k_values is not None:
if 'prec' in evaluation_measure:
for k_value in k_values:
result.append(['precision_{}'.format(k_value), _precision_k(k_value, num_users, documents, recommend_table)])
if 'ndcg' in evaluation_measure:
for k_value in k_values:
result.append(['ndcg_{}'.format(k_value), _ndcg_k(k_value, num_users, documents, recommend_table)])
if 'map' in evaluation_measure:
result.append(['meanAveragePrecision', _map(num_users, documents, recommend_table)])
result = pd.DataFrame(result, columns=['measure', 'value'])
return {'out_table':result}
```
#### File: function/extraction/array_column_conversion.py
```python
import numpy as np
import pandas as pd
import re
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.classify_input_type import check_col_type
def fill_na(values, number):
for _ in range(number - len(values)):
values.append(None)
return values
def columns_to_array(table, **params):
check_required_parameters(_columns_to_array, params, ['table'])
return _columns_to_array(table, **params)
def _columns_to_array(table, input_cols, remain_cols=False, output_col_name='array'):
_output_col_name = re.sub("[ ,;{}()\n\t=]", "_", output_col_name)
if remain_cols:
out_table = table.copy()
npa = np.array(table[input_cols])
tmp_col = table[input_cols].dtypes == 'object'
if tmp_col.any():
out_table[_output_col_name] = list(npa.astype(str))
else:
out_table[_output_col_name] = list(npa)
return {'out_table' : out_table}
else:
out_table = table[list(set(table.axes[1]) - set(input_cols))].copy()
npa = np.array(table[input_cols])
tmp_col = table[input_cols].dtypes == 'object'
if tmp_col.any():
out_table[_output_col_name] = list(npa.astype(str))
else:
out_table[_output_col_name] = list(npa)
return {'out_table' : out_table}
def array_to_columns(table, **params):
check_required_parameters(_array_to_columns, params, ['table'])
return _array_to_columns(table, **params)
def _array_to_columns(table, input_cols, remain_cols=False):
if remain_cols:
out_table = table.copy()
for input_col in input_cols:
_input_col = re.sub("[ ,;{}()\n\t=]", "_", input_col)
for i in range(len(table[input_col][0])):
out_table[_input_col + str(i)] = [x[i] for x in table[input_col]]
return {'out_table' : out_table}
else:
out_table = table[list(set(table.axes[1]) - set(input_cols))].copy()
for input_col in input_cols:
_input_col = re.sub("[ ,;{}()\n\t=]", "_", input_col)
length = np.max([len(i) for i in table[input_col]])
tmp = np.array([fill_na(i, length) for i in table[input_col]])
columns = [_input_col + '_' + str(i) for i in range(tmp.shape[1])]
result_table = pd.DataFrame(tmp, columns=columns)
out_table = pd.concat([out_table, result_table], axis=1)
return {'out_table' : out_table}
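# Hedged usage sketch (assumed toy frame, not part of the module):
# df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# packed = _columns_to_array(df, input_cols=['a', 'b'])['out_table']       # adds an 'array' column
# unpacked = _array_to_columns(packed, input_cols=['array'])['out_table']  # yields 'array_0', 'array_1'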
```
#### File: function/recommendation/association_rule.py
```python
import itertools
import math
import pandas as pd
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from sklearn import preprocessing
from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, pandasDF2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.validation import less_than_or_equal_to
from brightics.common.validation import greater_than
from brightics.common.validation import from_to
#-----------------------------------------------------------------------------------------------------
"""
License:
Copyright (c) 2016, <NAME>
All rights reserved.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
class _FPNode(object):
"""
A node in the FP tree.
"""
def __init__(self, value, count, parent):
"""
Create the node.
"""
self.value = value
self.count = count
self.parent = parent
self.link = None
self.children = []
def _has_child(self, value):
"""
Check if node has a particular child node.
"""
for node in self.children:
if node.value == value:
return True
return False
def _get_child(self, value):
"""
Return a child node with a particular value.
"""
for node in self.children:
if node.value == value:
return node
return None
def _add_child(self, value):
"""
Add a node as a child node.
"""
child = _FPNode(value, 1, self)
self.children.append(child)
return child
class _FPTree(object):
"""
A frequent pattern tree.
"""
def __init__(self, transactions, threshold, root_value, root_count):
"""
Initialize the tree.
"""
self.frequent = self._find_frequent_items(transactions, threshold)
self.headers = self._build_header_table(self.frequent)
self.root = self._build_fptree(
transactions, root_value,
root_count, self.frequent, self.headers)
@staticmethod
def _find_frequent_items(transactions, threshold):
"""
Create a dictionary of items with occurrences above the threshold.
"""
items = {}
for transaction in transactions:
for item in transaction:
if item in items:
items[item] += 1
else:
items[item] = 1
for key in list(items.keys()):
if items[key] < threshold:
del items[key]
return items
@staticmethod
def _build_header_table(frequent):
"""
Build the header table.
"""
headers = {}
for key in frequent.keys():
headers[key] = None
return headers
def _build_fptree(self, transactions, root_value,
root_count, frequent, headers):
"""
Build the FP tree and return the root node.
"""
root = _FPNode(root_value, root_count, None)
for transaction in transactions:
sorted_items = [x for x in transaction if x in frequent]
sorted_items.sort(key=lambda x: (frequent[x], x), reverse=True)
if len(sorted_items) > 0:
self._insert_tree(sorted_items, root, headers)
return root
def _insert_tree(self, items, node, headers):
"""
Recursively grow FP tree.
"""
first = items[0]
child = node._get_child(first)
if child is not None:
child.count += 1
else:
# Add new child.
child = node._add_child(first)
# Link it to header structure.
if headers[first] is None:
headers[first] = child
else:
current = headers[first]
while current.link is not None:
current = current.link
current.link = child
# Call function recursively.
remaining_items = items[1:]
if len(remaining_items) > 0:
self._insert_tree(remaining_items, child, headers)
def _tree_has_single_path(self, node):
"""
If there is a single path in the tree,
return True, else return False.
"""
num_children = len(node.children)
if num_children > 1:
return False
elif num_children == 0:
return True
else:
return True and self._tree_has_single_path(node.children[0])
def _mine_patterns(self, threshold):
"""
Mine the constructed FP tree for frequent patterns.
"""
if self._tree_has_single_path(self.root):
return self._generate_pattern_list()
else:
return self._zip_patterns(self._mine_sub_trees(threshold))
def _zip_patterns(self, patterns):
"""
Append suffix to patterns in dictionary if
we are in a conditional FP tree.
"""
suffix = self.root.value
if suffix is not None:
# We are in a conditional tree.
new_patterns = {}
for key in patterns.keys():
new_patterns[tuple(sorted(list(key) + [suffix]))] = patterns[key]
new_patterns[tuple([self.root.value])] = self.root.count
return new_patterns
return patterns
def _generate_pattern_list(self):
"""
Generate a list of patterns with support counts.
"""
patterns = {}
items = self.frequent.keys()
# If we are in a conditional tree,
# the suffix is a pattern on its own.
if self.root.value is None:
suffix_value = []
else:
suffix_value = [self.root.value]
patterns[tuple(suffix_value)] = self.root.count
for i in range(1, len(items) + 1):
for subset in itertools.combinations(items, i):
pattern = tuple(sorted(list(subset) + suffix_value))
patterns[pattern] = \
min([self.frequent[x] for x in subset])
return patterns
def _mine_sub_trees(self, threshold):
"""
Generate subtrees and mine them for patterns.
"""
patterns = {}
mining_order = sorted(self.frequent.keys(),
key=lambda x: self.frequent[x])
# Get items in tree in reverse order of occurrences.
for item in mining_order:
suffixes = []
conditional_tree_input = []
node = self.headers[item]
# Follow node links to get a list of
# all occurrences of a certain item.
while node is not None:
suffixes.append(node)
node = node.link
# For each occurrence of the item,
# trace the path back to the root node.
for suffix in suffixes:
frequency = suffix.count
path = []
parent = suffix.parent
while parent.parent is not None:
path.append(parent.value)
parent = parent.parent
for _ in range(frequency):
conditional_tree_input.append(path)
# Now we have the input for a subtree,
# so construct it and grab the patterns.
subtree = _FPTree(conditional_tree_input, threshold,
item, self.frequent[item])
subtree_patterns = subtree._mine_patterns(threshold)
# Insert subtree patterns into main patterns dictionary.
for pattern in subtree_patterns.keys():
if pattern in patterns:
patterns[pattern] += subtree_patterns[pattern]
else:
patterns[pattern] = subtree_patterns[pattern]
return patterns
def _find_frequent_patterns(transactions, support_threshold):
"""
Given a set of transactions, find the patterns in it
over the specified support threshold.
"""
support_threshold *= len(transactions)
tree = _FPTree(transactions, support_threshold, None, None)
return tree._mine_patterns(support_threshold)
def _generate_association_rules(patterns, confidence_threshold):
"""
Given a set of frequent itemsets, return a dict
of association rules in the form
{(left): ((right), confidence)}
"""
rules = {}
for itemset in patterns.keys():
union_frequent = patterns[itemset]
for i in range(1, len(itemset)):
for antecedent in itertools.combinations(itemset, i):
antecedent = tuple(sorted(antecedent))
consequent = tuple(sorted(set(itemset) - set(antecedent)))
if antecedent in patterns:
antecedent_frequent = patterns[antecedent]
consequent_frequent = patterns[consequent]
confidence = float(union_frequent) / antecedent_frequent
if confidence >= confidence_threshold:
rule1 = (consequent, union_frequent, antecedent_frequent, consequent_frequent)
rule1 = list(rule1)
if antecedent in rules:
rules[antecedent].append(rule1)
else:
rules[antecedent] = [rule1]
return rules
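# Hedged usage sketch of the two helpers above (toy transactions, not from the original tests):
# transactions = [['bread', 'milk'], ['bread', 'butter'], ['bread', 'milk', 'butter']]
# patterns = _find_frequent_patterns(transactions, support_threshold=0.5)
# rules = _generate_association_rules(patterns, confidence_threshold=0.6)
# each rules[antecedent] entry is [consequent, union_count, antecedent_count, consequent_count]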
#----------------------------------------------------------------------------------------------------------------
def _dict_to_table(rules, len_trans):
result = []
for items in rules.keys():
for elements in rules[items]:
support_both = elements[1] / len_trans
confidence = elements[1] / elements[2]
lift = confidence / elements[3] * len_trans
if confidence == 1:
conviction = math.inf
else:
conviction = (1 - elements[3] / len_trans) / (1 - confidence)
result += [[list(items), list(elements[0]), support_both, confidence, lift, conviction]]
result = pd.DataFrame.from_records(result)
result.columns = ['antecedent', 'consequent', 'support', 'confidence', 'lift', 'conviction']
return result
def _table_to_transactions(table, items, user_name):
items = np.array(table[items])
label_encoder = preprocessing.LabelEncoder()
label_encoder.fit(table[user_name])
labels = label_encoder.transform(table[user_name])
result = []
for _ in range(len(label_encoder.classes_)):
result += [[]]
for j in range(len(table[user_name])):
result[labels[j]] += [items[j]]
return result
def association_rule(table, group_by=None, **params):
check_required_parameters(_association_rule, params, ['table'])
params = get_default_from_parameters_if_required(params, _association_rule)
param_validation_check = [from_to(params, 0, 1, 'min_support'),
from_to(params, 0, 1, 'min_confidence')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_association_rule, table, group_by=group_by, **params)
else:
return _association_rule(table, **params)
def _association_rule(table, input_mode=None, array_input=None, mul_items=None, items=None, user_name=None, min_support=0.01, min_confidence=0.8, min_lift=-math.inf, max_lift=math.inf, min_conviction=-math.inf, max_conviction=math.inf):
if input_mode == 'user_multiple':
transactions = []
for column in mul_items:
tmp = []
for item in table[column]:
if item is None:
tmp += [None]
else:
tmp += ['{} : {}'.format(column, item)]
transactions += [tmp]
transactions = list(np.transpose(transactions))
for i in range(len(transactions)):
if None in transactions[i]:
tmp = set(transactions[i])
tmp.remove(None)
transactions[i] = np.array(list(tmp))
elif input_mode == 'transaction':
transactions = [list(set(transaction) - {None}) for transaction in np.array(table[array_input])]
else:
if items is None:
raise Exception('Select Item Column')
if user_name is None:
raise Exception('Select User Column')
table_erase_duplicates = table.drop_duplicates([items] + [user_name])
table_erase_duplicates = table_erase_duplicates.reset_index()
transactions = _table_to_transactions(table_erase_duplicates, items, user_name)
len_trans = len(transactions)
patterns = _find_frequent_patterns(transactions, min_support)
rules = _generate_association_rules(patterns, min_confidence)
if len(rules) == 0:
result = pd.DataFrame(columns=['antecedent', 'consequent', 'support', 'confidence', 'lift', 'conviction'])
return {'out_table' : result}
result = _dict_to_table(rules, len_trans)
result = result[(result.lift >= min_lift) & (result.conviction >= min_conviction) & (result.lift <= max_lift) & (result.conviction <= max_conviction)]
return {'out_table' : result}
def _scaling(number_list):
maximum = np.max(number_list)
minimum = np.min(number_list)
result = []
for number in number_list:
result += [(number - minimum) / (maximum - minimum) + 0.2]
return result
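# Hedged worked example: _scaling([1, 2, 3]) -> [0.2, 0.7, 1.2]
# (each value is min-max scaled to [0, 1] and shifted by 0.2 so no node or edge is drawn with zero size).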
def _n_blank_strings(number):
result = ''
for _ in range(number):
result += ' '
return result
def association_rule_visualization(table, group_by=None, **params):
params = get_default_from_parameters_if_required(params, _association_rule_visualization)
param_validation_check = [greater_than(params, 0, 'figure_size_muliplier'),
greater_than(params, 0, 'edge_length_scaling'),
greater_than(params, 0, 'node_size_scaling'),
greater_than(params, 0, 'font_size')]
validate(*param_validation_check)
check_required_parameters(_association_rule_visualization, params, ['table'])
if group_by is not None:
return _function_by_group(_association_rule_visualization, table, group_by=group_by, **params)
else:
return _association_rule_visualization(table, **params)
def _association_rule_visualization(table, option='multiple_to_single', edge_length_scaling=1, font_size=10, node_size_scaling=1, figure_size_muliplier=1, display_rule_num=False):
if(option == 'single_to_single'):
result_network = table.copy()
length_ante = []
string_ante = []
length_conse = []
string_conse = []
for row in result_network['antecedent']:
length_ante += [len(row)]
string_ante += [row[0]]
for row in result_network['consequent']:
length_conse += [len(row)]
string_conse += [row[0]]
result_network['length_ante'] = length_ante
result_network['string_ante'] = string_ante
result_network['length_conse'] = length_conse
result_network['string_conse'] = string_conse
result_network = result_network[result_network.length_ante == 1]
result_network = result_network[result_network.length_conse == 1]
result_network['support_ante'] = result_network['support'] / result_network['confidence']
result_network['support_conse'] = result_network['confidence'] / result_network['lift']
# edges_colors = preprocessing.LabelEncoder()
# edges_colors.fit(result_network['lift'])
# edges_colors = edges_colors.transform(result_network['lift'])
# result_network['edge_colors'] = edges_colors
result_network = result_network.reset_index()
edges = []
for i in range(len(result_network.string_ante)):
edges += [(result_network.string_ante[i], result_network.string_conse[i])]
G = nx.DiGraph()
G.add_edges_from(edges)
nodes = G.nodes()
plt.figure(figsize=(4 * len(nodes) ** 0.5 * figure_size_muliplier, 4 * len(nodes) ** 0.5 * figure_size_muliplier))
pos = nx.spring_layout(G, k=0.4 * edge_length_scaling)
node_tmp = list(result_network.string_ante) + list(result_network.string_conse)
support_tmp = list(result_network.support_ante) + list(result_network.support_conse)
tmp_node_support = []
for i in range(len(node_tmp)):
tmp_node_support += [[node_tmp[i], support_tmp[i]]]
nodes_table = pd.DataFrame.from_records(tmp_node_support, columns=['name', 'support'])
nodes_table = nodes_table.drop_duplicates(['name'])
node_color = []
nodes_table = nodes_table.reset_index()
scaled_support = _scaling(nodes_table.support)
for node in nodes:
for i in range(len(nodes_table.name)):
if nodes_table.name[i] == node:
node_color += [scaled_support[i] * 2500 * node_size_scaling]
break
# if(scaling==True):
# edge_color = [result_network['edge_colors'][n] for n in range(len(result_network['length_conse']))]
# else:
scaled_support = _scaling(result_network['confidence'])
edge_size = [scaled_support[n] * 8 for n in range(len(result_network['length_conse']))]
edge_color = [result_network['lift'][n] for n in range(len(result_network['length_conse']))]
nx.draw(G, pos, node_color=node_color, edge_color=edge_color, node_size=node_color, arrowsize=20 * (0.2 + 0.8 * node_size_scaling), font_family='NanumGothic',
with_labels=True, cmap=plt.cm.Blues, edge_cmap=plt.cm.Reds, arrows=True, edge_size=edge_color, width=edge_size, font_size=font_size)
fig_digraph = plt2MD(plt)
graph_min_support = np.min(nodes_table.support)
graph_max_support = np.max(nodes_table.support)
graph_min_confidence = np.min(result_network['confidence'])
graph_max_confidence = np.max(result_network['confidence'])
graph_min_lift = np.min(result_network['lift'])
graph_max_lift = np.max(result_network['lift'])
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ### Network Digraph
| ##### Node color, size : support ({graph_min_support}~{graph_max_support})
| ##### Edge color : lift ({graph_min_lift}~{graph_max_lift})
| ##### Edge size : confidence ({graph_min_confidence}~{graph_max_confidence})
| {image1}
|
""".format(image1=fig_digraph, graph_min_support=graph_min_support, graph_max_support=graph_max_support, graph_min_lift=graph_min_lift, graph_max_lift=graph_max_lift, graph_min_confidence=graph_min_confidence, graph_max_confidence=graph_max_confidence)))
elif(option == 'multiple_to_single'):
result_network = table.copy()
length_ante = []
string_ante = []
length_conse = []
string_conse = []
for row in result_network['consequent']:
length_conse += [len(row)]
string_conse += [row[0]]
result_network['length_conse'] = length_conse
result_network['consequent'] = string_conse
result_network = result_network[result_network.length_conse == 1]
index_list = result_network.index.tolist()
rownum = []
for i in range(len(result_network['consequent'])):
if display_rule_num:
rownum += ['R%d' % (i + 1)]
else:
rownum += [_n_blank_strings(i + 1)]
result_network['row_number'] = rownum
edges = []
nodes = []
for i in index_list:
for j in range(len(result_network.antecedent[i])):
edges += [(result_network.antecedent[i][j], result_network['row_number'][i])]
edges += [(result_network['row_number'][i], result_network.consequent[i])]
nodes += [result_network['row_number'][i]]
G = nx.DiGraph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
plt.figure(figsize=(2 * len(nodes) ** 0.5 * figure_size_muliplier, 2 * len(nodes) ** 0.5 * figure_size_muliplier))
pos = nx.spring_layout(G, k=0.2 * edge_length_scaling)
nodes_color = []
nodes_size = []
scaled_lift = _scaling(result_network.lift)
for node in range(len(G.nodes())):
if node < len(nodes):
nodes_color += [result_network.support[index_list[node]]]
nodes_size += [scaled_lift[node] * 2000 * node_size_scaling]
else:
nodes_color += [0]
nodes_size += [0]
nx.draw(G, pos, node_color=nodes_color, node_size=nodes_size, font_family='NanumGothic',
with_labels=True, cmap=plt.cm.Reds, arrows=True, edge_color='Grey', font_weight='bold', arrowsize=20 * (0.2 + 0.8 * node_size_scaling), font_size=font_size)
fig_digraph = plt2MD(plt)
graph_min_support = np.min(result_network.support)
graph_max_support = np.max(result_network.support)
graph_min_lift = np.min(result_network.lift)
graph_max_lift = np.max(result_network.lift)
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ### Network Digraph
| ##### Size of circle : support ({graph_min_support}~{graph_max_support})
| ##### Color of circle : lift ({graph_min_lift}~{graph_max_lift})
| {image1}
|
""".format(image1=fig_digraph, graph_min_support=graph_min_support, graph_max_support=graph_max_support, graph_min_lift=graph_min_lift, graph_max_lift=graph_max_lift)))
else:
result_network = table.copy()
length_ante = []
string_ante = []
length_conse = []
string_conse = []
for row in result_network['consequent']:
length_conse += [len(row)]
result_network['length_conse'] = length_conse
result_network = result_network.reset_index()
rownum = []
for i in range(len(result_network['consequent'])):
if display_rule_num:
rownum += ['R%d' % i]
else:
rownum += [_n_blank_strings(i + 1)]
result_network['row_number'] = rownum
edges = []
nodes = []
for i in range(len(result_network.consequent)):
for j in range(len(result_network.antecedent[i])):
edges += [(result_network.antecedent[i][j], result_network['row_number'][i])]
for j in range(len(result_network.consequent[i])):
edges += [(result_network['row_number'][i], result_network.consequent[i][j])]
nodes += [result_network['row_number'][i]]
G = nx.DiGraph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
plt.figure(figsize=(2 * len(nodes) ** 0.5 * figure_size_muliplier, 2 * len(nodes) ** 0.5 * figure_size_muliplier))
pos = nx.spring_layout(G, k=0.2 * edge_length_scaling)
nodes_color = []
nodes_size = []
scaled_lift = _scaling(result_network.lift)
for node in range(len(G.nodes())):
if node < len(nodes):
nodes_color += [result_network.support[node]]
nodes_size += [scaled_lift[node] * 2000 * node_size_scaling]
else:
nodes_color += [0]
nodes_size += [0]
nx.draw(G, pos, node_color=nodes_color, node_size=nodes_size, font_family='NanumGothic',
with_labels=True, cmap=plt.cm.Reds, arrows=True, edge_color='Grey', font_weight='bold', arrowsize=20 * (0.2 + 0.8 * node_size_scaling), font_size=font_size)
fig_digraph = plt2MD(plt)
graph_min_support = np.min(result_network.support)
graph_max_support = np.max(result_network.support)
graph_min_lift = np.min(result_network.lift)
graph_max_lift = np.max(result_network.lift)
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ### Network Digraph
| ##### Size of circle : support ({graph_min_support}~{graph_max_support})
| ##### Color of circle : lift ({graph_min_lift}~{graph_max_lift})
| {image1}
|
""".format(image1=fig_digraph, graph_min_support=graph_min_support, graph_max_support=graph_max_support, graph_min_lift=graph_min_lift, graph_max_lift=graph_max_lift)))
model = _model_dict('Association rule')
model['_repr_brtc_'] = rb.get()
# plt.figure(figsize=(6.4,4.8))
return{'model' : model}
```
#### File: function/textanalytics/regex.py
```python
from brightics.common.utils import check_required_parameters
from brightics.common.exception import BrighticsFunctionException
from .data import regex_format_dict
import re
def regex(table, **params):
check_required_parameters(_regex, params, ['table'])
return _regex(table, **params)
def _regex(table, input_cols, transformation_mode='extract', find_mode='all', pattern='',
user_dict_pattern='', custom_pattern='', replacement_string='', user_dict=None):
out_table = table.copy()
pattern_dict = regex_format_dict.pattern_dict
user_pattern_dict = {}
if user_dict is not None:
user_patterns = user_dict.values
for user_pattern in user_patterns:
user_pattern_name = user_pattern[0]
user_pattern_content = user_pattern[1]
user_pattern_dict[user_pattern_name] = user_pattern_dict.get(user_pattern_name, []) + [user_pattern_content]
user_pattern_dict = {key: r'|'.join(value) for key, value in user_pattern_dict.items()}
if pattern == '':
raise BrighticsFunctionException.from_errors([{'0100': "Please choose a pattern."}])
if pattern == 'custom':
raw_pattern = custom_pattern
elif pattern == 'user_dictionary':
raw_pattern = user_pattern_dict.get(user_dict_pattern)
if raw_pattern is None:
raise BrighticsFunctionException.from_errors(
[{'0100': user_dict_pattern + " is not a valid pattern name in the user dictionary."}])
else:
raw_pattern = pattern_dict.get(pattern)
regex_pattern = re.compile(raw_pattern)
def transformation(text):
if transformation_mode == 'extract':
if find_mode == 'first':
result = regex_pattern.search(text)
if result is None:
return ""
else:
return result.group()
else: # find_mode == 'all'
return regex_pattern.findall(text)
elif transformation_mode == 'replace':
if find_mode == 'first':
return regex_pattern.sub(replacement_string, text, 1)
else: # find_mode == 'all'
return regex_pattern.sub(replacement_string, text)
elif transformation_mode == 'remove':
if find_mode == 'first':
return regex_pattern.sub("", text, 1)
else: # find_mode == 'all'
return regex_pattern.sub("", text)
else: # transformation_mode == 'split'
if find_mode == 'first':
return regex_pattern.split(text, 1)
else: # find_mode == 'all'
return regex_pattern.split(text)
for col in input_cols:
result_col = table[col].apply(transformation)
out_table['regex_' + col] = result_col
return {'out_table': out_table}
```
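A minimal usage sketch for the `regex` function above (the dotted import path is inferred from the file layout, and an installed Brightics environment is assumed): it applies a custom pattern in `replace` mode and adds a `regex_<col>` output column.
```python
# Sketch only; the import path and sample data are assumptions.
import pandas as pd
from brightics.function.textanalytics.regex import regex

df = pd.DataFrame({'text': ['order 12 shipped', 'no digits here']})
res = regex(df, input_cols=['text'], transformation_mode='replace',
            find_mode='all', pattern='custom', custom_pattern=r'\d+',
            replacement_string='#')
print(res['out_table']['regex_text'].tolist())  # e.g. ['order # shipped', 'no digits here']
```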
#### File: function/textanalytics/tokenizer2.py
```python
import numpy as np
import pandas as pd
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
from bs4 import BeautifulSoup
import re
from random import randint
from brightics.common.utils import check_required_parameters
LEAF_KEY = True
REPLACE_NO_SPACE = re.compile("[.;:!\'?,\"()\[\]]")
REPLACE_WITH_SPACE = re.compile("(<br\s*/><br\s*/>)|(\-)|(\/)")
POS_ENG_DEFAULT = ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD",
"NN", "NNS", "NNP", "NNPS", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS",
"RP", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB"]
"""
Handling compound words: replace every compound word in the given text with a number that does not
occur in the text, apply the tokenizer, and then replace the numbers back with the corresponding compound words.
"""
def _encode(text_table, user_dict, lower_case):
encode_ind = randint(1000, 10000) # a random integer
encode_list = []
decode_dict = {}
if user_dict is not None:
user_dict_arr = user_dict.values.tolist()
texts_arr = text_table.values.tolist()
whole_text = ' '.join([' '.join(row) for row in texts_arr])
numbers_in_text = [int(item) for item in re.compile(r'\d+').findall(whole_text)]
for row in user_dict_arr:
compound_word = row[0]
            if len(row) > 1 and row[1] is not None and not pd.isna(row[1]) and row[1] != "":
pos = row[1]
else:
pos = None
while encode_ind in numbers_in_text:
encode_ind += 1
encode_word = ' {} '.format(encode_ind)
encode_list.append((compound_word, encode_word))
decode_dict[str(encode_ind)] = (compound_word, pos)
encode_ind += 1
table_encoded = text_table.applymap(lambda text: _substitution(text, encode_list, lower_case))
else:
table_encoded = text_table
return table_encoded, decode_dict
def _substitution(text, encode_list, lower_case):
if lower_case:
text = text.lower()
for compound_word, encode_word in encode_list:
text = re.compile(compound_word).sub(encode_word, text)
return text
def _extract_kor(list_tokens_tagged, is_tagged, decode_dict, *pos_extraction):
list_tokens_tagged_decoded = []
for token_tagged in list_tokens_tagged:
if token_tagged[0] in decode_dict:
compound_word, pos = decode_dict[token_tagged[0]]
if pos is None:
pos = 'Noun'
list_tokens_tagged_decoded.append([compound_word, pos])
else:
list_tokens_tagged_decoded.append(token_tagged)
if pos_extraction:
list_tokens_tagged_filtered = [token_tagged for token_tagged in list_tokens_tagged_decoded
if token_tagged[1] in set(pos_extraction)]
else:
list_tokens_tagged_filtered = list_tokens_tagged_decoded
if is_tagged is False:
res = [token_tagged[0] for token_tagged in list_tokens_tagged_filtered]
else:
res = ['{text}/{pos}'.format(text=token_tagged[0], pos=token_tagged[1])
for token_tagged in list_tokens_tagged_filtered]
return res
def tokenizer_kor2(table, **params):
check_required_parameters(_tokenizer_kor2, params, ['table'])
return _tokenizer_kor2(table, **params)
def _tokenizer_kor2(table, input_cols, hold_cols=None, new_col_prefix='tokenized',
normalization=True, stemming=True, pos_extraction=None, is_tagged=False, user_dict=None):
from twkorean import TwitterKoreanProcessor as Tw # import here since twkorean cannot be loaded in Enterprise ver.
if pos_extraction is None:
pos_extraction = []
table_encoded, decode_dict = _encode(table[input_cols], user_dict, False)
tokenizer = Tw(normalization=normalization, stemming=stemming)
tokenize_vec = np.vectorize(tokenizer.tokenize, otypes=[object])(table_encoded)
columns = ['{prefix}_{col}'.format(prefix=new_col_prefix, col=col) for col in input_cols]
tokenized_table = pd.DataFrame(np.vectorize(_extract_kor, otypes=[object])(
tokenize_vec, is_tagged, decode_dict, *pos_extraction), columns=columns)
if hold_cols is None:
out_table = pd.concat([table, tokenized_table], axis=1)
else:
out_table = pd.concat([table[hold_cols], tokenized_table], axis=1)
return {'out_table': out_table}
def doc_list_stemming_eng(word_tok_list):
ps = PorterStemmer()
return [ps.stem(word_tok) for word_tok in word_tok_list]
def preprocess_reviews_eng(text, lower_case):
if lower_case:
text = REPLACE_NO_SPACE.sub("", text.lower())
else:
text = REPLACE_NO_SPACE.sub("", text)
return text
def _extract_pos_eng(tagged, pos_extraction, is_tagged, decode_dict):
token, pos = tagged
if token in decode_dict:
token, pos = decode_dict[token]
if pos is None:
pos = 'NN'
if pos in pos_extraction:
if is_tagged:
return '{token}({pos})'.format(token=token, pos=pos)
else:
return '{token}'.format(token=token)
else:
return None
def _transform_tagged_list_eng(tagged_list, pos_extraction, is_tagged, decode_dict):
tagged_list_transformed = [_extract_pos_eng(tagged, pos_extraction, is_tagged, decode_dict)
for tagged in tagged_list]
return [item for item in tagged_list_transformed if item is not None]
def tokenizer_eng2(table, **params):
check_required_parameters(_tokenizer_eng2, params, ['table'])
return _tokenizer_eng2(table, **params)
def _tokenizer_eng2(table, input_cols, hold_cols=None, new_col_prefix='tokenized',
lower_case=True, stemming=True, pos_extraction=None, is_tagged=False, user_dict=None):
if hold_cols is None:
out_table = table.copy()
else:
out_table = table[hold_cols]
if pos_extraction is None:
pos_extraction = POS_ENG_DEFAULT
table_encoded, decode_dict = _encode(table[input_cols], user_dict, lower_case)
def _process(text):
text_filtered_html = BeautifulSoup(text).get_text()
text_preprocessed = preprocess_reviews_eng(text_filtered_html, lower_case)
text_tokenized = word_tokenize(text_preprocessed)
if stemming:
text_tokenized = doc_list_stemming_eng(text_tokenized)
text_tagged = nltk.pos_tag(text_tokenized)
text_result = _transform_tagged_list_eng(text_tagged, pos_extraction, is_tagged, decode_dict)
return text_result
for col in input_cols:
docs = table_encoded[col]
docs_result = docs.apply(_process)
out_table['{prefix}_{col}'.format(prefix=new_col_prefix, col=col)] = docs_result
return {'out_table': out_table}
```
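A hedged usage sketch for `tokenizer_eng2` above (the import path is inferred from the file layout; NLTK's `punkt` and `averaged_perceptron_tagger` resources plus BeautifulSoup must be available):
```python
# Sketch only; the import path, sample text, and expected tokens are illustrative.
import pandas as pd
from brightics.function.textanalytics.tokenizer2 import tokenizer_eng2

docs = pd.DataFrame({'review': ['The movie was surprisingly good!']})
out = tokenizer_eng2(docs, input_cols=['review'], stemming=True, is_tagged=False)
print(out['out_table']['tokenized_review'][0])  # e.g. ['the', 'movi', 'wa', 'surprisingli', 'good']
```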
#### File: timeseries/test/spcrule_test.py
```python
from brightics.common.datasets import load_iris
from brightics.function.timeseries import spcrule,spcrule_summ
import unittest
import pandas as pd
import numpy as np
import HtmlTestRunner
import os
class SpcruleTest(unittest.TestCase):
def setUp(self):
print("*** Spc ruleset AD with/without summary UnitTest Start ***")
testdata = load_iris()
setosa = testdata[testdata.species=='setosa']
versicolor = testdata[testdata.species=='versicolor']
versicolor_test = versicolor[versicolor.sepal_length>6.5]
testset = setosa.append(versicolor_test)
testset = testset.reset_index(drop=True)
testset['time'] = testset.index.astype(int)
self.testdata = testset
def tearDown(self):
print("*** Spc ruleset AD with/without summary UnitTest End ***")
def test_first(self):
spc = spcrule(self.testdata,time_col='time',value_col='sepal_length',ruleset_id='0')
DF1 = spc['out_table'].values
DF2 = spc['out_table2'].values
np.testing.assert_string_equal(DF1[0][4], 'lack of samples')
np.testing.assert_equal(DF1[0][5], None)
np.testing.assert_string_equal(DF1[len(DF1)-1][4], '1')
np.testing.assert_equal(DF1[len(DF1)-1][5], True)
np.testing.assert_equal(DF2[0][3][-1],DF1[-1][1])
np.testing.assert_equal(DF2[0][3][-2],DF1[-2][1])
np.testing.assert_equal(DF2[0][3][-3],DF1[-3][1])
np.testing.assert_equal(DF2[0][3][-4],DF1[-4][1])
np.testing.assert_equal(DF2[0][3][-4],DF1[-4][1])
np.testing.assert_equal(DF2[0][0],DF1[-1][2])
np.testing.assert_equal(DF2[0][1],DF1[-1][3])
        self.summary = spc['out_table2']
def test_second(self):
spc = spcrule(self.testdata,time_col='time',value_col='sepal_length',ruleset_id='0')
        self.summary = spc['out_table2']
spc2 = spcrule_summ(self.testdata,self.summary,time_col='time',value_col='sepal_length',ruleset_id='0')
DF1 = spc2['out_table'].values
DF2 = spc2['out_table2'].values
np.testing.assert_string_equal(DF1[0][4], 'old_data')
np.testing.assert_string_equal(DF1[len(DF1)-1][4], '1')
np.testing.assert_equal(DF1[len(DF1)-1][5], True)
np.testing.assert_equal(DF2[0][3][-1],DF1[-1][1])
np.testing.assert_equal(DF2[0][3][-2],DF1[-2][1])
np.testing.assert_equal(DF2[0][3][-3],DF1[-3][1])
np.testing.assert_equal(DF2[0][3][-4],DF1[-4][1])
np.testing.assert_equal(DF2[0][3][-4],DF1[-4][1])
np.testing.assert_equal(DF2[0][0],DF1[-1][2])
np.testing.assert_equal(DF2[0][1],DF1[-1][3])
if __name__ == '__main__':
filepath = os.path.dirname(os.path.abspath(__file__))
    reportFolder = filepath + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFolder))
```
#### File: function/transform/tsne.py
```python
from sklearn.manifold import TSNE as tSNE
import pandas as pd
import matplotlib.pyplot as plt
from brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD, plt2MD, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import from_to
import seaborn as sns
import numpy as np
import matplotlib.cm as cm
from matplotlib.patches import Patch
from brightics.common.validation import validate, greater_than_or_equal_to
def tsne(table, group_by=None, **params):
check_required_parameters(_tsne, params, ['table'])
params = get_default_from_parameters_if_required(params, _tsne)
param_validation_check = [from_to(params, 1, len(params['input_cols']), 'n_components')]
validate(*param_validation_check)
if group_by is not None:
grouped_model = _function_by_group(_tsne, table, group_by=group_by, **params)
return grouped_model
else:
return _tsne(table, **params)
def _tsne(table, input_cols, new_column_name='projected_', n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
seed=None, method='barnes_hut', angle=0.5):
num_feature_cols = len(input_cols)
if n_components is None:
n_components = num_feature_cols
tsne = tSNE(n_components=n_components, perplexity=perplexity, early_exaggeration=early_exaggeration,
learning_rate=learning_rate, n_iter=n_iter, n_iter_without_progress=n_iter_without_progress,
min_grad_norm=min_grad_norm, metric=metric, init=init, verbose=verbose,
random_state=seed, method=method, angle=angle)
tsne_result = tsne.fit_transform(table[input_cols])
column_names = []
for i in range(0, n_components):
column_names.append(new_column_name + str(i))
out_df = pd.DataFrame(data=tsne_result[:, :n_components], columns=[column_names])
out_df = pd.concat([table.reset_index(drop=True), out_df], axis=1)
out_df.columns = table.columns.values.tolist() + column_names
return {'out_table': out_df}
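# Added usage sketch (not part of the original module); the column names and
# parameter values below are illustrative assumptions.
if __name__ == '__main__':
    _df = pd.DataFrame({'x1': [1.0, 2.0, 3.0, 4.0, 5.0], 'x2': [5.0, 4.0, 3.0, 2.0, 1.0]})
    _res = tsne(_df, input_cols=['x1', 'x2'], n_components=2, perplexity=2.0, seed=0)
    print(_res['out_table'][['projected_0', 'projected_1']])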
``` |
{
"source": "jhparkinfinyx/fabric_server",
"score": 2
} |
#### File: jhparkinfinyx/fabric_server/app.py
```python
import os
import io
import json
import time
import atexit
import base64
from PIL import Image
from torchvision import models
import torchvision.transforms as transforms
from PIL import Image
from flask import Flask, jsonify, request, render_template, send_from_directory
from werkzeug.utils import secure_filename
from flask_cors import CORS
import pymysql
# from celery import Celery
from modules.db import DBController
# from modules.similarity_model import similarity
from model.similarity.dyetec_similar_test import run as similarity_run
from model.gan.cCycle_test import run as gan_run
# from model.similarity.get_similarity_vector import run as vector_run
UPLOAD_FOLDER = './static/images'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
application = Flask(__name__, static_url_path='/static')
application.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
application.config.update(
CELERY_BROKER_URL='redis://localhost:6379',
CELERY_RESULT_BACKEND='redis://localhost:6379'
)
application.config['JSON_AS_ASCII'] = False
# celery = Celery(application.name, broker=application.config['CELERY_BROKER_URL'])
# celery.conf.update(application.config)
# CORS(application)
# CORS(application, resources={r'*': {'origins': 'http://localhost:3000'}}, supports_credentials=True)
CORS(application, resources={r'*': {'origins': '*'}}, supports_credentials=True)
imagenet_class_index = json.load(open('imagenet_class_index.json'))
model = models.densenet121(pretrained=True)
model.eval()
# # create DB
db = DBController.instance()
def transform_image(image_bytes):
my_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
image = Image.open(io.BytesIO(image_bytes))
return my_transforms(image).unsqueeze(0)
# test
def get_prediction(image_bytes):
tensor = transform_image(image_bytes=image_bytes)
outputs = model.forward(tensor)
_, y_hat = outputs.max(1)
predicted_idx = str(y_hat.item())
return imagenet_class_index[predicted_idx]
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# @celery.task #(bind=True)
# def asyncInferenceSimilerModel(data):
# print("asyncInferenceSimilerModel()")
# # with application.app_context():
#     # perform the model work
# # string to bytes
# img_bytes = base64.b64decode(data['img_str'])
# # model work
# rows = similarity_run(img_bytes, data['img_rows'])
# # # open image
# # image = Image.open(io.BytesIO(img_bytes))
# # image.show()
# # # test work
# # class_id, class_name = get_prediction(image_bytes=img_bytes)
# # time.gmtime(1000)
# return rows # data['img_rows']
# @celery.task
# def asyncQeruyDB(sql):
# print("asyncQeruyDB()")
# print(5)
# rows =getImages(sql)
# print(6)
# return rows
'''
Name : Create drape images
Param : [number, ...]
return [bytes, bytes]
'''
@application.route('/api/drape', methods=['post'])
def image_drape():
if request.method == 'POST':
data = request.get_json()
print(data)
# data = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37]
drape_coef, img1, img2 = gan_run(data)
res1 = base64.b64encode(img1)
res2 = base64.b64encode(img2)
print("response:",[res1.decode("utf-8"), res2.decode("utf-8"), drape_coef])
# return str(image)
return jsonify([res1.decode("utf-8"), res2.decode("utf-8"), drape_coef])
'''
Name : Search similarity images
Param : file(image)
return [{img_row}, ...]
'''
@application.route('/api/similarity', methods=['POST'])
def image_similarity():
if request.method == 'POST':
file = request.files['file']
# bytes to string
img_str = base64.b64encode(file.read())
img_str = img_str.decode('utf-8')
# print("img_str: ", img_str[:100])
sql = "select * from images"
# task = asyncQeruyDB.delay(sql)
# img_rows = task.get()
rows = db.getImages(sql, ())
img_rows = rows
# print(img_rows)
data = {
            'img_str': img_str,  # the image file data itself
            'img_rows': img_rows  # image table rows loaded from the DB (id, name, path, vector value, time info)
}
# # send to celery worker
# task = asyncInferenceSimilerModel.delay(data)
# response = task.get()
image_trans, results = similarity_run(base64.b64decode(data['img_str']), data['img_rows'])
image = base64.b64encode(image_trans)
response = {"image": image.decode("utf-8"), "rows": results}
# print(response)
return jsonify([response])
# '''
# Name : Upload images
# Param : file(image), fileName
# return isSuccess
# '''
# @application.route('/api/upload', methods=['POST'])
# def image_upload():
# if request.method == 'POST':
# if 'file' not in request.files:
# print('No file part')
#             return jsonify({"isSuccess": 0, "message": "The file does not exist."})
# file = request.files['file']
# # bytes to string
# img_str = base64.b64encode(file.read())
# img_str = img_str.decode('utf-8')
# # print("img_str: ", img_str[:100])
# img_bytes = base64.b64decode(img_str)
# if file.filename == '':
# print('No selected file')
#             return jsonify({"isSuccess": 0, "message": "The file does not exist."})
# file_name = file.filename
# # print('file_name: ', file_name)
# if not file or not allowed_file(file_name):
#             return jsonify({"isSuccess": 0, "message": "The file does not exist."})
# filename = secure_filename(file_name)
# # print('filename: ', filename)
# name = os.path.splitext(filename)[0]
# path = os.path.join(application.config['UPLOAD_FOLDER'], filename)[2:]
# vector = vector_run(img_bytes)
# # print(path)
# # dup check
# rows = db.getImages("select * from images where name=%s", (name))
# # print("rows.length: ", len(rows))
# if len(rows) > 0 :
#             return jsonify({"isSuccess": 0, "message": "The file already exists."})
# file.seek(0)
# file.save(path)
# res = db.setImages("insert into images (name, path, vector) values (%s,%s,%s)", (name, path, str(vector)))
# print(res)
# # response = similarity_run(base64.b64decode(data['img_str']), data['img_rows'])
# # print(response)
#         return jsonify({"isSuccess": 1, "message": "File upload succeeded!", "img": [[], 1, name, path] })
@application.route('/static/images/<image_file>')
def image(image_file):
print(image_file)
return send_from_directory('./static/images', image_file)
# return render_template('img.html', image_file='images/'+image_file)
# @application.route('/static/images')
# def image():
# name = request.args.get('name', default = '', type = str)
# print(name)
# return send_from_directory('./static/images', name)
@application.route("/api/images", methods=['GET'])
def images():
rows = db.getImages("select vector, id, name, path from images ORDER BY id DESC", ())
return jsonify(rows)
@application.route('/api/image', methods=['POST'])
def delete_image():
if request.method == 'POST':
data = request.form
print(data.get('id'))
target_id = data.get('id')
# get db info
rows = db.getImages("select id, name, path from images where id=%s", (target_id))
print(rows)
filename = os.path.basename(rows[0][2])
# remove db info
db.setImages("delete from images where id=%s", (target_id))
# # remove static file
path = os.path.join(application.config['UPLOAD_FOLDER'], filename)[2:]
os.remove(path)
        return jsonify({"isSuccess": 1, "message": "The file has been deleted."})
@application.route("/")
def home():
return "Dive Fabric"
if __name__ == '__main__':
# application.run(host='0.0.0.0')
application.run(host='0.0.0.0', port=9999, debug=True)
# application.run(host='127.0.0.1', port=8080)
# def cleanup():
# try:
# # db.close()
# # print("db closed!")
# except Exception:
# pass
# atexit.register(cleanup)
####
```
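A client-side sketch for the two main endpoints above (assumes the Flask app is running locally on port 9999 with its model and database available; the file name and the 37-value payload are assumptions based on the commented-out sample in the handler):
```python
import requests

# /api/similarity: multipart upload of a fabric image, returns a base64 image plus similar rows.
with open('sample.jpg', 'rb') as f:
    resp = requests.post('http://localhost:9999/api/similarity', files={'file': f})
payload = resp.json()[0]  # {'image': <base64 string>, 'rows': [...]}
print(len(payload['rows']), 'similar fabrics returned')

# /api/drape: JSON array of numeric fabric properties, returns two images and a drape coefficient.
drape = requests.post('http://localhost:9999/api/drape', json=[1.0] * 37)
print('drape coefficient:', drape.json()[2])
```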
#### File: fabric_server/modules/db.py
```python
import pymysql
class SingletonInstane:
__instance = None
def __init__(self):
self.db = None
@classmethod
def openDB(cls):
cls.db = pymysql.connect(
host='localhost',
user='root',
db='fabric_db',
password='<PASSWORD>',
charset='utf8'
)
@classmethod
def __getInstance(cls):
return cls.__instance
@classmethod
def instance(cls, *args, **kargs):
cls.__instance = cls(*args, **kargs)
cls.instance = cls.__getInstance
return cls.__instance
class DBController(SingletonInstane):
@classmethod
def getImages(cls, sql, data):
cls.openDB()
curs = cls.db.cursor()
curs.execute(sql, data)
# dbController.mail_send_process()
rows = curs.fetchall()
# print(rows)
cls.db.close()
return rows
@classmethod
def setImages(cls, sql, data):
cls.openDB()
curs = cls.db.cursor()
curs.execute(sql, data)
# dbController.mail_send_process()
curs.fetchall()
# print(rows)
cls.db.commit()
cls.db.close()
return
@classmethod
def close(cls):
cls.db.close()
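# Added usage sketch (not part of the original module): assumes a local MySQL
# server with the 'fabric_db' schema and an 'images' table, as configured in
# openDB() above. This mirrors how app.py uses the singleton.
if __name__ == '__main__':
    db = DBController.instance()
    for row in db.getImages("select id, name, path from images", ()):
        print(row)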
``` |
{
"source": "jhpenger/joint-ppo",
"score": 2
} |
#### File: joint-ppo/sonic_on_ray/sonic_on_ray.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
import cv2
import gym
import gym.spaces as spaces
import retro
import numpy as np
import time
import os
import csv
class LazyFrames(object):
def __init__(self, frames):
"""
This object ensures that common frames between the observations are
only stored once. It exists purely to optimize memory usage which can
be huge for DQN's 1M frames replay buffers. This object should only be
converted to numpy array before being passed to the model. You'd not
believe how complex the previous solution was.
"""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack the k last frames.
Returns a lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=np.uint8)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 80
self.height = 80
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1),
dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height),
interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class SonicDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the Sonic game.
"""
def __init__(self, env):
super(SonicDiscretizer, self).__init__(env)
# buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT",
# "C", "Y", "X", "Z"]
# actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'],
# ['DOWN'], ['DOWN', 'B'], ['B']]
buttons = ["B", "Y", "SELECT", "START", "UP", "DOWN", "LEFT", "RIGHT", "A", "X", "L", "R"]
actions = [['LEFT'], ['RIGHT'], ['B'], ['L'], ['R']]
self._actions = []
for action in actions:
arr = np.array([False] * 12)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a):
return self._actions[a].copy()
class RewardScaler(gym.RewardWrapper):
"""
Bring rewards to a reasonable scale for PPO. This is incredibly important
and effects performance a lot.
"""
def reward(self, reward):
return reward * 0.01
class StochasticFrameSkip(gym.Wrapper):
def __init__(self, env, n, stickprob):
gym.Wrapper.__init__(self, env)
self.n = n
self.stickprob = stickprob
self.curac = None
self.rng = np.random.RandomState()
def reset(self, **kwargs):
self.curac = None
return self.env.reset(**kwargs)
def step(self, ac):
done = False
totrew = 0
for i in range(self.n):
# First step after reset, use action
if self.curac is None:
self.curac = ac
# First substep, delay with probability=stickprob
elif i == 0:
if self.rng.rand() > self.stickprob:
self.curac = ac
# Second substep, new action definitely kicks in
elif i == 1:
self.curac = ac
ob, rew, done, info = self.env.step(self.curac)
totrew += rew
if done:
break
return ob, totrew, done, info
class Monitor(gym.Wrapper):
def __init__(self, env, monitorfile, logfile=None):
gym.Wrapper.__init__(self, env)
self.file = open(monitorfile, 'w')
self.csv = csv.DictWriter(self.file, ['r', 'l', 't'])
#if logfile is not None:
self.log = open(logfile, 'w')
self.logcsv = csv.DictWriter(self.log, ['l', 't'])
self.episode_reward = 0
self.episode_length = 0
self.total_length = 0
self.start = None
self.csv.writeheader()
self.file.flush()
#if logfile is not None:
self.logcsv.writeheader()
self.log.flush()
self.logfile = logfile
def reset(self, **kwargs):
if not self.start:
self.start = time.time()
else:
self.csv.writerow({
'r': self.episode_reward,
'l': self.episode_length,
't': time.time() - self.start
})
self.file.flush()
self.episode_length = 0
self.episode_reward = 0
return self.env.reset(**kwargs)
def step(self, ac):
ob, rew, done, info = self.env.step(ac)
self.episode_length += 1
self.total_length += 1
self.episode_reward += rew
#if self.logfile is not None:
if self.total_length % 1000 == 0:
self.logcsv.writerow({
'l': self.total_length,
't': time.time() - self.start
})
self.log.flush()
return ob, rew, done, info
def __del__(self):
self.file.close()
def make(game, state, stack=True, scale_rew=True, monitordir='logs/', bk2dir='videos/'):
"""
Create an environment with some standard wrappers.
"""
env = retro.make(game, state)
if bk2dir:
env.auto_record('videos/')
if monitordir:
#env = Monitor(env, os.path.join(monitordir, 'monitor.csv'), os.path.join(monitordir, 'log.csv'))
time_int = int(time.time())
env = Monitor(env, os.path.join('monitor_{}.csv'.format(time_int)), os.path.join('log_{}.csv'.format(time_int)))
env = StochasticFrameSkip(env, n=6, stickprob=0.0)
env = SonicDiscretizer(env)
if scale_rew:
env = RewardScaler(env)
env = WarpFrame(env)
if stack:
env = FrameStack(env, 4)
return env
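# Added usage sketch (not part of the original module); the game/state names are
# assumptions and the matching ROM must already be imported into gym-retro.
if __name__ == '__main__':
    env = make(game='SuperMarioWorld-Snes', state='YoshiIsland1', bk2dir=None)
    ob = env.reset()
    ob, rew, done, info = env.step(env.action_space.sample())
    print(np.asarray(ob).shape, rew, done)  # frame stack should be (80, 80, 4)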
``` |
{
"source": "jhphan/geneflow2",
"score": 3
} |
#### File: geneflow/cli/add_workflows.py
```python
from geneflow.config import Config
from geneflow.data import DataSource, DataSourceException
from geneflow.log import Log
def init_subparser(subparsers):
"""Initialize the add-workflows CLI subparser."""
parser = subparsers.add_parser(
'add-workflows', help='add workflows to database'
)
parser.add_argument(
'workflow_yaml',
type=str,
help='geneflow definition yaml with workflows'
)
parser.add_argument(
'-c', '--config',
type=str,
required=True,
help='geneflow config file path'
)
parser.add_argument(
'-e', '--environment',
type=str,
required=True,
help='environment'
)
parser.set_defaults(func=add_workflows)
return parser
def add_workflows(args, other_args, subparser=None):
"""
Add GeneFlow workflows to database.
Args:
args.workflow_yaml: GeneFlow definition with workflows.
args.config: GeneFlow config file path.
args.environment: Config environment.
Returns:
On success: True.
On failure: False.
"""
workflow_yaml = args.workflow_yaml
config = args.config
environment = args.environment
# load config file
cfg = Config()
if not cfg.load(config):
Log.an().error('cannot load config file: %s', config)
return False
config_dict = cfg.config(environment)
if not config_dict:
Log.an().error('invalid config environment: %s', environment)
return False
# connect to data source
try:
data_source = DataSource(config_dict['database'])
except DataSourceException as err:
Log.an().error('data source initialization error [%s]', str(err))
return False
# import workflow
defs = data_source.import_workflows_from_def(workflow_yaml)
if not defs:
Log.an().error('workflow definition load failed: %s', workflow_yaml)
return False
data_source.commit()
# display new IDs
for workflow in defs:
Log.some().info(
'workflow loaded: %s -> %s', workflow, defs[workflow]
)
return True
```
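A programmatic sketch of the same operation the `gf add-workflows` subcommand performs (file paths and the environment name are placeholders):
```python
# Sketch only; the YAML/config paths and environment name are assumptions.
from types import SimpleNamespace
from geneflow.cli.add_workflows import add_workflows

args = SimpleNamespace(
    workflow_yaml='workflows.yaml',  # hypothetical GeneFlow definition file
    config='geneflow.cfg',           # hypothetical config file path
    environment='local'              # hypothetical config environment
)
if not add_workflows(args, other_args=[]):
    raise SystemExit('add-workflows failed')
```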
#### File: geneflow/cli/common.py
```python
from geneflow.log import Log
from geneflow.workflow import Workflow
def run_workflow(job, config, log_level):
"""
Run a GeneFlow workflow.
Args:
job: job dict describing run.
config: GeneFlow configuration dict.
log_level: logging level for this run.
Returns:
On success: Workflow job dict.
On failure: False.
"""
if job['log']:
# reconfig log location for this run
Log.config(log_level, job['log'])
Log.some().info('job loaded: %s -> %s', job['name'], job['id'])
# run job
workflow = Workflow(job['id'], config)
if not workflow.initialize():
Log.an().error('workflow initialization failed: job_id=%s', job['id'])
return False
Log.some().info('running workflow:\n%s', str(workflow))
if not workflow.run():
Log.an().error('workflow run failed: job_id=%s', job['id'])
return False
Log.some().info('workflow complete:\n%s', str(workflow))
return workflow.get_job()
```
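For orientation, a sketch of how `run_workflow` might be invoked (the job and config dicts are illustrative placeholders; in practice they come from the GeneFlow database and config file):
```python
# Sketch only; all dictionary values below are assumptions.
from geneflow.cli.common import run_workflow

job = {'id': 'abc123', 'name': 'example-job', 'log': '/tmp/example-job.log'}
config = {'database': {'type': 'sqlite', 'path': 'geneflow.db'}}  # hypothetical
result = run_workflow(job, config, log_level='info')
if result is False:
    raise SystemExit('workflow run failed')
```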
#### File: geneflow/extend/contexts.py
```python
class Contexts:
"""
A class that contains GeneFlow context mappings.
"""
# is data context option initialized by workflow class?
mapping = {
'local': {
'exec': True,
'data': True,
'data_scheme': 'local'
},
'agave': {
'exec': True,
'data': True,
'data_scheme': 'agave'
},
'gridengine': {
'exec': True,
'data': False,
'data_scheme': 'local'
},
'slurm': {
'exec': True,
'data': False,
'data_scheme': 'local'
},
'tapis': {
'alias': 'agave'
}
}
@classmethod
def is_exec_context(cls, context):
"""
Determine if a context is an execution context.
Args:
cls: class object
context: context to check
Returns:
True: context is in mapping dict and 'exec' is True.
False: context is not in mapping dict, or 'exec' is False.
"""
if context in cls.mapping:
if 'alias' in cls.mapping[context]:
return cls.is_exec_context(cls.mapping[context]['alias'])
else:
return cls.mapping[context]['exec']
else:
return False
@classmethod
def is_data_context(cls, context):
"""
Determine if a context is a data context.
Args:
cls: class object
context: context to check
Returns:
True: context is in mapping dict and 'data' is True.
False: context is not in mapping dict, or 'data' is False.
"""
if context in cls.mapping:
if 'alias' in cls.mapping[context]:
return cls.is_data_context(cls.mapping[context]['alias'])
else:
return cls.mapping[context]['data']
else:
return False
@classmethod
def get_data_scheme_of_exec_context(cls, context):
"""
Return the data scheme for a execution context.
Args:
cls: class object
context: context to check
Returns:
data scheme: if context is a valid execution context.
False: context is not in mapping dict, or not an execution context.
"""
if context in cls.mapping:
if 'alias' in cls.mapping[context]:
return cls.get_data_scheme_of_exec_context(cls.mapping[context]['alias'])
else:
if cls.mapping[context]['exec']:
return cls.mapping[context]['data_scheme']
else:
return False
else:
return False
```
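A quick illustration of the `Contexts` helpers above, including alias resolution:
```python
from geneflow.extend.contexts import Contexts

print(Contexts.is_exec_context('slurm'))                    # True
print(Contexts.is_data_context('slurm'))                    # False
print(Contexts.is_exec_context('tapis'))                    # True ('tapis' is an alias of 'agave')
print(Contexts.get_data_scheme_of_exec_context('tapis'))    # 'agave'
print(Contexts.get_data_scheme_of_exec_context('unknown'))  # False
```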
#### File: geneflow/extend/local_step.py
```python
from slugify import slugify
from geneflow.log import Log
from geneflow.workflow_step import WorkflowStep
from geneflow.data_manager import DataManager
from geneflow.uri_parser import URIParser
from geneflow.shell_wrapper import ShellWrapper
class LocalStep(WorkflowStep):
"""
A class that represents Local Workflow step objects.
Inherits from the "WorkflowStep" class.
"""
def __init__(
self,
job,
step,
app,
inputs,
parameters,
config,
depend_uris,
data_uris,
source_context,
clean=False,
local={}
):
"""
Instantiate LocalStep class by calling the super class constructor.
See documentation for WorkflowStep __init__().
"""
super(LocalStep, self).__init__(
job,
step,
app,
inputs,
parameters,
config,
depend_uris,
data_uris,
source_context,
clean
)
def initialize(self):
"""
Initialize the LocalStep class.
Validate that the step context is appropriate for this "local" context.
And that the app contains a "local" definition.
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
# make sure the step context is local
if self._step['execution']['context'] != 'local':
msg = (
'"local" step class can only be instantiated with a'
' step definition that has a "local" execution context'
)
Log.an().error(msg)
return self._fatal(msg)
# make sure app has a local implementation
if 'local' not in self._app['implementation']:
msg = (
'"local" step class can only be instantiated with an app that'
' has a "local" implementation'
)
Log.an().error(msg)
return self._fatal(msg)
if not super(LocalStep, self).initialize():
msg = 'cannot initialize workflow step'
Log.an().error(msg)
return self._fatal(msg)
return True
def _init_data_uri(self):
"""
Create output data URI for the source context (local).
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
# make sure the source data URI has a compatible scheme (local)
if self._parsed_data_uris[self._source_context][0]['scheme'] != 'local':
msg = 'invalid data uri scheme for this step: {}'.format(
self._parsed_data_uris[self._source_context][0]['scheme']
)
Log.an().error(msg)
return self._fatal(msg)
# delete old folder if it exists and clean==True
if (
DataManager.exists(
parsed_uri=self._parsed_data_uris[self._source_context][0]
)
and self._clean
):
if not DataManager.delete(
parsed_uri=self._parsed_data_uris[self._source_context][0]
):
Log.a().warning(
'cannot delete existing data uri: %s',
self._parsed_data_uris[self._source_context][0]['chopped_uri']
)
# create folder
if not DataManager.mkdir(
parsed_uri=self._parsed_data_uris[self._source_context][0],
recursive=True
):
msg = 'cannot create data uri: {}'.format(
self._parsed_data_uris[self._source_context][0]['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
# create _log folder
if not DataManager.mkdir(
uri='{}/_log'.format(
self._parsed_data_uris[self._source_context][0]['chopped_uri']
),
recursive=True
):
msg = 'cannot create _log folder in data uri: {}/_log'.format(
self._parsed_data_uris[self._source_context][0]['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
return True
def _get_map_uri_list(self):
"""
Get the contents of the map URI (local URI).
Args:
self: class instance.
Returns:
Array of base file names in the map URI. Returns False on
exception.
"""
combined_file_list = []
for uri in self._parsed_map_uris:
# make sure map URI is compatible scheme (local)
if uri['scheme'] != 'local':
msg = 'invalid map uri scheme for this step: {}'.format(
uri['scheme']
)
Log.an().error(msg)
return self._fatal(msg)
# get file list from URI
file_list = DataManager.list(
parsed_uri=uri,
globstr=self._step['map']['glob']
)
if file_list is False:
msg = 'cannot get contents of map uri: {}'\
.format(uri['chopped_uri'])
Log.an().error(msg)
return self._fatal(msg)
for f in file_list:
combined_file_list.append({
'chopped_uri': uri['chopped_uri'],
'filename': f
})
return combined_file_list
def _run_map(self, map_item):
"""
Run a job for each map item and store the proc and PID.
Args:
self: class instance.
map_item: map item object (item of self._map).
Returns:
On success: True.
On failure: False.
"""
# load default app inputs, overwrite with template inputs
inputs = {}
for input_key in self._app['inputs']:
if input_key in map_item['template']:
inputs[input_key] = map_item['template'][input_key]
else:
if self._app['inputs'][input_key]['default']:
inputs[input_key] = self._app['inputs'][input_key]['default']
# load default app parameters, overwrite with template parameters
parameters = {}
for param_key in self._app['parameters']:
if param_key in map_item['template']:
parameters[param_key] = map_item['template'][param_key]
else:
if self._app['parameters'][param_key]['default'] not in [None, '']:
parameters[param_key] \
= self._app['parameters'][param_key]['default']
# construct shell command
cmd = self._app['implementation']['local']['script']
for input_key in inputs:
if inputs[input_key]:
cmd += ' --{}="{}"'.format(
input_key,
URIParser.parse(inputs[input_key])['chopped_path']
)
for param_key in parameters:
if param_key == 'output':
cmd += ' --output="{}/{}"'.format(
self._parsed_data_uris[self._source_context][0]\
['chopped_path'],
parameters['output']
)
else:
cmd += ' --{}="{}"'.format(
param_key, parameters[param_key]
)
        # add execution method
cmd += ' --exec_method="{}"'.format(self._step['execution']['method'])
# specify execution init commands if 'init' param given
if 'init' in self._step['execution']['parameters']:
cmd += ' --exec_init="{}"'.format(self._step['execution']['parameters']['init'])
# add stdout and stderr
log_path = '{}/_log/gf-{}-{}-{}'.format(
self._parsed_data_uris[self._source_context][0]['chopped_path'],
map_item['attempt'],
slugify(self._step['name'], regex_pattern=r'[^-a-z0-9_]+'),
slugify(map_item['template']['output'], regex_pattern=r'[^-a-z0-9_]+')
)
cmd += ' > "{}.out" 2> "{}.err"'.format(log_path, log_path)
Log.a().debug('command: %s', cmd)
# launch process
proc = ShellWrapper.spawn(cmd)
if proc is False:
msg = 'shell process error: {}'.format(cmd)
Log.an().error(msg)
return self._fatal(msg)
# record job info
map_item['run'][map_item['attempt']]['proc'] = proc
map_item['run'][map_item['attempt']]['pid'] = proc.pid
# set status of process
map_item['status'] = 'RUNNING'
map_item['run'][map_item['attempt']]['status'] = 'RUNNING'
return True
def run(self):
"""
Execute shell scripts for each of the map items.
Then store PIDs in run detail.
Args:
self: class instance.
Returns:
On success: True.
On failure: False.
"""
for map_item in self._map:
if not self._run_map(map_item):
msg = 'cannot run script for map item "{}"'\
.format(map_item['filename'])
Log.an().error(msg)
return self._fatal(msg)
self._update_status_db('RUNNING', '')
return True
def _serialize_detail(self):
"""
Serialize map-reduce items.
But leave out non-serializable Popen proc item, keep pid.
Args:
self: class instance.
Returns:
A dict of all map items and their run histories.
"""
return {
map_item['filename']: [
{
'status': run_item['status'],
'pid': run_item['pid']
} for run_item in map_item['run']
] for map_item in self._map
}
def check_running_jobs(self):
"""
Check the status/progress of all map-reduce items and update _map status.
Args:
self: class instance.
Returns:
True.
"""
# check if procs are running, finished, or failed
for map_item in self._map:
try:
if ShellWrapper.is_running(
map_item['run'][map_item['attempt']]['proc']
):
map_item['status'] = 'RUNNING'
else:
if map_item['run'][map_item['attempt']]['proc'].returncode:
map_item['status'] = 'FAILED'
else:
map_item['status'] = 'FINISHED'
map_item['run'][map_item['attempt']]['status']\
= map_item['status']
except (OSError, AttributeError) as err:
Log.a().warning(
'process polling failed for map item "%s" [%s]',
map_item['filename'], str(err)
)
map_item['status'] = 'FAILED'
self._update_status_db(self._status, '')
return True
def retry_failed(self):
"""
Retry any map-reduce jobs that failed.
This is not-yet supported for local apps.
Args:
self: class instance.
Returns:
False.
"""
msg = 'retry not yet supported for local apps'
Log.an().error(msg)
return self._fatal(msg)
```
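For orientation, the command string that `_run_map()` assembles and hands to `ShellWrapper.spawn()` has roughly the following shape (the script name, option names, and paths are invented for illustration):
```python
# Illustrative only; every value below is an assumption, not taken from a real app.
cmd = (
    'my-app.sh'                                      # app['implementation']['local']['script']
    ' --reads="/work/job1/step1/sample1.fastq"'      # one app input
    ' --output="/work/job1/step2/sample1"'           # output parameter under the step data URI
    ' --exec_method="auto"'                          # step execution method
    ' > "/work/job1/step2/_log/gf-0-step2-sample1.out"'
    ' 2> "/work/job1/step2/_log/gf-0-step2-sample1.err"'
)
print(cmd)
```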
#### File: geneflow/extend/local_workflow.py
```python
class LocalWorkflow:
"""
A class that represents the Local Workflow objects.
"""
def __init__(
self,
job,
config,
parsed_job_work_uri
):
"""
Instantiate LocalWorkflow class.
"""
self._job = job
self._config = config
self._parsed_job_work_uri = parsed_job_work_uri
def initialize(self):
"""
Initialize the LocalWorkflow class.
This workflow class has no additional functionality.
Args:
None.
Returns:
True.
"""
return True
def init_data(self):
"""
Initialize any data specific to this context.
"""
return True
def get_context_options(self):
"""
Return dict of options specific for this context.
Args:
None.
Returns:
{} - no options specific for this context.
"""
return {}
```
#### File: geneflow/extend/slurm_workflow.py
```python
import drmaa
from geneflow.log import Log
class SlurmWorkflow:
"""
A class that represents the SLURM Workflow objects.
"""
def __init__(
self,
job,
config,
parsed_job_work_uri
):
"""
Instantiate LocalWorkflow class.
"""
self._job = job
self._config = config
self._parsed_job_work_uri = parsed_job_work_uri
# drmaa library for grid engine
self._drmaa_session = drmaa.Session()
Log.some().debug('DRMAA contact strings: {}'.format(self._drmaa_session.contact))
Log.some().debug('DRMAA systems: {}'.format(self._drmaa_session.drmsInfo))
Log.some().debug('DRMAA implementations: {}'.format(self._drmaa_session.drmaaImplementation))
Log.some().debug('DRMAA version: {}'.format(self._drmaa_session.version))
def __del__(self):
"""
Disconnect from drmaa session when workflow class is deleted.
Args:
None.
Returns:
Nothing.
"""
try:
self._drmaa_session.exit()
except drmaa.errors.DrmaaException as err:
Log.a().warning(
'cannot exit drmaa session: [%s]', str(err)
)
def initialize(self):
"""
Initialize the SlurmWorkflow class.
This workflow class has no additional functionality.
Args:
None.
Returns:
On success: True.
On failure: False.
"""
try:
self._drmaa_session.initialize()
except drmaa.errors.DrmaaException as err:
Log.an().error(
'cannot initialize drmaa session: [%s]', str(err)
)
return False
return True
def init_data(self):
"""
Initialize any data specific to this context.
"""
return True
def get_context_options(self):
"""
Return dict of options specific for this context.
Args:
None.
Returns:
Dict containing drmaa session.
"""
return {
'drmaa_session': self._drmaa_session
}
    def _re_init(self):
"""Reinit drmaa session."""
# exit existing session
try:
self._drmaa_session.exit()
except drmaa.errors.DrmaaException as err:
Log.a().warning(
'cannot exit drmaa session: [%s]', str(err)
)
# initialize session again
try:
self._drmaa_session.initialize()
except drmaa.errors.DrmaaException as err:
Log.an().error(
'cannot initialize drmaa session: [%s]', str(err)
)
return False
return True
```
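A minimal sketch of exercising this context class (assumes drmaa-python is installed and `DRMAA_LIBRARY_PATH` points at the SLURM DRMAA library; the constructor arguments are placeholders):
```python
# Sketch only; job/config/work-URI values are placeholders.
from geneflow.extend.slurm_workflow import SlurmWorkflow

wf = SlurmWorkflow(job={'name': 'example'}, config={}, parsed_job_work_uri={})
if wf.initialize():
    session = wf.get_context_options()['drmaa_session']
    print('DRMAA implementation:', session.drmaaImplementation)
```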
#### File: src/geneflow/__main__.py
```python
import sys
import argparse
import geneflow.cli.common
import geneflow.cli.add_apps
import geneflow.cli.add_workflows
import geneflow.cli.help
import geneflow.cli.init_db
import geneflow.cli.install_workflow
import geneflow.cli.make_app
import geneflow.cli.migrate_db
import geneflow.cli.run
import geneflow.cli.run_pending
from geneflow.log import Log
from geneflow import __version__
def parse_args():
"""
Parse command line arguments.
Args:
None.
Returns:
Command line arguments.
"""
parser = argparse.ArgumentParser(
description='GeneFlow CLI',
prog='gf'
)
# print version
parser.add_argument(
'-v',
'--version',
action='version',
version='%(prog)s {}'.format(__version__)
)
# shared arguments
parser.add_argument(
'--log-level',
type=str,
default='info',
dest='log_level',
help='logging level'
)
parser.add_argument(
'--log-file',
type=str,
default=None,
dest='log_file',
help='log file'
)
parser.set_defaults(func=None)
subparsers = parser.add_subparsers(help='Functions', dest='command')
subparser_dict = {}
# configure arguments for sub-commands
subparser_dict['add-apps'] = geneflow.cli.add_apps.init_subparser(subparsers)
subparser_dict['add-workflows'] = geneflow.cli.add_workflows.init_subparser(subparsers)
subparser_dict['help'] = geneflow.cli.help.init_subparser(subparsers)
subparser_dict['init-db'] = geneflow.cli.init_db.init_subparser(subparsers)
subparser_dict['install-workflow'] = geneflow.cli.install_workflow.init_subparser(subparsers)
subparser_dict['make-app'] = geneflow.cli.make_app.init_subparser(subparsers)
subparser_dict['migrate-db'] = geneflow.cli.migrate_db.init_subparser(subparsers)
subparser_dict['run'] = geneflow.cli.run.init_subparser(subparsers)
subparser_dict['run-pending'] = geneflow.cli.run_pending.init_subparser(subparsers)
# parse arguments
args = parser.parse_known_args()
if not args[0].func:
parser.print_help()
return False
return args, subparser_dict[args[0].command]
def main():
"""
Geneflow CLI main entrypoint.
Args:
None.
Returns:
Nothing.
"""
args, subparser = parse_args()
if not args:
sys.exit(1)
# configure logging
Log.config(args[0].log_level, args[0].log_file)
# display GeneFlow version
Log.some().info('GeneFlow %s', __version__)
# call the appropriate command
if not args[0].func(
args=args[0],
other_args=args[1],
subparser=subparser
):
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
``` |
{
"source": "jhphan/geneflow",
"score": 3
} |
#### File: src/geneflow/workflow.py
```python
import time
import copy
import requests
from slugify import slugify
import yaml
from geneflow.log import Log
from geneflow.data import DataSource, DataSourceException
from geneflow.data_manager import DataManager
from geneflow.definition import Definition
from geneflow.workflow_dag import WorkflowDAG, WorkflowDAGException
from geneflow.uri_parser import URIParser
from geneflow.extend.contexts import Contexts
class Workflow:
"""Wraps workflow, job, app loading and running calls."""
def __init__(self, job_id, config):
"""
Initialize the GeneFlow Workflow class.
Initialize the class by loading the job and the config.
Args:
self: class instance
job_id: Job identifier
config: the Workflow subsection of the GeneFlow configuration
Returns:
Class instance.
"""
self._config = config # configuration structure
self._job_id = job_id
self._job = None # job definition
self._workflow = None # workflow definition
self._apps = None # app definitions
self._dag = None # WorkflowDAG class instance
self._status = 'PENDING'
self._parsed_job_work_uri = {}
self._parsed_job_output_uri = {}
self._exec_contexts = set() # all execution contexts
self._data_contexts = set() # all data contexts
# context-specific data and methods
self._workflow_context = {}
def initialize(self):
"""
Initialize the GeneFlow Workflow class.
Initialize the class by loading the workflow and job definitions
from the database, creating work and output URIs, and creating step
objects.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# load and validate job definition from database
if not self._load_job():
msg = 'cannot load job definition'
Log.an().error(msg)
return self._fatal(msg)
# load and validate workflow definition from database
if not self._load_workflow():
msg = 'cannot load workflow definition'
Log.an().error(msg)
return self._fatal(msg)
# load and validate app definitions from database
if not self._load_apps():
msg = 'cannot load app definitions'
Log.an().error(msg)
return self._fatal(msg)
# inject job parameters into workflow def
if not self._inject_job_params():
msg = 'cannot inject job parameters into workflow definition'
Log.an().error(msg)
return self._fatal(msg)
# initialize set of execution contexts
if not self._init_exec_context_set():
msg = 'cannot initialize set of execution contexts'
Log.an().error(msg)
return self._fatal(msg)
# initialize set of data contexts
if not self._init_data_context_set():
msg = 'cannot initialize set of data contexts'
Log.an().error(msg)
return self._fatal(msg)
# validate all work and output URIs
if not self._init_job_uris():
msg = 'cannot construct and validate work and output uris'
Log.an().error(msg)
return self._fatal(msg)
# initialize context-specific workflow items (e.g., context connection info)
if not self._init_workflow_contexts():
msg = 'cannot initialize context-specific workflow properties'
Log.an().error(msg)
return self._fatal(msg)
# create all work and output URIs
if not self._create_job_uris():
msg = 'cannot create work and output uris'
Log.an().error(msg)
return self._fatal(msg)
# initialize context-specific workflow data items (e.g., create remote directories)
if not self._init_workflow_context_data():
msg = 'cannot initialize context-specific workflow data'
Log.an().error(msg)
return self._fatal(msg)
# initialize directed acyclic graph structure
if not self._init_dag():
msg = 'cannot initialize workflow graph structure'
Log.an().error(msg)
return self._fatal(msg)
return True
def __str__(self):
"""
Workflow string representation.
Args:
None.
Returns:
A string representation of the workflow.
"""
str_rep = (
'Job: {} ({})'
'\n Workflow: {}'
'\n Version: {}'
'\n Description: {}'
'\n Git: {}'
).format(
self._job['name'],
self._job_id,
self._workflow['name'],
self._workflow['version'],
self._workflow['description'],
self._workflow['git']
)
str_rep += '\n Inputs: '
for input_key in self._workflow['inputs']:
str_rep += '\n {}: {}'.format(
input_key, self._workflow['inputs'][input_key]['value']
)
str_rep += '\n Parameters: '
for parameter_key in self._workflow['parameters']:
str_rep += '\n {}: {}'.format(
parameter_key,
self._workflow['parameters'][parameter_key]['value']
)
str_rep += '\n Work URIs: '
for context in self._parsed_job_work_uri:
str_rep += '\n {}: {}'.format(
context, self._parsed_job_work_uri[context]['chopped_uri']
)
str_rep += '\n Output URI: {}'.format(
self._parsed_job_output_uri['chopped_uri']
)
return str_rep
def _fatal(self, msg):
self._update_status_db('ERROR', msg)
return False
def _load_job(self):
"""
Load and validate job definition from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._job = data_source.get_job_def_by_id(self._job_id)
if self._job is False:
msg = 'cannot load job from data source: job_id={}'\
.format(self._job_id)
Log.an().error(msg)
return self._fatal(msg)
if not self._job:
msg = 'job not found: job_id={}'.format(self._job_id)
Log.an().error(msg)
return self._fatal(msg)
# validate the job definition
valid_def = Definition.validate_job(self._job)
if valid_def is False:
msg = 'invalid job definition:\n{}'.format(yaml.dump(self._job))
Log.an().error(msg)
return self._fatal(msg)
self._job = valid_def
return True
def _load_workflow(self):
"""
Load and validate workflow definition from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._workflow = data_source.get_workflow_def_by_id(
self._job['workflow_id']
)
if self._workflow is False:
msg = 'cannot load workflow from data source: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
if not self._workflow:
msg = 'workflow not found: workflow_id={}'\
.format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
# validate the workflow definition
valid_def = Definition.validate_workflow(self._workflow)
if valid_def is False:
msg = 'invalid workflow definition:\n{}'\
.format(yaml.dump(self._workflow))
Log.an().error(msg)
return self._fatal(msg)
self._workflow = valid_def
return True
def _load_apps(self):
"""
Load and validate app definitions from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._apps = data_source.get_app_defs_by_workflow_id(
self._job['workflow_id']
)
if self._apps is False:
msg = 'cannot load apps from data source: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
if not self._apps:
msg = 'no apps found for workflow: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
# validate the app definitions
for app in self._apps:
valid_def = Definition.validate_app(self._apps[app])
if valid_def is False:
msg = 'invalid app definition:\n{}'\
.format(yaml.dump(self._apps[app]))
Log.an().error(msg)
return self._fatal(msg)
self._apps[app] = valid_def
return True
def _inject_job_params(self):
# substitute inputs
for input_key in self._workflow['inputs']:
self._workflow['inputs'][input_key]['value']\
= self._workflow['inputs'][input_key]['default']
for input_key in self._job['inputs']:
if input_key in self._workflow['inputs']:
self._workflow['inputs'][input_key]['value']\
= self._job['inputs'][input_key]
# substitute parameters
for parameter_key in self._workflow['parameters']:
self._workflow['parameters'][parameter_key]['value']\
= self._workflow['parameters'][parameter_key]['default']
for parameter_key in self._job['parameters']:
if parameter_key in self._workflow['parameters']:
self._workflow['parameters'][parameter_key]['value']\
= self._job['parameters'][parameter_key]
# update publish list
if self._job['publish']:
# over-ride the workflow publish list with the job publish list
self._workflow['publish'] = self._job['publish']
# update the publish list based on publish flag of each step
for step_name, step in self._workflow['steps'].items():
if step['publish']:
if step_name not in self._workflow['publish']:
self._workflow['publish'].append(step_name)
# insert step execution parameters
for step_name, step in self._workflow['steps'].items():
step['execution'] = {
'context': self._job['execution']['context']['default'],
'method': self._job['execution']['method']['default'],
'parameters': copy.deepcopy(self._job['execution']['parameters']['default'])
}
if step_name in self._job['execution']['context']:
step['execution']['context'] \
= self._job['execution']['context'][step_name]
if step_name in self._job['execution']['method']:
step['execution']['method'] \
= self._job['execution']['method'][step_name]
if step_name in self._job['execution']['parameters']:
# only copy params that have been set to avoid deleting default params
for param_name in self._job['execution']['parameters'][step_name]:
step['execution']['parameters'][param_name] \
= self._job['execution']['parameters'][step_name][param_name]
return True
def _init_exec_context_set(self):
"""
Initialize set of execution contexts, which is specified by the execution.context job
parameters.
Args:
self: class instance
Returns:
On success: True.
"""
# get explicit execution contexts from the job parameters
self._exec_contexts = set(self._job['execution']['context'].values())
# check validity of exec contexts
for context in self._exec_contexts:
if not Contexts.is_exec_context(context):
msg = 'invalid exec context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
Log.some().debug('execution contexts: %s', self._exec_contexts)
return True
def _init_data_context_set(self):
"""
Initialize set of data contexts, which is determined by inputs and output.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# check input URIs for data contexts
for input_key in self._workflow['inputs']:
parsed_uri = URIParser.parse(self._workflow['inputs'][input_key]['value'][0])
if not parsed_uri:
msg = 'invalid input uri: {}'.format(
self._workflow['inputs'][input_key]['value'][0]
)
Log.an().error(msg)
return self._fatal(msg)
self._data_contexts.add(parsed_uri['scheme'])
# add output URI data context
parsed_output_uri = URIParser.parse(self._job['output_uri'])
if not parsed_output_uri:
msg = 'invalid base of job output uri: {}'.format(
self._job['output_uri']
)
Log.an().error(msg)
return self._fatal(msg)
self._data_contexts.add(parsed_output_uri['scheme'])
# check validity of data contexts
for context in self._data_contexts:
if not Contexts.is_data_context(context):
msg = 'invalid data context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
Log.some().debug('data contexts: %s', self._data_contexts)
return True
def _init_job_uris(self):
"""
Initialize all work and output URIs.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# name of the job directory
job_dir = slugify(self._job['name'], regex_pattern=r'[^-a-z0-9_]+')
job_dir_hash = '{}-{}'.format(job_dir, self._job['job_id'][:8])
# validate work URI for each exec context
# use the 'data_scheme' for each execution context
# and place into a set to remove repeats
for context in {
Contexts.get_data_scheme_of_exec_context(con)
for con in self._exec_contexts
}:
# work_uri must be set for each exec_context
if context not in self._job['work_uri']:
msg = 'missing work_uri for context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
parsed_uri = URIParser.parse(self._job['work_uri'][context])
if not parsed_uri:
msg = 'invalid base of job work uri for context: {}->{}'.format(
context, self._job['work_uri'][context]
)
Log.an().error(msg)
return self._fatal(msg)
# append hashed job dir to each context
full_job_work_uri = (
'{}{}' if parsed_uri['chopped_path'] == '/' else '{}/{}'
).format(parsed_uri['chopped_uri'], job_dir_hash)
# validate again after appending
parsed_job_work_uri = URIParser.parse(full_job_work_uri)
if not parsed_job_work_uri:
msg = 'invalid job work uri for context: {}->{}'.format(
context, full_job_work_uri
)
Log.an().error(msg)
return self._fatal(msg)
self._parsed_job_work_uri[context] = parsed_job_work_uri
# validate output URI
parsed_uri = URIParser.parse(self._job['output_uri'])
if not parsed_uri:
msg = 'invalid base of job output uri: {}'.format(
self._job['output_uri']
)
Log.an().error(msg)
return self._fatal(msg)
# append job dir (hashed or not) to output uri
full_job_output_uri = (
'{}{}' if parsed_uri['chopped_path'] == '/' else '{}/{}'
).format(
parsed_uri['chopped_uri'],
job_dir if self._job['no_output_hash'] else job_dir_hash
)
# validate again after appending
parsed_job_output_uri = URIParser.parse(full_job_output_uri)
if not parsed_job_output_uri:
msg = 'invalid job output uri: {}'.format(
full_job_output_uri
)
Log.an().error(msg)
return self._fatal(msg)
self._parsed_job_output_uri = parsed_job_output_uri
return True
def _init_workflow_context_data(self):
"""
Initialize data components of workflow contexts.
Args:
None.
Returns:
On success: True.
On failure: False.
"""
for exec_context in self._exec_contexts:
if not self._workflow_context[exec_context].init_data():
msg = (
'cannot initialize data for workflow context: {}'\
.format(exec_context)
)
Log.an().error(msg)
return self._fatal(msg)
return True
def _init_workflow_contexts(self):
"""
Import modules and load classes for each workflow context.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# currently the union of all execution and data contexts will be used
# to initialize workflow contexts/classes. the reason is that all supported
# data contexts are also execution contexts. This may change in the future
# with data-only contexts (e.g., http/s). In that case, a new method
# (_init_data_contexts) will be added to populate a _data_context variable.
for context in self._exec_contexts | self._data_contexts:
mod_name = '{}_workflow'.format(context)
cls_name = '{}Workflow'.format(context.capitalize())
try:
workflow_mod = __import__(
'geneflow.extend.{}'.format(mod_name),
fromlist=[cls_name]
)
except ImportError as err:
msg = 'cannot import workflow module: {} [{}]'.format(
mod_name, str(err)
)
Log.an().error(msg)
return self._fatal(msg)
try:
workflow_class = getattr(workflow_mod, cls_name)
except AttributeError as err:
msg = 'cannot import workflow class: {} [{}]'.format(
cls_name, str(err)
)
Log.an().error(msg)
return self._fatal(msg)
self._workflow_context[context] = workflow_class(
self._config, self._job, self._parsed_job_work_uri
)
# perform context-specific init
if not self._workflow_context[context].initialize():
msg = (
'cannot initialize workflow context: {}'.format(cls_name)
)
Log.an().error(msg)
return self._fatal(msg)
return True
def _create_job_uris(self):
"""
Create all work and output URIs.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# create work URIs. a work URI is required for each workflow context
for context in {
Contexts.mapping[exec_context]['data_scheme']
for exec_context in self._exec_contexts
}:
if not DataManager.mkdir(
parsed_uri=self._parsed_job_work_uri[context],
recursive=True,
**{
context: self._workflow_context[context]\
.get_context_options()
}
):
msg = 'cannot create job work uri for context: {}->{}'.format(
context, self._parsed_job_work_uri[context]['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
# create output URI. output URI scheme must be in the set of data contexts
output_context = self._parsed_job_output_uri['scheme']
if output_context not in self._data_contexts:
msg = 'invalid output context: {}'.format(output_context)
Log.an().error(msg)
return self._fatal(msg)
if not DataManager.mkdir(
parsed_uri=self._parsed_job_output_uri,
recursive=True,
**{
output_context: self._workflow_context[output_context]\
.get_context_options()
}
):
msg = 'cannot create job output uri: {}'.format(
self._parsed_job_output_uri['chopped_uri']
)
Log.an().error(msg)
return self._fatal(msg)
return True
def _init_dag(self):
"""
Initialize NetworkX graph with workflow info from database.
Args:
self: class instance.
Returns:
Result of DAG initialization (True/False).
"""
self._dag = WorkflowDAG(
self._job,
self._workflow,
self._apps,
self._parsed_job_work_uri,
self._parsed_job_output_uri,
self._exec_contexts,
self._data_contexts,
self._config,
**{
context: self._workflow_context[context].get_context_options()\
for context in self._workflow_context
}
)
try:
self._dag.initialize()
except WorkflowDAGException as err:
msg = 'cannot initialize workflow graph class'
Log.an().error(msg)
return self._fatal(str(err)+'|'+msg)
return True
def _re_init(self):
"""Reinitialize connection object."""
return True
def run(self):
"""
Run Workflow.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
self._update_status_db('RUNNING', '')
for node_name in self._dag.get_topological_sort():
node = self._dag.graph().nodes[node_name]
if node['type'] == 'input':
Log.some().debug('[%s]: staging input', node_name)
if not node['node'].stage(
move_final=False,
**{
context: self._workflow_context[context]\
.get_context_options()\
for context in self._workflow_context
}
):
msg = 'staging failed for input {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
else: # step node
# Reinit connection to exec context
if not self._re_init():
msg = 'cannot reinit exec context'
Log.an().error(msg)
return self._fatal(msg)
Log.some().info(
'[%s]: app: %s:%s [%s]',
node_name,
node['node']._app['name'],
node['node']._app['version'],
node['node']._app['git']
)
Log.some().debug('[%s]: iterating map uri', node_name)
if not node['node'].iterate_map_uri():
msg = 'iterate map uri failed for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
# run new jobs and poll until all job(s) done
Log.some().info('[%s]: running', node_name)
while not node['node'].all_done():
if not node['node'].run():
msg = 'run failed for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
node['node'].check_running_jobs()
time.sleep(self._config['run_poll_delay'])
Log.some().debug('[%s]: all jobs complete', node_name)
# check if step satisfies checkpoint of all, any, or none job completion
if not node['node'].checkpoint():
msg = 'failed checkpoint for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
# cleanup jobs
Log.some().debug('[%s]: cleaning', node_name)
if not node['node'].clean_up():
msg = 'clean up failed for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
# stage outputs (non-final)
Log.some().debug('[%s]: staging output', node_name)
if not node['node'].stage(
**{
context: self._workflow_context[context]\
.get_context_options()\
for context in self._workflow_context
}
):
msg = 'staging failed for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
# stage final outputs
for node_name in self._dag.get_topological_sort():
node = self._dag.graph().nodes[node_name]
if node['type'] == 'step':
Log.some().debug('[%s]: staging final output', node_name)
if not node['node'].stage_final(
**{
context: self._workflow_context[context]\
.get_context_options()\
for context in self._workflow_context
}
):
msg = 'staging final output failed for step {}'.format(node_name)
Log.an().error(msg)
return self._fatal(msg)
Log.some().info('[%s]: complete', node_name)
self._update_status_db('FINISHED', '')
return True
def _update_status_db(self, status, msg):
"""
Update workflow status in DB.
Args:
self: class instance
status: Workflow status
msg: Success, error or warning message
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return False
        # set start time (if started, or errored immediately)
if (
status in ['RUNNING', 'ERROR']
and self._status == 'PENDING'
):
if not data_source.set_job_started(self._job_id):
Log.a().warning('cannot set job start time in data source')
data_source.rollback()
# set finished time (even on error)
if status in ['FINISHED', 'ERROR']:
if not data_source.set_job_finished(self._job_id):
Log.a().warning('cannot set job finish time in data source')
data_source.rollback()
# update database
self._status = status
if not data_source.update_job_status(self._job_id, status, msg):
Log.a().warning('cannot update job status in data source')
data_source.rollback()
data_source.commit()
return True
def get_status_struct(self):
"""
Get a workflow status dictionary.
Args:
self: class instance
Returns:
            A dict containing the job's short ID, name, and current status.
"""
struct = {
'id': self._job_id[:8],
'name': self._job['name'],
'status': self._status,
}
return struct
def get_job(self):
"""
Get workflow job info.
Args:
self: class instance
Returns:
workflow job dict
"""
return self._job
def get_status(self):
"""
Get workflow status.
Args:
self: class instance
Returns:
workflow status string
"""
return self._status
def clean_up(self):
"""
Copy/move workflow data to final output location.
Args:
self: class instance
Returns:
True
"""
return True
``` |
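The `run()` method above drives the whole workflow by walking the DAG in topological order: input nodes are staged, then each step node is run, polled until its jobs finish, checkpointed, cleaned up, and its outputs staged. A minimal sketch of that traversal pattern, using a plain `networkx` graph in place of the `WorkflowDAG` wrapper (the node names and the `execute_node` helper below are illustrative only):

```python
# Minimal sketch of topological workflow execution; assumes a plain
# networkx DiGraph rather than the WorkflowDAG wrapper used above.
import networkx as nx

def execute_node(name):
    # placeholder for the real stage/run/checkpoint/clean-up cycle
    print("running", name)

dag = nx.DiGraph()
dag.add_edge("input.reads", "step.trim")   # trim depends on the input
dag.add_edge("step.trim", "step.align")    # align depends on trim

for node_name in nx.topological_sort(dag):
    execute_node(node_name)  # each node runs only after all of its predecessors
```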
{
"source": "jhphillips1029/EngineeringToolbox",
"score": 3
} |
#### File: EngineeringToolbox/materials/materialTesting.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import warnings
warnings.filterwarnings(action='once')
data = None
matData = None
def initData(csvName):
    global data, matData
    data = pd.read_csv(csvName)
    matData = pd.DataFrame(columns=['Name','Diameter','Length','Reduced Diameter','Area','Reduced Area','UTS','Elastic Modulus','Total Fail Strain','Plastic Strain Fail','Elastic Strain Fail','Offset Yield'])
def addMaterial(matInfo):
if len(matInfo) < len(matData.columns):
print("Not enough entries in matInfo")
return
matData.loc[len(matData)] = matInfo
def area(diameter):
return math.pi * (diameter/2)**2
def findUTS(key):
return max(data[key])
def getYoungsModulus(stress,strain,plot=False):
    # estimates the Young's modulus by fitting straight lines to every window of at least minFitLength points
    # in the first half of the data and keeping the steepest slope with a low residual
    # returns the Young's modulus in the same units as the input stress
dummyData = pd.DataFrame(data={'x':strain,'y':stress})
dummyData.dropna(inplace=True)
x=np.array(dummyData['x'][:int(len(dummyData['x'])/2)])
y=np.array(dummyData['y'][:int(len(dummyData['x'])/2)])
numPts = len(x)
minFitLength = 8
chi = 0
chi_min = 10000
i_best=0
j_best=0
m_best=0
for i in range(numPts - minFitLength):
for j in range(i+minFitLength, numPts):
coefs = np.polyfit(x[i:j],y[i:j],1)
y_lin = x * coefs[0] + coefs[1]
chi=0
for k in range(i,j):
chi += (y_lin[k] - y[k])**2
if chi < chi_min and coefs[0] > m_best:
i_best = i
j_best = j
chi_min = chi
m_best = coefs[0]
coefs = np.polyfit(x[i_best:j_best],y[i_best:j_best],1)
y_lin = x[i_best:j_best] * coefs[0] + coefs[1]
if(plot):
plt.plot(x,y,'ro')
plt.plot(x[i_best:j_best],y_lin,'b-')
        print("Young's Modulus: " + str(m_best))  # same units as the input stress
return m_best
def findElasticModulus(stressKey,strainKey):
strain = data[strainKey]
stress = data[stressKey]
return getYoungsModulus(stress,strain)
def getFailure(stress):
# finds the point of failure by looking for largest jump between two stresses
# returns index of point of failure
# stress = np.array(stress)[int(len(stress)/2):]
maxJump=0;
indexVal=0;
for i in range(2,len(stress)):
if( abs(stress[i] - stress[i-2]) > maxJump and stress[i] - stress[i-2] < 0 ):
maxJump = abs(stress[i] - stress[i-2])
indexVal = i
return indexVal-2
def findFailure(stressKey,strainKey):
stress = data[stressKey]
return data[strainKey][getFailure(stress)]
def findPlasticElasticFailureStrain(stressKey,strainKey,elasticModulus,totFailStrain):
    failIndex = getFailure(data[stressKey])
failStress = data[stressKey][failIndex]
return [failStress/elasticModulus,totFailStrain-failStress/elasticModulus]
def getYieldStress(strain, stress, offset, E):
x = strain
y = stress
x_n = x[x>0]
x_n = x_n[y>0]
y_n = y[x>0]
y_n = y_n[y>0]
dummyData = pd.DataFrame(data={'x':x_n,'y':y_n})
dummyData.dropna(inplace=True)
x=np.array(dummyData['x'][:int(len(dummyData['x'])/2)])
y=np.array(dummyData['y'][:int(len(dummyData['x'])/2)])
f=lambda x : E*(x-offset)
u=np.linspace(0,0.2,100)
v=f(u)
minDiff = 1000
index = -1
    for i in range(len(y)):
        for j in range(len(v)):
            if abs(y[i] - v[j]) < minDiff:
                minDiff = abs(y[i] - v[j])
                index = j
    print(v[index])
    return v[index]
def findYieldStress(stressKey,strainKey,elasticModulus,offset=.002):
stress = data[stressKey]
strain = data[strainKey]
return getYieldStress(strain,stress,offset,elasticModulus)
def writeOut(fName):
f = open(fName,'w')
for i in range(matData.shape[0]):
        f.write(matData['Name'][i]+'\n')
        f.write(str(matData['Diameter'][i])+'\n')
        f.write(str(matData['Length'][i])+'\n')
        f.write(str(matData['Elastic Modulus'][i])+'\n')
f.close()
def plotData(stressKeys,strainKeys,names,totalFailureStrain,fName=None):
    for i,(xKey,yKey) in enumerate(zip(strainKeys,stressKeys)):
x = data[xKey]
y = data[yKey]
index = 0
for j in range(len(x)):
if x[j] == totalFailureStrain:
index = j
        xy = np.array([[a,b] for k, (a,b) in enumerate(zip(x,y)) if a>0 and b>0 and k<index])
        plt.plot(xy[:,0],xy[:,1],label=names[i])
plt.xlabel('Strain')
plt.ylabel('Stress')
plt.title('Stress-Strain Curve')
plt.legend(loc=(1.05,.65))
if fName != None:
plt.savefig(fName)
```
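`getYoungsModulus` above estimates the elastic modulus by fitting a straight line to every sufficiently long window in the early part of the stress-strain data and keeping the steepest slope that still fits well. A self-contained sketch of that sliding-window fit on synthetic data (the modulus, noise level, and window size below are arbitrary illustration values):

```python
# Sliding-window linear fit, mirroring getYoungsModulus above.
# The synthetic data and thresholds are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
strain = np.linspace(0, 0.01, 100)
stress = 200e3 * strain + rng.normal(0, 5, strain.size)  # roughly 200 GPa, in MPa

best_slope, best_chi = 0.0, np.inf
min_fit_length = 8
for i in range(strain.size - min_fit_length):
    for j in range(i + min_fit_length, strain.size):
        slope, intercept = np.polyfit(strain[i:j], stress[i:j], 1)
        residual = np.sum((slope * strain[i:j] + intercept - stress[i:j]) ** 2)
        if residual < best_chi and slope > best_slope:
            best_chi, best_slope = residual, slope

print("estimated modulus:", best_slope)  # same units as the input stress
```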
#### File: EngineeringToolbox/miscellaneous/circuitbuilder.py
```python
class Comp:
def __init__(self,start,end=None,compType=None,labels=None,arrowType=None,node=None):
if compType == None and node == None:
raise TypeError("must provide component or node.")
self.compType = compType
self.start = start
self.end = end
self.labels = labels
self.arrowType = arrowType
self.node = node
def wire(start,end,arrowType):
return Comp(start,end=end,compType="short",arrowType=arrowType)
def res(start,end,labels,arrowType):
return Comp(start,end=end,compType="R",labels=labels,arrowType=arrowType)
def Isrc(start,end,labels,arrowType):
return Comp(start,end=end,compType="I",labels=labels,arrowType=arrowType)
def Vsrc(start,end,labels,arrowType):
return Comp(start,end=end,compType="V",labels=labels,arrowType=arrowType)
def gnd(start):
return Comp(start,end=(start[0],start[1]-0.5),node="node[ground]{}")
O=(0,0)
def texCircuit(components):
circuit = []
for comp in components:
compStr = "\\draw "+str(comp.start)+" "
if comp.compType != None:
compStr += "to["+comp.compType
if comp.labels != None:
compStr += ", "+comp.labels
if comp.arrowType != None:
compStr += ", "+comp.arrowType
compStr += "]"
if comp.end != None:
compStr += " "+str(comp.end)
if comp.node != None:
if comp.end != None:
compStr += " -- "+str(comp.end)
compStr += " "+comp.node
compStr += ";"
circuit.append(compStr)
return circuit
def Thevenin_Norton(V_OC,I_SC,R_t):
import math
R_label = ""
V_label = ""
I_label = ""
if isinstance(V_OC,complex):
V_label = "{} \\angle {}^o".format(round((V_OC.imag**2 + V_OC.real**2)**0.5,3),round(math.atan2(V_OC.imag,V_OC.real),3))
else:
V_label = str(round(V_OC,3))
if isinstance(I_SC,complex):
I_label = "{} \\angle {}^o".format(round((I_SC.imag**2 + I_SC.real**2)**0.5,3),round(math.atan2(I_SC.imag,I_SC.real),3))
else:
        I_label = str(round(I_SC,3))
if isinstance(R_t,complex):
R_label = str(R_t)
else:
R_label = str(round(R_t,3))
R_t1 = Comp.res((0,3),(3,3),"R=$R_t \\text{$=$} "+R_label+" \\Omega$","-o")
R_t2 = Comp.res((15,3),(15,0),"R=$R_t \\text{$=$} "+R_label+" \\Omega$",None)
V_s1 = Comp.Vsrc((0,3),(0,0),"V=$V_{OC} \\text{$=$} "+V_label+" V$",None)
I_s2 = Comp.Isrc((12,0),(12,3),"I=$I_{SC} \\text{$=$} "+I_label+" A$",None)
w1 = Comp.wire((0,0),(3,0),"-o")
w2 = Comp.wire((15,3),(18,3),"-o")
w3 = Comp.wire((15,0),(18,0),"-o")
w4 = Comp.wire((12,0),(15,0),None)
w5 = Comp.wire((12,3),(15,3),None)
comps = [R_t1,R_t2,V_s1,I_s2,w1,w2,w3,w4,w5]
return texCircuit(comps)
``` |
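A short usage sketch of the helpers above: build a few components and render them into TikZ/circuitikz `\draw` commands with `texCircuit`. The component values and coordinates are made up for illustration:

```python
# Hypothetical usage of the Comp helpers and texCircuit defined above.
r1 = Comp.res((0, 0), (3, 0), "R=$R_1$", None)
v1 = Comp.Vsrc((0, 0), (0, 3), "V=$V_s$", None)
w1 = Comp.wire((0, 3), (3, 3), None)
g1 = Comp.gnd((3, 0))

for line in texCircuit([r1, v1, w1, g1]):
    print(line)
# first line printed: \draw (0, 0) to[R, R=$R_1$] (3, 0);
```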
{
"source": "jhpyle/docassemble-empty",
"score": 2
} |
#### File: docassemble/empty/lsc.py
```python
import requests
import copy
import json
import re
import sys
from six import text_type
from io import open
from docassemble.base.util import Organization, DADict, path_and_mimetype, DARedis, DAList, Address, objects_from_file
from math import sin, cos, sqrt, atan2, radians
__all__ = ['lsc_program_for', 'offices_for', 'cities_near', 'poverty_percentage']
base_url = "https://services3.arcgis.com/n7h3cEoHTyNCwjCf/ArcGIS/rest/services/BasicField_ServiceAreas2019/FeatureServer/0/query"
base_params = {'where': 'OBJECTID>=0', 'objectIds': '', 'time': '', 'geometryType': 'esriGeometryPoint', 'inSR': '{"wkid": 4326}', 'spatialRel': 'esriSpatialRelWithin', 'resultType': 'none', 'distance': '0.0', 'units': 'esriSRUnit_Meter', 'returnGeodetic': 'false', 'outFields': '*', 'returnGeometry': 'false', 'returnCentroid': 'false', 'multipatchOption': 'xyFootprint', 'maxAllowableOffset': '', 'geometryPrecision': '', 'outSR': '{"wkid": 4326}', 'datumTransformation': '', 'applyVCSProjection': 'false', 'returnIdsOnly': 'false', 'returnUniqueIdsOnly': 'false', 'returnCountOnly': 'false', 'returnExtentOnly': 'false', 'returnQueryGeometry': 'false', 'returnDistinctValues': 'false', 'orderByFields': '', 'groupByFieldsForStatistics': '', 'outStatistics': '', 'having': '', 'resultOffset': '', 'resultRecordCount': '', 'returnZ': 'false', 'returnM': 'false', 'returnExceededLimitFeatures': 'true', 'quantizationParameters': '', 'sqlFormat': 'none', 'f': 'pjson', 'token': ''}
office_base_url = "https://services3.arcgis.com/n7h3cEoHTyNCwjCf/ArcGIS/rest/services/LSC_offices_grantees_main_branch_(Public)/FeatureServer/0/query"
office_base_params = {'objectIds': '', 'time': '', 'geometry': '', 'geometryType': 'esriGeometryEnvelope', 'inSR': '', 'spatialRel': 'esriSpatialRelIntersects', 'resultType': 'none', 'distance': '0.0', 'units': 'esriSRUnit_Meter', 'returnGeodetic': 'false', 'outFields': '*', 'returnGeometry': 'false', 'multipatchOption': 'xyFootprint', 'maxAllowableOffset': '', 'geometryPrecision': '', 'outSR': '', 'datumTransformation': '', 'applyVCSProjection': 'false', 'returnIdsOnly': 'false', 'returnUniqueIdsOnly': 'false', 'returnCountOnly': 'false', 'returnExtentOnly': 'false', 'returnQueryGeometry': 'false', 'returnDistinctValues': 'false', 'orderByFields': '', 'groupByFieldsForStatistics': '', 'outStatistics': '', 'having': '', 'resultOffset': '', 'resultRecordCount': '', 'returnZ': 'false', 'returnM': 'false', 'returnExceededLimitFeatures': 'true', 'quantizationParameters': '', 'sqlFormat': 'none', 'f': 'pjson', 'token': ''}
lsc_programs = dict()
lsc_programs_by_rin = dict()
lsc_programs_by_serv_a = dict()
poverty = objects_from_file("docassemble.lscrefer:data/sources/poverty.yml")
def poverty_percentage(household_income, household_size, state):
try:
household_size = int(household_size)
assert household_size > 0
except:
raise Exception("poverty_percentage: invalid household size")
if state == 'HI':
index_num = 2
elif state == 'AK':
index_num = 1
else:
index_num = 0
if household_size < 9:
return 100.0 * household_income/(0.5*poverty['level'][household_size][index_num])
return 100.0 * household_income/(0.5*(poverty['level'][8][index_num] + poverty['level']['extra'][index_num] * (household_size - 8)))
def service_areas():
redis = DARedis()
result = redis.get('lsc_service_areas')
if result is None:
#sys.stderr.write('service_areas: calling arcgis.\n')
r = requests.get('https://services3.arcgis.com/n7h3cEoHTyNCwjCf/ArcGIS/rest/services/BasicFieldServiceAreas_GrantCycle/FeatureServer/0/query?where=OBJECTID%3E%3D0&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&resultType=none&distance=0.0&units=esriSRUnit_Meter&returnGeodetic=false&outFields=*&returnGeometry=false&returnCentroid=false&multipatchOption=xyFootprint&maxAllowableOffset=&geometryPrecision=&outSR=&datumTransformation=&applyVCSProjection=false&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&returnQueryGeometry=false&returnDistinctValues=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&returnExceededLimitFeatures=true&quantizationParameters=&sqlFormat=none&f=pjson&token=')
if r.status_code != 200:
redis.set('lsc_service_areas', '{}')
sys.stderr.write('load_service_areas: got error code {} from ArcGIS. Response: {}\n'.format(r.status_code, r.text))
else:
try:
the_dict = r.json()
assert 'features' in the_dict
assert len(the_dict['features']) > 0
redis.set('lsc_service_areas', r.text)
except Exception as the_err:
redis.set('lsc_service_areas', '{}')
sys.stderr.write('load_service_areas: got invalid response from server: {}\n'.format(text_type(the_err)))
redis.expire('lsc_service_areas', 60*60*24*7)
result = redis.get('lsc_service_areas')
return json.loads(result.decode())
def load_program_data():
(path, mimetype) = path_and_mimetype('docassemble.lscrefer:data/sources/Programs.json')
    with open(path, 'r', encoding='utf-8') as fp:
program_list = json.load(fp)
lsc_programs.clear()
for program_dict in program_list:
program = dict()
lsc_programs[program_dict["Serv_Area_ID"]] = program
program['name'] = program_dict["R_Legalname"].strip()
program['phone_number'] = program_dict["Local_800"].strip()
program['url'] = program_dict["Web_URL"].strip()
lsc_programs_by_rin.clear()
lsc_programs_by_serv_a.clear()
area_data = service_areas()
if 'features' not in area_data:
sys.stderr.write("area data is empty\n")
else:
for item in area_data['features']:
attribs = item['attributes']
if attribs['ServArea'] == 'MA-4':
attribs['ServArea'] = 'MA04'
attribs['ServArea_1'] = 'MA-4'
attribs['servA'] = 'MA04'
service_area = attribs['ServArea_1'].strip()
if service_area in lsc_programs:
lsc_programs_by_rin[attribs['RIN']] = lsc_programs[service_area]
lsc_programs_by_serv_a[attribs['servA']] = lsc_programs[service_area]
lsc_programs[service_area]['rin'] = attribs['RIN']
lsc_programs[service_area]['serv_a'] = attribs['servA']
else:
sys.stderr.write("Could not find {} in program info.\n".format(service_area))
def offices_for(org, by_proximity_to=None):
if org is None:
return None
params = copy.copy(office_base_params)
params['where'] = "recipID={}".format(org.rin)
r = requests.get(office_base_url, params=params)
if r.status_code != 200:
raise Exception('offices_for: got error code {} from ArcGIS. Response: {}'.format(r.status_code, r.text))
result = r.json()
offices = DAList(object_type=Address)
offices.set_random_instance_name()
for office_data in result['features']:
attribs = office_data['attributes']
office = offices.appendObject()
office.address = attribs['address'].strip()
office.city = attribs['City'].strip()
office.state = attribs['State'].strip()
office.zip = attribs['ZIP'].strip()
office.location.longitude = attribs['Longitude']
office.location.latitude = attribs['Latitude']
office.office_type = attribs['officetype'].strip()
if attribs['bldgSuite']:
office.unit = attribs['bldgSuite'].strip()
if by_proximity_to:
office.distance = distance_between(by_proximity_to.address, office)
offices.gathered = True
if by_proximity_to:
by_proximity_to.address.geolocate()
if not by_proximity_to.address.geolocate_success:
raise Exception('offices_for: failure to geolocate address')
offices.elements = sorted(offices.elements, key=lambda y: y.distance)
offices._reset_instance_names()
return offices
def distance_between(addr1, addr2):
R = 3958.8
lat1 = radians(addr1.location.latitude)
lon1 = radians(addr1.location.longitude)
lat2 = radians(addr2.location.latitude)
lon2 = radians(addr2.location.longitude)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return R * c
def cities_near(org, person):
offices = offices_for(org)
person.address.geolocate()
if not person.address.geolocate_success:
raise Exception('cities_near: failure to geolocate address')
cities = DAList(gathered=True)
cities.set_random_instance_name()
for y in sorted(offices, key=lambda y: distance_between(person.address, y)):
if y.city not in cities:
cities.append(y.city)
cities.gathered = True
return cities
def lsc_program_for(person):
person.address.geolocate()
if not person.address.geolocate_success:
raise Exception('lsc_program_for: failure to geolocate address')
params = copy.copy(base_params)
params['geometry'] = "{},{}".format(person.address.location.longitude, person.address.location.latitude)
r = requests.get(base_url, params=params)
if r.status_code != 200:
raise Exception('lsc_program_for: got error code {} from ArcGIS. Response: {}'.format(r.status_code, r.text))
result = r.json()
if not isinstance(result, dict) or 'features' not in result or not isinstance(result['features'], list):
raise Exception('lsc_program_for: unexpected response from server')
if len(result['features']) == 0:
return None
    if 'attributes' not in result['features'][0] or not isinstance(result['features'][0]['attributes'], dict):
raise Exception('lsc_program_for: unexpected response from server')
attribs = result['features'][0]['attributes']
if 'Grantee' not in attribs or 'ServArea' not in attribs:
raise Exception('lsc_program_for: missing information in response')
service_area = attribs['ServArea'].strip()
if service_area not in lsc_programs_by_serv_a:
raise Exception('lsc_program_for: service area {} not found'.format(service_area))
program = lsc_programs_by_serv_a[service_area]
result = Organization()
result.set_random_instance_name()
result.name.text = program['name']
result.phone_number = program['phone_number']
result.url = program['url']
if 'rin' in program:
result.rin = program['rin']
if 'serv_a' in program:
        result.serv_a = program['serv_a']
return result
load_program_data()
``` |
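`distance_between` above is the haversine great-circle formula with an Earth radius of 3958.8 miles. The same computation on plain latitude/longitude pairs, with two arbitrary example coordinates:

```python
# Haversine distance in miles, mirroring distance_between above.
# The two coordinate pairs are arbitrary examples.
from math import sin, cos, sqrt, atan2, radians

def haversine_miles(lat1, lon1, lat2, lon2):
    R = 3958.8  # Earth radius in miles
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    dlat, dlon = lat2 - lat1, lon2 - lon1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return R * 2 * atan2(sqrt(a), sqrt(1 - a))

print(haversine_miles(38.9072, -77.0369, 39.2904, -76.6122))  # DC to Baltimore, roughly 35 mi
```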
{
"source": "jhqthai/summer-studio-a",
"score": 2
} |
#### File: summer-studio-a/S7/a3c.py
```python
from __future__ import print_function
from collections import deque
import time
import os
import torch
from torch.autograd import Variable
# noinspection PyPep8Naming
import torch.nn.functional as F
import torch.multiprocessing as mp
import argparse
import shutil
from scipy.misc import imsave
from utils import FloatTensor, get_elapsed_time_str, SharedAdam
from envs import create_atari_env
from model import ActorCritic
# Parse program arguments
parser = argparse.ArgumentParser(description='Asynchronous Actor Critic')
parser.add_argument('--savedir', default='/tmp', type=str, metavar='PATH',
help='Dir name in which we save checkpoints')
parser.add_argument('--resume', dest='resume', type=str,
help="If checkpoint available, resume from latest")
parser.add_argument('--no-resume', dest='resume', action='store_false')
parser.set_defaults(resume=True)
parser.add_argument('--play', default='', type=str, metavar='PATH',
                    help="play your model with the path specified")
parser.add_argument('--rom', default='PongDeterministic-v4', type=str, metavar='GYMROMNAME',
help='Game ROM, e.g. PongDeterministic-v4 (default)')
args = parser.parse_args()
romname = args.rom
SEED = 1
# noinspection PyShadowingNames
def ensure_shared_grads(model, shared_model):
for param, shared_param in zip(model.parameters(),
shared_model.parameters()):
if shared_param.grad is not None:
return
shared_param._grad = param.grad
# noinspection PyShadowingNames
def train(rank, shared_model, optimizer):
"""
:param rank: worker-ID
:param shared_model: model to sync between workers
:param optimizer:
:return:
"""
# torch.manual_seed(SEED + rank)
    ac_steps = 20  # number of environment steps to roll out before each update
    max_episode_length = 10000  # hard cap on episode length before forcing a reset
gamma = 0.99
tau = 1.0
    max_grad_norm = 50.0  # clip gradients to this norm to keep updates stable
    checkpoint_n = 20  # save a checkpoint every this many episodes; increase on slower machines
    env = create_atari_env(romname)  # create the game environment; romname selects the Atari ROM
    env.seed(SEED + rank)  # seed each worker differently but reproducibly
state = env.reset()
    # convert the observation to a float tensor and add a batch dimension with unsqueeze(0)
state = Variable(torch.from_numpy(state).unsqueeze(0).type(FloatTensor), requires_grad=False)
# Selecting model, with this size of input and that kind of output
model = ActorCritic(env.observation_space.shape[0], env.action_space)
t = 0
done = True # Starting from a state when gameover is true!
episodes = 0
reward_sum = 0
reward_sum1 = 0
start_time = time.time()
best_reward = -999
isbest = 0
cx = hx = None
while True:
model.load_state_dict(shared_model.state_dict()) # Pull the up to date model from the shared model
if done: # need to reset LSTM cell's input
# the LSTM units need their own output to feed into next step
# input (hence the name of the kind: recurrent neural nets).
# At the beginning of an episode, to get things started,
# we need to allocate some initial values in the required format,
# i.e. the same size as the output of the layer.
#
# see http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM
# for details
#
# Optionally, you can remove LSTM to simplify the code
# Think: what is the possible loss?
cx = Variable(torch.zeros(1, 256)).type(FloatTensor) # torch.zeros - setting the values to all zeros since there's nothing there yet
hx = Variable(torch.zeros(1, 256)).type(FloatTensor)
else:
cx = Variable(cx.data) # takes the last computed value for the next input
hx = Variable(hx.data) # basically this is to detach from previous comp graph
states = []
values = []
log_probs = []
rewards = []
entropies = []
for i in range(ac_steps): # Running through the 20 steps
t += 1
            v, logit, (hx, cx) = model((state, (hx, cx)))  # returns the value estimate, action logits, and new LSTM state
states.append(state)
            prob = F.softmax(logit)  # convert logits to action probabilities
            log_prob = F.log_softmax(logit)  # log-probabilities, computed separately for numerical stability
            entropy = -(log_prob * prob).sum(1, keepdim=True)  # entropy bonus encourages exploration
entropies.append(entropy)
            # detach the sampled action so gradients do not flow back through the sampling operation
            action = prob.multinomial().detach()  # detach -- so backprop will NOT go through multinomial()
# use the current action as an index to get the
# corresponding log probability
            log_prob = log_prob.gather(1, action)  # pick out the log-probability of the sampled action
            action = action.data[0, 0]  # convert the 1x1 tensor into a plain Python integer
            # step the environment with the chosen action; it returns the next state, reward, done flag,
            # and an info dict we do not use (hence the underscore)
state, reward, done, _ = env.step(action)
reward_sum += reward
            reward_sum1 += reward  # second accumulator, reset on the checkpoint interval rather than every 10 episodes
done = (done or t >= max_episode_length)
if done:
t_ = t
t = 0
state = env.reset()
episodes += 1
if episodes % 10 == 0:
time_str = time.strftime(
"%Hh %Mm %Ss", time.gmtime(time.time() - start_time))
print("Time {}, worker-{} episode {} "
"mean episode reward {}, "
"episode length {}".
format(time_str, rank, episodes, reward_sum / 10.0, t_))
reward_sum = 0.0
if episodes % checkpoint_n == 0:
ave_reward = reward_sum1 / checkpoint_n
if best_reward < ave_reward:
isbest = 1
best_reward = ave_reward
print("Saving checkpoint Time {}, worker-{} episode {} "
"mean episode reward {}, "
"episode length {} best_reward {}".
format(get_elapsed_time_str(), rank, episodes, ave_reward, t_, best_reward))
checkpoint_fname = os.path.join(
args.savedir,
args.rom + '_worker' + str(rank) + '_' + str(episodes))
save_checkpoint({'epoch': episodes,
'average_reward': ave_reward,
'time': time.time(),
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, isbest, checkpoint_fname)
reward_sum1 = 0.0
state = Variable(torch.from_numpy(state).unsqueeze(0).type(FloatTensor), requires_grad=False)
reward = max(min(reward, 1), -1)
values.append(v)
log_probs.append(log_prob) # Keep record
rewards.append(reward)
if done:
break
# We reach here because either
# i) an episode ends, such as game over
# ii) we have explored certain steps into the future and now it is
        # time to look back and summarise the experience gathered so far
if done:
R = torch.zeros(1, 1).type(FloatTensor) # If game over, the game over stage receive a reward of 0
else:
value, _, _ = model((state, (hx, cx))) # if its not game over, then we will use the model to evaluate the reward
R = value.data
values.append(Variable(R))
critic_loss = 0
actor_loss = 0
R = Variable(R)
gae = 0
for i in reversed(range(len(rewards))):
R = gamma * R + rewards[i] # R - longterm reward
advantage = R - values[i] # type: Variable, advantage against the average
# Compare the actual long-term reward. Note: we are reversing the
# experience of a complete trajectory. If the full length is 100
# (time indexes are among 0, 1, 2, ..., 99), and now i=50, that means
# we have processed all information in steps, 51, 52, ..., 99
# and R will contain the actual long term reward at time step 51 at
# the beginning of this step. The above computation injects the reward
# information in step 50 to R. Now R is the long-term reward at this
# step.
#
# So-called advantage is then the "unexpected gain/loss". It forms the base
# of evaluating the action taken at this step (50).
#
# critic_loss accumulates those "exceptional gain/loss" so that later we will
# adjust our expectation for each state and reduce future exceptions (to better
            # evaluate actions, say, the advantage against expectation is only meaningful
# when the expectation itself is meaningful).
critic_loss += 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
# see https://arxiv.org/abs/1506.02438
# we can use advantage in the computation of the direction to adjust policy,
# but the manipulation here improves stability (as claims by the paper).
#
# Note advantage implicitly contributes to GAE, since it helps
# achieve a good estimation of state-values.
td_error = rewards[i] + gamma * values[i + 1].data - values[i].data
gae = gae * gamma * tau + td_error
# log_probs[i] is the log-probability(action-taken). If GAE is great, that
# means the choice we had made was great, and we want to make the same
# action decision in future -- make log_probs[i] large. Otherwise,
# we add log_probs to our regret and will be less likely to take the same
# action in future.
#
# entropy means the variety in a probabilistic distribution,
# to encourage big entropies is to make more exploration.
actor_loss -= (Variable(gae) * log_probs[i] + 0.01 * entropies[i])
        optimizer.zero_grad()  # clear any existing gradients before backpropagation
total_loss = actor_loss + critic_loss * 0.5 # type: Variable
        total_loss.backward()  # backpropagate the combined actor/critic loss
# this is to improve stability
torch.nn.utils.clip_grad_norm(model.parameters(), max_grad_norm)
        ensure_shared_grads(model, shared_model)  # copy the local gradients onto the shared model before stepping the optimizer
optimizer.step()
def save_checkpoint(state, is_best, filename):
torch.save(state, filename)
if is_best:
dirname, _ = os.path.split(filename)
best_fname = os.path.join(dirname, 'best.tar')
shutil.copyfile(filename, best_fname)
# noinspection PyShadowingNames
def test(shared_model, render=0):
env = create_atari_env(args.rom)
if render == 1:
env.render()
model = ActorCritic(env.observation_space.shape[0], env.action_space)
model.eval()
state = env.reset()
state = torch.from_numpy(state)
reward_sum = 0
done = True
    # a quick hack to prevent the agent from getting stuck
episode_length = 0
cx = hx = None
while True:
episode_length += 1
# Sync with the shared model
if done:
model.load_state_dict(shared_model.state_dict())
cx = Variable(torch.zeros(1, 256).type(FloatTensor), volatile=True)
hx = Variable(torch.zeros(1, 256).type(FloatTensor), volatile=True)
else:
cx = Variable(cx.data, volatile=True)
hx = Variable(hx.data, volatile=True)
value, logit, (hx, cx) = model((Variable(
state.unsqueeze(0).type(FloatTensor), volatile=True), (hx, cx)))
prob = F.softmax(logit)
# print logit.data.numpy()
action = prob.max(1, keepdim=True)[1].data.cpu().numpy()
state, reward, done, _ = env.step(action[0, 0])
if render:
#env.render()
# Spits out images in the selected path
img = env.render('rgb_array')
imsave('/opt/tmp/img/pac-20000/frame_{:06d}.png'.format(episode_length), img)
"""
TEST-DEMO-ONLY
state_im = state.numpy()
state_im.transpose()
scipy.misc.imageio.saveim(state_im, filename-with-time-step-number)
#ffmpeg
END-WORKZONE
"""
done = done or episode_length >= 10000
reward_sum += reward
        # a quick hack to prevent the agent from getting stuck
# actions.append(action[0, 0])
# if actions.count(actions[0]) == actions.maxlen:
# done = True
if done:
print("Time {}, episode reward {}, episode length {}".
format(get_elapsed_time_str(), reward_sum, episode_length))
reward_sum = 0
episode_length = 0
state = env.reset()
time.sleep(60)
state = torch.from_numpy(state)
if __name__ == '__main__':
env = create_atari_env(args.rom)
# torch.manual_seed(SEED)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
shared_model.share_memory()
# print (shared_model.conv1._parameters['weight'].data.is_cuda)
optimizer = SharedAdam(shared_model.parameters(), lr=0.0001)
optimizer.share_memory()
if args.play:
if os.path.isfile(args.play):
print("=> loading checkpoint '{}'".format(args.play))
checkpoint = torch.load(args.play)
# args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
shared_model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.play))
test(shared_model, render=1) # let it play the game
exit(0)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
shared_model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
mp.set_start_method('spawn')
processes = []
p = mp.Process(target=test, args=(shared_model, 0))
p.start()
processes.append(p)
    # start the training worker processes
    for rank in range(0, 1):  # this range controls how many training workers run in parallel
print("Starting {}".format(rank))
p = mp.Process(target=train, args=(rank, shared_model, optimizer))
p.start() # Start point
processes.append(p)
for p in processes:
p.join()
``` |
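The comments in `train()` above describe Generalized Advantage Estimation: temporal-difference errors are accumulated backwards through the rollout with decay `gamma * tau`, and the result weights each step's log-probability in the actor loss. A tiny numeric sketch of that backward pass on a made-up three-step rollout (the rewards and value estimates are arbitrary):

```python
# Backward GAE pass over a toy rollout, mirroring the loop in train().
# Rewards and value estimates are made-up numbers for illustration.
gamma, tau = 0.99, 1.0
rewards = [0.0, 0.0, 1.0]          # reward at each of the 3 steps
values = [0.1, 0.2, 0.5, 0.0]      # V(s_0..s_2) plus the bootstrap value V(s_3)

gae = 0.0
advantages = []
for i in reversed(range(len(rewards))):
    td_error = rewards[i] + gamma * values[i + 1] - values[i]
    gae = gae * gamma * tau + td_error
    advantages.append(gae)

advantages.reverse()
print(advantages)  # per-step advantages used to weight the log-probabilities
```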
{
"source": "jhrcook/awesome-streamlit",
"score": 4
} |
#### File: gallery/sudoku_solver/sudoku_solver.py
```python
import streamlit as st
# Your imports goes below
import random
import re
from itertools import product
import streamlit as st
import pandas as pd
import numpy as np
from gallery.sudoku_solver.pyomo_sudoku_solver import solve_sudoku
from gallery.sudoku_solver.ui_auxiliary import empty_board_str, board_matrix_to_dataframe
def main():
st.title("Sudoku Solver")
st.markdown("A fast, interactive web application for solving Sudoku puzzles using the optimization library [Pyomo](https://www.pyomo.org/).")
# Your code goes below
random.seed(0)
input_data = st.text_area(
label="Enter the starting state of the board.", value=empty_board_str, height=400
)
rows = np.repeat(np.arange(1, 10), 9)
cols = np.tile(np.arange(1, 10), 9)
values = []
for line in input_data.split("\n"):
        if "-" not in line:
vals = re.findall("[0-9]", line.rstrip())
values += [int(x) for x in vals]
if len(rows) == len(cols) == len(values):
known_cells = pd.DataFrame({"i": rows, "j": cols, "k": values})
board = known_cells.copy()
known_cells = known_cells[known_cells["k"] != 0]
board.k = ["" if x == 0 else str(x) for x in board.k]
board = board.pivot(index="i", columns="j", values="k")
if st.button("Solve!"):
st.markdown("**Solution**")
res = solve_sudoku(known_cells)
st.write(board_matrix_to_dataframe(res))
else:
st.markdown("**Board layout**")
st.write(board)
else:
st.write("Something is wrong with the layout of the board. Please try again.")
main()
``` |
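The text area above is parsed by pairing each cell's (row, column) index with the digit found at that position; zeros mark empty cells and are dropped before the puzzle is handed to the solver. A compact sketch of that parsing step on a reduced 3x3 grid (the real app uses 9x9):

```python
# Sketch of the board-parsing step above on a reduced 3x3 grid;
# zeros mark empty cells and are filtered out.
import re
import numpy as np
import pandas as pd

text = "1 0 3\n0 0 0\n0 2 0"
n = 3
rows = np.repeat(np.arange(1, n + 1), n)
cols = np.tile(np.arange(1, n + 1), n)
values = []
for line in text.split("\n"):
    values += [int(x) for x in re.findall("[0-9]", line)]

known_cells = pd.DataFrame({"i": rows, "j": cols, "k": values})
known_cells = known_cells[known_cells["k"] != 0]
print(known_cells)  # only the pre-filled cells are passed to the solver
```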
{
"source": "jhrdt/django-public-admin",
"score": 3
} |
#### File: django-public-admin/public_admin/admin.py
```python
from django.contrib.admin import ModelAdmin
from public_admin.sites import PublicAdminSite
class PublicModelAdmin(ModelAdmin):
"""This mimics the Django's native ModelAdmin but filters URLs that should
not exist in a public admin, and deals with request-based permissions."""
def has_view_permission(self, request, obj=None):
"""Only allows view requests if the method is GET"""
return request.method == "GET"
def has_add_permission(self, request):
"""Denies permission to any request trying to add new objects."""
return False
def has_change_permission(self, request, obj=None):
"""Denies permission to any request trying to change objects."""
return False
def has_delete_permission(self, request, obj=None):
"""Denies permission to any request trying to delete objects."""
return False
def get_urls(self):
"""Filter out the URLs that should not exist in a public admin."""
return [url for url in super().get_urls() if PublicAdminSite.valid_url(url)]
```
#### File: django-public-admin/tests/test_dummy_user.py
```python
from public_admin.sites import DummyUser, PublicApp
PUBLIC_APPS = (
PublicApp("my_open_house", ("beverage", "snack")),
PublicApp("library", ("book", "journal")),
)
def test_has_module_permissions():
user = DummyUser(PUBLIC_APPS)
assert user.has_module_perms("my_open_house")
assert user.has_module_perms("library")
assert not user.has_module_perms("core")
def test_has_permissions():
user = DummyUser(PUBLIC_APPS)
assert user.has_perm("my_open_house.view_beverage")
assert user.has_perm("my_open_house.view_snack")
assert not user.has_perm("my_open_house.view_bedroom")
assert user.has_perm("library.view_book")
assert user.has_perm("library.view_journal")
assert not user.has_perm("library.view_fine")
``` |
{
"source": "jhreinholdt/caesar-cipher",
"score": 4
} |
#### File: jhreinholdt/caesar-cipher/ceasar_cipher.py
```python
from types import *
import string
def encode(key, plaintext):
assert type(key) is int, "key is not an integer: %r" % key
ciphertext = ''
for char in plaintext:
# print((ord(char)+key)-97)
cipherchr = chr((((ord(char) + key) - 97) % 26) + 97)
ciphertext += cipherchr
# print("Plaintext: ", char, " Ciphertext: ", cipherchr)
# print("Ciphertext: ", ciphertext)
return ciphertext
def decode(key, ciphertext):
assert type(key) is int, "key is not an integer: %r" % key
plaintext = ''
for char in ciphertext:
plainchr = chr((((ord(char) - key) - 97) % 26) + 97)
plaintext += plainchr
# print("Plaintext: ", plaintext)
return plaintext
def main():
ciphertext = encode(25, input("Enter plaintext: "))
print("Ciphertext: ", ciphertext)
for key in range(1,26):
plaintext = decode(key, ciphertext)
print("Decoded plaintext with key#", key, ":", plaintext)
if __name__ == '__main__':
main()
``` |
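A quick round-trip of the two functions above; note that they assume lowercase letters only, since the arithmetic is anchored at `ord('a') == 97`. The import assumes the module above is importable as `ceasar_cipher`:

```python
# Round-trip example for encode/decode above (lowercase letters only).
from ceasar_cipher import encode, decode

ciphertext = encode(3, "attackatdawn")
print(ciphertext)             # dwwdfndwgdzq
print(decode(3, ciphertext))  # attackatdawn
```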
{
"source": "jhrmnn/hydra",
"score": 2
} |
#### File: hydra/hydra/utils.py
```python
import logging.config
import os
from pathlib import Path
from typing import Any, Callable
import hydra._internal.instantiate._instantiate2
import hydra.types
from hydra._internal.utils import _locate
from hydra.core.hydra_config import HydraConfig
log = logging.getLogger(__name__)
# Instantiation related symbols
instantiate = hydra._internal.instantiate._instantiate2.instantiate
call = instantiate
ConvertMode = hydra.types.ConvertMode
def get_class(path: str) -> type:
try:
cls = _locate(path)
if not isinstance(cls, type):
raise ValueError(f"Located non-class in {path} : {type(cls).__name__}")
return cls
except Exception as e:
log.error(f"Error initializing class at {path} : {e}")
raise e
def get_method(path: str) -> Callable[..., Any]:
try:
cl = _locate(path)
if not callable(cl):
raise ValueError(f"Non callable object located : {type(cl).__name__}")
return cl
except Exception as e:
log.error(f"Error getting callable at {path} : {e}")
raise e
# Alias for get_method
get_static_method = get_method
def get_original_cwd() -> str:
"""
:return: the original working directory the Hydra application was launched from
"""
if not HydraConfig.initialized():
raise ValueError(
"get_original_cwd() must only be used after HydraConfig is initialized"
)
ret = HydraConfig.get().runtime.cwd
assert ret is not None and isinstance(ret, str)
return ret
def to_absolute_path(path: str) -> str:
"""
converts the specified path to be absolute path.
if the input path is relative, it's interpreted as relative to the original working directory
if it's absolute, it's returned as is
:param path: path to convert
:return:
"""
p = Path(path)
if not HydraConfig.initialized():
base = Path(os.getcwd())
else:
base = Path(get_original_cwd())
if p.is_absolute():
ret = p
else:
ret = base / p
return str(ret)
```
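A small sketch of how `to_absolute_path` behaves, following the implementation above: absolute inputs are returned unchanged, while relative inputs are joined to the original working directory (or to `os.getcwd()` when no Hydra app is running). The paths below are illustrative:

```python
# Behaviour of to_absolute_path per the implementation above; assumes
# Hydra is installed and no Hydra app is currently running.
from hydra.utils import to_absolute_path

print(to_absolute_path("/etc/hosts"))      # absolute input -> returned as-is
print(to_absolute_path("data/train.csv"))  # relative input -> joined to the base cwd
```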
#### File: tests/instantiate/__init__.py
```python
import collections
import collections.abc
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from omegaconf import MISSING
from hydra.types import TargetConf
class ArgsClass:
def __init__(self, *args: Any, **kwargs: Any) -> None:
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
self.args = args
self.kwargs = kwargs
def __repr__(self) -> str:
return f"self.args={self.args},self.kwarg={self.kwargs}"
def __eq__(self, other: Any) -> Any:
if isinstance(other, ArgsClass):
return self.args == other.args and self.kwargs == other.kwargs
else:
return NotImplemented
def add_values(a: int, b: int) -> int:
return a + b
def module_function(x: int) -> int:
return x
@dataclass
class AClass:
a: Any
b: Any
c: Any
d: Any = "default_value"
@staticmethod
def static_method(z: int) -> int:
return z
@dataclass
class BClass:
a: Any
b: Any
c: Any = "c"
d: Any = "d"
@dataclass
class TargetInParamsClass:
target: Any
@dataclass
class UntypedPassthroughConf:
_target_: str = "tests.instantiate.UntypedPassthroughClass"
a: Any = MISSING
@dataclass
class UntypedPassthroughClass:
a: Any
# Type not legal in a config
class IllegalType:
def __eq__(self, other: Any) -> Any:
return isinstance(other, IllegalType)
@dataclass
class AnotherClass:
x: int
class ASubclass(AnotherClass):
@classmethod
def class_method(cls, y: int) -> Any:
return cls(y + 1)
@staticmethod
def static_method(z: int) -> int:
return z
class Parameters:
def __init__(self, params: List[float]):
self.params = params
def __eq__(self, other: Any) -> Any:
if isinstance(other, Parameters):
return self.params == other.params
return False
def __deepcopy__(self, memodict: Any = {}) -> Any:
raise NotImplementedError("Pytorch parameters does not support deepcopy")
@dataclass
class Adam:
params: Parameters
lr: float = 0.001
betas: Tuple[float, ...] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: int = 0
amsgrad: bool = False
@dataclass
class NestingClass:
a: ASubclass = ASubclass(10)
nesting = NestingClass()
class ClassWithMissingModule:
def __init__(self) -> None:
import some_missing_module # type: ignore # noqa: F401
self.x = 1
@dataclass
class AdamConf:
_target_: str = "tests.instantiate.Adam"
lr: float = 0.001
betas: Tuple[float, ...] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: int = 0
amsgrad: bool = False
@dataclass
class BadAdamConf(TargetConf):
# Missing str annotation
_target_ = "tests.instantiate.Adam"
@dataclass
class User:
name: str = MISSING
age: int = MISSING
@dataclass
class UserGroup:
name: str = MISSING
users: List[User] = MISSING
# RECURSIVE
# Classes
class Transform:
...
class CenterCrop(Transform):
def __init__(self, size: int):
self.size = size
def __eq__(self, other: Any) -> Any:
if isinstance(other, type(self)):
return self.size == other.size
else:
return False
class Rotation(Transform):
def __init__(self, degrees: int):
self.degrees = degrees
def __eq__(self, other: Any) -> Any:
if isinstance(other, type(self)):
return self.degrees == other.degrees
else:
return False
class Compose:
transforms: List[Transform]
def __init__(self, transforms: List[Transform]):
self.transforms = transforms
def __eq__(self, other: Any) -> Any:
if isinstance(other, type(self)):
return self.transforms == other.transforms
else:
return False
class Tree:
value: Any
# annotated any because of non recursive instantiation tests
left: Any = None
right: Any = None
def __init__(self, value: Any, left: Any = None, right: Any = None) -> None:
self.value = value
self.left = left
self.right = right
def __eq__(self, other: Any) -> Any:
if isinstance(other, type(self)):
return (
self.value == other.value
and self.left == other.left
and self.right == other.right
)
else:
return False
def __repr__(self) -> str:
return f"Tree(value={self.value}, left={self.left}, right={self.right})"
class Mapping:
dictionary: Optional[Dict[str, "Mapping"]] = None
value: Any = None
def __init__(
self, value: Any = None, dictionary: Optional[Dict[str, "Mapping"]] = None
) -> None:
self.dictionary = dictionary
self.value = value
def __eq__(self, other: Any) -> Any:
if isinstance(other, type(self)):
return self.dictionary == other.dictionary and self.value == other.value
else:
return False
def __repr__(self) -> str:
return f"dictionary={self.dictionary}"
# Configs
@dataclass
class TransformConf:
...
@dataclass
class CenterCropConf(TransformConf):
_target_: str = "tests.instantiate.CenterCrop"
size: int = MISSING
@dataclass
class RotationConf(TransformConf):
_target_: str = "tests.instantiate.Rotation"
degrees: int = MISSING
@dataclass
class ComposeConf:
_target_: str = "tests.instantiate.Compose"
transforms: List[TransformConf] = MISSING
@dataclass
class TreeConf:
_target_: str = "tests.instantiate.Tree"
left: Optional["TreeConf"] = None
right: Optional["TreeConf"] = None
value: Any = MISSING
@dataclass
class MappingConf:
_target_: str = "tests.instantiate.Mapping"
dictionary: Optional[Dict[str, "MappingConf"]] = None
def __init__(self, dictionary: Optional[Dict[str, "MappingConf"]] = None):
self.dictionary = dictionary
@dataclass
class SimpleDataClass:
a: Any = None
b: Any = None
class SimpleClass:
a: Any = None
b: Any = None
def __init__(self, a: Any, b: Any) -> None:
self.a = a
self.b = b
def __eq__(self, other: Any) -> Any:
if isinstance(other, SimpleClass):
return self.a == other.a and self.b == other.b
return False
@dataclass
class SimpleClassPrimitiveConf:
_target_: str = "tests.instantiate.SimpleClass"
_convert_: str = "partial"
a: Any = None
b: Any = None
@dataclass
class SimpleClassNonPrimitiveConf:
_target_: str = "tests.instantiate.SimpleClass"
_convert_: str = "none"
a: Any = None
b: Any = None
@dataclass
class SimpleClassDefaultPrimitiveConf:
_target_: str = "tests.instantiate.SimpleClass"
a: Any = None
b: Any = None
@dataclass
class NestedConf:
_target_: str = "tests.instantiate.SimpleClass"
a: Any = User(name="a", age=1)
b: Any = User(name="b", age=2)
def recisinstance(got: Any, expected: Any) -> bool:
"""Compare got with expected type, recursively on dict and list."""
if not isinstance(got, type(expected)):
return False
if isinstance(expected, collections.abc.Mapping):
return all(recisinstance(got[key], expected[key]) for key in expected)
elif isinstance(expected, collections.abc.Iterable):
return all(recisinstance(got[idx], exp) for idx, exp in enumerate(expected))
return True
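# Illustrative behaviour of the helper above (comments added for clarity; not part
# of the upstream test module):
#   recisinstance({"a": [1, 2]}, {"a": [0, 0]})  -> True   (same nested container types)
#   recisinstance({"a": [1, 2]}, {"a": (0, 0)})  -> False  (list where a tuple is expected)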
```
#### File: tests/test_examples/test_advanced_config_search_path.py
```python
import re
from pathlib import Path
from typing import List, Optional
from omegaconf import OmegaConf
from pytest import mark
from hydra.test_utils.test_utils import (
chdir_hydra_root,
run_python_script,
run_with_error,
)
chdir_hydra_root()
@mark.parametrize(
"args,expected, error",
[
([], {"dataset": {"name": "cifar10", "path": "/datasets/cifar10"}}, None),
(
["dataset=imagenet"],
{"dataset": {"name": "imagenet", "path": "/datasets/imagenet"}},
None,
),
(
["hydra.searchpath=[]", "dataset=imagenet"],
{"dataset": {"name": "imagenet", "path": "/datasets/imagenet"}},
"Could not find 'dataset/imagenet'",
),
],
)
def test_config_search_path(
args: List[str], expected: str, tmpdir: Path, error: Optional[str]
) -> None:
cmd = [
"examples/advanced/config_search_path/my_app.py",
"hydra.run.dir=" + str(tmpdir),
]
cmd.extend(args)
if error is not None:
ret = run_with_error(cmd)
assert re.search(re.escape(error), ret) is not None
else:
result, _err = run_python_script(cmd)
assert OmegaConf.create(result) == expected
```
#### File: configen/example/my_app.py
```python
import hydra
# Generated config dataclasses
from example.config.configen.samples.my_module import AdminConf, UserConf
from hydra.core.config_store import ConfigStore
from omegaconf import DictConfig
# Underlying objects
from configen.samples.my_module import Admin, User
ConfigStore.instance().store(
name="config_schema",
node={
"user": UserConf,
"admin": AdminConf,
},
)
@hydra.main(config_path=".", config_name="config")
def my_app(cfg: DictConfig) -> None:
user: User = hydra.utils.instantiate(cfg.user)
admin: Admin = hydra.utils.instantiate(cfg.admin)
print(user)
print(admin)
if __name__ == "__main__":
my_app()
``` |
{
"source": "jhrmnn/QCEngine",
"score": 2
} |
#### File: qcengine/procedures/base.py
```python
from typing import Set
from ..exceptions import InputError, ResourceError
from .geometric import GeometricProcedure
__all__ = ["register_procedure", "get_procedure", "list_all_procedures", "list_available_procedures"]
procedures = {}
def register_procedure(entry_point: "ProcedureHarness") -> None:
"""
Register a new ProcedureHarness with QCEngine
"""
name = entry_point.name
if name.lower() in procedures.keys():
raise ValueError("{} is already a registered procedure.".format(name))
procedures[name.lower()] = entry_point
def get_procedure(name: str) -> "ProcedureHarness":
"""
    Returns the registered procedure harness for ``name``
"""
name = name.lower()
if name not in procedures:
raise InputError(f"Procedure {name} is not registered to QCEngine.")
ret = procedures[name]
if not ret.found():
raise ResourceError(f"Procedure {name} is registered with QCEngine, but cannot be found.")
return ret
def list_all_procedures() -> Set[str]:
"""
List all procedures registered by QCEngine.
"""
return set(procedures.keys())
def list_available_procedures() -> Set[str]:
"""
    List all procedures that can be executed (found) by QCEngine.
"""
ret = set()
for k, p in procedures.items():
if p.found():
ret.add(k)
return ret
register_procedure(GeometricProcedure())
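# Usage sketch (comments added for illustration): after the registration above,
#   list_all_procedures()        # -> {"geometric"}
#   list_available_procedures()  # subset of the above that is actually installed
#   get_procedure("geometric")   # returns the harness if geomeTRIC is installed,
#                                # otherwise raises ResourceError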
```
#### File: qcengine/procedures/model.py
```python
import abc
from typing import Any, Dict, Union
from pydantic import BaseModel
from ..util import model_wrapper
class ProcedureHarness(BaseModel, abc.ABC):
name: str
procedure: str
class Config:
        allow_mutation = False
        extra = "forbid"
def __init__(self, **kwargs):
super().__init__(**{**self._defaults, **kwargs})
@abc.abstractmethod
def build_input_model(self, data: Union[Dict[str, Any], "BaseModel"], raise_error: bool = True) -> "BaseModel":
"""
Build and validate the input model, passes if the data was a normal BaseModel input.
Parameters
----------
data : Union[Dict[str, Any], 'BaseModel']
A data blob to construct the model from or the input model itself
raise_error : bool, optional
Raise an error or not if the operation failed.
Returns
-------
BaseModel
The input model for the procedure.
"""
@abc.abstractmethod
def compute(self, input_data: "BaseModel", config: "TaskConfig") -> "BaseModel":
pass
@abc.abstractmethod
def found(self, raise_error: bool = False) -> bool:
"""
Checks if the program can be found.
Returns
-------
bool
            If the procedure was found or not.
"""
def _build_model(self, data: Dict[str, Any], model: "BaseModel") -> "BaseModel":
"""
Quick wrapper around util.model_wrapper for inherited classes
"""
return model_wrapper(data, model)
def get_version(self) -> str:
"""Finds procedure, extracts version, returns normalized version string.
Returns
-------
str
Return a valid, safe python version string.
"""
```
#### File: programs/gamess/keywords.py
```python
import collections
import textwrap
from typing import Any, Dict, Tuple
def format_keyword(keyword: str, val: Any, lop_off: bool = True) -> Tuple[str, str]:
"""Reformat value `val` for `keyword` from python into GAMESS-speak."""
text = ""
# Transform booleans into Fortran booleans
if str(val) == "True":
text += ".true."
elif str(val) == "False":
text += ".false."
# No Transform
else:
text += str(val).lower()
if lop_off:
return keyword[7:].lower(), text
else:
return keyword.lower(), text
def format_keywords(keywords: Dict[str, Any]) -> str:
"""From GAMESS-directed, non-default `keywords` dictionary, write a GAMESS deck."""
grouped_options = collections.defaultdict(dict)
for group_key, val in keywords.items():
group, key = group_key.split("__")
grouped_options[group.lower()][key.lower()] = val
grouped_lines = {}
for group, opts in sorted(grouped_options.items()):
line = []
line.append(f"${group.lower()}")
for key, val in sorted(grouped_options[group].items()):
line.append("=".join(format_keyword(key, val, lop_off=False)))
line.append("$end\n")
grouped_lines[group] = textwrap.fill(" ".join(line), initial_indent=" ", subsequent_indent=" ")
return "\n".join(grouped_lines.values()) + "\n"
```
#### File: programs/gamess/runner.py
```python
import copy
import pprint
from decimal import Decimal
from typing import Any, Dict, Optional
import qcelemental as qcel
from qcelemental.models import AtomicResult
from qcelemental.util import safe_version, unnp, which
from ...exceptions import InputError
from ...util import execute
from ..model import ProgramHarness
from .germinate import muster_modelchem
from .harvester import harvest
from .keywords import format_keywords
pp = pprint.PrettyPrinter(width=120, compact=True, indent=1)
class GAMESSHarness(ProgramHarness):
"""
Notes
-----
Required edits to the ``rungms`` script are as follows::
set SCR=./ # will be managed by QCEngine instead
set USERSCR=./ # ditto
set GMSPATH=/home/psilocaluser/gits/gamess # full path to installation
"""
_defaults = {
"name": "GAMESS",
"scratch": True,
"thread_safe": False,
"thread_parallel": True,
"node_parallel": True,
"managed_memory": True,
}
version_cache: Dict[str, str] = {}
class Config(ProgramHarness.Config):
pass
@staticmethod
def found(raise_error: bool = False) -> bool:
return which(
"rungms",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via https://www.msg.chem.iastate.edu/GAMESS/GAMESS.html",
)
def get_version(self) -> str:
self.found(raise_error=True)
which_prog = which("rungms")
if which_prog not in self.version_cache:
success, output = execute([which_prog, "v.inp"], {"v.inp": ""})
if success:
for line in output["stdout"].splitlines():
if "GAMESS VERSION" in line:
branch = " ".join(line.strip(" *\t").split()[3:])
self.version_cache[which_prog] = safe_version(branch)
return self.version_cache[which_prog]
def compute(self, input_data: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
self.found(raise_error=True)
job_inputs = self.build_input(input_data, config)
success, dexe = self.execute(job_inputs)
if "INPUT HAS AT LEAST ONE SPELLING OR LOGIC MISTAKE" in dexe["stdout"]:
raise InputError(dexe["stdout"])
if success:
dexe["outfiles"]["stdout"] = dexe["stdout"]
dexe["outfiles"]["stderr"] = dexe["stderr"]
return self.parse_output(dexe["outfiles"], input_data)
def build_input(
self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None
) -> Dict[str, Any]:
gamessrec = {"infiles": {}, "scratch_directory": config.scratch_directory}
opts = copy.deepcopy(input_model.keywords)
# Handle memory
# for gamess, [GiB] --> [MW]
opts["system__mwords"] = int(config.memory * (1024 ** 3) / 8e6)
# Handle molecule
molcmd, moldata = input_model.molecule.to_string(dtype="gamess", units="Bohr", return_data=True)
opts.update(moldata["keywords"])
# Handle calc type and quantum chemical method
opts.update(muster_modelchem(input_model.model.method, input_model.driver.derivative_int()))
# Handle basis set
# * for gamess, usually insufficient b/c either ngauss or ispher needed
opts["basis__gbasis"] = input_model.model.basis
# Handle conversion from schema (flat key/value) keywords into local format
optcmd = format_keywords(opts)
gamessrec["infiles"]["gamess.inp"] = optcmd + molcmd
gamessrec["command"] = [which("rungms"), "gamess"] # rungms JOB VERNO NCPUS >& JOB.log &
return gamessrec
# Note decr MEMORY=100000 to get
# ***** ERROR: MEMORY REQUEST EXCEEDS AVAILABLE MEMORY
# to test gms fail
# $CONTRL SCFTYP=ROHF MULT=3 RUNTYP=GRADIENT COORD=CART $END
# $SYSTEM TIMLIM=1 MEMORY=800000 $END
# $SCF DIRSCF=.TRUE. $END
# $BASIS GBASIS=STO NGAUSS=2 $END
# $GUESS GUESS=HUCKEL $END
# $DATA
# Methylene...3-B-1 state...ROHF/STO-2G
# Cnv 2
#
# Hydrogen 1.0 0.82884 0.7079 0.0
# Carbon 6.0
# Hydrogen 1.0 -0.82884 0.7079 0.0
# $END
def execute(self, inputs, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None):
success, dexe = execute(
inputs["command"], inputs["infiles"], [], scratch_messy=False, scratch_directory=inputs["scratch_directory"]
)
return success, dexe
def parse_output(self, outfiles: Dict[str, str], input_model: "AtomicInput") -> "AtomicResult":
# gamessmol, if it exists, is dinky, just a clue to geometry of gamess results
qcvars, gamessgrad, gamessmol = harvest(input_model.molecule, outfiles["stdout"])
if gamessgrad is not None:
qcvars["CURRENT GRADIENT"] = gamessgrad
qcvars = unnp(qcvars, flat=True)
output_data = {
"schema_name": "qcschema_output",
"molecule": gamessmol,
"schema_version": 1,
"extras": {},
"properties": {"nuclear_repulsion_energy": gamessmol.nuclear_repulsion_energy()},
"return_result": qcvars[f"CURRENT {input_model.driver.upper()}"],
"stdout": outfiles["stdout"],
}
# got to even out who needs plump/flat/Decimal/float/ndarray/list
output_data["extras"]["qcvars"] = {
k.upper(): float(v) if isinstance(v, Decimal) else v for k, v in qcel.util.unnp(qcvars, flat=True).items()
}
# copy qcvars into schema where possible
qcvars_to_properties = {
"DFT XC ENERGY": "scf_xc_energy",
"ONE-ELECTRON ENERGY": "scf_one_electron_energy",
"TWO-ELECTRON ENERGY": "scf_two_electron_energy",
"SCF TOTAL ENERGY": "scf_total_energy",
"MP2 CORRELATION ENERGY": "mp2_correlation_energy",
"MP2 TOTAL ENERGY": "mp2_total_energy",
"CCSD CORRELATION ENERGY": "ccsd_correlation_energy",
"CCSD TOTAL ENERGY": "ccsd_total_energy",
"CCSD(T) CORRELATION ENERGY": "ccsd_prt_pr_correlation_energy",
"CCSD(T) TOTAL ENERGY": "ccsd_prt_pr_total_energy",
}
for qcvar in qcvars:
if qcvar in qcvars_to_properties:
output_data["properties"][qcvars_to_properties[qcvar]] = qcvars[qcvar]
if {"SCF DIPOLE X", "SCF DIPOLE Y", "SCF DIPOLE Z"} & set(qcvars.keys()):
conv = Decimal(qcel.constants.conversion_factor("debye", "e * bohr"))
output_data["properties"]["scf_dipole_moment"] = [
qcvars["SCF DIPOLE X"] * conv,
qcvars["SCF DIPOLE Y"] * conv,
qcvars["SCF DIPOLE Z"] * conv,
]
output_data["success"] = True
return AtomicResult(**{**input_model.dict(), **output_data})
```
#### File: programs/tests/test_turbomole.py
```python
import numpy as np
import pytest
import qcelemental
from qcelemental.testing import compare_values
import qcengine as qcng
from qcengine.testing import using
@pytest.fixture
def h2o():
mol = qcelemental.models.Molecule.from_data(
"""
O 0.000000000000 0.000000000000 -0.068516245955
H 0.000000000000 -0.790689888800 0.543701278274
H 0.000000000000 0.790689888800 0.543701278274
"""
)
return mol
@pytest.mark.parametrize(
"method, keywords, ref_energy",
[
pytest.param("hf", {}, -75.95536954370, marks=using("turbomole")),
pytest.param("pbe0", {"grid": "m5"}, -76.27371135900, marks=using("turbomole")),
pytest.param("ricc2", {}, -76.1603807755, marks=using("turbomole")),
pytest.param("rimp2", {}, -76.1593614075, marks=using("turbomole")),
],
)
def test_turbomole_energy(method, keywords, ref_energy, h2o):
resi = {"molecule": h2o, "driver": "energy", "model": {"method": method, "basis": "def2-SVP"}, "keywords": keywords}
res = qcng.compute(resi, "turbomole", raise_error=True, return_dict=True)
assert res["driver"] == "energy"
assert res["success"] is True
assert compare_values(ref_energy, res["return_result"])
@pytest.mark.parametrize(
"method, keywords, ref_norm",
[
pytest.param("hf", {}, 0.099340, marks=using("turbomole")),
pytest.param("pbe0", {"grid": "m5"}, 0.060631, marks=using("turbomole")),
pytest.param("ricc2", {}, 0.059378, marks=using("turbomole")),
pytest.param("rimp2", {}, 0.061576, marks=using("turbomole")),
],
)
def test_turbomole_gradient(method, keywords, ref_norm, h2o):
resi = {
"molecule": h2o,
"driver": "gradient",
"model": {"method": method, "basis": "def2-SVP"},
"keywords": keywords,
}
res = qcng.compute(resi, "turbomole", raise_error=True)
assert res.driver == "gradient"
assert res.success is True
grad = res.return_result
grad_norm = np.linalg.norm(grad)
assert compare_values(ref_norm, grad_norm)
@using("turbomole")
def test_turbomole_ri_dsp(h2o):
resi = {
"molecule": h2o,
"driver": "energy",
"model": {"method": "b-p", "basis": "def2-SVP"},
"keywords": {"ri": True, "d3bj": True},
}
res = qcng.compute(resi, "turbomole", raise_error=True)
assert res.driver == "energy"
assert res.success is True
energy = res.return_result
ref_energy = -76.36275642866
assert compare_values(ref_energy, energy)
```
#### File: programs/turbomole/define.py
```python
import itertools as it
from subprocess import PIPE, Popen, TimeoutExpired
from typing import Any, Dict, Optional
from qcengine.exceptions import InputError
from .methods import KEYWORDS, METHODS
def decode_define(str_: str) -> str:
"""Decode define output.
Depending on the employed basis set the encoding may differ.
"""
try:
str_ = str_.decode("utf-8")
except UnicodeDecodeError:
# Some of the basis files (cbas, I'm looking at you ...) are saved
# in ISO-8859-15 but most of them are in UTF-8. Decoding will
# crash in the former cases so here we try the correct decoding.
str_ = str_.decode("latin-1")
return str_
def execute_define(stdin: str, cwd: Optional["Path"] = None) -> str:
"""Call define with the input define in stdin."""
# TODO: replace this with a call to the default execute provided by QCEngine
# if possible. May be difficult though, as we have to pipe in stdin and
# be careful with the encoding.
    # We can't use universal_newlines=True or text=True in Popen as some of the
# data that define returns isn't proper UTF-8, so the decoding will crash.
# We will decode it later on manually.
with Popen("define", stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd) as proc:
try:
# stdout, _ = proc.communicate(str.encode(stdin), timeout=30)
stdout, _ = proc.communicate(str.encode(stdin), timeout=15)
stdout = decode_define(stdout)
except TimeoutExpired:
raise InputError(f"define call timed out!")
# TODO: How to get the stdout when define times out? Calling
            # communicate may also result in an indefinite hang so I disabled it
# for now...
# # Retrieve output of timed out define call
# stdout, stderr = proc.communicate()
# stdout = decode_define(stdout)
# stderr = decode_define(stderr)
# # Attach stdout and stderr of proc to error, so they can be
# # accessed later on.
# error.stdout = stdout
# error.stderr = stdout
# raise error
proc.terminate()
return stdout
def prepare_stdin(
method: str, basis: str, keywords: Dict[str, Any], charge: int, mult: int, geoopt: Optional[str] = ""
) -> str:
"""Prepares a str that can be sent to define to produce the desired
input for Turbomole."""
# Load data from keywords
unrestricted = keywords.get("unrestricted", False)
grid = keywords.get("grid", "m3")
methods_flat = list(it.chain(*[m for m in METHODS.values()]))
if method not in methods_flat:
raise InputError(f"Method {method} not in supported methods " f"{methods_flat}!")
# This variable may contain substitutions that will be made to
# the control file after it was created from a define call, e.g.
# setting XC functionals that aren't hardcoded in define etc.
subs = None
def occ_num_mo_data(charge: int, mult: int, unrestricted: Optional[bool] = False) -> str:
"""Handles the 'Occupation Number & Molecular Orbital' section
of define. Sets appropriate charge and multiplicity in the
        system and decides between restricted and unrestricted calculation.
RHF and UHF are supported. ROHF could be implemented later on
by using the 's' command to list the available MOs and then
close the appropriate number of MOs to doubly occupied MOs
by 'c' by comparing the number of total MOs and the desired
multiplicity."""
# Do unrestricted calculation if explicitly requested or mandatory
unrestricted = unrestricted or (mult != 1)
unpaired = mult - 1
charge = int(charge)
occ_num_mo_data_stdin = f"""eht
y
{charge}
y
"""
if unrestricted:
# Somehow Turbomole/define asks us if we want to write
# natural orbitals... we don't want to.
occ_num_mo_data_stdin = f"""eht
y
{charge}
n
u {unpaired}
*
n
"""
return occ_num_mo_data_stdin
def set_method(method, grid):
if method == "hf":
method_stdin = ""
elif method in METHODS["ricc2"]:
            # Setting geoopt in $ricc2 will make the ricc2 module produce
# a gradient.
# Drop the 'ri'-prefix of the method string.
geoopt_stdin = f"geoopt {method[2:]} ({geoopt})" if geoopt else ""
method_stdin = f"""cc
freeze
*
cbas
*
ricc2
{method}
list models
{geoopt_stdin}
list geoopt
*
*
"""
elif method in METHODS["dft_hardcoded"]:
method_stdin = f"""dft
on
func
{method}
grid
{grid}
"""
# TODO: Handle xcfuncs that aren't defined in define, e.g.
# new functionals introduced in 7.4 from libxc. ...
# Maybe the best idea would be to not set the functional here
# but just turn on DFT and add it to the control file later on.
elif method in METHODS["dft_libxc"]:
raise InputError("libxc functionals are not supported right now.")
return method_stdin
# Resolution of identity
def set_ri(keywords):
# TODO: senex/RIJCOSX?
ri_kws = {ri_kw: keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]}
ri_stdins = {"rijk": "rijk\non\n\n", "ri": "ri\non\n\n", "marij": "marij\n\n"}
ri_stdin = "\n".join([ri_stdins[ri_kw] for ri_kw, use in ri_kws.items() if use])
return ri_stdin
# ri_stdin = ""
# # Use either RIJK or RIJ if requested.
# if ri_kws["rijk"]:
# ri_stdin = """rijk
# on
# """
# elif ri_kws["rij"]:
# ri_stdin = """rij
# on
# """
# # MARIJ can be used additionally.
# if ri_kws["marij"]:
# ri_stdin += """marij
# """
# return ri_stdin
# Dispersion correction
def set_dsp(keywords):
        # TODO: set_ri and set_dsp are basically the same function. Maybe
# we could abstract this somehow?
dsp_kws = {dsp_kw: keywords.get(dsp_kw, False) for dsp_kw in KEYWORDS["dsp"]}
dsp_stdins = {"d3": "dsp\non\n\n", "d3bj": "dsp\nbj\n\n"}
dsp_stdin = "\n".join([dsp_stdins[dsp_kw] for dsp_kw, use in dsp_kws.items() if use])
return dsp_stdin
kwargs = {
"init_guess": occ_num_mo_data(charge, mult, unrestricted),
"set_method": set_method(method, grid),
"ri": set_ri(keywords),
"dsp": set_dsp(keywords),
"title": "QCEngine Turbomole",
"scf_conv": 8,
"scf_iters": 150,
"basis": basis,
}
stdin = """
{title}
a coord
*
no
b
all {basis}
*
{init_guess}
{set_method}
{ri}
{dsp}
scf
conv
{scf_conv}
iter
{scf_iters}
*
""".format(
**kwargs
)
return stdin, subs
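# Usage sketch (comments added for illustration; assumes "hf" is listed in METHODS):
#   stdin, subs = prepare_stdin("hf", "def2-SVP", {}, charge=0, mult=1)
# builds the define input for a restricted HF/def2-SVP singlet; ``subs`` is
# currently always None.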
``` |
{
"source": "jhrmnn/schnetpack",
"score": 2
} |
#### File: src/sacred_scripts/run_md.py
```python
from sacred import Experiment
import os
try:
import oyaml as yaml
except ImportError:
import yaml
from shutil import rmtree
from schnetpack.sacred.calculator_ingredients import (calculator_ingradient,
build_calculator)
from schnetpack.sacred.simulator_ingredients import (simulator_ingredient,
build_simulator)
from schnetpack.sacred.integrator_ingredients import (integrator_ingredient,
build_integrator)
from schnetpack.sacred.system_ingredients import (system_ingredient,
build_system)
from schnetpack.sacred.thermostat_ingredients import thermostat_ingredient, \
build_thermostat
md = Experiment('md', ingredients=[simulator_ingredient, calculator_ingradient,
integrator_ingredient, system_ingredient,
thermostat_ingredient])
SETUP_STRING_WIDTH = 30
SETUP_STRING = "\n\n{:s}\n{:s}\n{:s}".format(SETUP_STRING_WIDTH * "=",
f'{{:^{SETUP_STRING_WIDTH}s}}',
SETUP_STRING_WIDTH * '=')
@md.config
def config():
"""configuration for the simulation experiment"""
experiment_dir = 'experiment'
simulation_steps = 1000
device = 'cpu'
overwrite = True
@md.capture
def save_system_config(_config, experiment_dir):
"""
    Save the configuration to the experiment directory.
Args:
_config (dict): configuration of the experiment
        experiment_dir (str): path to the experiment directory
"""
with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
yaml.dump(_config, f, default_flow_style=False)
@md.capture
def setup_simulation(_log, simulation_dir, device):
_log.info(SETUP_STRING.format('CALCULATOR SETUP'))
calculator = build_calculator(device=device)
_log.info(SETUP_STRING.format('SYSTEM SETUP'))
system = build_system(device=device)
_log.info(SETUP_STRING.format('INTEGRATOR SETUP'))
integrator = build_integrator(n_beads=system.n_replicas, device=device)
_log.info(SETUP_STRING.format('THERMOSTAT SETUP'))
thermostat = build_thermostat()
_log.info(SETUP_STRING.format('SIMULATOR SETUP'))
simulator = build_simulator(system=system,
integrator_object=integrator,
calculator_object=calculator,
simulation_dir=simulation_dir,
thermostat_object=thermostat)
return simulator
@md.capture
def create_dirs(_log, experiment_dir, overwrite):
"""
Create the directory for the experiment.
Args:
_log:
experiment_dir (str): path to the experiment directory
        overwrite (bool): overwrites the simulation directory if True
"""
simulation_dir = os.path.join(experiment_dir, 'simulation')
_log.info("Create model directory")
if simulation_dir is None:
raise ValueError('Config `experiment_dir` has to be set!')
if os.path.exists(simulation_dir) and not overwrite:
raise ValueError(
            'Simulation directory already exists (set overwrite flag?):',
simulation_dir)
if os.path.exists(simulation_dir) and overwrite:
rmtree(simulation_dir)
if not os.path.exists(simulation_dir):
os.makedirs(simulation_dir)
@md.command
def simulate(experiment_dir, simulation_steps):
simulation_dir = os.path.join(experiment_dir, 'simulation')
create_dirs()
save_system_config()
simulator = setup_simulation(simulation_dir=simulation_dir)
simulator.simulate(simulation_steps)
@md.command
def save_config(_log, _config, experiment_dir):
file_name = f"{experiment_dir}_config.yaml"
with open(file_name, 'w') as f:
yaml.dump(_config, f, default_flow_style=False)
_log.info(f'Stored config to {file_name}')
@md.automain
def main(_log):
save_config()
_log.info('To run simulation call script with "simulate with <config file>"')
```
#### File: schnetpack/data/loader.py
```python
import logging
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
logger = logging.getLogger(__name__)
from .definitions import Structure
from .stats import StatisticsAccumulator
def collate_aseatoms(examples):
"""
Build batch from systems and properties & apply padding
Args:
examples (list):
Returns:
dict[str->torch.Tensor]: mini-batch of atomistic systems
"""
properties = examples[0]
# initialize maximum sizes
max_size = {
prop: np.array(val.size(), dtype=np.int)
for prop, val in properties.items()
}
# get maximum sizes
for properties in examples[1:]:
for prop, val in properties.items():
max_size[prop] = np.maximum(max_size[prop],
np.array(val.size(), dtype=np.int))
# initialize batch
batch = {
p: torch.zeros(len(examples), *[int(ss) for ss in size]).type(
examples[0][p].type()) for p, size in
max_size.items()
}
has_atom_mask = Structure.atom_mask in batch.keys()
has_neighbor_mask = Structure.neighbor_mask in batch.keys()
if not has_neighbor_mask:
batch[Structure.neighbor_mask] = torch.zeros_like(
batch[Structure.neighbors]).float()
if not has_atom_mask:
batch[Structure.atom_mask] = torch.zeros_like(
batch[Structure.Z]).float()
# If neighbor pairs are requested, construct mask placeholders
# Since the structure of both idx_j and idx_k is identical
# (not the values), only one cutoff mask has to be generated
if Structure.neighbor_pairs_j in properties:
batch[Structure.neighbor_pairs_mask] = torch.zeros_like(
batch[Structure.neighbor_pairs_j]).float()
# build batch and pad
for k, properties in enumerate(examples):
for prop, val in properties.items():
shape = val.size()
s = (k,) + tuple([slice(0, d) for d in shape])
batch[prop][s] = val
# add mask
if not has_neighbor_mask:
nbh = properties[Structure.neighbors]
shape = nbh.size()
s = (k,) + tuple([slice(0, d) for d in shape])
mask = nbh >= 0
batch[Structure.neighbor_mask][s] = mask
batch[Structure.neighbors][s] = nbh * mask.long()
if not has_atom_mask:
z = properties[Structure.Z]
shape = z.size()
s = (k,) + tuple([slice(0, d) for d in shape])
batch[Structure.atom_mask][s] = z > 0
# Check if neighbor pair indices are present
# Since the structure of both idx_j and idx_k is identical
# (not the values), only one cutoff mask has to be generated
if Structure.neighbor_pairs_j in properties:
nbh_idx_j = properties[Structure.neighbor_pairs_j]
shape = nbh_idx_j.size()
s = (k,) + tuple([slice(0, d) for d in shape])
batch[Structure.neighbor_pairs_mask][s] = nbh_idx_j >= 0
return batch
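# Note (comments added for clarity): AtomsLoader below uses this function as its
# default ``collate_fn``, so molecules of different sizes are zero-padded to a
# common shape and ``_atom_mask``/``_neighbor_mask`` entries are generated when
# they are not already present in the examples.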
class AtomsLoader(DataLoader):
r"""
Convenience for ``torch.data.DataLoader`` which already uses the correct
collate_fn for AtomsData and provides functionality for calculating mean
and stddev.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a
            mini-batch (default: collate_aseatoms).
pin_memory (bool, optional): If ``True``, the data loader will copy
tensors into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete
batch, if the dataset size is not divisible by the batch size.
If ``False`` and the size of dataset is not divisible by the batch
size, then the last batch will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for
collecting a batch from workers. Should always be non-negative.
(default: 0)
worker_init_fn (callable, optional): If not None, this will be called
on each worker subprocess with the worker id (an int in
``[0, num_workers - 1]``) as input, after seeding and before data
loading. (default: None)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None,
num_workers=0, collate_fn=collate_aseatoms, pin_memory=False,
drop_last=False,
timeout=0, worker_init_fn=None):
super(AtomsLoader, self).__init__(dataset, batch_size, shuffle,
sampler, batch_sampler,
num_workers, collate_fn, pin_memory,
drop_last,
timeout, worker_init_fn)
def get_statistics(self, property_names, per_atom=False, atomrefs=None):
"""
Compute mean and variance of a property. Uses the incremental Welford
algorithm implemented in StatisticsAccumulator
Args:
property_names (str or list): Name of the property for which the
mean and standard deviation should
be computed
per_atom (bool): If set to true, averages over atoms
            atomrefs (np.ndarray or list, optional): atomic reference values that are
                subtracted per atom before the statistics are accumulated (default: None)
Returns:
mean: Mean value
stddev: Standard deviation
"""
if type(property_names) is not list:
is_single = True
property_names = [property_names]
atomrefs = [atomrefs]
else:
is_single = False
if atomrefs is None:
atomrefs = [None]*len(property_names)
if type(per_atom) is not list:
per_atom = [per_atom] * len(property_names)
with torch.no_grad():
statistics = [StatisticsAccumulator(batch=True)
for _ in property_names]
logger.info("statistics will be calculated...")
for row in self:
for property_name, statistic, pa, ar in zip(property_names,
statistics,
per_atom,
atomrefs):
self._update_statistic(pa, ar, property_name,
row, statistic)
stats = list(zip(*[s.get_statistics() for s in statistics]))
mean, stddev = stats
if is_single:
mean = mean[0]
stddev = stddev[0]
return mean, stddev
def _update_statistic(self, atomistic, atomref, property_name, row,
statistics):
"""
Helper function to update iterative mean / stddev statistics
"""
property_value = row[property_name]
if atomref is not None:
z = row['_atomic_numbers']
p0 = torch.sum(torch.from_numpy(atomref[z]).float(), dim=1)
property_value -= p0
if atomistic:
property_value /= torch.sum(row['_atom_mask'], dim=1, keepdim=True)
statistics.add_sample(property_value)
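# Usage sketch (comments added for illustration; ``dataset`` stands for any
# AtomsData-style dataset and "energy" for a property stored in it):
#   loader = AtomsLoader(dataset, batch_size=32)
#   mean, stddev = loader.get_statistics("energy", per_atom=True)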
```
#### File: schnetpack/md/utils.py
```python
import numpy as np
import torch
from ase import units
class MDUnits:
"""
Basic conversion factors to atomic units used internally:
fs2atu (time): femtoseconds to atomic time units
eV2Ha (energy): electron Volt to Hartree
d2amu (mass): Dalton to atomic mass units
angs2bohr (length): Angstrom to Bohr
        auforces2aseforces (forces): Hartree per Bohr to electron Volt per Angstrom
Definitions for constants:
kB: Boltzmann constant in units of Hartree per Kelvin.
hbar: Reduced Planck constant in atomic units.
"""
# Unit conversions
fs2atu = 1e-15 / units._aut
eV2Ha = 1.0 / units.Ha
d2amu = units._amu / units._me
angs2bohr = 1.0 / units.Bohr
auforces2aseforces = angs2bohr / eV2Ha
# Constants
kB = units.kB / units.Ha
hbar = 1.0
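# Example (comment added for illustration): a 0.5 fs time step in atomic units is
# obtained as ``0.5 * MDUnits.fs2atu``; similarly ``MDUnits.kB * T`` gives the
# thermal energy in Hartree for a temperature T in Kelvin.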
class YSWeights:
"""
Weights for Yoshida-Suzuki integration used in propagating the Nose-Hoover chain thermostats.
Args:
device (str): Device used for computation (default='cuda').
"""
YS_weights = {3: np.array([1.35120719195966,
-1.70241438391932,
1.35120719195966]),
5: np.array([0.41449077179438,
0.41449077179438,
-0.65796308717750,
0.41449077179438,
0.41449077179438]),
7: np.array([-1.17767998417887,
0.23557321335936,
0.78451361047756,
1.31518632068390,
0.78451361047756,
0.23557321335936,
-1.17767998417887])}
def __init__(self, device):
self.device = device
def get_weights(self, order):
"""
Get the weights required for an integration scheme of the desired order.
Args:
order (int): Desired order of the integration scheme.
Returns:
torch.Tensor: Tensor of the integration weights
"""
if order not in self.YS_weights:
raise ValueError('Order {:d} not supported for YS integration weights'.format(order))
else:
ys_weights = torch.from_numpy(self.YS_weights[order]).float().to(self.device)
return ys_weights
def compute_centroid(ensemble):
"""
Compute centroids of the system properties (e.g. positions, momenta) given in ensemble with respect to the replica
dimension (0). The original dimensionality of the tensor is kept for the purpose of broadcasting and logging. This
routine is primarily intended to be used for ring polymer simulations.
Args:
ensemble (torch.Tensor): System property tensor (e.g. positions, momenta) of the general dimension
n_replicas x n_molecules x ...
Returns:
torch.Tensor: Centroid averaged over the replica dimension with the general shape 1 x n_molecules x ...
"""
centroid = torch.mean(ensemble, 0, keepdim=True)
return centroid
def batch_inverse(tensor):
"""
Compute the matrix inverse of a batch of square matrices. This routine is used for removing rotational motion
during the molecular dynamics simulation. Taken from https://stackoverflow.com/questions/46595157
Args:
tensor (torch.Tensor): Tensor of square matrices with the shape n_batch x dim1 x dim1
Returns:
torch.Tensor: Tensor of the inverted square matrices with the same shape as the input tensor.
"""
eye = tensor.new_ones(tensor.size(-1), device=tensor.device).diag().expand_as(tensor)
tensor_inv, _ = torch.gesv(eye, tensor)
return tensor_inv
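# Example (comments added for illustration): ``batch_inverse(torch.rand(8, 2, 2))``
# returns the 8 inverted 2x2 matrices. Note that ``torch.gesv`` was deprecated in
# later PyTorch releases in favour of ``torch.linalg.solve``; this module targets
# the older API.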
def load_gle_matrices(filename):
"""
Load GLE thermostat files formatted in raw format as generated via http://gle4md.org/index.html?page=matrix
The generated matrices are torch tensors of the shape normal_modes x s+1 x s+1, where normal_modes is 1 except in
the case of the PIGLET thermostat and s is the number of degrees of freedom added via GLE. Automatically recognizes
used units and converts them to atomic units.
Args:
filename (str): Path to the file the GLE thermostat parameters should be loaded from.
Returns:
tuple: Tuple of two square torch tensors containing the a_matrix and c_matrix parameters required to
initialize GLE type thermostats.
"""
a_matrix = GLEMatrixParser('A MATRIX:', stop='C MATRIX:', split='Matrix for normal mode')
c_matrix = GLEMatrixParser('C MATRIX:', split='Matrix for normal mode')
try:
with open(filename) as glefile:
for line in glefile:
a_matrix.read_line(line)
c_matrix.read_line(line)
except FileNotFoundError:
raise FileNotFoundError('Could not open {:s} for reading. Please use GLE parameter files '
'generated via http://gle4md.org/index.html?page=matrix'.format(filename))
return a_matrix.matrix, c_matrix.matrix
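# Usage sketch (comments added for illustration; the file name is hypothetical and
# must point to a parameter file generated via http://gle4md.org):
#   a_matrix, c_matrix = load_gle_matrices("gle_parameters.txt")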
class GLEMatrixParser:
"""
General parser for GLE thermostat files. Reads from start string until end of file or a given stop string. If the
argument split is specified, the read matrices are split at the given token. Automatically recognizes
used units and converts them to atomic units.
Args:
start (str): Token when to start reading.
        stop (str): Token when to stop reading. If None (default), reads until end of file.
split (str): If the given token is encountered, matrices are split at this point. If None (default), no split is
performed.
"""
    # Automatically recognized units and their conversion factors to atomic units
unit_conversions = {
'atomic time units^-1': 1,
'picoseconds^-1': 1 / 1000 / MDUnits.fs2atu,
'seconds^-1': units._aut,
'femtoseconds^-1': 1 / MDUnits.fs2atu,
'eV': 1 / units.Ha,
'atomic energy units': 1,
'K': MDUnits.kB
}
def __init__(self, start, stop=None, split=None):
self.start = start
self.stop = stop
self.split = split
self.read = False
self.units = None
self._matrix = []
self._tmp_matrix = []
def read_line(self, line):
"""
Read and parse a line obtained from an open file object containing GLE parameters.
Args:
line (str): Line of a GLE parameter file.
"""
line = line.strip()
# Filter for empty lines
if line:
# Check if start token is present
if self.start in line:
self.read = True
# Get units used
unit_name = line.split('(')[-1].replace(')', '')
self.units = self.unit_conversions[unit_name]
elif self.read:
if line.startswith('#'):
# Check for stop and split tokens
if self.stop is not None and self.stop in line:
self.read = False
if self.split is not None and self.split in line:
if len(self._tmp_matrix) > 0:
self._matrix.append(self._tmp_matrix)
self._tmp_matrix = []
else:
# Otherwise read and parse line
self._tmp_matrix.append([float(x) for x in line.split()])
@property
def matrix(self):
"""
Property to get parsed matrices converted to numpy arrays using atomic units.
Returns:
numpy.array: Array of the parsed GLE matrix with the shape normal_modes x s+1 x s+1, where normal_modes is 1
except in the case of the PIGLET thermostat and s is the number of degrees of freedom added via
GLE. If no matrix is found, None is returned.
"""
# Write out last buffer
if len(self._tmp_matrix) > 0:
self._matrix.append(self._tmp_matrix)
# Convert to numpy array
_matrix = np.array(self._matrix)
# Perform unit conversion
if self.units is not None:
return _matrix * self.units
else:
return None
class NormalModeTransformer:
"""
Class for transforming between bead and normal mode representation of the ring polymer, used e.g. in propagating the
ring polymer during simulation. An in depth description of the transformation can be found e.g. in [#rpmd3]_. Here,
a simple matrix multiplication is used instead of a Fourier transformation, which can be more performant in certain
    cases. On the GPU, however, no significant performance gains were observed when using an FT-based transformation over
the matrix version.
This transformation operates on the first dimension of the property tensors (e.g. positions, momenta) defined in the
system class. Hence, the transformation can be carried out for several molecules at the same time.
Args:
n_beads (int): Number of beads in the ring polymer.
device (str): Computation device (default='cuda').
References
----------
.. [#rpmd3] Ceriotti, Parrinello, Markland, Manolopoulos:
Efficient stochastic thermostatting of path integral molecular dynamics.
The Journal of Chemical Physics, 133, 124105. 2010.
"""
def __init__(self, n_beads, device='cuda'):
self.n_beads = n_beads
self.device = device
# Initialize the transformation matrix
self.c_transform = self._init_transformation_matrix()
def _init_transformation_matrix(self):
"""
Build the normal mode transformation matrix. This matrix only has to be built once and can then be used during
the whole simulation. The matrix has the dimension n_beads x n_beads, where n_beads is the number of beads in
the ring polymer
Returns:
torch.Tensor: Normal mode transformation matrix of the shape n_beads x n_beads
"""
# Set up basic transformation matrix
c_transform = np.zeros((self.n_beads, self.n_beads))
# Get auxiliary array with bead indices
n = np.arange(1, self.n_beads + 1)
# for k = 0
c_transform[0, :] = 1.0
for k in range(1, self.n_beads // 2 + 1):
c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)
for k in range(self.n_beads // 2 + 1, self.n_beads):
c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)
if self.n_beads % 2 == 0:
c_transform[self.n_beads // 2, :] = (-1) ** n
        # Since the matrix is initialized as C(k,n), it does not need to be transposed
c_transform /= np.sqrt(self.n_beads)
c_transform = torch.from_numpy(c_transform).float().to(self.device)
return c_transform
def beads2normal(self, x_beads):
"""
Transform a system tensor (e.g. momenta, positions) from the bead representation to normal mode representation.
Args:
x_beads (torch.Tensor): System tensor in bead representation with the general shape
n_beads x n_molecules x ...
Returns:
torch.Tensor: System tensor in normal mode representation with the same shape as the input tensor.
"""
return torch.mm(self.c_transform, x_beads.view(self.n_beads, -1)).view(x_beads.shape)
def normal2beads(self, x_normal):
"""
Transform a system tensor (e.g. momenta, positions) in normal mode representation back to bead representation.
Args:
x_normal (torch.Tensor): System tensor in normal mode representation with the general shape
n_beads x n_molecules x ...
Returns:
torch.Tensor: System tensor in bead representation with the same shape as the input tensor.
"""
return torch.mm(self.c_transform.transpose(0, 1), x_normal.view(self.n_beads, -1)).view(x_normal.shape)
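# Usage sketch (comments added for illustration): for a 4-bead ring polymer on the
# CPU, ``nm = NormalModeTransformer(4, device='cpu')`` maps system tensors of shape
# n_beads x n_molecules x ... via ``nm.beads2normal(p)`` and back via
# ``nm.normal2beads(p_nm)``.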
class RunningAverage:
"""
Running average class for logging purposes. Accumulates the average of a given tensor over the course of the
simulation.
"""
def __init__(self):
# Initialize running average and item count
self.average = 0
self.counts = 0
def update(self, value):
"""
Update the running average.
Args:
value (torch.Tensor): Tensor containing the property whose average should be accumulated.
"""
self.average = (self.counts * self.average + value) / (self.counts + 1)
self.counts += 1
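# Example (comments added for illustration):
#   avg = RunningAverage()
#   avg.update(torch.tensor(1.0))
#   avg.update(torch.tensor(3.0))
#   avg.average  # -> tensor(2.)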
```
#### File: schnetpack/sacred/calculator_ingredients.py
```python
from sacred import Ingredient
import os
import torch
from schnetpack.md.calculators import SchnetPackCalculator
from schnetpack.md.utils import MDUnits
calculator_ingradient = Ingredient('calculator')
@calculator_ingradient.config
def config():
"""configuration for the calculator ingredient"""
calculator = 'schnet_calculator'
required_properties = ['y', 'dydx']
force_handle = 'dydx'
position_conversion = 1.0 / MDUnits.angs2bohr
force_conversion = 1.0 / MDUnits.auforces2aseforces
property_conversion = {}
model_path = 'eth_ens_01.model'
# If model is a directory, search for best_model file
if os.path.isdir(model_path):
model_path = os.path.join(model_path, 'best_model')
@calculator_ingradient.capture
def load_model(_log, model_path, device):
_log.info('Loaded model from {:s}'.format(model_path))
model = torch.load(model_path).to(device)
return model
@calculator_ingradient.capture
def build_calculator(_log, required_properties, force_handle,
position_conversion, force_conversion,
property_conversion, calculator, device):
"""
Build the calculator object from the provided settings.
Args:
        required_properties (list): list of properties that are calculated by the model
        force_handle (str): name of the forces property in the model output
        position_conversion (float): conversion factor for positions
        force_conversion (float): conversion factor for forces
        property_conversion (dict): dictionary with conversion factors for other properties
        calculator (str): name of the calculator to build (currently only 'schnet_calculator')
        device (str): device on which the model is evaluated ('cpu' or 'cuda')
Returns:
the calculator object
"""
_log.info(f'Using {calculator}')
if calculator == 'schnet_calculator':
model = load_model(device=device)
return SchnetPackCalculator(model,
required_properties=required_properties,
force_handle=force_handle,
position_conversion=position_conversion,
force_conversion=force_conversion,
property_conversion=property_conversion)
else:
raise NotImplementedError
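# Note (comment added for clarity): ``build_calculator`` is a sacred captured
# function, so its arguments are filled in from the ``calculator`` ingredient
# config; the md experiment calls it as ``build_calculator(device=device)``.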
```
#### File: schnetpack/sacred/initializer_ingredient.py
```python
from sacred import Ingredient
from schnetpack.md.initial_conditions import MaxwellBoltzmannInit
initializer_ing = Ingredient('initializer')
@initializer_ing.config
def config():
"""configuration for the initializer ingredient"""
initializer = 'maxwell_boltzmann'
init_temperature = 300
remove_translation = False
remove_rotation = False
@initializer_ing.named_config
def remove_com():
remove_translation = True
remove_rotation = True
@initializer_ing.capture
def build_initializer(initializer, init_temperature, remove_translation,
remove_rotation):
if initializer == 'maxwell_boltzmann':
return MaxwellBoltzmannInit(init_temperature,
remove_translation=remove_translation,
remove_rotation=remove_rotation)
else:
raise NotImplementedError
```
#### File: schnetpack/simulate/thermostats.py
```python
import torch
import numpy as np
import scipy.linalg as linalg
import logging
from schnetpack.md.utils import MDUnits, load_gle_matrices, \
NormalModeTransformer, YSWeights
from schnetpack.md.integrators import RingPolymer
from schnetpack.simulate.hooks import SimulationHook
class ThermostatError(Exception):
pass
class ThermostatHook(SimulationHook):
# TODO: Could be made a torch nn.Module
def __init__(self, temperature_bath, nm_transformation=None, detach=True):
self.temperature_bath = temperature_bath
self.initialized = False
self.device = None
self.n_replicas = None
self.nm_transformation = nm_transformation
self.detach = detach
def on_simulation_start(self, simulator):
self.device = simulator.system.device
self.n_replicas = simulator.system.n_replicas
# Check if using normal modes is feasible and initialize
if self.nm_transformation is not None:
if type(simulator.integrator) is not RingPolymer:
raise ThermostatError('Normal mode transformation should only be used with ring polymer dynamics.')
else:
self.nm_transformation = self.nm_transformation(self.n_replicas, device=self.device)
if not self.initialized:
self._init_thermostat(simulator)
self.initialized = True
def on_step_begin(self, simulator):
# Apply thermostat
self._apply_thermostat(simulator)
# Re-apply atom masks for differently sized molecules, as some thermostats add random noise
simulator.system.momenta = simulator.system.momenta * simulator.system.atom_masks
# Detach if requested
if self.detach:
simulator.system.momenta = simulator.system.momenta.detach()
def on_step_end(self, simulator):
# Apply thermostat
self._apply_thermostat(simulator)
# Re-apply atom masks for differently sized molecules, as some thermostats add random noise
simulator.system.momenta = simulator.system.momenta * simulator.system.atom_masks
# Detach if requested
if self.detach:
simulator.system.momenta = simulator.system.momenta.detach()
def _init_thermostat(self, simulator):
pass
def _apply_thermostat(self, simulator):
raise NotImplementedError
class BerendsenThermostat(ThermostatHook):
def __init__(self, temperature_bath, time_constant):
super(BerendsenThermostat, self).__init__(temperature_bath)
self.time_constant = time_constant * MDUnits.fs2atu
def _apply_thermostat(self, simulator):
scaling = 1.0 + simulator.integrator.time_step / self.time_constant * (
self.temperature_bath / simulator.system.temperature - 1)
simulator.system.momenta = torch.sqrt(scaling[:, :, None, None]) * simulator.system.momenta
class GLEThermostat(ThermostatHook):
def __init__(self, bath_temperature, gle_file, nm_transformation=None):
super(GLEThermostat, self).__init__(bath_temperature,
nm_transformation=nm_transformation)
self.gle_file = gle_file
# To be initialized on beginning of the simulation, once system and integrator are known
self.c1 = None
self.c2 = None
self.thermostat_momenta = None
self.thermostat_factor = None
def _init_thermostat(self, simulator):
# Generate main matrices
self.c1, self.c2 = self._init_gle_matrices(simulator)
# Get particle masses
self.thermostat_factor = torch.sqrt(simulator.system.masses)[..., None]
# Get initial thermostat momenta
self.thermostat_momenta = self._init_thermostat_momenta(simulator)
def _init_gle_matrices(self, simulator):
a_matrix, c_matrix = load_gle_matrices(self.gle_file)
if a_matrix is None:
raise ThermostatError('Error reading GLE matrices from {:s}'.format(self.gle_file))
elif a_matrix.shape[0] > 1:
raise ThermostatError('More than one A matrix found. Could be PIGLET input.')
else:
# Remove leading dimension (for normal modes)
a_matrix = a_matrix.squeeze()
c1, c2 = self._init_single_gle_matrix(a_matrix, c_matrix, simulator)
return c1, c2
def _init_single_gle_matrix(self, a_matrix, c_matrix, simulator):
if c_matrix is None:
c_matrix = np.eye(a_matrix.shape[-1]) * self.temperature_bath * MDUnits.kB
# Check if normal GLE or GLE for ring polymers is needed:
if type(simulator.integrator) is RingPolymer:
logging.info('RingPolymer integrator detected, initializing C accordingly.')
c_matrix *= simulator.system.n_replicas
else:
c_matrix = c_matrix.squeeze()
logging.info('C matrix for GLE loaded, provided temperature will be ignored.')
# A does not need to be transposed, else c2 is imaginary
c1 = linalg.expm(-0.5 * simulator.integrator.time_step * a_matrix)
# c2 is symmetric
c2 = linalg.sqrtm(c_matrix - np.dot(c1, np.dot(c_matrix, c1.T)))
c1 = torch.from_numpy(c1.T).to(self.device).float()
c2 = torch.from_numpy(c2).to(self.device).float()
return c1, c2
def _init_thermostat_momenta(self, simulator, free_particle_limit=True):
degrees_of_freedom = self.c1.shape[-1]
if not free_particle_limit:
initial_momenta = torch.zeros(*simulator.system.momenta.shape, degrees_of_freedom, device=self.device)
else:
initial_momenta = torch.randn(*simulator.system.momenta.shape, degrees_of_freedom, device=self.device)
initial_momenta = torch.matmul(initial_momenta, self.c2)
return initial_momenta
def _apply_thermostat(self, simulator):
# Generate random noise
thermostat_noise = torch.randn(self.thermostat_momenta.shape, device=self.device)
# Get current momenta
momenta = simulator.system.momenta
# Apply transformation if requested
if self.nm_transformation is not None:
momenta = self.nm_transformation.beads2normal(momenta)
# Set current momenta
self.thermostat_momenta[:, :, :, :, 0] = momenta
# Apply thermostat
self.thermostat_momenta = torch.matmul(self.thermostat_momenta, self.c1) + \
torch.matmul(thermostat_noise, self.c2) * self.thermostat_factor
# Extract momenta
momenta = self.thermostat_momenta[:, :, :, :, 0]
# Apply transformation if requested
if self.nm_transformation is not None:
momenta = self.nm_transformation.normal2beads(momenta)
simulator.system.momenta = momenta
@property
def state_dict(self):
state_dict = {
'c1': self.c1,
'c2': self.c2,
'thermostat_factor': self.thermostat_factor,
'thermostat_momenta': self.thermostat_momenta,
'temperature_bath': self.temperature_bath,
'n_replicas': self.n_replicas
}
return state_dict
@state_dict.setter
def state_dict(self, state_dict):
self.c1 = state_dict['c1']
self.c2 = state_dict['c2']
self.thermostat_factor = state_dict['thermostat_factor']
self.thermostat_momenta = state_dict['thermostat_momenta']
self.temperature_bath = state_dict['temperature_bath']
self.n_replicas = state_dict['n_replicas']
# Set initialized flag
self.initialized = True
class PIGLETThermostat(GLEThermostat):
def __init__(self, temperature_bath, gle_file,
nm_transformation=NormalModeTransformer):
super(PIGLETThermostat, self).__init__(temperature_bath, gle_file, nm_transformation=nm_transformation)
def _init_gle_matrices(self, simulator):
a_matrix, c_matrix = load_gle_matrices(self.gle_file)
if a_matrix is None:
raise ThermostatError('Error reading GLE matrices from {:s}'.format(self.gle_file))
if a_matrix.shape[0] != self.n_replicas:
            raise ThermostatError('Expected {:d} beads but found {:d}.'.format(self.n_replicas, a_matrix.shape[0]))
if not type(simulator.integrator) is RingPolymer:
raise ThermostatError('PIGLET thermostat should only be used with RPMD.')
all_c1 = []
all_c2 = []
# Generate main matrices
for b in range(self.n_replicas):
            c1, c2 = self._init_single_gle_matrix(a_matrix[b], c_matrix[b] if c_matrix is not None else None, simulator)
# Add extra dimension for use with torch.cat, correspond to normal modes of ring polymer
all_c1.append(c1[None, ...])
all_c2.append(c2[None, ...])
# Bring to correct shape for later matmul broadcasting
c1 = torch.cat(all_c1)[:, None, None, :, :]
c2 = torch.cat(all_c2)[:, None, None, :, :]
return c1, c2
class LangevinThermostat(ThermostatHook):
def __init__(self, temperature_bath, time_constant, nm_transformation=None):
super(LangevinThermostat, self).__init__(temperature_bath, nm_transformation=nm_transformation)
self.time_constant = time_constant * MDUnits.fs2atu
self.thermostat_factor = None
self.c1 = None
self.c2 = None
def _init_thermostat(self, simulator):
# Initialize friction coefficients
gamma = torch.ones(1, device=self.device) / self.time_constant
# Initialize coefficient matrices
c1 = torch.exp(-0.5 * simulator.integrator.time_step * gamma)
c2 = torch.sqrt(1 - c1 ** 2)
self.c1 = c1.to(self.device)[:, None, None, None]
self.c2 = c2.to(self.device)[:, None, None, None]
# Get mass and temperature factors
self.thermostat_factor = torch.sqrt(simulator.system.masses * MDUnits.kB * self.temperature_bath)
def _apply_thermostat(self, simulator):
# Get current momenta
momenta = simulator.system.momenta
# Apply transformation
if self.nm_transformation is not None:
momenta = self.nm_transformation.beads2normal(momenta)
# Generate random noise
thermostat_noise = torch.randn(momenta.shape, device=self.device)
# Apply thermostat
momenta = self.c1 * momenta + self.thermostat_factor * self.c2 * thermostat_noise
# Apply transformation if requested
if self.nm_transformation is not None:
momenta = self.nm_transformation.normal2beads(momenta)
simulator.system.momenta = momenta
@property
def state_dict(self):
state_dict = {
'c1': self.c1,
'c2': self.c2,
'thermostat_factor': self.thermostat_factor,
'temperature_bath': self.temperature_bath,
'n_replicas': self.n_replicas
}
return state_dict
@state_dict.setter
def state_dict(self, state_dict):
self.c1 = state_dict['c1']
self.c2 = state_dict['c2']
self.thermostat_factor = state_dict['thermostat_factor']
self.temperature_bath = state_dict['temperature_bath']
self.n_replicas = state_dict['n_replicas']
# Set initialized flag
self.initialized = True
class PILELocalThermostat(LangevinThermostat):
def __init__(self, temperature_bath, time_constant, nm_transformation=NormalModeTransformer):
super(PILELocalThermostat, self).__init__(temperature_bath, time_constant, nm_transformation=nm_transformation)
def _init_thermostat(self, simulator):
if type(simulator.integrator) is not RingPolymer:
raise ThermostatError('PILE thermostats can only be used in RPMD')
# Initialize friction coefficients
gamma_normal = 2 * simulator.integrator.omega_normal
        # Use separate coefficient for centroid mode
gamma_normal[0] = 1.0 / self.time_constant
if self.nm_transformation is None:
raise ThermostatError('Normal mode transformation required for PILE thermostat')
# Initialize coefficient matrices
c1 = torch.exp(-0.5 * simulator.integrator.time_step * gamma_normal)
c2 = torch.sqrt(1 - c1 ** 2)
self.c1 = c1.to(self.device)[:, None, None, None]
self.c2 = c2.to(self.device)[:, None, None, None]
# Get mass and temperature factors
self.thermostat_factor = torch.sqrt(
simulator.system.masses * MDUnits.kB * self.n_replicas * self.temperature_bath
)
@property
def state_dict(self):
state_dict = {
'c1': self.c1,
'c2': self.c2,
'thermostat_factor': self.thermostat_factor,
'temperature_bath': self.temperature_bath,
'n_replicas': self.n_replicas
}
return state_dict
@state_dict.setter
def state_dict(self, state_dict):
self.c1 = state_dict['c1']
self.c2 = state_dict['c2']
self.thermostat_factor = state_dict['thermostat_factor']
self.temperature_bath = state_dict['temperature_bath']
self.n_replicas = state_dict['n_replicas']
# Set initialized flag
self.initialized = True
class PILEGlobalThermostat(PILELocalThermostat):
def __init__(self, temperature_bath, time_constant, nm_transformation=NormalModeTransformer):
super(PILEGlobalThermostat, self).__init__(temperature_bath, time_constant,
nm_transformation=nm_transformation)
def _apply_thermostat(self, simulator):
# Get current momenta
momenta = simulator.system.momenta
# Apply transformation
if self.nm_transformation is not None:
momenta = self.nm_transformation.beads2normal(momenta)
# Generate random noise
thermostat_noise = torch.randn(momenta.shape, device=self.device)
# Apply thermostat to centroid mode
c1_centroid = self.c1[0]
momenta_centroid = momenta[0]
thermostat_noise_centroid = thermostat_noise[0]
# Compute kinetic energy of centroid
kinetic_energy_factor = torch.sum(momenta_centroid ** 2 / simulator.system.masses[0]) / (
self.temperature_bath * MDUnits.kB * self.n_replicas)
centroid_factor = (1 - c1_centroid) / kinetic_energy_factor
alpha_sq = c1_centroid + torch.sum(thermostat_noise_centroid ** 2) * centroid_factor + \
2 * thermostat_noise_centroid[0, 0, 0] * torch.sqrt(c1_centroid * centroid_factor)
alpha_sign = torch.sign(thermostat_noise_centroid[0, 0, 0] + torch.sqrt(c1_centroid / centroid_factor))
alpha = torch.sqrt(alpha_sq) * alpha_sign
# Finally apply thermostat...
momenta[0] = alpha * momenta[0]
# Apply thermostat for remaining normal modes
momenta[1:] = self.c1[1:] * momenta[1:] + self.thermostat_factor * self.c2[1:] * thermostat_noise[1:]
# Apply transformation if requested
if self.nm_transformation is not None:
momenta = self.nm_transformation.normal2beads(momenta)
simulator.system.momenta = momenta
class NHCThermostat(ThermostatHook):
def __init__(self, temperature_bath, time_constant, chain_length=3, massive=False,
nm_transformation=None, multi_step=2, integration_order=3):
super(NHCThermostat, self).__init__(temperature_bath, nm_transformation=nm_transformation)
self.chain_length = chain_length
self.massive = massive
self.frequency = 1 / (time_constant * MDUnits.fs2atu)
        # Compute kBT once, since it is used frequently
self.kb_temperature = self.temperature_bath * MDUnits.kB
# Propagation parameters
self.multi_step = multi_step
self.integration_order = integration_order
self.time_step = None
# Find out number of particles (depends on whether massive or not)
self.degrees_of_freedom = None
self.masses = None
self.velocities = None
self.positions = None
self.forces = None
def _init_thermostat(self, simulator):
# Determine integration step via multi step and Yoshida Suzuki weights
integration_weights = YSWeights(self.device).get_weights(self.integration_order)
self.time_step = simulator.integrator.time_step * integration_weights / self.multi_step
# Determine shape of tensors and internal degrees of freedom
n_replicas, n_molecules, n_atoms, xyz = simulator.system.momenta.shape
if self.massive:
state_dimension = (n_replicas, n_molecules, n_atoms, xyz, self.chain_length)
# Since momenta will be masked later, no need to set non-atoms to 0
self.degrees_of_freedom = torch.ones((n_replicas, n_molecules, n_atoms, xyz), device=self.device)
else:
state_dimension = (n_replicas, n_molecules, 1, 1, self.chain_length)
self.degrees_of_freedom = 3 * simulator.system.n_atoms.float()[None, :, None, None]
# Set up masses
self._init_masses(state_dimension, simulator)
# Set up internal variables
self.positions = torch.zeros(state_dimension, device=self.device)
self.forces = torch.zeros(state_dimension, device=self.device)
self.velocities = torch.zeros(state_dimension, device=self.device)
def _init_masses(self, state_dimension, simulator):
self.masses = torch.ones(state_dimension, device=self.device)
# Get masses of innermost thermostat
self.masses[..., 0] = self.degrees_of_freedom * self.kb_temperature / self.frequency ** 2
# Set masses of remaining thermostats
self.masses[..., 1:] = self.kb_temperature / self.frequency ** 2
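    # The propagation below integrates the thermostat chain variables with a
    # multi-timestep Yoshida-Suzuki scheme and returns the accumulated factor
    # by which the particle momenta are rescaled.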
def _propagate_thermostat(self, kinetic_energy):
# Compute forces on first thermostat
self.forces[..., 0] = (kinetic_energy - self.degrees_of_freedom * self.kb_temperature) / self.masses[..., 0]
scaling_factor = 1.0
for _ in range(self.multi_step):
for idx_ys in range(self.integration_order):
time_step = self.time_step[idx_ys]
# Update velocities of outermost bath
self.velocities[..., -1] += 0.25 * self.forces[..., -1] * time_step
# Update the velocities moving through the beads of the chain
for chain in range(self.chain_length - 2, -1, -1):
coeff = torch.exp(-0.125 * time_step * self.velocities[..., chain + 1])
self.velocities[..., chain] = self.velocities[..., chain] * coeff ** 2 + \
0.25 * self.forces[..., chain] * coeff * time_step
# Accumulate velocity scaling
scaling_factor *= torch.exp(-0.5 * time_step * self.velocities[..., 0])
# Update forces of innermost thermostat
self.forces[..., 0] = (scaling_factor * scaling_factor * kinetic_energy
- self.degrees_of_freedom * self.kb_temperature) / self.masses[..., 0]
# Update thermostat positions
                # TODO: Only required if one is interested in the conserved quantity of the NHC.
self.positions += 0.5 * self.velocities * time_step
# Update the thermostat velocities
for chain in range(self.chain_length - 1):
coeff = torch.exp(-0.125 * time_step * self.velocities[..., chain + 1])
self.velocities[..., chain] = self.velocities[..., chain] * coeff ** 2 + \
0.25 * self.forces[..., chain] * coeff * time_step
self.forces[..., chain + 1] = (self.masses[..., chain] * self.velocities[..., chain] ** 2
- self.kb_temperature) / self.masses[..., chain + 1]
# Update velocities of outermost thermostat
self.velocities[..., -1] += 0.25 * self.forces[..., -1] * time_step
return scaling_factor
def _compute_kinetic_energy(self, momenta, masses):
        # Compute the kinetic energy (the factor of 1/2 is dropped, as it cancels with a factor of 2 later)
        # Note: this is safe, as the normal-mode transformation never mixes the atom dimension, which carries the masses.
kinetic_energy = momenta ** 2 / masses
if self.massive:
return kinetic_energy
else:
return torch.sum(torch.sum(kinetic_energy, 3, keepdim=True), 2, keepdim=True)
def _apply_thermostat(self, simulator):
# Get current momenta
momenta = simulator.system.momenta
# Apply transformation
if self.nm_transformation is not None:
momenta = self.nm_transformation.beads2normal(momenta)
kinetic_energy = self._compute_kinetic_energy(momenta, simulator.system.masses)
scaling_factor = self._propagate_thermostat(kinetic_energy)
momenta = momenta * scaling_factor
# Apply transformation if requested
if self.nm_transformation is not None:
momenta = self.nm_transformation.normal2beads(momenta)
simulator.system.momenta = momenta
@property
def state_dict(self):
state_dict = {
'chain_length': self.chain_length,
'massive': self.massive,
'frequency': self.frequency,
'kb_temperature': self.kb_temperature,
'degrees_of_freedom': self.degrees_of_freedom,
'masses': self.masses,
'velocities': self.velocities,
'forces': self.forces,
'positions': self.positions,
'time_step': self.time_step,
'temperature_bath': self.temperature_bath,
'n_replicas': self.n_replicas,
'multi_step': self.multi_step,
'integration_order': self.integration_order
}
return state_dict
@state_dict.setter
def state_dict(self, state_dict):
self.chain_length = state_dict['chain_length']
self.massive = state_dict['massive']
self.frequency = state_dict['frequency']
self.kb_temperature = state_dict['kb_temperature']
self.degrees_of_freedom = state_dict['degrees_of_freedom']
self.masses = state_dict['masses']
self.velocities = state_dict['velocities']
self.forces = state_dict['forces']
self.positions = state_dict['positions']
self.time_step = state_dict['time_step']
self.temperature_bath = state_dict['temperature_bath']
self.n_replicas = state_dict['n_replicas']
self.multi_step = state_dict['multi_step']
self.integration_order = state_dict['integration_order']
self.initialized = True
class NHCRingPolymerThermostat(NHCThermostat):
def __init__(self, temperature_bath, time_constant, chain_length=3, local=True,
nm_transformation=NormalModeTransformer, multi_step=2, integration_order=3):
super(NHCRingPolymerThermostat, self).__init__(temperature_bath,
time_constant,
chain_length=chain_length,
massive=True,
nm_transformation=nm_transformation,
multi_step=multi_step,
integration_order=integration_order)
self.local = local
def _init_masses(self, state_dimension, simulator):
# Multiply factor by number of replicas
self.kb_temperature = self.kb_temperature * self.n_replicas
# Initialize masses with the frequencies of the ring polymer
polymer_frequencies = simulator.integrator.omega_normal
polymer_frequencies[0] = 0.5 * self.frequency # 0.5 comes from Ceriotti paper, check
# Assume standard massive Nose-Hoover and initialize accordingly
self.masses = torch.ones(state_dimension, device=self.device)
self.masses *= self.kb_temperature / polymer_frequencies[:, None, None, None, None] ** 2
# If a global thermostat is requested, we assign masses of 3N to the first link in the chain on the centroid
if not self.local:
self.masses[0, :, :, :, 0] *= 3 * simulator.system.n_atoms.float()[:, None, None]
# Degrees of freedom also need to be adapted
self.degrees_of_freedom[0, :, :, :] *= 3 * simulator.system.n_atoms.float()[:, None, None]
def _compute_kinetic_energy(self, momenta, masses):
kinetic_energy = momenta ** 2 / masses
# In case of a global NHC for RPMD, use the whole centroid kinetic energy and broadcast it
if not self.local:
kinetic_energy_centroid = torch.sum(torch.sum(kinetic_energy[0, ...], 2, keepdim=True), 1, keepdim=True)
kinetic_energy[0, ...] = kinetic_energy_centroid
return kinetic_energy
@property
def state_dict(self):
state_dict = {
'chain_length': self.chain_length,
'massive': self.massive,
'frequency': self.frequency,
'kb_temperature': self.kb_temperature,
'degrees_of_freedom': self.degrees_of_freedom,
'masses': self.masses,
'velocities': self.velocities,
'forces': self.forces,
'positions': self.positions,
'time_step': self.time_step,
'temperature_bath': self.temperature_bath,
'n_replicas': self.n_replicas,
'multi_step': self.multi_step,
'integration_order': self.integration_order,
'local': self.local
}
return state_dict
@state_dict.setter
def state_dict(self, state_dict):
self.chain_length = state_dict['chain_length']
self.massive = state_dict['massive']
self.frequency = state_dict['frequency']
self.kb_temperature = state_dict['kb_temperature']
self.degrees_of_freedom = state_dict['degrees_of_freedom']
self.masses = state_dict['masses']
self.velocities = state_dict['velocities']
self.forces = state_dict['forces']
self.positions = state_dict['positions']
self.time_step = state_dict['time_step']
self.temperature_bath = state_dict['temperature_bath']
self.n_replicas = state_dict['n_replicas']
self.multi_step = state_dict['multi_step']
self.integration_order = state_dict['integration_order']
self.local = state_dict['local']
self.initialized = True
```
#### File: schnetpack/tests/test_environment.py
```python
import numpy as np
import pytest
from ase import Atoms
from ase.neighborlist import neighbor_list
import schnetpack.environment as env
@pytest.fixture
def single_atom():
return Atoms([6], positions=[[0., 0., 0.]])
@pytest.fixture
def two_atoms():
return Atoms([6, 6], positions=[[0., 0., 0.], [.1, 0., 0.]])
@pytest.fixture
def single_site_crystal():
return Atoms([6], positions=[[0., 0., 0.]], cell=np.eye(3), pbc=True)
@pytest.fixture
def two_site_crystal():
return Atoms([6, 6], positions=[[0., 0., 0.], [.1, 0., 0.]], cell=np.eye(3),
pbc=True)
@pytest.fixture(params=[0, 1])
def crystal(request, single_site_crystal, two_site_crystal):
crystals = [single_site_crystal, two_site_crystal]
yield crystals[request.param]
@pytest.fixture
def simple_env():
return env.SimpleEnvironmentProvider()
@pytest.fixture
def ase_env():
return env.AseEnvironmentProvider(10.)
def test_single_atom(single_atom, simple_env, ase_env):
nbh_simple, offsets_simple = simple_env.get_environment(single_atom)
nbh_ase, offsets_ase = ase_env.get_environment(single_atom)
assert nbh_simple.shape == nbh_ase.shape
assert offsets_simple.shape == offsets_ase.shape
assert np.allclose(nbh_simple, nbh_ase)
assert np.allclose(offsets_simple, offsets_ase)
def test_two_atoms(two_atoms, simple_env, ase_env):
nbh_simple, offsets_simple = simple_env.get_environment(two_atoms)
nbh_ase, offsets_ase = ase_env.get_environment(two_atoms)
assert nbh_simple.shape == nbh_ase.shape
assert offsets_simple.shape == offsets_ase.shape
assert np.allclose(nbh_simple, nbh_ase)
assert np.allclose(offsets_simple, offsets_ase)
def test_single_site_crystal_small_cutoff(crystal, simple_env, ase_env):
# assure that neighboring cells are not included
ase_env.cutoff = 0.5
nbh_simple, offsets_simple = simple_env.get_environment(crystal)
nbh_ase, offsets_ase = ase_env.get_environment(crystal)
assert nbh_simple.shape == nbh_ase.shape
assert offsets_simple.shape == offsets_ase.shape
assert np.allclose(nbh_simple, nbh_ase)
assert np.allclose(offsets_simple, offsets_ase)
def test_single_site_crystal_large_cutoff(crystal, ase_env):
ase_env.cutoff = 0.7
idx_i, idx_j, idx_S, dist = neighbor_list('ijSd', crystal, ase_env.cutoff,
self_interaction=False)
nbh_ase, offsets_ase = ase_env.get_environment(crystal)
# get number of neighbors from index vector
n_nbh = np.unique(np.hstack((idx_i,
np.arange(crystal.get_number_of_atoms()))),
return_counts=True)[1]-1
# get number of neighbors from nbh matrix
n_nbh_env = np.sum(nbh_ase >= 0, 1)
assert n_nbh.shape == n_nbh_env.shape
assert np.allclose(n_nbh, n_nbh_env)
``` |
{
"source": "jhroot/content-store",
"score": 2
} |
#### File: content-store/tests/article_part_test.py
```python
import pytest
from sqlalchemy.orm.exc import NoResultFound
from content_store.api.api import create_app
from content_store.api.config import TestingConfig
from content_store.api.models import ArticlePart
from content_store.api.repositories import ArticlePartRepository
from content_store.api.database import DB
@pytest.fixture
def app():
application = create_app(TestingConfig)
with application.app_context():
DB.drop_all()
DB.create_all()
yield application
@pytest.mark.usefixtures("app")
def test_add_get_parts():
article_parts = ArticlePartRepository(DB)
test_parts = (
ArticlePart("001", 1, "front", "Article 001 front matter content v1"),
ArticlePart("001", 1, "body", "Article 001 body content v1"),
ArticlePart("002", 1, "front", "Article 002 front matter content v1"),
ArticlePart("002", 1, "body", "Article 002 front matter content v1")
)
for part in test_parts:
article_parts.add_article_part(part)
assert part == article_parts.get_article_part(
article_id=part.article_id,
version=part.version,
part_name=part.part_name
)
@pytest.mark.usefixtures("app")
def test_delete_part():
article_parts = ArticlePartRepository(DB)
test_part = ArticlePart("001", 1, "front", "Article 001 front matter content v1")
article_parts.add_article_part(test_part)
part = article_parts.get_article_part(
test_part.article_id,
test_part.version,
test_part.part_name
)
assert part == test_part
article_parts.delete_article_part(
part.article_id,
part.version,
part.part_name
)
with pytest.raises(NoResultFound):
article_parts.get_article_part(
test_part.article_id,
test_part.version,
test_part.part_name
)
``` |
{
"source": "jhs7jhs/gf-telegram-bot",
"score": 3
} |
#### File: gf-telegram-bot/query/database.py
```python
import json
# TODO: merge all the files into one file
equip_dict = json.load(open('dict/equip_dict.json', 'r', encoding='utf8'))
doll_dict = json.load(open('dict/doll_dict.json', 'r', encoding='utf8'))
alias_dict = dict()
for num, doll in doll_dict.items():
for alias in doll['alias']:
alias_dict[alias] = num
def find_by_alias(alias):
if alias in alias_dict:
return alias_dict[alias]
return None
def get_doll_by_time(time):
doll_list = []
# TODO: Improve time complexity by using dictionary
for num in doll_dict:
dict_time = int(doll_dict[num]['time'].replace(':', ''))
if dict_time == time:
doll_list.append(doll_dict[num])
return doll_list
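# Illustrative sketch for the TODO above (hypothetical helper, not part of the
# original module): pre-building a time -> dolls index turns each lookup into an
# O(1) dictionary access instead of a scan over doll_dict.
_time_index = {}
for _num, _doll in doll_dict.items():
    _time_index.setdefault(int(_doll['time'].replace(':', '')), []).append(_doll)
def get_doll_by_time_indexed(time):
    """Hypothetical O(1) variant of get_doll_by_time using the prebuilt index."""
    return _time_index.get(time, [])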
def get_equip_by_time(time):
time = str(time)
if time not in equip_dict:
return None
return equip_dict[time]
def get_doll_by_num(num):
if num not in doll_dict:
return None
doll = doll_dict[num]
return doll
```
#### File: gf-telegram-bot/query/search.py
```python
import logging
from .database import get_doll_by_num
from . import util
SEARCH_MESSAGE_WITH_TIME = """Doll No. : {0}
Name : {1}
Rarity : ★{2}
Type : {3}
How to obtain : {4}
Production time : {5}
http://rb-tree.xyz/szimage/{0}.png"""
SEARCH_MESSAGE_WITHOUT_TIME = """Doll No. : {0}
Name : {1}
Rarity : ★{2}
Type : {3}
How to obtain : {4}
http://rb-tree.xyz/szimage/{0}.png"""
def search(bot, update):
try:
num = util.command_doll(update)
if num is None:
return
doll = get_doll_by_num(num)
if doll['time'] != "00:00":
s = SEARCH_MESSAGE_WITH_TIME.format(doll['no'], doll['name'],
doll['class'], doll['type'],
doll['obtain'], doll['time'])
else:
s = SEARCH_MESSAGE_WITHOUT_TIME.format(doll['no'], doll['name'],
doll['class'], doll['type'],
doll['obtain'])
update.message.reply_text(s)
except:
logger = logging.getLogger('query')
logger.exception("Unhandled Exception (query.search): ")
```
#### File: gf-telegram-bot/query/upgrade.py
```python
import logging
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from .database import get_doll_by_num
from . import util
UPGRADE_MESSAGE_1 = """Doll No. : {0}
Name : {1}
Buff effect{2}:
{3}
Buff tiles:
"""
UPGRADE_MESSAGE_2 = """Doll No. : {0}
Name : {1}
Skill name : {2}
Effect:
{3}
- Values below are for the upgraded skill at Lv.10 -
"""
UPGRADE_MESSAGE_3 = """Doll No. : {0}
Name : {1}
Skill name : {2}
Effect:
{3}
- Values below are at Lv.10 -
"""
UPGRADE_MESSAGE_4 = """Doll No. : {0}
Name : {1}
Exclusive equipment effect:
{2}
"""
UPGRADE_MESSAGE_5 = """Doll No. : {0}
Name : {1}
- Stats (Lv.100/110/115/120) -
"""
def upgrade_callback(bot, update):
query = update.callback_query
try:
num, uid, option = query.data.split('_')
if query.from_user.id != int(uid):
return
doll = get_doll_by_num(num)
upgrade = doll['upgrade']
if option == '1':
buff = upgrade['buff']
s = UPGRADE_MESSAGE_1.format(doll['no'], doll['name'],
buff['option'], buff['desc'])
s += "```\n"
for i in range(0, 9, 3):
s += "+---+---+---+\n"
for j in range(3):
c = buff['form'][i + j]
if c == '0':
s += "| "
elif c == '1':
s += "| ! "
else:
s += "| D "
s += "|\n"
s += "+---+---+---+```"
elif option == '2':
skill = upgrade['orig_skill']
s = UPGRADE_MESSAGE_2.format(doll['no'], doll['name'],
skill['name'], skill['desc'])
for i in range(0, len(skill['spec'])):
s += "{0}: {1}\n".format(skill['spec'][i][0],
skill['spec'][i][1])
elif option == '3':
skill = upgrade['new_skill']
s = UPGRADE_MESSAGE_3.format(doll['no'], doll['name'],
skill['name'], skill['desc'])
for i in range(0, len(skill['spec'])):
s += "{0}: {1}\n".format(skill['spec'][i][0],
skill['spec'][i][1])
elif option == '4':
s = UPGRADE_MESSAGE_4.format(doll['no'], doll['name'],
upgrade['equip'])
elif option == '5':
stats = upgrade['stat']
s = UPGRADE_MESSAGE_5.format(doll['no'], doll['name'])
for stat in stats:
s += "{0}: {1}\n".format(stat[0], stat[1])
else:
s = "Something wrong: {}".format(option)
bot.edit_message_text(
text=s,
chat_id=query.message.chat_id,
message_id=query.message.message_id,
parse_mode="Markdown")
except:
logger = logging.getLogger('query')
logger.exception("Unhandled Exception (query.upgrade_callback): ")
def upgrade(bot, update):
try:
num = util.command_doll(update)
if num is None:
return
doll = get_doll_by_num(num)
if 'upgrade' not in doll:
update.message.reply_text("๊ฐ์ฅ ์ ๋ณด๊ฐ ์๊ฑฐ๋, ๊ฐ์ฅ์ด ๋ถ๊ฐ๋ฅํ ์ธํ์
๋๋ค.")
return
s = "์ ํํ ์ธํ: {}\n๋ณด๊ณ ์ถ์ ๊ฐ์ฅ ์ ๋ณด๋ฅผ ์ ํํ์ธ์.".format(doll['name'])
uid = update.message.from_user.id
        button_list = [[
            InlineKeyboardButton(
                "Stage 1 buff", callback_data="{}_{}_1".format(doll['no'], uid)),
            InlineKeyboardButton(
                "Stage 1 skill", callback_data="{}_{}_2".format(doll['no'], uid))
        ],
            [
                InlineKeyboardButton(
                    "Stage 2 extra skill",
                    callback_data="{}_{}_3".format(doll['no'],
                                                   uid)),
                InlineKeyboardButton(
                    "Stage 3 exclusive equipment",
                    callback_data="{}_{}_4".format(doll['no'], uid))
            ],
            [
                InlineKeyboardButton(
                    "Stat changes",
                    callback_data="{}_{}_5".format(doll['no'], uid))
            ]]
reply_markup = InlineKeyboardMarkup(button_list)
update.message.reply_text(text=s, reply_markup=reply_markup)
except:
logger = logging.getLogger('query')
logger.exception("Unhandled Exception (query.upgrade): ")
``` |
{
"source": "jhsa26/Lina-Seismic-Playground",
"score": 3
} |
#### File: LabelData/LabelSAC2NPZ/plot_matrix_wave.py
```python
def plot_matrix_wave(st,itp,its):
import matplotlib.pyplot as plt
import numpy as np
j=0
plt.figure(figsize=(8, 4.5))
#plt.title(tr.stats.sac.knetwk+tr.stats.sac.kcmpnm+' '+str(tr.stats.sac.mag)+' Earthquake',verticalalignment='top')
for j in np.arange(3):
plt.subplot(3,1,j+1)
plt.plot(st[:,j], 'k')
# plt.ylabel(tr.stats.sac.kcmpnm)
plt.axvline(itp,label="1",color='blue',linestyle="--")
plt.axvline(its,label="2",color='red',linestyle="--")
# title = tr.stats.sac.knetwk+tr.stats.sac.kcmpnm+' '+str(tr.stats.sac.mag)+' Earthquake'+'\n'
plt.suptitle('Label Data')
plt.xlabel('Time [s]')
plt.show()
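# Minimal usage sketch (assumed shapes, not part of the original script): `st` is an
# (n_samples, 3) array with one column per component, and itp/its are (presumably)
# the sample indices of the P and S picks.
# if __name__ == '__main__':
#     import numpy as np
#     data = np.random.randn(3000, 3)
#     plot_matrix_wave(data, itp=1000, its=1800)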
``` |
{
"source": "jhsa26/SurfTomoCNN-new",
"score": 2
} |
#### File: SurfTomoCNN-new/2TrainingAndTestScripts/config.py
```python
class Config(object):
def __init__(self):
self.filepath_disp_training = '../DataSet/TrainingData/0.5km/USA_Tibet/disp_combine_gaussian_map/'
self.filepath_vs_training = '../DataSet/TrainingData/0.5km/USA_Tibet/vs_curve/'
self.filepath_disp_real = '../DataSet/TestData/real-8s-50s/China/disp_pg_real/'
self.batch_size = 64 # training batch size
self.nEpochs = 600 # maximum number of epochs to train for
self.lr = 0.00001 # learning rate
self.seed = 123 # random seed to use. Default=123
self.plot = True # show validation result during training
self.alpha=0.0000 # damping, not used here
self.testsize=0.2
self.pretrained =True
self.start=600
self.pretrain_net = "./model_para/model_epoch_"+str(self.start)+".pth"
if __name__ == '__main__':
pass
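    # Example (illustrative only): inspect the configured hyper-parameters.
    # option = Config()
    # print(option.lr, option.batch_size, option.pretrain_net)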
```
#### File: SurfTomoCNN-new/2TrainingAndTestScripts/Main_Train.py
```python
import matplotlib as mpl
# mpl.use('Agg')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import numpy as np
import time
import sys
sys.path.append('./src')
from src.NetModel_curve import Net as Net
from config import Config
from torch.utils.data import DataLoader
from src.loadData import ToTensor, DispVsDataset,getTrainValidationFiles
from torchsummary import summary
from src.util import randomFix,checkpoint,writerlogfile,MyLoss,weights_init
import os
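# training() below runs one epoch over the training set (forward pass, loss,
# backward pass, optimizer step per mini-batch) and returns the loss of the
# last mini-batch of the epoch.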
def training(epoch, model,optimizer,option,trainDataset,device,loss_fn=nn.MSELoss()):
# adjust learning rate
# optimizer = adjust_learning_rate(option.lr,optimizer, epoch)
trainLoader = DataLoader(trainDataset, batch_size=option.batch_size,shuffle=True, num_workers=0)
total_num=len(trainDataset)
num=0
epoch_loss=0.0
current_loss=None
for batch_i,sample_batch in enumerate(trainLoader):
inputs,targets=sample_batch['disp'].to(device),sample_batch['vs'].to(device) # cnn input and labels
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_fn(outputs, targets)
loss.backward()
optimizer.step()
# print statistics
num = len(inputs) + num
current_loss = loss.item()
epoch_loss += current_loss
if batch_i % 20 == 19:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, num, total_num,
100. * num / total_num, loss.item()))
total_batch=len(trainLoader)
average_loss = epoch_loss / total_batch
print("===> Epoch {} Complete: Avg. Loss: {:.4f} ".format(epoch, average_loss))
# return the last iteration loss.
return current_loss
def test(epoch, model, option,validDataset,loss_fn=nn.MSELoss()):
if option.plot:
fig = plt.figure(num=1, figsize=(12, 8), dpi=80, clear=True)
validLoader=DataLoader(validDataset,shuffle=True,batch_size=option.batch_size,num_workers=0)
test_loss = 0
rms=0
model.cpu()
model.eval() #
for batch_i, sample_batch in enumerate(validLoader):
# dispersion
input = sample_batch['disp']# input = input.view([1, input.size(0), input.size(1), input.size(2)])
# velocity
label = sample_batch['vs'] # label = label.view([1, label.size(0)])
locationKey=sample_batch['location'].numpy()
# compute output
output = model(input) # output[batchsize,H,W]
loss = loss_fn(output, label).item()
test_loss += loss # sum up batch loss
# collect output loss, need to delete
vel_pred = output.detach().numpy()
vel_syn = label.numpy()
res = (vel_pred - vel_syn)
rms = rms + np.sqrt(np.power(res, 2).sum() / res.size)
if option.plot:
            if batch_i % 4 == 0:
num_vs=vel_syn.shape[0]
select_one=np.random.randint(0,num_vs)
vel_syn=vel_syn[select_one,:];vel_pred=vel_pred[select_one,:]
#select_one=np.random.randint(0,option.batch_size) # bug here
#vel_syn=vel_syn[select_one,:];vel_pred=vel_pred[select_one,:]
plt.plot(vel_syn, np.arange(0, len(vel_syn),1)*0.5, '-.', color='red')
plt.plot(vel_pred, np.arange(0, len(vel_pred),1)*0.5, '-', color='green')
plt.title('True')
plt.xlabel('Vs(km/s)')
plt.ylabel('Depth(km)')
plt.gca().invert_yaxis()
plt.savefig('./Figs/Fitting_epoch{:.0f}_{:.3f}_{:.3f}.jpg'.format(epoch,locationKey[select_one,0],locationKey[select_one,1]), dpi=300)
plt.pause(0.01)
fig.clear()
pass
total_batch=len(validLoader)
average_loss = test_loss /total_batch
rms = rms/total_batch
print("===> Avg. test loss: {:.4f} {:.4f} ".format(average_loss, rms))
return average_loss,rms
def main():
os.system("test -d output|| mkdir output")
os.system("test -d Figs|| mkdir Figs")
os.system("test -d model_para|| mkdir model_para")
writer = SummaryWriter()
option = Config()
alpha = option.alpha
    randomFix(option.seed)  # fix the seed so results are reproducible across runs
print('===> Building net')
model = Net(image_width=17,
image_height=60,
image_outwidth=301,
image_outheight=1,
inchannel=2, outchannel=4)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # for one gpu
model.to(device) # assign a gpu or cpu
summary(model,(2,17,60)) # how many parameters of designed network
print(model)
# initialize weights of networks
if option.pretrained:
if torch.cuda.is_available():
model.load_state_dict(torch.load(option.pretrain_net))
else:
model.load_state_dict(torch.load(option.pretrain_net,map_location={'cuda:0':'cpu'}))
else:
model.apply(weights_init)
if option.plot:
plt.ion()
# set optimizer
# optimizer = optim.Adam(model.parameters(), lr=option.lr, weight_decay=alpha)
optimizer = optim.RMSprop(model.parameters(), lr=option.lr, weight_decay=alpha)
# optimizer = optim.SGD(model.parameters(), lr=option.lr,weight_decay=alpha,momentum=0.9)
print('===> load train and validation Dataset')
dispPath=option.filepath_disp_training
vsPath=option.filepath_vs_training
trainFiles,validFiles=getTrainValidationFiles(dispPath,vsPath,validSize=0.2)
trainDataset=DispVsDataset(trainFiles,transform=ToTensor())
validDataset=DispVsDataset(validFiles,transform=ToTensor())
print('===> Training Net')
time_loc = list(map(str, list(time.localtime())))
time_loc = '_'.join(time_loc[0:-5])
with open('output/epochInfo' + time_loc + '.txt', 'w') as f:
for epoch in range(option.start, option.start + option.nEpochs + 1):
# training
tloss = training(epoch, model,optimizer,option,trainDataset,device,loss_fn=MyLoss())
# # validation
vloss, vrms = test(epoch, model,option,validDataset,loss_fn=MyLoss())
model.to(device)
# write log file
writer = writerlogfile(writer, epoch, tloss, vloss, vrms)
            if epoch % 20 == 0:
checkpoint(epoch, model)
elif epoch == 1:
checkpoint(epoch, model)
string_output = "{}".format("%d %10.7f %10.7f %10.7f %s" % (epoch, tloss, vloss, vrms, '\n'))
f.write(string_output)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
checkpoint(epoch, model)
print('Finished Training')
if __name__ == '__main__':
main()
pass
```
#### File: 2TrainingAndTestScripts/unuseful/step1_determining_init_learningrate.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from util import *
from NetModel import Net as Net
from config import Config
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import pickle
import time
from Main_plotResults import Mainplot
writer = SummaryWriter()
option = Config()
alpha=option.alpha
plt.ion()
# fix the seed, because PyTorch initializes weights randomly
torch.manual_seed(option.seed)
print('===> Loading datasets')
batch_index, train_pos, test_pos = Reader().get_batch_file()
print('===> Building net')
model = Net(inchannel=2,outchannel=8)
model.apply(weights_init)
optimizer = optim.Adam(model.parameters(), lr=option.lr,weight_decay=alpha)
option.lr= 1e-6
print('===> Training Net')
def training(epoch):
epoch_loss =[]
accuracy = []
total_num = len(train_pos)
lr_mult = (1 / 1e-5) ** (1 / 100)
lr = [option.lr]
losses = []
best_loss = 1e9
for iteration in range(len(batch_index)):
index1 = iteration*option.batch_size
index2 = batch_index[iteration]
batch_x,batch_y,batch_loc= Reader().get_batch_data('train',index1, index2,train_pos,test_pos)
inputs = torch.Tensor(batch_x[:,:,:])
# batch_size,channels,H,W
inputs = inputs.view([inputs.size(0),inputs.size(2),1,inputs.size(1)])
targets = batch_y[:,:,1]
targets = torch.Tensor(targets)
optimizer.zero_grad()
outputs = model(inputs)
# cost function
loss = loss_fn(alpha,model,outputs, targets)
loss.backward()
optimizer.step()
# print statistics
epoch_loss.append(loss.item())
if iteration%20==19:
num = index2
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, num, total_num,
100. * num/total_num, loss.item()))
outvspattern = Vel_pattern().get_Vel_pattern_array(outputs)
targetsvspattern = Vel_pattern().get_Vel_pattern_array(targets)
temppatterns = (targetsvspattern - outvspattern)
accuracy.append((temppatterns.nelement()-len(torch.nonzero(temppatterns)))/temppatterns.nelement())
if loss.item() < best_loss:
best_loss = loss.item()
if loss.item() > 10* best_loss or lr[-1]> 1.0:
break
# if lr[-1]> 1.0:
# break
for g in optimizer.param_groups:
g['lr'] = g['lr']*lr_mult
lr.append(g['lr']*lr_mult)
lr = np.array(lr)
losses = np.array(losses)
accuracy=np.array(accuracy)
return lr,accuracy,epoch_loss
for epoch in range(1, 2):
# adjust_learning_rate(optimizer, epoch,decay_rate=0.5)
model.train()
lr, accuracy, epoch_loss= training(epoch)
model.eval()
plt.subplot(3,1,1)
plt.xticks(np.log([1e-6,1e-5 ,1e-4, 1e-3, 1e-2, 1e-1, 1]), (1e-6,1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1))
plt.xlabel('learning rate')
plt.ylabel('loss')
plt.plot(np.log(lr), epoch_loss)
plt.subplot(3,1,2)
plt.xticks(np.log([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1]), (1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1))
plt.xlabel('learning rate')
plt.ylabel('accuracy')
plt.plot(np.log(lr), accuracy)
plt.subplot(3, 1, 3)
plt.xlabel('num iterations')
plt.ylabel('learning rate')
plt.plot(lr)
plt.tight_layout()
plt.pause(100)
plt.show()
```
#### File: 2TrainingAndTestScripts/unuseful/Traintrain_curve.py
```python
import matplotlib as mpl
# mpl.use('Agg')
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from tensorboardX import SummaryWriter
import pickle
import time
# import sys
# sys.path.append('./src')
from src.util import *
from src.writerlogfile import writerlogfile
from src.NetModel_curve import Net as Net
from config import Config
from torchsummary import summary
myloss = MyLoss()
Input_fun=Reader()
#def count_parameters(model):
# return sum(p.numel() for p in model.parameters() if p.requires_grad)
def training(epoch, model, train_pos, test_pos, batch_index, option, optimizer, alpha):
epoch_loss = 0.0
random.shuffle(train_pos)
total_num = len(train_pos)
for iteration in range(len(batch_index)):
index1 = iteration * option.batch_size
index2 = batch_index[iteration]
# batch_x,batch_y,batch_loc= Reader().get_batch_gaussian_map('train',index1, index2,train_pos,test_pos)
batch_x, batch_y, batch_loc = Input_fun.get_batch_disp_gaussian_map_vs_curve('train', index1, index2, train_pos,test_pos)
if torch.cuda.is_available():
inputs = torch.Tensor(batch_x).cuda()
targets = torch.Tensor(batch_y).cuda()
else:
inputs = torch.Tensor(batch_x)
targets = torch.Tensor(batch_y)
optimizer.zero_grad()
outputs = model(inputs)
# cost function
# loss = loss_fn(alpha,model,outputs, targets)
loss = myloss(outputs, targets)
loss.backward()
optimizer.step()
# print statistics
epoch_loss += loss.item()
if iteration % 20 == 19:
num = index2
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, num, total_num,
100. * num / total_num, loss.item()))
norm_weight = extract_weights(model)
average_loss = epoch_loss / len(batch_index)
print("===> Epoch {} Complete: Avg. Loss: {:.4f} ".format(epoch, average_loss))
return loss.item(), norm_weight
def test(epoch, model_cpu, test_pos, train_pos, alpha, option):
model_cpu.eval()
random.shuffle(test_pos)
test_x, test_y, vel_loc = Input_fun.get_batch_disp_gaussian_map_vs_curve('test', 0, 0, train_pos, test_pos)
loss_hist = []
test_loss = 0
vs_label_pred = []
res_total = []
rms_count = 0
if option.plot:
fig = plt.figure(num=1, figsize=(12, 8), dpi=80, clear=True)
for i in range(len(test_x)):
# velocity axis
label = torch.Tensor(test_y[i])
label = label.view([1, label.size(0)])
# dispersion axis
input = torch.Tensor(test_x[i])
input = input.view([1, input.size(0), input.size(1), input.size(2)])
# compute output
output = model_cpu(input) # output[batchsize,H,W]
test_loss += myloss(output, label).item() # sum up batch loss
# collect output loss, need to delete
loss_hist.append(myloss(output, label).item())
output = output.view([output.size(1)]).data.numpy()
vel_pred = output
vel_syn = test_y[i]
vs_label_pred.append([vel_pred, vel_syn])
# np.concatenate((vel_pred,vel_syn),axis=0)
# vs_label_pred = np.hstack((vel_pred[:, 1], vel_syn[:, 1]))
res = (vel_pred - vel_syn)
res_total.append(res)
rms_count = rms_count + np.sqrt(np.power(res, 2).sum() / len(res))
if option.plot:
            if i % 400 == 0:
# if True:
# print(vel_loc[i].split('_'))
# lat, lon = vel_loc[i].split('_')
# lat = float(lat)
# lon = float(lon)
plt.plot(test_y[i], np.arange(0, len(output), 1)*0.5, '-.', color='red')
plt.plot(output, np.arange(0, len(output), 1)*0.5, '-', color='green')
plt.title('True')
plt.xlabel('Vs(km/s)')
plt.ylabel('Depth(km)')
plt.gca().invert_yaxis()
plt.savefig('./Figs/Fitting_epoch' + str(epoch) + '_' + vel_loc[i] + '.png', dpi=300)
plt.pause(0.01)
fig.clear()
pass
average_loss = test_loss / len(test_x)
rms_count = rms_count / len(test_x)
loss_hist = np.array(loss_hist)
res_total = np.array(res_total)
vs_label_pred = np.array(vs_label_pred)
print("===> Avg. test loss: {:.4f} {:.4f} ".format(average_loss, rms_count))
return average_loss, rms_count, loss_hist, res_total, vs_label_pred
def checkpoint(epoch, model):
torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % ('./model_para', epoch))
print("Checkpoint saved to {}".format('%s/model_epoch_%d.pth' % ('./model_para', epoch)))
def main():
os.system("test -d output|| mkdir output")
os.system("test -d Figs|| mkdir Figs")
os.system("test -d model_para|| mkdir model_para")
writer = SummaryWriter()
option = Config()
alpha = option.alpha
    # fix the seed, because PyTorch initializes weights randomly
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed_all(option.seed)
print("cuda is available")
else:
print("cuda is not available")
torch.manual_seed(option.seed)
print('===> Loading datasets')
batch_index, train_pos, test_pos = Reader().get_batch_file()
# Saving test information:
# with open('testinfo.pkl', 'wb') as f: # Python 3: open(..., 'wb')
# pickle.dump([batch_index, train_pos, test_pos], f)
print('===> Building net')
model = Net(image_width=17,
image_height=60,
image_outwidth=301,
image_outheight=1,
inchannel=2, outchannel=4)
# model = Unet(in_ch=2, out_ch=1,image_len=17,image_len_out=13)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
summary(model,(2,17,60))
print(model)
# write out network structure
# dummy_input = torch.rand(13, 2, 17, 60)
# ouput network structure
# with SummaryWriter(comment='Net') as w:
# w.add_graph(model, (dummy_input))
if option.pretrained:
if torch.cuda.is_available():
model.load_state_dict(torch.load(option.pretrain_net))
else:
model.load_state_dict(torch.load(option.pretrain_net,map_location={'cuda:0':'cpu'}))
else:
model.apply(weights_init)
if option.plot:
plt.ion()
# set optimizer
# optimizer = optim.Adam(model.parameters(), lr=option.lr, weight_decay=alpha)
optimizer = optim.RMSprop(model.parameters(), lr=option.lr, weight_decay=alpha)
# optimizer = optim.SGD(model.parameters(), lr=option.lr,weight_decay=alpha,momentum=0.9)
print('===> Training Net')
time_loc = list(map(str, list(time.localtime())))
time_loc = '_'.join(time_loc[0:-5])
f = open('output/epochInfo' + time_loc + '.txt', 'w')
for epoch in range(option.start, option.start + option.nEpochs + 1):
# adjust learning rate
# optimizer = adjust_learning_rate(option.lr,optimizer, epoch)
# training
tloss, norm_weight = training(epoch, model, train_pos, test_pos, batch_index, option, optimizer, alpha)
# validation
model.cpu()
vloss, vrms, vloss_hist, res_total, vs_label_pred = test(epoch, model, test_pos, train_pos, alpha, option)
model.to(device)
# write log file
writer = writerlogfile(writer, norm_weight, epoch, tloss, vloss, vrms, vloss_hist)
        if epoch % 5 == 0:
checkpoint(epoch, model)
elif epoch == 1:
checkpoint(epoch, model)
with open('./output/HistVal_TestVel_LabelVel_' + time_loc + 'epoch_' + str(epoch) + '.pkl', 'wb') as ff:
pickle.dump([res_total, vs_label_pred], ff)
string_output = "{}".format("%d %10.7f %10.7f %10.7f %s" % (epoch, tloss, vloss, vrms, '\n'))
f.write(string_output)
f.close()
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
print('Finished Training')
checkpoint(epoch, model)
if __name__ == '__main__':
main()
pass
``` |
{
"source": "jhsaraja/testiprojekti",
"score": 4
} |
#### File: jhsaraja/testiprojekti/person.py
```python
class Person(object):
""" Class Person for testing python.
Following packages need to be installed:
- requests
:param name: person's name, string
:param age: person's age, integer
:param phone: person's phone, string
:rtype: object
"""
def __init__(self, name, age, phone):
self.name = name
self.age = age
self.phone = phone
def print(self):
""" Method prints person's data.
:return: None
"""
print("Name: {}, age: {}, phone: {}".format(self.name, self.age, self.phone))
def set_name(self, name):
""" Method saves a new name for the person.
:param name: new name for the person, string
:return: None
"""
self.name = name
def get_name(self):
""" Method returns the name of the person.
:return: name, string
"""
return self.name
def set_age(self, age):
""" Method saves a new age for the person.
:param age: new age for the person, integer
:return: None
"""
if type(age) != int:
print("not valid age {}".format(age))
return
if age >= 0:
self.age = age
else:
print("not valid age {}".format(age))
def get_age(self):
""" Method returns the age of the person.
:return: age, integer
"""
return self.age
def set_phone(self, phone):
""" Method saves a new phone for the person.
:param phone: new phone for the person, string
:return: None
"""
self.phone = phone
def get_phone(self):
""" Method returns the phone of the person.
:return: phone, string
"""
return self.phone
class Employee(Person):
""" Class Employee for testing python.
:param name: person's name, string
:param age: person's age, integer
:param phone: person's phone, string
    :param title: person's title, string
    :param salary: person's salary, string
    :param location: person's location, string
:rtype: object
"""
def __init__(self, name, age, phone, title, salary, location):
super().__init__(name, age, phone)
self.title = title
self.salary = salary
self.location = location
def get_title(self):
""" Method returns the title of the person.
:return: title, string
"""
return self.title
def set_title(self, title):
""" Method saves a new title for the person.
:param title: new title for the person, string
:return: None
"""
self.title = title
def get_salary(self):
""" Method returns the salary of the person.
:return: salary, string
"""
return self.salary
def set_salary(self, salary):
""" Method saves a new salary for the person.
:param salary: new salary for the person, string
:return: None
"""
if salary >= 0:
self.salary = salary
def get_location(self):
""" Method returns the location of the person.
:return: location, string
"""
return self.location
def set_location(self, location):
""" Method saves a new location for the person.
:param location: new location for the person, string
:return: None
"""
self.location = location
def print_businesscard(self):
""" Method prints a business card information.
:return: None
"""
print(" Name: {}\n Title: {}\n Phone: {}".format(self.name, self.title, self.phone))
``` |
{
"source": "jhscheer/angrdbg",
"score": 2
} |
#### File: angrdbg/angrdbg/got_builder.py
```python
from context import get_debugger
'''
def get_other_symbols_addrs(proj):
i = 1
while True:
try:
sym = proj.loader.main_object.get_symbol(i)
except: break
if sym.rebased_addr > 0:
yield sym.name, sym.relative_addr
i += 1
'''
def build_cle_got(proj, state):
debugger = get_debugger()
try:
got_start, got_end = debugger.get_got()
except BaseException:
print "angrdbg: cannot find .got.plt section"
return state
entry_len = proj.arch.bits / 8
got_start += 3 * entry_len # skip first 3 entries
'''
print "## angr got - before ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
empty_state = proj.factory.blank_state()
state.memory.store(
got_start,
empty_state.memory.load(
got_start,
got_end -
got_start))
'''
print "## angr got - final ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
return state
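# build_mixed_got: for GOT entries that angr maps to stub SimProcedures, prefer the
# address already resolved by the loader in the debugged process (anything outside
# .plt); otherwise ask the debugger to resolve the symbol by name.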
def build_mixed_got(proj, state):
debugger = get_debugger()
try:
got_start, got_end = debugger.get_got()
except BaseException:
print "angrdbg: cannot find .got.plt section"
return state
try:
plt_start, plt_end = debugger.get_plt()
except BaseException:
print "angrdbg: cannot find .plt section"
return state
entry_len = proj.arch.bits / 8
get_mem = debugger.get_dword if entry_len == 4 else debugger.get_qword
got_start += 3 * entry_len # skip first 3 entries
'''
print "## angr got - before ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
empty_state = proj.factory.blank_state()
for a in xrange(got_start, got_end, entry_len):
state_val = empty_state.solver.eval(
getattr(
empty_state.mem[a],
"uint%d_t" %
proj.arch.bits).resolved)
if state_val in proj._sim_procedures:
if proj._sim_procedures[state_val].is_stub: # real simprocs or not?
dbg_val = get_mem(a)
name = proj._sim_procedures[state_val].display_name
if dbg_val >= plt_end or dbg_val < plt_start: # already resolved by the loader in the dbg
setattr(state.mem[a], "uint%d_t" % proj.arch.bits, dbg_val)
else:
ea = debugger.resolve_name(name)
if ea is not None:
setattr(state.mem[a], "uint%d_t" % proj.arch.bits, ea)
'''
print "## angr got - final ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
return state
def build_bind_now_got(proj, state):
debugger = get_debugger()
try:
got_start, got_end = debugger.get_got()
except BaseException:
print "angrdbg: cannot find .got.plt section"
return state
try:
plt_start, plt_end = debugger.get_plt()
except BaseException:
print "angrdbg: cannot find .plt section"
return state
entry_len = proj.arch.bits / 8
get_mem = debugger.get_dword if entry_len == 4 else debugger.get_qword
got_start += 3 * entry_len # skip first 3 entries
'''
print "## angr got - before ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
empty_state = proj.factory.blank_state()
for a in xrange(got_start, got_end, entry_len):
state_val = empty_state.solver.eval(
getattr(
empty_state.mem[a],
"uint%d_t" %
proj.arch.bits).resolved)
if state_val in proj._sim_procedures:
dbg_val = get_mem(a)
name = proj._sim_procedures[state_val].display_name
if dbg_val >= plt_end or dbg_val < plt_start: # already resolved by the loader in the dbg
setattr(state.mem[a], "uint%d_t" % proj.arch.bits, dbg_val)
else:
ea = debugger.resolve_name(name)
if ea is not None:
setattr(state.mem[a], "uint%d_t" % proj.arch.bits, ea)
'''
print "## angr got - final ##"
for a in xrange(got_start, got_end, entry_len):
print "0x%x: 0x%x" % (a, state.solver.eval(getattr(state.mem[a], "uint%d_t" % proj.arch.bits).resolved))
print
'''
return state
``` |
{
"source": "jhseceng/Cloud-AWS",
"score": 2
} |
#### File: Control-Tower-For-Horizon/lambda/create_horizon_stackset_lambda.py
```python
import json
import logging
# from botocore.vendored import requests
import random
import string
import time
import boto3
import os
import requests
from botocore.exceptions import ClientError
# Falcon SDK - Cloud_Connect_AWS and OAuth2 API service classes
from falconpy import cspm_registration as CSPM
from falconpy import oauth2 as FalconAuth
logger = logging.getLogger()
logger.setLevel(logging.INFO)
CF = "https://cs-prod-cloudconnect-templates.s3.amazonaws.com/aws_cspm_cloudformation_v2.json"
CF_NO_CT = "https://cs-prod-cloudconnect-templates.s3.amazonaws.com/aws_cspm_cloudformation_v2_no_cloudtrail.json"
STACKSETURL = "https://cs-prod-cloudconnect-templates.s3.amazonaws.com/aws_cspm_cloudformation_iam_v2.json"
STACKSETNAME = 'CrowdStrike-CSPM-Integration'
STACKNAME = 'CrowdStrike-CSPM-Integration'
SUCCESS = "SUCCESS"
FAILED = "FAILED"
CT_REGION = os.environ['CT_REGION']
SECRET_NAME = os.environ['SECRET_ARN']
QSS3BucketName = os.environ['QSS3BucketName']
CSRoleName = os.environ['CSRoleName']
CSAccountNumber = os.environ['CSAccountNumber']
def cfnresponse_send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False):
responseUrl = event['ResponseURL']
print(responseUrl)
responseBody = {'Status': responseStatus,
'Reason': 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
'PhysicalResourceId': physicalResourceId or context.log_stream_name, 'StackId': event['StackId'],
'RequestId': event['RequestId'], 'LogicalResourceId': event['LogicalResourceId'], 'NoEcho': noEcho,
'Data': responseData}
json_responseBody = json.dumps(responseBody)
print("Response body:\n" + json_responseBody)
headers = {
'content-type': '',
'content-length': str(len(json_responseBody))
}
try:
response = requests.put(responseUrl,
data=json_responseBody,
headers=headers)
print("Status code: " + response.reason)
except Exception as e:
print("send(..) failed executing requests.put(..): " + str(e))
def get_secret_value(secret):
# key = 'falcon_client_secret'
key = secret
SM = boto3.client('secretsmanager')
secret_list = SM.list_secrets()['SecretList']
output = {}
for s in secret_list:
if key in s.values():
output = SM.get_secret_value(SecretId=key)['SecretString']
return output
def get_master_id():
""" Get the master Id from AWS Organization - Only on master"""
masterID = ''
ORG = boto3.client('organizations')
try:
masterID = ORG.list_roots()['Roots'][0]['Arn'].rsplit(':')[4]
return masterID
except Exception as e:
logger.error('This stack runs only on the Master of the AWS Organization')
return False
def create_horizon_stackset(paramList, AdminRoleARN, ExecRole, cList):
""" Launch CRWD Discover Stackset on the Master Account """
CFT = boto3.client('cloudformation')
result = {}
if len(paramList):
try:
result = CFT.create_stack_set(StackSetName=STACKSETNAME,
Description='Roles for CRWD-Horizon',
TemplateURL=STACKSETURL,
Parameters=paramList,
PermissionModel='SERVICE_MANAGED',
Capabilities=cList,
AutoDeployment={
'Enabled': True,
'RetainStacksOnAccountRemoval': False
}, )
return result
except ClientError as e:
if e.response['Error']['Code'] == 'NameAlreadyExistsException':
logger.info("StackSet already exists")
result['StackSetName'] = 'CRWD-ROLES-CREATION'
return result
else:
logger.error("Unexpected error: %s" % e)
result['Status'] = e
return result
def get_random_alphanum_string(stringLength=15):
lettersAndDigits = string.ascii_letters + string.digits
return ''.join((random.choice(lettersAndDigits) for i in range(stringLength)))
def delete_stackset(stacksetName):
CFT = boto3.client('cloudformation')
try:
stackset_result = CFT.describe_stack_set(StackSetName=stacksetName)
if stackset_result and 'StackSet' in stackset_result:
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
while 'NextToken' in stackset_instances:
stackinstancesnexttoken = stackset_instances['NextToken']
morestackinstances = CFT.list_stack_instances(NextToken=stackinstancesnexttoken)
stackset_instances["Summaries"].extend(morestackinstances["Summaries"])
if len(stackset_instances["Summaries"]) > 0:
stack_instance_members = [x["Account"] for x in stackset_instances["Summaries"]]
stack_instance_regions = list(set(x["Region"] for x in stackset_instances["Summaries"]))
CFT.delete_stack_instances(
StackSetName=stacksetName,
Accounts=stack_instance_members,
Regions=stack_instance_regions,
OperationPreferences={'MaxConcurrentCount': 3},
RetainStacks=False
)
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
counter = 2
while len(stackset_instances["Summaries"]) > 0 and counter > 0:
logger.info("Deleting stackset instance from {}, remaining {}, "
"sleeping for 10 sec".format(stacksetName, len(stackset_instances["Summaries"])))
time.sleep(10)
counter = counter - 1
stackset_instances = CFT.list_stack_instances(StackSetName=stacksetName)
if counter > 0:
CFT.delete_stack_set(StackSetName=stacksetName)
logger.info("StackSet {} deleted".format(stacksetName))
else:
logger.info("StackSet {} still has stackset instance, skipping".format(stacksetName))
return True
except ClientError as e:
if e.response['Error']['Code'] == 'StackSetNotFoundException':
logger.info("StackSet {} does not exist".format(stacksetName))
return True
else:
logger.error("Unexpected error: %s" % e)
return False
def deregister_falcon_horizon_account() -> dict:
account_id = get_master_id()
organizational_unit_id = get_org_id()
auth_token = get_cs_token()
cspm_api = CSPM.CSPM_Registration(access_token=auth_token)
# ids=account_id,
try:
response = cspm_api.DeleteCSPMAwsAccount(ids=account_id, organization_ids=organizational_unit_id)
logger.info('Response to register = {}'.format(response))
if response["status_code"] == 201:
            logger.info('Account deregistered')
return response['body']['resources'][0]
elif response["status_code"] == 409:
            logger.info('Account not registered - nothing to do')
return response['resources'][0]
else:
error_code = response["status_code"]
error_msg = response["errors"][0]["message"]
            logger.info('Account deregistration failed - Response {} {}'.format(error_code, error_msg))
return response['resources'][0]
except Exception as e:
logger.info('Got exception {}'.format(e))
def register_falcon_horizon_account() -> dict:
account_id = get_master_id()
organizational_unit_id = get_org_id()
api_message = format_registration_message(account_id, organizational_unit_id)
auth_token = get_cs_token()
cpsm_api = CSPM.CSPM_Registration(access_token=auth_token)
# _result = register_falcon_discover_account(falcon_discover, api_message)
try:
response = cpsm_api.CreateCSPMAwsAccount(api_message)
logger.info('Response to register = {}'.format(response))
if response["status_code"] == 201:
logger.info('Account Registered')
return response['body']['resources'][0]
elif response["status_code"] == 409:
logger.info('Account already registered - nothing to do')
return response['resources'][0]
else:
error_code = response["status_code"]
error_msg = response["errors"][0]["message"]
logger.info('Account Registration Failed - Response {} {}'.format(error_code, error_msg))
return response['resources'][0]
except Exception as e:
logger.info('Got exception {}'.format(e))
def get_secret(secret_key):
# Create a Secrets Manager client
SECRET_REGION = CT_REGION
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=CT_REGION)
try:
get_secret_value_response = client.get_secret_value(
SecretId=SECRET_NAME)
logger.debug('Got response code {} calling get_secret with key {}'.format(
get_secret_value_response['ResponseMetadata']['HTTPStatusCode'], secret_key))
if 'SecretString' in get_secret_value_response:
secret_dict = json.loads(get_secret_value_response['SecretString'])
logger.info('Got secret {} '.format(secret_key))
return secret_dict[secret_key]
else:
return None
except Exception as e:
        logger.info('Got Error {} retrieving secret'.format(e))
def get_cs_token():
try:
region_name = CT_REGION
client_id = get_secret('FalconClientId')
secret = get_secret('FalconSecret')
authorized = FalconAuth.OAuth2(
creds={
'client_id': client_id,
'client_secret': secret
})
except Exception as e:
# We can't communicate with the endpoint, return a false token
logger.info('Got Exception {} generating oauth2 token'.format(e))
authorized.token = lambda: False
# Try to retrieve a token from our authentication, returning false on failure
try:
token = authorized.token()["body"]["access_token"]
return token
except:
logger.info('Unable to generate authorized token for CS API')
# Confirm the token was successfully retrieved
def get_org_id():
''' Get the organization ID for this account'''
ORG = boto3.client('organizations')
orgId = ORG.describe_organization()['Organization']['Id']
return (orgId)
def format_registration_message(account_id, organization_id):
data = {
"resources": [
{
"organization_id": organization_id,
"cloudtrail_region": CT_REGION,
"account_id": account_id,
}
]
}
logger.info('Post Data {}'.format(data))
return data
def format_deregistration_message(account_id, organization_id):
data = {
"resources": [
{
"organization_id": organization_id,
"account_id": account_id,
}
]
}
logger.info('Post Data {}'.format(data))
return data
def create_ioa_stack (paramList):
CFT = boto3.client('cloudformation')
cft_result = CFT.create_stack(
StackName=STACKNAME,
TemplateURL=CF_NO_CT,
Parameters=paramList,
TimeoutInMinutes=5,
Capabilities=[
'CAPABILITY_NAMED_IAM',
],
Tags=[
{
'Key': 'Vendor',
'Value': 'CrowdStrike'
},
],
)
if cft_result:
logger.info('Created Stack {}'.format(cft_result.get('StackId')))
return True
else:
return False
def check_stack_complete():
CFT = boto3.client('cloudformation')
try:
# Monitor the status of the stack. We are looking for 'CURRENT'
state = 'UNKNOWN'
timeout = 300 # [seconds]
timeout_start = time.time()
while time.time() < timeout_start + timeout:
time.sleep(10)
if state == 'CREATE_COMPLETE':
# Stack has been created.
return True
else:
stack_status = CFT.describe_stacks(StackName=STACKNAME)
state = stack_status['Stacks'][0]['StackStatus']
return False
except Exception as e:
        logger.info('Unable to determine stack status')
return False
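# lambda_handler: CloudFormation custom-resource entry point. On Create it registers
# the AWS organization with CrowdStrike Horizon (CSPM), launches the IAM StackSet and
# the IOA stack, then signals SUCCESS/FAILED back to CloudFormation; on Delete it
# deregisters the account and tears the StackSet down.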
def lambda_handler(event, context):
try:
AccountId = get_master_id()
#
# Moved to virtual hosted-style URLs.
# See https://aws.amazon.com/fr/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/
        # path-style URLs are being deprecated
#
cList = ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']
ExecRole = 'AWSControlTowerExecution'
AdminRoleARN = 'arn:aws:iam::' + AccountId + ':role/service-role/AWSControlTowerStackSetRole'
logger.info('EVENT Received: {}'.format(event))
response_data = {}
if event['RequestType'] in ['Create']:
logger.info('Event = ' + event['RequestType'])
register_result = register_falcon_horizon_account()
logger.info('Account registration result: {}'.format(register_result))
external_id = register_result['external_id']
RoleName = register_result['iam_role_arn'].split('/')[-1]
# Parameters for CRWD-Discover stackset
horizon_stackset_paramList = []
keyDict = {}
keyDict['ParameterKey'] = 'ExternalID'
keyDict['ParameterValue'] = register_result['external_id']
horizon_stackset_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'RoleName'
keyDict['ParameterValue'] = RoleName
horizon_stackset_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'CSRoleName'
keyDict['ParameterValue'] = CSRoleName
horizon_stackset_paramList.append(dict(keyDict))
keyDict['ParameterKey'] = 'CSAccountNumber'
keyDict['ParameterValue'] = CSAccountNumber
horizon_stackset_paramList.append(dict(keyDict))
logger.debug('CRWD_Horizon Stackset ParamList:{}'.format(horizon_stackset_paramList))
# Create the Stackset
CRWD_Horizon_result = create_horizon_stackset(horizon_stackset_paramList, AdminRoleARN,
ExecRole, cList)
logger.info('CRWD-Horizon Stackset: {}'.format(CRWD_Horizon_result))
horizon_ioa_stack_paramList = []
IOAkeyDict = {}
IOAkeyDict['ParameterKey'] = 'ExternalID'
IOAkeyDict['ParameterValue'] = register_result['external_id']
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
IOAkeyDict['ParameterKey'] = 'RoleName'
IOAkeyDict['ParameterValue'] = RoleName
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
IOAkeyDict['ParameterKey'] = 'CSEventBusName'
IOAkeyDict['ParameterValue'] = register_result['eventbus_name']
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
IOAkeyDict['ParameterKey'] = 'CSBucketName'
IOAkeyDict['ParameterValue'] = register_result['aws_cloudtrail_bucket_name']
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
IOAkeyDict['ParameterKey'] = 'CSRoleName'
IOAkeyDict['ParameterValue'] = CSRoleName
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
IOAkeyDict['ParameterKey'] = 'CSAccountNumber'
IOAkeyDict['ParameterValue'] = CSAccountNumber
horizon_ioa_stack_paramList.append(dict(IOAkeyDict))
ioa_stack_id = create_ioa_stack(horizon_ioa_stack_paramList)
logger.info('Created Stack with id {}'.format(ioa_stack_id))
stack_complete = check_stack_complete()
if CRWD_Horizon_result and stack_complete:
cfnresponse_send(event, context, SUCCESS, CRWD_Horizon_result, "CustomResourcePhysicalID")
return
else:
cfnresponse_send(event, context, FAILED, CRWD_Horizon_result, "CustomResourcePhysicalID")
return
elif event['RequestType'] in ['Update']:
logger.info('Event = ' + event['RequestType'])
cfnresponse_send(event, context, 'SUCCESS', response_data, "CustomResourcePhysicalID")
return
elif event['RequestType'] in ['Delete']:
logger.info('Event = ' + event['RequestType'])
deregister_falcon_horizon_account()
delete_stackset(STACKSETNAME)
response_data["Status"] = "Success"
cfnresponse_send(event, context, 'SUCCESS', response_data, "CustomResourcePhysicalID")
return
raise Exception
except Exception as e:
logger.error(e)
response_data = {"Status": str(e)}
cfnresponse_send(event, context, 'FAILED', response_data, "CustomResourcePhysicalID")
return
if __name__ == '__main__':
event = {
"RequestType": "Create",
"ResponseURL": "http://pre-signed-S3-url-for-response",
"StackId": "arn:aws:cloudformation:eu-west-1:123456789012:stack/MyStack/guid",
"RequestId": "unique id for this create request",
"ResourceType": "Custom::TestResource",
"LogicalResourceId": "MyTestResource",
"ResourceProperties": {
"StackName": "MyStack",
"List": [
"1",
"2",
"3"
]
}
}
context = ()
lambda_handler(event, context)
``` |
{
"source": "jhseceng/se-cwp-demo",
"score": 2
} |
#### File: se-cwp-demo/fig/SqsClient.py
```python
import json
import boto3
AWS_SQS_QUEUE_NAME = "test-queue"
class SQSQueue(object):
def __init__(self, queue_name: str, region_name: str):
self.queue_name = queue_name
self.region_name = region_name
self.resource = boto3.resource("sqs", region_name=self.region_name)
self.queue = self.resource.get_queue_by_name(QueueName=self.queue_name)
def __repr__(self):
return f"Queue name is {self.queue_name} and region is {self.region_name}"
def __str__(self):
return f"Queue name is {self.queue_name} and region is {self.region_name}"
def send(self, send_message=None) -> object:
if send_message is None:
send_message = {}
response_data = json.dumps(send_message)
response = self.queue.send_message(MessageBody=response_data)
return response
def receive(self):
try:
queue = self.resource.get_queue_by_name(QueueName=self.queue_name)
for message in queue.receive_messages():
assert isinstance(message.body, object)
data = json.loads(message.body)
message.delete()
return data
except Exception as e:
print(e)
return []
if __name__ == "__main__":
q = SQSQueue(queue_name=AWS_SQS_QUEUE_NAME, region_name="eu-west-1")
json_dict = {
"metadata": {
"customerIDString": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"offset": 14947764,
"eventType": "DetectionSummaryEvent",
"eventCreationTime": 1536846439000,
"version": "1.0",
},
"event": {
"ProcessStartTime": 1536846339,
"ProcessEndTime": 0,
"ProcessId": 38684386611,
"ParentProcessId": 38682494050,
"ComputerName": "CS-SE-EZ64",
"UserName": "demo",
"DetectName": "Process Terminated",
"DetectDescription": "Terminated a process related to the deletion of backups,which is often indicative "
"of ransomware activity.",
"Severity": 4,
"SeverityName": "High",
"FileName": "explorer.exe",
"FilePath": "\\Device\\HarddiskVolume1\\Windows",
"CommandLine": "C:\\Windows\\Explorer.EXE",
"SHA256String": "6a671b92a69755de6fd063fcbe4ba926d83b49f78c42dbaeed8cdb6bbc57576a",
"MD5String": "ac4c51eb24aa95b77f705ab159189e24",
"MachineDomain": "CS-SE-EZ64",
"FalconHostLink": "https:\/\/falcon.crowdstrike.com\/activity\/detections\/detail\/ec86abd353824e96765ecbe18eb4f0b4\/38655257584?_cid=xxxxxxxxxxxxxxxxxx",
"SensorId": "ec86abd353824e96765ecbe18eb4f0b4",
"DetectId": "ldt:ec86abd353824e96765ecbe18eb4f0b4:38655257584",
"LocalIP": "xx.xx.xx.xx",
"MACAddress": "xx-xx-xx-xx-xx",
"Tactic": "Malware",
"Technique": "Ransomware",
"Objective": "Falcon Detection Method",
"PatternDispositionDescription": "Prevention,process killed.",
"PatternDispositionValue": 16,
"PatternDispositionFlags": {
"Indicator": "false",
"Detect": "false",
"InddetMask": "false",
"SensorOnly": "false",
"Rooting": "false",
"KillProcess": "true",
"KillSubProcess": "false",
"QuarantineMachine": "false",
"QuarantineFile": "false",
"PolicyDisabled": "false",
"KillParent": "false",
"OperationBlocked": "false",
"ProcessBlocked": "false",
},
},
}
Message = json_dict
response = q.send(send_message=Message)
# print(response)
data = q.receive()
print(data)
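# Drain sketch (not part of the original demo): q.receive() decodes and deletes a single
# message per call and returns None or [] when nothing is left, so the queue can be
# emptied with a simple loop.
while True:
    item = q.receive()
    if not item:
        break
    print(item)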
``` |
{
"source": "JHSeng/ros_experiment",
"score": 2
} |
#### File: mmbot_control/scripts/avoid.py
```python
import rospy
import math
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class Mmbot:
def __init__(self):
rospy.init_node('run_avoid', anonymous=True)
self.velocity_publisher = rospy.Publisher(
'cmd_vel', Twist, queue_size=5)
self.pose_subscriber = rospy.Subscriber(
'/scan', LaserScan, self.update_ranges)
self.laser_scan = LaserScan()
self.rate = rospy.Rate(100)
def update_ranges(self, data):
print("This in the callback function: ")
self.laser_scan = data
print(data.ranges[0])
def run(self):
__rate = rospy.Rate(1)
__rate.sleep()
currentMessage = Twist()
currentMessage.linear.x = 0.2
while not rospy.is_shutdown():
r = self.laser_scan.ranges
if (r[2] < 1 or r[3] < 1):
currentMessage.angular.z = 2
elif (r[7] < 1 or r[6] < 1):
currentMessage.angular.z = -2
else:
currentMessage.angular.z = 0
if (r[4] < 1):
currentMessage.angular.z = 2
elif (r[5] < 1):
currentMessage.angular.z = -2
else:
currentMessage.angular.z = 0
self.velocity_publisher.publish(currentMessage)
self.rate.sleep()
if __name__ == '__main__':
try:
mmbot = Mmbot()
mmbot.run()
except rospy.ROSInterruptException:
pass
``` |
{
"source": "jhshanyu2008/tensorflow-QT_display-Demo",
"score": 3
} |
#### File: jhshanyu2008/tensorflow-QT_display-Demo/data_load_credit.py
```python
import numpy as np
import random
import tensorflow as tf
import os
import os.path as osp
root_dir = osp.dirname(__file__)
record_dir = osp.join(root_dir, 'data')
DATA_FILE = "credit_card.txt"
label_dic = {0: [0, 1],
1: [1, 0]}
# Generate training and test sets from TXT file
def load_data(data_file=DATA_FILE):
"""
The first 492 rows of credit_card are fraudulent records; the 4920 rows after them are normal records.
Of the fraudulent records, the first 400 are used for training and 92 for testing; of the normal records, 4000 are used for training and 920 for testing.
To balance the fraudulent and normal classes, the fraudulent records are duplicated until they match the number of normal records.
This finally gives 8000 training samples and 1840 test samples.
"""
train_list = []
test_list = []
with open(data_file) as f:
iter = 0
f_data_train = []
t_data_train = []
f_data_test = []
t_data_test = []
# form raw training and test lists
for sample in f.readlines():
iter += 1
# map function does a same implementation for every member of list
values = list(map(float, sample.strip().split()))
if iter <= 400:
f_data_train.append(values)
elif iter <= 492:
f_data_test.append(values)
elif iter <= 4492:
t_data_train.append(values)
else:
t_data_test.append(values)
# Balance the number of right and fault samples
for i in range(len(t_data_train) // len(f_data_train)):
train_list.extend(f_data_train)
train_list.extend(t_data_train)
random.shuffle(train_list)
train_array = np.array(train_list)
# Balance the number of right and fault samples
for i in range(len(t_data_test) // len(f_data_test)):
test_list.extend(f_data_test)
test_list.extend(t_data_test)
test_array = np.array(test_list)
return train_array[:, :-1], train_array[:, -1], test_array[:, :-1], test_array[:, -1]
# Merge the samples with their labels and form .tfrecords file
def create_data_record(data_array, label_array, tf_file):
if not osp.exists(record_dir):
os.makedirs(record_dir)
writer = tf.python_io.TFRecordWriter(tf_file)
length = len(label_array)
for i in range(length):
data = data_array[i, :]
# extend it to 3-dim data which seems like a picture
data = data[np.newaxis, :, np.newaxis]
data_raw = data.tobytes()
data_label = int(label_array[i])
# merge sample and its label into an example
example = tf.train.Example(
features=tf.train.Features(
feature={
"data_label": tf.train.Feature(int64_list=tf.train.Int64List(value=[data_label])),
"data_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[data_raw]))
}
)
)
writer.write(example.SerializeToString())
writer.close()
# Form training .tfrecord file
def create_training_record(train_array, label_array, tf_file='data/train_credit.tfrecords'):
create_data_record(train_array, label_array, tf_file)
# Form test .tfrecord file
def create_test_record(test_array, label_array, tf_file='data/test_credit.tfrecords'):
create_data_record(test_array, label_array, tf_file)
# Read .tfrecord file and decode its content
def read_data_record(file_name):
# read a series of files, here is only one member anyway
filename_queue = tf.train.string_input_producer([file_name], shuffle=False, num_epochs=None)
reader = tf.TFRecordReader()
key, serialized_example = reader.read(filename_queue)
# read content from serialized_example
features = tf.parse_single_example(
serialized_example,
features={
'data_label': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string)
}
)
# get content by name
label_raw = features['data_label']
data_raw = features['data_raw']
# decode and reshape
data = tf.decode_raw(data_raw, tf.float64)
data = tf.reshape(data, [1 * 28 * 1])
return data, label_raw
# Read training .tfrecord file
def read_training_record(file_name='data/train_credit.tfrecords'):
return read_data_record(file_name)
# Read test .tfrecord file
def read_test_record(file_name='data/test_credit.tfrecords'):
return read_data_record(file_name)
if __name__ == '__main__':
# These codes will automatically generate training and test .tfrecord files
print('creating tf records')
train_data, train_labels, test_data, test_labels = load_data()
create_training_record(train_data, train_labels)
create_test_record(test_data, test_labels)
print('create mission completed')
sess = tf.InteractiveSession()
train_sample, train_label = read_training_record()
# Read the training data from the queue, batch size is 5
train_sample_batch, train_label_batch = tf.train.batch([train_sample, train_label], batch_size=5)
test_sample, test_label = read_test_record()
# Read the test data from the queue, batch size is 180
test_sample_batch, test_label_batch = tf.train.batch([test_sample, test_label], batch_size=180)
# Run the queue
threads = tf.train.start_queue_runners(sess=sess)
init = tf.local_variables_initializer()
sess.run(init)
# Get training samples from queue
def get_training_data():
data, labels = sess.run([train_sample_batch, train_label_batch])
new_labes = []
for i in range(len(labels)):
new_labes.append(label_dic[labels[i]])
return data, np.array(new_labes)
# Get test samples from queue
def get_test_data():
data, labels = sess.run([test_sample_batch, test_label_batch])
new_labes = []
for i in range(len(labels)):
new_labes.append(label_dic[labels[i]])
return data, np.array(new_labes)
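# Usage sketch (not part of the original script): pull one batch from each queue to
# confirm the shapes of the samples and their one-hot labels.
train_batch, train_onehot = get_training_data()
test_batch, test_onehot = get_test_data()
print(train_batch.shape, train_onehot.shape, test_batch.shape, test_onehot.shape)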
```
#### File: tensorflow-QT_display-Demo/Qt_display/display_window_FC.py
```python
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QMessageBox
from PyQt5.QtCore import pyqtSlot
# from Qt_display.display_window_UI import Ui_MainWindow
from Qt_display.display_window_UI import Ui_MainWindow
class Link_MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
# use the super init
super(Link_MainWindow, self).__init__()
# self.setupUi(self)
@pyqtSlot()
def on_clear_button_clicked(self):
self.display_txtbox.setText('')
QMessageBox.information(self, 'Notice', 'Cleared successfully')
@pyqtSlot()
def on_input_button_clicked(self):
append_str = self.input_txtbox.text()
self.display_txtbox.append(append_str)
@pyqtSlot(int)
def on_test_dial_valueChanged(self, value):
self.test_slider.setValue(value)
@pyqtSlot(int)
def on_test_slider_valueChanged(self, value):
self.test_dial.setValue(value)
@pyqtSlot()
def on_quit_button_clicked(self):
decision = QMessageBox.question(self, 'Warning', 'Quit this program?',
QMessageBox.Yes | QMessageBox.No)
if decision == QMessageBox.Yes:
self.close()
class Func_MainWindow(Link_MainWindow):
def __init__(self):
super(Func_MainWindow, self).__init__()
# send the message to the txtbox
def send_to_display(self, message):
self.display_txtbox.append(message)
if __name__ == '__main__':
# Before running this test to check the UI, you should uncomment line 12 first.
import sys
app = QtWidgets.QApplication(sys.argv)
Main_window = Func_MainWindow()
Main_window.show()
sys.exit(app.exec_())
``` |
{
"source": "jhshin0717/pae_cralwer",
"score": 2
} |
#### File: naver/cafe/common.py
```python
from enum import IntEnum
class Cafe:
def __init__(self, id, name, addr):
self.id = id
self.name = name
self.addr = addr
class Post:
def __init__(self, id, title, writer, time, read, comments, postlink, commentlink):
self.id = id
self.title = title
self.writer = writer
self.time = time
self.read = read
self.comments = comments
self.postlink = postlink
self.commentlink = commentlink
def __str__(self):
return str(self.id) + ' ' + self.title + ' ' + '[' + str(self.comments) + ']' + ' ' + self.time + ' ' + 'read: ' + str(self.read)
class QueryType(IntEnum):
TOP_VIEW = 1
TOP_COMMENT = 2
class Range(IntEnum):
PAST_2_HOURS = 1
PAST_4_HOURS = 2
PAST_24_HOURS = 3
PAST_WEEK = 4
class Field(IntEnum):
ID = 0
TITLE = 1
WRITER = 2
TIME = 3
READ = 5
def get_cafe_page_addr(clubid, page_num):
addr = 'http://cafe.naver.com/ArticleList.nhn'
clubid_search = '?search.clubid=' + str(clubid)
menu_search = '&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.questionTab=A&search.totalCount=501'
page_search = '&search.page=' + str(page_num)
return addr + clubid_search + menu_search + page_search
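# Usage sketch (not part of the original module; the club id below is a placeholder value):
if __name__ == '__main__':
    print(get_cafe_page_addr(12345678, 1))
    # -> http://cafe.naver.com/ArticleList.nhn?search.clubid=12345678&userDisplay=50...&search.page=1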
```
#### File: agent/naver/login.py
```python
from agent import driver_util
def login_by_webdriver(id, pw):
driver = driver_util.get_driver()
driver.get('https://nid.naver.com/nidlogin.login')
driver.find_element_by_name('id').send_keys(id)
driver.find_element_by_name('pw').send_keys(pw)
driver.find_element_by_css_selector(
'#frmNIDLogin > fieldset > input').click()
``` |
{
"source": "jhshi/wltrace",
"score": 2
} |
#### File: wltrace/wltrace/radiotap.py
```python
import struct
import binascii
import common
import utils
import dot11
_IT_VERSION = 0
_CHANNEL_FLAG_TURBO = 0x0010
_CHANNEL_FLAG_CCK = 0x0020
_CHANNEL_FLAG_OFDM = 0x0040
_CHANNEL_FLAG_2GHZ = 0x0080
_CHANNEL_FLAG_5GHZ = 0x0100
_CHANNEL_FLAG_PASSIVE_ONLY = 0x0200
_CHANNEL_FLAG_DYNAMIC = 0x0400
_CHANNEL_FLAG_GFSK = 0x0800
_FLAG_HAS_FCS = 0x10
_FLAG_FCS_ERROR = 0x40
_MCS_KNOWN_BANDWIDTH = 0x01
_MCS_KNOWN_MCS = 0x02
_MCS_KNOWN_GI = 0x04
_MCS_KNOWN_HT = 0x08
_MCS_FLAG_BANDWIDTH = 0x03
_MCS_FLAG_GI = 0x04
_MCS_FLAG_HT = 0x08
_PRESENT_FLAG_TSFT = 1 << 0
_PRESENT_FLAG_FLAG = 1 << 1
_PRESENT_FLAG_RATE = 1 << 2
_PRESENT_FLAG_CHANNEL = 1 << 3
_PRESENT_FLAG_SIGNAL = 1 << 5
_PRESENT_FLAG_NOISE = 1 << 6
_PRESENT_FLAG_MCS = 1 << 19
_PRESENT_FLAG_AMPDU = 1 << 20
class RadiotapHeader(common.GenericHeader):
"""Radiotap header.
See this document for radiotap header format:
http://www.radiotap.org/
See this document for all defined radiotap fields:
http://www.radiotap.org/defined-fields/all
"""
PACK_PATTERN = '<BBHI'
"""Radiotap header is always in little endian.
"""
FIELDS = [
'_it_version',
'_it_pad',
'_it_len',
'_it_present',
]
PRESENT_FLAGS = [
# (idx, unpack_fmt, field, align)
(0, 'Q', 'mactime', 8),
(1, 'B', '_flags', 1),
(2, 'B', 'rate', 1),
(3, 'I', '_channel', 2),
(4, 'xx', 'unused', 1),
(5, 'b', 'signal', 1),
(6, 'b', 'noise', 1),
(7, 'xx', 'unused', 2),
(8, 'xx', 'unused', 2),
(9, 'xx', 'unused', 2),
(10, 'x', 'unused', 1),
(11, 'x', 'unused', 1),
(12, 'x', 'unused', 1),
(13, 'x', 'unused', 1),
(14, 'xx', 'unused', 2),
(19, 'bbb', 'mcs', 1),
(20, 'IHxx', '_ampdu', 4),
]
def __init__(self, fh, *args, **kwargs):
cls = self.__class__
super(cls, self).__init__(fh, *args, **kwargs)
if self._it_version != _IT_VERSION:
raise Exception('Incorrect version: expect %d, got %d' %
(_IT_VERSION, self._it_version))
rest_len = self._it_len - struct.calcsize(cls.PACK_PATTERN)
rest = fh.read(rest_len)
if len(rest) != rest_len:
raise Exception('Short read: expect %d, got %d' %
(rest_len, len(rest)))
self.payload = rest
self.offset = 0
present = self._it_present
shift = 32
while (present >> 31) > 0:
present, = self.unpack('<I')
self._it_present = (present << shift) + self._it_present
shift += 32
for idx, fmt, field, align in cls.PRESENT_FLAGS:
if self._it_present & (1 << idx):
self.offset = utils.align_up(self.offset, align)
val = self.unpack(fmt)
if len(val) == 1:
val = val[0]
setattr(self, field, val)
else:
setattr(self, field, None)
if self._it_present & _PRESENT_FLAG_CHANNEL:
self.freq_mhz = self._channel & 0x0000ffff
self.freq_flag = self._channel >> 16
delattr(self, '_channel')
else:
self.freq_mhz = None
self.freq_flag = None
if self._it_present & _PRESENT_FLAG_FLAG:
self.has_fcs = self._flags & _FLAG_HAS_FCS > 0
self.fcs_error = self._flags & _FLAG_FCS_ERROR
else:
self.has_fcs = False
self.fcs_error = False
if self._it_present & _PRESENT_FLAG_RATE:
self.rate /= 2.0
else:
self.rate = 0
if self._it_present & _PRESENT_FLAG_MCS:
mcs_known, mcs_flags, self.mcs = self.mcs
if mcs_flags & 0x3 in [0, 2, 3]:
bw = 20
else:
bw = 40
long_gi = (mcs_flags & 0x4) == 0
self.rate = dot11.mcs_to_rate(self.mcs, bw, long_gi)
else:
self.mcs = None
if self._it_present & _PRESENT_FLAG_AMPDU:
self.ampdu_ref, ampdu_flag = self._ampdu
self.last_frame = ampdu_flag & 0x8 > 0
else:
self.ampdu_ref = None
self.last_frame = True
@classmethod
def from_phy_info(cls, phy):
header = cls()
header.freq_mhz = phy.freq_mhz
if header.freq_mhz < 3000:
header.freq_flag = _CHANNEL_FLAG_2GHZ | _CHANNEL_FLAG_OFDM
else:
header.freq_flag = _CHANNEL_FLAG_5GHZ | _CHANNEL_FLAG_OFDM
header._channel = (header.freq_flag << 16) + header.freq_mhz
header._flags = _FLAG_HAS_FCS
if phy.fcs_error:
header._flags |= _FLAG_FCS_ERROR
if phy.rate < 256:
header.rate = phy.rate
header.epoch_ts = phy.epoch_ts
header.len = phy.len
header.caplen = phy.caplen
return header
def to_phy(self):
return common.PhyInfo(**self.__dict__)
def to_binary(self):
cls = self.__class__
offset = 0
present_flag = 0
payload = ''
for idx, fmt, field, align in cls.PRESENT_FLAGS:
if getattr(self, field, None) is None:
continue
present_flag |= (1 << idx)
aligned_offset = utils.align_up(offset, align)
if aligned_offset != offset:
fmt = '%s%s' % ('x' * (aligned_offset - offset), fmt)
try:
attr = getattr(self, field)
if type(attr) != tuple:
attr = (attr, )
payload += struct.pack(fmt, *attr)
except:
raise Exception('%s: %s' % (field, getattr(self, field)))
offset += struct.calcsize(fmt)
header = struct.pack(cls.PACK_PATTERN, 0, 0,
struct.calcsize(cls.PACK_PATTERN) + len(payload),
present_flag)
return header + payload
``` |
{
"source": "Jhsmit/AtomicPlot",
"score": 3
} |
#### File: AtomicPlot/atomicplot/data.py
```python
__author__ = 'Smit'
import numpy as np
import operator
from atom.api import Atom, Dict, Str, Typed, Float, Event, List, observe
from .base import UpdateArray
from atomicplot.plot import Plot1D
from atomicplot.fit import Fit1D
# todo allow subslicing of dataset and dataobjects
# todo create general api thing to do operations on list in dataset (isnt that a for loop)
class DataBase(Atom):
""" base class for all objects holding data, DataObject and DataSet. Not to be confused with a database.
"""
metadata = Dict # todo special metadata dict? what did i mean with this?
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class DataObjectBase(DataBase):
"""Base class for DataObjects. Provides core functionality such as arithmetic operators
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __sub__(self, other):
return self.operate(other, operator.sub, self.func_op)
def __add__(self, other):
return self.operate(other, operator.add, self.func_op)
def __mul__(self, other):
return self.operate(other, operator.mul, self.func_op)
def __truediv__(self, other):
return self.operate(other, operator.truediv, self.func_op)
def __floordiv__(self, other):
return self.operate(other, operator.floordiv, self.func_op)
def __pow__(self, power, modulo=None):
if modulo is not None:
return XYDataObject(self.x, pow(self.y, power, modulo)) # todo add test for this
else:
return self.operate(power, operator.pow, self.func_op)
def __neg__(self):
return XYDataObject(self.x, -self.y)
def __abs__(self):
return XYDataObject(self.x, np.absolute(self.y))
def __isub__(self, other):
return self.operate(other, operator.sub, self.func_iop)
def __iadd__(self, other):
return self.operate(other, operator.add, self.func_iop)
def __imul__(self, other):
return self.operate(other, operator.mul, self.func_iop)
def __itruediv__(self, other):
# todo WARNING: numpy division rules are applied. Dividing array of ints gives floored result
return self.operate(other, operator.truediv, self.func_iop)
def __ifloordiv__(self, other):
return self.operate(other, operator.floordiv, self.func_iop)
def __ipow__(self, other):
return self.operate(other, operator.pow, self.func_iop)
def operate(self, other, op, func):
try:
y = other.y
assert np.array_equal(self.x, other.x) or ValueError('Unequal x arrays')
except AttributeError:
y = other
return func(self.x, op(self.y, y))
def func_iop(self, x, y): #kwargs?
self.y = y # This could be done more elegantly by using internal operators, but that won't trigger traits
return self
def func_op(self, x, y):
return XYDataObject(x, y) #todo if in the future different types of dataobjects are supported, find correct type and return it
def max(self):
return np.max(self.y)
def argmax(self):
#Todo this might be confusing because it returns the x value not the index like the numpy function
return self.x[np.argmax(self.y)]
def min(self):
return np.min(self.y)
def argmin(self):
return self.x[np.argmin(self.y)]
def mean(self):
return np.mean(self.y)
def std(self):
return np.std(self.y)
def var(self):
return np.var(self.y)
def median(self):
return np.median(self.y)
# todo allows resizing of _xdata and _ydata simultaneously without breaking plotting
#todo allow saving to file
class XYDataObject(DataObjectBase):
# might be up for a refactoring of the name if there are also going to be 2d data objects (XYDataObject, ListObject?)
""" has x and y array"""
label = Str('')
x = Typed(np.ndarray)
y = Typed(np.ndarray)
x_updated = Event(kind=bool)
y_updated = Event(kind=bool)
from atomicplot.fit import Fit1D
from atomicplot.plot import Plot1D
fit = Typed(Fit1D)
plot = Typed(Plot1D)
file_path = Str # Optional file path pointing to original data file
def __init__(self, x, y, *args, **kwargs):
if not isinstance(x, (np.ndarray, list)):
raise TypeError("xdata needs to be list or numpy array")
if not isinstance(y, (np.ndarray, list)):
raise TypeError("ydata needs to be list or numpy array")
if isinstance(x, np.ndarray):
if not len(x) == x.size:
raise ValueError("xdata is not an one-dimensional array")
if isinstance(y, np.ndarray):
if not len(y) == y.size:
raise ValueError("ydata is not an one-dimensional array")
if not len(x) == len(y):
raise ValueError("xdata, ydata have unequal length; found {}, {}".format(len(x), len(y)))
self.x = UpdateArray(x, self.x_updated)
self.y = UpdateArray(y, self.y_updated)
self.fit = Fit1D(self)
self.plot = Plot1D(self)
super(XYDataObject, self).__init__(*args, **kwargs)
@observe('x_updated')
def _on_x_updated(self, new):
print('x updated in dataobject')
@observe('y_updated')
def _on_y_updated(self, new):
print('y updated in dataobject')
@observe('y')
def tf1(self, new):
print('fully y updated')
def savetxt(self, file_path, **kwargs):
pass
#todo implement this
def reset(self):
"""restores x, y with data which it was initialized"""
raise NotImplementedError()
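# Usage sketch (not part of the original module): the arithmetic operators return new
# XYDataObject instances, while the in-place variants mutate self and fire the update
# events. It assumes UpdateArray (from .base) behaves like a regular numpy array.
if __name__ == '__main__':
    obj = XYDataObject([0.0, 1.0, 2.0], [1.0, 4.0, 9.0], label='demo')
    doubled = obj * 2   # new object with y == [2.0, 8.0, 18.0]
    obj += 1.0          # in-place, triggers the y_updated observers
    print(doubled.max(), obj.argmax())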
``` |
{
"source": "Jhsmit/awesome-panel",
"score": 2
} |
#### File: pages/gallery/gallery.py
```python
import importlib
import pathlib
from types import ModuleType
from typing import List
import panel as pn
from panel import Column
from panel.layout import Divider, HSpacer
from panel.pane import Markdown
from panel.widgets import Button
from application.config import settings
from awesome_panel.application.models import Resource
from awesome_panel.express import spinners
from awesome_panel.express.bootstrap import InfoAlert
ROOT = str(pathlib.Path.cwd())
# pylint: disable=line-too-long
TEXT = """\
# Awesome Panel Gallery 
With this Gallery I hope to
- show you the power of Panel
- help lower the friction of using Panel
- inspire you to build awesome analytics apps in Python.
This Gallery is running on a low-end server on Azure,
so the performance can be significantly improved if you have access to a higher-end server.
If you have an awesome tool or app you wan't to show here you are very welcome. You can read how to
in the [Contribute](https://github.com/marcskovmadsen/awesome-panel#how-to-contribute-an-app-to-the-gallery)
section of the README."""
# pylint: enable=line-too-long
INFO_TEXT = """\
Please **use FireFox, Safari or Edge** if you can. Alternatively you can use Chrome - but it's
[slower](https://github.com/bokeh/bokeh/issues/9515). Internet Explorer is not supported."""
def info():
"""An InfoAlert with relevant text"""
return Column(InfoAlert(text=INFO_TEXT), sizing_mode="stretch_width",)
def page_code_url_to_html(page: Resource,) -> str:
"""Converts a page to html link to the code with a font awesome icon
Make sure to run pnx.fontawesome.extend() for this to work
Arguments:
page {Resource} -- A page
Returns:
str -- A html string linking to the code and with a nice fontawesome icon
"""
return (
f'<a href={page.url} target="_blank" title="Source Code">' '<i class="fas fa-code"></i></a>'
)
def to_module_function(gallery_url: str,) -> ModuleType:
"""Converts a link to a Python gallery file to a module string
Arguments:
gallery_url {str} -- The link to the Python gallery file
Returns:
Module -- The module string, for example 'gallery.boostrap_dashboard.main'
"""
module_str = (
gallery_url.replace(settings.GITHUB_BLOB_MASTER_URL, "",)
.replace(".py", "",)
.replace("/", ".",)
.replace("\\", ".",)
)
return importlib.import_module(module_str)
class GalleryButton(Button):
"""## Button that loads page.
When clicked the page of the GalleryButton loads"""
def __init__(
self, page: Resource, page_outlet, **kwargs,
):
"""## Button that loads page
When clicked the page of the GalleryButton loads
Arguments:
name {[type]} -- The name/ text of the Button
page {[type]} -- The page to load
page_outlet {[type]} -- The page_outlet to load the page to
"""
super().__init__(
name=page.name, button_type="primary", **kwargs,
)
self.page = page
self.page_outlet = page_outlet
def click_handler(event,): # pylint: disable=unused-argument
text = (
f"<h1>Gallery / {page.name}</h1>"
f"<p>{page.author.to_html()}, {page_code_url_to_html(page)}</p>"
)
page_view = to_module_function(page.url).view()
self.page_outlet[:] = [spinners.DefaultSpinner().center()]
self.page_outlet[:] = [
pn.pane.HTML(text),
page_view,
]
self.on_click(click_handler)
class GalleryCard(Column):
"""A Card consisting of an image and a button"""
def __init__(
self, page: Resource, page_outlet, sizing_mode="fixed", **kwargs,
):
"""A Card consisting of an image and a button
Arguments:
name {[type]} -- The name of the card/ the text on the Button
page {Resource} -- The page to load
page_outlet {[type]} -- The page to load to
"""
self.button = GalleryButton(page, page_outlet, width=365, align="center", **kwargs,)
spacer = pn.layout.HSpacer(height=5)
super().__init__(
spacer,
pn.pane.PNG(page.thumbnail_png_url, width=360, height=272, align="center", embed=False),
# spacer,
self.button,
spacer,
name="gallery-item-" + page.name,
width=400,
margin=10,
sizing_mode=sizing_mode,
css_classes=["card"],
**kwargs,
)
class Gallery: # pylint: disable=too-few-public-methods
"""The Gallery page"""
def __init__(
self, page_outlet: pn.Column, apps_in_gallery: List[Resource],
):
"""Constructs a Gallery
Arguments:
page_outlet {pn.Column} -- A Column to hold the selected gallery page
apps_in_gallery {List[Resource]} -- The list of apps to include in the gallery
"""
self.page_outlet = page_outlet
self.apps_in_gallery = apps_in_gallery
def view(self,) -> Column:
"""The gallery view of awesome-panel.org"""
buttons = []
for app in self.apps_in_gallery:
buttons.append(GalleryCard(app, self.page_outlet,))
return Column(
Markdown(TEXT),
info(),
HSpacer(height=25),
Divider(),
*buttons,
name="Gallery",
sizing_mode="stretch_width",
)
```
#### File: pages/material_template/material_template_builder.py
```python
from typing import List
VERSION = "0.14.1"
MWC_COMPONENTS = [
"mwc-button>"
# "mwc-bottom-app-bar>"
# "mwc-card>"
"mwc-checkbox>"
# "mwc-chip>"
# "mwc-circular-progress>"
# "mwc-data-table>"
"mwc-dialog>"
"mwc-drawer>"
"mwc-fab>"
"mwc-formfield>"
"mwc-icon-button-toggle>"
"mwc-icon-button>"
"mwc-icon>"
"mwc-linear-progress>"
"mwc-list>"
"mwc-menu>"
"mwc-radio>"
"mwc-select>"
"mwc-slider>"
"mwc-snackbar>"
"mwc-switch>"
"mwc-tab-bar>"
"mwc-tab>"
"mwc-textarea>"
"mwc-textfield>"
"mwc-top-app-bar-fixed>"
"mwc-top-app-bar>"
]
MWC_FONT_SCRIPTS = (
'<link href="https://fonts.googleapis.com/css?family=Roboto:300,400,500" rel="stylesheet">'
'<link href="https://fonts.googleapis.com/css?family=Material+Icons&display=block" rel="stylesheet">'
)
def get_component_script(component: str, version=VERSION) -> str:
return (
'<script type="module" src="https://unpkg.com/@material/'
f'{component}@{version}/{component}.js?module"></script>'
)
def get_scripts(components: List[str], version: str = VERSION, include_fonts: bool = True):
component_script_list = [get_component_script(component, version) for component in components]
scripts = "".join(component_script_list)
if include_fonts:
scripts += MWC_FONT_SCRIPTS
return scripts
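# Illustrative only (not part of the original module): print the <script> tags for two components.
if __name__ == "__main__":
    print(get_scripts(["mwc-button", "mwc-slider"], include_fonts=False))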
```
#### File: Jhsmit/awesome-panel/designer_dev.py
```python
from application.pages.pandas_profiling_app.pandas_profiling_app import PandasProfilingApp
from awesome_panel.designer import Designer, ReloadService
def show():
pandas_profiling_app = ReloadService(PandasProfilingApp)
reload_services = [pandas_profiling_app]
Designer(reload_services=reload_services).show()
if __name__ == "__main__":
show()
``` |
{
"source": "Jhsmit/awesome-panel-extensions",
"score": 2
} |
#### File: frameworks/fast/fast_anchor.py
```python
import param # pylint: disable=wrong-import-order
from panel.widgets import Widget
from awesome_panel_extensions.bokeh_extensions.fast.fast_anchor import FastAnchor as _BkFastAnchor
FAST_ANCHOR_APPEARENCES = [
"accent",
"lightweight",
"neutral",
"outline",
"stealth",
"hypertext",
]
# pylint: disable=line-too-long
RELS = [
None,
"alternate", # Provides a link to an alternate representation of the document (i.e. print page, translated or mirror)
"author", # Provides a link to the author of the document
"bookmark", # Permanent URL used for bookmarking
"external", # Indicates that the referenced document is not part of the same site as the current document
"help", # Provides a link to a help document
"license", # Provides a link to licensing information for the document
"next", # Provides a link to the next document in the series
"nofollow", # Links to an unendorsed document, like a paid link.
"noreferrer", # Requires that the browser should not send an HTTP referer header if the user follows the hyperlink
"noopener", # Requires that any browsing context created by following the hyperlink must not have an opener browsing context
"prev", # The previous document in a selection
"search", # Links to a search tool for the document
"tag", # A tag (keyword) for the current document
]
# pylint: enable=line-too-long
TARGETS = [
None,
"_blank",
"_parent",
"_self",
"_top",
]
REFERRER_POLICIES = [
None,
"no-referrer",
"no-referrer-when-downgrade",
"origin",
"origin-when-cross-origin",
"same-origin",
"strict-origin",
"strict-origin-when-cross-origin",
"unsafe-url",
]
class FastAnchor(Widget):
"""The FastAnchor enables the user to click a link and navigate to it.
The component supports several visual apperances
(accent, lightweight, neutral, outline, stealth, hypertext).
When using the `FastTemplate` you can also use the `<fast-anchor>` tag directly inside
`pn.pane.Markdown` and `pn.pane.HTML`.
The FastAnchor wraps the `fast-anchor` of the [Fast Design](https://fast.design/) Framework.
For more information view the [component specification]\
(https://github.com/microsoft/fast/tree/master/packages/web-components/fast-foundation/\
src/anchor/anchor.spec.md).
See also https://explore.fast.design/components/fast-anchor.
"""
value = param.String(
default=None,
allow_None=True,
doc="""The URL that the hyperlink points to. Default is None.""",
label="Href",
)
appearance = param.ObjectSelector(
default=None,
objects=FAST_ANCHOR_APPEARENCES,
doc="""Determines the appearance of the anchor. One of `accent`, `lightweight`, `neutral`,
`outline`, `stealth` or `hypertext`. Defaults to None/ neutral""",
allow_None=True,
)
target = param.ObjectSelector(
default=None,
objects=TARGETS,
allow_None=True,
doc="""Where to display the linked URL. One of None, `_self`, `_blank`, `_parent`, `_self`
or `_top`. Defaults to None""",
)
download = param.String(
default=None,
allow_None=True,
doc="""Prompts the user to save the linked URL instead of navigating to it.
Can be used with or without a value. Defaults to None""",
)
# We call this value instead of href in order to be able to use it
# with pn.Param
hreflang = param.String(
default=None,
allow_None=True,
doc="""Hints at the human language of the linked URL. No built-in functionality.
Default is None.""",
)
ping = param.String(
default=None,
allow_None=True,
doc="""A space-separated list of URLs. When the link is followed, the browser will send
POST requests with the body PING to the URLs. Typically for tracking.
Default is None.""",
)
referrerpolicy = param.ObjectSelector(
default=None,
objects=REFERRER_POLICIES,
allow_None=True,
doc="""How much of the referrer to send when following the link.
one of no-referrer, no-referrer-when-downgrade, origin,origin-when-cross-origin,
same-origin, strict-origin, strict-origin-when-cross-origin and unsafe-url.check_on_set.
Defaults to None""",
)
rel = param.ObjectSelector(
default=None,
objects=RELS,
allow_None=True,
doc="""The relationship of the linked URL as space-separated link types like alternate,
archives, ... See https://developer.mozilla.org/en-US/docs/Web/HTML/Link_types.
Defaults to None""",
)
mimetype = param.String(
default=None,
allow_None=True,
doc="""Hints at the linked URLโs format with a MIME type. No built-in functionality.
Default is None.""",
label="Type",
)
height = param.Integer(default=40, bounds=(0, None))
_widget_type = _BkFastAnchor
_rename = {
"value": "href",
"ref": "referrer", # pylint: disable=protected-access
}
def __init__(self, **params):
super().__init__(**params)
self._set_height()
@param.depends("appearance", watch=True)
def _set_height(self, *_):
if self.appearance == "hypertext":
self.height = 20
else:
self.height = 40
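# Usage sketch (not part of the original module): the anchor is a regular Panel widget and can
# be placed in any layout. The URL below is only an example value.
if __name__ == "__main__":
    import panel as pn
    anchor = FastAnchor(value="https://fast.design", appearance="hypertext")
    pn.Row(anchor).show()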
```
#### File: frameworks/fast/fast_button.py
```python
import panel as pn
import param # pylint: disable=wrong-import-order
from panel.widgets import Button
from awesome_panel_extensions.bokeh_extensions.fast.fast_button import FastButton as _BkFastButton
FAST_BUTTON_APPEARENCES = [
"accent",
"lightweight",
"neutral",
"outline",
"stealth",
]
DEFAULT_FAST_BUTTON_APPEARANCE = "neutral"
BUTTON_TYPE_TO_APPEARANCE = {
"default": "neutral",
"primary": "accent",
"success": "outline",
"warning": "accent",
"danger": "accent",
}
class FastButton(Button):
"""The FastButton extends the Panel Button into the Fast Design Framework.
It is built on the the fast-button web component. The component supports several visual apperances
(accent, lightweight, neutral, outline, stealth).
For more information view the [component specification]\
(https://github.com/microsoft/fast/tree/master/packages/web-components/fast-foundation/\
src/button/button.spec.md).
See also https://explore.fast.design/components/fast-button.
"""
clicks = param.Integer(default=0)
appearance = param.ObjectSelector(
default=DEFAULT_FAST_BUTTON_APPEARANCE,
objects=FAST_BUTTON_APPEARENCES,
doc="""Determines the appearance of the button. One of `accent`, `lightweight`, `neutral`,
`outline` or `stealth`. Defaults to neutral""",
allow_None=True,
)
autofocus = param.Boolean(
default=False,
doc="""The autofocus attribute. Defaults to `False`""",
)
height = param.Integer(default=31, bounds=(0, None))
_widget_type = _BkFastButton
_rename = {
**pn.widgets.Button._rename, # pylint: disable=protected-access
}
def __init__(self, **params):
if "button_type" in params and "appearance" not in params:
params["appearance"] = BUTTON_TYPE_TO_APPEARANCE[params["button_type"]]
super().__init__(**params)
@param.depends("button_type", watch=True)
def _update_accent(self, *_):
self.appearance = BUTTON_TYPE_TO_APPEARANCE[self.button_type]
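# Usage sketch (not part of the original module): button_type is translated into a Fast
# appearance via BUTTON_TYPE_TO_APPEARANCE.
if __name__ == "__main__":
    button = FastButton(name="Run", button_type="primary")
    print(button.appearance)  # -> "accent"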
```
#### File: frameworks/material/material_progress.py
```python
import param
from awesome_panel_extensions.frameworks._base.progress import Progress as _BaseProgress
_ATTRIBUTES_TO_WATCH_BASE = {"class": "bar_color"}
_PROPERTIES_TO_WATCH_BASE = {
"indeterminate": "active",
"progress": "_progress",
"closed": "closed",
}
DENSITY_RATIO = 4
class _MaterialProgress(_BaseProgress):
closed = param.Boolean(
default=False,
doc="""Sets the progress indicator to the closed state. Sets content opacity to 0.
Typically should be set to true when loading has finished.""",
)
_progress = param.Number(default=None, bounds=(0, 1), allow_None=True)
attributes_to_watch = param.Dict(_ATTRIBUTES_TO_WATCH_BASE)
properties_to_watch = param.Dict(_PROPERTIES_TO_WATCH_BASE)
def __init__(self, **params):
# Hack: to make sure that value is shown on construction
if "value" in params and "active" not in params:
params["active"] = False
super().__init__(**params)
self._update_progress()
@param.depends("value", "max", watch=True)
def _update_progress(self, *_):
if self.value is None or self.value == 0 or self.max is None or self.max == 0:
self._progress = None
else:
self._progress = self.value / self.max
class LinearProgress(_MaterialProgress):
"""The Material LinearProgress widget conveys progress information to the user"""
html = param.String(
"<mwc-linear-progress style='width:100%' progress='0.0'></mwc-linear-progress"
)
buffer = param.Integer(
default=None,
bounds=(0, 100),
allow_None=True,
doc="""Sets the buffer progress bar's value. Value should be between [0, 1].
Setting this value to be less than max will reveal moving, buffering dots.""",
)
_buffer = param.Number(
default=None,
allow_None=True,
)
reverse = param.Boolean(
default=False, doc="Reverses the direction of the linear progress indicator."
)
properties_to_watch = param.Dict(
{**_PROPERTIES_TO_WATCH_BASE, "buffer": "_buffer", "reverse": "reverse"}
)
@param.depends("max", watch=True)
def _update_buffer_bounds(self):
self.param.buffer.bounds = (0, self.max)
@param.depends("buffer", "max", watch=True)
def _update_buffer(self, *_):
if self.buffer is None or self.buffer == 0 or self.max is None or self.max == 0:
self._buffer = None
else:
self._buffer = self.buffer / self.max
class CircularProgress(_MaterialProgress):
"""The Material Circular Progress Widget conveys progress information to the user"""
html = param.String("<mwc-circular-progress style='width:51px'></mwc-circular-progress")
density = param.Integer(
default=0,
bounds=(-8, 2000),
doc="""Sets the progress indicator's sizing based on density scale. Minimum value is -8.
Each unit change in density scale corresponds to 4px change in side dimensions. The stroke
width adjusts automatically.""",
)
# _style = param.String()
# attributes_to_watch = param.Dict(dict(**_ATTRIBUTES_TO_WATCH_BASE, style="_style"))
properties_to_watch = param.Dict(dict(**_PROPERTIES_TO_WATCH_BASE, density="density"))
def __init__(self, **params):
if "density" in params and "html" not in params:
density = params["density"]
diameter = round((density + 8) * DENSITY_RATIO + 17)
html = (
f"<mwc-circular-progress style='height:{diameter}px;width:{diameter}px' "
f"density={density}></mwc-circular-progress"
)
params["html"] = html
super().__init__(**params)
self._update_diameter()
@param.depends("density", watch=True)
def _update_diameter(self, *_):
diameter = round((self.density + 8) * DENSITY_RATIO + 17)
self.min_height = diameter
self.min_width = diameter
# Cannot get the style updating programmatically. Starts an infinite loop.
# self._style = f"height:{diameter}px;width:{diameter}px;"
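# Usage sketch (not part of the original module): `value`, `max` and `active` are assumed to be
# inherited from the base Progress widget imported above.
if __name__ == "__main__":
    bar = LinearProgress(value=40, max=100)              # determinate bar at 40%
    spinner = CircularProgress(active=True, density=2)   # indeterminate spinner
    print(bar.value, spinner.density)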
```
#### File: awesome_panel_extensions/_shared/logger.py
```python
import logging
LEVEL = logging.INFO
LEVEL_NAME = "INFO"
# pylint: disable=implicit-str-concat
LOG_FORMAT = (
"%(asctime)s :: %(levelname)s :: %(filename)s :: %(funcName)s :: " "%(lineno)s :: %(message)s"
)
# file_handler = logging.FileHandler(filename='test.log', mode='w')
# file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
# CONSOLE_HANDLER = logging.StreamHandler(sys.stdout)
# CONSOLE_HANDLER.setFormatter(logging.Formatter(LOG_FORMAT))
def get_logger(name, level=LEVEL):
"""Returns a logger"""
logger = logging.getLogger(name)
# logger.addHandler(CONSOLE_HANDLER)
logger.setLevel(level)
return logger
def log_started_message():
"""Starts the logger"""
_logger = get_logger(__name__)
_logger.info("Session Started")
_logger.info("Log Level %s", LEVEL_NAME)
```
#### File: awesome_panel_extensions/site/base_model.py
```python
import param
class BaseModel(param.Parameterized):
"""The BaseModel adds ordering by the name parameter to a Class"""
def __lt__(self, other):
if hasattr(other, "name"):
return self.name.casefold() < other.name.casefold()
return True
def __eq__(self, other):
if hasattr(other, "name"):
return self.name == other.name
return False
def __str__(
self,
):
return self.name
def __repr__(
self,
):
return self.name
```
#### File: awesome_panel_extensions/site/site.py
```python
import pathlib
from functools import wraps
from typing import Callable, Dict, List, Optional
import param
from panel.template.base import BasicTemplate
from awesome_panel_extensions.site.application import Application
from awesome_panel_extensions.site.template import TemplateGenerator
class Site(param.Parameterized):
"""The Site provides meta data and functionality for registrering application meta data and
views"""
applications = param.List(doc="The list of applications to include in the site", constant=True)
authors = param.List(doc="The list of authors contributing to the site", constant=True)
css_path = param.ClassSelector(doc="A path to custom css", class_=pathlib.Path)
js_path = param.ClassSelector(doc="A path to custom js", class_=pathlib.Path)
def __init__(self, **params):
if "applications" not in params:
params["applications"] = []
if "authors" not in params:
params["authors"] = []
super().__init__(**params)
self._template_generator = TemplateGenerator(css_path=self.css_path, js_path=self.js_path)
def create_application( # pylint: disable=too-many-arguments
self,
url: str,
name: str,
introduction: str,
description: str,
author: str,
thumbnail_url: str,
code_url: str = "",
documentation_url: str = "",
gif_url: str = "",
mp4_url: str = "",
youtube_url: str = "",
tags: Optional[List] = None,
) -> Application:
"""A Site consists of meta data, Resources, Applications and Routes.
Args:
url (str): The base url of the Site. For example 'https://awesome-panel.org'
name (str): The name of the site. For example 'Awesome Panel'.
introduction (str): A short description of the site.
description (str): A longer description of the site.
author (str): The name of the Author of the site.
thumbnail_url (str): A Thumbnail visualising the site.
code_url (str, optional): [description]. Defaults to "".
documentation_url (str, optional): [description]. Defaults to "".
gif_url (str, optional): [description]. Defaults to "".
mp4_url (str, optional): [description]. Defaults to "".
youtube_url (str, optional): [description]. Defaults to "".
tags (Optional[List], optional): [description]. Defaults to None.
Raises:
ValueError: [description]
Returns:
Application: [description]
"""
if tags is None:
tags = []
# pylint: disable=unsubscriptable-object, not-an-iterable
author_list = [auth for auth in self.authors if auth.name == author]
if author_list:
author_ = author_list[0]
else:
raise ValueError(f"Error. Author '{author}' is not in the list of site authors!")
return Application(
url=url,
name=name,
introduction=introduction,
description=description,
author=author_,
thumbnail_url=thumbnail_url,
tags=tags,
category="Application",
documentation_url=documentation_url,
code_url=code_url,
gif_url=gif_url,
mp4_url=mp4_url,
youtube_url=youtube_url,
)
def add(
self,
application=None,
):
"""Registers you application meta data and view
>>> from awesome_panel_extensions.site import Author
>>> from awesome_panel_extensions.site import Site, Application
>>> site = Site(name="awesome-panel.org")
>>> marc_skov_madsen = Author(
... name="<NAME>",
... url="https://datamodelsanalytics.com",
... avatar_url="https://avatars0.githubusercontent.com/u/42288570",
... twitter_url="https://twitter.com/MarcSkovMadsen",
... linkedin_url="https://www.linkedin.com/in/marcskovmadsen/",
... github_url="https://github.com/MarcSkovMadsen",
... )
>>> site.authors.append(marc_skov_madsen)
>>> application = site.create_application(
... url="home",
... name="Home",
... author="<NAME>",
... description="The home page of awesome-panel.org.",
... introduction="The home page",
... thumbnail_url="",
... documentation_url="",
... code_url="",
... gif_url="",
... mp4_url="",
... youtube_url="",
... tags=["Site"],
... )
>>> @site.add(application)
... def view():
... return pn.pane.Markdown("# Home")
>>> site.applications
[Home]
>>> site.routes
{'home': <function view at...>}
"""
# pylint: disable=unsupported-assignment-operation
if not application in self.applications: # pylint: disable=unsupported-membership-test
self.applications.append(application)
def inner_function(view):
@wraps(view)
def wrapper(*args, **kwargs):
template = view(*args, **kwargs)
if (
isinstance(template, BasicTemplate)
and template.title == template.param.title.default
):
if not self.name == application.name:
template.title = application.name
else:
template.title = ""
self.register_post_view(template=template, application=application)
return template
application.view = wrapper
return wrapper
return inner_function
# pylint: disable=unused-argument
def register_post_view(self, template: BasicTemplate, application: Application):
"""Updates the template or application"""
template.site = self.name
@property
def routes(self) -> Dict[str, Callable]:
"""Returns a dictionary with the url as key and the view as the value
Returns:
Dict[str, Callable]: A dictionary mapping each application's url to its view callable
"""
# pylint: disable=not-an-iterable
return {app.url: app.view for app in self.applications}
def create_template( # pylint: disable=too-many-arguments, too-complex
self,
template: Optional[str] = None,
theme: Optional[str] = None,
**params,
) -> BasicTemplate:
"""Returns a BasicTemplate
Args:
template (str, optional): The name of the template. Defaults to TEMPLATE.
theme (str, optional): The name of the theme. Defaults to THEME.
**params: Optional template parameters
Returns:
BasicTemplate: The specified Template
"""
params["site"] = params.get("site", self.name)
return self._template_generator.get_template(
template=template,
theme=theme,
**params,
)
def get_application(self, name: str) -> Optional[Application]:
"""Returns the application with the specified name
Args:
name (str): The name of the application to look up
Returns:
Optional[Application]: The application if found, otherwise None
"""
# pylint: disable=not-an-iterable
_app = [app for app in self.applications if app.name == name]
if _app:
return _app[0]
return None
```
#### File: widgets/link_buttons/share_buttons.py
```python
import urllib.parse
import panel as pn
import param
from awesome_panel_extensions.assets import svg_icons
DEFAULT_URL = "https://awesome-panel.org"
DEFAULT_TEXT = "Checkout"
STYLE = """.bk a.button-share-link {
color: inherit;
font-style: none;
}
svg.pnx-icon {
height: 1em;
width: 1em;
}
"""
if not STYLE in pn.config.raw_css:
pn.config.raw_css.append(STYLE)
class ShareOnBase(pn.pane.HTML):
"""Base class for implementing ShareOnFacebook, ShareOnLinkedIn links etc.
- The href property should be overridden
"""
url = param.String(DEFAULT_URL)
icon = param.String(svg_icons.EXTERNAL_LINK, doc="A SVG icon")
text = param.String(DEFAULT_TEXT)
size = param.Integer(default=1, bounds=(0, 20), doc="The fontsize in em")
priority = 0
_rename = dict(pn.pane.HTML._rename, url=None, icon=None, text=None, size=None)
def __init__(self, **params):
super().__init__(**params)
self._update_html_object()
@property
def _url_parsed(
self,
):
return urllib.parse.quote(self.url).replace(
"/",
"%2F",
)
@property
def href(
self,
) -> str:
"""The href to goto when clicked
Override this method in a base class
Raises:
NotImplementedError:
Returns:
str: A href string
"""
raise NotImplementedError()
def to_html(
self,
) -> str:
"""A html string with link and icon tags
Returns:
str: A html string with link and icon tags
"""
return (
f'<a href="{self.href}" class="button-share-link" style="font-size: {self.size}em" '
f'target="_blank">{self.icon}</a>'
)
@param.depends("url", "icon", "size", watch=True)
def _update_html_object(
self,
):
"""A HTML pane with the a link and icon"""
self.object = self.to_html()
class ShareOnFacebook(ShareOnBase):
"""A Share on Facebook button"""
icon = param.String(svg_icons.FACEBOOK)
@property
def href(
self,
):
return f"https://www.facebook.com/sharer/sharer.php?u={self._url_parsed}"
class ShareOnLinkedIn(ShareOnBase):
"""A Share on LinkedIn button"""
icon = param.String(svg_icons.LINKED_IN)
@property
def href(
self,
):
return (
f"http://www.linkedin.com/shareArticle?mini=true&url={self._url_parsed}"
f"&title={self.text}"
)
class ShareOnTwitter(ShareOnBase):
"""A Share on Twitter button"""
icon = param.String(svg_icons.TWITTER)
@property
def href(
self,
):
return f"https://twitter.com/intent/tweet?url={self._url_parsed}&text={self.text}"
class ShareOnReddit(ShareOnBase):
"""A Share on Reddit button"""
icon = param.String(svg_icons.REDDIT)
@property
def href(
self,
):
return f"https://reddit.com/submit?url={self._url_parsed}&title={self.text}"
class ShareOnMail(ShareOnBase):
"""A Share on Mail button"""
icon = param.String(svg_icons.ENVELOPE)
@property
def href(
self,
):
return f"mailto:?subject={self._url_parsed}&body={self.text} {self._url_parsed}"
```
#### File: awesome_panel_extensions/widgets/perspective_viewer.py
```python
from enum import Enum
from typing import List
import panel as pn
import param
from awesome_panel_extensions.bokeh_extensions.perspective_viewer import (
PerspectiveViewer as _BkPerspectiveViewer,
)
from awesome_panel_extensions.widgets.dataframe_base import DataFrameWithStreamAndPatchBaseWidget
# This is need to be able to use Perspective in notebook via pn.extension("perspective")
pn.extension._imports[ # pylint: disable=protected-access
"perspective"
] = "awesome_panel_extensions.bokeh_extensions.perspective_viewer"
DEFAULT_THEME = "material"
THEMES_MAP = {
"material": "perspective-viewer-material",
"material-dark": "perspective-viewer-material-dark",
"material-dense": "perspective-viewer-material-dense",
"material-dense-dark": "perspective-viewer-material-dense-dark",
"vaporwave": "perspective-viewer-vaporwave",
}
THEMES = [*THEMES_MAP.keys()]
# Hack: When the user drags some of the columns, then the class attribute contains "dragging" also.
CSS_CLASS_MAP = {v: k for k, v in THEMES_MAP.items()}
DEFAULT_CSS_CLASS = THEMES_MAP[DEFAULT_THEME]
# Source: https://github.com/finos/perspective/blob/e23988b4b933da6b90fd5767d059a33e70a2493e/python/perspective/perspective/core/plugin.py#L49 # pylint: disable=line-too-long
class Plugin(Enum):
"""The plugins (grids/charts) available in Perspective. Pass these into
the `plugin` arg in `PerspectiveWidget` or `PerspectiveViewer`.
"""
HYPERGRID = "hypergrid" # hypergrid
GRID = "datagrid" # hypergrid
YBAR_D3 = "d3_y_bar" # d3fc
XBAR_D3 = "d3_x_bar" # d3fc
YLINE_D3 = "d3_y_line" # d3fc
YAREA_D3 = "d3_y_area" # d3fc
YSCATTER_D3 = "d3_y_scatter" # d3fc
XYSCATTER_D3 = "d3_xy_scatter" # d3fc
TREEMAP_D3 = "d3_treemap" # d3fc
SUNBURST_D3 = "d3_sunburst" # d3fc
HEATMAP_D3 = "d3_heatmap" # d3fc
CANDLESTICK = "d3_candlestick" # d3fc
CANDLESTICK_D3 = "d3_candlestick" # d3fc
OHLC = "d3_ohlc" # d3fc
OHLC_D3 = "d3_ohlc" # d3fc
@staticmethod
def options() -> List:
"""Returns the list of options of the PerspectiveViewer, like Hypergrid, Grid etc.
Returns:
List: [description]
"""
return list(c.value for c in Plugin)
class PerspectiveViewer(DataFrameWithStreamAndPatchBaseWidget): # pylint: disable=abstract-method
"""The PerspectiveViewer widget enables exploring large tables of data"""
_widget_type = _BkPerspectiveViewer
plugin = param.ObjectSelector(
Plugin.GRID.value,
objects=Plugin.options(),
doc="The name of a plugin to display the data. For example hypergrid or d3_xy_scatter.",
)
theme = param.ObjectSelector(
DEFAULT_THEME,
objects=THEMES,
doc="The style of the PerspectiveViewer. For example material-dark",
)
columns = param.List(
None, doc='A list of source columns to show as columns. For example ["x", "y"]'
)
# We don't expose this as it is not documented
# parsed_computed_columns = param.List(
# None,
# doc='A list of parsed computed columns. For example [{"name":"x+y","func":"add","inputs":["x","y"]}]',
# )
computed_columns = param.List(
None,
doc='A list of computed columns. For example [""x"+"index""]',
)
column_pivots = param.List(
None, doc='A list of source columns to pivot by. For example ["x", "y"]'
)
row_pivots = param.List(
None, doc='A list of source columns to group by. For example ["x", "y"]'
)
aggregates = param.Dict(None, doc='How to aggregate. For example {x: "distinct count"}')
sort = param.List(None, doc='How to sort. For example[["x","desc"]]')
filters = param.List(
None, doc='How to filter. For example [["x", "<", 3],["y", "contains", "abc"]]'
)
# I set this to something > 0. Otherwise the PerspectiveViewer widget will have a height of 0px
# It will appear as if it does not work.
height = param.Integer(default=300, bounds=(0, None))
def __init__(self, **params):
super().__init__(**params)
self._set_source()
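# Usage sketch (not part of the original module). It assumes the DataFrame is supplied via a
# `value` parameter inherited from DataFrameWithStreamAndPatchBaseWidget.
if __name__ == "__main__":
    import pandas as pd
    _df = pd.DataFrame({"x": [1, 2, 3], "y": [4.0, 5.0, 6.0]})
    _viewer = PerspectiveViewer(value=_df, plugin=Plugin.YBAR_D3.value, theme="material-dark")
    pn.panel(_viewer).show()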
```
#### File: awesome-panel-extensions/scripts/issue_button.py
```python
import param
from panel.widgets import button
class Button(button.Button):
_name = param.String()
_rename = {**button.Button._rename, "name": None, "_name": "label"}
def __init__(self, **params):
super().__init__(**params)
self._update_name()
@param.depends("name")
def _update_name(self, *_):
print("update name")
self._name = "allo " + self.name
Button(name="world").servable()
```
#### File: awesome-panel-extensions/scripts/issue_param_value.py
```python
import panel as pn
import param
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
WIDGETS = {
"some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
paremeterized_template = FastTemplate(main=[parameterized_app.view])
paremeterized_template.servable()
```
#### File: awesome-panel-extensions/scripts/issue_responsive_outside_container.py
```python
import altair as alt
import pandas as pd
import panel as pn
pn.extension("vega")
def get_altair_bar_data():
return pd.DataFrame(
{
"project": ["a", "b", "c", "d", "e", "f", "g"],
"score": [25, 57, 23, 19, 8, 47, 8],
"goal": [25, 47, 30, 27, 38, 19, 4],
}
)
def altair_bar_plot(data):
bar_chart = alt.Chart(data).mark_bar().encode(x="project", y="score")
tick_chart = (
alt.Chart(data)
.mark_tick(
color="red",
thickness=2,
size=40 * 0.9,
) # controls width of tick.
.encode(x="project", y="goal")
)
return (bar_chart + tick_chart).properties(width="container", height="container")
data = get_altair_bar_data()
component = altair_bar_plot(data)
component_panel = pn.pane.Vega(component, sizing_mode="stretch_both")
pn.Column(
component_panel,
background="lightgray",
sizing_mode="stretch_both",
css_classes=["designer-centered-component"],
).show(port=5007)
```
#### File: designer/example/example_components.py
```python
import altair as alt
import hvplot.pandas # pylint: disable=unused-import
import pandas as pd
import plotly.express as px
from matplotlib import cm
from matplotlib.backends.backend_agg import FigureCanvas # pylint: disable=no-name-in-module
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import axes3d
def get_plotly_carshare_data() -> pd.DataFrame:
"""Returns data for plotly_car_share_plot
Returns:
pd.DataFrame: Data
"""
return px.data.carshare()
def plotly_carshare_plot(carshare):
"""An example component based on plotly"""
fig = px.scatter_mapbox(
carshare,
lat="centroid_lat",
lon="centroid_lon",
color="peak_hour",
size="car_hours",
color_continuous_scale=px.colors.cyclical.Edge,
size_max=15,
zoom=10,
mapbox_style="carto-positron",
)
# Panel does currently not plot responsive Plotly plots well
# https://github.com/holoviz/panel/issues/1514
fig.layout.autosize = True
return fig
def get_altair_bar_data() -> pd.DataFrame:
"""Returns data for the altair_bar_plot example
Returns:
pd.DataFrame: DataFrame with column project, score and goal.
"""
return pd.DataFrame(
{
"project": ["a", "b", "c", "d", "e", "f", "g"],
"score": [25, 57, 23, 19, 8, 47, 8],
"goal": [25, 47, 30, 27, 38, 19, 4],
}
)
def altair_bar_plot(data):
"""An example component based on altair"""
bar_chart = alt.Chart(data).mark_bar().encode(x="project", y="score")
tick_chart = (
alt.Chart(data)
.mark_tick(
color="red",
thickness=2,
size=40 * 0.9,
) # controls width of tick.
.encode(x="project", y="goal")
)
return (bar_chart + tick_chart).properties(width="container", height="container")
def matplotlib_plot():
"""An example component based on matplotlib"""
fig = Figure(figsize=(8, 6))
axis = fig.add_subplot(111, projection="3d")
FigureCanvas(fig) # not needed for mpl >= 3.1
xval, yval, zval = axes3d.get_test_data(0.05)
axis.plot_surface(xval, yval, zval, rstride=8, cstride=8, alpha=0.3)
axis.contourf(xval, yval, zval, zdir="z", offset=-100, cmap=cm.coolwarm)
axis.contourf(xval, yval, zval, zdir="x", offset=-40, cmap=cm.coolwarm)
axis.contourf(xval, yval, zval, zdir="y", offset=40, cmap=cm.coolwarm)
axis.set_xlabel("X")
axis.set_xlim(-40, 40)
axis.set_ylabel("Y")
axis.set_ylim(-40, 40)
axis.set_zlabel("Z")
axis.set_zlim(-100, 100)
return fig
def get_holoviews_plot(data):
"""An example component based on hvplot"""
data = data.set_index("Year").drop("Annual", axis=1).transpose()
return data.hvplot.heatmap(
x="columns",
y="index",
title="US Unemployment 1948โ2016",
cmap=[
"#75968f",
"#a5bab7",
"#c9d9d3",
"#e2e2e2",
"#dfccce",
"#ddb7b1",
"#cc7878",
"#933b41",
"#550b1d",
],
xaxis="top",
rot=70,
responsive=True,
height=600,
).opts(
toolbar=None,
fontsize={"title": 20, "xticks": 5, "yticks": 5},
)
```
#### File: designer/example/example_designer.py
```python
import panel as pn
from bokeh.sampledata import unemployment1948
from awesome_panel_extensions.developer_tools.designer import ComponentReloader, Designer
from tests.developer_tools.designer.example.example_components import (
altair_bar_plot,
get_altair_bar_data,
get_holoviews_plot,
get_plotly_carshare_data,
matplotlib_plot,
plotly_carshare_plot,
)
pn.extension("vega", "plotly")
def _designer():
# Define your components
altair_reloader = ComponentReloader(
component=altair_bar_plot, parameters={"data": get_altair_bar_data}
)
plotly_reloader = ComponentReloader(
component=plotly_carshare_plot,
parameters={"carshare": get_plotly_carshare_data()},
)
holoviews_reloader = ComponentReloader(
component=get_holoviews_plot, parameters={"data": unemployment1948.data}
)
components = [
matplotlib_plot,
altair_reloader,
holoviews_reloader,
plotly_reloader,
]
# Configure the Designer with you components
return Designer(components=components)
if __name__ == "__main__":
_designer().show()
```
#### File: frameworks/fast/test_fast_checkbox.py
```python
from awesome_panel_extensions.frameworks.fast import FastCheckbox
from tests.frameworks.fast.fast_test_app import create_fast_test_app
def test_constructor():
# When
checkbox = FastCheckbox(name="Check Me")
# Then
assert checkbox.disabled is False
assert checkbox.name == "Check Me"
assert checkbox.value is False
if __name__ == "__main__":
checkbox = FastCheckbox(name="Hello Fast Design World")
app = create_fast_test_app(component=checkbox, parameters=["disabled", "readonly", "value"])
app.show(port=5007)
```
#### File: frameworks/fast/test_fast_literal_input.py
```python
from awesome_panel_extensions.frameworks.fast import FastLiteralInput
from tests.frameworks.fast.fast_test_app import create_fast_test_app
def test_can_construct_list():
# When
FastLiteralInput(type=(type, list), value=["a", "b", "c"])
# Then
# assert literal_input.type_of_text=="text"
def test_can_construct_dict():
# Given
_type = (type, dict)
value = {"a": 1, "b": 2, "c": 3}
# When
literal_input = FastLiteralInput(type=_type, value=value)
# Then
assert literal_input.type == _type
assert literal_input.value == value
# assert literal_input.type_of_text=="text"
if __name__.startswith("bokeh"):
textinput = FastLiteralInput(
name="Be Fast!", placeholder="Write a list. For example ['a']!", type=(type, list)
)
app = create_fast_test_app(
component=textinput,
parameters=[
"name",
"value",
# "type",
"disabled",
"placeholder",
"appearance",
"autofocus",
# "type_of_text", # Constant
"serializer",
# Some attributes do not work. See https://github.com/microsoft/fast/issues/3852
# "maxlength",
# "minlength",
# "pattern",
# "size",
# "spellcheck",
# "required",
"readonly",
],
)
app.servable()
```
#### File: frameworks/fast/test_fast_template.py
```python
import panel as pn
from panel import Template
from awesome_panel_extensions.frameworks.fast import FastTemplate
def test_constructor():
# Given
column = pn.Column()
main = [column]
# When
template = FastTemplate(main=main)
# Then
assert issubclass(FastTemplate, Template)
assert template.main == main
```
#### File: models/test_resource/test_site.py
```python
import pytest
from awesome_panel_extensions.site import Site
@pytest.fixture
def site():
return Site(name="awesome-panel.org")
def test_site(site, author):
site.authors.append(author)
app = site.create_application(
url="home",
name="Home",
author="<NAME>",
description="The home page of awesome-panel.org.",
introduction="The home page",
thumbnail_url="",
documentation_url="",
code_url="",
gif_url="",
mp4_url="",
youtube_url="",
tags=["Site"],
)
@site.add(app)
def view(): # pylint: disable=unused-variable
return "abcd"
assert len(site.applications) == 1
assert site.applications[0].name == "Home"
assert view() == "abcd"
```
#### File: tests/widgets/test_dataframe_base.py
```python
import pandas as pd
import param
import pytest
from bokeh.models.sources import ColumnDataSource
from awesome_panel_extensions.widgets.dataframe_base import (
DataFrameWithStreamAndPatchBaseWidget as DFWidget,
)
VALUE_CHANGED_COUNT = 0
# region value
@pytest.fixture
def data():
return {"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"], "z": [True, False, True, False]}
@pytest.fixture
def dataframe(data):
return pd.DataFrame(data)
def test_constructor(dataframe):
# When
component = DFWidget(value=dataframe)
# Then
assert component.value is dataframe
assert isinstance(component._source, ColumnDataSource)
pd.testing.assert_frame_equal(component._source.to_df(), dataframe.reset_index())
def test_constructor_no_value():
# When
component = DFWidget()
# Then
assert isinstance(component._source, ColumnDataSource)
def test_change_value(dataframe):
# Given
component = DFWidget()
# When
component.value = dataframe
# Then
pd.testing.assert_frame_equal(component._source.to_df(), dataframe.reset_index())
# endregion value
# region stream
def test_stream_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_series_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]}).loc[1]
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_multi():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = {"x": [3, 4], "y": ["c", "d"]}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY OF COLUMNS
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_single():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = {"x": 4, "y": "d"}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY ROW
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
# endregion Stream
# region Patch
def test_patch_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
patch_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.patch(patch_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
# endregion Patch
def test_patch_from_partial_dataframe():
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data1 = data.loc[
0:1,
]
data2 = data.loc[2:4]
# When
tabulator = DFWidget(value=data1)
tabulator.value = data2.reset_index(drop=True)
patch_value = tabulator.value["x"] + 2
tabulator.patch(patch_value)
# Then
expected = pd.DataFrame({"x": [5, 6], "y": ["c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
def test_range_index_of_dataframe_value():
# Given
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data2 = data.loc[2:4]
# When
with pytest.raises(ValueError) as error:
DFWidget(value=data2)
assert str(error.value) == (
"Please provide a DataFrame with RangeIndex starting at 0 and with step 1"
)
def test_patch_and_reset():
"""I experienced some strange behaviour which I test below.
The code actually worked as it should. The problem was that I patched the original
data, so I could never "reset" back to the original data.
"""
# Given
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data_copy = data.copy(deep=True)
tabulator = DFWidget(value=data_copy)
patch = tabulator.value["x"] + 2
# When patch Then
tabulator.patch(patch_value=patch)
assert set(tabulator._source.data["x"]) == {3, 4, 5, 6}
# When reset Then
tabulator.value = data
assert set(tabulator._source.data["x"]) == {1, 2, 3, 4}
def test_replace_stream_and_reset():
# Given
data = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": ["a", "b", "c", "d", "e"]})
data1 = (
data.copy(deep=True)
.loc[
0:1,
]
.reset_index(drop=True)
)
data2 = (
data.copy(deep=True)
.loc[
2:3,
]
.reset_index(drop=True)
)
data3 = data.copy(deep=True).loc[
4:4,
]
tabulator = DFWidget(value=data1)
# When replace, stream and reset
tabulator.value = data2
tabulator.stream(stream_value=data3)
tabulator.value = data.copy(deep=True).loc[
0:1,
]
# Then
assert set(tabulator._source.data["x"]) == {1, 2}
```
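The tests above exercise the full `value`/`stream`/`patch` API of `DataFrameWithStreamAndPatchBaseWidget`. A minimal usage sketch based only on what the tests demonstrate (the `DFWidget` alias and the RangeIndex requirement come from the test module itself):
```python
import pandas as pd

from awesome_panel_extensions.widgets.dataframe_base import (
    DataFrameWithStreamAndPatchBaseWidget as DFWidget,
)

# The widget requires a DataFrame with a RangeIndex starting at 0 and step 1.
widget = DFWidget(value=pd.DataFrame({"x": [1, 2], "y": ["a", "b"]}))

# Append rows: a DataFrame, a Series or a dict of columns/scalars is accepted.
widget.stream(pd.DataFrame({"x": [3, 4], "y": ["c", "d"]}))

# Overwrite a column in place using a Series aligned on the index.
widget.patch(widget.value["x"] + 10)
```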
#### File: tests/widgets/test_pivottable.py
```python
import pandas as pd
import panel as pn
import pytest
from bokeh.models import ColumnDataSource
from awesome_panel_extensions.widgets.dataframe_base import DataFrameWithStreamAndPatchBaseWidget
from awesome_panel_extensions.widgets.pivot_table import PivotTable
@pytest.fixture
def data():
return {"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"], "z": [True, False, True, False]}
@pytest.fixture
def dataframe(data):
return pd.DataFrame(data)
def test_is_dataframe_base_widget():
"""A lot of the functionality comes by inheriting from
DataFrameWithStreamAndPatchBaseWidget. If that is changed we would need to add or change some
testing here"""
assert issubclass(PivotTable, DataFrameWithStreamAndPatchBaseWidget)
def test_constructor(dataframe):
# When
component = PivotTable(value=dataframe)
# Then
assert component.height > 0
assert isinstance(component._source, ColumnDataSource)
pd.testing.assert_frame_equal(component._source.to_df(), dataframe.reset_index())
def test_pivot_table_comms(document, comm, dataframe):
# Given
pivot_table = PivotTable(value=dataframe)
widget = pivot_table.get_root(document, comm=comm)
# Then
assert isinstance(widget, pivot_table._widget_type)
assert widget.source == pivot_table._source
def test_example_app():
data = [
{"x": 1, "y": "a", "z": True},
{"x": 2, "y": "b", "z": False},
{"x": 3, "y": "c", "z": True},
{"x": 4, "y": "d", "z": False},
]
dataframe = pd.DataFrame(data)
pivot_table = PivotTable(
height=500,
value=dataframe.copy(deep=True),
columns=["index", "x", None, None, None],
plugin="d3_xy_scatter",
)
def section(component, message=None):
title = "## " + str(type(component)).split(".")[-1][:-2]
parameters = [
"value",
# "columns",
# # "parsed_computed_columns",
# "computed_columns",
# "column_pivots",
# "row_pivots",
# "aggregates",
# "sort",
# "filters",
# "plugin",
# "theme",
]
if message:
return (
pn.pane.Markdown(title),
component,
pn.Param(component, parameters=parameters),
pn.pane.Markdown(message),
pn.layout.Divider(),
)
return (
pn.pane.Markdown(title),
component,
# pn.Param(component, parameters=parameters),
pn.layout.Divider(),
)
return pn.Column(*section(pivot_table), width=800, sizing_mode="stretch_height")
def test_reference_notebook_example():
DARK_BACKGROUND = "rgb(42, 44, 47)" # pylint: disable=invalid-name
top_app_bar = pn.Row(
pn.pane.HTML("<h1 style='color:white'>PivotTable.js</h1>"),
pn.layout.HSpacer(),
margin=0,
background=DARK_BACKGROUND,
)
# pn.config.sizing_mode = "stretch_width"
# Source: https://datahub.io/core/s-and-p-500-companies-financials
data = (
"https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/master/application/"
"pages/awesome_panel_express_tests/PerspectiveViewerData.csv"
)
dataframe = pd.read_csv(data)
columns = [
"Name",
"Symbol",
"Sector",
"Price",
"52 Week Low",
"52 Week High",
"Price/Earnings",
"Price/Sales",
"Price/Book",
"Dividend Yield",
"Earnings/Share",
"Market Cap",
"EBITDA",
"SEC Filings",
]
dataframe = dataframe[columns]
pivot_table = PivotTable(
height=500,
value=dataframe.copy(deep=True),
sizing_mode="stretch_width",
)
return pn.Column(
top_app_bar,
pn.Row(
pivot_table,
sizing_mode="stretch_width",
),
sizing_mode="stretch_width",
)
if __name__.startswith("bokeh") or __name__ == "__main__":
test_reference_notebook_example().show(port=5007)
``` |
{
"source": "Jhsmit/awesome-panel",
"score": 2
} |
#### File: application/components/component.py
```python
# class Component(param.Parameterized):
# def view(self, **params):
# raise NotImplementedError
```
#### File: application/models/progress.py
```python
import param
class Progress(param.Parameterized):
"""The Progress model is used to communicate the Progress of the Application,
the active PageComponent etc."""
value = param.Integer(default=0, bounds=(0, None))
value_max = param.Integer(default=100, bounds=(0, None))
message = param.String()
active_count = param.Integer(bounds=(0, None))
@property
def active(self) -> bool:
"""Returns True if there is activity like ETL or Plotting.
Returns:
bool: True if there is activity. Otherwise False is returned.
"""
return self.value > 0 or self.message != "" or self.active_count > 0
```
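The `active` flag above is derived purely from `value`, `message` and `active_count`; a short sketch of the intended behaviour (assuming `Progress` is importable from `awesome_panel.application.models`, as `Tag` is in the service below):
```python
from awesome_panel.application.models import Progress  # import path assumed

progress = Progress()
assert not progress.active  # value == 0, empty message, active_count == 0

progress.message = "Running ETL..."  # any non-empty message marks the app as active
assert progress.active

progress.message = ""
progress.active_count = 1  # a positive active_count does the same
assert progress.active
```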
#### File: application/services/tag_service.py
```python
from typing import List, Optional
import param
from awesome_panel.application.models import Tag
class TagService(param.Parameterized):
"""A CRUD Service for Tags
Notes:
- The tags list is kept sorted.
- DON'T change the tags list manually. Use the functions of the service
"""
tags = param.List(constant=True)
def __init__(self, **params):
super().__init__(**params)
self._tags = {tag.name: tag for tag in self.tags}
def create(self, tag: Tag):
"""Creates the specified Tag
Args:
tag (Tag): A Tag to create
"""
self._tags[tag.name] = tag
self._update_tags_list()
def read(self, name: str) -> Optional[Tag]:
"""Returns the Tag with the given name
Args:
name (str): The name of the Tag to return
Returns:
Optional[Tag]: The Tag with the given name if it exists. Otherwise None
"""
if name in self._tags:
return self._tags[name]
return None
def update(self, tag: Tag):
"""Updates the given tag
Args:
tag (Tag): A Tag to update
"""
self.create(tag)
def delete(self, tag: Tag):
"""Deletes the given tag
Args:
tag (Tag): The Tag to delete
"""
if tag.name in self._tags:
self._tags = {key: value for key, value in self._tags.items() if key != tag.name}
self._update_tags_list()
def _update_tags_list(self):
with param.edit_constant(self):
self.tags = sorted(list(self._tags.values()))
def bulk_create(self, tags: List[Tag]):
"""Creates the list of tags
Args:
tags (List[Tag]): A list of Tags to create
"""
old_tags = self._tags
new_tags = {tag.name: tag for tag in tags}
self._tags = {**old_tags, **new_tags}
self._update_tags_list()
```
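A hedged usage sketch of the CRUD service above. It assumes `Tag` can be constructed with just a `name` and that `Tag` instances sort by name (the service keeps its `tags` list sorted); the import path of `TagService` is inferred from the file location:
```python
from awesome_panel.application.models import Tag
from awesome_panel.application.services.tag_service import TagService  # path assumed

service = TagService()
service.bulk_create([Tag(name="panel"), Tag(name="bokeh")])
service.create(Tag(name="holoviews"))

assert service.read("bokeh") is not None  # lookup by name
assert service.read("missing") is None

service.delete(service.read("panel"))
assert [tag.name for tag in service.tags] == ["bokeh", "holoviews"]
```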
#### File: express/pane/panes.py
```python
import panel as pn
class Code(pn.pane.Markdown):
"""A HTML code block"""
def __init__(
self, code: str = "", language: str = "python", sizing_mode="stretch_width", **kwargs,
):
"""A HTML code block"""
code_markdown = f"""
```{language}
{code}
```
"""
super().__init__(
code_markdown, sizing_mode=sizing_mode, **kwargs,
)
```
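`Code` is only a thin wrapper around `pn.pane.Markdown` that wraps the given string in a fenced code block, so using it is a one-liner; a minimal sketch (the import path is inferred from the file location):
```python
import panel as pn

from awesome_panel.express.pane.panes import Code  # path assumed

snippet = "def greet(name):\n    return f'Hello {name}'"
pn.Column("## Example", Code(code=snippet, language="python")).servable()
```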
#### File: application/models/test_tag.py
```python
def test_can_construct_tag(tag):
assert isinstance(tag.name, str)
assert repr(tag) == tag.name
assert str(tag) == tag.name
assert hash(tag) == hash(tag.name)
```
#### File: package/tests/test_fontawesome.py
```python
import pathlib
import pytest
from awesome_panel.express import fontawesome
OUT = pathlib.Path(__file__).parent / "out"
def test_get_fontawesome_panel_express_css():
"Test that get_fontawesome_panel_express can return a text string without error"
# When
actual = fontawesome.fontawesome.get_fontawesome_panel_express()
# Then
assert actual
assert fontawesome.fontawesome._FONTAWESOME_PANEL_EXPRESS_HEADER in actual
assert 'div.bk.pa-bus div.bk *::before{content:"\\f207"}' in actual
with open(OUT / "test_fontawesome_panel_express.css", "w",) as file:
file.write(actual)
@pytest.mark.parametrize(
["css", "expected",],
[
(
r'.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}',
'div.bk.pa-bus div.bk *::before{content:"\\f207"}\ndiv.bk.pa-bus-alt div.bk *::before'
'{content:"\\f55e"}',
),
],
)
def test__to_fontawesome_panel_express_css(
css, expected,
):
"Test that _to_fontawesome_panel_express can return a text string without error"
# When
actual = fontawesome.fontawesome._to_fontawesome_panel_express(css)
# Then
assert actual == fontawesome.fontawesome._FONTAWESOME_PANEL_EXPRESS_HEADER + expected
```
#### File: package/tests/test_navigation.py
```python
import panel as pn
import pytest
import awesome_panel.express as pnx
@pytest.mark.panel
def test_pn_navigation_button():
"""# Manual Test of the Navigation Buttons
- Ordinary Button
- Button With Font Awesome
"""
# Given:
pnx.fontawesome.extend()
page = pnx.Header("Button Page", name="Button",)
page_font_awesome = pnx.Header("Font Awesome Page", name=" Font Awesome",)
page_outlet = pn.Column(page)
button = pnx.NavigationButton(page=page, page_outlet=page_outlet,)
button_font_awesome = pnx.NavigationButton(
page=page_font_awesome, page_outlet=page_outlet, css_classes=["pab", "pa-twitter",],
)
app = pn.Column(
pn.pane.Markdown(test_pn_navigation_button.__doc__),
button,
button_font_awesome,
page_outlet,
width=400,
)
# When
app.servable("test_pn_navigation")
@pytest.mark.panel
def test_pn_navigation():
"""# Manual Test of the Navigation Component
- Page 1 is shown by default.
- Can navigate to Page 1 and Page 2
"""
page1 = pn.pane.Markdown("## Page 1", name="Page 1",)
page2 = pn.pane.Markdown("## Page 2", name="Page 2",)
pages = [
page1,
page2,
]
content = pn.Column()
sidebar = pn.Column()
app = pn.Column(pn.pane.Markdown(test_pn_navigation.__doc__), sidebar, content,)
menu = pnx.NavigationMenu(pages=pages, page_outlet=content,)
sidebar.append(menu)
app.servable("test_pn_navigation")
@pytest.mark.panel
def test_pn_navigation_with_font_awesome():
"""# Manual Test of the Navigation Component with Font Awesome
- The first button has no icon as we specified None
- The second button has no icon as we specified an empty list
- The third button has a twitter icon as specified
"""
pnx.fontawesome.extend()
page1 = pn.pane.Markdown("## None", name="Page None",)
page2 = pn.pane.Markdown("## Empty", name="Page Empty",)
page3 = pn.pane.Markdown("## Twitter", name=" Page Twitter",)
pages = [
page1,
page2,
page3,
]
content = pn.Column()
sidebar = pn.Column()
app = pn.Column(
pn.pane.Markdown(test_pn_navigation_with_font_awesome.__doc__), sidebar, content,
)
css_classes = [
None,
[],
["pab", "pa-twitter",],
]
menu = pnx.NavigationMenu(pages=pages, page_outlet=content, css_classes=css_classes,)
sidebar.append(menu)
app.servable("test_pn_navigation_with_font_awesome")
if __name__.startswith("bokeh"):
test_pn_navigation_button()
test_pn_navigation()
test_pn_navigation_with_font_awesome()
```
#### File: awesome-panel/scripts/issue_param.py
```python
import panel as pn
import param
class Component1(param.Parameterized):
pass
class Component2(param.Parameterized):
pass
class Service(param.Parameterized):
component = param.Parameter()
do_something = param.Action()
another = param.String()
service1 = Service(name="Service 1")
service2 = Service(name="Service 2")
SERVICES = [service1, service2]
class Designer(param.Parameterized):
service = param.ObjectSelector()
service_pane = param.ClassSelector(class_=pn.Param)
view = param.Parameter()
def __init__(self, services):
self.param.service.objects = services
self.param.service.default = services[0]
super().__init__()
self.service_pane = pn.Param(self.param.service, parameters=["do_something"])
self.service = services[0]
self.view = pn.Column(
pn.Param(self, parameters=["service"], show_name=False, expand_button=None),
self.service_pane,
)
Designer(SERVICES).view.show()
```
#### File: scripts/issues/issue_code_card.py
```python
import panel as pn
import param
class Card(pn.Column):
def __init__(
self, header, body, **kwargs,
):
print("__init__")
content = pn.Row(body)
header_pane = pn.pane.HTML(f"<h5>{header}</h5>")
super().__init__(
header_pane, content, **kwargs,
)
return
def clone(self, **kwargs):
header, body = self.objects
return super().clone(header.object, body, **kwargs)
class Dummy(param.Parameterized):
value = param.Parameter("abcd")
dummy = Dummy()
@param.depends(dummy.param.value)
def get_card(value):
print("get_card")
return Card("Code", pn.pane.HTML(f"<strong>{value}</strong>"))
pn.Column(get_card).servable()
```
#### File: scripts/issues/issue_plotly_none.py
```python
import hvplot.pandas
import pandas as pd
import panel as pn
import param
import plotly.express as px
pn.extension("plotly")
class App(param.Parameterized):
data_is_set = param.Boolean(default=False)
data = param.DataFrame()
@pn.depends(
"data_is_set", watch=True,
)
def set_data(self,):
if self.data_is_set:
rows = [
(1, 2,),
(3, 4,),
]
self.data = pd.DataFrame(rows, columns=["x", "y",],)
else:
self.data = None
@pn.depends("data")
def plot_holoviews(self,):
if self.data is None or self.data.empty:
return None
return self.data.hvplot(x="x", y="y",)
@pn.depends("data")
def plot_plotly(self,):
if self.data is None or self.data.empty:
return None
return px.scatter(self.data, x="x", y="y",)
def view(self,):
return pn.Column(
self.param.data_is_set,
pn.pane.Markdown("## Holoviews"),
self.plot_holoviews,
pn.pane.Markdown("## Plotly"),
self.plot_plotly,
width=800,
)
if __name__.startswith("bokeh"):
App().view().servable()
```
#### File: awesome-panel/scripts/issue_watch.py
```python
import holoviews as hv
import panel as pn
import param
from param import parameterized
class testview(param.Parameterized):
check_val = param.Integer(default=0)
parameters = param.List(default=[])
def add_parameters(self, k):
parameters = self.parameters
for i in range(k):
name = f"param{i}"
self.param._add_parameter(name, param.Integer(default=0))
parameters.append(name)
self.param.watch(self.increment_val, parameters)
self.parameters = parameters
def increment_val(self, *events):
self.check_val += 1
print(self.check_val)
@param.depends("check_val")
def plot(self):
print("plot")
return hv.Text(0.5, 0.5, str(self.check_val))
viewer = testview()
viewer.add_parameters(3)
pn.Row(pn.Param(viewer.param, parameters=viewer.parameters), viewer.plot).servable()
```
#### File: application/detr/conftest.py
```python
import pytest
from application.pages.detr.detr import DETRApp
@pytest.fixture
def detr_app():
return DETRApp()
``` |
{
"source": "Jhsmit/ColiCoords",
"score": 2
} |
#### File: ColiCoords/colicoords/cell.py
```python
import numpy as np
import multiprocess as mp
from scipy.integrate import quad, IntegrationWarning
from scipy.optimize import brentq
import numbers
import mahotas as mh
import operator
from functools import partial
from contextlib import closing
from tqdm.auto import tqdm
import warnings
from colicoords.fitting import CellFit
from colicoords.support import allow_scalars, box_mean, running_mean
from colicoords.minimizers import Powell
from colicoords.data_models import CellListData
class Cell(object):
"""ColiCoords' main single-cell object.
This class organizes all single-cell associated data together with an internal coordinate system.
Parameters
----------
data_object : :class:`~colicoords.data_models.Data`
Holds all data describing this single cell.
coords : :class:`Coordinates`
Calculates transformations from/to cartesian and cellular coordinates.
name : :obj:`str`
Name identifying the cell (optional).
**kwargs:
Additional kwargs passed to :class:`~colicoords.cell.Coordinates`.
Attributes
----------
data : :class:`~colicoords.data_models.Data`
Holds all data describing this single cell.
coords : :class:`Coordinates`
Calculates and optimizes the cell's coordinate system.
name : :obj:`str`
Name identifying the cell (optional).
"""
def __init__(self, data_object, name=None, init_coords=True, **kwargs):
self.data = data_object
self.coords = Coordinates(self.data, initialize=init_coords, **kwargs)
self.name = name
def optimize(self, data_name='binary', cell_function=None, minimizer=Powell, **kwargs):
"""
Optimize the cell's coordinate system.
The optimization is performed on the data element given by ``data_name`` using the function `cell_function`.
A default function depending on the data class is used of objective is omitted.
Parameters
----------
data_name : :obj:`str`, optional
Name of the data element to perform optimization on.
cell_function
Optional subclass of :class:`~colicoords.fitting.CellMinimizeFunctionBase` to use as objective function.
minimizer : Subclass of :class:`symfit.core.minimizers.BaseMinimizer` or :class:`~collections.abc.Sequence`
Minimizer to use for the optimization. Default is the ``Powell`` minimizer.
**kwargs :
Additional kwargs are passed to :meth:`~colicoords.fitting.CellFit.execute`.
Returns
-------
result : :class:`~symfit.core.fit_results.FitResults`
``symfit`` fit results object.
"""
fit = CellFit(self, data_name=data_name, cell_function=cell_function, minimizer=minimizer)
return fit.execute(**kwargs)
@property
def radius(self):
""":obj:`float`: Radius of the cell in pixels."""
return self.coords.r
@property
def length(self):
""":obj:`float`: Length of the cell in pixels."""
a0, a1, a2 = self.coords.coeff
xl, xr = self.coords.xl, self.coords.xr
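# The length is the arc length of p(x) = a0 + a1*x + a2*x**2 between xl and xr:
# L = integral from xl to xr of sqrt(1 + p'(x)**2) dx with p'(x) = a1 + 2*a2*x,
# whose antiderivative is (1 / (4*a2)) * (u*sqrt(1 + u**2) + arcsinh(u)) with u = a1 + 2*a2*x.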
l = (1 / (4 * a2)) * (
((a1 + 2 * a2 * xr) * np.sqrt(1 + (a1 + 2 * a2 * xr) ** 2) + np.arcsinh((a1 + 2 * a2 * xr))) -
((a1 + 2 * a2 * xl) * np.sqrt(1 + (a1 + 2 * a2 * xl) ** 2) + np.arcsinh((a1 + 2 * a2 * xl)))
)
return l
@property
def circumference(self):
""":obj:`float`: Circumference of the cell in pixels."""
return self.coords._top + self.coords._bot + 2 * np.pi * self.coords.r
@property
def area(self):
""":obj:`float`: Area (2d) of the cell in square pixels."""
return 2 * self.length * self.coords.r + np.pi * self.coords.r ** 2
@property
def surface(self):
""":obj:`float`: Total surface area (3d) of the cell in square pixels."""
return self.length * 2 * np.pi * self.coords.r + 4 * np.pi * self.coords.r ** 2
@property
def volume(self):
""":obj:`float`: Volume of the cell in cubic pixels."""
return np.pi * self.coords.r ** 2 * self.length + (4 / 3) * np.pi * self.coords.r ** 3
def phi_dist(self, step, data_name='', r_max=None, r_min=0, storm_weight=False, method='gauss', sigma=5):
"""
Calculates the angular distribution of signal for a given data element.
Parameters
----------
step : :obj:`float`
Step size between datapoints.
data_name : :obj:`str`
Name of the data element to use.
r_max : :obj:`float`, optional
Datapoints within r_max from the cell midline will be included. If `None` the value from the cell's
coordinate system will be used.
r_min : :obj:`float`, optional
Datapoints outside of r_min from the cell midline will be included.
storm_weight : :obj:`bool`
If `True` the datapoints of the specified STORM-type data will be weighted by their intensity.
method : :obj:`str`
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float`
Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convoluting
datapoints.
Returns
-------
xvals : :class:`~numpy.ndarray`
Array of angles along the cell pole, values are the middle of the bins/kernel.
yvals_l : :class:`~numpy.ndarray`
Array of bin heights for the left pole.
yvals_r : :class:`~numpy.ndarray`
Array of bin heights for the right pole.
"""
r_max = r_max if r_max is not None else self.coords.r
stop = 180
if not data_name:
try:
data_elem = list(self.data.flu_dict.values())[0] # yuck
except IndexError:
try:
data_elem = list(self.data.storm_dict.values())[0]
except IndexError:
raise IndexError('No valid data element found')
else:
try:
data_elem = self.data.data_dict[data_name]
except KeyError:
raise ValueError('Chosen data not found')
if method == 'gauss' and data_elem.dclass == 'storm':
print("Warning: method 'gauss' is not a storm-compatible method, method was set to 'box'")
method = 'box'
bins = np.arange(0, stop + step, step)
if method == 'gauss':
bin_func = running_mean
bin_kwargs = {'sigma': sigma}
xvals = bins
elif method == 'box':
bin_func = box_mean
bin_kwargs = {'storm_weight': storm_weight}
bins = np.arange(0, stop + step, step)
xvals = bins + 0.5 * step # xval is the middle of the bin
else:
raise ValueError('Invalid method')
if data_elem.ndim == 1:
assert data_elem.dclass == 'storm'
x = data_elem['x']
y = data_elem['y']
phi = self.coords.calc_phi(x, y)
lc = self.coords.calc_lc(x, y)
rc = self.coords.calc_rc(x, y)
y_weight = data_elem['intensity'] if storm_weight else None
elif data_elem.ndim == 2 or data_elem.ndim == 3:
phi = self.coords.phi
lc = self.coords.lc
rc = self.coords.rc
y_weight = data_elem
else:
raise ValueError("Invalid data dimensions")
b_max = rc < r_max
b_min = rc > r_min
b1 = (lc == 0) * b_max * b_min
b2 = (lc == self.length) * b_max * b_min
if data_elem.ndim <= 2:
y_wt = y_weight[b1].flatten() if y_weight is not None else None
yvals_l = bin_func(phi[b1].flatten(), y_wt, bins, **bin_kwargs)
else:
yvals_l = np.vstack([bin_func(phi[b1].flatten(), d[b1].flatten(), bins) for d in data_elem])
if data_elem.ndim <= 2:
y_wt = y_weight[b2].flatten() if y_weight is not None else None
yvals_r = bin_func(phi[b2].flatten(), y_wt, bins, **bin_kwargs)
else:
yvals_r = np.vstack([bin_func(phi[b2].flatten(), d[b2].flatten(), bins, **bin_kwargs) for d in data_elem])
return xvals, yvals_l, yvals_r
def l_dist(self, nbins, start=None, stop=None, data_name='', norm_x=False, l_mean=None, r_max=None, storm_weight=False,
method='gauss', sigma=0.5):
"""
Calculates the longitudinal distribution of signal for a given data element.
Parameters
----------
nbins : :obj:`int`
Number of bins between `start` and `stop`.
start : :obj:`float`
Distance from `xl` as starting point for the distribution, units are either pixels or normalized units
if `norm_x=True`.
stop : :obj:`float`
Distance from `xr` as end point for the distribution, units are either pixels or normalized units
if `norm_x=True`.
data_name : :obj:`str`
Name of the data element to use.
norm_x : :obj:`bool`
If `True` the output distribution will be normalized.
l_mean : :obj:`float`, optional
When `norm_x` is `True`, all length coordinates are divided by the length of the cell to normalize it. If
`l_mean` is given, the length coordinates at the poles are divided by `l_mean` instead to allow equal scaling
of all pole regions.
r_max : :obj:`float`, optional
Datapoints within r_max from the cell midline will be included. If `None` the value from the cell's
coordinate system will be used.
storm_weight : :obj:`bool`
If `True` the datapoints of the specified STORM-type data will be weighted by their intensity.
method : :obj:`str`
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float`
Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convoluting
datapoints.
Returns
-------
xvals : :class:`~numpy.ndarray`
Array of distances along the cell midline, values are the middle of the bins/kernel.
yvals : :class:`~numpy.ndarray`
Array of bin heights.
"""
length = 1 if norm_x else self.length
r_max = r_max if r_max else self.coords.r
stop = 1.25 * length if not stop else stop
start = -0.25 * length if not start else start # also needs to be uniform with l_mean? no
if not data_name:
try:
data_elem = list(self.data.flu_dict.values())[0] # yuck
except IndexError:
try:
data_elem = list(self.data.storm_dict.values())[0]
except IndexError:
raise IndexError('No valid data element found')
else:
try:
data_elem = self.data.data_dict[data_name]
except KeyError:
raise ValueError('Chosen data not found')
bins = np.linspace(start, stop, num=nbins, endpoint=True)
if data_elem.ndim == 1:
assert data_elem.dclass == 'storm'
xp = data_elem['x']
yp = data_elem['y']
idx_left, idx_right, xc = self.coords.get_idx_xc(xp, yp)
elif data_elem.ndim == 2 or data_elem.ndim == 3: # image data
xp, yp = self.coords.x_coords, self.coords.y_coords
idx_left, idx_right, xc = self.coords.get_idx_xc(xp, yp)
else:
raise ValueError('Invalid data element dimensions')
r = self.coords.calc_rc(xp, yp)
bools = r < r_max
# todo update to calc_lc
x_len = calc_lc(self.coords.xl, xc[bools].flatten(), self.coords.coeff)
if norm_x:
if l_mean:
len_norm = x_len / self.length
len_norm[x_len < 0] = x_len[x_len < 0] / l_mean
len_norm[x_len > self.length] = ((x_len[x_len > self.length] - self.length) / l_mean) + 1
x_len = len_norm
else:
x_len = x_len / self.length
if method == 'gauss' and data_elem.dclass == 'storm':
print("Warning: method 'gauss' is not a storm-compatible method, method was set to 'box'")
method = 'box'
if method == 'gauss':
bin_func = running_mean
bin_kwargs = {'sigma': sigma}
xvals = bins
elif method == 'box':
bools_box = (x_len > bins.min()) * (x_len < bins.max()) # Remove values outside of bins range
x_len = x_len[bools_box]
bin_func = box_mean
bin_kwargs = {'storm_weight': storm_weight}
xvals = bins + 0.5 * np.diff(bins)[0]
else:
raise ValueError('Invalid method')
if data_elem.ndim == 1:
y_weight = data_elem['intensity'][bools] if storm_weight else None
yvals = bin_func(x_len, y_weight, bins, **bin_kwargs)
elif data_elem.ndim == 2:
y_weight = np.clip(data_elem[bools].flatten(), 0, None) # Negative values are set to zero (why?)
yvals = bin_func(x_len, y_weight, bins, **bin_kwargs)
elif data_elem.ndim == 3:
yvals = np.array([bin_func(x_len, y_weight[bools].flatten(), bins, **bin_kwargs) for y_weight in data_elem])
return xvals, yvals
def l_classify(self, data_name=''):
"""
Classifies foci in STORM-type data by they x-position along the long axis.
The spots are classified into 3 categories: 'poles', 'between' and 'mid'. The pole category are spots who are to
the left and right of xl and xr, respectively. The class 'mid' is a section in the middle of the cell with a
total length of half the cell's length, the class 'between' is the remaining two quarters between 'mid' and
'poles'.
Parameters
----------
data_name : :obj:`str`
Name of the STORM-type data element to classify. When it is not specified, the first STORM data element is used.
Returns
-------
l_classes : :obj:`tuple`
Tuple with number of spots in poles, between and mid classes, respectively.
"""
if not data_name:
data_elem = list(self.data.storm_dict.values())[0]
else:
data_elem = self.data.data_dict[data_name]
assert data_elem.dclass == 'storm'
x, y = data_elem['x'], data_elem['y']
lc = self.coords.calc_lc(x, y)
lq1 = self.length / 4
lq3 = 3 * lq1
poles = np.sum(lc <= 0) + np.sum(lc >= self.length)
between = np.sum(np.logical_and(lc > 0, lc < lq1)) + np.sum(np.logical_and(lc < self.length, lc > lq3))
mid = np.sum(np.logical_and(lc >= lq1, lc <= lq3))
try:
assert len(x) == (poles + between + mid)
except AssertionError:
raise ValueError("Invalid number of points")
return poles, between, mid
def r_dist(self, stop, step, data_name='', norm_x=False, limit_l=None, storm_weight=False, method='gauss',
sigma=0.3):
"""
Calculates the radial distribution of a given data element.
Parameters
----------
stop : :obj:`float`
Until how far from the cell spine the radial distribution should be calculated.
step : :obj:`float`
The binsize of the returned radial distribution.
data_name : :obj:`str`
The name of the data element on which to calculate the radial distribution.
norm_x : :obj:`bool`
If `True` the returned distribution will be normalized with the cell's radius set to 1.
limit_l : :obj:`str`
If `None`, all datapoints are used. This can be limited by providing the value 'full' (omit poles only),
'poles' (include only poles), or a float value between 0 and 1 which will limit the data points by
longitudinal coordinate around the midpoint of the cell.
storm_weight : :obj:`bool`
Only applicable for analyzing STORM-type data elements. If `True` the returned histogram is weighted with
the values in the 'Intensity' field.
method : :obj:`str`, either 'gauss' or 'box'
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float`
Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convoluting
datapoints.
Returns
-------
xvals : :class:`~numpy.ndarray`
Array of distances from the cell midline, values are the middle of the bins.
yvals : :class:`~numpy.ndarray`
Array of bin heights.
"""
if not data_name:
try:
data_elem = list(self.data.flu_dict.values())[0] # yuck
except IndexError:
try:
data_elem = list(self.data.storm_dict.values())[0]
except IndexError:
raise IndexError('No valid data element found')
else:
try:
data_elem = self.data.data_dict[data_name]
except KeyError:
raise ValueError('Chosen data not found')
if method == 'gauss' and data_elem.dclass == 'storm':
print("Warning: method 'gauss' is not a storm-compatible method, method was set to 'box'")
method = 'box'
bins = np.arange(0, stop + step, step)
if method == 'gauss':
bin_func = running_mean
bin_kwargs = {'sigma': sigma}
xvals = bins
elif method == 'box':
bin_func = box_mean
bin_kwargs = {'storm_weight': storm_weight}
bins = np.arange(0, stop + step, step)
xvals = bins + 0.5 * step # xval is the middle of the bin
else:
raise ValueError('Invalid method')
if data_elem.ndim == 1:
assert data_elem.dclass == 'storm'
x = data_elem['x']
y = data_elem['y']
xc = self.coords.calc_xc(x, y)
r = self.coords.calc_rc(x, y)
r = r / self.coords.r if norm_x else r
y_weight = data_elem['intensity'] if storm_weight else None
elif data_elem.ndim == 2 or data_elem.ndim == 3:
r = (self.coords.rc / self.coords.r if norm_x else self.coords.rc)
xc = self.coords.xc
x = self.coords.x_coords
y = self.coords.y_coords
y_weight = data_elem
else:
raise ValueError("Invalid data dimensions")
if limit_l is not None:
if limit_l == 'full':
b = (xc > self.coords.xl) * (xc < self.coords.xr).astype(bool)
elif limit_l == 'poles':
b = np.logical_or(xc <= self.coords.xl, xc >= self.coords.xr)
else:
assert 0 < limit_l < 1, 'The value of limit_l should be between 0 and 1.'
mid_l = self.length / 2
lc = self.coords.calc_lc(x, y)
limit = limit_l * self.length
b = ((lc > mid_l - limit / 2) * (lc < mid_l + limit / 2)).astype(bool)
else:
b = np.ones_like(r, dtype=bool)
if data_elem.ndim <= 2:
y_wt = y_weight[b].flatten() if y_weight is not None else None
yvals = bin_func(r[b].flatten(), y_wt, bins, **bin_kwargs)
else:
yvals = np.vstack([bin_func(r[b].flatten(), d[b].flatten(), bins) for d in data_elem])
return xvals, yvals
def measure_r(self, data_name='brightfield', mode='max', in_place=True, **kwargs):
"""
Measure the radius of the cell.
The radius is found by the intensity-mid/min/max-point of the radial distribution derived from brightfield
(default) or another data element.
Parameters
----------
data_name : :obj:`str`
Name of the data element to use.
mode : :obj:`str`
Mode to find the radius. Can be either 'min', 'mid' or 'max' to use the minimum, middle or maximum value
of the radial distribution, respectively.
in_place : :obj:`bool`
If `True` the found value of `r` is directly substituted in the cell's coordinate system, otherwise the
value is returned.
Returns
-------
radius : :obj:`float`
The measured radius `r` if `in_place` is `False`, otherwise `None`.
"""
step = kwargs.pop('step', 1)
stop = kwargs.pop('stop', int(self.data.shape[0] / 2))
x, y = self.r_dist(stop, step, data_name=data_name) # todo again need sensible default for stop
if mode == 'min':
imin = np.argmin(y)
r = x[imin]
elif mode == 'mid':
mid_val = (np.min(y) + np.max(y)) / 2
imin = np.argmin(y)
imax = np.argmax(y)
y_select = y[imin:imax] if imax > imin else y[imax:imin][::-1]
x_select = x[imin:imax] if imax > imin else x[imax:imin][::-1]
try:
r = np.interp(mid_val, y_select, x_select)
except ValueError:
print("Cell {}: No r value was found".format(self.name))
return
elif mode == 'max':
imax = np.argmax(y)
r = x[imax]
else:
raise ValueError('Invalid value for mode')
if in_place:
self.coords.r = r
else:
return r
def reconstruct_image(self, data_name, norm_x=False, r_scale=1, **kwargs):
# todo stop and step defaults when norm_x=True?
# todo allow reconstruction of standardized cell shape
# todo refactor to reconstruct image?
"""
Reconstruct the image from a given data element and the cell's current coordinate system.
Parameters
----------
data_name : :obj:`str`
Name of the data element to use.
norm_x : :obj:`bool`
Boolean indicating whether or not to normalize to r=1.
r_scale : :obj:`float`
Stretch or compress the image in the radial direction by this factor. Values > 1 will compress the image.
**kwargs
Optional keyword arguments are 'stop' and 'step' which are passed to `r_dist`.
Returns
-------
img : :class:`~numpy.ndarray`
Image of the reconstructed cell.
"""
stop = kwargs.pop('stop', np.ceil(np.max(self.data.shape) / 2))
step = kwargs.pop('step', 1)
xp, fp = self.r_dist(stop, step, data_name=data_name, norm_x=norm_x)
interp = np.interp(r_scale * self.coords.rc, xp, np.nan_to_num(fp)) # todo check nantonum cruciality
return interp
def get_intensity(self, mask='binary', data_name='', func=np.mean):
"""
Returns the mean fluorescence intensity.
Mean fluorescence intensity either in the region masked by the binary image or in the reconstructed binary mask derived
from the cell's coordinate system.
Parameters
----------
mask : :obj:`str`
Either 'binary' or 'coords' to specify the source of the mask used. 'binary' uses the binary image as mask,
'coords' uses reconstructed binary from coordinate system.
data_name : :obj:`str`:
The name of the image data element to get the intensity values from.
func : :obj:`callable`
This function is applied to the data element's pixels selected by the masking operation. The default is
`np.mean()`.
Returns
-------
value : :obj:`float`
Mean fluorescence pixel value.
"""
if mask == 'binary':
m = self.data.binary_img.astype(bool)
elif mask == 'coords':
m = self.coords.rc < self.coords.r
else:
raise ValueError("Mask keyword should be either 'binary' or 'coords'")
if not data_name:
data_elem = list(self.data.flu_dict.values())[0] # yuck
else:
try:
data_elem = self.data.data_dict[data_name]
except KeyError:
raise ValueError('Chosen data not found')
return func(data_elem[m])
@staticmethod
def _bin_func(xvals, y_weight, bins):
"""bins xvals in given bins using y_weight as weights"""
i_sort = xvals.argsort()
r_sorted = xvals[i_sort]
y_weight = y_weight[i_sort] if y_weight is not None else y_weight
bin_inds = np.digitize(r_sorted,
bins) - 1 # -1 to assure points between 0 and step are in bin 0 (the first)
yvals = np.bincount(bin_inds, weights=y_weight, minlength=len(bins))
if y_weight is not None:
yvals /= np.bincount(bin_inds, minlength=len(bins))
return np.nan_to_num(yvals)
def copy(self):
"""
Make a copy of the cell object and all its associated data elements.
This is a deep copy meaning that all numpy data arrays are copied in memory and therefore modifying the copied
cell object does not modify the original cell object.
Returns
-------
cell : :class:`~colicoords.cell.Cell`
Copied cell object.
"""
# todo needs testing (this is done?) arent there more properties to copy?
parameters = {par: getattr(self.coords, par) for par in self.coords.parameters}
parameters['shape'] = self.coords.shape
new_cell = Cell(data_object=self.data.copy(), name=self.name, init_coords=False, **parameters)
return new_cell
class Coordinates(object):
"""
Cell's coordinate system described by the polynomial p(x) and associated functions.
Parameters
----------
data : :class:`~colicoords.data_models.Data`
The `data` object defining the shape.
initialize : :obj:`bool`, optional
If `False` the coordinate system parameters are not initialized with initial guesses.
**kwargs
Can be used to manually supply parameter values if `initialize` is `False`.
Attributes
----------
xl : :obj:`float`
Left cell pole x-coordinate.
xr : :obj:`float`
Right cell pole x-coordinate.
r : :obj:`float`
Cell radius.
coeff : :class:`~numpy.ndarray`
Coefficients [a0, a1, a2] of the polynomial a0 + a1*x + a2*x**2 which describes the cell's shape.
"""
parameters = ['r', 'xl', 'xr', 'a0', 'a1', 'a2']
def __init__(self, data, initialize=True, **kwargs):
self.data = data
self.coeff = np.array([1., 1., 1.])
if initialize:
self.xl, self.xr, self.r, self.coeff = self._initial_guesses(data) # refactor to class method
self.coeff = self._initial_fit()
self.shape = data.shape
else:
for p in self.parameters + ['shape']:
setattr(self, p, kwargs.pop(p, 1))
@property
def a0(self):
"""float: Polynomial p(x) 0th degree coefficient."""
return self.coeff[0]
@a0.setter
def a0(self, value):
self.coeff[0] = value
@property
def a1(self):
"""float: Polynomial p(x) 1st degree coefficient."""
return self.coeff[1]
@a1.setter
def a1(self, value):
self.coeff[1] = value
@property
def a2(self):
"""float: Polynomial p(x) 2nd degree coefficient."""
return self.coeff[2]
@a2.setter
def a2(self, value):
self.coeff[2] = value
def sub_par(self, par_dict):
"""
Substitute the values in `par_dict` as the coordinate systems parameters.
Parameters
----------
par_dict : :obj:`dict`
Dictionary with parameters which values are set to the attributes.
"""
for k, v in par_dict.items():
setattr(self, k, v)
@allow_scalars
def calc_xc(self, xp, yp):
"""
Calculates the coordinate xc on p(x) closest to xp, yp.
All coordinates are cartesian. Solutions are found by solving the cubic equation.
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
xc : :obj:`float` or :class:`~numpy.ndarray`
Cellular x-coordinate for point(s) xp, yp
"""
assert xp.shape == yp.shape
# https://en.wikipedia.org/wiki/Cubic_function#Algebraic_solution
a0, a1, a2 = self.coeff
# xp, yp = xp.astype('float32'), yp.astype('float32')
# Converting of cell spine polynomial coefficients to coefficients of polynomial giving distance r
a, b, c, d = 4 * a2 ** 2, 6 * a1 * a2, 4 * a0 * a2 + 2 * a1 ** 2 - 4 * a2 * yp + 2, 2 * a0 * a1 - 2 * a1 * yp - 2 * xp
# a: float, b: float, c: array, d: array
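# These coefficients follow from minimizing the squared distance
# D(x) = (x - xp)**2 + (p(x) - yp)**2: setting dD/dx = 0 gives the cubic
# a*x**3 + b*x**2 + c*x + d = 0. The discriminant below selects between the
# general solution (one real root, discr < 0) and the trigonometric solution
# (three real roots).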
discr = 18 * a * b * c * d - 4 * b ** 3 * d + b ** 2 * c ** 2 - 4 * a * c ** 3 - 27 * a ** 2 * d ** 2
# if np.any(discr == 0):
# raise ValueError('Discriminant equal to zero encountered. This should never happen. Please make an issue.')
if np.all(discr < 0):
x_c = solve_general(a, b, c, d)
else:
x_c = np.zeros(xp.shape)
mask = discr < 0
general_part = solve_general(a, b, c[mask], d[mask])
trig_part = solve_trig(a, b, c[~mask], d[~mask])
x_c[mask] = general_part
x_c[~mask] = trig_part
return x_c
@allow_scalars
def calc_xc_mask(self, xp, yp):
"""
Calculates whether the point (xp, yp) lies in the left polar area, the right polar area, or in between.
Returned values are 1 for left pole, 2 for middle, 3 for right pole.
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
xc_mask : :obj:`float`: or :class:`~numpy.ndarray`:
Array to mask different cellular regions.
"""
idx_left, idx_right, xc = self.get_idx_xc(xp, yp)
mask = 2 * np.ones_like(xp)
mask[idx_left] = 1
mask[idx_right] = 3
return mask
@allow_scalars
def calc_xc_masked(self, xp, yp):
"""
Calculates the coordinate xc on p(x) closest to (xp, yp), where xl < xc < xr.
Parameters
----------
xp : :obj:`float`: or :class:`~numpy.ndarray`:
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float`: or :class:`~numpy.ndarray`:
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
xc : :obj:`float` or :class:`~numpy.ndarray`
Cellular x-coordinate for point(s) xp, yp, where xl < xc < xr.
"""
idx_left, idx_right, xc = self.get_idx_xc(xp, yp)
xc[idx_left] = self.xl
xc[idx_right] = self.xr
return xc
@allow_scalars
def calc_rc(self, xp, yp):
"""
Calculates the distance of (xp, yp) to (xc, p(xc)).
The returned value is the distance from the points (xp, yp) to the midline of the cell.
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
rc : :obj:`float` or :class:`~numpy.ndarray`
Distance to the midline of the cell.
"""
xc = self.calc_xc_masked(xp, yp)
a0, a1, a2 = self.coeff
return np.sqrt((xc - xp) ** 2 + (a0 + xc * (a1 + a2 * xc) - yp) ** 2)
@allow_scalars
def calc_lc(self, xp, yp):
"""
Calculates the distance of xc along the midline of the cell corresponding to the points (xp, yp).
The returned value is the distance along the midline from the left pole to the projection of the points (xp, yp).
Parameters
----------
xp : :obj:`float`: or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
lc : :obj:`float` or :class:`~numpy.ndarray`
Distance along the midline of the cell.
"""
xc = self.calc_xc_masked(xp, yp)
return calc_lc(self.xl, xc, self.coeff)
@allow_scalars
def calc_phi(self, xp, yp):
"""
Calculates the angle between the line perpendicular to the cell midline and the line between (xp, yp)
and (xc, p(xc)).
The returned values are in degrees. The angle is defined to be 0 degrees for values in the upper half of the
image (yp < p(xp)), running from 180 to zero along the right polar region, 180 degrees in the lower half and
running back to 0 degrees along the left polar region.
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
phi : :obj:`float` or :class:`~numpy.ndarray`
Angle phi for (xp, yp).
"""
idx_left, idx_right, xc = self.get_idx_xc(xp, yp)
xc[idx_left] = self.xl
xc[idx_right] = self.xr
yc = self.p(xc)
phi = np.empty(xp.shape)
top = yp < self.p(xp)
phi[top] = 0
phi[~top] = np.pi
th1 = np.arctan2(yp - yc, xc - xp)
th2 = np.arctan(self.p_dx(xc))
thetha = th1 + th2 + np.pi / 2
phi[idx_right] = (np.pi - thetha[idx_right]) % np.pi
phi[idx_left] = thetha[idx_left]
return phi * (180 / np.pi)
#TODO tests
@allow_scalars
def calc_perimeter(self, xp, yp):
"""
Calculates how far along the perimeter of the cell the points (xp, yp) lay.
The perimeter of the cell is the current outline as described by the current coordinate system. The zero-point
is the top-left point where the top membrane section starts (lc=0, phi=0) and increases along the perimeter
clockwise.
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
per : :obj:`float` or :class:`~numpy.ndarray`
Length along the cell perimeter.
"""
output = np.zeros_like(xp)
lc = self.calc_lc(xp, yp)
phi = self.calc_phi(xp, yp)
sc = np.pi*self.r # Semicircle perimeter length
# Top membrane section
b = phi == 0
output[b] = (lc[b] / self.length) * self._top
# Right pole
b = lc == self.length
output[b] = self._top + phi[b]*(np.pi/180)*self.r
# Bottom, reverse direction
b = phi == 180
output[b] = self._top + sc + (1 - lc[b] / self.length) * self._bot
# Left pole
b = lc == 0
output[b] = self._top + sc + self._bot + (180-phi[b])*(np.pi/180)*self.r
return output
#TODO tests
@allow_scalars
def rev_calc_perimeter(self, par_values):
"""
For a given distance along the perimeter calculate the `xp`, `yp` cartesian coordinates.
Parameters
----------
par_values : :obj:`float` or :class:`~numpy.ndarray`
Input parameter values. Must be between 0 and :attr:`~colicoords.Cell.circumference`.
Returns
-------
xp : :obj:`float` or :class:`~numpy.ndarray`
Cartesian x-coordinate corresponding to `par_values`.
yp : :obj:`float` or :class:`~numpy.ndarray`
Cartesian y-coordinate corresponding to `par_values`.
"""
sc = np.pi*self.r
if np.min(par_values) < 0:
raise ValueError("Minimum value of `par_values` must be larger than 0")
if np.max(par_values) > self._top + self._bot + 2*sc:
raise ValueError("Maximum value of `par_values` must be smaller than the cell's circumference")
lc = np.zeros_like(par_values)
phi = np.zeros_like(par_values)
# Left pole
b = par_values > self._top + sc + self._bot
l = par_values[b] - (self._top + sc + self._bot)
lc[b] = 0.
phi[b] = 180 - (180 * l) / sc
# Bottom
b = (par_values <= self._top + sc + self._bot) * (par_values > self._top + sc)
l = par_values[b] - (self._top + sc)
lc[b] = (self._bot - l) / self._bot
phi[b] = 180.
# Right pole
b = (par_values <= self._top + sc) * (par_values > self._top)
l = par_values[b] - self._top
lc[b] = 1.
phi[b] = (180 * l) / sc
# Top
b = par_values <= self._top
lc[b] = par_values[b] / self._top
phi[b] = 0.
return self.rev_transform(lc, self.r, phi)
def get_idx_xc(self, xp, yp):
"""
Finds the indices of the arrays xp an yp where they either belong to the left or right polar regions,
as well as coordinates xc.
Parameters
----------
xp : :class:`~numpy.ndarray`
Input x-coordinates. Must be the same shape as yp.
yp : :class:`~numpy.ndarray`
Input y-coordinates. Must be the same shape as xp.
Returns
-------
idx_left : :class:`~numpy.ndarray`
Index array of elements in the area of the cell's left pole.
idx_right : :class:`~numpy.ndarray`
Index array of elements in the area of cell's right pole.
xc : :class:`~numpy.ndarray`
Cellular coordinates `xc` corresponding to `xp`, `yp`, extending into the polar regions.
"""
xc = np.array(self.calc_xc(xp, yp).copy())
yp = self.p(xc)
# Area left of perpendicular line at xl:
op = operator.lt if self.p_dx(self.xl) > 0 else operator.gt
idx_left = op(yp, self.q(xc, self.xl))
op = operator.gt if self.p_dx(self.xr) > 0 else operator.lt
idx_right = op(yp, self.q(xc, self.xr))
return idx_left, idx_right, xc
@allow_scalars
    # todo: scalar input won't work because of squeeze?
def transform(self, xp, yp):
"""
Transforms image coordinates (xp, yp) to cell coordinates (lc, rc, psi)
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp
yp : :obj:`float` or :class:`~numpy.ndarray`
            Input scalar or vector/matrix y-coordinate. Must be the same shape as xp
Returns
-------
coordinates : :obj:`tuple`
Tuple of cellular coordinates lc, rc, psi
"""
lc = self.calc_lc(xp, yp)
rc = self.calc_rc(xp, yp)
psi = self.calc_phi(xp, yp)
return lc, rc, psi
@allow_scalars
def full_transform(self, xp, yp):
"""
Transforms image coordinates (xp, yp) to cell coordinates (xc, lc, rc, psi).
Parameters
----------
xp : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix x-coordinate. Must be the same shape as yp.
yp : :obj:`float` or :class:`~numpy.ndarray`
            Input scalar or vector/matrix y-coordinate. Must be the same shape as xp.
Returns
-------
coordinates : :obj:`tuple`
Tuple of cellular coordinates xc, lc, rc, psi.
"""
xc = self.calc_xc_masked(xp, yp)
lc = self.calc_lc(xp, yp)
rc = self.calc_rc(xp, yp)
psi = self.calc_phi(xp, yp)
return xc, lc, rc, psi
@allow_scalars
def rev_transform(self, lc, rc, phi, l_norm=True):
"""
Reverse transform from cellular coordinates `lc`, `rc`, `phi` to cartesian coordinates `xp`, `yp`.
Parameters
----------
lc : :obj:`float` or :class:`~numpy.ndarray`
Input scalar or vector/matrix l-coordinate.
        rc : :obj:`float` or :class:`~numpy.ndarray`
            Input scalar or vector/matrix r-coordinate.
        phi : :obj:`float` or :class:`~numpy.ndarray`
            Input scalar or vector/matrix angular coordinate phi.
l_norm : :obj:`bool`, optional
If `True` (default), the lc coordinate has to be input as normalized.
Returns
-------
xp : :obj:`float` or :class:`~numpy.ndarray`
Cartesian x-coordinate corresponding to `lc`, `rc`, `phi`
yp : :obj:`float` or :class:`~numpy.ndarray`
Cartesian y-coordinate corresponding to `lc`, `rc`, `phi`
"""
assert lc.min() >= 0
if l_norm:
assert lc.max() <= 1
lc *= self.length
else:
assert lc.max() <= self.length
b_left = lc <= 0
b_right = lc >= self.length
b_mid = np.logical_and(~b_left, ~b_right)
xp, yp = np.empty_like(lc, dtype=float), np.empty_like(lc, dtype=float)
# left:
xc = self.xl
yc = self.p(xc)
th2 = np.arctan(self.p_dx(xc)) * (180 / np.pi)
theta = 180 - (-th2 + phi[b_left])
dx = -rc[b_left] * np.sin(theta * (np.pi / 180))
dy = rc[b_left] * np.cos(theta * (np.pi / 180))
xp[b_left] = xc + dx
yp[b_left] = yc + dy
# middle:
# brute force fsolve xc form lc
sign = (phi[b_mid] / -90) + 1 # top or bottom of the cell
# xc = np.array([fsolve(solve_length, l_guess, args=(self.xl, self.coeff, l_guess)).squeeze() for l_guess in lc[b_mid]])
xc = np.array(
[brentq(solve_length, self.xl, self.xr, args=(self.xl, self.coeff, l_guess)) for l_guess in lc[b_mid]])
# lc_mid = lc[b_mid].copy()
# xc = fsolve(solve_length, lc_mid, args=(self.xl, self.coeff, lc_mid)).squeeze()
yc = self.p(xc)
p_dx_sq = self.p_dx(xc) ** 2
dy = (-rc[b_mid] / np.sqrt(1 + p_dx_sq)) * sign
dx = (rc[b_mid] / np.sqrt(1 + (1 / p_dx_sq))) * sign * np.sign(self.p_dx(xc))
xp[b_mid] = xc + dx
yp[b_mid] = yc + dy
# right
xc = self.xr
yc = self.p(xc)
th2 = np.arctan(self.p_dx(self.xr)) * (180 / np.pi)
theta = 180 - (th2 + phi[b_right])
dx = rc[b_right] * np.sin(theta * (np.pi / 180))
dy = rc[b_right] * np.cos(theta * (np.pi / 180))
xp[b_right] = xc + dx
yp[b_right] = yc + dy
return xp, yp
@property
    def x_coords(self):
        """:class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image with cartesian x-coordinates."""
ymax = self.shape[0]
xmax = self.shape[1]
return np.repeat(np.arange(xmax), ymax).reshape(xmax, ymax).T + 0.5
@property
def y_coords(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image with cartesian y-coordinates."""
ymax = self.shape[0]
xmax = self.shape[1]
return np.repeat(np.arange(ymax), xmax).reshape(ymax, xmax) + 0.5
@property
def xc(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image with x coordinates on p(x)"""
return self.calc_xc(self.x_coords, self.y_coords)
@property
def yc(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image with y coordinates on p(x)"""
return self.p(self.xc)
@property
def xc_masked(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image with x coordinates on p(x)
where xl < xc < xr.
"""
return self.calc_xc_masked(self.x_coords, self.y_coords)
@property
def xc_mask(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell image where elements have values 1, 2, 3 for
left pole, middle and right pole, respectively.
"""
return self.calc_xc_mask(self.x_coords, self.y_coords)
@property
def rc(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell with distance r to the cell midline."""
return self.calc_rc(self.x_coords, self.y_coords)
@property
    def lc(self):
        """:class:`~numpy.ndarray`: Matrix of shape m x n equal to cell with distance l along the cell midline."""
return self.calc_lc(self.x_coords, self.y_coords)
@property
def phi(self):
""":class:`~numpy.ndarray`: Matrix of shape m x n equal to cell with angle psi relative to the cell midline."""
return self.calc_phi(self.x_coords, self.y_coords)
@property
def length(self):
""":obj:`float`: Length of the cell in pixels."""
a0, a1, a2 = self.coeff
xl, xr = self.xl, self.xr
l = (1 / (4 * a2)) * (
((a1 + 2 * a2 * xr) * np.sqrt(1 + (a1 + 2 * a2 * xr) ** 2) + np.arcsinh((a1 + 2 * a2 * xr))) -
((a1 + 2 * a2 * xl) * np.sqrt(1 + (a1 + 2 * a2 * xl) ** 2) + np.arcsinh((a1 + 2 * a2 * xl)))
)
return l
@property
def _top(self):
""":obj:`float`: Length of the cell's top membrane segment."""
# http://tutorial.math.lamar.edu/Classes/CalcII/ParaArcLength.aspx
def integrant_top(t, a1, a2, r):
return np.sqrt(1 + (a1 + 2 * a2 * t) ** 2 + ((4 * a2 ** 2 * r ** 2) / (1 + (a1 + 2 * a2 * t) ** 2) ** 2) + (
(4 * a2 * r) / np.sqrt(1 + (a1 + 2 * a2 * t))))
top, terr = quad(integrant_top, self.xl, self.xr,
args=(self.a1, self.a2, self.r))
if np.isnan(top) or np.isnan(terr):
msg = "Falling back to numerical approximation"
warnings.warn(msg, IntegrationWarning, stacklevel=2)
t = np.linspace(self.xl, self.xr, num=100)
a0, a1, a2 = self.coeff
x_top = t + self.r * ((a1 + 2 * a2 * t) / np.sqrt(1 + (a1 + 2 * a2 * t) ** 2))
y_top = a0 + a1 * t + a2 * (t ** 2) - self.r * (1 / np.sqrt(1 + (a1 + 2 * a2 * t) ** 2))
top_arr = np.asarray([y_top, x_top]).swapaxes(1, 0)
d = np.diff(top_arr, axis=0)
dists = np.sqrt((d ** 2).sum(axis=1))
top = np.sum(dists)
return top
@property
def _bot(self):
""":obj:`float`: Length of the cell's bottom membrane segment."""
def integrant_bot(t, a1, a2, r):
return np.sqrt(1 + (a1 + 2 * a2 * t) ** 2 + ((4 * a2 ** 2 * r ** 2) / (1 + (a1 + 2 * a2 * t) ** 2) ** 2) - (
(4 * a2 * r) / np.sqrt(1 + (a1 + 2 * a2 * t))))
bot, berr = quad(integrant_bot, self.xl, self.xr,
args=(self.a1, self.a2, self.r))
if np.isnan(bot) or np.isnan(berr):
msg = "Falling back to numerical approximation"
warnings.warn(msg, IntegrationWarning, stacklevel=2)
t = np.linspace(self.xl, self.xr, num=100)
a0, a1, a2 = self.coeff
x_bot = t + - self.r * ((a1 + 2 * a2 * t) / np.sqrt(1 + (a1 + 2 * a2 * t) ** 2))
y_bot = a0 + a1 * t + a2 * (t ** 2) + self.r * (1 / np.sqrt(1 + (a1 + 2 * a2 * t) ** 2))
bot_arr = np.asarray([y_bot, x_bot]).swapaxes(1, 0)
d = np.diff(bot_arr, axis=0)
dists = np.sqrt((d ** 2).sum(axis=1))
bot = np.sum(dists)
return bot
def p(self, x_arr):
"""
Calculate p(x).
The function p(x) describes the midline of the cell.
Parameters
----------
x_arr : :class:`~numpy.ndarray`
Input x values.
Returns
-------
p : :class:`~numpy.ndarray`
Evaluated polynomial p(x)
"""
a0, a1, a2 = self.coeff
return a0 + a1 * x_arr + a2 * x_arr ** 2
def p_dx(self, x_arr):
"""
Calculate the derivative p'(x) evaluated at x.
Parameters
----------
x_arr :class:`~numpy.ndarray`:
Input x values.
Returns
-------
p_dx : :class:`~numpy.ndarray`
Evaluated function p'(x).
"""
a0, a1, a2 = self.coeff
return a1 + 2 * a2 * x_arr
def q(self, x, xp):
"""array_like: Returns q(x) where q(x) is the line perpendicular to p(x) at xp"""
return (-x / self.p_dx(xp)) + self.p(xp) + (xp / self.p_dx(xp))
def get_core_points(self, xl=None, xr=None):
"""
Returns the coordinates of the roughly estimated 'core' points of the cell.
Used for determining the initial guesses for the coefficients of p(x).
Parameters
----------
xl : :obj:`float`, optional
Starting point x of where to get the 'core' points.
xr : :obj:`float`, optional
End point x of where to get the 'core' points.
Returns
-------
xvals : :class:`np.ndarray`
Array of x coordinates of 'core' points.
yvals : :class:`np.ndarray`
Array of y coordinates of 'core' points.
"""
        xl = xl if xl is not None else self.xl
        xr = xr if xr is not None else self.xr
im_x, im_y = np.nonzero(self.data.data_dict['binary'])
x_range = np.arange(int(xl), int(xr))
y = np.array([np.nanmean(np.where(im_y == y, im_x, np.nan)) for y in x_range])
return x_range, y
@staticmethod
def _initial_guesses(data):
if data.binary_img is not None:
r = np.sqrt(mh.distance(data.binary_img).max())
area = np.sum(data.binary_img)
l = (area - np.pi * r ** 2) / (2 * r)
y_cen, x_cen = mh.center_of_mass(data.binary_img)
xl, xr = x_cen - l / 2, x_cen + l / 2
coeff = np.array([y_cen, 0.01, 0.0001])
else:
raise ValueError("Binary image is required for initial guesses of cell coordinates")
return xl, xr, r, coeff
def _initial_fit(self):
x, y = self.get_core_points()
return np.polyfit(x, y, 2)[::-1]
def optimize_worker(cell, **kwargs):
"""
Worker object for optimize multiprocessing.
Parameters
----------
cell : :class:`~colicoords.cell.Cell`
Cell object to optimize.
**kwargs
Additional keyword arguments passed to :meth:`~colicoords.cell.Cell.optimize`
Returns
-------
result : :class:`~symit.core.fit.FitResults`
"""
res = cell.optimize(**kwargs)
return res
class CellList(object):
"""
List equivalent of the :class:`~colicoords.cell.Cell` object.
This Object holding a list of cell objects exposing several methods to either apply functions to all cells or to
extract values from all cell objects. It supports iteration over Cell objects and Numpy-style array indexing.
Parameters
----------
cell_list : :obj:`list` or :class:`numpy.ndarray`
List of array of :class:`~colicoords.cell.Cell` objects.
Attributes
----------
cell_list : :class:`~numpy.ndarray`
Numpy array of `Cell` objects
data : :class:`~colicoords.data_models.CellListData`
Object with common attributes for all cells
"""
def __init__(self, cell_list):
self.cell_list = np.array(cell_list)
self.data = CellListData(cell_list)
def optimize(self, data_name='binary', cell_function=None, minimizer=Powell, **kwargs):
"""
Optimize the cell's coordinate system.
        The optimization is performed on the data element given by ``data_name`` using the objective function given
        by ``cell_function``. A default depending on the data class is used if ``cell_function`` is omitted.
Parameters
----------
data_name : :obj:`str`, optional
Name of the data element to perform optimization on.
cell_function
Optional subclass of :class:`~colicoords.fitting.CellMinimizeFunctionBase` to use as objective function.
minimizer : Subclass of :class:`symfit.core.minimizers.BaseMinimizer` or :class:`~collections.abc.Sequence`
Minimizer to use for the optimization. Default is the ``Powell`` minimizer.
**kwargs :
Additional kwargs are passed to :meth:`~colicoords.fitting.CellFit.execute`.
Returns
-------
res_list : :obj:`list` of :class:`~symfit.core.fit_results.FitResults`
            List of `symfit` ``FitResults`` objects.
"""
return [c.optimize(data_name=data_name, cell_function=cell_function, minimizer=minimizer, **kwargs) for c in tqdm(self)]
    def optimize_mp(self, data_name='binary', cell_function=None, minimizer=Powell, processes=None, **kwargs):
        """ Optimize all cells' coordinate systems using `optimize` through parallel computing.
        A call to this method must be protected by an ``if __name__ == '__main__'`` guard unless it is executed in a
        Jupyter notebook.
Parameters
----------
data_name : :obj:`str`, optional
Name of the data element to perform optimization on.
cell_function
Optional subclass of :class:`~colicoords.fitting.CellMinimizeFunctionBase` to use as objective function.
minimizer : Subclass of :class:`symfit.core.minimizers.BaseMinimizer` or :class:`~collections.abc.Sequence`
Minimizer to use for the optimization. Default is the ``Powell`` minimizer.
processes : :obj:`int`
Number of parallel processes to spawn. Default is the number of logical processors on the host machine.
**kwargs :
Additional kwargs are passed to :meth:`~colicoords.fitting.CellFit.execute`.
Returns
-------
res_list : :obj:`list` of :class:`~symfit.core.fit_results.FitResults`
            List of `symfit` ``FitResults`` objects.
"""
kwargs = {'data_name': data_name, 'cell_function': cell_function, 'minimizer': minimizer, **kwargs}
f = partial(optimize_worker, **kwargs)
with closing(mp.Pool(processes=processes)) as pool:
res = list(tqdm(pool.imap(f, self), total=len(self)))
for r, cell in zip(res, self):
cell.coords.sub_par(r.params)
return res
def execute(self, worker):
"""
Apply worker function `worker` to all cell objects and returns the results.
Parameters
----------
worker : :obj:`callable`
Worker function to be executed on all cell objects.
Returns
-------
res : :obj:`list`
            List of results returned from `worker`.
"""
res = map(worker, self)
return res
def execute_mp(self, worker, processes=None):
"""
Apply worker function `worker` to all cell objects and returns the results.
Parameters
----------
worker : :obj:`callable`
Worker function to be executed on all cell objects.
processes : :obj:`int`
Number of parallel processes to spawn. Default is the number of logical processors on the host machine.
Returns
-------
res : :obj:`list`
List of results returned from ``worker``.
"""
with closing(mp.Pool(processes=processes)) as pool:
res = list(tqdm(pool.imap(worker, self), total=len(self)))
return res
def append(self, cell_obj):
"""
Append Cell object `cell_obj` to the list of cells.
Parameters
----------
cell_obj : :class:`~colicoords.cell.Cell`
Cell object to append to current cell list.
"""
assert isinstance(cell_obj, Cell)
self.cell_list = np.append(self.cell_list, cell_obj)
def r_dist(self, stop, step, data_name='', norm_x=False, limit_l=None, storm_weight=False, method='gauss',
sigma=0.3):
"""
Calculates the radial distribution for all cells of a given data element.
Parameters
----------
stop : :obj:`float`
Until how far from the cell spine the radial distribution should be calculated
step : :obj:`float`
The binsize of the returned radial distribution
data_name : :obj:`str`
The name of the data element on which to calculate the radial distribution
norm_x : :obj:`bool`
If `True` the returned distribution will be normalized with the cell's radius set to 1.
        limit_l : :obj:`str` or :obj:`float`
            If `None`, all datapoints are used. This can be limited by providing the value 'full' (omit poles only),
            'poles' (include only poles), or a float value between 0 and 1 which will limit the data points by
            longitudinal coordinate around the midpoint of the cell.
storm_weight : :obj:`bool`
Only applicable for analyzing STORM-type data elements. If `True` the returned histogram is weighted with
the values in the 'Intensity' field.
method : :obj:`str`, either 'gauss' or 'box'
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float`
            Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convolving
datapoints
Returns
-------
xvals : :class:`~numpy.ndarray`
Array of distances from the cell midline, values are the middle of the bins
yvals : :class:`~numpy.ndarray`
2D Array where each row is the bin heights for each cell.
"""
        # todo might be a good idea to warn the user when attempting this on a list of 3D data
if not data_name:
try:
data_elem = list(self.cell_list[0].data.flu_dict.values())[0] # yuck
except IndexError:
try:
data_elem = list(self.cell_list[0].data.storm_dict.values())[0]
except IndexError:
raise IndexError('No valid data element found')
else:
try:
data_elem = self.cell_list[0].data.data_dict[data_name]
except KeyError:
raise ValueError('Chosen data not found')
if method == 'gauss' and data_elem.dclass == 'storm':
print("Warning: method 'gauss' is not a storm-compatible method, method was set to 'box'")
method = 'box'
numpoints = len(np.arange(0, stop + step, step))
out_arr = np.zeros((len(self), numpoints))
for i, c in enumerate(self):
xvals, yvals = c.r_dist(stop, step, data_name=data_name, norm_x=norm_x, storm_weight=storm_weight,
limit_l=limit_l,
method=method, sigma=sigma)
out_arr[i] = yvals
return xvals, out_arr
def l_dist(self, nbins, start=None, stop=None, data_name='', norm_x=True, method='gauss', r_max=None,
storm_weight=False, sigma=None):
"""
Calculates the longitudinal distribution of signal for a given data element for all cells.
Normalization by cell length is enabled by default to remove cell-to-cell variations in length.
Parameters
----------
nbins : :obj:`int`
Number of bins between `start` and `stop`.
start : :obj:`float`
Distance from `xl` as starting point for the distribution, units are either pixels or normalized units
if `norm_x=True`.
stop : :obj:`float`
Distance from `xr` as end point for the distribution, units are are either pixels or normalized units
if `norm_x=True`.
data_name : :obj:`str`
Name of the data element to use.
norm_x : :obj:`bool`
If *True* the output distribution will be normalized.
r_max : :obj:`float`, optional
Datapoints within r_max from the cell midline will be included. If `None` the value from the cell's
coordinate system will be used.
storm_weight : :obj:`bool`
If `True` the datapoints of the specified STORM-type data will be weighted by their intensity.
method : :obj:`str`
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float` or array_like
            Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convolving
datapoints. To use a different sigma for each cell `sigma` can be given as a list or array.
Returns
-------
xvals : :class:`~numpy.ndarray`
Array of distances along the cell midline, values are the middle of the bins/kernel
yvals : :class:`~numpy.ndarray`
2D array where every row is the bin heights per cell.
"""
y_arr = np.zeros((len(self), nbins))
x_arr = np.zeros((len(self), nbins))
for i, c in enumerate(self):
            if hasattr(sigma, '__len__') and len(sigma) == len(self):
                _sigma = sigma[i]
            else:
                _sigma = sigma
xvals, yvals = c.l_dist(nbins, start=start, stop=stop, data_name=data_name, norm_x=norm_x,
l_mean=self.length.mean(), method=method, r_max=r_max, storm_weight=storm_weight,
sigma=_sigma)
x_arr[i] = xvals
y_arr[i] = yvals
return x_arr, y_arr
def phi_dist(self, step, data_name='', storm_weight=False, method='gauss', sigma=5, r_max=None, r_min=0):
"""
Calculates the angular distribution of signal for a given data element for all cells.
Parameters
----------
step : :obj:`float`
Step size between datapoints.
data_name : :obj:`str`
Name of the data element to use.
r_max : :obj:`float`, optional
Datapoints within r_max from the cell midline will be included. If `None` the value from the cell's
coordinate system will be used.
r_min : :obj:`float`, optional
            Only datapoints farther than `r_min` from the cell midline will be included.
storm_weight : :obj:`bool`
If `True` the datapoints of the specified STORM-type data will be weighted by their intensity.
method : :obj:`str`
Method of averaging datapoints to calculate the final distribution curve.
sigma : :obj:`float`
            Applies only when `method` is set to 'gauss'. `sigma` gives the width of the gaussian used for convolving
datapoints.
Returns
-------
xvals : :class:`~numpy.ndarray`
            Array of angles (in degrees), values are the middle of the bins/kernel.
        yvals_l : :class:`~numpy.ndarray`
            2D array with bin heights for the left pole, where each row corresponds to one cell.
        yvals_r : :class:`~numpy.ndarray`
            2D array with bin heights for the right pole, where each row corresponds to one cell.
"""
stop = 180
numpoints = len(np.arange(0, stop + step, step))
out_l = np.zeros((len(self), numpoints))
out_r = np.zeros((len(self), numpoints))
for i, c in enumerate(self):
xvals, yvals_l, yvals_r = c.phi_dist(step, data_name=data_name, storm_weight=storm_weight, sigma=sigma,
method=method, r_max=r_max, r_min=r_min)
out_l[i] = yvals_l
out_r[i] = yvals_r
return xvals, out_l, out_r
def l_classify(self, data_name=''):
"""
        Classifies foci in STORM-type data by their x-position along the long axis.
        The spots are classified into 3 categories: 'poles', 'between' and 'mid'. The 'poles' category contains spots
        that lie to the left of `xl` or to the right of `xr`. The class 'mid' is a section in the middle of the cell
        with a total length of half the cell's length; the class 'between' covers the remaining two quarters between
        'mid' and 'poles'.
Parameters
----------
data_name : :obj:`str`
            Name of the STORM-type data element to classify. When it is not specified, the first STORM data element is used.
Returns
-------
array : :class:`~numpy.ndarray`
Array of tuples with number of spots in poles, between and mid classes, respectively.
"""
return np.array([c.l_classify(data_name=data_name) for c in self])
def get_intensity(self, mask='binary', data_name='', func=np.mean):
"""
Returns the fluorescence intensity for each cell.
Mean fluorescence intensity either in the region masked by the binary image or reconstructed binary image
derived from the cell's coordinate system. The default return value is the mean fluorescence intensity. Integrated
intensity can be calculated by using `func=np.sum`.
Parameters
----------
mask : :obj:`str`
Either 'binary' or 'coords' to specify the source of the mask used. 'binary' uses the binary image as mask,
'coords' uses reconstructed binary from coordinate system
data_name : :obj:`str`
The name of the image data element to get the intensity values from.
func : :obj:`callable`
This function is applied to the data elements pixels selected by the masking operation. The default is
`np.mean()`.
Returns
-------
value : :obj:`float`
Mean fluorescence pixel value.
"""
return np.array([c.get_intensity(mask=mask, data_name=data_name, func=func) for c in self])
def measure_r(self, data_name='brightfield', mode='max', in_place=True, **kwargs):
"""
Measure the radius of the cells.
The radius is found by the intensity-mid/min/max-point of the radial distribution derived from brightfield
(default) or another data element.
Parameters
----------
data_name : :obj:`str`
Name of the data element to use.
mode : :obj:`str`
Mode to find the radius. Can be either 'min', 'mid' or 'max' to use the minimum, middle or maximum value
of the radial distribution, respectively.
in_place : :obj:`bool`
If `True` the found value of `r` is directly substituted in the cell's coordinate system, otherwise the
value is returned.
Returns
-------
radius : :class:`np.ndarray`
The measured radius `r` values if `in_place` is `False`, otherwise `None`.
"""
if mode not in ['min', 'max', 'mid']:
raise ValueError('Invalid value for mode')
r = [c.measure_r(data_name=data_name, mode=mode, in_place=in_place, **kwargs) for c in self]
if not in_place:
return np.array(r)
def copy(self):
"""
Make a copy of the `CellList` object and all its associated data elements.
This is a deep copy meaning that all numpy data arrays are copied in memory and therefore modifying the copied
cell objects does not modify the original cell objects.
Returns
-------
cell_list : :class:`CellList`
Copied `CellList` object
"""
return CellList([cell.copy() for cell in self])
@property
    def radius(self):
        """:class:`~numpy.ndarray`: Array of cell's radii in pixels"""
return np.array([c.radius for c in self])
@property
    def length(self):
        """:class:`~numpy.ndarray`: Array of cell's lengths in pixels"""
return np.array([c.length for c in self])
@property
def circumference(self):
""":class:`~numpy.ndarray`: Array of cell's circumference in pixels"""
return np.array([c.circumference for c in self])
@property
def area(self):
""":class:`~numpy.ndarray`: Array of cell's area in square pixels"""
return np.array([c.area for c in self])
@property
def surface(self):
""":class:`~numpy.ndarray`: Array of cell's surface area (3d) in square pixels"""
return np.array([c.surface for c in self])
@property
def volume(self):
""":class:`~numpy.ndarray`: Array of cell's volume in cubic pixels"""
return np.array([c.volume for c in self])
@property
def name(self):
""":class:`~numpy.ndarray`: Array of cell's names"""
return np.array([c.name for c in self])
def __len__(self):
return self.cell_list.__len__()
def __iter__(self):
return self.cell_list.__iter__()
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
return self.cell_list.__getitem__(key)
else:
out = self.__class__.__new__(self.__class__)
out.cell_list = self.cell_list.__getitem__(key)
return out
def __setitem__(self, key, value):
assert isinstance(value, Cell)
self.cell_list.__setitem__(key, value)
def __contains__(self, item):
return self.cell_list.__contains__(item)
def solve_general(a, b, c, d):
"""
Solve cubic polynomial in the form a*x^3 + b*x^2 + c*x + d.
    Only valid if the polynomial discriminant is < 0; the cubic then has a single real root, which is returned. [1]_
Parameters
----------
a : array_like
Third order polynomial coefficient.
b : array_like
Second order polynomial coefficient.
c : array_like
First order polynomial coefficient.
d : array_like
Zeroth order polynomial coefficient.
Returns
-------
array : array_like
Real root solution.
.. [1] https://en.wikipedia.org/wiki/Cubic_function#General_formula
"""
# todo check type for performance gain?
# 16 16: 5.03 s
# 32 32: 3.969 s
# 64 64: 5.804 s
# 8 8:
d0 = b ** 2. - 3. * a * c
d1 = 2. * b ** 3. - 9. * a * b * c + 27. * a ** 2. * d
r0 = np.square(d1) - 4. * d0 ** 3.
r1 = (d1 + np.sqrt(r0)) / 2
    dc = np.cbrt(r1)  # power (1/3) gives NaNs for coeffs [1.98537881e+01, 1.44894594e-02, 2.38096700e+00]
return -(1. / (3. * a)) * (b + dc + (d0 / dc))
    # todo: hit a RuntimeWarning (divide by zero) on the line above once
def solve_trig(a, b, c, d):
"""
Solve cubic polynomial in the form a*x^3 + b*x^2 + c*x + d
    Only valid if the polynomial discriminant is > 0; the cubic then has three real roots. [1]_
Parameters
----------
a : array_like
Third order polynomial coefficient.
b : array_like
Second order polynomial coefficient.
c : array_like
First order polynomial coefficient.
d : array_like
Zeroth order polynomial coefficient.
Returns
-------
array : array_like
First real root solution.
.. [1] https://en.wikipedia.org/wiki/Cubic_function#Trigonometric_solution_for_three_real_roots
"""
p = (3. * a * c - b ** 2.) / (3. * a ** 2.)
q = (2. * b ** 3. - 9. * a * b * c + 27. * a ** 2. * d) / (27. * a ** 3.)
assert (np.all(p < 0))
k = 0.
t_k = 2. * np.sqrt(-p / 3.) * np.cos(
(1 / 3.) * np.arccos(((3. * q) / (2. * p)) * np.sqrt(-3. / p)) - (2 * np.pi * k) / 3.)
x_r = t_k - (b / (3 * a))
try:
        assert (np.all(
            x_r > 0))  # don't know if this is guaranteed; otherwise boundaries need to be passed and the correct root chosen from the 3 solutions
except AssertionError:
pass
# todo find out if this is bad or not
# raise ValueError
return x_r
def calc_lc(xl, xr, coeff):
"""
Calculate `lc`.
The returned length is the arc length from `xl` to `xr` integrated along the polynomial p(x) described by `coeff`.
Parameters
----------
xl : array_like
        Left bound to calculate arc length from. Shape must be compatible with `xr`.
    xr : array_like
        Right bound to calculate arc length to. Shape must be compatible with `xl`.
coeff : array_like or :obj:`tuple`
Array or tuple with coordinate polynomial coefficients `a0`, `a1`, `a2`.
Returns
-------
l : array_like
Calculated length `lc`.
"""
a0, a1, a2 = coeff
l = (1 / (4 * a2)) * (
((a1 + 2 * a2 * xr) * np.sqrt(1 + (a1 + 2 * a2 * xr) ** 2) + np.arcsinh((a1 + 2 * a2 * xr))) -
((a1 + 2 * a2 * xl) * np.sqrt(1 + (a1 + 2 * a2 * xl) ** 2) + np.arcsinh((a1 + 2 * a2 * xl)))
)
return l
def solve_length(xr, xl, coeff, length):
"""
Used to find `xc` in reverse coordinate transformation.
Function used to find cellular x coordinate `xr` where the arc length from `xl` to `xr` is equal to length given a
coordinate system with `coeff` as coefficients.
Parameters
----------
xr : :obj:`float`
Right boundary x coordinate of calculated arc length.
xl : :obj:`float`
Left boundary x coordinate of calculated arc length.
coeff : :obj:`list` or :class:`~numpy.ndarray`
Coefficients a0, a1, a2 describing the coordinate system.
length : :obj:`float`
Target length.
Returns
-------
diff : :obj:`float`
Difference between calculated length and specified length.
"""
a0, a1, a2 = coeff
calculated = (1 / (4 * a2)) * (
((a1 + 2 * a2 * xr) * np.sqrt(1 + (a1 + 2 * a2 * xr) ** 2) + np.arcsinh((a1 + 2 * a2 * xr))) -
((a1 + 2 * a2 * xl) * np.sqrt(1 + (a1 + 2 * a2 * xl) ** 2) + np.arcsinh((a1 + 2 * a2 * xl)))
)
return length - calculated
def calc_length(xr, xl, a2, length):
raise DeprecationWarning()
a1 = -a2 * (xr + xl)
l = (1 / (4 * a2)) * (
((a1 + 2 * a2 * xr) * np.sqrt(1 + (a1 + 2 * a2 * xr) ** 2) + np.arcsinh((a1 + 2 * a2 * xr))) -
((a1 + 2 * a2 * xl) * np.sqrt(1 + (a1 + 2 * a2 * xl) ** 2) + np.arcsinh((a1 + 2 * a2 * xl)))
)
return length - l
``` |
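A quick sanity check of `solve_general` above is to compare it with `numpy.roots` for a cubic whose discriminant is negative (a single real root). The sketch below assumes the helper is importable from `colicoords.cell`, as in the upstream package.
```python
# Minimal check of solve_general against numpy.roots; assumes the helper is
# importable from colicoords.cell as in the upstream package.
import numpy as np
from colicoords.cell import solve_general

a, b, c, d = 1.0, -2.0, 3.0, -4.0               # discriminant < 0: one real root
root = solve_general(a, b, c, d)

all_roots = np.roots([a, b, c, d])
real_root = all_roots[np.isreal(all_roots)].real[0]
print(np.isclose(root, real_root))               # expected: True
```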
{
"source": "Jhsmit/ColiCoords-Paper",
"score": 2
} |
#### File: figures/Figure_7/02_generate_images.py
```python
from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile
def chunk_list(l, sizes):
prev = 0
for s in sizes:
result = l[prev:prev+s]
prev += s
yield result
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
nums = nums[nums > 0]
assert sum(nums) < len(cell_list), 'Not enough cells'
chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]
out_dict = {}
for i, d in enumerate(dicts):
for k, v in d.items():
if 'storm' in k:
v['frame'] = i + 1
if k in out_dict:
out_dict[k] = np.append(out_dict[k], v)
else:
out_dict[k] = v
else:
if k in out_dict:
out_dict[k][i] = v
else:
out_dict[k] = np.zeros((num_images, *shape))
out_dict[k][i] = v
return out_dict
def generate_image(cells, shape, max_dist=5):
thetas = 360 * np.random.rand(len(cells))
data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}
for i, data in enumerate(data_list):
valid_position = False
while not valid_position:
pos_x = int(np.round(shape[1] * np.random.rand()))
pos_y = int(np.round(shape[0] * np.random.rand()))
min1 = pos_y - int(np.floor(data.shape[0]))
max1 = min1 + data.shape[0]
min2 = pos_x - int(np.floor(data.shape[1]))
max2 = min2 + data.shape[1]
# Crop the data for when the cell is on the border of the image
d_min1 = np.max([0 - min1, 0])
d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
d_min2 = np.max([0 - min2, 0])
d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])
data_cropped = data[d_min1:d_max1, d_min2:d_max2]
# Limit image position to the edges of the image
min1 = np.max([min1, 0])
max1 = np.min([max1, shape[0]])
min2 = np.max([min2, 0])
max2 = np.min([max2, shape[1]])
temp_binary = np.zeros(shape)
temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
out_binary = (out_dict['binary'] > 0).astype(int)
distance_map = mh.distance(1 - out_binary, metric='euclidean')
if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
continue
valid_position = True
for name in data.names:
data_elem = data_cropped.data_dict[name]
if data_elem.dclass == 'storm':
data_elem['x'] += min2
data_elem['y'] += min1
xmax, ymax = shape[1], shape[0]
bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
data_out = data_elem[~bools].copy()
if name in out_dict:
out_dict[name] = np.append(out_dict[name], data_out)
else:
out_dict[name] = data_out
continue
elif data_elem.dclass == 'binary':
out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
else:
out_dict[name][min1:max1, min2:max2] += data_elem
return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
xmax = shape[1]
ymax = shape[0]
step = 1
xi = np.arange(step / 2, xmax, step)
yi = np.arange(step / 2, ymax, step)
x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T
y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi))
x, y = storm_table['x'], storm_table['y']
img = np.zeros_like(x_coords)
intensities = storm_table['intensity']
sigma = sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))
for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):
img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)
return img
def gen_im(data_dir):
"""Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])
tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
    np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_outer'])
def noise_bf(data_dir):
    """Add Poissonian and readout noise to the brightfield images."""
noise = 20
img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
for photons in [10000, 1000, 500]:
ratio = 1.0453 # ratio between 'background' (no cells) and cell wall
img = (photons*(ratio-1))*img_stack + photons
img = draw_poisson(img)
img = add_readout_noise(img, noise)
tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
np.random.seed(42)
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
gen_im(data_dir)
noise_bf(data_dir)
```
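The `chunk_list` generator above determines how many synthetic cells end up in each image. A tiny self-contained illustration (the function is copied here so the snippet runs on its own):
```python
# chunk_list copied from the script above for a stand-alone illustration:
# it yields successive slices of `l` with the requested sizes.
def chunk_list(l, sizes):
    prev = 0
    for s in sizes:
        result = l[prev:prev + s]
        prev += s
        yield result

cells = list(range(10))                      # stand-in for a list of Cell objects
print(list(chunk_list(cells, [3, 4, 2])))    # [[0, 1, 2], [3, 4, 5, 6], [7, 8]]
```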
#### File: figures/Figure_7/06_filter_cells.py
```python
from colicoords import load, save
from tqdm.auto import tqdm
import numpy as np
import fastcluster as fc
from scipy.cluster.hierarchy import fcluster
import os
def filter_cells(m_names, gt_names, m_cells, gt_cells, max_d=3):
    """Removes STORM localizations originating from neighbouring cells and removes cell objects with too few localizations, for all conditions."""
m_remove = []
gt_remove = []
for m_name, gt_name in tqdm(zip(m_names, gt_names), total=len(m_names)):
m_i = m_cells.name.tolist().index(m_name)
gt_i = gt_cells.name.tolist().index(gt_name)
m_c = m_cells[m_i]
gt_c = gt_cells[gt_i]
for elem_name in ['storm_inner', 'storm_outer']:
if len(m_c.data.data_dict[elem_name]) > len(gt_c.data.data_dict[elem_name]):
st_elem = m_c.data.data_dict[elem_name]
X = np.array([st_elem['x'], st_elem['y']]).T.copy()
linkage = fc.linkage(X)
clusters = fcluster(linkage, max_d, criterion='distance')
counts = np.bincount(clusters)
i_max = np.argmax(counts)
                b = clusters == i_max
m_c.data.data_dict[elem_name] = m_c.data.data_dict[elem_name][b].copy()
try:
assert len(m_c.data.data_dict[elem_name]) == len(gt_c.data.data_dict[elem_name])
except AssertionError:
m_remove.append(m_name)
gt_remove.append(gt_name)
elif len(m_c.data.data_dict[elem_name]) < len(gt_c.data.data_dict[elem_name]):
m_remove.append(m_name)
gt_remove.append(gt_name)
m_final = list([name + '\n' for name in m_names if name not in m_remove])
gt_final = list([name + '\n' for name in gt_names if name not in gt_remove])
return m_final, gt_final, m_cells, gt_cells
def filter_all(data_dir):
    """Removes STORM localizations originating from neighbouring cells and removes cell objects with too few localizations, for all conditions."""
gt_cells = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
for ph in [10000, 1000, 500]:
print('Photons', ph)
with open(os.path.join(data_dir, 'matched_names', 'm_cells_ph_{}_match.txt'.format(ph)), 'r') as f:
m_names = f.readlines()
m_names = list([n.rstrip() for n in m_names])
m_cells = load(os.path.join(data_dir, 'cell_obj', 'cell_ph_{}_raw.hdf5'.format(ph)))
with open(os.path.join(data_dir, 'matched_names', 'gt_cells_ph_{}_match.txt'.format(ph)), 'r') as f:
gt_names = f.readlines()
gt_names = list([n.rstrip() for n in gt_names])
m_final, gt_final, m_cells, gt_cells = filter_cells(m_names, gt_names, m_cells, gt_cells)
with open(os.path.join(data_dir, 'matched_names', 'gt_cells_ph_{}_match_filter.txt'.format(ph)), 'w') as f:
f.writelines(gt_final)
with open(os.path.join(data_dir, 'matched_names', 'm_cells_ph_{}_match_filter.txt'.format(ph)), 'w') as f:
f.writelines(m_final)
for i, (m_, gt_) in tqdm(enumerate(zip(m_final, gt_final))):
m_i = m_cells.name.tolist().index(m_.rstrip())
g_i = gt_cells.name.tolist().index(gt_.rstrip())
try:
assert len(m_cells[m_i].data.data_dict['storm_inner']) == len(
gt_cells[g_i].data.data_dict['storm_inner'])
except AssertionError:
print('Assertion error:', i)
save(os.path.join(data_dir, 'cell_obj', 'cell_ph_{}_filtered.hdf5'.format(ph)), m_cells)
if __name__ == '__main__':
data_dir = '.'
filter_all(data_dir)
```
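The core of `filter_cells` above is the hierarchical clustering step that keeps only the largest spatial cluster of localizations. Below is a self-contained sketch of that step, using `scipy`'s `linkage` in place of `fastcluster` (the `fcluster` call is identical).
```python
# Keep only the largest spatial cluster of localizations, as in filter_cells
# above; scipy's linkage is used here, fastcluster.linkage is a drop-in.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
main = rng.normal(loc=(0.0, 0.0), scale=0.5, size=(50, 2))    # the cell's own localizations
stray = rng.normal(loc=(10.0, 10.0), scale=0.5, size=(5, 2))  # spill-over from a neighbour
X = np.vstack([main, stray])

clusters = fcluster(linkage(X), t=3, criterion='distance')    # max_d = 3
counts = np.bincount(clusters)
keep = clusters == np.argmax(counts)
print(keep.sum())                                              # expected: 50
```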
#### File: figures/Figure_7/07_optimize_coords.py
```python
from colicoords import load, save
import numpy as np
import os
def optimize_all(data_dir):
"""Optimize the cell's coordinate systems for each condition based on different data elements"""
for ph in [10000, 1000, 500]:
print('Photons {}'.format(ph))
m_cells = load(os.path.join(data_dir, 'cell_obj', 'cell_ph_{}_filtered.hdf5'.format(ph)))
print('Measured cells loaded')
print('binary')
optimize_cells = m_cells.copy()
res = optimize_cells.optimize_mp()
obj_vals = [r.objective_value for r in res]
np.savetxt(os.path.join(data_dir, 'minimize_res', 'm_cells_ph_{}_binary.txt'.format(ph)), obj_vals)
save(os.path.join(data_dir, 'cell_obj', 'm_cells_ph_{}_filtered_binary.hdf5'.format(ph)), optimize_cells)
print('brightfield')
optimize_cells = m_cells.copy()
res = optimize_cells.optimize_mp('brightfield')
obj_vals = [r.objective_value for r in res]
np.savetxt(os.path.join(data_dir, 'minimize_res', 'm_cells_ph_{}_brightfield.txt'.format(ph)), obj_vals)
save(os.path.join(data_dir, 'cell_obj', 'm_cells_ph_{}_filtered_brightfield.hdf5'.format(ph)), optimize_cells)
print('storm inner')
optimize_cells = m_cells.copy()
res = optimize_cells.optimize_mp('storm_inner')
obj_vals = [r.objective_value for r in res]
np.savetxt(os.path.join(data_dir, 'minimize_res', 'm_cells_ph_{}_storm.txt'.format(ph)), obj_vals)
save(os.path.join(data_dir, 'cell_obj', 'm_cells_ph_{}_filtered_storm_inner.hdf5'.format(ph)), optimize_cells)
if __name__ == '__main__':
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'minimize_res')):
os.mkdir(os.path.join(data_dir, 'minimize_res'))
optimize_all(data_dir)
```
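`CellList.optimize_mp` spawns a multiprocessing pool, so on spawn-based platforms the call has to sit under an `if __name__ == '__main__'` guard, exactly as this script does. A minimal sketch of that pattern; the file names and process count are placeholders.
```python
# Minimal sketch of the multiprocessing guard required by optimize_mp;
# file names and the process count are placeholders.
from colicoords import load, save

def run(path):
    cells = load(path)
    results = cells.optimize_mp('brightfield', processes=4)
    save('cells_optimized.hdf5', cells)
    return results

if __name__ == '__main__':
    run('cells_filtered.hdf5')
```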
#### File: figures/Figure_7/09_calc_obj_values.py
```python
import numpy as np
from colicoords import load, CellFit
import os
from tqdm.auto import tqdm
def cell_to_dict(cell):
"""returns a dict with parameter name and its value for all parameters describing the coordinate system"""
return {attr: getattr(cell.coords, attr) for attr in ['a0', 'a1', 'a2', 'r', 'xl', 'xr']}
def get_value(gt_cell, m_cell):
"""Get current STORM optimization objective value"""
d_m = cell_to_dict(m_cell)
d_g = cell_to_dict(gt_cell)
d_g['r'] /= (1.5554007217841803 * 1.314602664567288)
d_m['r'] = d_g['r']
# copy the cell object because coords values get changed when calling the objective function
fit_gt = CellFit(gt_cell.copy(), 'storm_inner')
val_gt = fit_gt.fit.objective(**d_g)
fit_m = CellFit(m_cell.copy(), 'storm_inner')
val_m = fit_m.fit.objective(**d_m)
return val_m, val_gt
def get_obj_values_all(data_dir):
gt_cells = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
for ph in [10000, 1000, 500]:
print('Photons', ph)
m_names = np.genfromtxt(os.path.join(data_dir, 'matched_names', 'm_cells_ph_{}_match_filter.txt'.format(ph)), dtype=str)
gt_names = np.genfromtxt(os.path.join(data_dir, 'matched_names', 'gt_cells_ph_{}_match_filter.txt'.format(ph)), dtype=str)
for condition in ['binary', 'brightfield', 'storm_inner']:
print('Condition', condition)
m_cells = load(os.path.join(data_dir, 'cell_obj', 'm_cells_ph_{}_filtered_{}.hdf5'.format(ph, condition)))
# Get index arrays to sort saved cell lists by matched names.
m_index = np.searchsorted(m_cells.name, m_names)
gt_index = np.searchsorted(gt_cells.name, gt_names)
# sorting CellList object by indexing
m_sorted = m_cells[m_index]
gt_sorted = gt_cells[gt_index]
result = np.array([get_value(gt, m) for m, gt in tqdm(zip(m_sorted, gt_sorted), total=len(m_sorted))])
np.savetxt(os.path.join(data_dir, 'obj_values', 'obj_vals_storm_ph_{}_{}.txt'.format(ph, condition)), result)
np.save(os.path.join(data_dir, 'obj_values', 'obj_vals_storm_ph_{}_{}.npy'.format(ph, condition)), result)
if __name__ == '__main__':
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'obj_values')):
os.mkdir(os.path.join(data_dir, 'obj_values'))
get_obj_values_all(data_dir)
``` |
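The name matching in `get_obj_values_all` uses `np.searchsorted` to map each matched name back to its position in the stored cell list; note that this only works because the stored names are sorted. A small numpy-only illustration:
```python
# Map requested names to their indices in a sorted array of stored names,
# mirroring the m_cells.name / gt_cells.name lookup above.
import numpy as np

stored = np.array(['cell_001', 'cell_002', 'cell_003', 'cell_004'])
wanted = np.array(['cell_003', 'cell_001'])

idx = np.searchsorted(stored, wanted)
print(idx)           # [2 0]
print(stored[idx])   # ['cell_003' 'cell_001']
```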
{
"source": "jhsmith/django-auth-pubtkt",
"score": 2
} |
#### File: django-auth-pubtkt/django_auth_pubtkt/views.py
```python
from django.shortcuts import redirect
from django.conf import settings
import urllib
def redirect_to_sso(request):
    scheme = 'https://' if request.is_secure() else 'http://'
    back = scheme + request.get_host() + request.GET["next"]
back = urllib.quote_plus(back)
try:
TKT_AUTH_BACK_ARG_NAME = settings.TKT_AUTH_BACK_ARG_NAME
except AttributeError:
TKT_AUTH_BACK_ARG_NAME = "back"
url = settings.TKT_AUTH_LOGIN_URL + "?" + TKT_AUTH_BACK_ARG_NAME + "=" + back
return redirect(url)
``` |
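For reference, this is the kind of redirect URL the view constructs: the originally requested page is URL-encoded and appended as the `back` argument of the SSO login URL. The values below are made up, and the Python 3 location of `quote_plus` is used for the illustration.
```python
# Illustration only: the URLs are made up; under Python 3 the quoting helper
# lives in urllib.parse rather than the top-level urllib module.
from urllib.parse import quote_plus

login_url = 'https://sso.example.com/login'        # settings.TKT_AUTH_LOGIN_URL
back = 'https://app.example.com/reports/?page=2'   # scheme + host + request.GET["next"]

print(login_url + '?back=' + quote_plus(back))
# https://sso.example.com/login?back=https%3A%2F%2Fapp.example.com%2Freports%2F%3Fpage%3D2
```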
{
"source": "Jhsmit/panel-chemistry",
"score": 2
} |
#### File: tests/pane/test_ngl_viewer.py
```python
import panel as pn
from panel_chemistry.bokeh_extensions.ngl_viewer import NGLViewer as _BkNGLViewer
from panel_chemistry.pane import NGLViewer
def test_can_create():
"""Test of the NGLViewer constructor"""
NGLViewer(object="1CRN", background="yellow", height=500)
def test_has_bokeh_model():
"""Test that the NGL Viewer Exists"""
assert _BkNGLViewer
def test_app():
"""Returns an app for manually testing the NGL Molecule Viewer"""
pn.extension(sizing_mode="stretch_width")
# 1NKT, 2GQ5, 3UOG and 5TXH
viewer = NGLViewer(object="1CRN", background="yellow", height=500)
parameters = [
"object",
"extension",
"representation",
"color_scheme",
"custom_color_scheme",
"effect",
"sizing_mode",
"width",
"height",
"background",
]
settings = pn.Param(
viewer,
parameters=parameters,
)
return pn.Row(
viewer,
pn.WidgetBox(
settings,
width=300,
sizing_mode="fixed",
),
)
if __name__.startswith("bokeh"):
test_app().servable()
``` |
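Because the module ends with the `__name__.startswith("bokeh")` guard, the test app can be served directly with `panel serve`. It can also be launched programmatically; a minimal sketch, with the import path assumed from the repository layout above:
```python
# Serve the manual test app programmatically; the import path is assumed
# from the repository layout shown above.
import panel as pn
from tests.pane.test_ngl_viewer import test_app

pn.serve(test_app)
```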
{
"source": "Jhsmit/PyHDX",
"score": 2
} |
#### File: PyHDX/dev/bokeh_extension.py
```python
import numpy as np
from collections import OrderedDict
import bokeh
from bokeh.core.properties import Bool, String, List
from bokeh.models import LayoutDOM
from bokeh.util.compiler import TypeScript
from bokeh.plotting import curdoc
from bokeh.layouts import column
from bokeh.models import Button, CustomJS
import panel as pn
from panel.widgets.base import Widget
import param
from pathlib import Path
from pyhdx.web.base import STATIC_DIR
TS_CODE = """
// This custom model wraps one part of the third-party vis.js library:
//
// http://visjs.org/index.html
//
// Making it easy to hook up python data analytics tools (NumPy, SciPy,
// Pandas, etc.) to web presentations using the Bokeh server.
import {LayoutDOM, LayoutDOMView} from "models/layouts/layout_dom"
import {LayoutItem} from "core/layout"
import * as p from "core/properties"
declare namespace NGL {
class AtomProxy{
}
class Blob{
constructor(list: Array<String>, ob: object)
}
class Colormaker{
atomColor: (atom: AtomProxy) => string
}
class ColormakerRegistry{
static addScheme(scheme: () => void) : String
static addSelectionScheme(dataList: Array<Array<String>>, label: String): String
}
class Component{
removeAllRepresentations(): void
addRepresentation(type: String, params?: object) : RepresentationComponent
reprList: RepresentationElement[]
}
class Matrix4{
elements: Array<Number>
}
class RepresentationComponent{
}
class RepresentationElement{
setParameters(params: any): this
getParameters(): object
}
class Stage {
compList: Array<Component>
viewerControls: ViewerControls
constructor(elementId: String, params?: object)
loadFile(s: String| Blob, params?: object): Promise<StructureComponent>
autoView() : void
setSpin(flag: Boolean): void
removeAllComponents(type: String): void
addRepresentation(representation: String): void
}
class ScriptComponent{
constructor(stage: Stage, params?: object)
addRepresentation(type: String, params?: object) : RepresentationComponent
autoView() : void
removeAllRepresentations(): void
reprList: RepresentationElement[]
}
class StructureComponent{
constructor(stage: Stage, params?: object)
addRepresentation(type: String, params?: object) : RepresentationComponent
autoView() : void
removeAllRepresentations(): void
reprList: RepresentationElement[]
}
class SurfaceComponent{
constructor(stage: Stage, params?: object)
addRepresentation(type: String, params?: object) : RepresentationComponent
autoView() : void
removeAllRepresentations(): void
reprList: RepresentationElement[]
}
class Vector3{
x: number
y: number
z: number
}
class ViewerControls {
position: Vector3
Orientation: Matrix4
}
}
export class NGLView extends LayoutDOMView {
model: ngl
public spin: Boolean
public _stage: NGL.Stage
initialize(): void {
super.initialize()
const url = "https://cdn.jsdelivr.net/gh/arose/[email protected]/dist/ngl.js"
const script = document.createElement("script")
script.onload = () => this._init()
script.async = false
script.src = url
document.head.appendChild(script)
}
public set_variable_x(x: number): void {
this._stage.viewerControls.position.x = x;
}
private _init(): void {
    // Create a new NGL Stage using the NGL API. This assumes the NGL script has
    // already been loaded (see `initialize` above). In the future Bokeh
    // models will be able to specify and load external scripts automatically.
    //
    // BokehJS Views create <div> elements by default, accessible as this.el.
    // Many Bokeh views ignore this default <div>, and instead do things like
    // draw to the HTML canvas. In this case though, we use the <div> to attach
    // the NGL Stage to the DOM.
this.el.setAttribute('id','viewport')
console.log("the id is: " + this.el.getAttribute('id'))
this._stage = new NGL.Stage('viewport')
var m = this.model
var stage = this._stage
var first_scheme = NGL.ColormakerRegistry.addSelectionScheme(m.color_list, "new scheme");
stage.setSpin(m.spin)
document.addEventListener('spin', function(){
stage.setSpin(m.spin);
});
document.addEventListener('representation', function(){
stage.compList[0].removeAllRepresentations();
stage.compList[0].addRepresentation(m.representation, { color: first_scheme })
});
document.addEventListener('rcsb_id', function(){
stage.removeAllComponents("");
stage.loadFile(m.rscb).then(function (o) {
o.addRepresentation(m.representation, { color: first_scheme })
o.autoView()
});
});
document.addEventListener('color_list', function(){
console.log(m.color_list)
var list: Array<Array<String>> = m.color_list
try{
var new_scheme = NGL.ColormakerRegistry.addSelectionScheme(list, "new scheme");
stage.compList[0].reprList[0].setParameters( { color: new_scheme } );
}
catch(err) {
console.log("badly defined color")
}
});
document.addEventListener('pdb_string', function(){
stage.removeAllComponents("");
stage.loadFile( new Blob([m.pdb_string], {type: 'text/plain'}), { ext:'pdb'}).then(function (o) {
o.addRepresentation(m.representation, { color: first_scheme })
o.autoView()
});
});
}
// This is the callback executed when the Bokeh data has an change. Its basic
// function is to adapt the Bokeh data source to the vis.js DataSet format.
//get_data(): vis.DataSet {
// const data = new vis.DataSet()
// const source = this.model.data_source
// for (let i = 0; i < source.get_length()!; i++) {
// data.add({
// x: source.data[this.model.x][i],
// y: source.data[this.model.y][i],
// z: source.data[this.model.z][i],
// })
// }
// return data
//}
get child_models(): LayoutDOM[] {
return []
}
_update_layout(): void {
this.layout = new LayoutItem()
this.layout.set_sizing(this.box_sizing())
}
}
// We must also create a corresponding JavaScript BokehJS model subclass to
// correspond to the python Bokeh model subclass. In this case, since we want
// an element that can position itself in the DOM according to a Bokeh layout,
// we subclass from ``LayoutDOM``
export namespace ngl {
export type Attrs = p.AttrsOf<Props>
export type Props = LayoutDOM.Props & {
spin: p.Property<boolean>
representation: p.Property<string>
rscb: p.Property<string>
no_coverage: p.Property<string>
color_list: p.Property<any>
pdb_string: p.Property<string>
}
}
export interface ngl extends ngl.Attrs {}
export class ngl extends LayoutDOM {
properties: ngl.Props
__view_type__: NGLView
constructor(attrs?: Partial<ngl.Attrs>){
super(attrs)
}
// The ``__name__`` class attribute should generally match exactly the name
// of the corresponding Python class. Note that if using TypeScript, this
// will be automatically filled in during compilation, so except in some
// special cases, this shouldn't be generally included manually, to avoid
// typos, which would prohibit serialization/deserialization of this model.
static __name__ = "ngl"
static init_ngl() {
// This is usually boilerplate. In some cases there may not be a view.
this.prototype.default_view = NGLView
// The @define block adds corresponding "properties" to the JS model. These
// should basically line up 1-1 with the Python model class. Most property
// types have counterparts, e.g. ``bokeh.core.properties.String`` will be
// ``String`` in the JS implementation. Where the JS type system is not yet
// as rich, you can use ``p.Any`` as a "wildcard" property type.
this.define<ngl.Props>(({String, Boolean, Any}) => ({
spin: [Boolean, false],
representation: [String],
rscb: [String],
no_coverage: [String],
color_list: [Any],
pdb_string: [String]
})
)
}
}
"""
# This custom extension model will have a DOM view that should layout-able in
# Bokeh layouts, so use ``LayoutDOM`` as the base class. If you wanted to create
# a custom tool, you could inherit from ``Tool``, or from ``Glyph`` if you
# wanted to create a custom glyph, etc.
class ngl(LayoutDOM):
# The special class attribute ``__implementation__`` should contain a string
# of JavaScript code that implements the browser side of the extension model.
# Below are all the "properties" for this model. Bokeh properties are
# class attributes that define the fields (and their types) that can be
# communicated automatically between Python and the browser. Properties
    # also support type validation. More information about properties can
    # be found here:
#
# https://docs.bokeh.org/en/latest/docs/reference/core/properties.html#bokeh-core-properties
__implementation__ = TypeScript(TS_CODE)
# This is a Bokeh ColumnDataSource that can be updated in the Bokeh
# server by Python code
# The vis.js library that we are wrapping expects data for x, y, and z.
# The data will actually be stored in the ColumnDataSource, but these
# properties let us specify the *name* of the column that should be
# used for each field.
spin = Bool
representation = String
rscb = String
no_coverage = String
color_list = List(List(String))
pdb_string = String
class NGLview(Widget):
# Set the Bokeh model to use
_widget_type = ngl
# Rename Panel Parameters -> Bokeh Model properties
    # Parameters like title that do not exist on the Bokeh model should be renamed to None
_rename = {
"title": None,
}
pdb_string = param.String()
# Parameters to be mapped to Bokeh model properties
spin = param.Boolean(default=False)
representation = param.String(default="cartoon")
rscb = param.String(default="rcsb://1CRN")
no_coverage = param.String(default='0x8c8c8c')
color_list = param.List(default=[["red", "64-74 or 134-154 or 222-254 or 310-310 or 322-326"],
["green", "311-322"],
["yellow",
"40-63 or 75-95 or 112-133 or 155-173 or 202-221 or 255-277 or 289-309"],
["blue", "1-39 or 96-112 or 174-201 or 278-288"],
["white", "*"]])
class test(param.Parameterized):
pdb_string = param.String()
rcsb_id = param.String(default="1qyn", doc='RCSB PDB identifier of protein entry to download and visualize.')
representation = param.Selector(default='cartoon',
objects=['backbone', 'ball+stick', 'cartoon', 'hyperball', 'licorice',
'ribbon', 'rope', 'spacefill', 'surface'],
doc='Representation to use to render the protein.')
spin = param.Boolean(default=False, doc='Rotate the protein around an axis.')
#no_coverage = param.Color(default='#8c8c8c', doc='Color to use for regions of no coverage.')
color_list = param.List(default=[["red", "64-74 or 134-154 or 222-254 or 310-310 or 322-326"],
["green", "311-322"],
["yellow",
"40-63 or 75-95 or 112-133 or 155-173 or 202-221 or 255-277 or 289-309"],
["blue", "1-39 or 96-112 or 174-201 or 278-288"],
["white", "*"]])
class watcher(object):
def __init__(self, ngl_viewer, parent):
self.ngl_viewer = ngl_viewer
self.parent = parent
self.parent.param.watch(self.changeSpin, "spin")
self.parent.param.watch(self.changeRepresentation, "representation")
self.parent.param.watch(self.changerscb, "rcsb_id")
#self.parent.param.watch(self.changeColor, "no_coverage")
self.parent.param.watch(self.changeColorList, "color_list")
self.parent.param.watch(self.changePDBString, "pdb_string")
def changeSpin(self, event):
self.ngl_viewer.spin = event.new
def changeRepresentation(self, event):
print(event.new)
self.ngl_viewer.representation = event.new
def changerscb(self, event):
self.ngl_viewer.rscb = event.new
    def changeColor(self, event):
        self.ngl_viewer.no_coverage = event.new.replace('#', '0x')
def changeColorList(self, event):
self.ngl_viewer.color_list = event.new
def changePDBString(self, event):
print(event.new)
self.ngl_viewer.pdb_string = event.new
class NGLView_factory:
@staticmethod
def create_view():
view = NGLview(sizing_mode='stretch_both', representation='cartoon')
view.jscallback(representation="document.dispatchEvent(new Event('representation'));")
view.jscallback(spin="document.dispatchEvent(new Event('spin'));")
view.jscallback(rscb="document.dispatchEvent(new Event('rcsb_id'));")
#view.jscallback(no_coverage="document.dispatchEvent(new Event('no_coverage'));")
view.jscallback(color_list="document.dispatchEvent(new Event('color_list'));")
view.jscallback(pdb_string="document.dispatchEvent(new Event('pdb_string'));")
return view
from bokeh.settings import settings
print(settings.minified)
import os
os.environ["BOKEH_XSRF_COOKIES"] = "True"
p = test()
view = NGLView_factory.create_view()
watch = watcher(view, p)
result = pn.Row(p.param, view)
pdb_string = Path('1qyn.pdb').read_text()
result.servable()
def cb():
view.pdb_string = pdb_string
pn.state.onload(cb)
current_dir = Path(__file__).parent
#pn.serve(result, static_dirs={'pyhdx': STATIC_DIR, 'bk': current_dir})
# if __name__ == '__main__':
# p = test()
# view = NGLView_factory.create_view()
# watch = watcher(view, p)
#
# result = pn.Column(p.param, view)
# pn.serve(result)
``` |
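The `watcher` class above is a thin bridge: `param.watch` registers callbacks that receive an `Event`, and the callback pushes `event.new` onto the Bokeh model. A self-contained illustration of that pattern; the class and parameter names here are made up.
```python
# Stand-alone illustration of the param-watching pattern used by `watcher`;
# the class and parameter names are made up.
import param

class Protein(param.Parameterized):
    representation = param.Selector(default='cartoon', objects=['cartoon', 'surface'])

def on_change(event):
    print(f'{event.name}: {event.old!r} -> {event.new!r}')

p = Protein()
p.param.watch(on_change, 'representation')
p.representation = 'surface'    # prints: representation: 'cartoon' -> 'surface'
```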
{
"source": "Jhsmit/PyHDX-paper",
"score": 2
} |
#### File: PyHDX-paper/biorxiv_v2/02_fitting.py
```python
from functions.logging import write_log
from functions.base import settings_dict, data_dict, states, input_data_dir, current_dir
from pyhdx.batch_processing import load_from_yaml
from pyhdx.fileIO import csv_to_protein, save_fitresult
from pyhdx.fitting import fit_gibbs_global
import time
write_log(__file__)
output_dir = current_dir / 'fits'
output_dir.mkdir(exist_ok=True)
def load_and_fit(state, fit_kwargs):
hdxm = load_from_yaml(data_dict[state], data_dir=input_data_dir)
guesses = csv_to_protein(current_dir / 'guesses' / f'{state}_initial_guess.csv')['rate']
gibbs_guess = hdxm.guess_deltaG(guesses)
t0 = time.time()
fr = fit_gibbs_global(hdxm, gibbs_guess, **fit_kwargs)
t1 = time.time()
fit_output_dir = output_dir / state / f"r1_{fit_kwargs['r1']}"
log_lines = [f"Time elapsed: {(t1-t0):.2f} s"]
save_fitresult(fit_output_dir, fr, log_lines=log_lines)
rainbow_fit_kwargs = {
'r1': 0.5,
**settings_dict['single_fit']
}
# Individual fits for rainbowclouds plot
for state in states:
print(state)
load_and_fit(state, rainbow_fit_kwargs)
# Single ecSecB fit for figure 1e, supplementary figure 3
state = 'ecSecB'
ecsecb_fit_kwargs = settings_dict['ecsecb_tetramer_dimer']
ecsecb_fit_kwargs.pop('r2')
for r1 in [0, 0.05, 0.1, 0.5, 1, 2, 5, 10]:
fit_kwargs = {'r1': r1, **settings_dict['single_fit']}
load_and_fit(state, fit_kwargs)
# Individual mtSecB for SI fig 5 (top) and SI fig 9
ecsecb_fit_kwargs = settings_dict['ecsecb_tetramer_dimer']
ecsecb_fit_kwargs.pop('r2', None)
r1 = 0.05
fit_kwargs = {'r1': r1, **settings_dict['single_fit']}
for state in ['mtSecB', 'MBP_wt', 'PPiA_WT', 'PPiB_WT']:
print(state)
load_and_fit(state, fit_kwargs)
```
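For reference, a hedged sketch of invoking the helper above for a single state; the exact contents of `settings_dict['single_fit']` (optimizer settings such as epochs and learning rate) live in `functions/base.py` and are assumed here.
```python
# Assumes the imports and definitions from 02_fitting.py above.
single_state_kwargs = {'r1': 0.5, **settings_dict['single_fit']}
load_and_fit('ecSecB', single_state_kwargs)  # writes the fit result to fits/ecSecB/r1_0.5
```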
#### File: biorxiv_v2/functions/fileIO.py
```python
import numpy as np
import pandas as pd
def nma_result_to_df(nma_path, chains=None):
modes_pdb = nma_path / 'modes_CA.pdb'
text = modes_pdb.read_text()
lines = text.split('\n')
residue_numbers = np.array([int([entry for entry in line.split(' ') if entry][5]) for line in lines if line.startswith('ATOM')])
repeated_idx = np.where(np.diff(residue_numbers) == 0)[0]
if chains is None:
chains = np.array([[entry for entry in line.split(' ') if entry][4] for line in lines if line.startswith('ATOM')])
chains = np.unique(chains)
if repeated_idx.size > 0:
residue_numbers = np.delete(residue_numbers, repeated_idx)
all_indices = [0] + list(np.where(np.diff(residue_numbers) < 0)[0] + 1) + [len(residue_numbers)]
split_residues = [residue_numbers[i0: i1] for i0, i1 in zip(all_indices[:-1], all_indices[1:])]
series_list = []
for i in range(6):
displacement = np.genfromtxt(nma_path / 'displacement' / 'displacements.txt', skip_header=1)[:, i + 1]
displacement = np.sqrt(displacement) # Take square root as displacements are squared displacements
mode = i + 7
split_displacement = [displacement[i0: i1] for i0, i1 in zip(all_indices[:-1], all_indices[1:])]
for r, d, chain in zip(split_residues, split_displacement, chains):
index = pd.Index(r, name='r_number')
series = pd.Series(d, name=f"displacement_mode_{mode}_chain_{chain}", index=index)
series_list.append(series)
df = pd.concat(series_list, axis=1)
# Average over all chains
cols = [col for col in df.columns if f'chain_{chains[0]}' in col]
series_list = [] # List of chain-averaged displacements per mode
for entry in cols:
base = entry[:-1]
selection = [base + letter for letter in chains]
name = base[:-7]
sub_df = df[selection]
series = sub_df.mean(axis=1) # Average of all chains
series.name = name
series_list.append(series)
df_chains = pd.concat(series_list, axis=1)
# Sum all modes
overall_sum = df_chains.sum(axis=1)
overall_sum.name = 'displacement'
total = pd.concat([df, df_chains, overall_sum], axis=1)
return total
```
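A hedged usage sketch of the parser above; the directory name is hypothetical, but the expected layout (`modes_CA.pdb` plus `displacement/displacements.txt`) follows directly from the function body.
```python
from pathlib import Path
from functions.fileIO import nma_result_to_df

nma_dir = Path('nma') / 'ecSecB'  # hypothetical NMA output folder
df = nma_result_to_df(nma_dir)
print(df['displacement'].head())  # per-residue displacement, averaged over chains and summed over modes 7-12
```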
#### File: biorxiv_v2/functions/plotting.py
```python
import numpy as np
from Bio.SubsMat.MatrixInfo import pam250
from .formatting import *
from matplotlib.patches import Rectangle, Patch
def alignment_score(s1, s2):
alignment = ''
for s in zip(s1, s2):
if '-' in s:
alignment += ' '
elif s[0] == s[1]:
alignment += '*'
else:
try:
score = pam250[s]
except KeyError:
score = pam250[s[::-1]]
if score == 0.:
alignment += ' '
elif score <= 0.5:
alignment += '.'
elif score > 0.5:
alignment += ':'
else:
raise ValueError('oei')
return alignment
def plot_aligned(axes, names, labels, aligned_dataframe, alignments_dict, size=95):
aa_font_size = 5
#size = size  # don't change this unless manually changing the y labels
start = -0.5
ranges = [(start, start + size), (start + size, start + 2 * size)]
for ax, r in zip(axes, ranges):
for i, (name, _label) in enumerate(zip(names, labels)):
vals = aligned_dataframe[name]['deltaG']*1e-3
#sequence = aligned_dataframe[name]['sequence']
colors_rgba = rgb_cmap(rgb_norm(vals), bytes=True)
color_arr = rgb_to_hex(colors_rgba)
#color_arr[np.isnan(vals)] = '#8c8c8c'
for j, (c, s) in enumerate(zip(color_arr, alignments_dict[name])):
color = '#ffffff' if s == '-' else c
rect = Rectangle((j - 0.5, 2 - 2 * i - 0.5), width=1, height=1, color=color)
ax.add_patch(rect)
text_color = 'black' # if np.mean(hex_to_rgb(color)) < 200 else 'white'
if j > r[0] and j < r[1]:
ax.annotate(s.upper(), (j, 2 - i * 2), ha='center', va='center', color=text_color, size=aa_font_size,
font=font_pth)
ax.text(r[0] - 10, 2 - 2 * i, _label, size=7, verticalalignment='center')
# add alignment text
score = alignment_score(*[alignments_dict[name].upper() for name in names])
for j, s in enumerate(score):
if j > r[0] and j < r[1]:
ax.text(j, 1, s, horizontalalignment='center', verticalalignment='center', color='k', size=aa_font_size)
ax.set_xlim(r)
ax.set_ylim(-0.5, 2.9)
ax.set_yticks([])
ax.set_xticks([])
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
top_part = alignments_dict['ecSecB'][:size]
top_num = sum([c.isalpha() for c in top_part])
bot_part = alignments_dict['mtSecB'][:size]
bot_num = sum([c.isalpha() for c in bot_part])
n = 0
axes[n].text(ranges[n][0] - 3, 2, '1', size=7, verticalalignment='center')
axes[n].text(ranges[n][0] - 3, 0, '1', size=7, verticalalignment='center')
n = 1
axes[n].text(ranges[n][0] - 3, 2, top_num + 1, size=7, verticalalignment='center')
axes[n].text(ranges[n][0] - 3, 0, bot_num + 1, size=7, verticalalignment='center')
```
{
"source": "Jhsmit/PyHDX",
"score": 2
}
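A short, hedged example of the PAM250-based annotation helper above; the two sequences are made up and only serve to show the marker line it produces ('*' identity, ':'/'.' similarity, ' ' gap or mismatch).
```python
from functions.plotting import alignment_score

s1 = 'MSEQNNTEMTFQIQRIY'
s2 = 'MAERPNTDMTFKVQ-IY'
print(alignment_score(s1, s2))  # one marker character per alignment column
```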
#### File: PyHDX/pyhdx/plot.py
```python
from contextlib import contextmanager
from copy import copy
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import proplot as pplt
from matplotlib.axes import Axes
from matplotlib.patches import Rectangle
from scipy.stats import kde
from tqdm import tqdm
from pyhdx.config import cfg
from pyhdx.fileIO import load_fitresult
from pyhdx.support import autowrap, color_pymol, apply_cmap
try:
from pymol import cmd
except ModuleNotFoundError:
cmd = None
dG_ylabel = 'ΔG (kJ/mol)'
ddG_ylabel = 'ΔΔG (kJ/mol)'
r_xlabel = 'Residue Number'
ERRORBAR_KWARGS = {
'fmt': 'o',
'ecolor': 'k',
'elinewidth': 0.3,
'markersize': 0,
'alpha': 0.75,
'capthick': 0.3,
'capsize': 0.
}
SCATTER_KWARGS = {
's': 7
}
RECT_KWARGS = {
'linewidth': 0.5,
'linestyle': '-',
'edgecolor': 'k'}
CBAR_KWARGS = {
'space': 0,
'width': cfg.getfloat('plotting', 'cbar_width') / 25.4,
'tickminor': True
}
def peptide_coverage_figure(data, wrap=None, cmap='turbo', norm=None, color_field='rfu', subplot_field='exposure',
rect_fields=('start', 'end'), rect_kwargs=None, **figure_kwargs):
subplot_values = data[subplot_field].unique()
sub_dfs = {value: data.query(f'`{subplot_field}` == {value}') for value in subplot_values}
n_subplots = len(subplot_values)
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols)))
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
cbar_width = figure_kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'peptide_coverage_aspect'))
cmap = pplt.Colormap(cmap)
norm = norm or pplt.Norm('linear', vmin=0, vmax=1)
start_field, end_field = rect_fields
if wrap is None:
wrap = max([autowrap(sub_df[start_field], sub_df[end_field]) for sub_df in sub_dfs.values()])
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, **figure_kwargs)
rect_kwargs = rect_kwargs or {}
axes_iter = iter(axes)
for value, sub_df in sub_dfs.items():
ax = next(axes_iter)
peptide_coverage(ax, sub_df, cmap=cmap, norm=norm, color_field=color_field, wrap=wrap, cbar=False, **rect_kwargs)
ax.format(title=f'{subplot_field}: {value}')
for ax in axes_iter:
ax.axis('off')
start, end = data[start_field].min(), data[end_field].max()
pad = 0.05*(end-start)
axes.format(xlim=(start-pad, end+pad), xlabel=r_xlabel)
if not cmap.monochrome:
cbar_ax = fig.colorbar(cmap, norm, width=cbar_width)
cbar_ax.set_label(color_field, labelpad=-0)
else:
cbar_ax = None
return fig, axes, cbar_ax
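# Hedged usage sketch (not part of the original module): given a peptide table `df`
# with 'start', 'end', 'exposure' and 'rfu' columns, one coverage subplot per exposure
# could be rendered along the lines of:
# fig, axes, cbar = peptide_coverage_figure(df, color_field='rfu', subplot_field='exposure')
# fig.savefig('peptide_coverage.png')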
def peptide_coverage(ax, data, wrap=None, cmap='turbo', norm=None, color_field='rfu', rect_fields=('start', 'end'),
labels=False, cbar=True, **kwargs):
start_field, end_field = rect_fields
data = data.sort_values(by=[start_field, end_field])
wrap = wrap or autowrap(data[start_field], data[end_field])
cbar_width = kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
rect_kwargs = {**RECT_KWARGS, **kwargs}
cmap = pplt.Colormap(cmap)
norm = norm or pplt.Norm('linear', vmin=0, vmax=1)
i = -1
for p_num, idx in enumerate(data.index):
elem = data.loc[idx]
if i < -wrap:
i = -1
if color_field is None:
color = cmap(0.5)
else:
color = cmap(norm(elem[color_field]))
width = elem[end_field] - elem[start_field]
rect = Rectangle((elem[start_field] - 0.5, i), width, 1, facecolor=color, **rect_kwargs)
ax.add_patch(rect)
if labels:
rx, ry = rect.get_xy()
cy = ry
cx = rx
ax.annotate(str(p_num), (cx, cy), color='k', fontsize=6, va='bottom', ha='right')
i -= 1
ax.set_ylim(-wrap, 0)
start, end = data[start_field].min(), data[end_field].max()
pad = 0.05*(end-start)
ax.set_xlim(start-pad, end+pad)
ax.set_yticks([])
if cbar and color_field:
cbar_ax = ax.colorbar(cmap, norm=norm, width=cbar_width)
cbar_ax.set_label(color_field, labelpad=-0)
else:
cbar_ax = None
return cbar_ax
def residue_time_scatter_figure(hdxm, field='rfu', cmap='turbo', norm=None, scatter_kwargs=None, cbar_kwargs=None,
**figure_kwargs):
"""per-residue per-exposure values for field `field` by weighted averaging """
n_subplots = hdxm.Nt
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols)))
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'residue_scatter_aspect'))
cbar_width = figure_kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
cmap = pplt.Colormap(cmap) # todo allow None as cmap
norm = norm or pplt.Norm('linear', vmin=0, vmax=1)
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, sharey=4, **figure_kwargs)
scatter_kwargs = scatter_kwargs or {}
axes_iter = iter(axes)
for hdx_tp in hdxm:
ax = next(axes_iter)
residue_time_scatter(ax, hdx_tp, field=field, cmap=cmap, norm=norm, cbar=False, **scatter_kwargs) #todo cbar kwargs? (check with other methods)
ax.format(title=f'exposure: {hdx_tp.exposure:.1f}')
for ax in axes_iter:
ax.axis('off')
axes.format(xlabel=r_xlabel, ylabel=field)
cbar_kwargs = cbar_kwargs or {}
cbars = []
for ax in axes:
if not ax.axison:
continue
cbar = add_cbar(ax, cmap, norm, **cbar_kwargs)
cbars.append(cbar)
return fig, axes, cbars
def residue_time_scatter(ax, hdx_tp, field='rfu', cmap='turbo', norm=None, cbar=True, **kwargs):
cmap = pplt.Colormap(cmap) # todo allow None as cmap
norm = norm or pplt.Norm('linear', vmin=0, vmax=1)
cbar_width = kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
scatter_kwargs = {**SCATTER_KWARGS, **kwargs}
values = hdx_tp.weighted_average(field)
colors = cmap(norm(values))
ax.scatter(values.index, values, c=colors, **scatter_kwargs)
if not cmap.monochrome and cbar:
add_cbar(ax, cmap, norm, width=cbar_width)
def residue_scatter_figure(hdxm_set, field='rfu', cmap='viridis', norm=None, scatter_kwargs=None,
**figure_kwargs):
n_subplots = hdxm_set.Ns
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols))) #todo disallow setting rows
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
cbar_width = figure_kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'residue_scatter_aspect'))
cmap = pplt.Colormap(cmap)
if norm is None:
tps = np.unique(np.concatenate([hdxm.timepoints for hdxm in hdxm_set]))
tps = tps[np.nonzero(tps)]
norm = pplt.Norm('log', vmin=tps.min(), vmax=tps.max())
else:
tps = np.unique(np.concatenate([hdxm.timepoints for hdxm in hdxm_set]))
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, **figure_kwargs)
axes_iter = iter(axes)
scatter_kwargs = scatter_kwargs or {}
for hdxm in hdxm_set:
ax = next(axes_iter)
residue_scatter(ax, hdxm, cmap=cmap, norm=norm, field=field, cbar=False, **scatter_kwargs)
ax.format(title=f'{hdxm.name}')
for ax in axes_iter:
ax.axis('off')
#todo function for this?
locator = pplt.Locator(norm(tps))
cbar_ax = fig.colorbar(cmap, width=cbar_width, ticks=locator)
formatter = pplt.Formatter('simple', precision=1)
cbar_ax.ax.set_yticklabels([formatter(t) for t in tps])
cbar_ax.set_label('Exposure time (s)', labelpad=-0)
axes.format(xlabel=r_xlabel)
return fig, axes, cbar_ax
def residue_scatter(ax, hdxm, field='rfu', cmap='viridis', norm=None, cbar=True, **kwargs):
cmap = pplt.Colormap(cmap)
tps = hdxm.timepoints[np.nonzero(hdxm.timepoints)]
norm = norm or pplt.Norm('log', tps.min(), tps.max())
cbar_width = kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
scatter_kwargs = {**SCATTER_KWARGS, **kwargs}
for hdx_tp in hdxm:
if isinstance(norm, mpl.colors.LogNorm) and hdx_tp.exposure == 0.:
continue
values = hdx_tp.weighted_average(field)
color = cmap(norm(hdx_tp.exposure))
scatter_kwargs['color'] = color
ax.scatter(values.index, values, **scatter_kwargs)
if cbar:
locator = pplt.Locator(norm(tps))
cbar_ax = ax.colorbar(cmap, width=cbar_width, ticks=locator)
formatter = pplt.Formatter('simple', precision=1)
cbar_ax.ax.set_yticklabels([formatter(t) for t in tps])
cbar_ax.set_label('Exposure time (s)', labelpad=-0)
def dG_scatter_figure(data, cmap=None, norm=None, scatter_kwargs=None, cbar_kwargs=None, **figure_kwargs):
protein_states = data.columns.get_level_values(0).unique()
n_subplots = len(protein_states)
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols)))
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'deltaG_aspect'))
sharey = figure_kwargs.pop('sharey', 1)
cmap_default, norm_default = default_cmap_norm('dG')
cmap = cmap or cmap_default
cmap = pplt.Colormap(cmap)
norm = norm or norm_default
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, sharey=sharey, **figure_kwargs)
axes_iter = iter(axes)
scatter_kwargs = scatter_kwargs or {}
for state in protein_states:
sub_df = data[state]
ax = next(axes_iter)
colorbar_scatter(ax, sub_df, cmap=cmap, norm=norm, cbar=False, **scatter_kwargs)
ax.format(title=f'{state}')
for ax in axes_iter:
ax.set_axis_off()
# Set global ylims
ylims = [lim for ax in axes if ax.axison for lim in ax.get_ylim()]
axes.format(ylim=(np.max(ylims), np.min(ylims)), yticklabelloc='none', ytickloc='none')
cbar_kwargs = cbar_kwargs or {}
cbars = []
cbar_norm = pplt.Norm('linear', norm.vmin*1e-3, norm.vmax*1e-3)
for ax in axes:
if not ax.axison:
continue
cbar = add_cbar(ax, cmap, cbar_norm, **cbar_kwargs)
cbars.append(cbar)
return fig, axes, cbars
#alias
deltaG_scatter_figure = dG_scatter_figure
def ddG_scatter_figure(data, reference=None, cmap=None, norm=None, scatter_kwargs=None, cbar_kwargs=None,
**figure_kwargs):
protein_states = data.columns.get_level_values(0).unique()
if reference is None:
reference_state = protein_states[0]
elif isinstance(reference, int):
reference_state = protein_states[reference]
elif reference in protein_states:
reference_state = reference
else:
raise ValueError(f"Invalid value {reference!r} for 'reference'")
dG_test = data.xs('deltaG', axis=1, level=1).drop(reference_state, axis=1)
dG_ref = data[reference_state, 'deltaG']
ddG = dG_test.subtract(dG_ref, axis=0)
ddG.columns = pd.MultiIndex.from_product([ddG.columns, ['deltadeltaG']], names=['State', 'quantity'])
cov_test = data.xs('covariance', axis=1, level=1).drop(reference_state, axis=1)**2
cov_ref = data[reference_state, 'covariance']**2
cov = cov_test.add(cov_ref, axis=0).pow(0.5)
cov.columns = pd.MultiIndex.from_product([cov.columns, ['covariance']], names=['State', 'quantity'])
combined = pd.concat([ddG, cov], axis=1)
n_subplots = len(protein_states) - 1
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols)))
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'deltaG_aspect'))
sharey = figure_kwargs.pop('sharey', 1)
cmap_default, norm_default = default_cmap_norm('ddG')
cmap = cmap or cmap_default
cmap = pplt.Colormap(cmap)
norm = norm or norm_default
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, sharey=sharey, **figure_kwargs)
axes_iter = iter(axes)
scatter_kwargs = scatter_kwargs or {}
for state in protein_states:
if state == reference_state:
continue
sub_df = combined[state]
ax = next(axes_iter)
colorbar_scatter(ax, sub_df, y='deltadeltaG', cmap=cmap, norm=norm, cbar=False, **scatter_kwargs)
title = f'{state} - {reference_state}'
ax.format(title=title)
for ax in axes_iter:
ax.set_axis_off()
# Set global ylims
ylim = np.abs([lim for ax in axes if ax.axison for lim in ax.get_ylim()]).max()
axes.format(ylim=(ylim, -ylim), yticklabelloc='none', ytickloc='none')
cbar_kwargs = cbar_kwargs or {}
cbars = []
cbar_norm = pplt.Norm('linear', norm.vmin*1e-3, norm.vmax*1e-3)
for ax in axes:
if not ax.axison:
continue
cbar = add_cbar(ax, cmap, cbar_norm, **cbar_kwargs)
cbars.append(cbar)
return fig, axes, cbars
deltadeltaG_scatter_figure = ddG_scatter_figure
def peptide_mse_figure(fit_result, cmap='Haline', norm=None, rect_kwargs=None, **figure_kwargs):
n_subplots = len(fit_result)
ncols = figure_kwargs.pop('ncols', min(cfg.getint('plotting', 'ncols'), n_subplots))
nrows = figure_kwargs.pop('nrows', int(np.ceil(n_subplots / ncols)))
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'peptide_mse_aspect'))
cmap = pplt.Colormap(cmap)
fig, axes = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, **figure_kwargs)
axes_iter = iter(axes)
mse = fit_result.get_mse() #shape: Ns, Np, Nt
cbars = []
rect_kwargs = rect_kwargs or {}
for i, mse_sample in enumerate(mse):
mse_peptide = np.mean(mse_sample, axis=1)
hdxm = fit_result.hdxm_set.hdxm_list[i]
peptide_data = hdxm.coverage.data
data_dict = {'start': peptide_data['start'], 'end': peptide_data['end'], 'mse': mse_peptide[:hdxm.Np]}
mse_df = pd.DataFrame(data_dict)
ax = next(axes_iter)
vmax = mse_df['mse'].max()
sub_norm = norm or pplt.Norm('linear', vmin=0, vmax=vmax)  # per-sample norm unless a global norm is given
#color bar per subplot as norm differs
#todo perhaps unify color scale? -> when global norm, global cbar
cbar_ax = peptide_coverage(ax, mse_df, color_field='mse', norm=sub_norm, cmap=cmap, **rect_kwargs)
cbar_ax.set_label('MSE')
cbars.append(cbar_ax)
ax.format(xlabel=r_xlabel, title=f'{hdxm.name}')
return fig, axes, cbars
def loss_figure(fit_result, **figure_kwargs):
ncols = 1
nrows = 1
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'loss_aspect')) # todo loss aspect also in config?
fig, ax = pplt.subplots(ncols=ncols, nrows=nrows, width=figure_width, aspect=aspect, **figure_kwargs)
fit_result.losses.plot(ax=ax)
# ax.plot(fit_result.losses, legend='t') # alternative proplot plotting
# ox = ax.alty()
# reg_loss = fit_result.losses.drop('mse_loss', axis=1)
# total = fit_result.losses.sum(axis=1)
# perc = reg_loss.divide(total, axis=0) * 100
# perc.plot(ax=ox) #todo formatting (perc as --, matching colors, legend)
#
ax.format(xlabel="Number of epochs", ylabel='Loss')
return fig, ax
def linear_bars_figure(data, reference=None, field='deltaG', norm=None, cmap=None, labels=None, **figure_kwargs):
#todo add sorting
protein_states = data.columns.get_level_values(0).unique()
if isinstance(reference, int):
reference_state = protein_states[reference]
elif reference in protein_states:
reference_state = reference
elif reference is None:
reference_state = None
else:
raise ValueError(f"Invalid value {reference!r} for 'reference'")
if reference_state:
test = data.xs(field, axis=1, level=1).drop(reference_state, axis=1)
ref = data[reference_state, field]
plot_data = test.subtract(ref, axis=0)
plot_data.columns = pd.MultiIndex.from_product([plot_data.columns, [field]], names=['State', 'quantity'])
cmap_default, norm_default = default_cmap_norm('ddG')
n_subplots = len(protein_states) - 1
else:
plot_data = data
cmap_default, norm_default = default_cmap_norm('dG')
n_subplots = len(protein_states)
cmap = cmap or cmap_default
norm = norm or norm_default
ncols = 1
nrows = n_subplots
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'linear_bars_aspect'))
cbar_width = figure_kwargs.pop('cbar_width', cfg.getfloat('plotting', 'cbar_width')) / 25.4
fig, axes = pplt.subplots(nrows=nrows, ncols=ncols, aspect=aspect, width=figure_width, hspace=0)
axes_iter = iter(axes)
labels = labels or protein_states
if len(labels) != len(protein_states):
raise ValueError('Number of labels provided must be equal to the number of protein states')
for label, state in zip(labels, protein_states):
if state == reference_state:
continue
values = plot_data[state, field]
rmin, rmax = values.index.min(), values.index.max()
extent = [rmin - 0.5, rmax + 0.5, 0, 1]
img = np.expand_dims(values, 0)
ax = next(axes_iter)
from matplotlib.axes import Axes
Axes.imshow(ax, norm(img), aspect='auto', cmap=cmap, vmin=0, vmax=1, interpolation='None',
extent=extent)
# ax.imshow(img, aspect='auto', cmap=cmap, norm=norm, interpolation='None', discrete=False,
# extent=extent)
ax.format(yticks=[])
ax.text(1.02, 0.5, label, horizontalalignment='left',
verticalalignment='center', transform=ax.transAxes)
axes.format(xlabel=r_xlabel)
sclf = 1e-3 # todo kwargs / check value of filed
cmap_norm = copy(norm)
cmap_norm.vmin *= sclf
cmap_norm.vmax *= sclf
# when a reference state is given the plotted quantity is a difference, so label with ddG
if field == 'deltaG' and reference_state:
label = ddG_ylabel
elif field == 'deltaG':
label = dG_ylabel
else:
label = ''
fig.colorbar(cmap, norm=cmap_norm, loc='b', label=label, width=cbar_width)
return fig, axes
def rainbowclouds_figure(data, reference=None, field='deltaG', norm=None, cmap=None, update_rc=True, **figure_kwargs):
# todo add sorting
if update_rc:
plt.rcParams["image.composite_image"] = False
protein_states = data.columns.get_level_values(0).unique()
if isinstance(reference, int):
reference_state = protein_states[reference]
elif reference in protein_states:
reference_state = reference
elif reference is None:
reference_state = None
else:
raise ValueError(f"Invalid value {reference!r} for 'reference'")
if reference_state:
test = data.xs(field, axis=1, level=1).drop(reference_state, axis=1)
ref = data[reference_state, field]
plot_data = test.subtract(ref, axis=0)
plot_data.columns = pd.MultiIndex.from_product([plot_data.columns, [field]], names=['State', 'quantity'])
cmap_default, norm_default = default_cmap_norm('ddG')
else:
plot_data = data
cmap_default, norm_default = default_cmap_norm('dG')
cmap = cmap or cmap_default
norm = norm or norm_default
plot_data = plot_data.xs(field, axis=1, level=1)
#scaling
plot_data *= 1e-3
norm.vmin = norm.vmin * 1e-3
norm.vmax = norm.vmax * 1e-3
f_data = [plot_data[column].dropna().to_numpy() for column in plot_data.columns] # todo make funcs accept dataframes
f_labels = plot_data.columns
ncols = 1
nrows = 1
figure_width = figure_kwargs.pop('width', cfg.getfloat('plotting', 'page_width')) / 25.4
aspect = figure_kwargs.pop('aspect', cfg.getfloat('plotting', 'rainbow_aspect'))
boxplot_width = 0.1
orientation = 'vertical'
strip_kwargs = dict(offset=0.0, orientation=orientation, s=2, colors='k', jitter=0.2, alpha=0.25)
kde_kwargs = dict(linecolor='k', offset=0.15, orientation=orientation, fillcolor=False, fill_cmap=cmap,
fill_norm=norm, y_scale=None, y_norm=0.4, linewidth=1)
boxplot_kwargs = dict(offset=0.2, sym='', linewidth=1., linecolor='k', orientation=orientation,
widths=boxplot_width)
fig, axes = pplt.subplots(nrows=nrows, ncols=ncols, width=figure_width, aspect=aspect, hspace=0)
ax = axes[0]
stripplot(f_data, ax=ax, **strip_kwargs)
kdeplot(f_data, ax=ax, **kde_kwargs)
boxplot(f_data, ax=ax, **boxplot_kwargs)
label_axes(f_labels, ax=ax, rotation=45)
if field == 'deltaG' and reference_state:
label = ddG_ylabel
elif field == 'deltaG':
label = dG_ylabel
else:
label = ''
ax.format(xlim=(-0.75, len(f_data) - 0.5), ylabel=label, yticklabelloc='left', ytickloc='left',
ylim=ax.get_ylim()[::-1])
add_cbar(ax, cmap, norm)
return fig, ax
def colorbar_scatter(ax, data, y='deltaG', yerr='covariance', cmap=None, norm=None, cbar=True, **kwargs):
#todo make error bars optional
#todo custom ylims? scaling?
cmap_default, norm_default = default_cmap_norm(y)
if y in ['deltaG', 'deltadeltaG']:
sclf = 1e-3 # deltaG are given in J/mol but plotted in kJ/mol
else:
if cmap is None or norm is None:
raise ValueError("No valid `cmap` or `norm` is given.")
sclf = 1e-3
cmap = cmap or cmap_default
cmap = pplt.Colormap(cmap)
norm = norm or norm_default
colors = cmap(norm(data[y]))
#todo errorbars using proplot kwargs?
errorbar_kwargs = {**ERRORBAR_KWARGS, **kwargs.pop('errorbar_kwargs', {})}
scatter_kwargs = {**SCATTER_KWARGS, **kwargs}
ax.scatter(data.index, data[y]*sclf, color=colors, **scatter_kwargs)
with autoscale_turned_off(ax):
ax.errorbar(data.index, data[y]*sclf, yerr=data[yerr]*sclf, zorder=-1,
**errorbar_kwargs)
ax.set_xlabel(r_xlabel)
# Default y labels
labels = {'deltaG': dG_ylabel, 'deltadeltaG': ddG_ylabel}
label = labels.get(y, '')
ax.set_ylabel(label)
ylim = ax.get_ylim()
if (ylim[0] < ylim[1]) and y == 'deltaG':
ax.set_ylim(*ylim[::-1])
elif y == 'deltadeltaG':
ylim = np.max(np.abs(ylim))
ax.set_ylim(ylim, -ylim)
if cbar:
cbar_norm = copy(norm)
cbar_norm.vmin *= sclf
cbar_norm.vmax *= sclf
cbar = add_cbar(ax, cmap, cbar_norm)
else:
cbar = None
return cbar
def cmap_norm_from_nodes(colors, nodes, bad=None):
nodes = np.array(nodes)
if not np.all(np.diff(nodes) > 0):
raise ValueError("Node values must be monotonically increasing")
norm = pplt.Norm('linear', vmin=nodes.min(), vmax=nodes.max(), clip=True)
color_spec = list(zip(norm(nodes), colors))
cmap = pplt.Colormap(color_spec)
bad = bad or cfg.get('plotting', 'no_coverage')
cmap.set_bad(bad)
return cmap, norm
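# Hedged example (not in the original source): a three-color dG colormap spanning
# 10-40 kJ/mol, analogous to how get_cmap_norm_preset builds its presets below.
# cmap, norm = cmap_norm_from_nodes(['#CC3311', '#009988', '#0077BB'], nodes=[10e3, 25e3, 40e3])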
def default_cmap_norm(datatype):
if datatype in ['deltaG', 'dG']:
return get_cmap_norm_preset('vibrant', 10e3, 40e3)
elif datatype in ['deltadeltaG', 'ddG']:
return get_cmap_norm_preset('PRGn', -10e3, 10e3)
elif datatype == 'rfu':
norm = pplt.Norm('linear', 0, 1)
cmap = pplt.Colormap('turbo')
return cmap, norm
elif datatype == 'mse':
cmap = pplt.Colormap('Haline')
return cmap, None
else:
raise ValueError(f"Invalid datatype {datatype!r}")
def get_cmap_norm_preset(name, vmin, vmax):
# Paul Tol colour schemes: https://personal.sron.nl/~pault/#sec:qualitative
#todo warn if users use diverging colors with non diverging vmin/vmax?
colors, bad = get_color_scheme(name)
nodes = np.linspace(vmin, vmax, num=len(colors), endpoint=True)
cmap, norm = cmap_norm_from_nodes(colors, nodes, bad)
return cmap, norm
def get_color_scheme(name):
# Paul Tol colour schemes: https://personal.sron.nl/~pault/#sec:qualitative
if name == 'rgb':
colors = ['#0000ff', '#00ff00', '#ff0000'] # blue, green, red
bad = '#8c8c8c'
elif name == 'bright':
colors = ['#ee6677', '#288833', '#4477aa']
bad = '#bbbbbb'
elif name == 'vibrant':
colors = ['#CC3311', '#009988', '#0077BB']
bad = '#bbbbbb'
elif name == 'muted':
colors = ['#882255', '#117733', '#332288']
bad = '#dddddd'
elif name == 'pale':
colors = ['#ffcccc', '#ccddaa', '#bbccee']
bad = '#dddddd'
elif name == 'dark':
colors = ['#663333', '#225522', '#222255']
bad = '#555555'
elif name == 'delta': # Original ddG colors
colors = ['#006d2c', '#ffffff', '#54278e'] # Green, white, purple (flexible, no change, rigid)
bad = '#ffee99'
elif name == 'sunset':
colors = ['#a50026', '#dd3d2d', '#f67e4b', '#fdb366', '#feda8b', '#eaeccc', '#c2e4ef', '#98cae1', '#6ea6cd',
'#4a7bb7', '#364b9a']
bad = '#ffffff'
elif name == 'BuRd':
colors = ['#b2182b', '#d6604d', '#f4a582', '#fddbc7', '#f7f7f7', '#d1e5f0', '#92c5de', '#4393c3', '#2166ac']
bad = '#ffee99'
elif name == 'PRGn':
colors = ['#1b7837', '#5aae61', '#acd39e', '#d9f0d3', '#f7f7f7', '#e7d4e8', '#c2a5cf', '#9970ab', '#762a83']
bad = '#ffee99'
else:
raise ValueError(f"Color scheme '{name}' not found")
return colors, bad
def pymol_figures(data, output_path, pdb_file, reference=None, field='deltaG', cmap=None, norm=None, extent=None,
orient=True, views=None, name_suffix='',
additional_views=None, img_size=(640, 640)):
protein_states = data.columns.get_level_values(0).unique()
if isinstance(reference, int):
reference_state = protein_states[reference]
elif reference in protein_states:
reference_state = reference
elif reference is None:
reference_state = None
else:
raise ValueError(f"Invalid value {reference!r} for 'reference'")
if reference_state:
test = data.xs(field, axis=1, level=1).drop(reference_state, axis=1)
ref = data[reference_state, field]
plot_data = test.subtract(ref, axis=0)
plot_data.columns = pd.MultiIndex.from_product([plot_data.columns, [field]], names=['State', 'quantity'])
cmap_default, norm_default = default_cmap_norm('ddG')
else:
plot_data = data
cmap_default, norm_default = default_cmap_norm('dG')
cmap = cmap or cmap_default
norm = norm or norm_default
#plot_data = plot_data.xs(field, axis=1, level=1)
for state in protein_states:
if state == reference_state:
continue
values = plot_data[state, field]
rmin, rmax = extent or [None, None]
rmin = rmin or values.index.min()
rmax = rmax or values.index.max()
values = values.reindex(pd.RangeIndex(rmin, rmax+1, name='r_number'))
colors = apply_cmap(values, cmap, norm)
name = f'pymol_ddG_{state}_ref_{reference_state}' if reference_state else f'pymol_dG_{state}'
name += name_suffix
pymol_render(output_path, pdb_file, colors, name=name, orient=orient, views=views, additional_views=additional_views,
img_size=img_size)
def pymol_render(output_path, pdb_file, colors, name='Pymol render', orient=True, views=None, additional_views=None, img_size=(640, 640)):
if cmd is None:
raise ModuleNotFoundError("Pymol module is not installed")
px, py = img_size
cmd.reinitialize()
cmd.load(pdb_file)
if orient:
cmd.orient()
cmd.set('antialias', 2)
cmd.set('fog', 0)
color_pymol(colors, cmd)
if views:
for i, view in enumerate(views):
cmd.set_view(view)
cmd.ray(px, py, renderer=0, antialias=2)
output_file = output_path / f'{name}_view_{i}.png'
cmd.png(str(output_file))
else:
cmd.ray(px, py, renderer=0, antialias=2)
output_file = output_path / f'{name}_xy.png'
cmd.png(str(output_file))
cmd.rotate('x', 90)
cmd.ray(px, py, renderer=0, antialias=2)
output_file = output_path / f'{name}_xz.png'
cmd.png(str(output_file))
cmd.rotate('z', -90)
cmd.ray(px, py, renderer=0, antialias=2)
output_file = output_path / f'{name}_yz.png'
cmd.png(str(output_file))
additional_views = additional_views or []
for i, view in enumerate(additional_views):
cmd.set_view(view)
cmd.ray(px, py, renderer=0, antialias=2)
output_file = output_path / f'{name}_view_{i}.png'
cmd.png(str(output_file))
def add_cbar(ax, cmap, norm, **kwargs):
"""Truncate or expand cmap such that it covers axes limit and and colorbar to axes"""
N = cmap.N
ymin, ymax = np.min(ax.get_ylim()), np.max(ax.get_ylim())
values = np.linspace(ymin, ymax, num=N)
norm_clip = copy(norm)
norm_clip.clip = True
colors = cmap(norm_clip(values))
cb_cmap = pplt.Colormap(colors)
cb_norm = pplt.Norm('linear', vmin=ymin, vmax=ymax) #todo allow log norms?
cbar_kwargs = {**CBAR_KWARGS, **kwargs}
reverse = np.diff(ax.get_ylim()) < 0
cbar = ax.colorbar(cb_cmap, norm=cb_norm, reverse=reverse, **cbar_kwargs)
return cbar
#https://stackoverflow.com/questions/38629830/how-to-turn-off-autoscaling-in-matplotlib-pyplot
@contextmanager
def autoscale_turned_off(ax=None):
ax = ax or plt.gca()
lims = [ax.get_xlim(), ax.get_ylim()]
yield
ax.set_xlim(*lims[0])
ax.set_ylim(*lims[1])
def stripplot(data, ax=None, jitter=0.25, colors=None, offset=0., orientation='vertical', **scatter_kwargs):
ax = ax or plt.gca()
color_list = _prepare_colors(colors, len(data))
for i, (d, color) in enumerate(zip(data, color_list)):
jitter_offsets = (np.random.rand(d.size) - 0.5) * jitter
cat_var = i * np.ones_like(d) + jitter_offsets + offset # categorical axis variable
if orientation == 'vertical':
ax.scatter(cat_var, d, color=color, **scatter_kwargs)
elif orientation == 'horizontal':
ax.scatter(d, len(data) - cat_var, color=color, **scatter_kwargs)
def _prepare_colors(colors, N):
if not isinstance(colors, list):
return [colors]*N
else:
return colors
# From joyplot
def _x_range(data, extra=0.2):
""" Compute the x_range, i.e., the values for which the
density will be computed. It should be slightly larger than
the max and min so that the plot actually reaches 0, and
also has a bit of a tail on both sides.
"""
try:
sample_range = np.nanmax(data) - np.nanmin(data)
except ValueError:
return []
if sample_range < 1e-6:
return [np.nanmin(data), np.nanmax(data)]
return np.linspace(np.nanmin(data) - extra*sample_range,
np.nanmax(data) + extra*sample_range, 1000)
def kdeplot(data, ax=None, offset=0., orientation='vertical',
linecolor=None, linewidth=None, zero_line=True, x_extend=1e-3, y_scale=None, y_norm=None, fillcolor=False, fill_cmap=None,
fill_norm=None):
assert not (y_scale and y_norm), "Cannot set both 'y_scale' and 'y_norm'"
y_scale = 1. if y_scale is None else y_scale
color_list = _prepare_colors(linecolor, len(data))
for i, (d, color) in enumerate(zip(data, color_list)):
#todo remove NaNs?
# Perhaps also borrow this part from joyplot
kde_func = kde.gaussian_kde(d)
kde_x = _x_range(d, extra=0.4)
kde_y = kde_func(kde_x)*y_scale
if y_norm:
kde_y = y_norm*kde_y / kde_y.max()
bools = kde_y > x_extend * kde_y.max()
kde_x = kde_x[bools]
kde_y = kde_y[bools]
cat_var = len(data) - i + kde_y + offset # x in horizontal
cat_var_zero = (len(data) - i)*np.ones_like(kde_y) + offset
# x = i * np.ones_like(d) + jitter_offsets + offset # 'x' like, could be y axis
if orientation == 'horizontal':
plot_x = kde_x
plot_y = cat_var
img_data = kde_x.reshape(1, -1)
elif orientation == 'vertical':
plot_x = len(data) - cat_var
plot_y = kde_x
img_data = kde_x[::-1].reshape(-1, 1)
else:
raise ValueError(f"Invalid value '{orientation}' for 'orientation'")
line, = ax.plot(plot_x, plot_y, color=color, linewidth=linewidth)
if zero_line:
ax.plot([plot_x[0], plot_x[-1]], [plot_y[0], plot_y[-1]], color=line.get_color(), linewidth=linewidth)
if fillcolor:
#todo refactor to one if/else orientation
color = line.get_color() if fillcolor is True else fillcolor
if orientation == 'horizontal':
ax.fill_between(kde_x, plot_y, np.linspace(plot_y[0], plot_y[-1], num=plot_y.size, endpoint=True),
color=color)
elif orientation == 'vertical':
ax.fill_betweenx(kde_x, len(data) - cat_var, len(data) - cat_var_zero, color=color)
if fill_cmap:
fill_norm = fill_norm or (lambda x: x)
color_img = fill_norm(img_data)
xmin, xmax = np.min(plot_x), np.max(plot_x)
ymin, ymax = np.min(plot_y), np.max(plot_y)
extent = [xmin-offset, xmax-offset, ymin, ymax] if orientation == 'horizontal' else [xmin, xmax, ymin-offset, ymax-offset]
im = Axes.imshow(ax, color_img, aspect='auto', cmap=fill_cmap, extent=extent) # left, right, bottom, top
fill_line, = ax.fill(plot_x, plot_y, facecolor='none')
im.set_clip_path(fill_line)
def boxplot(data, ax, offset=0., orientation='vertical', widths=0.25, linewidth=None, linecolor=None, **kwargs):
if orientation == 'vertical':
vert = True
positions = np.arange(len(data)) + offset
elif orientation == 'horizontal':
vert = False
positions = len(data) - np.arange(len(data)) - offset
else:
raise ValueError(f"Invalid value '{orientation}' for 'orientation', options are 'horizontal' or 'vertical'")
#todo for loop
boxprops = kwargs.pop('boxprops', {})
whiskerprops = kwargs.pop('whiskerprops', {})
medianprops = kwargs.pop('medianprops', {})
boxprops['linewidth'] = linewidth
whiskerprops['linewidth'] = linewidth
medianprops['linewidth'] = linewidth
boxprops['color'] = linecolor
whiskerprops['color'] = linecolor
medianprops['color'] = linecolor
Axes.boxplot(ax, data, vert=vert, positions=positions, widths=widths, boxprops=boxprops, whiskerprops=whiskerprops,
medianprops=medianprops, **kwargs)
def label_axes(labels, ax, offset=0., orientation='vertical', **kwargs):
#todo check offset sign
if orientation == 'vertical':
ax.set_xticks(np.arange(len(labels)) + offset)
ax.set_xticklabels(labels, **kwargs)
elif orientation == 'horizontal':
ax.set_yticks(len(labels) - np.arange(len(labels)) + offset)
ax.set_yticklabels(labels, **kwargs)
class FitResultPlotBase(object):
def __init__(self, fit_result):
self.fit_result = fit_result
#todo equivalent this for axes?
def _make_figure(self, figure_name, **kwargs):
if not figure_name.endswith('_figure'):
figure_name += '_figure'
function = globals()[figure_name]
args_dict = self._get_arg(figure_name)
# return dictionary
# keys: either protein state name (hdxm.name) or 'All states'
figures_dict = {name: function(arg, **kwargs) for name, arg in args_dict.items()}
return figures_dict
def make_figure(self, figure_name, **kwargs):
figures_dict = self._make_figure(figure_name, **kwargs)
if len(figures_dict) == 1:
return next(iter(figures_dict.values()))
else:
return figures_dict
def get_fit_timepoints(self):
all_timepoints = np.concatenate([hdxm.timepoints for hdxm in self.fit_result.hdxm_set])
#x_axis_type = self.settings.get('fit_time_axis', 'Log')
x_axis_type = 'Log' # todo configureable
num = 100
if x_axis_type == 'Linear':
time = np.linspace(0, all_timepoints.max(), num=num)
elif x_axis_type == 'Log':
elem = all_timepoints[np.nonzero(all_timepoints)]
start = np.log10(elem.min())
end = np.log10(elem.max())
pad = (end - start)*0.1
time = np.logspace(start-pad, end+pad, num=num, endpoint=True)
else:
raise ValueError("Invalid value for 'x_axis_type'")
return time
# repeated code with fitreport (pdf) -> base class for fitreport
def _get_arg(self, plot_func_name):
#Add _figure suffix if not present
if not plot_func_name.endswith('_figure'):
plot_func_name += '_figure'
if plot_func_name == 'peptide_coverage_figure':
return {hdxm.name: hdxm.data for hdxm in self.fit_result.hdxm_set.hdxm_list}
elif plot_func_name == 'residue_time_scatter_figure':
return {hdxm.name: hdxm for hdxm in self.fit_result.hdxm_set.hdxm_list}
elif plot_func_name == 'residue_scatter_figure':
return {'All states': self.fit_result.hdxm_set}
elif plot_func_name == 'dG_scatter_figure':
return {'All states': self.fit_result.output}
elif plot_func_name == 'ddG_scatter_figure':
return {'All states': self.fit_result.output}
elif plot_func_name == 'linear_bars_figure':
return {'All states': self.fit_result.output}
elif plot_func_name == 'rainbowclouds_figure':
return {'All states': self.fit_result.output}
elif plot_func_name == 'peptide_mse_figure':
return {'All states': self.fit_result}
elif plot_func_name == 'loss_figure':
return {'All states': self.fit_result}
else:
raise ValueError(f"Unknown plot function {plot_func_name!r}")
ALL_PLOT_TYPES = ['peptide_coverage', 'residue_scatter', 'dG_scatter', 'ddG_scatter', 'linear_bars', 'rainbowclouds',
'peptide_mse', 'loss']
class FitResultPlot(FitResultPlotBase):
def __init__(self, fit_result, output_path=None, **kwargs):
super().__init__(fit_result)
self.output_path = Path(output_path) if output_path else None
if self.output_path:  # only create the directory when an output path was given
self.output_path.mkdir(exist_ok=True)
if self.output_path and not self.output_path.is_dir():
raise ValueError(f"Output path {output_path!r} is not a valid directory")
#todo save kwargs / rc params? / style context (https://matplotlib.org/devdocs/tutorials/introductory/customizing.html)
def save_figure(self, fig_name, ext='.png', **kwargs):
figures_dict = self._make_figure(fig_name, **kwargs)
if self.output_path is None:
raise ValueError(f"No output path given when `FitResultPlot` object as initialized")
for name, fig_tup in figures_dict.items():
fig = fig_tup if isinstance(fig_tup, plt.Figure) else fig_tup[0]
if name == 'All states': # todo variable for 'All states'
file_name = f"{fig_name.replace('_figure', '')}{ext}"
else:
file_name = f"{fig_name.replace('_figure', '')}_{name}{ext}"
file_path = self.output_path / file_name
fig.savefig(file_path)
plt.close(fig)
def plot_all(self, **kwargs):
for plot_type in tqdm(ALL_PLOT_TYPES):
fig_kwargs = kwargs.get(plot_type, {})
self.save_figure(plot_type, **fig_kwargs)
def plot_fitresults(fitresult_path, reference=None, plots='all', renew=False, cmap_and_norm=None, output_path=None,
output_type='.png', **save_kwargs):
"""
Parameters
----------
fitresult_path : path-like
Directory containing a saved fit result (as written by ``save_fitresult``).
reference : :obj:`int` or :obj:`str`, optional
Index or name of the protein state to use as reference for the ddG plots.
plots : :obj:`list` or :obj:`str`
Plot types to render, or 'all' for the full default set.
renew : :obj:`bool`
Intended to force re-rendering of existing figures (the check is currently a todo).
cmap_and_norm : :obj:`dict`, optional
Dictionary with cmap and norms to use. If `None`, reverts to defaults.
Dict format: {'dG': (cmap, norm), 'ddG': (cmap, norm)}
output_path : path-like, optional
Directory to write figures to. Defaults to `fitresult_path`.
output_type : :obj:`list` or :obj:`str`
File extension(s) of the output figures, e.g. '.png'.
Returns
-------
"""
# batch results only
history_path = fitresult_path / 'model_history.csv'
output_path = output_path or fitresult_path
output_type = list([output_type]) if isinstance(output_type, str) else output_type
fitresult = load_fitresult(fitresult_path)
protein_states = fitresult.output.df.columns.get_level_values(0).unique()
if isinstance(reference, int):
reference_state = protein_states[reference]
elif reference in protein_states:
reference_state = reference
elif reference is None:
reference_state = None
else:
raise ValueError(f"Invalid value {reference!r} for 'reference'")
# todo needs tidying up
cmap_and_norm = cmap_and_norm or {}
dG_cmap, dG_norm = cmap_and_norm.get('dG', (None, None))
dG_cmap_default, dG_norm_default = default_cmap_norm('dG')
ddG_cmap, ddG_norm = cmap_and_norm.get('ddG', (None, None))
ddG_cmap_default, ddG_norm_default = default_cmap_norm('ddG')
dG_cmap = dG_cmap or dG_cmap_default
dG_norm = dG_norm or dG_norm_default
ddG_cmap = ddG_cmap or ddG_cmap_default
ddG_norm = ddG_norm or ddG_norm_default
#check_exists = lambda x: False if renew else x.exists()
#todo add logic for checking renew or not
if plots == 'all':
plots = ['loss', 'rfu_coverage', 'rfu_scatter', 'dG_scatter', 'ddG_scatter', 'linear_bars', 'rainbowclouds',
'peptide_mse']
# def check_update(pth, fname, extensions, renew):
# # Returns True if the target graph should be renewed or not
# if renew:
# return True
# else:
# pths = [pth / (fname + ext) for ext in extensions]
# return any([not pth.exists() for pth in pths])
# plots = [p for p in plots if check_update(output_path, p, output_type, renew)]
if 'loss' in plots:
loss_df = fitresult.losses
loss_df.plot()
mse_loss = loss_df['mse_loss']
reg_loss = loss_df.iloc[:, 1:].sum(axis=1)
reg_percentage = 100*reg_loss / (mse_loss + reg_loss)
fig = plt.gcf()
ax = plt.gca()
ax1 = ax.twinx()
reg_percentage.plot(ax=ax1, color='k')
ax1.set_xlim(0, None)
for ext in output_type:
f_out = output_path / ('loss' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'rfu_coverage' in plots:
for hdxm in fitresult.hdxm_set:
fig, axes, cbar_ax = peptide_coverage_figure(hdxm.data)
for ext in output_type:
f_out = output_path / (f'rfu_coverage_{hdxm.name}' + ext)
plt.savefig(f_out)
plt.close(fig)
#todo rfu_scatter_timepoint
if 'rfu_scatter' in plots:
fig, axes, cbar = residue_scatter_figure(fitresult.hdxm_set)
for ext in output_type:
f_out = output_path / (f'rfu_scatter' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'dG_scatter' in plots:
fig, axes, cbars = dG_scatter_figure(fitresult.output.df, cmap=dG_cmap, norm=dG_norm)
for ext in output_type:
f_out = output_path / (f'dG_scatter' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'ddG_scatter' in plots:
fig, axes, cbars = ddG_scatter_figure(fitresult.output.df, reference=reference, cmap=ddG_cmap, norm=ddG_norm)
for ext in output_type:
f_out = output_path / (f'ddG_scatter' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'linear_bars' in plots:
fig, axes = linear_bars_figure(fitresult.output.df)
for ext in output_type:
f_out = output_path / (f'dG_linear_bars' + ext)
plt.savefig(f_out)
plt.close(fig)
if reference_state:
fig, axes = linear_bars_figure(fitresult.output.df, reference=reference)
for ext in output_type:
f_out = output_path / (f'ddG_linear_bars' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'rainbowclouds' in plots:
fig, ax = rainbowclouds_figure(fitresult.output.df)
for ext in output_type:
f_out = output_path / (f'dG_rainbowclouds' + ext)
plt.savefig(f_out)
plt.close(fig)
if reference_state:
fig, axes = rainbowclouds_figure(fitresult.output.df, reference=reference)
for ext in output_type:
f_out = output_path / (f'ddG_rainbowclouds' + ext)
plt.savefig(f_out)
plt.close(fig)
if 'peptide_mse' in plots:
fig, axes, cbars = peptide_mse_figure(fitresult)
for ext in output_type:
f_out = output_path / (f'peptide_mse' + ext)
plt.savefig(f_out)
plt.close(fig)
#
# if 'history' in plots:
# for h_df, name in zip(history_list, names):
# output_path = fitresult_path / f'{name}history.png'
# if check_exists(output_path):
# break
#
# num = len(h_df.columns)
# max_epochs = max([int(c) for c in h_df.columns])
#
# cmap = mpl.cm.get_cmap('winter')
# norm = mpl.colors.Normalize(vmin=1, vmax=max_epochs)
# colors = iter(cmap(np.linspace(0, 1, num=num)))
#
# fig, axes = pplt.subplots(nrows=1, width=width, aspect=aspect)
# ax = axes[0]
# for key in h_df:
# c = next(colors)
# to_hex(c)
#
# ax.scatter(h_df.index, h_df[key] * 1e-3, color=to_hex(c), **scatter_kwargs)
# ax.format(xlabel=r_xlabel, ylabel=dG_ylabel)
#
# values = np.linspace(0, max_epochs, endpoint=True, num=num)
# colors = cmap(norm(values))
# tick_labels = np.linspace(0, max_epochs, num=5)
#
# cbar = fig.colorbar(colors, values=values, ticks=tick_labels, space=0, width=cbar_width, label='Epochs')
# ax.format(yticklabelloc='None', ytickloc='None')
#
# plt.savefig(output_path)
# plt.close(fig)
```
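A hedged sketch of driving the plotting module above from a script; `fit_output/ecSecB` stands in for a directory written by `save_fitresult` and is not a real path in this repository.
```python
from pathlib import Path
from pyhdx.fileIO import load_fitresult
from pyhdx.plot import FitResultPlot, plot_fitresults

fit_dir = Path('fit_output') / 'ecSecB'  # hypothetical saved fit result

# Render a subset of the default figures next to the fit result
plot_fitresults(fit_dir, plots=['loss', 'dG_scatter', 'linear_bars'], output_type='.png')

# Or use the object API for per-figure control
fit_result = load_fitresult(fit_dir)
frp = FitResultPlot(fit_result, output_path='figures')
frp.save_figure('dG_scatter', ext='.pdf')
```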
#### File: pyhdx/web/main_controllers.py
```python
import logging
import param
import panel as pn
from pyhdx.models import PeptideMasterTable, HDXMeasurement
from pyhdx import VERSION_STRING
from pyhdx.models import HDXMeasurementSet
from panel.template import BaseTemplate
from lumen.sources import Source
from lumen.filters import FacetFilter
from functools import partial
from dask.distributed import Client
class MainController(param.Parameterized):
"""
Base class for application main controller
Subclass to extend
Parameters
----------
control_panels : :obj:`list`
List of :class:`~pyhdx.panel.base.ControlPanel` subclasses to instantiate and use
for this MainController instance
client : dask client
Attributes
----------
doc : :class:`~bokeh.document.Document`
Currently active Bokeh document
logger : :class:`~logging.Logger`
Logger instance
control_panels : :obj:`dict`
Dictionary with :class:`~pyhdx.panel.base.ControlPanel` instances (__name__ as keys)
figure_panels : :obj:`dict`
Dictionary with :class:`~pyhdx.panel.base.FigurePanel` instances (__name__ as keys)
"""
sources = param.Dict({}, doc='Dictionary of source objects available for plotting', precedence=-1)
transforms = param.Dict({}, doc='Dictionary of transforms')
filters = param.Dict({}, doc="Dictionary of filters")
opts = param.Dict({}, doc="Dictionary of formatting options (opts)")
views = param.Dict({}, doc="Dictionary of views")
logger = param.ClassSelector(logging.Logger, doc="Logger object")
def __init__(self, control_panels, client=False, **params):
super(MainController, self).__init__(**params)
self.client = client if client else Client()
if self.logger is None:
self.logger = logging.getLogger(str(id(self)))
self.control_panels = {ctrl.name: ctrl(self) for ctrl in control_panels} #todo as param?
self.template = None # Panel template
self.future_queue = [] # queue of tuples: (future, callback)
for filt in self.filters.values():
if isinstance(filt, FacetFilter):
continue
filt.param.watch(partial(self._rerender, invalidate_cache=True), 'value')
for trs in self.transforms.values():
if hasattr(trs, 'updated'):
trs.param.watch(partial(self._rerender, invalidate_cache=True), 'updated')
self._update_views()
self.start()
# from lumen.target.Target
def _rerender(self, *events, invalidate_cache=False):
self._update_views(invalidate_cache=invalidate_cache)
def _update_views(self, invalidate_cache=True, update_views=True, events=[]):
for view in self.views.values():
view.update(invalidate_cache=invalidate_cache)
def __panel__(self):
# This does something but not as expected
return self.template
@property
def panel(self):
return self.template
def update(self):
for view in self.views:
view.update()
def check_futures(self):
if self.future_queue:
for future, callback in self.future_queue[:]:
if future.status == 'finished':
callback(future)
self.future_queue.remove((future, callback))
def start(self):
refresh_rate = 1000
pn.state.add_periodic_callback(
self.check_futures, refresh_rate
)
class PyHDXController(MainController):
"""
Main controller for PyHDX web application.
"""
data_objects = param.Dict(default={}, doc='Dictionary for all datasets (HDXMeasurement objects)') # todo refactor
# for guesses (nested): <fit name>: {state1: <guesses>, state2: <guesses>, ...}
# for global fit (batch): <fit name>: fit_result_object
# for global fit (series): <fit name>: {state1: <fit_result_object>, state2:....}
fit_results = param.Dict({}, doc='Dictionary of fit results', precedence=-1)
sample_name = param.String(doc='Name describing the selected protein(s) state')
def __init__(self, *args, **kwargs):
super(PyHDXController, self).__init__(*args, **kwargs)
@param.depends('data_objects', watch=True)
def _datasets_updated(self):
if len(self.data_objects) == 0:
self.sample_name = ''
elif len(self.data_objects) == 1:
self.sample_name = str(next(iter(self.data_objects.keys())))
elif len(self.data_objects) < 5:
self.sample_name = ', '.join(self.data_objects.keys())
@param.depends('sample_name', watch=True)
def _update_name(self):
self.template.header[0].title = VERSION_STRING + ': ' + self.sample_name
@property
def hdx_set(self):
"""Returns combined HDXMeasurementSet of all currently added data objects"""
#todo when alignments are added in, update this as (fixed) attribute
return HDXMeasurementSet(list(self.data_objects.values()))
class ComparisonController(MainController):
"""
Main controller for binary comparison web application.
"""
datasets = param.Dict(default={}, doc='Dictionary for all datasets')
comparisons = param.Dict(default={}, doc='Dictionary for all comparisons (should be in sources)')
```
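The `future_queue` / `check_futures` pattern above polls submitted Dask futures once per second and fires a callback when they finish. A hedged sketch of how a controller instance might use it (`ctrl` and the submitted task are placeholders):
```python
# Assumes an instantiated MainController subclass `ctrl` with a running Dask client.
def on_done(future):
    ctrl.logger.info(f'task finished: {future.key}')

future = ctrl.client.submit(sum, [1, 2, 3])   # placeholder computation
ctrl.future_queue.append((future, on_done))   # picked up by the periodic check_futures()
```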
#### File: pyhdx/web/transforms.py
```python
from lumen.transforms import Transform
import param
from matplotlib.colors import Colormap, Normalize
from pyhdx.support import rgb_to_hex, autowrap
import itertools
import pandas as pd
import numpy as np
import panel as pn
class WebAppTransform(param.Parameterized): #todo subclass from Transform?
updated = param.Event()
def __init__(self, **params):
super().__init__(**params)
self.widgets = self.generate_widgets()
@property
def panel(self):
return pn.Column(*self.widgets.values())
# widget = self.widget.clone()
# self.widget.link(widget, value='value', bidirectional=True)
# return widget
def generate_widgets(self, **kwargs):
"""returns a dict with keys parameter names and values default mapped widgets"""
names = [p for p in self.param if self.param[p].precedence is None or self.param[p].precedence > 1]
widgets = pn.Param(self.param, show_name=False, show_labels=True, widgets=kwargs)
return {k: v for k, v in zip(names[1:], widgets)}
class RescaleTransform(Transform):
"""
This transform takes a field from a table and transforms in with a scaling factors
"""
transform_type = 'rescale'
scale_factor = param.Number(doc='Scaling factor to multiply field values with')
field = param.String(doc='Name of the field to transform')
def apply(self, table):
table = table.copy() # Performance-wise this might not be ideal
table[self.field] *= self.scale_factor
return table
class AccumulateRegularizersTransform(Transform):
"""
Very niche and temporary transform to accumulate reg losses to one column
"""
transform_type = 'accumulate_regularizers'
def apply(self, table):
# first two columns are index and mse_loss?
reg_total = table.iloc[:, 2:].sum(axis=1)
reg_total.name = 'reg_loss'
result = pd.concat([table.iloc[:, :2], reg_total], axis=1)
return result
class ResetIndexTransform(Transform):
level = param.ClassSelector(class_=(int, list, str), doc="""
Only remove the given levels from the index. Removes all levels by default.""")
drop = param.Boolean(default=False, doc="""
Do not try to insert index into dataframe columns. This resets the index to the default integer index.""")
col_level = param.ClassSelector(default=0, class_=(int, str), doc="""
If the columns have multiple levels, determines which level the labels are inserted into. By default it is
inserted into the first level.""")
col_fill = param.Parameter(default='', doc="""If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.""")
transform_type = 'reset_index'
def apply(self, table):
return table.reset_index(level=self.level, drop=self.drop, col_level=self.col_level, col_fill=self.col_fill)
class SetIndexTransform(Transform):
keys = param.Parameter(doc="""
This parameter can be either a single column key, a single array of the same length as the calling DataFrame,
or a list containing an arbitrary combination of column keys and arrays. Here, 'array' encompasses Series,
Index, np.ndarray, and instances of Iterator.""") ## label or array-like or list of labels/arrays
drop = param.Boolean(default=True, doc="""
Delete columns to be used as the new index.""")
append = param.Boolean(default=False, doc="""
Whether to append column to an existing index""")
verify_integrity = param.Boolean(default=False, doc="""
Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve
the performance of this method.""")
transform_type = 'set_index'
def apply(self, table):
# note: 'inplace' is not a parameter of this transform; always return a new table
return table.set_index(self.keys, drop=self.drop, append=self.append, verify_integrity=self.verify_integrity)
class ApplyCmapTransform(Transform, WebAppTransform):
"""
This transform takes data from a specified field, applies a norm and color map, and adds the resulting colors
in a new column
"""
fields = param.Selector(doc='Fields to choose from to apply cmap to')
field = param.String(doc='Name of the field to apply colors to')
cmap = param.ClassSelector(Colormap)
norm = param.ClassSelector(Normalize)
color_column = param.String('color', doc='Name of the added color column')
# default_color =
transform_type = 'color'
def __init__(self, **params):
super().__init__(**params)
# temporary
self.param['fields'].objects = ['deltaG', 'pfact']
self.fields = 'deltaG'
def apply(self, table):
values = table[self.fields]
colors = self.cmap(self.norm(values), bytes=True)
colors_hex = rgb_to_hex(colors)
table[self.color_column] = colors_hex
return table
@param.depends('cmap', 'fields', 'norm', watch=True)
def _updated(self):
self.updated = True
class PeptideLayoutTransform(Transform):
"""
Takes data with peptide start, end positions and transforms it to bottom, left, right, top positions of rectangles.
By default, for a given interval start, end (inclusive, exclusive) a rectangle is returned with a height of spanning from
(start - 0.5) to (end - 0.5)
"""
left = param.String('start', doc="Field name to use for left coordinate")
right = param.String('end', doc="Field name to use for the right coordinate")
height = param.Integer(1, doc="Height of the rectangles", constant=True)
value = param.String('', doc="Optional field name to pass through as value column")
passthrough = param.List([], doc="Optional field names to pass through (for hovertools)")
wrap = param.Integer(doc="Amount of peptides to plot on the y axis before wrapping around to y axis top")
step = param.Integer(5, bounds=(1, None), doc="Step size used for finding 'wrap' when its not specified")
margin = param.Integer(4, doc="Margin space to keep between peptides when finding 'wrap'")
transform_type = 'peptide_layout'
def apply(self, table):
if not self.wrap:
self.wrap = autowrap(table[self.left], table[self.right], margin=self.margin, step=self.step)
# Order of columns determines their role, not the names
columns = ['x0', 'y0', 'x1', 'y1'] # bottom-left (x0, y0) and top right (x1, y1)
output_table = pd.DataFrame(index=table.index, columns=columns)
output_table['x0'] = table[self.left] - 0.5
output_table['x1'] = table[self.right] - 0.5
        cycle = itertools.cycle(range(self.height*self.wrap, 0, -self.height))  # Starts at y top, cycles with wrap
yvals = np.array(list(itertools.islice(cycle, len(table)))) # Repeat cycle until the correct length
output_table['y0'] = yvals - self.height
output_table['y1'] = yvals
if self.value:
output_table['value'] = table[self.value]
if self.passthrough:
for item in self.passthrough:
assert item not in ['value', 'index'], "Invalid field name, 'index' and 'value' names are reserved"
output_table[item] = table[item]
output_table['index'] = table.index
return output_table
class RemoveValueTransform(Transform):
"""Removes entires where values in specified field match specified value"""
field = param.String(doc='Target field')
value = param.Number(doc='Rows with this value to remove')
@property
def query(self):
return f'{self.field} != {self.value}'
def apply(self, table):
try:
return table.query(self.query)
except pd.core.computation.ops.UndefinedVariableError:
return table
```
#### File: PyHDX/tests/test_batchprocessing.py
```python
from pyhdx.batch_processing import yaml_to_hdxm, yaml_to_hdxmset
from pyhdx.models import HDXMeasurement, HDXMeasurementSet
import numpy as np
from pathlib import Path
import yaml
cwd = Path(__file__).parent
input_dir = cwd / 'test_data' / 'input'
output_dir = cwd / 'test_data' / 'output'
np.random.seed(43)
class TestBatchProcessing(object):
def test_load_from_yaml(self):
yaml_pth = Path(input_dir / 'data_states.yaml')
data_dict = yaml.safe_load(yaml_pth.read_text())
hdxm = yaml_to_hdxm(data_dict['SecB_tetramer'], data_dir=input_dir)
assert isinstance(hdxm, HDXMeasurement)
assert hdxm.metadata['temperature'] == data_dict['SecB_tetramer']['temperature']['value'] + 273.15
assert hdxm.name == 'SecB WT apo'
hdxm_set = yaml_to_hdxmset(data_dict, data_dir=input_dir)
assert isinstance(hdxm_set, HDXMeasurementSet)
assert hdxm_set.names == list(data_dict.keys())
```
#### File: PyHDX/tests/test_config.py
```python
from pathlib import Path
from pyhdx.config import ConfigurationSettings, read_config, config_file_path, reset_config, write_config
import pytest
directory = Path(__file__).parent
class TestConfig(object):
def test_cfg_singleton(self, tmp_path):
cfg = ConfigurationSettings()
scheduler_address = '127.0.0.1:00000'
cfg.set('cluster', 'scheduler_address', scheduler_address)
assert cfg.get('cluster', 'scheduler_address') == scheduler_address
cfg2 = ConfigurationSettings()
assert id(cfg) == id(cfg2)
fpath = Path(tmp_path) / 'config.ini'
write_config(fpath, cfg._config)
cp_config = read_config(fpath)
assert cp_config['cluster']['scheduler_address'] == '127.0.0.1:00000'
reset_config() # the effect of this is currently not tested, needs more extensive tests
cp_config = read_config(config_file_path)
assert cp_config['cluster']['scheduler_address'] == '127.0.0.1:52123'
```
#### File: PyHDX/tests/test_fitting.py
```python
import pytest
from pyhdx import PeptideMasterTable, HDXMeasurement
from pyhdx.fileIO import read_dynamx, csv_to_protein, csv_to_dataframe, save_fitresult, load_fitresult
from pyhdx.fitting import fit_rates_weighted_average, fit_gibbs_global, fit_gibbs_global_batch, \
fit_gibbs_global_batch_aligned, fit_rates_half_time_interpolate, GenericFitResult
from pyhdx.models import HDXMeasurementSet
from pyhdx.config import cfg
import numpy as np
import torch
import time
from dask.distributed import LocalCluster
from pathlib import Path
import pandas as pd
from pandas.testing import assert_series_equal
cwd = Path(__file__).parent
input_dir = cwd / 'test_data' / 'input'
output_dir = cwd / 'test_data' / 'output'
np.random.seed(43)
torch.manual_seed(43)
class TestSecBDataFit(object):
@classmethod
def setup_class(cls):
fpath_apo = input_dir / 'ecSecB_apo.csv'
fpath_dimer = input_dir / 'ecSecB_dimer.csv'
data = read_dynamx(fpath_apo, fpath_dimer)
control = ('Full deuteration control', 0.167*60)
cls.temperature, cls.pH = 273.15 + 30, 8.
pf = PeptideMasterTable(data, drop_first=1, ignore_prolines=True, remove_nan=False)
pf.set_control(control)
cls.hdxm_apo = HDXMeasurement(pf.get_state('SecB WT apo'), temperature=cls.temperature, pH=cls.pH)
cls.hdxm_dimer = HDXMeasurement(pf.get_state('SecB his dimer apo'), temperature=cls.temperature, pH=cls.pH)
data = pf.get_state('SecB WT apo')
reduced_data = data[data['end'] < 40]
cls.reduced_hdxm = HDXMeasurement(reduced_data)
cluster = LocalCluster()
cls.address = cluster.scheduler_address
def test_initial_guess_wt_average(self):
result = fit_rates_weighted_average(self.reduced_hdxm)
output = result.output
assert output.size == 100
check_rates = csv_to_protein(output_dir / 'ecSecB_reduced_guess.csv')
pd.testing.assert_series_equal(check_rates['rate'], output['rate'])
def test_initial_guess_half_time_interpolate(self):
result = fit_rates_half_time_interpolate(self.reduced_hdxm)
assert isinstance(result, GenericFitResult)
assert result.output.index.name == 'r_number'
assert result.output['rate'].mean() == pytest.approx(0.04343354509254464)
# todo additional tests:
# result = fit_rates_half_time_interpolate()
def test_dtype_cuda(self):
check_deltaG = csv_to_protein(output_dir / 'ecSecB_torch_fit.csv')
initial_rates = csv_to_dataframe(output_dir / 'ecSecB_guess.csv')
cfg.set('fitting', 'device', 'cuda')
gibbs_guess = self.hdxm_apo.guess_deltaG(initial_rates['rate']).to_numpy()
if torch.cuda.is_available():
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=1000, r1=2)
out_deltaG = fr_global.output
for field in ['deltaG', 'k_obs', 'covariance']:
assert_series_equal(check_deltaG[field], out_deltaG[self.hdxm_apo.name, field], rtol=0.01, check_dtype=False)
else:
with pytest.raises(AssertionError, match=r".* CUDA .*"):
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=1000, r1=2)
cfg.set('fitting', 'device', 'cpu')
cfg.set('fitting', 'dtype', 'float32')
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=1000, r1=2)
dg = fr_global.model.deltaG
assert dg.dtype == torch.float32
out_deltaG = fr_global.output
for field in ['deltaG', 'k_obs']:
assert_series_equal(check_deltaG[field], out_deltaG[self.hdxm_apo.name, field], rtol=0.01,
check_dtype=False, check_names=False)
cfg.set('fitting', 'dtype', 'float64')
def test_global_fit(self):
initial_rates = csv_to_dataframe(output_dir / 'ecSecB_guess.csv')
t0 = time.time() # Very crude benchmarks
gibbs_guess = self.hdxm_apo.guess_deltaG(initial_rates['rate']).to_numpy()
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=1000, r1=2)
t1 = time.time()
assert t1 - t0 < 5
out_deltaG = fr_global.output
check_deltaG = csv_to_protein(output_dir / 'ecSecB_torch_fit.csv')
for field in ['deltaG', 'covariance', 'k_obs']:
assert_series_equal(check_deltaG[field], out_deltaG[self.hdxm_apo.name, field], rtol=0.01,
check_names=False)
mse = fr_global.get_mse()
assert mse.shape == (1, self.hdxm_apo.Np, self.hdxm_apo.Nt)
@pytest.mark.skip(reason="Longer fit is not checked by default due to long computation times")
def test_global_fit_extended(self):
check_deltaG = csv_to_protein(output_dir / 'ecSecB_torch_fit_epochs_20000.csv')
initial_rates = csv_to_dataframe(output_dir / 'ecSecB_guess.csv')
gibbs_guess = self.hdxm_apo.guess_deltaG(initial_rates['rate']).to_numpy()
t0 = time.time() # Very crude benchmarks
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=20000, r1=2)
t1 = time.time()
assert t1 - t0 < 20
out_deltaG = fr_global.output
for field in ['deltaG', 'k_obs', 'covariance']:
assert_series_equal(check_deltaG[field], out_deltaG[field], rtol=0.01, check_dtype=False)
mse = fr_global.get_mse()
assert mse.shape == (self.hdxm_apo.Np, self.hdxm_apo.Nt)
@pytest.mark.skip(reason="Longer fit is not checked by default due to long computation times")
def test_global_fit_extended_cuda(self):
check_deltaG = csv_to_protein(output_dir / 'ecSecB_torch_fit_epochs_20000.csv')
initial_rates = csv_to_dataframe(output_dir / 'ecSecB_guess.csv')
gibbs_guess = self.hdxm_apo.guess_deltaG(initial_rates['rate']).to_numpy()
        # todo: allow context manager?
cfg.set('fitting', 'device', 'cuda')
cfg.set('fitting', 'dtype', 'float32')
fr_global = fit_gibbs_global(self.hdxm_apo, gibbs_guess, epochs=20000, r1=2)
out_deltaG = fr_global.output
for field in ['deltaG', 'k_obs']:
assert_series_equal(check_deltaG[field], out_deltaG[field], rtol=0.01, check_dtype=False)
cfg.set('fitting', 'device', 'cpu')
cfg.set('fitting', 'dtype', 'float64')
def test_batch_fit(self, tmp_path):
hdx_set = HDXMeasurementSet([self.hdxm_apo, self.hdxm_dimer])
guess = csv_to_dataframe(output_dir / 'ecSecB_guess.csv')
gibbs_guess = hdx_set.guess_deltaG([guess['rate'], guess['rate']])
fr_global = fit_gibbs_global_batch(hdx_set, gibbs_guess, epochs=1000)
fpath = Path(tmp_path) / 'fit_result_batch.csv'
fr_global.to_file(fpath)
df = csv_to_dataframe(fpath)
assert df.attrs['metadata'] == fr_global.metadata
output = fr_global.output
check_protein = csv_to_protein(output_dir / 'ecSecB_batch.csv')
states = ['SecB WT apo', 'SecB his dimer apo']
for state in states:
from pandas.testing import assert_series_equal
result = output[state]['deltaG']
test = check_protein[state]['deltaG']
assert_series_equal(result, test, rtol=0.1)
mse = fr_global.get_mse()
assert mse.shape == (hdx_set.Ns, hdx_set.Np, hdx_set.Nt)
mock_alignment = {
'apo': 'MSEQNNTEMTFQIQRIYTKDI------------SFEAPNAPHVFQKDWQPEVKLDLDTASSQLADDVYEVVLRVTVTASLG-------------------EETAFLCEVQQGGIFSIAGIEGTQMAHCLGAYCPNILFPYARECITSMVSRG----TFPQLNLAPVNFDALFMNYLQQQAGEGTEEHQDA',
'dimer': 'MSEQNNTEMTFQIQRIYTKDISFEAPNAPHVFQKDWQPEVKLDLDTASSQLADDVY--------------EVVLRVTVTASLGEETAFLCEVQQGGIFSIAGIEGTQMAHCLGA----YCPNILFPAARECIASMVARGTFPQLNLAPVNFDALFMNYLQQQAGEGTEEHQDA-----------------',
}
hdx_set.add_alignment(list(mock_alignment.values()))
gibbs_guess = hdx_set.guess_deltaG([guess['rate'], guess['rate']])
aligned_result = fit_gibbs_global_batch_aligned(hdx_set, gibbs_guess, r1=2, r2=5, epochs=1000)
output = aligned_result.output
check_protein = csv_to_protein(output_dir / 'ecSecB_batch_aligned.csv')
states = ['SecB WT apo', 'SecB his dimer apo']
for state in states:
from pandas.testing import assert_series_equal
result = output[state]['deltaG']
test = check_protein[state]['deltaG']
assert_series_equal(result, test, rtol=0.1)
``` |
{
"source": "Jhsmit/SmitSuite",
"score": 3
} |
#### File: SmitSuite/smitsuite/_old_utils.py
```python
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
def _get_appdata_path():
import ctypes
from ctypes import wintypes, windll
CSIDL_APPDATA = 26
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND,
ctypes.c_int,
wintypes.HANDLE,
wintypes.DWORD,
wintypes.LPCWSTR]
path_buf = wintypes.create_unicode_buffer(wintypes.MAX_PATH)
result = _SHGetFolderPath(0, CSIDL_APPDATA, 0, 0, path_buf)
return path_buf.value
def dropbox_home():
from platform import system
import base64
import os
    import platform
    _system = system()
    if platform.platform() == 'Windows-8-6.2.9200':  # HP laptop
host_db_path = os.path.join(os.getenv('LOCALAPPDATA'),
'Dropbox',
'host.db')
elif _system in ('Windows', 'cli'):
host_db_path = os.path.join(_get_appdata_path(),
'Dropbox',
'host.db')
elif _system in ('Linux', 'Darwin'):
host_db_path = os.path.expanduser('~'
'/.dropbox'
'/host.db')
else:
raise RuntimeError('Unknown system={}'
.format(_system))
if not os.path.exists(host_db_path):
raise RuntimeError("Config path={} doesn't exists"
.format(host_db_path))
with open(host_db_path, 'r') as f:
data = f.read().split()
return base64.b64decode(data[1])
def set_wkdir(path=''):
import os
dropbox_path = dropbox_home()
os.chdir(dropbox_path + '/wkdir' + '/' + path)
def rolling_window(a, w):
#http://stackoverflow.com/questions/6811183/rolling-window-for-1d-arrays-in-numpy
shape = a.shape[:-1] + (a.shape[-1] - w + 1, w)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
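# Illustrative usage of rolling_window above (not part of the original file):
#   rolling_window(np.arange(5), 3) -> array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])
# i.e. a strided (n - w + 1, w) view of the input without copying the data.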
def plot_line(*args, **kwargs):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(*args, **kwargs)
plt.show()
def plot_2d(*args, **kwargs):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(*args, **kwargs)
plt.show()
#jasco files
class JascoFile(object):
def __init__(self, path):
self.path = path
from jwslib import read_file
t = read_file(self.path)
self.header = t[1]
if t[0] != 0:
print t[1]
raise Exception("Invalid file")
if len(t[2]) == 1:
self.y_data = np.array(t[2][0])
self.y_data_norm = (self.y_data - self.y_data.min()) / (self.y_data.max() - self.y_data.min())
else:
raise Exception("Multiple channels")
self.x_data = np.linspace(self.header.x_for_first_point, self.header.x_for_last_point, num=self.header.point_number)
def export_ascii(self):
export_data = np.column_stack((self.x_data, self.y_data, self.y_data_norm))
name = os.path.splitext(self.path)[0] + '.txt'
np.savetxt(name, export_data, fmt='%10.5f')
def export_plot(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.x_data, self.y_data, linewidth=2)
ax.set_xlabel("Wavelength (nm)")
ax.set_ylabel("Intensity (a.u.)")
ax.set_title(os.path.basename(self.path))
plt.savefig(os.path.splitext(self.path)[0] + '.png')
plt.close()
def gen_files():
for path, subdir, files in os.walk(os.getcwd()):
for name in files:
yield os.path.join(path, name)
def filter_files(ext, startwith=None, endwith=None, notendwith=None):
for filename in gen_files():
b_ext = os.path.splitext(filename)[1] == ext
filebasename = os.path.splitext(os.path.basename(filename))[0]
b_endwith, b_notendwith, b_startwith = True, True, True
if startwith:
b_startwith = filebasename[:len(startwith)] == startwith
if endwith:
            b_endwith = filebasename[-len(endwith):] == endwith
if notendwith:
b_notendwith = filebasename[-len(notendwith):] != notendwith
bools = np.array([b_ext, b_startwith, b_endwith, b_notendwith])
if np.all(bools):
yield filename
def batch_jws():
files = [filename for filename in gen_files() if os.path.splitext(filename)[1] == '.jws']
for f in files:
print f
js_file = JascoFile(f)
js_file.export_ascii()
js_file.export_plot()
def rec_split(path):
rest, tail = os.path.split(path)
while rest not in ('', os.path.sep, 'C:\\'):
yield tail
rest, tail = os.path.split(rest)
if __name__ == '__main__':
import platform
print platform.platform()
import sys
print sys.getwindowsversion()
print os.getenv('LOCALAPPDATA')
print _get_appdata_path()
print dropbox_home()
```
#### File: SmitSuite/smitsuite/utils.py
```python
import os
import numpy as np
import re
from symfit import Parameter, Variable, Fit, exp
class CreateDict(dict):
def __getitem__(self, item):
try:
return super(CreateDict, self).__getitem__(item)
except KeyError:
self[item] = CreateDict()
return super(CreateDict, self).__getitem__(item)
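# Illustrative usage of CreateDict above (not from the original source): missing
# keys are created on the fly as nested CreateDict instances, so deep assignment
# works without pre-building the intermediate levels:
#   d = CreateDict()
#   d['run']['settings'] = 1  # no KeyError; d['run'] is auto-created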
def model_gauss2d(a_val, x_mu_val, y_mu_val, sig_x_val, sig_y_val, base, has_base=True):
a = Parameter(name='a', value=a_val)
sig_x = Parameter(name='sig_x', value=sig_x_val)
sig_y = Parameter(name='sig_y', value=sig_y_val)
x_mu = Parameter(name='x_mu', value=x_mu_val)
y_mu = Parameter(name='y_mu', value=y_mu_val)
if has_base:
b = Parameter(name='b', value=base)
else:
b = base
x_var = Variable(name='x_var')
y_var = Variable(name='y_var')
z_var = Variable(name='z_var')
model = {z_var: a * exp(-(((x_var - x_mu) ** 2 / (2 * sig_x ** 2)) + ((y_var - y_mu) ** 2 / (2 * sig_y ** 2)))) + b}
return model
def fit_gauss2d(arr):
Y, X = np.indices(arr.shape)
total = arr.sum()
x = (X * arr).sum() / total
y = (Y * arr).sum() / total
col = arr[:, int(y)]
width_x = np.sqrt(np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
row = arr[int(x), :]
width_y = np.sqrt(np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
base = 0
idx = np.argmax(arr)
y_mu, x_mu = np.unravel_index(idx, arr.shape)
print(arr.max(), x_mu, y_mu, width_x, width_y, base)
model = model_gauss2d(arr.max(), x_mu, y_mu, width_x, width_y, base, has_base=False)
fit = Fit(model, z_var=arr, x_var=X, y_var=Y)
return fit.execute(), fit.model
# All credits to giltay
#https://stackoverflow.com/questions/120656/directory-listing-in-python
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def splitname(d):
return os.path.splitext(os.path.basename(d))[0]
def find_max_square(img, corner=None):
"""
find the largest subarray in img where all elements are 1
corner can be any of 'lower_right', 'upper_right', 'lower_left', 'upper_left' to indicate which (if any)
corner of the images is all 1
"""
ymax, xmax = img.shape
img = img.astype('int')
def get_idx(diffs, y):
try:
return np.where(diffs[y] == 1)[0][0]
except IndexError:
return np.nan
if corner == 'lower_right':
diffs = np.abs(np.diff(img, axis=1))
ys = np.arange(0, ymax).astype(int)
xs = np.array([get_idx(diffs, y) for y in ys]).astype(int)
areas = np.nan_to_num((ymax - ys) * (xmax - xs - 1))
i = np.where(areas == areas.max())[0][0]
min1, max1, min2, max2 = ys[i], ymax, xs[i] + 1, xmax
elif corner == 'upper_right':
diffs = np.abs(np.diff(img, axis=1))
ys = np.arange(0, ymax).astype(int)
xs = np.array([get_idx(diffs, y) for y in ys]).astype(int)
areas = np.nan_to_num(ys * (xmax - xs))
i = np.where(areas == areas.max())[0][0]
min1, max1, min2, max2 = 0, ys[i], xs[i], xmax
elif corner == 'lower_left':
diffs = np.abs(np.diff(img, axis=1))
ys = np.arange(0, ymax).astype(int)
xs = np.array([get_idx(diffs, y) for y in ys]).astype(int)
areas = np.nan_to_num((xs + 1) * (ymax - ys))
i = np.where(areas == areas.max())[0][0]
min1, max1, min2, max2 = ys[i], ymax, 0, xs[i] + 1
elif corner == 'upper_left':
diffs = np.abs(np.diff(img, axis=1))
ys = np.arange(0, ymax).astype(int)
xs = np.array([get_idx(diffs, y) for y in ys]).astype(int)
areas = np.nan_to_num((xs + 1) * (ys + 1))
i = np.where(areas == areas.max())[0][0]
min1, max1, min2, max2 = 0, ys[i] + 1, 0, xs[i] + 1
else:
return ValueError('Invalid value for corner')
selection = img[min1:max1, min2:max2]
assert np.all(selection)
return min1, max1, min2, max2
def gen_files(folder=None, ext=None):
if not folder:
folder = os.getcwd()
for path, subdir, files in os.walk(folder):
for name in files:
if ext:
if os.path.splitext(name)[1] == ext:
yield os.path.join(path, name)
else:
yield os.path.join(path, name)
#todo deprecate endswith, startswith
def filter_files(file_list, ext=None, regex=None, endswith=None, startswith=None):
#todo return list not gen
for f in file_list:
filename = os.path.basename(f)
basename, f_ext = os.path.splitext(filename)
b = True
if ext:
if not ext == f_ext:
b = False
if regex: #todo re.match(...)
match = re.search(regex, filename)
if not match:
b = False
if endswith:
if not basename[-len(endswith):] == endswith:
b = False
if startswith:
pass
if b:
yield f
``` |
{
"source": "jhsoby/citationhunt",
"score": 2
} |
#### File: citationhunt/handlers/common.py
```python
import chdb
import config
import flask
import contextlib
from datetime import datetime
import functools
def get_db(lang_code):
localized_dbs = getattr(flask.g, '_localized_dbs', {})
db = localized_dbs.get(lang_code, None)
if db is None:
db = localized_dbs[lang_code] = chdb.init_db(lang_code)
flask.g._localized_dbs = localized_dbs
return db
@contextlib.contextmanager
def log_time(operation):
before = datetime.now()
yield
after = datetime.now()
ms = (after - before).microseconds / 1000.
flask.current_app.logger.debug('%s took %.2f ms', operation, ms)
def get_stats_db():
db = getattr(flask.g, '_stats_db', None)
if db is None:
db = flask.g._stats_db = chdb.init_stats_db()
return db
def validate_lang_code(handler):
@functools.wraps(handler)
def wrapper(lang_code = '', *args, **kwds):
flask.request.lang_code = lang_code
if lang_code not in config.lang_code_to_config:
response = flask.redirect(
flask.url_for('citation_hunt', lang_code = 'en',
**flask.request.args))
if flask.request.path != '/':
response.headers['Location'] += flask.request.path
return response
return handler(lang_code, *args, **kwds)
return wrapper
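# Illustrative note (not from the original source): a request with an unsupported
# language code is redirected to the 'en' version of Citation Hunt; the original
# query parameters are kept and, for non-root requests, the request path is
# appended to the redirect Location header.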
```
#### File: citationhunt/scripts/print_unsourced_pageids_from_wikipedia.py
```python
import os
import sys
_upper_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
if _upper_dir not in sys.path:
sys.path.append(_upper_dir)
import chdb
import config
def print_unsourced_ids_from_wikipedia():
cfg = config.get_localized_config()
db = chdb.init_wp_replica_db()
cursor = db.cursor()
categories = set([cfg.citation_needed_category])
while True:
cursor.execute(
'SELECT cl_from, cl_type FROM categorylinks WHERE (' +
' OR '.join(['cl_to = %s'] * len(categories)) + ')', categories)
subcategories = set()
for page_id, type in cursor:
if type == 'page':
print page_id
elif type == 'subcat':
subcategories.add(page_id)
if not subcategories:
break
# need to convert the page ids of subcategories into page
# titles so we can query recursively
cursor.execute(
'SELECT page_title FROM page WHERE (' +
' OR '.join(['page_id = %s'] * len(subcategories)) + ')',
subcategories)
categories = set([r[0] for r in cursor])
if __name__ == '__main__':
print_unsourced_ids_from_wikipedia()
```
#### File: citationhunt/snippet_parser/fr.py
```python
from __future__ import unicode_literals
from base import *
def handle_drapeau(template):
return template.get(1)
def handle_date(template):
year = None
if len(template.params) >= 3:
try:
year = int(sp(template.params[2]))
except ValueError:
pass
if isinstance(year, int):
# assume {{date|d|m|y|...}}
return ' '.join(sp(template.params[:3]))
elif template.params:
# assume {{date|d m y|...}}
return sp(template.params[0])
return ''
def handle_s(template):
if not template.params:
return ''
ret = sp(template.params[0]).upper()
if len(template.params) == 2 and sp(template.params[1]) == 'er':
        ret += 'ᵉʳ'
    else:
        ret += 'ᵉ'
    if template.name != 'siècle':
        ret += ' siècle'
if template.name.matches('-s'):
ret += ' av. J.-C'
return ret
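# Illustrative examples for handle_s above (not from the original source, using the
# repaired superscript characters): {{s|XX}} becomes 'XXᵉ siècle' and {{-s|IV}}
# becomes 'IVᵉ siècle av. J.-C'.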
def handle_phonetique(template):
if not template.params:
return ''
return sp(template.params[0])
def handle_citation(template):
if template.params:
        return '« ' + sp(template.params[0]) + ' »'
def handle_quand(template):
return ''.join(sp(p) for p in template.params if not p.showkey)
def handle_lesquelles(template):
# quand and lesquelles are basically the same template
return handle_quand(template)
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if template.name.matches('unitรฉ'):
return ' '.join(sp(template.params[:2]))
elif template.name.matches('date'):
return handle_date(template)
elif matches_any(template, ('s', '-s', 's-', 'siรจcle')):
return handle_s(template)
elif template.name.matches('phonรฉtique'):
return handle_phonetique(template)
elif template.name.matches('citation'):
return handle_citation(template)
elif template.name.matches('quand'):
return handle_quand(template)
elif template.name.matches('lesquelles'):
return handle_lesquelles(template)
elif template.name.matches('drapeau'):
return handle_drapeau(template)
return super(SnippetParser, self).strip_template(
template, normalize, collapse)
``` |
{
"source": "JH-Soft-Technology/pyinels",
"score": 2
} |
#### File: pyinels/api/__init__.py
```python
import asyncio
import logging
from functools import partial
from pyinels.device import Device
from pyinels.const import (
ATTR_DOWN,
ATTR_GROUP,
ATTR_ID,
ATTR_SHUTTER,
ATTR_TEMP,
ATTR_THERM,
ATTR_TYPE,
ATTR_UP,
DEVICE_TYPE_DICT,
INELS_BUS_ATTR_DICT
)
from pyinels.exception import (
ApiConnectionException,
ApiDataTypeException,
ApiException
)
from xmlrpc.client import ServerProxy
_LOGGER = logging.getLogger(__name__)
class Api:
"""Class of iNels BUS."""
def __init__(self, host, port, version):
"""Initialize Api class."""
self.__host = host
self.__port = port
self.__version = version
self.__proxy = None
self.__devices = None
self.loop = asyncio.get_event_loop()
@property
def proxy(self):
"""Proxy of the bus server."""
if isinstance(self._Api__proxy, ServerProxy):
return self.__proxy
self.__proxy = self.__conn()
return self.__proxy
@property
def devices(self):
"""Loaded devices by getAllDevices."""
return self.__devices
def set_devices(self, devices):
"""Set device prop."""
self.__devices = devices
def __conn(self):
"""Instantient Api iNels BUS connection class."""
try:
con = ServerProxy(self.__host + ":" + str(self.__port))
return con
except BlockingIOError as err:
raise ApiConnectionException(err.errno, err.strerror, err)
except Exception as err:
raise ApiException(
"common_exception", "Exception occur", err)
async def ping(self):
"""Check connection iNels BUS with ping."""
return await self.loop.run_in_executor(None, self.proxy.ping)
async def getPlcIp(self):
"""Get Ip address of PLC."""
return await self.loop.run_in_executor(None, self.proxy.getPlcIP)
async def getRooms(self):
"""List of all rooms from Connection server website."""
return await self.loop.run_in_executor(None, self.proxy.getRooms)
async def getRoomDevicesRaw(self, room_name):
"""List of all devices in deffined room."""
return await self.loop.run_in_executor(
None, self.proxy.getRoomDevices, room_name)
async def getRoomDevices(self, room_name):
"""List of all devices in defined room."""
return await self.__roomDevicesToJson(room_name)
async def getAllDevices(self):
"""Get all devices from all rooms."""
devices = []
rooms = await self.getRooms()
# go trough all rooms
for room in rooms:
room_devices = await self.getRoomDevices(room)
# go trough all devices in room
for room_dev in room_devices:
is_in = False
                # check whether a duplicate device occurred
for device in devices:
if device.id == room_dev.id:
is_in = True
break
# not presented in the list, then append
if is_in is False:
devices.append(room_dev)
return devices
async def read(self, device_ids):
"""Get the value from the proxy by device id."""
if not isinstance(device_ids, list):
raise ApiDataTypeException(
'readDeviceData', f'{device_ids} is not a list!')
return await self.__readDeviceData(device_ids)
async def write(self, device, value):
"""Write data to multiple devices."""
if not hasattr(device, 'id'):
raise ApiDataTypeException(
'readDeviceData', f'{device} has no id')
try:
await self.__writeValues(value)
except Exception as err:
raise ApiException("write_proxy", err)
async def __writeValues(self, command):
"""Write data to the proxy."""
        await self.loop.run_in_executor(None, self.proxy.writeValues, command)
async def __roomDevicesToJson(self, room_name):
"""Create json object from devices listed in preffered room."""
d_type = None
devices = []
raw_list = await self.getRoomDevicesRaw(room_name)
devices_list = raw_list.split('\n')
for item in devices_list:
start = len(item) - 1
end = len(item)
if start > 0:
if item[start:end] == ":":
d_type = item[0:start]
else:
json_dev = item.split('" ')
obj = {}
obj[INELS_BUS_ATTR_DICT.get(ATTR_GROUP)] = room_name
for prop in json_dev:
frag = prop.split("=")
obj[frag[0]] = frag[1].replace("\"", " ").strip()
obj[INELS_BUS_ATTR_DICT
.get(ATTR_TYPE)] = DEVICE_TYPE_DICT.get(d_type)
obj = self.__recognizeAndSetUniqueIdToDevice(obj)
devices.append(Device(obj, self))
return devices
def __recognizeAndSetUniqueIdToDevice(self, raw_device):
"""Some of the devices does not have unique id
presented in inels attribute. We need do create
one from other unique attributes."""
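        # Illustrative example (not from the original source): a shutter whose raw
        # attributes are up="A_up" and down="A_down" gets the synthetic id
        # "A_up_A_down", while a therm device reuses its temperature attribute as id.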
def set_shutter_id(dev):
"""Set the id to the shutter."""
dev[INELS_BUS_ATTR_DICT.get(
ATTR_ID)] = dev[INELS_BUS_ATTR_DICT.get(ATTR_UP)] + \
"_" + dev[INELS_BUS_ATTR_DICT.get(ATTR_DOWN)]
return dev
def set_therm_id(dev):
"""Set the id to the therms."""
dev[INELS_BUS_ATTR_DICT.get(
ATTR_ID)] = dev[INELS_BUS_ATTR_DICT.get(ATTR_TEMP)]
return dev
        # use a switch to create an identifier inside the raw data
        # from useful attributes
if INELS_BUS_ATTR_DICT.get(ATTR_ID) not in raw_device:
switcher = {
ATTR_SHUTTER: partial(set_shutter_id, raw_device),
ATTR_THERM: partial(set_therm_id, raw_device)
}
fnc = switcher.get(raw_device[INELS_BUS_ATTR_DICT.get(ATTR_TYPE)])
# call selected function to set the identifier
raw_device = fnc()
return raw_device
async def __readDeviceData(self, device_names):
"""Reading devices data from proxy."""
return await self.loop.run_in_executor(
None, self.proxy.read, device_names)
```
#### File: pyinels/device/pyShutter.py
```python
from pyinels.device.pyBase import pyBase
# from pyinels.pyTimer import pyTimer
from pyinels.const import (
ATTR_DOWN,
ATTR_UP,
ATTR_STOP,
ATTR_SWITCH_ON,
ATTR_SWITCH_OFF,
DIRECTIONS_DICT,
RANGE_BLIND,
SUPPORT_OPEN,
SUPPORT_CLOSE,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
SUPPORT_OPEN_TILT,
SUPPORT_CLOSE_TILT,
SUPPORT_STOP_TILT,
STATE_OPEN,
STATE_CLOSING,
STATE_OPENING,
# STATE_CLOSED
)
MIN_RANGE = RANGE_BLIND[0]
MAX_RANGE = RANGE_BLIND[1]
class pyShutter(pyBase):
"""Inels class shutter."""
    def __init__(self, device):
        """Initialize shutter."""
        super().__init__(device)
# self._timer = pyTimer()
# self.__time_to_stop = 0
# self.__last_position = MAX_RANGE
@property
def state(self):
"""State where the shutter is."""
up_device = self.up
down_device = self.down
up_on = up_device == ATTR_SWITCH_ON \
and down_device == ATTR_SWITCH_OFF
down_on = up_device == ATTR_SWITCH_OFF \
and down_device == ATTR_SWITCH_ON
# if self.should_stop:
# if up_on and not down_on:
# state = STATE_OPEN
# elif not up_on and down_on:
# state = STATE_CLOSED
# else:
return (STATE_OPENING if up_on and not down_on else STATE_CLOSING
if not up_on and down_on else STATE_OPEN)
# @property
# def should_stop(self):
# """It is watching if the time to stop evaluate or not"""
# # stop the shutter
# result = True
# if self._timer.is_running:
# # timer is still working
# self._timer.update_tick()
# # when the timer reach of counting, then stop it
# # otherwise return false
# if self._timer.elapsed_time < 0:
# self._timer.stop()
# else:
# result = False
# return result
@property
def supported_features(self):
"""Definition what the devices supports."""
return SUPPORT_OPEN \
| SUPPORT_CLOSE \
| SUPPORT_SET_POSITION \
| SUPPORT_STOP \
| SUPPORT_OPEN_TILT \
| SUPPORT_CLOSE_TILT \
| SUPPORT_STOP_TILT
# @property
# def current_position(self):
# """Current position of the shutter."""
# # It is calculated from the time to close the shutter,
# # defined with pull up or pull down fnc called.
# # 0 - fully closed - MIN_RANGE
# # 100 - fully opened - MAX_RANGE
# state = self.state
# position = self.__last_position
# # this is a situation when is not set the timer to count
# if self.__time_to_stop == 0:
# position = MAX_RANGE if state is STATE_CLOSING else MIN_RANGE
# else:
# percent = 0
# # calculate the position based on time to stop and current
# # tick of the timer
# if self._timer.tick is not None:
# tick = int(self._timer.tick)
# percent = percent if tick == 0 else (
# tick / self.__time_to_stop) * 100
# percent = int(MAX_RANGE if percent > MAX_RANGE else percent)
# # when the timer stops and last position is the same as one
# # of the range side then return the last position
# elif self._timer.tick is None and \
# (self.__last_position == MIN_RANGE \
# or self.__last_position
# == MAX_RANGE):
# return self.__last_position
# if state is STATE_CLOSING:
# position = MIN_RANGE if position < MIN_RANGE \
# else MAX_RANGE - percent
# elif state is STATE_OPENING:
# position = MAX_RANGE if position > MAX_RANGE \
# else MIN_RANGE + percent
# elif state is STATE_CLOSED:
# position = MIN_RANGE
# elif state is STATE_OPEN:
# position = MAX_RANGE
# self.__last_position = position
# return int(self.__last_position)
async def pull_up(self, stop_after=None):
"""Turn up the shutter."""
# self.__set_time_to_stop(stop_after)
# self._timer.start(self.__time_to_stop)
await self.__call_service(DIRECTIONS_DICT.get(ATTR_UP))
async def pull_down(self, stop_after=None):
""" Turn down the shutter."""
# self.__set_time_to_stop(stop_after)
# self._timer.start(self.__time_to_stop)
await self.__call_service(DIRECTIONS_DICT.get(ATTR_DOWN))
async def stop(self):
""" Stop the shutter."""
# if self._timer.is_running:
# self._timer.stop()
await self.__call_service(DIRECTIONS_DICT.get(ATTR_STOP))
async def __call_service(self, direction):
"""Internal call of the device write value."""
if direction == DIRECTIONS_DICT.get(ATTR_STOP):
await self._device.write_value(self.__set_value(
ATTR_SWITCH_OFF, ATTR_SWITCH_OFF))
elif direction == DIRECTIONS_DICT.get(ATTR_UP):
await self._device.write_value(self.__set_value(
ATTR_SWITCH_OFF, ATTR_SWITCH_ON))
elif direction == DIRECTIONS_DICT.get(ATTR_DOWN):
await self._device.write_value(self.__set_value(
ATTR_SWITCH_ON, ATTR_SWITCH_OFF))
def __set_value(self, down, up):
"""Set the value to call service."""
return {f'{self._device.down}': down, f'{self._device.up}': up}
# def __set_time_to_stop(self, stop_after):
# """Set time to stop private function."""
# if stop_after is not None:
# self.__time_to_stop = stop_after
``` |
{
"source": "jhson989/jhML",
"score": 3
} |
#### File: playground/step4/test4.py
```python
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from jhML import Variable
def sphere(x, y):
z = x ** 2 + y ** 2
return z
def matyas(x, y):
z = 0.26 * (x ** 2 + y ** 2) - 0.48 * x * y
return z
def goldstein(x, y):
z = (1 + (x + y + 1)**2 * (19 - 14*x + 3*x**2 - 14*y + 6*x*y + 3*y**2)) * \
(30 + (2*x - 3*y)**2 * (18 - 32*x + 12*x**2 + 48*y - 36*x*y + 27*y**2))
return z
x = Variable(np.array(1.0))
y = Variable(np.array(1.0))
z = goldstein(x, y) # sphere(x, y) / matyas(x, y)
z.backward()
print(x.grad, y.grad)
```
#### File: nn_test/xor/test_xor.py
```python
import os
import sys
sys.path.append( os.path.dirname( os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) ) ))
import jhML
import jhML.functions as F
import numpy as np
def clear_grad(parameters):
for param in parameters:
param.clear_grad()
def update_grad(parameters, lr=1e-1):
for param in parameters:
if param.grad is not None:
param.data -= lr*param.grad
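# Note (not from the original source): update_grad above is plain SGD; each
# parameter is updated in place as data -= lr * grad after backward().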
def forward(x, parameters):
activ = F.relu
W1, b1, W2, b2, W3, b3, W4, b4 = parameters
t = x
t = activ((F.linear(t, W1, b1)))
t = activ((F.linear(t, W2, b2)))
# t = activ((F.linear(t, W3, b3)))
return F.linear(t, W4, b4)
if __name__ == "__main__":
x = [[0, 0],
[0, 1],
[1, 0],
[1, 1]]
gt = [[0],
[1],
[1],
[0]]
W1 = jhML.Variable(np.random.randn(2, 4))
b1 = jhML.Variable(np.random.randn(4))
W2 = jhML.Variable(np.random.randn(4, 12))
b2 = jhML.Variable(np.random.randn(12))
W3 = jhML.Variable(np.random.randn(12, 12))
b3 = jhML.Variable(np.random.randn(12))
W4 = jhML.Variable(np.random.randn(12, 1))
b4 = jhML.Variable(np.random.randn(1))
parameters = [W1, b1, W2, b2, W3, b3, W4, b4]
num_epoch = int(1e+5)
for epoch in range(num_epoch):
clear_grad(parameters)
pred = forward(x, parameters)
loss = F.mean_squared_error(pred, gt)
loss.backward()
if epoch % (num_epoch/100) == 0:
print("%d/%d" % (epoch, num_epoch))
print(pred)
update_grad(parameters, lr=8e-5)
for num in x:
pred = forward([num], parameters)
print("%d xor %d = %.4f" % (num[0], num[1], pred.data[0][0]))
``` |
{
"source": "jhsrcmh/h2o",
"score": 2
} |
#### File: h2o/py/find_cloud.py
```python
import time, sys, json, re, getpass, requests, argparse, os, shutil
parser = argparse.ArgumentParser(description='Creates h2o-node.json for cloud cloning from existing cloud')
# parser.add_argument('-v', '--verbose', help="verbose", action='store_true')
parser.add_argument('-f', '--flatfile',
help="Use this flatfile to start probes\ndefaults to pytest_flatfile-<username> which is created by python tests",
type=str)
parser.add_argument('-hdfs_version', '--hdfs_version',
choices=['0.20.2', 'cdh3', 'cdh4', 'cdh4_yarn', 'mapr2.1.3', 'mapr3.0.1'],
default='cdh3',
help="Use this for setting hdfs_version in the cloned cloud",
type=str)
parser.add_argument('-hdfs_config', '--hdfs_config',
default=None,
help="Use this for setting hdfs_config in the cloned cloud",
type=str)
parser.add_argument('-hdfs_name_node', '--hdfs_name_node',
default='192.168.1.176',
help="Use this for setting hdfs_name_node in the cloned cloud. Can be ip, ip:port, hostname, hostname:port",
type=str)
parser.add_argument('-expected_size', '--expected_size',
default=None,
help="Require that the discovered cloud has this size, at each discovered node, otherwise exception",
type=int)
args = parser.parse_args()
# "hdfs_version": "cdh3",
# "hdfs_config": "None",
# "hdfs_name_node": "192.168.1.176",
#********************************************************************
# shutil.rmtree doesn't work on windows if the files are read only.
# On unix the parent dir has to not be readonly too.
# May still be issues with owner being different, like if 'system' is the guy running?
# Apparently this escape function on errors is the way shutil.rmtree can
# handle the permission issue. (do chmod here)
# But we shouldn't have read-only files. So don't try to handle that case.
def handleRemoveError(func, path, exc):
# If there was an error, it could be due to windows holding onto files.
# Wait a bit before retrying. Ignore errors on the retry. Just leave files.
# Ex. if we're in the looping cloud test deleting sandbox.
excvalue = exc[1]
print "Retrying shutil.rmtree of sandbox (2 sec delay). Will ignore errors. Exception was", excvalue.errno
time.sleep(2)
try:
func(path)
except OSError:
pass
LOG_DIR = 'sandbox'
# Create a clean sandbox, like the normal cloud builds...because tests
# expect it to exist (they write to sandbox/commands.log)
# find_cloud.py creates h2o-node.json for tests to use with -ccj
# the side-effect they also want is clean sandbox, so we'll just do it here too
def clean_sandbox():
if os.path.exists(LOG_DIR):
# shutil.rmtree fails to delete very long filenames on Windoze
#shutil.rmtree(LOG_DIR)
# was this on 3/5/13. This seems reliable on windows+cygwin
### os.system("rm -rf "+LOG_DIR)
shutil.rmtree(LOG_DIR, ignore_errors=False, onerror=handleRemoveError)
# it should have been removed, but on error it might still be there
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
def dump_json(j):
return json.dumps(j, sort_keys=True, indent=2)
def create_url(addr, port, loc):
return 'http://%s:%s/%s' % (addr, port, loc)
def do_json_request(addr=None, port=None, jsonRequest=None, params=None, timeout=5, **kwargs):
if params is not None:
paramsStr = '?' + '&'.join(['%s=%s' % (k,v) for (k,v) in params.items()])
else:
paramsStr = ''
url = create_url(addr, port, jsonRequest)
print 'Start ' + url + paramsStr
try:
r = requests.get(url, timeout=timeout, params=params, **kwargs)
# the requests json decoder might fail if we didn't get something good
rjson = r.json()
if not isinstance(rjson, (list,dict)):
# probably good
print "INFO: h2o json responses should always be lists or dicts"
rjson = None
# may get legitimate 404
# elif '404' in r.text:
# print "INFO: json got 404 result"
# rjson = None
elif r.status_code != requests.codes.ok:
print "INFO: Could not decode any json from the request. code:" % r.status_code
rjson = None
except requests.ConnectionError, e:
print "INFO: json got ConnectionError or other exception"
rjson = None
# print rjson
return rjson
#********************************************************************
# we create this node level state that h2o_import.py uses to create urls
# normally it's set during build_cloud.
# As a hack, we're going to force it, from arguments to find_cloud
# everything should just work then...the runner*sh know what hdfs clusters they're targeting
# so can tell us (and they built the cloud anyhow!..so they are the central place that's deciding
# hdfs_config is only used on ec2, but we'll support it in case find_cloud.py is used there.
# "hdfs_version": "cdh3",
# "hdfs_config": "None",
# "hdfs_name_node": "192.168.1.176",
# force these to the right state, although I should update h2o*py stuff, so they're not necessary
# they force the prior settings to be ignore (we used to do stuff if hdfs was enabled, writing to hdfs
# this was necessary to override the settings above that caused that to happen
# "use_hdfs": true,
# "use_maprfs": false,
def probe_node(line, h2oNodes):
http_addr, sep, port = line.rstrip('\n').partition(":")
http_addr = http_addr.lstrip('/') # just in case it's an old-school flatfile with leading /
if port == '':
port = '54321'
if http_addr == '':
http_addr = '127.0.0.1'
# print "http_addr:", http_addr, "port:", port
probes = []
gc = do_json_request(http_addr, port, 'Cloud.json', timeout=3)
if gc is None:
return probes
consensus = gc['consensus']
locked = gc['locked']
cloud_size = gc['cloud_size']
node_name = gc['node_name']
cloud_name = gc['cloud_name']
nodes = gc['nodes']
if args.expected_size and (cloud_size!=args.expected_size):
raise Exception("cloud_size %s at %s disagrees with -expected_size %s" % (cloud_size, node_name, args.expected_size))
for n in nodes:
# print "free_mem_bytes (GB):", "%0.2f" % ((n['free_mem_bytes']+0.0)/(1024*1024*1024))
# print "tot_mem_bytes (GB):", "%0.2f" % ((n['tot_mem_bytes']+0.0)/(1024*1024*1024))
java_heap_GB = (n['tot_mem_bytes']+0.0)/(1024*1024*1024)
java_heap_GB = int(round(java_heap_GB,0))
# print "java_heap_GB:", java_heap_GB
# print 'num_cpus:', n['num_cpus']
name = n['name'].lstrip('/')
# print 'name:', name
### print dump_json(n)
ip, sep, port = name.partition(':')
# print "ip:", ip
# print "port:", port
if not ip or not port:
raise Exception("bad ip or port parsing from h2o get_cloud nodes 'name' %s" % n['name'])
# creating the list of who this guy sees, to return
probes.append(name)
node_id = len(h2oNodes)
use_maprfs = 'mapr' in args.hdfs_version
use_hdfs = not use_maprfs # we default to enabling cdh3 on 192.168.1.176
node = {
'http_addr': ip,
'port': int(port), # print it as a number for the clone ingest
'java_heap_GB': java_heap_GB,
# this list is based on what tests actually touch (fail without these)
'node_id': node_id,
'remoteH2O': 'true',
'sandbox_error_was_reported': 'false', # odd this is touched..maybe see about changing h2o.py
'sandbox_ignore_errors': 'false',
# /home/0xcustomer will have the superset of links for resolving remote paths
# the cloud may be started by 0xdiag or 0xcustomer, but this is just going to be
# used for looking for buckets (h2o_import.find_folder_and_filename() will
# (along with other rules) try to look in # /home/h2o.nodes[0].username when trying
# to resolve a path to a bucket
'username': '0xcustomer', # most found clouds are run by 0xcustomer. This doesn't really matter
'redirect_import_folder_to_s3_path': 'false', # no..we're not on ec2
'redirect_import_folder_to_s3n_path': 'false', # no..we're not on ec2
'delete_keys_at_teardown': 'true', # yes we want each test to clean up after itself
'use_hdfs': use_hdfs,
'use_maprfs': use_maprfs,
'h2o_remote_buckets_root': 'false',
'hdfs_version': args.hdfs_version, # something is checking for this. I guess we could set this in tests as a hack
'hdfs_name_node': args.hdfs_name_node, # hmm. do we have to set this to do hdfs url generation correctly?
}
# this is the total list so far
if name not in h2oNodes:
h2oNodes[name] = node
print "Added node %s to probes" % name
# we use this for our one level of recursion
return probes # might be empty!
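# Illustrative note (not from the original source): probe_node() asks one node for
# its Cloud.json, registers every member it reports into h2oNodes, and returns the
# member names so the caller can probe one extra level of the cloud.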
#********************************************************************
def flatfile_name():
if args.flatfile:
a = args.flatfile
else:
a = 'pytest_flatfile-%s' %getpass.getuser()
print "Starting with contents of ", a
return a
#********************************************************************
# hostPortList.append("/" + h.addr + ":" + str(port + ports_per_node*i))
# partition returns a 3-tuple as (LHS, separator, RHS) if the separator is found,
# (original_string, '', '') if the separator isn't found
with open(flatfile_name(), 'r') as f:
possMembers = f.readlines()
f.close()
h2oNodes = {}
probes = set()
tries = 0
for n1, possMember in enumerate(possMembers):
tries += 1
if possMember not in probes:
probes.add(possMember)
members2 = probe_node(possMember, h2oNodes)
for n2, member2 in enumerate(members2):
tries += 1
if member2 not in probes:
probes.add(member2)
probe_node(member2, h2oNodes)
print "\nWe did %s tries" % tries
print "len(probe):", len(probes)
# get rid of the name key we used to hash to it
h2oNodesList = [v for k, v in h2oNodes.iteritems()]
print "Checking for two h2os at same ip address"
ips = {}
count = {}
for h in h2oNodesList:
# warn for more than 1 h2o on the same ip address
# error for more than 1 h2o on the same port (something is broke!)
    # but ip+port is how we name them, so that can't happen here
ip = h['http_addr']
if ip in ips:
# FIX! maybe make this a fail exit in the future?
count[ip] += 1
print "\nWARNING: appears to be %s h2o's at the same IP address" % count[ip]
print "initial:", ips[ip]
print "another:", h, "\n"
else:
ips[ip] = h
count[ip] = 1
print "Writing h2o-nodes.json"
expandedCloud = {
'cloud_start':
{
'time': 'null',
'cwd': 'null',
'python_test_name': 'null',
'python_cmd_line': 'null',
'config_json': 'null',
'username': 'null',
'ip': 'null',
},
'h2o_nodes': h2oNodesList
}
print "Cleaning sandbox, (creating it), so tests can write to commands.log normally"
clean_sandbox()
with open('h2o-nodes.json', 'w+') as f:
f.write(json.dumps(expandedCloud, indent=4))
```
#### File: py/testdir_multi_jvm/test_parse_fs_schmoo_fvec.py
```python
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_browse as h2b
def write_syn_dataset(csvPathname, rowCount, headerData, rowData):
dsf = open(csvPathname, "w+")
dsf.write(headerData + "\n")
for i in range(rowCount):
dsf.write(rowData + "\n")
dsf.close()
# append!
def append_syn_dataset(csvPathname, rowData):
with open(csvPathname, "a") as dsf:
dsf.write(rowData + "\n")
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# fails with 3
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3, java_heap_GB=4, use_flatfile=True)
else:
h2o_hosts.build_cloud_with_hosts()
h2b.browseTheCloud()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud(h2o.nodes)
def test_parse_fs_schmoo_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = "syn_prostate.csv"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
headerData = "ID,CAPSULE,AGE,RACE,DPROS,DCAPS,PSA,VOL,GLEASON"
# rowData = "1,0,65,1,2,1,1.4,0,6"
rowData = "1,0,65,1,2,1,1,0,6"
totalRows = 99860
write_syn_dataset(csvPathname, totalRows, headerData, rowData)
print "This is the same format/data file used by test_same_parse, but the non-gzed version"
print "\nSchmoo the # of rows"
print "Updating the key and hex_key names for each trial"
for trial in range (200):
append_syn_dataset(csvPathname, rowData)
totalRows += 1
start = time.time()
key = csvFilename + "_" + str(trial)
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key)
print "trial #", trial, "totalRows:", totalRows, "parse end on ", \
csvFilename, 'took', time.time() - start, 'seconds'
h2o_cmd.runInspect(key=hex_key)
# only used this for debug to look at parse (red last row) on failure
### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_multi_jvm/test_parse_multiprocess_fvec.py
```python
import unittest, sys, random, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_hosts, h2o_jobs, h2o_exec as h2e
import h2o_util
import multiprocessing, os, signal, time
from multiprocessing import Process, Queue, Pool
print "Back to Basics with a multiprocessing twist!"
DO_EXEC_QUANT = False
DO_SUMMARY = True
DO_XORSUM = False
DO_BIGFILE = True
DO_IRIS = True
# overrides the calc below if not None
DO_PARSE_ALSO = True
UPLOAD_PARSE_DIFF_NODES = True
RANDOM_HEAP = False
PARSE_NOPOLL = False
thresholdsList = [0.5]
thresholds = ",".join(map(str, thresholdsList))
# problem with keyboard interrupt described
# http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/
def function_no_keyboard_intr(result_queue, function, *args):
signal.signal(signal.SIGINT, signal.SIG_IGN)
result_queue.put(function(*args))
def parseit(n, pattern, hex_key, timeoutSecs=60, retryDelaySecs=1, pollTimeoutSecs=30):
h2i.parse_only(node=h2o.nodes[n], pattern=pattern, hex_key=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noPoll=PARSE_NOPOLL)
print pattern, "started in parseit (nopoll)"
return 'Done'
def uploadit(n, bucket, path, src_key, hex_key, timeoutSecs=60, retryDelaySecs=1, pollTimeoutSecs=30):
    # apparently the putfile has some conflicts, but after the put completes, it's okay
# to be parallel with the src_key if it has a different name
(importResult, importPattern) = h2i.import_only(node=h2o.nodes[n],
bucket=bucket, path=path, schema='put',
src_key=src_key,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
print "uploadit:", importPattern, hex_key
# do the parse on the next node
if UPLOAD_PARSE_DIFF_NODES:
np1 = (n+1) % len(h2o.nodes)
else:
np1 = n
if DO_PARSE_ALSO:
parseit(np1, importPattern, hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
h2o.nodes[0].rebalance(source=hex_key, after=hex_key + "_2", chunks=32)
return (importPattern, hex_key)
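# Illustrative note (not from the original source): uploadit() puts the file via
# node n and, when UPLOAD_PARSE_DIFF_NODES is set, triggers the parse on the next
# node ((n+1) % len(h2o.nodes)), then rebalances the parsed key into 32 chunks.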
pool = Pool(16)
class Basic(unittest.TestCase):
def tearDown(self):
pool.close()
# pool.join()
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
print "Will build_cloud() with random heap size and do overlapped import folder/parse (groups)"
global SEED, localhost
SEED = h2o.setup_random_seed()
if RANDOM_HEAP:
tryHeap = random.randint(4,28)
else:
tryHeap = 28
# print "\n", tryHeap,"GB heap, 1 jvm per host, import 192.168.1.176 hdfs, then parse"
print "\n", tryHeap,"GB heap, 1 jvm per host, import, then parse"
localhost = h2o.decide_if_localhost()
h2o.beta_features = True # for the beta tab in the browser
if (localhost):
h2o.build_cloud(node_count=3, java_heap_GB=4, base_port=54323,
# use_hdfs=True, hdfs_name_node='192.168.1.176', hdfs_version='cdh3'
)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1, java_heap_GB=tryHeap, base_port=54321,
# use_hdfs=True, hdfs_name_node='192.168.1.176', hdfs_version='cdh3'
)
@classmethod
def tearDownClass(cls):
pool.close()
# pool.join()
h2o.tear_down_cloud()
def test_parse_multiprocess_fvec(self):
h2o.beta_features = True
# hdfs://<name node>/datasets/manyfiles-nflx-gz/file_1.dat.gz
# don't raise exception if we find something bad in h2o stdout/stderr?
# h2o.nodes[0].sandboxIgnoreErrors = True
OUTSTANDING = min(10, len(h2o.nodes))
if DO_IRIS:
global DO_BIGFILE
DO_BIGFILE = False
bucket = 'smalldata'
importFolderPath = "iris"
csvFilename = "iris2.csv"
csvFilePattern = "iris2.csv"
if localhost:
trialMax = 20
else:
trialMax = 100
elif DO_BIGFILE:
bucket = 'home-0xdiag-datasets'
importFolderPath = "standard"
csvFilename = "covtype20x.data"
csvFilePattern = "covtype20x.data"
trialMax = 2 * OUTSTANDING
else:
bucket = 'home-0xdiag-datasets'
importFolderPath = "standard"
csvFilename = "covtype.data"
csvFilePattern = "covtype.data"
trialMax = 40 * OUTSTANDING
# add one just to make it odd
# OUTSTANDING = min(10, len(h2o.nodes) + 1)
# don't have more than one source file per node OUTSTANDING? (think of the node increment rule)
# okay to reuse the src_key name. h2o deletes? use unique hex to make sure it's not reused.
# might go to unique src keys also ..oops have to, to prevent complaints about the key (lock)
# can't repeatedly import the folder
# only if not noPoll. otherwise parse isn't done
# I guess I have to use 'put' so I can name the src key unique, to get overlap
# I could tell h2o to not delete, but it's nice to get the keys in a new place?
# maybe rebalance? FIX! todo
parseTrial = 0
summaryTrial = 0
uploader_resultq = multiprocessing.Queue()
while parseTrial <= trialMax:
start = time.time()
uploaders = []
if not DO_IRIS:
assert OUTSTANDING<=10 , "we only have 10 links with unique names to covtype.data"
for o in range(OUTSTANDING):
src_key = csvFilename + "_" + str(parseTrial)
                hex_key = csvFilename + "_" + str(parseTrial) + ".hexxx"
# "key": "hdfs://192.168.1.176/datasets/manyfiles-nflx-gz/file_99.dat.gz",
# hacked hard ln so source keys would have different names? was getting h2o locking issues
if DO_IRIS:
csvPathname = importFolderPath + "/" + csvFilePattern
else:
csvPathname = importFolderPath + "/" + csvFilePattern + "_" + str(o)
start = time.time()
# walk the nodes
# if this rule is matched for exec/summary below, it should find the name okay? (npe with xorsum)
# summary2 not seeing it?
np = parseTrial % len(h2o.nodes)
retryDelaySecs=5 if DO_BIGFILE else 1
timeoutSecs=60 if DO_BIGFILE else 15
tmp = multiprocessing.Process(target=function_no_keyboard_intr,
args=(uploader_resultq, uploadit, np, bucket, csvPathname, src_key, hex_key, timeoutSecs, retryDelaySecs))
tmp.start()
uploaders.append(tmp)
parseTrial += 1
# now sync on them
for uploader in uploaders:
try:
uploader.join()
# don't need him any more
uploader.terminate()
(importPattern, hex_key) = uploader_resultq.get(timeout=2)
except KeyboardInterrupt:
print 'parent received ctrl-c'
for uploader in uploaders:
uploader.terminate()
uploader.join()
elapsed = time.time() - start
print "Parse group end at #", parseTrial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "We might have parses that haven't completed. The join just says we can reuse some files (parse still going)"
if PARSE_NOPOLL:
h2o_jobs.pollWaitJobs(timeoutSecs=180)
h2o_cmd.runStoreView()
# h2o_jobs.pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=15, retryDelaySecs=0.25)
if DO_PARSE_ALSO: # only if we parsed
print "These all go to node [0]"
# getting a NPE if I do xorsum (any exec?) ..just do summary for now..doesn't seem to have the issue
# suspect it's about the multi-node stuff above
for summaryTrial in range(trialMax):
# do last to first..to get race condition?
firstXorUll = None
firstQuantileUll = None
hex_key = csvFilename + "_" + str(summaryTrial) + ".hexxx"
if DO_EXEC_QUANT:
execExpr = "r2=c(1); r2=quantile(%s[,1], c(%s));" % (hex_key, thresholds)
(resultExec, fpResult) = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
ullResult = h2o_util.doubleToUnsignedLongLong(fpResult)
print "%30s" % "median ullResult (0.16x):", "0x%0.16x %s" % (ullResult, fpResult)
if firstQuantileUll:
self.assertEqual(ullResult, firstQuantileUll)
else:
firstQuantileUll = ullResult
if DO_XORSUM:
execExpr = "r2=c(1); r2=xorsum(%s[,1], c(%s));" % (hex_key, thresholds)
(resultExec, fpResult) = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
ullResult = h2o_util.doubleToUnsignedLongLong(fpResult)
print "%30s" % "xorsum ullResult (0.16x):", "0x%0.16x %s" % (ullResult, fpResult)
if firstXorUll:
self.assertEqual(ullResult, firstXorUll)
else:
firstXorUll = ullResult
if DO_SUMMARY:
h2o_cmd.runSummary(key=hex_key)
if __name__ == '__main__':
h2o.unit_main()
```
#### File: py/testdir_multi_jvm/test_parse_rand_schmoo2_fvec.py
```python
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_hosts, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
def write_syn_dataset(csvPathname, rowCount, headerData, rowData):
dsf = open(csvPathname, "w+")
dsf.write(headerData + "\n")
for i in range(rowCount):
dsf.write(rowData + "\n")
dsf.close()
# append!
def append_syn_dataset(csvPathname, rowData, num):
with open(csvPathname, "a") as dsf:
for i in range(num):
dsf.write(rowData + "\n")
def rand_rowData():
# UPDATE: maybe because of byte buffer boundary issues, single byte
# data is best? if we put all 0s or 1, then I guess it will be bits?
rowData = str(random.uniform(0,7))
for i in range(8):
rowData = rowData + "," + str(random.uniform(-1e59,1e59))
return rowData
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=4,use_flatfile=True)
else:
h2o_hosts.build_cloud_with_hosts()
h2b.browseTheCloud()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud(h2o.nodes)
def test_parse_rand_schmoo2_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = "syn_prostate.csv"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
headerData = "ID,CAPSULE,AGE,RACE,DPROS,DCAPS,PSA,VOL,GLEASON"
rowData = rand_rowData()
totalRows = 1
write_syn_dataset(csvPathname, totalRows, headerData, rowData)
print "This is the same format/data file used by test_same_parse, but the non-gzed version"
print "\nSchmoo the # of rows"
for trial in range (100):
rowData = rand_rowData()
num = random.randint(1, 10096)
append_syn_dataset(csvPathname, rowData, num)
totalRows += num
start = time.time()
# make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
key = csvFilename + "_" + str(trial)
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=70, pollTimeoutSecs=150)
print "trial #", trial, "totalRows:", totalRows, "last num:", num, \
"parse end on ", csvFilename, 'took', time.time() - start, 'seconds'
### h2o_cmd.runInspect(key=hex_key)
### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
``` |
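The schmoo test above grows a synthetic CSV by a random number of rows per trial and re-parses it under a unique key (see the comment about h2o caching). The file-growth half of that loop can be exercised on its own without an h2o cloud; the sketch below only illustrates that pattern, with smaller row counts:
```python
import os
import random
import tempfile

def rand_row(n_cols=9):
    return ",".join(str(random.uniform(-1e3, 1e3)) for _ in range(n_cols))

path = os.path.join(tempfile.mkdtemp(), "syn_prostate.csv")
with open(path, "w") as f:
    f.write("ID,CAPSULE,AGE,RACE,DPROS,DCAPS,PSA,VOL,GLEASON\n" + rand_row() + "\n")

total_rows = 1
for trial in range(3):
    num = random.randint(1, 100)
    with open(path, "a") as f:                       # append, as in append_syn_dataset
        f.writelines(rand_row() + "\n" for _ in range(num))
    total_rows += num
    print("trial", trial, "file now has", total_rows, "data rows")
```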
{
"source": "jhssilva/Smartie",
"score": 3
} |
#### File: Smartie/app/main_yfinance.py
```python
import yfinance as yf  # Library to connect to Yahoo Finance
# Edited: added lines 286-292 in base.py of yfinance (https://github.com/ranaroussi/yfinance/issues/208)
# Edited: commented out line 319 in base.py of yfinance because of the error "self._info['regularMarketPrice'] = self._info['regularMarketOpen'] KeyError: 'regularMarketOpen'"
# General Libraries
import json
import sys
import threading  # Library for threads
import time  # Library for time purposes
# Libraries from the project
import stock as stk
'''
Functions
'''
# Read the file with all stock symbols in the market (the file should be kept up to date, but unfortunately the API only allows one request per month)
def fileReadSymbolStocks():
f = open("Files/StockList.txt", "r")
p = json.loads(f.read())
f.close()
return p
# Create stock objects, each identified by its symbol and holding the ticker used for requests.
def creatingStocks(symbols):
    for x in symbols:
        stocks.append(stk.stock(x, yf.Ticker(x)))
'''
Get Functions
'''
# General info of a stock
def getStockInfo(stock):
return stock.getTicker().info
# Historical Market Data of a Stock
def getStockHistoricalMarketData(stock):
return stock.getTicker().history(period = "max", threads = False)
# Actions (Dividends, splits) of a stock
def getStockActions(stock):
return (stock.getTicker().actions)
# Dividends of a stock
def getStockDividends(stock):
return (stock.getTicker().dividends)
# Splits of a stock
def getStockSplits(stock):
return (stock.getTicker().splits)
# Financials of a stock
def getStockFinancials(stock):
return (stock.getTicker().financials)
# Quarterly financials of a stock
def getStockQuarterlyFinancials(stock):
return (stock.getTicker().quarterly_financials)
# Major holders of a stock
def getStockMajorHolders(stock):
return (stock.getTicker().major_holders)
# Institutional holders of a stock
def getStockInstitutionalHolders(stock):
return (stock.getTicker().institutional_holders)
# Balance sheet of a stock
def getStockBalanceSheet(stock):
return (stock.getTicker().balance_sheet)
# Quarterly balance sheet of a stock
def getStockQuarterlyBalanceSheet(stock):
return (stock.getTicker().quarterly_balance_sheet)
# Cashflow of a stock
def getStockCashFlow(stock):
return (stock.getTicker().cashflow)
# Quarterly Cashflow of a stock
def getStockQuarterlyCashFlow(stock):
return (stock.getTicker().quarterly_cashflow)
# Earnings of a stock
def getStockEarnings(stock):
return (stock.getTicker().earnings)
# Quarterly Earnings of a stock
def getStockQuarterlyEarnings(stock):
return (stock.getTicker().quarterly_earnings)
# Sustainability of a stock
def getStockSustainability(stock):
return (stock.getTicker().sustainability)
# Analysts recommendations of a stock
def getStockAnalystsRecommendations(stock):
return (stock.getTicker().recommendations)
# Next event (earnings, etc) of a stock
def getStockCalendar(stock):
return (stock.getTicker().calendar)
# get ISIN code - *experimental* of a stock
# ISIN = International Securities Identification Number
def getStock_ISIN_code(stock):
return (stock.getTicker().isin)
# get options expirations of a stock
def getStockOptions(stock):
return (stock.getTicker().options)
'''
Print to the screen Functions
'''
# Version of the Yfinance
def versionYfinance():
print('Yfinance = version = ' + yf.__version__)
''' Missing this one chapter '''
# get option chain for specific expiration
#opt = msft.option_chain('YYYY-MM-DD')
# data available via: opt.calls, opt.puts
# Menu options
def showMenu():
print("Menu")
print("1 - Show stock info")
print("2 - Show historical Market Data")
print("3 - Show Actions (dividends, splits)")
print("4 - Show dividends")
print("5 - Show splits")
print("6 - Show financials")
print("7 - Show major holders")
print("8 - Show Institutional holders")
print("9 - Show balance sheet")
print("10 - Show Cashflow")
print("11 - Show earnings")
print("12 - Show sustainability")
print("13 - Show analysys recommendations")
print("14 - Show next event (earnings, etc)")
print("15 - Show ISIN code")
print("Option: ")
'''
Handlers
'''
# Menu Handler
def menu():
run = True
while (run):
showMenu()
option = int(sys.stdin.readline())
if (option == 0):
run = False
exit(0)
elif (option == 1):
print("Stock Info")
print(getStockInfo(stocks[0]))
elif (option == 2):
print("Historical market data")
print(getStockHistoricalMarketData(stocks[0]))
elif (option == 3):
print("Actions (Dividends, splits)")
print(getStockActions(stocks[0]))
elif (option == 4):
print("Dividends")
print(getStockDividends(stocks[0]))
elif (option == 5):
print("Splits")
print(getStockSplits(stocks[0]))
elif (option == 6):
print("Financials")
print(getStockFinancials(stocks[1]))
print(getStockQuarterlyFinancials(stocks[2]))
elif (option == 7):
print("Major holders")
print(getStockMajorHolders(stocks[0]))
elif (option == 8):
print("Institutional holders")
print(getStockInstitutionalHolders(stocks[0]))
elif (option == 9):
print("Balance Sheet")
print(getStockBalanceSheet(stocks[0]))
print(getStockQuarterlyBalanceSheet(stocks[0]))
elif (option == 10):
print("Cashflow")
print(getStockCashFlow(stocks[0]))
print(getStockQuarterlyCashFlow(stocks[0]))
elif (option == 11):
print("Earnings")
print(getStockEarnings(stocks[0]))
print(getStockQuarterlyEarnings(stocks[0]))
elif (option == 12):
print("Sustainability")
print(getStockSustainability(stocks[0]))
elif (option == 13):
print("Analysys recommendations")
print(getStockAnalystsRecommendations(stocks[0]))
elif (option == 14):
print("Next event (earnings, etc)")
print(getStockCalendar(stocks[0]))
elif(option == 15):
print("ISIS code")
print(getStock_ISIN_code(stocks[0]))
else:
print("Not a valid option!")
time.sleep(5)
'''
Variables
'''
# Global Variables
stocks = [] # Array of Object's Stock
symbolStock = fileReadSymbolStocks() # All Symbol Stocks read from the file
versionYfinance() # Function that prints the version that it's operating
'''
MAIN LOGIC
'''
t = threading.Thread(target = creatingStocks, args = (symbolStock,)) #Thread for creating objects
t.daemon = True # Needed so that the thread stops when the program shuts down
t.start() #initializes the thread
menu() # initializes the menu
``` |
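The comments at the top of `main_yfinance.py` describe patching yfinance's `base.py` to work around a missing `'regularMarketOpen'` key. A less invasive alternative is to guard the lookup at call time; the sketch below is only an illustration of that idea (the ticker symbol and field names are arbitrary examples, and it needs network access plus a reasonably recent yfinance):
```python
import yfinance as yf

def safe_info(symbol, keys=("regularMarketPrice", "regularMarketOpen")):
    """Fetch ticker info and tolerate fields that some tickers/versions omit."""
    info = yf.Ticker(symbol).info or {}
    return {k: info.get(k) for k in keys}   # missing keys simply come back as None

if __name__ == "__main__":
    print(safe_info("MSFT"))
```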
{
"source": "jhsu98/ifb-wrapper",
"score": 2
} |
#### File: ifb-wrapper/tests/fixtures.py
```python
import ifb
import pytest
#####
#
# Fixtures: Different user roles
# (basic, form_builde, company, server)
#
#####
@pytest.fixture
def basic_user():
return ifb.IFB("","","")
@pytest.fixture
def form_builder():
return ifb.IFB("","","")
@pytest.fixture
def company_admin():
return ifb.IFB("","","")
@pytest.fixture
def server_admin():
return ifb.IFB("","","")
# - End Fixtures -----------------------
``` |
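The fixtures above are meant to be pulled into test modules, the same way `tests/test_dfa.py` later imports `dfa_client`. A hypothetical test consuming them could look like the sketch below; note the fixtures build `IFB` with blank credentials, so whether instantiation succeeds depends on how ifb-wrapper handles empty arguments.
```python
# test_roles.py — hypothetical example, not part of ifb-wrapper itself
import ifb
from fixtures import basic_user, server_admin   # importing makes the fixtures visible to pytest

def test_basic_user_is_ifb_client(basic_user):
    assert isinstance(basic_user, ifb.IFB)

def test_server_admin_is_ifb_client(server_admin):
    assert isinstance(server_admin, ifb.IFB)
```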
{
"source": "jhsu98/zws-py",
"score": 2
} |
#### File: zws-py/tests/test_dfa.py
```python
import pytest
from fixtures import dfa_client
import time
def test_describeResources(dfa_client):
result = dfa_client.describeResources()
assert len(result) > 0
def test_describeResource(dfa_client):
result = dfa_client.describeResource('Dataflows')
assert isinstance(result, dict)
def test_Dataflows_GET(dfa_client):
dfa_client.Dataflows('GET')
def test_Dataflow_GET(dfa_client):
result = dfa_client.Dataflows('GET')
if len(result.response) > 0:
dataflow = result.response[0]
result = dfa_client.Dataflows('GET', dataflow['_id'])
assert result.status_code == 200
def test_invalid_method(dfa_client):
with pytest.raises(ValueError):
dfa_client.Dataflows('PATCH')
```
#### File: zws-py/zerionAPI/ifb_utilities.py
```python
from zerionAPI import IFB
from pprint import pprint
import json
import os
import requests
import shutil
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def exportImages(api, profile_id, page_id, isRecursive=False, directory = '.'):
print(
'Getting page...',
result := api.Pages('GET', profile_id, page_id)
)
if result.status_code == 200:
count = 0
page = result.response
try:
directory = f'{directory}/{page["name"]}'
os.makedirs(directory, exist_ok=True)
except FileExistsError as e:
print(e)
pass
elements_field_grammar = 'name,data_type((="11")|(="18")|(="28")),data_size' if isRecursive else 'name,data_type((="11")|(="28"))'
print(
'Getting elements...',
result := api.Elements('GET', profile_id, page['id'], params={'fields': elements_field_grammar})
)
if result.status_code == 200 and len(result.response) > 0:
elements = result.response
image_elements = [element for element in elements if element['data_type'] in (11, 28)]
subform_elements = [element for element in elements if element['data_type'] == 18]
# Image Element Loop
if len(image_elements) > 0:
print('Getting records...')
result = api.Records('GET', profile_id, page['id'], params={'fields': ','.join([e['name'] for e in image_elements])})
if result.status_code == 200 and len(result.response) > 0:
records = result.response
total = int(result.headers.get('Total-Count'))
print(f'Retrieved {len(records)}/{total} records...')
while len(records) < total:
result = api.Records('GET', profile_id, page['id'], params={'fields': ','.join([e['name'] for e in image_elements]), 'offset': len(records)})
if result.status_code == 200 and len(result.response) > 0:
records += result.response
for record in records:
record_id = record['id']
elements = {key: record[key] for key in record if key != 'id' and record[key] != None}
for element in elements:
r = requests.get(record[element], verify=False, stream=True)
r.raw.decode_content = True
filename = f'{record_id}_{element}.{record[element].split(".")[-1]}'
filepath = f'{directory}/{filename}'
with open(filepath, 'wb') as f:
print(f'Exporting <{record[element]}> as "{filepath}"')
shutil.copyfileobj(r.raw, f)
else:
print('No records found...')
else:
print('No image elements found...')
# Subform Element Loop
if isRecursive and len(subform_elements) > 0:
for element in subform_elements:
print(f'Recursing into {element["name"]}...')
count += exportImages(api, profile_id, element['data_size'], isRecursive, directory=directory)
else:
                print('No subform elements found...')
return 0
else:
print('Page not found...')
return 0
if __name__ == "__main__":
print('Not directly accessible')
exit()
``` |
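`exportImages` above expects an already-authenticated `zerionAPI.IFB` client plus a profile and page id. A hypothetical driver is sketched below; the module path is assumed from the file's location, the ids are placeholders, and constructing/authenticating the `IFB` client is left to the caller because its constructor is not shown here.
```python
# Hypothetical driver; `api` must be an authenticated zerionAPI.IFB client.
from zerionAPI.ifb_utilities import exportImages   # module path assumed from the file location

def export_page_images(api, profile_id=12345, page_id=67890):
    # Recurse into subform elements and drop files under ./exports/<page name>/
    result = exportImages(api, profile_id, page_id, isRecursive=True, directory="./exports")
    print("exportImages returned:", result)
```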
{
"source": "Jh-SYSU/MolRep",
"score": 2
} |
#### File: MolRep/Experiments/experiments.py
```python
import os
import random
from MolRep.Models.losses import get_loss_func
from MolRep.Models.netWrapper import NetWrapper
from MolRep.Models.schedulers import build_lr_scheduler
from MolRep.Utils.config_from_dict import Config, DatasetConfig
from MolRep.Utils.utils import *
class Experiment:
"""
Experiment provides a layer of abstraction to avoid that all models implement the same interface
"""
def __init__(self, model_configuration, dataset_config, exp_path):
self.model_config = Config.from_dict(model_configuration) if isinstance(model_configuration, dict) else model_configuration
self.dataset_config = dataset_config
self.exp_path = exp_path
if not os.path.exists(exp_path):
os.makedirs(exp_path)
def run_valid(self, dataset, logger, other=None):
"""
This function returns the training and validation accuracy. DO WHATEVER YOU WANT WITH VL SET,
BECAUSE YOU WILL MAKE PERFORMANCE ASSESSMENT ON A TEST SET
:return: (training accuracy, validation accuracy)
"""
raise NotImplementedError('You must implement this function!')
def run_test(self, dataset, logger, other=None):
"""
This function returns the training and test accuracy
:return: (training accuracy, test accuracy)
"""
raise NotImplementedError('You must implement this function!')
class EndToEndExperiment(Experiment):
def __init__(self, model_configuration, dataset_config, exp_path):
super(EndToEndExperiment, self).__init__(model_configuration, dataset_config, exp_path)
def run_valid(self, dataset_getter, logger, other=None):
"""
This function returns the training and validation or test accuracy
:return: (training accuracy, validation/test accuracy)
"""
# print('dataset: ', dataset_getter.outer_k, dataset_getter.inner_k)
dataset = dataset_getter.get_dataset
model_class = self.model_config.model
optim_class = self.model_config.optimizer
stopper_class = self.model_config.early_stopper
clipping = self.model_config.gradient_clipping
loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)
shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True
train_loader, val_loader, scaler, features_scaler = dataset_getter.get_train_val(dataset, self.model_config['batch_size'],
shuffle=shuffle)
model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)
net = NetWrapper(model, dataset_configs=self.dataset_config, model_config=self.model_config,
loss_function=loss_fn)
optimizer = optim_class(model.parameters(),
lr=self.model_config['learning_rate'], weight_decay=self.model_config['l2'])
scheduler = build_lr_scheduler(optimizer, model_configs=self.model_config, num_samples=dataset.num_samples)
train_loss, train_metric, val_loss, val_metric, _ = net.train(train_loader=train_loader,
optimizer=optimizer, scheduler=scheduler,
clipping=clipping, scaler=scaler,
valid_loader=val_loader,
early_stopping=stopper_class,
logger=logger)
if other is not None and 'model_path' in other.keys():
save_checkpoint(path=other['model_path'], model=model, scaler=scaler, features_scaler=features_scaler)
return train_metric, train_loss, val_metric, val_loss
def run_test(self, dataset_getter, logger, other=None):
"""
This function returns the training and test accuracy. DO NOT USE THE TEST FOR TRAINING OR EARLY STOPPING!
:return: (training accuracy, test accuracy)
"""
dataset = dataset_getter.get_dataset
model_class = self.model_config.model
optim_class = self.model_config.optimizer
stopper_class = self.model_config.early_stopper
clipping = self.model_config.gradient_clipping
loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)
shuffle = False # Test dataloader should not shuffle.
train_loader, val_loader, scaler = dataset_getter.get_train_val(dataset, self.model_config['batch_size'],
shuffle=shuffle)
test_loader = dataset_getter.get_test(dataset, self.model_config['batch_size'], shuffle=shuffle)
model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)
net = NetWrapper(model, dataset_configs=self.dataset_config, model_config=self.model_config,
loss_function=loss_fn)
optimizer = optim_class(model.parameters(),
lr=self.model_config['learning_rate'], weight_decay=self.model_config['l2'])
scheduler = build_lr_scheduler(optimizer, model_configs=self.model_config, num_samples=dataset.num_samples)
_, train_metric, _, valid_metric, _ = \
net.train(train_loader=train_loader, valid_loader=val_loader,
optimizer=optimizer, scheduler=scheduler, clipping=clipping,
early_stopping=stopper_class, scaler=scaler,
logger=logger)
y_preds, y_labels, test_metric = net.test(test_loader=test_loader, scaler=scaler, logger=logger)
if other is not None and 'model_path' in other.keys():
save_checkpoint(path=other['model_path'], model=model, scaler=scaler)
return train_metric, valid_metric, test_metric
def run_independent_test(self, dataset_getter, logger, other=None):
"""
This function returns the training and test accuracy. DO NOT USE THE TEST FOR TRAINING OR EARLY STOPPING!
:return: (training accuracy, test accuracy)
"""
dataset = dataset_getter.get_dataset
shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True
model_class = self.model_config.model
loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)
shuffle = False # Test dataloader should not shuffle.
test_loader = dataset_getter.get_test(dataset, self.model_config['batch_size'], shuffle=shuffle)
model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)
if other is not None and 'model_path' in other.keys():
model = load_checkpoint(path=other['model_path'], model=model)
scaler, features_scaler = load_scalers(path=other['model_path'])
net = NetWrapper(model, dataset_configs=self.dataset_config, model_config=self.model_config,
loss_function=loss_fn)
y_preds, y_labels, test_metric = net.test(test_loader=test_loader, scaler=scaler, logger=logger)
return y_preds, y_labels, test_metric
```
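The docstrings above spell out the protocol these entry points assume: hyper-parameters are chosen with `run_valid` on the validation split, and `run_test` touches the held-out split exactly once at the end. The toy stand-in below only illustrates that calling pattern and the shapes of the returned tuples; it is not MolRep code.
```python
# Toy illustration of the "select on validation, test once" contract.
class ToyExperiment:
    def __init__(self, lr):
        self.lr = lr
    def run_valid(self, dataset_getter=None, logger=None):
        val_metric = 1.0 - abs(self.lr - 0.01) * 10      # pretend lr=0.01 is best
        return 0.90, 0.10, val_metric, 0.20              # train_metric, train_loss, val_metric, val_loss
    def run_test(self, dataset_getter=None, logger=None):
        return 0.90, 0.88, 0.85                          # train_metric, valid_metric, test_metric

candidates = [ToyExperiment(lr) for lr in (0.1, 0.01, 0.001)]
best = max(candidates, key=lambda e: e.run_valid()[2])   # selection uses validation only
print(best.run_test())                                   # single, final test evaluation
```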
#### File: utils/fuseprop/chemprop_utils.py
```python
from typing import List
import torch
import torch.nn as nn
from tqdm import trange
from MolRep.Models.scalers import StandardScaler
from MolRep.Experiments.Graph_Data.MPNN_data import *
def get_data_from_smiles(smiles: List[str], logger = None) -> MoleculeDataset:
"""
Converts SMILES to a MoleculeDataset.
:param smiles: A list of SMILES strings.
:param logger: Logger.
:return: A MoleculeDataset with all of the provided SMILES.
"""
data = MoleculeDataset([MoleculeDatapoint(smiles=smile) for smile in smiles])
return data
def predict(model: nn.Module,
data: MoleculeDataset,
batch_size: int,
scaler: StandardScaler = None) -> List[List[float]]:
"""
Makes predictions on a dataset using an ensemble of models.
:param model: A model.
:param data: A MoleculeDataset.
:param batch_size: Batch size.
:param scaler: A StandardScaler object fit on the training targets.
:return: A list of lists of predictions. The outer list is examples
while the inner list is tasks.
"""
model.eval()
preds = []
num_iters, iter_step = len(data), batch_size
for i in range(0, num_iters, iter_step):
# Prepare batch
mol_batch = MoleculeDataset(data[i:i + batch_size])
smiles_batch, features_batch = mol_batch.smiles(), mol_batch.features()
# Run model
batch = smiles_batch
batch_data = (batch, features_batch, None)
with torch.no_grad():
batch_preds = model(batch_data)
batch_preds = batch_preds.data.cpu().numpy()
# Inverse scale if regression
if scaler is not None:
batch_preds = scaler.inverse_transform(batch_preds)
# Collect vectors
batch_preds = batch_preds.tolist()
preds.extend(batch_preds)
return preds
```
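`predict` above walks the dataset in fixed-size slices under `torch.no_grad()` and, for regression, applies the scaler's inverse transform before returning. The same batching/inverse-scaling pattern, stripped of the MoleculeDataset machinery, looks like this (dummy model and standardisation constants, purely illustrative):
```python
import numpy as np
import torch
import torch.nn as nn

def predict_in_batches(model, X, batch_size=32, target_mean=0.0, target_std=1.0):
    """Run `model` over `X` in slices and undo target standardisation."""
    model.eval()
    preds = []
    with torch.no_grad():
        for i in range(0, len(X), batch_size):
            batch = torch.as_tensor(X[i:i + batch_size], dtype=torch.float32)
            out = model(batch).cpu().numpy()
            preds.extend((out * target_std + target_mean).tolist())  # inverse of (y - mean) / std
    return preds

model = nn.Linear(4, 1)
print(len(predict_in_batches(model, np.random.rand(10, 4), batch_size=3)))   # -> 10
```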
#### File: MolRep/Explainer/explainerExperiments.py
```python
import os
import random
import sklearn
import collections
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from MolRep.Models.losses import get_loss_func
from MolRep.Explainer.explainerNetWrapper import ExplainerNetWrapper
from MolRep.Models.schedulers import build_lr_scheduler
from MolRep.Utils.config_from_dict import Config
from MolRep.Explainer.Metrics import attribution_metric as att_metrics
from MolRep.Utils.utils import *
GREEN_COL = (0, 1, 0)
RED_COL = (1, 0, 0)
class ExplainerExperiments:
def __init__(self, model_configuration, dataset_config, exp_path):
self.model_config = Config.from_dict(model_configuration) if isinstance(model_configuration, dict) else model_configuration
self.dataset_config = dataset_config
self.exp_path = exp_path
if not os.path.exists(exp_path):
os.makedirs(exp_path)
def run_valid(self, dataset, attribution, logger, other=None):
"""
This function returns the training and test accuracy. DO NOT USE THE TEST FOR TRAINING OR EARLY STOPPING!
:return: (training accuracy, test accuracy)
"""
shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True
model_class = self.model_config.model
optim_class = self.model_config.optimizer
stopper_class = self.model_config.early_stopper
clipping = self.model_config.gradient_clipping
loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)
shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True
train_loader, scaler = dataset.get_train_loader(self.model_config['batch_size'],
shuffle=shuffle)
model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)
net = ExplainerNetWrapper(model, attribution, dataset_configs=self.dataset_config, model_config=self.model_config,
loss_function=loss_fn)
optimizer = optim_class(model.parameters(),
lr=self.model_config['learning_rate'], weight_decay=self.model_config['l2'])
scheduler = build_lr_scheduler(optimizer, model_configs=self.model_config, num_samples=dataset.num_samples)
train_loss, train_metric, _, _, _, _, _ = net.train(train_loader=train_loader,
optimizer=optimizer, scheduler=scheduler,
clipping=clipping, scaler=scaler,
early_stopping=stopper_class,
logger=logger)
if other is not None and 'model_path' in other.keys():
save_checkpoint(path=other['model_path'], model=model, scaler=scaler)
return train_metric
def molecule_importance(self, dataset, attribution, logger, testing=True, other=None):
model_class = self.model_config.model
loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)
model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)
assert 'model_path' in other.keys()
model = load_checkpoint(path=other['model_path'], model=model)
scaler, features_scaler = load_scalers(path=other['model_path'])
net = ExplainerNetWrapper(model, attribution, dataset_configs=self.dataset_config, model_config=self.model_config,
loss_function=loss_fn)
if testing:
test_loader = dataset.get_test_loader()
else:
test_loader = dataset.get_all_dataloader()
y_preds, y_labels, results, atom_importance, bond_importance = net.explainer(test_loader=test_loader, scaler=scaler, logger=logger)
return results, atom_importance, bond_importance
def visualization(self, dataset, atom_importance, bond_importance, threshold=1e-4, set_weights=True, svg_dir=None, vis_factor=1.0, img_width=400, img_height=200, testing=True):
smiles_list = dataset.get_smiles_list(testing=testing)
att_probs = self.preprocessing_attributions(smiles_list, atom_importance, bond_importance, normalizer='MinMaxScaler')
for idx, smiles in enumerate(smiles_list):
mol = Chem.MolFromSmiles(smiles)
cp = Chem.Mol(mol)
atom_imp = att_probs[idx]
highlightAtomColors, cp = self.determine_atom_col(cp, atom_imp, eps=0.1, set_weights=True)
highlightAtoms = list(highlightAtomColors.keys())
highlightBondColors = self.determine_bond_col(highlightAtomColors, mol)
highlightBonds = list(highlightBondColors.keys())
highlightAtomRadii = {
# k: np.abs(v) * vis_factor for k, v in enumerate(atom_imp)
k: 0.1 * vis_factor for k, v in enumerate(atom_imp)
}
rdDepictor.Compute2DCoords(cp, canonOrient=True)
drawer = rdMolDraw2D.MolDraw2DCairo(img_width, img_height)
drawer.DrawMolecule(
cp,
highlightAtoms=highlightAtoms,
highlightAtomColors=highlightAtomColors,
highlightAtomRadii=highlightAtomRadii,
highlightBonds=highlightBonds,
highlightBondColors=highlightBondColors,
)
drawer.FinishDrawing()
drawer.WriteDrawingText(os.path.join(svg_dir, f"{idx}.png"))
# svg = drawer.GetDrawingText().replace("svg:", "")
# svg = None
# svg_list.append(svg)
# return svg_list
return
def preprocessing_attributions(self, smiles_list, atom_importance, bond_importance, normalizer='MinMaxScaler'):
att_probs = []
for idx, smiles in enumerate(smiles_list):
mol = Chem.MolFromSmiles(smiles)
atom_imp = atom_importance[idx]
if bond_importance is not None:
bond_imp = bond_importance[idx]
bond_idx = []
for bond in mol.GetBonds():
bond_idx.append((bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()))
for (atom_i_idx, atom_j_idx), b_imp in zip(bond_idx, bond_imp):
atom_imp[atom_i_idx] += b_imp / 2
atom_imp[atom_j_idx] += b_imp / 2
att_probs.append(atom_imp)
att_probs = [att[:, -1] if att_probs[0].ndim > 1 else att for att in att_probs]
att_probs = self.normalize_attributions(att_probs, normalizer)
return att_probs
def determine_atom_col(self, cp, atom_importance, eps=1e-5, set_weights=True):
""" Colors atoms with positive and negative contributions
as green and red respectively, using an `eps` absolute
threshold.
Parameters
----------
mol : rdkit mol
atom_importance : np.ndarray
importances given to each atom
bond_importance : np.ndarray
importances given to each bond
version : int, optional
1. does not consider bond importance
2. bond importance is taken into account, but fixed
3. bond importance is treated the same as atom importance, by default 2
eps : float, optional
threshold value for visualization - absolute importances below `eps`
will not be colored, by default 1e-5
Returns
-------
dict
atom indexes with their assigned color
"""
atom_col = {}
for idx, v in enumerate(atom_importance):
if v > eps:
                atom_col[idx] = GREEN_COL
if v < -eps:
atom_col[idx] = RED_COL
if set_weights:
cp.GetAtomWithIdx(idx).SetProp("atomNote","%.3f"%(v))
return atom_col, cp
def determine_bond_col(self, atom_col, mol):
"""Colors bonds depending on whether the atoms involved
share the same color.
Parameters
----------
atom_col : np.ndarray
coloring assigned to each atom index
mol : rdkit mol
Returns
-------
dict
bond indexes with assigned color
"""
bond_col = {}
for idx, bond in enumerate(mol.GetBonds()):
atom_i_idx, atom_j_idx = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
if atom_i_idx in atom_col and atom_j_idx in atom_col:
if atom_col[atom_i_idx] == atom_col[atom_j_idx]:
bond_col[idx] = atom_col[atom_i_idx]
return bond_col
def evaluate_attributions(self, dataset, atom_importance, bond_importance, binary=False):
att_true = dataset.get_attribution_truth()
stats = collections.OrderedDict()
smiles_list = dataset.get_smiles_list()
att_probs = self.preprocessing_attributions(smiles_list, atom_importance, bond_importance)
if binary:
opt_threshold = -1
stats['ATT F1'] = np.nanmean(
att_metrics.attribution_f1(att_true, att_probs))
stats['ATT ACC'] = np.nanmean(
att_metrics.attribution_accuracy(att_true, att_probs))
else:
opt_threshold = att_metrics.get_optimal_threshold(att_true, att_probs)
att_binary = [np.array([1 if att>opt_threshold else 0 for att in att_prob]) for att_prob in att_probs]
stats['ATT AUROC'] = np.nanmean(
att_metrics.attribution_auroc(att_true, att_probs))
stats['ATT F1'] = np.nanmean(
att_metrics.attribution_f1(att_true, att_binary))
stats['ATT ACC'] = np.nanmean(
att_metrics.attribution_accuracy(att_true, att_binary))
return stats, opt_threshold
def evaluate_cliffs(self, dataset, atom_importance, bond_importance):
smiles_list = dataset.get_smiles_list()
att_true_pair = dataset.get_attribution_truth()
att_probs = self.preprocessing_attributions(smiles_list, atom_importance, bond_importance)
att_probs_reset, att_true = [], []
smiles_list = list(smiles_list)
for idx in range(len(att_true_pair)):
smiles_1 = att_true_pair[idx]['SMILES_1']
smiles_2 = att_true_pair[idx]['SMILES_2']
idx_1 = smiles_list.index(smiles_1)
idx_2 = smiles_list.index(smiles_2)
att_probs_reset.append(att_probs[idx_1])
att_true_1 = att_true_pair[idx]['attribution_1']
att_true.append(att_true_1)
att_probs_reset.append(att_probs[idx_2])
att_true_2 = att_true_pair[idx]['attribution_2']
att_true.append(att_true_2)
opt_threshold = att_metrics.get_optimal_threshold(att_true, att_probs_reset, multi=True)
att_binary = [np.array([1 if att>0.5 else -1 if att<(-0.5) else 0 for att in att_prob]) for att_prob in att_probs_reset]
stats = collections.OrderedDict()
stats['ATT F1'] = np.nanmean(
att_metrics.attribution_f1(att_true, att_binary))
stats['ATT ACC'] = np.nanmean(
att_metrics.attribution_accuracy(att_true, att_binary))
return stats, opt_threshold
def normalize_attributions(self, att_list, positive = False, normalizer='MinMaxScaler'):
"""Normalize all nodes to 0 to 1 range via quantiles."""
all_values = np.concatenate(att_list)
all_values = all_values[all_values > 0] if positive else all_values
if normalizer == 'QuantileTransformer':
normalizer = sklearn.preprocessing.QuantileTransformer()
else:
normalizer = sklearn.preprocessing.MinMaxScaler()
normalizer.fit(all_values.reshape(-1, 1))
new_att = []
for att in att_list:
normed_nodes = normalizer.transform(att.reshape(-1, 1)).ravel()
new_att.append(normed_nodes)
return new_att
```
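`normalize_attributions` above fits a single scaler on the concatenation of every molecule's atom attributions and then transforms each molecule with it, so values stay comparable across molecules. A stripped-down, runnable version of that idea using the same scikit-learn scaler:
```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def normalize_attributions(att_list):
    """Fit one 0-1 scaler over all attributions, then rescale each molecule with it."""
    scaler = MinMaxScaler()
    scaler.fit(np.concatenate(att_list).reshape(-1, 1))          # global min/max
    return [scaler.transform(att.reshape(-1, 1)).ravel() for att in att_list]

mols = [np.array([0.1, -0.4, 2.0]), np.array([0.7, 0.0])]
print([att.round(2) for att in normalize_attributions(mols)])    # shared scale across both molecules
```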
#### File: MolRep/Explainer/explainer.py
```python
# dataset_configuration = {
# 'name': args.dataset_name,
# 'path': args.dataset_path,
# 'smiles_column': args.smiles_column,
# 'target_columns': args.target_columns,
# 'task_type': args.task_type,
# 'metric_type': "auc" if args.task_type == 'Classification' else "rmse",
# 'split_type': "random"
# }
# dataset_wrapper = DatasetWrapper(dataset_config=dataset_configuration,
# model_name=model_configuration.exp_name,
# split_dir=split_dir, features_dir=data_dir,
# outer_k=outer_k, inner_k=inner_k, holdout_test_size=holdout_test_size)
# return train_loader, model
# def molecule_importance(dataloader, model, args):
```
#### File: Models/graph_based/ECC.py
```python
import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import MessagePassing, global_mean_pool
from torch_geometric.utils import degree, dense_to_sparse
from torch_geometric.nn import ECConv
from torch_scatter import scatter_add
def _make_block_diag(mats, mat_sizes):
block_diag = torch.zeros(sum(mat_sizes), sum(mat_sizes))
for i, (mat, size) in enumerate(zip(mats, mat_sizes)):
cum_size = sum(mat_sizes[:i])
block_diag[cum_size:cum_size+size,cum_size:cum_size+size] = mat
return block_diag
class ECCLayer(nn.Module):
def __init__(self, dim_input, dim_embedding, dropout=0.):
super().__init__()
fnet1 = nn.Sequential(nn.Linear(1, 16),
nn.ReLU(),
nn.Linear(16, dim_embedding * dim_input))
fnet2 = nn.Sequential(nn.Linear(1, 16),
nn.ReLU(),
nn.Linear(16, dim_embedding * dim_embedding))
fnet3 = nn.Sequential(nn.Linear(1, 16),
nn.ReLU(),
nn.Linear(16, dim_embedding * dim_embedding))
self.conv1 = ECConv(dim_input, dim_embedding, nn=fnet1)
self.conv2 = ECConv(dim_embedding, dim_embedding, nn=fnet2)
self.conv3 = ECConv(dim_embedding, dim_embedding, nn=fnet3)
self.bn1 = nn.BatchNorm1d(dim_embedding)
self.bn2 = nn.BatchNorm1d(dim_embedding)
self.bn3 = nn.BatchNorm1d(dim_embedding)
self.dropout = dropout
def forward(self, x, edge_index, edge_attr):
edge_attr = edge_attr.unsqueeze(-1) if edge_attr.dim() == 1 else edge_attr
x = F.relu(self.conv1(x, edge_index, edge_attr))
x = F.dropout(self.bn1(x), p=self.dropout, training=self.training)
x = F.relu(self.conv2(x, edge_index, edge_attr))
x = F.dropout(self.bn2(x), p=self.dropout, training=self.training)
x = F.relu(self.conv3(x, edge_index, edge_attr))
x = F.dropout(self.bn3(x), p=self.dropout, training=self.training)
return x
class ECC(nn.Module):
"""
Uses fixed architecture.
    IMPORTANT NOTE: we will consider datasets which do not have edge labels.
    Therefore, we avoid learning the function that associates a weight matrix
    to an edge-specific weight.
"""
def __init__(self, dim_features, dim_target, model_configs, dataset_configs):
super().__init__()
self.model_configs = model_configs
self.dropout = model_configs['dropout']
self.dropout_final = model_configs['dropout_final']
self.num_layers = model_configs['num_layers']
dim_embedding = model_configs['dim_embedding']
self.layers = nn.ModuleList([])
for i in range(self.num_layers):
dim_input = dim_features if i == 0 else dim_embedding
layer = ECCLayer(dim_input, dim_embedding, dropout=self.dropout)
self.layers.append(layer)
fnet = nn.Sequential(nn.Linear(1, 16),
nn.ReLU(),
nn.Linear(16, dim_embedding * dim_embedding))
self.final_conv = ECConv(dim_embedding, dim_embedding, nn=fnet)
self.final_conv_bn = nn.BatchNorm1d(dim_embedding)
self.fc1 = nn.Linear(dim_embedding, dim_embedding)
self.fc2 = nn.Linear(dim_embedding, dim_target)
self.task_type = dataset_configs["task_type"]
self.multiclass_num_classes = dataset_configs["multiclass_num_classes"] if self.task_type == 'Multi-Classification' else None
self.classification = self.task_type == 'Classification'
if self.classification:
self.sigmoid = nn.Sigmoid()
self.multiclass = self.task_type == 'Multi-Classification'
if self.multiclass:
self.multiclass_softmax = nn.Softmax(dim=2)
self.regression = self.task_type == 'Regression'
if self.regression:
self.relu = nn.ReLU()
assert not (self.classification and self.regression and self.multiclass)
def make_block_diag(self, matrix_list):
mat_sizes = [m.size(0) for m in matrix_list]
return _make_block_diag(matrix_list, mat_sizes)
def get_ecc_conv_parameters(self, data, layer_no):
v_plus_list, laplacians = data.v_plus, data.laplacians
# print([v_plus[layer_no] for v_plus in v_plus_list])
v_plus_batch = torch.cat([v_plus[layer_no] for v_plus in v_plus_list], dim=0)
laplacian_layer_list = [laplacians[i][layer_no] for i in range(len(laplacians))]
laplacian_block_diagonal = self.make_block_diag(laplacian_layer_list)
# First layer
lap_edge_idx, lap_edge_weights = dense_to_sparse(laplacian_block_diagonal)
lap_edge_weights = lap_edge_weights.squeeze(-1)
# Convert v_plus_batch to boolean
return lap_edge_idx, lap_edge_weights, (v_plus_batch == 1)
def forward(self, data):
x, edge_index, batch = data.x, data.edge_index, data.batch
x.requires_grad = True
self.conv_acts = []
self.conv_grads = []
self.edge_grads = []
for i, layer in enumerate(self.layers):
# TODO should lap_edge_index[0] be equal to edge_idx?
lap_edge_idx, lap_edge_weights, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=i)
edge_index = lap_edge_idx if i != 0 else edge_index
edge_weight = lap_edge_weights if i != 0 else x.new_ones((edge_index.size(1), ))
edge_index = edge_index.to(self.model_configs["device"])
edge_weight = edge_weight.to(self.model_configs["device"])
edge_weight.requires_grad = True
# apply convolutional layer
with torch.enable_grad():
x = layer(x, edge_index, edge_weight)
x.register_hook(self.activations_hook)
self.conv_acts.append(x)
edge_weight.register_hook(self.edge_attrs_hook)
# pooling
x = x[v_plus_batch]
batch = batch[v_plus_batch]
# final_convolution
lap_edge_idx, lap_edge_weight, v_plus_batch = self.get_ecc_conv_parameters(data, layer_no=self.num_layers)
lap_edge_idx = lap_edge_idx.to(self.model_configs["device"])
lap_edge_weight = lap_edge_weight.to(self.model_configs["device"])
lap_edge_weight.requires_grad = True
x = F.relu(self.final_conv(x, lap_edge_idx, lap_edge_weight.unsqueeze(-1)))
x = F.dropout(self.final_conv_bn(x), p=self.dropout, training=self.training)
lap_edge_weight.register_hook(self.edge_attrs_hook)
self.lap_edge_weight = lap_edge_weight
# TODO: is the following line needed before global pooling?
# batch = batch[v_plus_batch]
graph_emb = global_mean_pool(x, batch)
x = F.relu(self.fc1(graph_emb))
x = F.dropout(x, p=self.dropout_final, training=self.training)
# No ReLU specified here todo check with source code (code is not so clear)
x = self.fc2(x)
# Don't apply sigmoid during training b/c using BCEWithLogitsLoss
if self.classification and not self.training:
x = self.sigmoid(x)
if self.multiclass:
x = x.reshape((x.size(0), -1, self.multiclass_num_classes)) # batch size x num targets x num classes per target
if not self.training:
x = self.multiclass_softmax(x) # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss
return x
def get_gap_activations(self, data):
output = self.forward(data)
output.backward()
return self.conv_acts[-1], None
def get_prediction_weights(self):
w = self.fc2.weight.t()
return w[:, 0]
def get_intermediate_activations_gradients(self, data):
output = self.forward(data)
output.backward()
conv_grads = [conv_g.grad for conv_g in self.conv_grads]
return self.conv_acts, self.conv_grads
def activations_hook(self, grad):
self.conv_grads.append(grad)
def edge_attrs_hook(self, grad):
self.edge_grads.append(grad)
def get_gradients(self, data):
data.x.requires_grad_()
data.x.retain_grad()
output = self.forward(data)
output.backward()
atom_grads = data.x.grad
        edge_grads = self.edge_grads[-1]  # gradients captured by edge_attrs_hook
return data.x, atom_grads, self.lap_edge_weight, edge_grads
```
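`_make_block_diag` above stitches the per-graph Laplacians of a batch into one block-diagonal matrix so the whole batch can be convolved at once. For square inputs this is equivalent to `torch.block_diag`, which makes a quick sanity check easy; the sketch below is only an illustration, not the ECC code path:
```python
import torch

def make_block_diag(mats):
    """Place each square matrix on the diagonal of one larger zero matrix."""
    sizes = [m.size(0) for m in mats]
    out = torch.zeros(sum(sizes), sum(sizes))
    for i, (m, n) in enumerate(zip(mats, sizes)):
        offset = sum(sizes[:i])
        out[offset:offset + n, offset:offset + n] = m
    return out

a, b = torch.ones(2, 2), 2 * torch.ones(3, 3)
assert torch.equal(make_block_diag([a, b]), torch.block_diag(a, b))
print(make_block_diag([a, b]))
```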
#### File: Models/graph_based/GraphNet.py
```python
from operator import mod
from numpy.lib.arraysetops import isin
import torch
from torch import nn
from torch.nn import functional as F
from torch_geometric.nn import NNConv, Set2Set
class GraphNet(nn.Module):
def __init__(self, dim_features, dim_target, model_configs, dataset_configs):
super().__init__()
assert isinstance(dim_features, tuple)
node_features, edge_features = dim_features
dim_node_hidden = model_configs['dim_node_hidden']
dim_edge_hidden = model_configs['dim_edge_hidden']
num_layers = model_configs['num_layers']
num_step_set2set = model_configs['num_step_set2set']
num_layer_set2set = model_configs['num_layer_set2set']
aggr_type = model_configs['aggregation_type']
self.project_node_feats = nn.Sequential(
nn.Linear(node_features, dim_node_hidden),
nn.ReLU()
)
self.num_layers = num_layers
fnet = nn.Sequential(
nn.Linear(edge_features, dim_edge_hidden),
nn.ReLU(),
nn.Linear(dim_edge_hidden, dim_node_hidden * dim_node_hidden)
)
self.gnn = NNConv(
in_channels=dim_node_hidden,
out_channels=dim_node_hidden,
nn=fnet,
aggr=aggr_type
)
self.gru = nn.GRU(dim_node_hidden, dim_node_hidden)
self.readout = Set2Set(
in_channels=dim_node_hidden,
processing_steps=num_step_set2set,
num_layers=num_layer_set2set
)
# For graph classification
self.fc1 = nn.Linear(2 * dim_node_hidden, dim_node_hidden)
self.fc2 = nn.Linear(dim_node_hidden, dim_target)
self.task_type = dataset_configs["task_type"]
self.multiclass_num_classes = dataset_configs["multiclass_num_classes"] if self.task_type == 'Multi-Classification' else None
self.classification = self.task_type == 'Classification'
if self.classification:
self.sigmoid = nn.Sigmoid()
self.multiclass = self.task_type == 'Multi-Classification'
if self.multiclass:
self.multiclass_softmax = nn.Softmax(dim=2)
self.regression = self.task_type == 'Regression'
if self.regression:
self.relu = nn.ReLU()
assert not (self.classification and self.regression and self.multiclass)
def forward(self, data):
x, edge_index, edge_attrs, batch = data.x, data.edge_index, data.edge_attr, data.batch
x.requires_grad = True
edge_attrs.requires_grad = True
x = self.project_node_feats(x) # (batch_size, node hidden features)
hidden_feats = x.unsqueeze(0) # (1, batch_size, node hidden features)
self.conv_acts = []
self.conv_grads = []
for _ in range(self.num_layers):
with torch.enable_grad():
x = self.gnn(x, edge_index, edge_attrs)
x.register_hook(self.activations_hook)
self.conv_acts.append(x)
x = F.relu(x)
x, hidden_feats = self.gru(x.unsqueeze(0), hidden_feats)
x = x.squeeze(0)
graph_feats = self.readout(x, batch)
x = F.relu(self.fc1(graph_feats))
x = self.fc2(x)
# Don't apply sigmoid during training b/c using BCEWithLogitsLoss
if self.classification and not self.training:
x = self.sigmoid(x)
if self.multiclass:
x = x.reshape(x.size(0), -1, self.multiclass_num_classes) # batch size x num targets x num classes per target
if not self.training:
x = self.multiclass_softmax(x) # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss
return x
def get_gap_activations(self, data):
output = self.forward(data)
output.backward(torch.ones_like(output))
return self.conv_acts[-1], None
def get_prediction_weights(self):
w = self.fc2.weight.t()
return w[:, 0]
def get_intermediate_activations_gradients(self, data):
output = self.forward(data)
output.backward(torch.ones_like(output))
return self.conv_acts, self.conv_grads
def activations_hook(self, grad):
self.conv_grads.append(grad)
def get_gradients(self, data):
data.x.requires_grad_()
data.x.retain_grad()
data.edge_attr.requires_grad_()
data.edge_attr.retain_grad()
output = self.forward(data)
output.backward(torch.ones_like(output))
atom_grads = data.x.grad
bond_grads = data.edge_attr.grad
return data.x, atom_grads, data.edge_attr, bond_grads
``` |
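Both `GraphNet` and `ECC` rely on an edge network (`fnet`) that maps each edge's features to an `in_channels x out_channels` weight matrix for `NNConv`. The snippet below is a minimal, self-contained check of that mechanism with arbitrary sizes; it requires `torch` and `torch_geometric` and is not part of MolRep.
```python
import torch
from torch import nn
from torch_geometric.nn import NNConv

in_dim, out_dim, edge_dim = 8, 8, 4
# Edge network: edge feature vector -> flattened (in_dim x out_dim) weight matrix
fnet = nn.Sequential(nn.Linear(edge_dim, 32), nn.ReLU(), nn.Linear(32, in_dim * out_dim))
conv = NNConv(in_dim, out_dim, nn=fnet, aggr='mean')

x = torch.randn(5, in_dim)                                  # 5 nodes
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])     # 4 directed edges
edge_attr = torch.randn(edge_index.size(1), edge_dim)
print(conv(x, edge_index, edge_attr).shape)                 # torch.Size([5, 8])
```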