metadata: { "source": "jennhu/vmf_vae_nlp", "score": 2 }
#### File: modules/dec/decoder_step.py
```python
# import torch
# import torch.nn.functional as F
# import torch.optim
# from torch.autograd import Variable as Var
#
# from genut.modules.dec.decoder import RNNDecoderBase
#
#
#
# class InputFeedRNNDecoder(RNNDecoderBase):
#
# def __init__(self, opt):
# super().__init__(opt)
```
#### File: genut/modules/embedding.py
```python
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable as Var
class MultiEmbeddings(nn.Module):
def __init__(self, opt, pretrain=None):
super(MultiEmbeddings, self).__init__()
self.opt = opt
self.word_embedding = nn.Embedding(opt.full_dict_size, opt.inp_dim)
if pretrain is not None:
self.word_embedding.weight = nn.Parameter(torch.FloatTensor(pretrain))
self.pos_embedding = nn.Embedding(opt.pos_dict_size, opt.tag_dim)
self.ner_embedding = nn.Embedding(opt.ner_dict_size, opt.tag_dim)
def forward(self, inp):
"""
:param inp: list obj with word, pos, ner.
:return: Concatenated word embedding. seq_len, batch_sz, all_dim
"""
seq_word, seq_pos, seq_ner = inp
embedded_word = self.word_embedding(seq_word)
        # look up tag embeddings for POS and NER
        embedded_pos = self.pos_embedding(seq_pos)
        embedded_ner = self.ner_embedding(seq_ner)
final_embedding = torch.cat((embedded_word, embedded_pos, embedded_ner), dim=2)
        if self.opt.pe:
            # Add sinusoidal position encodings (Transformer-style), scaled by 0.5.
            seq_len, batch_sz, dim = final_embedding.size()
            position_enc = np.array(
                [[pos / np.power(10000, 2. * i / dim) for i in range(dim)] for pos in range(seq_len)])
            position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # even dimensions: sine
            position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # odd dimensions: cosine
            position_enc = torch.from_numpy(position_enc).type(torch.FloatTensor)
            x = position_enc.unsqueeze(1)  # seq_len, 1, dim
            x = Var(x.expand_as(final_embedding)).cuda()
            final_embedding = final_embedding + 0.5 * x
return final_embedding
def forward_decoding(self, inp):
embedded_word = self.word_embedding(inp)
return embedded_word
class SingleEmbeddings(nn.Module):
def __init__(self, opt, pretrain=None):
super(SingleEmbeddings, self).__init__()
self.opt = opt
self.drop = nn.Dropout(opt.dropout_emb)
self.word_embedding = nn.Embedding(opt.full_dict_size, opt.inp_dim)
if pretrain is not None:
self.word_embedding.weight = nn.Parameter(torch.FloatTensor(pretrain))
def forward(self, inp):
"""
:param inp:
:return: seq_len, batch_sz, word_dim
"""
embedded_word = self.word_embedding(inp)
emb = self.drop(embedded_word)
return emb
def forward_decoding(self, inp):
embedded_word = self.word_embedding(inp)
emb = self.drop(embedded_word)
return emb
```
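Both embedding modules take an option object carrying a handful of size fields. A minimal usage sketch, assuming the classes above are importable; the `opt` values here are invented for illustration and `pe` is kept off so the sketch stays on CPU (the positional-encoding branch calls `.cuda()`):
```python
from types import SimpleNamespace

import torch
from torch.autograd import Variable as Var

# Hypothetical hyper-parameters standing in for the project's option object.
opt = SimpleNamespace(full_dict_size=10000, inp_dim=100,
                      pos_dict_size=50, ner_dict_size=20, tag_dim=30,
                      pe=False, dropout_emb=0.1)

multi_emb = MultiEmbeddings(opt)    # concatenates word + POS + NER embeddings
single_emb = SingleEmbeddings(opt)  # word embeddings with dropout only

seq_len, batch_sz = 7, 4
words = Var(torch.LongTensor(seq_len, batch_sz).random_(0, opt.full_dict_size))
pos = Var(torch.LongTensor(seq_len, batch_sz).random_(0, opt.pos_dict_size))
ner = Var(torch.LongTensor(seq_len, batch_sz).random_(0, opt.ner_dict_size))

print(multi_emb((words, pos, ner)).size())  # (seq_len, batch_sz, inp_dim + 2 * tag_dim)
print(single_emb(words).size())             # (seq_len, batch_sz, inp_dim)
```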
#### File: modules/enc/encoder.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as Var
import unittest
class CNNEncoder(nn.Module):
def __init__(self, inp_dim, hid_dim, kernel_sz, pad, dilat):
super(CNNEncoder, self).__init__()
self.encoder = torch.nn.Conv1d(in_channels=inp_dim, out_channels=hid_dim, kernel_size=kernel_sz, stride=1,
padding=pad, dilation=dilat)
    def forward(self, inp, inp_mask):
        # inp: (seq_len, batch, dim) -> (batch, dim, seq_len) for Conv1d
        inp = inp.permute(1, 2, 0)
        x = torch.nn.functional.relu(self.encoder(inp))
        # back to (seq_len, batch, hid_dim)
        x = x.permute(2, 0, 1)
        # mimic an LSTM-style (h, c) pair using the last time step
        h_t = (x[-1], x[-1])
        return x, h_t
class DCNNEncoder(nn.Module):
def __init__(self, inp_dim, hid_dim=150, kernel_sz=5, pad=2, dilat=1):
super(DCNNEncoder, self).__init__()
self.encoder = torch.nn.Conv1d(in_channels=inp_dim, out_channels=hid_dim, kernel_size=kernel_sz, stride=1,
padding=pad, dilation=1)
def forward(self, inp, mask):
inp = inp.permute(1, 2, 0)
x = torch.nn.functional.relu(self.encoder(inp))
print(x.size())
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == "__main__":
unittest.main()
batch = 16
seq = 200
dim = 400
inp = torch.autograd.Variable(torch.rand((seq, batch, dim)))
cnn = DCNNEncoder(inp_dim=dim, hid_dim=150, kernel_sz=5, pad=2, dilat=1)
cnn.forward(inp, 0)
```
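For reference, `CNNEncoder` imitates the `(output, (h, c))` interface of an LSTM encoder: it convolves over the time axis and duplicates the last time step as a stand-in hidden/cell pair. A small shape check, assuming the class above is available; the sizes are made up:
```python
import torch
from torch.autograd import Variable as Var

seq_len, batch, dim = 50, 8, 64
enc = CNNEncoder(inp_dim=dim, hid_dim=128, kernel_sz=5, pad=2, dilat=1)

x = Var(torch.rand(seq_len, batch, dim))
out, (h, c) = enc(x, None)   # the mask argument is unused inside forward
print(out.size())            # (seq_len, batch, hid_dim); padding 2 preserves length with kernel 5
print(h.size(), c.size())    # (batch, hid_dim) each, both copies of the last time step
```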
#### File: genut/util/train_lm.py
```python
import logging
import math
import os
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable as Var
from archive.genut import msk_list_to_mat
from archive.genut import Trainer
class LMTrainer(Trainer):
def __init__(self, opt, model, data):
super().__init__(opt, model, data)
# self.logger = Logger(opt.print_every, self.n_batch)
# weight = torch.ones(opt.full_dict_size)
# weight[0] = 0
# assert 0 == opt.word_dict.fword2idx('<pad>')
self.crit = nn.CrossEntropyLoss(size_average=True, reduce=True, ignore_index=0)
self.crit_test = nn.CrossEntropyLoss(size_average=False, reduce=False, ignore_index=0)
self.opt = opt
self.model = model
self.train_bag = data[0]
self.test_bag = data[1]
self.n_batch = len(self.train_bag)
# parameters = filter(lambda p: p.requires_grad, self.model.parameters())
# self.optimizer = torch.optim.Adagrad(parameters, lr=opt.lr) # TODO
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=opt.lr, momentum=0.9)
# self.optimizer = torch.optim.SGD(parameters,lr=opt.lr)
# self.mul_loss = opt.mul_loss
# self.add_loss = opt.add_loss
# dicts = [word_dict, pos_dict, ner_dict]
self.word_dict = opt.word_dict
self.clip = opt.clip
def func_train(self, inp_var, inp_msk):
self.optimizer.zero_grad() # clear grad
aux = {}
batch_size = inp_var.size()[0]
batch_size_ = len(inp_msk)
assert batch_size == batch_size_
target_len = inp_msk[0]
decoder_outputs_prob, decoder_outputs = self.model.forward(inp_var, inp_msk, inp_var, inp_msk, aux)
valid_pos_mask = Var(msk_list_to_mat(inp_msk), requires_grad=False).view(target_len * batch_size, 1)
if self.opt.use_cuda:
valid_pos_mask = valid_pos_mask.cuda()
# Compulsory NLL loss part
pred_prob = decoder_outputs_prob.view(target_len * batch_size, -1)
# print(inp_var)
seq_first_inp_var = inp_var.transpose(1, 0).contiguous()
gold_dist = seq_first_inp_var.view(target_len * batch_size)
gold_dist = Var(gold_dist)
if self.opt.use_cuda:
gold_dist = gold_dist.cuda()
loss = self.crit(pred_prob, gold_dist)
loss.backward()
torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)
self.optimizer.step()
return loss.data[0], math.exp(loss.data[0])
def func_test(self, inp_var, inp_msk):
target_len = inp_msk[0]
batch_size = inp_var.size()[0]
decoder_outputs_prob, decoder_outputs = self.model.forward(inp_var, inp_msk, tgt_var=inp_var, tgt_msk=inp_msk,
aux=None)
# Compulsory NLL loss part
pred_prob = decoder_outputs_prob.view(target_len * batch_size, -1)
seq_first_inp_var = inp_var.transpose(1, 0).contiguous()
gold_dist = Var(seq_first_inp_var.view(target_len * batch_size))
if self.opt.use_cuda:
gold_dist = gold_dist.cuda()
loss = self.crit_test(pred_prob, gold_dist)
loss = torch.sum(loss)
return loss.data[0], decoder_outputs
def train_iters(self):
"""
Training function called from main.py.
:return:
"""
for epo in range(self.opt.start_epo, self.opt.n_epo + 1):
self.model.train()
batch_order = np.arange(self.n_batch)
np.random.shuffle(batch_order)
for idx, batch_idx in enumerate(batch_order):
# self.logger.init_new_batch(batch_idx)
current_batch = self.train_bag[batch_idx]
# current_batch = copy.deepcopy(tmp_cur_batch)
inp_var = current_batch['txt']
inp_msk = current_batch['txt_msk']
# out_var = current_batch['cur_out_var']
# out_mask = current_batch['cur_out_mask']
# scatter_msk = current_batch['cur_scatter_mask'].cuda()
# replacement = current_batch['replacement']
# max_oov_len = len(replacement)
# self.logger.set_oov(max_oov_len)
# inp_var = Var(inp_var)
if self.opt.use_cuda:
# inp_var = [x.contiguous().cuda() for x in inp_var]
inp_var = inp_var.contiguous().cuda()
nll, ppl = self.func_train(inp_var, inp_msk)
if idx % self.opt.print_every == 0:
logging.info('NLL:%.2f \tPPL:%s' % (nll, str(ppl)))
if idx % self.opt.save_every == 0:
ppl = self.evaluate()
os.chdir(self.opt.save_dir)
                    name_string = '%d_%.2f' % (epo, ppl)
logging.info("Saving in epo %s" % name_string)
torch.save(self.model.emb.state_dict(),
name_string + '_emb')
# torch.save(self.model.enc.state_dict(), name_string + '_enc')
torch.save(self.model.dec.state_dict(), name_string + '_dec')
torch.save(self.model.opt, name_string + '_opt')
os.chdir('..')
def evaluate(self):
self.model.eval()
n_batch = len(self.test_bag)
test_len = 0
accumulated_ppl = 0
for idx in range(n_batch):
current_batch = self.test_bag[idx]
inp_var = current_batch['txt']
inp_mask = current_batch['txt_msk']
batch_size = inp_var.size()[0]
test_len += inp_mask[0] * batch_size
nll, decoder_output = self.func_test(inp_var, inp_mask)
accumulated_ppl += nll
final_ppl = accumulated_ppl / test_len
final_ppl = math.exp(final_ppl)
logging.info('PPL: %f' % final_ppl)
return final_ppl
```
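The trainer reports perplexity as the exponential of the average per-token negative log-likelihood: `func_train` exponentiates the already-averaged batch loss, while `evaluate` sums unreduced token losses and divides by the total token count before exponentiating. A minimal sketch of that bookkeeping with invented numbers (only `torch` and `math` assumed):
```python
import math

import torch

# Dummy per-token NLL values for 3 sequences x 4 tokens (padding already excluded).
token_nll = torch.FloatTensor([[2.1, 1.7, 3.0, 0.9],
                               [1.2, 2.4, 2.2, 1.5],
                               [0.8, 1.9, 2.6, 2.0]])

avg_nll = float(token_nll.sum()) / token_nll.numel()  # evaluate() divides summed NLL by token count
print(avg_nll, math.exp(avg_nll))                     # perplexity = exp(average per-token NLL)
```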
#### File: archive/nvdm/main.py
```python
import argparse
import logging
import math
import os
import time
import torch
import numpy as np
from tensorboardX import SummaryWriter
from archive import nvdm as util
from archive.nvdm import BowVAE
parser = argparse.ArgumentParser(description='PyTorch VAE LSTM Language Model')
parser.add_argument('--data_name', type=str, default='20news', help='name of the data corpus')
parser.add_argument('--data_path', type=str, default='../data/20news', help='location of the data corpus')
parser.add_argument('--distribution', type=str, default='nor', help='nor or vmf')
parser.add_argument('--kappa', type=float, default=5)
parser.add_argument('--emsize', type=int, default=800, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=800, help='number of hidden units per layer')
parser.add_argument('--lat_dim', type=int, default=800, help='dim of latent vec z')
parser.add_argument('--lr', type=float, default=0.01,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=200,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N', help='batch size')
parser.add_argument('--eval_batch_size', type=int, default=10, help='evaluation batch size')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--kl_weight', type=float, default=1,
help='scaling item for KL')
parser.add_argument('--load', type=str, default=None, help='restoring previous model')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
args.save_name = 'Data{}_Dist{}_Emb{}_Hid{}_lat{}_lr{}_drop{}'.format(
    args.data_name, str(args.distribution),
    args.emsize,
    args.nhid, args.lat_dim, args.lr,
    args.dropout)
writer = SummaryWriter(log_dir='exps/' + args.save_name)
log_name = args.save_name + '.log'
logging.basicConfig(filename=log_name, level=logging.INFO)
###############################################################################
# Load data
###############################################################################
# corpus = data.Corpus(args.data)
if '20news' in args.data_name:
corpus = util.NewsCorpus(args.data_path)
else:
raise NotImplementedError
test_batches = util.create_batches(len(corpus.test), args.eval_batch_size, shuffle=True)
dev_batches = util.create_batches(len(corpus.dev), args.eval_batch_size, shuffle=True)
###############################################################################
# Build the model
###############################################################################
model = BowVAE(vocab_size=2000, n_hidden=args.nhid, n_lat=args.lat_dim,
               n_sample=5, batch_size=args.batch_size, non_linearity='Tanh', dist=args.distribution)
print("Model {}".format(model))
logging.info("Model {}".format(model))
if args.load is not None:
    if os.path.isfile(args.load):
        with open(args.load, 'rb') as f:
            model = torch.load(f)
            logging.info("Successfully loaded previous model: {}".format(args.load))
if args.cuda:
model.cuda()
else:
model.cpu()
logging.info(model)
###############################################################################
# Training code
###############################################################################
def train(train_batches, glob_iteration):
# Turn on training mode which enables dropout.
model.train()
optim = torch.optim.Adam(model.parameters(), lr=args.lr)
acc_loss = 0
acc_kl_loss = 0
word_cnt = 0
doc_cnt = 0
acc_real_ppl = 0
start_time = time.time()
ntokens = 2000
for idx, batch in enumerate(train_batches):
optim.zero_grad()
glob_iteration += 1
data_batch, count_batch, mask = util.fetch_data(
corpus.train, corpus.train_cnt, batch, ntokens)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
# hidden = repackage_hidden(hidden)
model.zero_grad()
data_batch = torch.autograd.Variable(torch.FloatTensor(data_batch))
mask = torch.autograd.Variable(torch.FloatTensor(mask))
if args.cuda:
data_batch = data_batch.cuda()
mask = mask.cuda()
recon_loss, kld, _ = model(data_batch, mask)
if idx % (args.log_interval / 2) == 0:
print("RecLoss: %f\tKL: %f" % (torch.mean(recon_loss).data, torch.mean(kld).data))
total_loss = torch.mean(recon_loss + kld * args.kl_weight)
total_loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
optim.step()
count_batch = torch.FloatTensor(count_batch).cuda()
real_ppl = torch.div((recon_loss + kld).data, count_batch) * mask.data
acc_real_ppl += torch.sum(real_ppl)
acc_loss += torch.sum(recon_loss).data #
# print(kld.size(), mask.size())
acc_kl_loss += torch.sum(kld.data * torch.sum(mask.data))
count_batch = count_batch + 1e-12
word_cnt += torch.sum(count_batch)
doc_cnt += torch.sum(mask.data)
if idx % args.log_interval == 0 and idx > 0:
# word ppl
cur_loss = acc_loss[0] / word_cnt # word loss
cur_kl = acc_kl_loss / doc_cnt
print_ppl = acc_real_ppl / doc_cnt
logging.info(
"\t{}\t{}\t{}\t{}\t{}\t{}".format(epoch, glob_iteration, cur_loss, cur_kl, print_ppl,
math.exp(print_ppl)))
writer.add_scalars('train', {'lr': args.lr, 'kl_weight': args.kl_weight,
'cur_loss': cur_loss, 'cur_kl': cur_kl,
'cur_sum': print_ppl, 'ppl': math.exp(print_ppl)
}, global_step=glob_iteration)
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | KLW {:5.2f}|'
'loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format(
epoch, idx, lr,
elapsed * 1000 / args.log_interval, args.kl_weight, cur_loss, cur_kl,
np.exp(print_ppl)))
word_cnt = 0
doc_cnt = 0
acc_loss = 0
acc_kl_loss = 0
acc_real_ppl = 0
start_time = time.time()
return glob_iteration
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
glob_iter = 0
for epoch in range(1, args.epochs + 1):
args.kl_weight = util.schedule(epoch)
epoch_start_time = time.time()
train_batches = util.create_batches(len(corpus.train), args.batch_size, shuffle=True)
glob_iter = train(train_batches, glob_iter)
val_loss = util.evaluate(args, model, corpus.dev, corpus.dev_cnt, dev_batches)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
writer.add_scalars('valid', {'lr': args.lr, 'kl_weight': args.kl_weight,
'val_loss': val_loss,
'ppl': math.exp(val_loss)
}, global_step=glob_iter)
print('-' * 89)
with open('Valid_PPL_' + log_name, 'w') as f:
f.write("{}\t{}".format(epoch, math.exp(val_loss)))
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save_name, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
            # Annealing is effectively a no-op here: dividing by 1.0 leaves the learning rate unchanged.
            lr /= 1.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save_name, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = util.evaluate(args, model, corpus.test, corpus.test_cnt, test_batches)
print('=' * 89)
print('| End of training | Test Loss {:5.2f} | Test PPL {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
writer.close()
```
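The training loop above tracks a per-document perplexity proxy: the total loss (reconstruction plus KL) is divided by that document's token count, masked to real documents, averaged over the document count, and exponentiated. A sketch of that arithmetic with invented values (an epsilon keeps the empty slot NaN-free, which is also why the evaluation code filters NaNs):
```python
import math

import torch

# Hypothetical per-document values for a batch of 3 slots, the last one empty.
recon_loss = torch.FloatTensor([120.0, 95.0, 0.0])    # summed reconstruction NLL per doc
kld = torch.FloatTensor([4.0, 3.5, 0.0])              # KL term per doc
count_batch = torch.FloatTensor([40.0, 30.0, 1e-12])  # token counts per doc
mask = torch.FloatTensor([1.0, 1.0, 0.0])             # 1.0 marks a real document

real_ppl = torch.div(recon_loss + kld, count_batch) * mask
avg_nll_per_doc = float(real_ppl.sum()) / float(mask.sum())
print(avg_nll_per_doc, math.exp(avg_nll_per_doc))     # reported as "ppl" in the logs
```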
#### File: archive/nvdm/util.py
```python
import math
import os
import random
import numpy as np
import torch
from NVLL.util.gpu_flag import device
class NewsCorpus(object):
def __init__(self, path):
self.test, self.test_cnt = self.read_data(os.path.join(path, 'test.feat'))
self.train, self.train_cnt = self.read_data(os.path.join(path, 'train.feat'))
l = list(range(len(self.test)))
random.shuffle(l)
l = l[:500]
self.dev = []
self.dev_cnt = []
for i in l:
self.dev.append(self.test[i])
self.dev_cnt.append(self.test_cnt[i])
def read_data(self, path_file):
_id = 0
idx = []
data = []
word_count = []
fin = open(path_file)
while True:
line = fin.readline()
if not line:
break
id_freqs = line.split()
doc = {}
count = 0
for id_freq in id_freqs[1:]:
items = id_freq.split(':')
# python starts from 0
doc[int(items[0]) - 1] = int(items[1])
count += int(items[1])
if count > 0:
idx.append(_id)
_id += 1
data.append(doc)
word_count.append(count)
fin.close()
# sorted_idx = sorted(idx, key=lambda sample: word_count[sample], reverse=True)
# new_data = []
# new_count = []
# for i, this_id in enumerate(sorted_idx):
# new_data.append(data[this_id])
# new_count.append(word_count[this_id])
return data, word_count
def data_set(data_url):
"""process data input."""
data = []
word_count = []
fin = open(data_url)
while True:
line = fin.readline()
if not line:
break
id_freqs = line.split()
doc = {}
count = 0
for id_freq in id_freqs[1:]:
items = id_freq.split(':')
# python starts from 0
doc[int(items[0]) - 1] = int(items[1])
count += int(items[1])
if count > 0:
data.append(doc)
word_count.append(count)
fin.close()
return data, word_count
def create_batches(data_size, batch_size, shuffle=True):
"""create index by batches."""
batches = []
ids = list(range(data_size))
if shuffle:
random.shuffle(ids)
for i in range(int(data_size / batch_size)):
start = i * batch_size
end = (i + 1) * batch_size
batches.append(ids[start:end])
# the batch of which the length is less than batch_size
rest = data_size % batch_size
if rest > 0:
# batches.append(list(ids[-rest:]) + [-1] * (batch_size - rest)) # -1 as padding
        batches.append(list(ids[-rest:]))  # final smaller batch, no -1 padding
return batches
def fetch_data(data, count, idx_batch, vocab_size):
"""fetch input data by batch."""
batch_size = len(idx_batch)
data_batch = np.zeros((batch_size, vocab_size))
count_batch = []
mask = np.zeros(batch_size)
indices = []
values = []
for i, doc_id in enumerate(idx_batch):
if doc_id != -1:
for word_id, freq in data[doc_id].items():
data_batch[i, word_id] = freq
count_batch.append(count[doc_id])
mask[i] = 1.0
else:
count_batch.append(0)
return data_batch, count_batch, mask
def schedule(epo, eval=False):
    # KL-weight annealing: a sigmoid ramp over epochs that crosses 0.5 at epoch 5.
    return float(torch.sigmoid(torch.ones(1) * (epo - 5)))
def variable_parser(var_list, prefix):
"""return a subset of the all_variables by prefix."""
ret_list = []
for var in var_list:
varname = var.name
varprefix = varname.split('/')[0]
if varprefix == prefix:
ret_list.append(var)
return ret_list
#
#
# def linear(inputs,
# output_size,
# no_bias=False,
# bias_start_zero=False,
# matrix_start_zero=False,
# scope=None):
# """Define a linear connection."""
# with tf.variable_scope(scope or 'Linear'):
# if matrix_start_zero:
# matrix_initializer = tf.constant_initializer(0)
# else:
# matrix_initializer = None
# if bias_start_zero:
# bias_initializer = tf.constant_initializer(0)
# else:
# bias_initializer = None
# input_size = inputs.get_shape()[1].value
# matrix = tf.get_variable('Matrix', [input_size, output_size],
# initializer=matrix_initializer)
# bias_term = tf.get_variable('Bias', [output_size],
# initializer=bias_initializer)
# output = tf.matmul(inputs, matrix)
# if not no_bias:
# output = output + bias_term
# return output
#
#
# def mlp(inputs,
# mlp_hidden=[],
# mlp_nonlinearity=tf.nn.tanh,
# scope=None):
# """Define an MLP."""
# with tf.variable_scope(scope or 'Linear'):
# mlp_layer = len(mlp_hidden)
# res = inputs
# for l in range(mlp_layer):
# res = mlp_nonlinearity(linear(res, mlp_hidden[l], scope='l' + str(l)))
# return res
#
#
import time
def evaluate(args, model, corpus_dev, corpus_dev_cnt, dev_batches):
# Turn on training mode which enables dropout.
model.eval()
acc_loss = 0
acc_kl_loss = 0
acc_real_ppl = 0
word_cnt = 0
doc_cnt = 0
start_time = time.time()
ntokens = 2000
for idx, batch in enumerate(dev_batches):
data_batch, count_batch, mask = fetch_data(
corpus_dev, corpus_dev_cnt, batch, ntokens)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
# hidden = repackage_hidden(hidden)
data_batch = torch.FloatTensor(data_batch).to(device)
mask = torch.FloatTensor(mask).to(device)
recon_loss, kld, _ = model(data_batch, mask)
count_batch = torch.FloatTensor(count_batch).to(device)
real_ppl = torch.div((recon_loss + kld).data, count_batch) * mask.data
# remove nan
for n in real_ppl:
if n == n:
acc_real_ppl += n
# acc_real_ppl += torch.sum(real_ppl)
acc_loss += torch.sum(recon_loss).data #
# acc_kl_loss += kld.data * torch.sum(mask.data)
acc_kl_loss += torch.sum(kld.data * torch.sum(mask.data))
count_batch = count_batch + 1e-12
word_cnt += torch.sum(count_batch)
doc_cnt += torch.sum(mask.data)
# word ppl
cur_loss = acc_loss[0] / word_cnt # word loss
cur_kl = acc_kl_loss / doc_cnt
print_ppl = acc_real_ppl / doc_cnt
elapsed = time.time() - start_time
print('loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format(
cur_loss, cur_kl, np.exp(print_ppl)))
return print_ppl
```
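The corpus helpers keep each document as a sparse `{word_id: count}` dict; `fetch_data` densifies a batch of them into a `(batch_size, vocab_size)` count matrix plus a document mask. A toy run, assuming the functions above are in scope; the documents and counts are invented:
```python
# Three toy documents over a 6-word vocabulary, stored sparsely as {word_id: count}.
docs = [{0: 2, 3: 1}, {1: 1, 2: 4}, {5: 3}]
counts = [3, 5, 3]                      # total tokens per document

batches = create_batches(len(docs), batch_size=2, shuffle=False)
print(batches)                          # [[0, 1], [2]] -- last batch holds the remainder

data_batch, count_batch, mask = fetch_data(docs, counts, batches[0], vocab_size=6)
print(data_batch)                       # dense 2 x 6 bag-of-words counts
print(count_batch, mask)                # per-doc token counts and a 1.0 mask per real doc
```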
#### File: archive/vae_proto/main.py
```python
import argparse
import logging
import math
import time
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.autograd import Variable as Var
from archive.vae_proto import data
from archive.vae_proto import util
parser = argparse.ArgumentParser(description='PyTorch VAE LSTM Language Model')
parser.add_argument('--data_name', type=str, default='20news', help='name of the data corpus')
parser.add_argument('--data_path', type=str, default='../data/20news', help='location of the data corpus')
parser.add_argument('--encoder', type=str, default='bow', help='bow or lstm')
parser.add_argument('--decoder', type=str, default='bow', help='lstm or bow; Using LSTM or BoW as decoder')
parser.add_argument('--distribution', type=str, default=None, help='default: None (no vae) ; nor or vmf')
parser.add_argument('--kappa', type=float, default=5)
parser.add_argument('--fly', action='store_true', help='Without previous ground truth = inputless decode',
default=False)
parser.add_argument('--emsize', type=int, default=650, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=650, help='number of hidden units per layer')
parser.add_argument('--lat_dim', type=int, default=650, help='dim of latent vec z')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--lr', type=float, default=0.01,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=100,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N', help='batch size')
parser.add_argument('--eval_batch_size', type=int, default=10, help='evaluation batch size')
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
parser.add_argument('--kl_weight', type=float, default=1,
help='scaling item for KL')
parser.add_argument('--load', type=str, default=None, help='restoring previous model')
# The training code below branches on args.model ('lstm' or 'vae'), so expose it as a flag here.
parser.add_argument('--model', type=str, default='vae', help='overall model type: lstm or vae')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
args.save_name = 'Data{}_Model{}_Dec{}_Dist{}_Fly{}_Emb{}_Hid{}_lat{}_nlay{}_lr{}_drop{}'.format(
    args.data_name, args.encoder, args.decoder, str(args.distribution),
    args.fly, args.emsize,
    args.nhid, args.lat_dim, args.nlayers, args.lr,
    args.dropout)
writer = SummaryWriter(log_dir='exps/' + args.save_name)
log_name = args.save_name + '.log'
logging.basicConfig(filename=log_name, level=logging.INFO)
###############################################################################
# Load data
###############################################################################
# corpus = data.Corpus(args.data)
if '20news' in args.data_name:
corpus = data.NewsCorpus(args.data_path)
elif 'yelp' in args.data_name:
corpus = data.Corpus(args.data_path, start_idx=1, end_idx=130)
else:
corpus = data.Corpus(args.data_path)
train_data = util.make_batch(args, corpus.train, args.batch_size)
val_data = util.make_batch(args, corpus.valid, args.eval_batch_size)
test_data = util.make_batch(args, corpus.test, args.eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
print('Dict size: %d' % ntokens)
if args.model.lower() == 'lstm':
from archive.vae_proto import rnn_model
model = rnn_model.RNNModel(ntokens, args.emsize, args.nhid, 0, args.nlayers, args.dropout, args.tied)
elif args.model.lower() == 'vae':
from archive.vae_proto import vae_model
model = vae_model.VAEModel(args, args.decoder, ntokens, args.emsize, args.nhid, args.lat_dim, args.nlayers,
args.dropout, args.tied)
else:
raise NotImplementedError
print("Model {}".format(model))
logging.info("Model {}".format(model))
if args.load is not None:
    if os.path.isfile(args.load):
        with open(args.load, 'rb') as f:
            model = torch.load(f)
            logging.info("Successfully loaded previous model: {}".format(args.load))
if args.cuda:
model.cuda()
else:
model.cpu()
logging.info(model)
criterion = nn.CrossEntropyLoss(ignore_index=0)
###############################################################################
# Training code
###############################################################################
def train(glob_iteration):
# Turn on training mode which enables dropout.
model.train()
if args.model == 'vae':
# params_kl = list(model.fc_mu.parameters()) + list(model.fc_logvar.parameters()) \
# + list(model.z_to_c.parameters()) + list(model.z_to_h.parameters())
optim = torch.optim.Adam(model.parameters(), lr=args.lr)
# params_dict = dict(model.named_parameters())
# params = []
# for key, value in params_dict.items():
# if 'fc_' in key or 'z_to_' in key:
# params += [{'params': [value], 'lr': args.lr/50}]
# else:
# params += [{'params': [value], 'lr': args.lr}]
#
# optim = torch.optim.Adam(params)
else:
optim = torch.optim.SGD(model.parameters(), lr=args.lr)
acc_loss = 0
acc_kl_loss = 0
acc_total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
# hidden = model.init_hidden(args.batch_size)
cnt = 0
for batch, i in enumerate(range(0, len(train_data))):
optim.zero_grad()
glob_iteration += 1
data, targets = util.get_batch(args, train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
# hidden = repackage_hidden(hidden)
seq_len, bsz = data.size()
model.zero_grad()
# output, hidden = model(data, hidden)
if args.model == 'lstm':
if args.fly:
output, _ = model.forward_decode(args, data, ntokens)
else:
output, hidden = model(data)
loss = criterion(output.view(-1, ntokens), targets)
total_loss = loss
total_loss.backward()
elif args.model == 'vae':
if args.fly:
output, _, mu, logvar = model.forward_decode(args, data, ntokens)
else:
output, mu, logvar = model(data)
loss = criterion(output.view(-1, ntokens), targets)
            if args.distribution == 'nor':
                kld = util.kld(mu, logvar, 1)
            elif args.distribution == 'vmf':
kld = Var(torch.zeros(1)).cuda()
if batch % (args.log_interval / 2) == 0:
print("RecLoss: %f\tKL: %f" % (loss.data, kld.data))
total_loss = loss + kld * args.kl_weight
total_loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
# for p in model.parameters():
# p.data.add_(-lr, p.grad.data)
optim.step()
if args.model == 'lstm':
acc_loss += loss.data * seq_len * bsz
elif args.model == 'vae':
acc_loss += loss.data * seq_len * bsz
acc_kl_loss += kld.data * seq_len * bsz
acc_total_loss += total_loss.data * seq_len * bsz
cnt += seq_len * bsz
if batch % args.log_interval == 0 and batch > 0:
if args.model == 'lstm':
cur_loss = acc_loss[0] / cnt
logging.info(
"\t{}\t{}\t{}\t{}".format(epoch, glob_iteration, cur_loss,
math.exp(cur_loss)))
writer.add_scalars('train', {'lr': args.lr, 'kl_weight': args.kl_weight,
'cur_loss': cur_loss,
'ppl': math.exp(cur_loss)
}, global_step=glob_iteration)
elif args.model == 'vae':
cur_loss = acc_loss[0] / cnt
cur_kl = acc_kl_loss[0] / cnt
cur_sum = acc_total_loss[0] / cnt
logging.info(
"\t{}\t{}\t{}\t{}\t{}\t{}".format(epoch, glob_iteration, cur_loss, cur_kl, cur_sum,
math.exp(cur_loss)))
writer.add_scalars('train', {'lr': args.lr, 'kl_weight': args.kl_weight,
'cur_loss': cur_loss, 'cur_kl': cur_kl,
'cur_sum': cur_sum, 'ppl': math.exp(cur_loss)
}, global_step=glob_iteration)
cnt = 0
elapsed = time.time() - start_time
if args.model == 'lstm':
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data), lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
acc_loss = 0
elif args.model == 'vae':
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | KLW {:5.2f}|'
'loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data), lr,
elapsed * 1000 / args.log_interval, args.kl_weight, cur_loss, cur_kl, math.exp(cur_loss)))
acc_loss = 0
acc_kl_loss = 0
acc_total_loss = 0
else:
raise NotImplementedError
start_time = time.time()
return glob_iteration
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
glob_iter = 0
for epoch in range(1, args.epochs + 1):
args.kl_weight = util.schedule(epoch)
epoch_start_time = time.time()
glob_iter = train(glob_iter)
val_loss = util.evaluate(args, model, corpus, val_data, criterion)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
writer.add_scalars('valid', {'lr': args.lr, 'kl_weight': args.kl_weight,
'val_loss': val_loss,
'ppl': math.exp(val_loss)
}, global_step=glob_iter)
print('-' * 89)
with open('Valid_PPL_' + log_name, 'w') as f:
f.write("{}\t{}".format(epoch, math.exp(val_loss)))
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save_name, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 2.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save_name, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = util.evaluate(args, model, corpus, test_data, criterion)
print('=' * 89)
print('| End of training | Test Loss {:5.2f} | Test PPL {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
writer.close()
```
#### File: archive/vae_proto/rnn_model.py
```python
import torch
import torch.nn as nn
from torch.autograd import Variable
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, agenda_dim, nlayers=1, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.embed = nn.Embedding(ntoken, ninp)
self.decoder_rnn = nn.LSTM(ninp + agenda_dim, nhid, nlayers, dropout=dropout)
self.decoder_out = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder_out.weight = self.embed.weight
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.decoder_out.bias.data.fill_(0)
self.decoder_out.weight.data.uniform_(-initrange, initrange)
# # kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal'
# torch.nn.init.xavier_uniform(self.decoder_rnn.weight_ih_l0.data, gain=nn.init.calculate_gain('sigmoid'))
# torch.nn.init.orthogonal(self.decoder_rnn.weight_hh_l0.data, gain=nn.init.calculate_gain('sigmoid'))
#
# # embedding uniform
# torch.nn.init.xavier_uniform(self.embed.weight.data, gain=nn.init.calculate_gain('linear'))
#
# # Linear kernel_initializer='glorot_uniform'
# torch.nn.init.xavier_uniform(self.decoder_out.weight.data, gain=nn.init.calculate_gain('linear'))
def forward(self, input, hidden=None):
batch_sz = input.size()[1]
if hidden is None:
hidden = self.init_hidden(batch_sz)
emb = self.drop(self.embed(input))
output, hidden = self.decoder_rnn(emb, hidden)
output = self.drop(output)
# output (seq_len, batch, hidden_size * num_directions)
decoded = self.decoder_out(output.view(output.size(0) * output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def forward_decode(self, args, input, ntokens):
seq_len = input.size()[0]
batch_sz = input.size()[1]
# emb: seq_len, batchsz, hid_dim
# hidden: ([2(nlayers),10(batchsz),200],[])
hidden = None
outputs_prob = Variable(torch.FloatTensor(seq_len, batch_sz, ntokens))
if args.cuda:
outputs_prob = outputs_prob.cuda()
outputs = torch.LongTensor(seq_len, batch_sz)
# First time step sos
sos = Variable(torch.ones(batch_sz).long()) # id for sos =1
unk = Variable(torch.ones(batch_sz).long()) * 2 # id for unk =2
if args.cuda:
sos = sos.cuda()
unk = unk.cuda()
        emb_0 = self.drop(self.embed(sos)).unsqueeze(0)
        emb_t = self.drop(self.embed(unk)).unsqueeze(0)
for t in range(seq_len):
# input (seq_len, batch, input_size)
if t == 0:
emb = emb_0
else:
emb = emb_t
            output, hidden = self.decoder_rnn(emb, hidden)
            output_prob = self.decoder_out(self.drop(output))
output_prob = output_prob.squeeze(0)
outputs_prob[t] = output_prob
value, ind = torch.topk(output_prob, 1, dim=1)
outputs[t] = ind.squeeze(1).data
return outputs_prob, outputs
def init_hidden(self, bsz):
return (Variable(torch.zeros(self.nlayers, bsz, self.nhid)).cuda(),
Variable(torch.zeros(self.nlayers, bsz, self.nhid)).cuda())
# weight = next(self.parameters()).data
# if self.rnn_type == 'LSTM':
# return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
# Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
# else:
# return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
```
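`RNNModel` follows the usual teacher-forcing interface: `(seq_len, batch)` token ids in, per-position vocabulary logits out. A shape sketch with made-up sizes, assuming the class above is importable; the hidden state is passed explicitly because `init_hidden` assumes a CUDA device:
```python
import torch
from torch.autograd import Variable

ntoken, ninp, nhid, nlayers = 1000, 64, 64, 1
model = RNNModel(ntoken, ninp, nhid, agenda_dim=0, nlayers=nlayers, dropout=0.0)

seq_len, bsz = 12, 3
inp = Variable(torch.LongTensor(seq_len, bsz).random_(0, ntoken))
hidden = (Variable(torch.zeros(nlayers, bsz, nhid)),
          Variable(torch.zeros(nlayers, bsz, nhid)))

logits, hidden = model(inp, hidden)
print(logits.size())   # (seq_len, bsz, ntoken): one softmax-ready score vector per position
```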
#### File: NVLL/analysis/analyze_nvrnn.py
```python
"Dataptb_Distnor_Modelnvrnn_Emb400_Hid400_lat200_lr0.001_drop0.2"
import logging
import os
import random
import shutil
import time
import numpy
import torch
from NVLL.analysis.analyzer_argparse import parse_arg
from NVLL.data.lm import DataLM
from NVLL.model.nvrnn import RNNVAE
from NVLL.util.util import GVar, swap_by_batch, replace_by_batch, replace_by_batch_with_unk
cos = torch.nn.CosineSimilarity()
class Sample():
def __init__(self, gt, pred, code, recon_nll, kl):
self.gt = gt
self.pred = pred
self.code = code
self.recon_nll = recon_nll
self.kl = kl
self.total_nll = recon_nll + kl
# self.ppl = math.exp(self.total_nll)
def set_nor_stat(self, mean, logvar):
self.dist_type = "nor"
self.mean = mean
self.logvar = logvar
def set_vmf_stat(self, mu):
self.dist_type = "vmf"
self.mu = mu
def set_zero_stat(self):
self.dist_type = "zero"
def __repr__(self):
def list_to_str(l):
l = [str(i) for i in l]
return "\t".join(l)
wt_str = "gt\tpred\trecon_nll\tkl\ttotal_nll\n{}\n{}\n{}\n{}\n{}\n".format(self.gt, self.pred, self.recon_nll,
self.kl,
self.total_nll
)
if self.dist_type == 'nor':
wt_str += "{}\n{}\n{}".format(list_to_str(self.code), list_to_str(self.mean), list_to_str(self.logvar))
elif self.dist_type == 'vmf':
wt_str += "{}\n{}".format(list_to_str(self.code), list_to_str(self.mu))
return wt_str
class ExpAnalyzer():
def __init__(self, root_path="/home/cc/vae_txt",
exp_path="/home/cc/exp-nvrnn",
instance_name=None,
data_path="data/ptb",
eval_batch_size=5,
mix_unk=0,
swap=0, replace=0,
cd_bow=0, cd_bit=0):
self.exp_path = exp_path
self.instance_name = instance_name
self.temp = 1
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(exp_path, instance_name + ".log"))
ch = logging.StreamHandler()
logger.addHandler(fh)
logger.addHandler(ch)
self.logger = logger
self.logger.info("Loading File: {}".format(os.path.join(exp_path, instance_name)))
self.args = self.load_args(exp_path, instance_name)
self.logger.info(
"Pre config: Swap:{}\tReplace:{}\tMixUnk:{}\tCdBit:{}\tCdBoW:{}\nLoaded Hyper-param WILL overwrite pre-config.".format(
self.args.swap, self.args.replace, self.args.mix_unk, self.args.cd_bit, self.args.cd_bow))
self.logger.info("Post config: Swap:{}\tReplace:{}\tMixUnk:{}\tCdBit:{}\tCdBoW:{}".format(
swap, replace, mix_unk, cd_bit, cd_bow
))
if swap != 0:
self.args.swap = swap
if replace != 0:
self.args.replace = replace
if mix_unk != 0:
self.args.mix_unk = mix_unk
if cd_bow > 1 and cd_bow != self.args.cd_bow:
self.logger.warning("Unexpected chage: CD BoW")
self.args.cd_bow = cd_bow
if cd_bit > 1 and cd_bit != self.args.cd_bit:
self.logger.warning("Unexpected chage: CD Bit")
self.args.cd_bit = cd_bit
self.data = self.load_data(os.path.join(root_path, data_path), eval_batch_size, cd_bit > 1)
self.model = self.load_model(self.args, len(self.data.dictionary), exp_path, instance_name)
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
self.crit_sample = torch.nn.CrossEntropyLoss(ignore_index=0, reduce=False)
@staticmethod
def load_args(path, name):
with open(os.path.join(path, name + '.args'), 'rb') as f:
args = torch.load(f)
return args
@staticmethod
    def load_data(data_path, eval_batch_size, condition):
        data = DataLM(data_path, eval_batch_size, eval_batch_size, condition)
return data
@staticmethod
def load_model(args, ntoken, path, name):
model = RNNVAE(args, args.enc_type, ntoken, args.emsize,
args.nhid, args.lat_dim, args.nlayers,
dropout=args.dropout, tie_weights=args.tied,
input_z=args.input_z, mix_unk=args.mix_unk,
condition=(args.cd_bit or args.cd_bow),
input_cd_bow=args.cd_bow, input_cd_bit=args.cd_bit)
model.load_state_dict(torch.load(os.path.join(path, name + '.model')))
from NVLL.util.gpu_flag import GPU_FLAG
if torch.cuda.is_available() and GPU_FLAG:
model = model.cuda()
model = model.eval()
return model
def analyze_batch(self, target, kld, tup, vecs, decoded):
_tmp_bag = []
seq_len, batch_sz = target.size()
for b in range(batch_sz):
gt = target[:, b]
deco = decoded[:, b, :]
if self.model.dist_type == 'zero':
sample = self.analyze_zero(gt, deco)
else:
kl = kld[b]
lat_code = vecs[:, b, :]
if self.model.dist_type == 'vmf':
mu = tup['mu'][b]
sample = self.analyze_vmf(gt, kl, mu, lat_code, deco)
elif self.model.dist_type == 'nor':
mean = tup['mean'][b]
logvar = tup['logvar'][b]
sample = self.analyze_nor(gt, kl, mean, logvar, lat_code, deco)
else:
raise NotImplementedError
_tmp_bag.append(sample)
return _tmp_bag
def analyze_batch_order(self, original_vecs, manipu_vecs):
"""
        Given original codes and manipulated codes, compute their cosine similarity.
:param original_vecs: sample_num, batch_size, lat_code
:param manipu_vecs: sample_num, batch_size, lat_code
:return:
"""
original_vecs = torch.mean(original_vecs, dim=0).unsqueeze(2)
manipu_vecs = torch.mean(manipu_vecs, dim=0).unsqueeze(2)
x = cos(original_vecs, manipu_vecs)
# print(x)
return torch.mean(x.squeeze())
def analyze_batch_word_importance(self, original_vecs, manipu_vecs, masked_words):
pass
def analyze_batch_order_and_importance(self, original_vecs, manipulated_vecs):
_tmp_bag = []
batch_sz = original_vecs.size()[1]
for b in range(batch_sz):
if self.model.dist_type == 'zero':
raise NotImplementedError
else:
lat_code = vecs[:, b, :]
lat_code = torch.mean(lat_code, dim=0)
if self.model.dist_type == 'vmf':
mu = tup['mu']
sample = self.analyze_vmf(gt, kl, mu, lat_code, deco)
elif self.model.dist_type == 'nor':
mean = tup['mean'][b]
logvar = tup['logvar'][b]
sample = self.analyze_nor(gt, kl, mean, logvar, lat_code, deco)
else:
raise NotImplementedError
_tmp_bag.append(sample)
return _tmp_bag
def analyze_zero(self, gt, decoded):
pred_id = self.decode_to_ids(decoded)
gt_id = gt.data.tolist()
pred_words = self.ids_to_words(pred_id)
gt_words = self.ids_to_words(gt_id)
recon_nll = self.criterion(decoded, gt).data[0]
s = Sample(gt=gt_words,
pred=pred_words,
code=None,
recon_nll=recon_nll, kl=0)
s.set_zero_stat()
return s
def analyze_vmf(self, gt, kld, mu, lat_code, decoded):
pred_id = self.decode_to_ids(decoded)
gt_id = gt.data.tolist()
pred_words = self.ids_to_words(pred_id)
gt_words = self.ids_to_words(gt_id)
kl_val = kld.data[0]
mu = mu.data.tolist()
lat_code = torch.mean(lat_code, dim=0)
lat_code = lat_code.data.tolist()
recon_nll = self.criterion(decoded, gt).data[0]
s = Sample(gt=gt_words,
pred=pred_words,
code=lat_code,
recon_nll=recon_nll, kl=kl_val)
s.set_vmf_stat(mu)
return s
def analyze_nor(self, gt, kld, mean, logvar, lat_code, decoded):
pred_id = self.decode_to_ids(decoded)
gt_id = gt.data.tolist()
pred_words = self.ids_to_words(pred_id)
gt_words = self.ids_to_words(gt_id)
kl_val = kld.data[0]
mean = mean.data.tolist()
logvar = logvar.data.tolist()
lat_code = torch.mean(lat_code, dim=0)
lat_code = lat_code.data.tolist()
recon_nll = self.criterion(decoded, gt).data[0]
s = Sample(gt=gt_words,
pred=pred_words,
code=lat_code,
recon_nll=recon_nll, kl=kl_val)
s.set_nor_stat(mean, logvar)
return s
def decode_to_ids(self, prob):
        seq_len, vocab_size = prob.size()
        assert vocab_size == len(self.data.dictionary)
prob = torch.exp(prob).div(self.temp)
out = torch.multinomial(prob, 1)
# _, argmax = torch.max(prob, dim=1, keepdim=False)
ids = out.data.tolist()
ids = [x[0] for x in ids]
return ids
def ids_to_words(self, ids):
words = []
for i in ids:
words.append(self.data.dictionary.query(i))
return " ".join(words)
def write_samples(self, bag):
if os.path.exists(os.path.join(self.exp_path, self.instance_name + 'logs')):
shutil.rmtree(os.path.join(self.exp_path, self.instance_name + 'logs'))
os.mkdir(os.path.join(self.exp_path, self.instance_name + 'logs'))
self.logger.info("Logs path: {}".format(os.path.join(self.exp_path, self.instance_name + 'logs')))
os.chdir(os.path.join(self.exp_path, self.instance_name + 'logs'))
for idx, b in enumerate(bag):
with open("log-" + str(idx) + '.txt', 'w') as fd:
fd.write(repr(b))
def analysis_evaluation(self):
self.logger.info("Start Analyzing ...")
start_time = time.time()
test_batches = self.data.test
self.logger.info("Total {} batches to analyze".format(len(test_batches)))
acc_loss = 0
acc_kl_loss = 0
acc_aux_loss = 0
acc_avg_cos = 0
acc_avg_norm = 0
batch_cnt = 0
all_cnt = 0
cnt = 0
sample_bag = []
try:
for idx, batch in enumerate(test_batches):
if idx % 10 == 0:
print("Idx: {}".format(idx))
seq_len, batch_sz = batch.size()
if self.data.condition:
seq_len -= 1
bit = batch[0, :]
batch = batch[1:, :]
bit = GVar(bit)
else:
bit = None
feed = self.data.get_feed(batch)
if self.args.swap > 0.00001:
feed = swap_by_batch(feed, self.args.swap)
if self.args.replace > 0.00001:
feed = replace_by_batch(feed, self.args.replace, self.model.ntoken)
target = GVar(batch)
recon_loss, kld, aux_loss, tup, vecs, decoded = self.model(feed, target, bit)
# target: seq_len, batchsz
# decoded: seq_len, batchsz, dict_sz
# tup: 'mean' 'logvar' for Gaussian
# 'mu' for vMF
# vecs
bag = self.analyze_batch(target, kld, tup, vecs, decoded)
sample_bag += bag
acc_loss += recon_loss.data * seq_len * batch_sz
acc_kl_loss += torch.sum(kld).data
acc_aux_loss += torch.sum(aux_loss).data
acc_avg_cos += tup['avg_cos'].data
acc_avg_norm += tup['avg_norm'].data
cnt += 1
batch_cnt += batch_sz
all_cnt += batch_sz * seq_len
except KeyboardInterrupt:
print("early stop")
self.write_samples(sample_bag)
cur_loss = acc_loss[0] / all_cnt
cur_kl = acc_kl_loss[0] / all_cnt
cur_real_loss = cur_loss + cur_kl
return cur_loss, cur_kl, cur_real_loss
def analysis_eval_word_importance(self, feed, batch, bit):
"""
        Given a sentence, replace each word in turn by UNK and see how the latent code changes from the original one.
:param feed:
:param batch:
:param bit:
:return:
"""
seq_len, batch_sz = batch.size()
target = GVar(batch)
origin_feed = feed.clone()
original_recon_loss, kld, _, original_tup, original_vecs, _ = self.model(origin_feed, target, bit)
# original_vecs = torch.mean(original_vecs, dim=0).unsqueeze(2)
original_mu = original_tup['mu']
# table_of_code = torch.FloatTensor(seq_len, batch_sz )
table_of_mu = torch.FloatTensor(seq_len, batch_sz)
for t in range(seq_len):
cur_feed = feed.clone()
cur_feed[t, :] = 2
cur_recon, _, _, cur_tup, cur_vec, _ = self.model(cur_feed, target, bit)
cur_mu = cur_tup['mu']
# cur_vec = torch.mean(cur_vec, dim=0).unsqueeze(2)
# x = cos(original_vecs, cur_vec)
# x= x.squeeze()
y = cos(original_mu, cur_mu)
y = y.squeeze()
# table_of_code[t,:] = x.data
table_of_mu[t, :] = y.data
bag = []
for b in range(batch_sz):
weight = table_of_mu[:, b]
word_ids = feed[:, b]
words = self.ids_to_words(word_ids.data.tolist())
seq_of_words = words.split(" ")
s = ""
for t in range(seq_len):
if weight[t] < 0.98:
s += "*" + seq_of_words[t] + "* "
else:
s += seq_of_words[t] + " "
bag.append(s)
return bag
def analysis_eval_order(self, feed, batch, bit):
assert 0.33 > self.args.swap > 0.0001
origin_feed = feed.clone()
feed_1x = swap_by_batch(feed.clone(), self.args.swap)
feed_2x = swap_by_batch(feed.clone(), self.args.swap * 2)
feed_3x = swap_by_batch(feed.clone(), self.args.swap * 3)
feed_4x = swap_by_batch(feed.clone(), self.args.swap * 4)
feed_5x = swap_by_batch(feed.clone(), self.args.swap * 5)
feed_6x = swap_by_batch(feed.clone(), self.args.swap * 6)
target = GVar(batch)
# recon_loss, kld, aux_loss, tup, vecs, decoded = self.model(feed, target, bit)
original_recon_loss, kld, _, original_tup, original_vecs, _ = self.model(origin_feed, target, bit)
if 'Distnor' in self.instance_name:
key_name = "mean"
elif 'vmf' in self.instance_name:
key_name = "mu"
else:
raise NotImplementedError
original_mu = original_tup[key_name]
recon_loss_1x, _, _, tup_1x, vecs_1x, _ = self.model(feed_1x, target, bit)
recon_loss_2x, _, _, tup_2x, vecs_2x, _ = self.model(feed_2x, target, bit)
recon_loss_3x, _, _, tup_3x, vecs_3x, _ = self.model(feed_3x, target, bit)
recon_loss_4x, _, _, tup_4x, vecs_4x, _ = self.model(feed_4x, target, bit)
recon_loss_5x, _, _, tup_5x, vecs_5x, _ = self.model(feed_5x, target, bit)
recon_loss_6x, _, _, tup_6x, vecs_6x, _ = self.model(feed_6x, target, bit)
# target: seq_len, batchsz
# decoded: seq_len, batchsz, dict_sz
# tup: 'mean' 'logvar' for Gaussian
# 'mu' for vMF
# vecs
# cos_1x = self.analyze_batch_order(original_vecs, vecs_1x).data
# cos_2x = self.analyze_batch_order(original_vecs, vecs_2x).data
# cos_3x = self.analyze_batch_order(original_vecs, vecs_3x).data
cos_1x = torch.mean(cos(original_mu, tup_1x[key_name])).data
cos_2x = torch.mean(cos(original_mu, tup_2x[key_name])).data
cos_3x = torch.mean(cos(original_mu, tup_3x[key_name])).data
cos_4x = torch.mean(cos(original_mu, tup_4x[key_name])).data
cos_5x = torch.mean(cos(original_mu, tup_5x[key_name])).data
cos_6x = torch.mean(cos(original_mu, tup_6x[key_name])).data
# print(cos_1x, cos_2x, cos_3x)
return [
[original_recon_loss.data, recon_loss_1x.data, recon_loss_2x.data, recon_loss_3x.data, recon_loss_4x.data,
recon_loss_5x.data, recon_loss_6x.data]
, [cos_1x, cos_2x, cos_3x, cos_4x, cos_5x, cos_6x]]
def unpack_bag_order(self, sample_bag):
import numpy as np
l = len(sample_bag)
print("Total {} batches".format(l))
        acc_loss = np.zeros(7)  # origin + 6 swap levels; float avoids integer truncation
        acc_cos = np.zeros(6)
acc_cnt = 0
# print(sample_bag)
for b in sample_bag:
acc_cnt += 1
losses = b[0]
# print(losses)
for idx, x in enumerate(losses):
acc_loss[idx] += x
_cos = b[1]
# print(b[1])
acc_cos += np.asarray(_cos)
# for idx, x in enumerate(_cos):
# acc_cos[idx] += np.asarray(x[0])
acc_loss = [x / acc_cnt for x in acc_loss]
acc_cos = [x / acc_cnt for x in acc_cos]
instance.logger.info("-" * 50)
instance.logger.info(
"Origin Loss|1x|2x|3x|4x:\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(acc_loss[0], acc_loss[1], acc_loss[2],
acc_loss[3], acc_loss[4],
acc_loss[5], acc_loss[6]))
instance.logger.info(
"Cos 1x|2x|3x|4x|5x|6x:\t{}\t{}\t{}\t{}\t{}\t{}\n".format(acc_cos[0], acc_cos[1], acc_cos[2], acc_cos[3],
acc_cos[4], acc_cos[5]))
return acc_cos, acc_loss
def unpack_bag_word_importance(self, sample_bag):
for b in sample_bag:
for x in b:
print(x)
print("-" * 80)
def analysis_evaluation_order_and_importance(self):
"""
        Measure how the cosine similarity of the latent code changes when the input word order is perturbed.
:return:
"""
self.logger.info("Start Analyzing ... Picking up 100 batches to analyze")
start_time = time.time()
test_batches = self.data.test
random.shuffle(test_batches)
test_batches = test_batches[:100]
self.logger.info("Total {} batches to analyze".format(len(test_batches)))
acc_loss = 0
acc_kl_loss = 0
batch_cnt = 0
all_cnt = 0
cnt = 0
sample_bag = []
try:
for idx, batch in enumerate(test_batches):
if idx % 10 == 0:
print("Now Idx: {}".format(idx))
seq_len, batch_sz = batch.size()
if self.data.condition:
seq_len -= 1
bit = batch[0, :]
batch = batch[1:, :]
bit = GVar(bit)
else:
bit = None
feed = self.data.get_feed(batch)
if self.args.swap > 0.0001:
bag = self.analysis_eval_order(feed, batch, bit)
elif self.args.replace > 0.0001:
bag = self.analysis_eval_word_importance(feed, batch, bit)
else:
print("Maybe Wrong mode?")
raise NotImplementedError
sample_bag.append(bag)
except KeyboardInterrupt:
print("early stop")
if self.args.swap > 0.0001:
return self.unpack_bag_order(sample_bag)
elif self.args.replace > 0.0001:
return self.unpack_bag_word_importance(sample_bag)
else:
raise NotImplementedError
class visual_gauss():
def __init__(self, d):
self.logs = []
self.dict = d
def add_batch(self, target, tup, kld, loss):
seq_len, batch_sz = loss.size()
_seq_len, _batch_sz = target.size()
__batch = kld.size()[0]
assert seq_len == _seq_len
assert batch_sz == _batch_sz == __batch
mean = tup['mean']
logvar = tup['logvar']
# print(target.size())
# print(batch_sz)
for b in range(batch_sz):
this_target = target[:, b]
this_mean = mean[b]
this_logvar = logvar[b]
this_kld = kld[b]
this_loss = loss[:, b]
self.add_single(this_target, this_mean, this_logvar,
this_kld, this_loss)
def add_single(self, target, mean, logvar, kld, loss):
norm_mean = torch.norm(mean).data[0]
norm_var = torch.norm(torch.exp(logvar)).data[0]
length = len(target)
seq = ''
for t in target:
seq += self.dict.idx2word[t] + '_'
self.logs.append("{}\t{}\t{}\t{}\t{}\t{}".format(norm_mean, norm_var, kld, torch.mean(loss)
, length, seq))
def write_log(self):
with open('vslog.txt', 'w') as f:
f.write('\n'.join(self.logs))
class visual_vmf():
def __init__(self, d):
self.logs = []
self.dict = d
def add_batch(self, target, tup, kld):
_seq_len, _batch_sz = target.size()
# __batch = kld.size()[0]
mu = tup['mu']
# print(target.size())
# print(batch_sz)
for b in range(_batch_sz):
this_target = target[:, b]
this_mu = mu[b]
self.add_single(this_target, this_mu)
def add_single(self, target, mu):
thismu = mu.data
length = len(target)
seq = ''
for t in target:
seq += self.dict.idx2word[t] + '_'
# self.logs.append("{}\t{}\t{}\t{}\t{}\t{}".format(norm_mean,kld,torch.mean(loss)
# ,length, seq))
tmp = []
for i in thismu:
tmp.append(str(i))
s = '\t'.join(tmp)
self.logs.append(s)
def write_log(self):
with open('vh.txt', 'w') as f:
f.write('\n'.join(self.logs))
# with open('vu.txt', 'w') as f:
# f.write('\n'.join(self.logs))
def query(word):
with open('/home/jcxu/vae_txt/data/ptb/test.txt', 'r') as f:
lines = f.read().splitlines()
bag = []
for l in lines:
if word in l:
bag.append(l)
with open('/home/jcxu/vae_txt/data/ptb/test_' + word + '.txt', 'w') as f:
f.write('\n'.join(bag))
def compute_cos(files):
bags = []
for fname in files:
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
bag = []
for l in lines:
nums = []
tabs = l.split('\t')
for t in tabs:
nums.append(float(t))
x = torch.FloatTensor(numpy.asarray(nums))
bag.append(x)
bags.append(bag)
def _mean_of_bag(bag):
x = 0
for b in range(len(bag)):
x += bag[b]
tmp = x / len(bag)
# print('avg of bag {}'.format(tmp))
return tmp
def comp_cos(a, b):
return (torch.sum(a * b) / (torch.norm(a) * torch.norm(b)))
A = bags[0] # h
B = bags[1] # j
print(comp_cos(_mean_of_bag(A), _mean_of_bag(B)))
print('-' * 50)
arec = []
for idx, aa in enumerate(A):
for jdx in range(idx, len(A)):
print('{}\t{}\t{}'.format(idx, jdx, comp_cos(aa, A[jdx])))
arec.append(comp_cos(aa, A[jdx]))
print(sum(arec) / float(len(arec)))
print('-' * 50)
brec = []
for idx, aa in enumerate(B):
for jdx in range(idx, len(B)):
print("{}\t{}\t{}".format(idx, jdx, comp_cos(aa, B[jdx])))
brec.append(comp_cos(aa, B[jdx]))
print(sum(brec) / float(len(brec)))
if __name__ == '__main__':
args = parse_arg()
instance = ExpAnalyzer(root_path=args.root_path,
exp_path=args.exp_path,
instance_name=
# "Datayelp_Distvmf_Modelnvrnn_EnclstmBiFalse_Emb100_Hid400_lat100_lr10.0_drop0.5_kappa40.0_auxw0.0001_normfFalse_nlay1_mixunk0.0_inpzTrue_cdbit50_cdbow0"
args.instance_name
,
data_path=args.data_path,
eval_batch_size=args.eval_batch_size,
mix_unk=args.mix_unk,
swap=args.swap, replace=args.replace,
cd_bow=args.cd_bow, cd_bit=args.cd_bit)
cur_loss, cur_kl, cur_real_loss = instance.analysis_evaluation()
# instance.logger.info("{}\t{}\t{}".format(cur_loss, cur_kl, cur_real_loss))
# print(cur_loss, cur_kl, cur_real_loss, numpy.math.exp(cur_real_loss))
# with open(os.path.join(args.exp_path,args.board),'a' )as fd:
# fd.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
# args.data_path, args.instance_name, args.mix_unk,
# args.swap, args.replace ,args.cd_bow,args.cd_bit,cur_loss,cur_kl, cur_real_loss,
# numpy.math.exp(cur_real_loss)))
acc_cos, acc_loss = instance.analysis_evaluation_order_and_importance()
with open(os.path.join(args.exp_path, args.board), 'a')as fd:
fd.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
args.data_path, args.instance_name, args.mix_unk,
args.swap, args.replace, args.cd_bow, args.cd_bit,
acc_loss[0], acc_loss[1], acc_loss[2], acc_loss[3], acc_cos[0], acc_cos[1], acc_cos[2]))
# "--data_path data/yelp --swap 0 --replace 0 --cd_bit 50 --root_path /home/cc/vae_txt --exp_path /home/cc/save-nvrnn --instance_name Datayelp_Distvmf_Modelnvrnn_EnclstmBiFalse_Emb100_Hid400_lat100_lr10.0_drop0.5_kappa200.0_auxw0.0001_normfFalse_nlay1_mixunk1.0_inpzTrue_cdbit50_cdbow0_4.9021353610814655 --cd_bow 0 --mix_unk 1"
```
#### File: NVLL/analysis/analyzer_argparse.py
```python
import argparse
def parse_arg():
parser = argparse.ArgumentParser(description='Analyzer')
parser.add_argument('--board', type=str, default='ana_board.log')
parser.add_argument('--root_path', type=str, default='/home/cc/vae_txt')
parser.add_argument('--exp_path', type=str, default='/home/cc/exp-nvrnn')
parser.add_argument('--instance_name', type=str)
parser.add_argument('--data_path', type=str, default='data/ptb', help='location of the data corpus')
parser.add_argument('--eval_batch_size', type=int, default=10, help='evaluation batch size')
parser.add_argument('--mix_unk', type=float, default=0)
parser.add_argument('--swap', action='store', default=0.2, type=float,
help='Probability of swapping a word')
parser.add_argument('--replace', action='store', default=0, type=float,
help='Probability of replacing a word with a random word.')
parser.add_argument('--cd_bow', action='store', default=0, type=int)
parser.add_argument('--cd_bit', action='store', default=0, type=int)
parser.add_argument('--temp', action='store', default=1, type=float)
parser.add_argument('--split', action='store', default=0, type=int)
args = parser.parse_args()
return args
```
#### File: NVLL/analysis/word_freq.py
```python
import os
def count(dic, fname):
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
filtered_sents = []
for l in lines:
words = l.split(" ")
_ratio = comp_unk_ratio(words)
if _ratio <= 0.05:
filtered_sents.append(words)
for w in words:
if w in dic:
dic[w] += 1
else:
dic[w] = 1
return dic, filtered_sents
def read_sent():
pass
def comp_unk_ratio(sent):
total = len(sent) + 0.000001
cnt = 0
for w in sent:
if w == '<unk>':
cnt += 1
return cnt / total
def comp_ratio():
pass
def generate_based_on_word_freq():
count_word_freq()
def generate_based_on_sentiment():
pass
def count_word_freq():
d = {}
os.chdir("../../data/yelp")
d, _ = count(d, "valid.txt")
d, filtered_sents_test = count(d, "test.txt")
sorted_d = sorted(d, key=d.get, reverse=True)
print("Len of trimmed vocab {}".format(len(sorted_d)))
print("Num of Test samples after trimming {}".format(len(filtered_sents_test)))
uncommon = sorted_d[-10000:]
print(uncommon)
divide = 5
every = int(len(filtered_sents_test) / divide)
sent_dictionary = {}
for sent in filtered_sents_test:
total = len(sent)
cnt = 0.
for w in sent:
if w in uncommon:
cnt += 1
sent_dictionary[" ".join(sent)] = cnt / total
sorted_sents = sorted(sent_dictionary, key=sent_dictionary.get, reverse=True)
for piece in range(divide):
start = int(piece * every)
end = int((piece + 1) * every)
tmp_sents = sorted_sents[start:end]
with open("test-rare-" + str(piece) + ".txt", 'w') as fd:
fd.write("\n".join(tmp_sents))
if __name__ == "__main__":
bank_size = 1000
# Generate 2 set of sentences.
# Before beginning
# if a sentence has more than 10% UNK, remove it.
############
# Based on WordFreq Vocab size=15K
# Divide
# Top 1K sample with largest Common Word Ratio (common word= top3K freq word)
# Top 1K sample with largest Uncommon Word Ratio (uncommon word= top3K infreq word)
generate_based_on_word_freq()
############
# Based on Sentiment (sample from 5star and 1star)
#############
```
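The filter in `count()` drops any sentence whose `<unk>` ratio exceeds 5% before the rarity bucketing. A minimal standalone sketch of that check on toy sentences (not taken from the corpus):
```python
# Toy illustration of the <unk>-ratio filter used in count(); the real code
# adds a small epsilon to the length to avoid dividing by zero.
def unk_ratio(sent):
    total = len(sent) + 0.000001
    return sum(1 for w in sent if w == '<unk>') / total

sentences = [["the", "food", "was", "great"], ["<unk>", "was", "<unk>", "here"]]
kept = [s for s in sentences if unk_ratio(s) <= 0.05]
print(kept)  # only the first sentence survives
```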
#### File: NVLL/distribution/archived_vmf.py
```python
import numpy as np
import torch
from scipy import special as sp
from NVLL.util.util import GVar
class vMF(torch.nn.Module):
def __init__(self, lat_dim, kappa=0):
super().__init__()
self.lat_dim = lat_dim
self.func_mu = torch.nn.Linear(lat_dim, lat_dim)
self.kappa = kappa
self.norm_eps = 1
self.normclip = torch.nn.Hardtanh(0, 10 - 1)
def estimate_param(self, latent_code):
mu = self.mu(latent_code)
return {'mu': mu}
def compute_KLD(self):
kld = GVar(torch.zeros(1))
return kld
def vmf_unif_sampler(self, mu):
batch_size, id_dim = mu.size()
result_list = []
for i in range(batch_size):
munorm = mu[i].norm().expand(id_dim)
munoise = self.add_norm_noise(munorm, self.norm_eps)
if float(mu[i].norm().data.cpu().numpy()) > 1e-10:
# sample offset from center (on sphere) with spread kappa
w = self._sample_weight(self.kappa, id_dim)
wtorch = GVar(w * torch.ones(id_dim))
# sample a point v on the unit sphere that's orthogonal to mu
v = self._sample_orthonormal_to(mu[i] / munorm, id_dim)
# compute new point
scale_factr = torch.sqrt(GVar(torch.ones(id_dim)) - torch.pow(wtorch, 2))
orth_term = v * scale_factr
muscale = mu[i] * wtorch / munorm
sampled_vec = (orth_term + muscale) * munoise
else:
rand_draw = GVar(torch.randn(id_dim))
rand_draw = rand_draw / torch.norm(rand_draw, p=2).expand(id_dim)
rand_norms = (torch.rand(1) * self.norm_eps).expand(id_dim)
sampled_vec = rand_draw * GVar(rand_norms) # mu[i]
result_list.append(sampled_vec)
return torch.stack(result_list, 0)
def vmf_sampler(self, mu):
mu = mu.cpu()
batch_size, id_dim = mu.size()
result_list = []
for i in range(batch_size):
munorm = mu[i].norm().expand(id_dim) # TODO norm p=?
if float(mu[i].norm().data.cpu().numpy()) > 1e-10:
# sample offset from center (on sphere) with spread kappa
# w = self._sample_weight(self.kappa, id_dim) # TODO mine?
w = vMF.sample_vmf_w(self.kappa, id_dim)
wtorch = GVar(w * torch.ones(id_dim))
# sample a point v on the unit sphere that's orthogonal to mu
v = self._sample_orthonormal_to(mu[i] / munorm, id_dim)
# v= vMF.sample_vmf_v(mu[i])
# compute new point
scale_factr = torch.sqrt(GVar(torch.ones(id_dim)) - torch.pow(wtorch, 2))
orth_term = v * scale_factr
muscale = mu[i] * wtorch / munorm
sampled_vec = (orth_term + muscale) * munorm
else:
rand_draw = GVar(torch.randn(id_dim))
rand_draw = rand_draw / torch.norm(rand_draw, p=2).expand(id_dim)
rand_norms = (torch.rand(1) * self.norm_eps).expand(id_dim)
sampled_vec = rand_draw * GVar(rand_norms) # mu[i]
result_list.append(sampled_vec)
return torch.stack(result_list, 0).cuda()
def build_bow_rep(self, lat_code, n_sample):
batch_sz = lat_code.size()[0]
tup = self.estimate_param(latent_code=lat_code)
kld = self.compute_KLD()
vecs = []
for ns in range(n_sample):
vec = self.vmf_unif_sampler(tup['mu'])
vecs.append(vec)
# eps = self.vmf_sampler(tup['mu'])
return tup, kld, vecs
@staticmethod
def _sample_weight(kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2) # dim * (kdiv *x + np.log(1-x**2))
while True:
z = np.random.beta(dim / 2., dim / 2.) # concentrates towards 0.5 as d-> inf
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
u): # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
return w
def _sample_orthonormal_to(self, mu, dim):
"""Sample point on sphere orthogonal to mu.
"""
v = GVar(torch.randn(dim))
rescale_value = mu.dot(v) / mu.norm()
proj_mu_v = mu * rescale_value.expand(dim)
ortho = v - proj_mu_v
ortho_norm = torch.norm(ortho)
return ortho / ortho_norm.expand_as(ortho)
@staticmethod
def sample_vmf_v(mu):
import scipy.linalg as la
mat = np.matrix(mu)
if mat.shape[1] > mat.shape[0]:
mat = mat.T
U, _, _ = la.svd(mat)
nu = np.matrix(np.random.randn(mat.shape[0])).T
x = np.dot(U[:, 1:], nu[1:, :])
return x / la.norm(x)
@staticmethod
def sample_vmf_w(kappa, m):
b = (-2 * kappa + np.sqrt(4. * kappa ** 2 + (m - 1) ** 2)) / (m - 1)
a = (m - 1 + 2 * kappa + np.sqrt(4 * kappa ** 2 + (m - 1) ** 2)) / 4
d = 4 * a * b / (1 + b) - (m - 1) * np.log(m - 1)
while True:
z = np.random.beta(0.5 * (m - 1), 0.5 * (m - 1))
W = (1 - (1 + b) * z) / (1 + (1 - b) * z)
T = 2 * a * b / (1 + (1 - b) * z)
u = np.random.uniform(0, 1)
if (m - 1) * np.log(T) - T + d >= np.log(u):
return W
def add_norm_noise(self, munorm, eps):
"""
KL loss is - log(maxvalue/eps)
cut at maxvalue-eps, and add [0,eps] noise.
"""
trand = torch.rand(1).expand(munorm.size()) * eps
return (self.normclip(munorm) + GVar(trand))
def mod_Bessel_first_kind(kappa, d):
# Modified Bessel function of the first kind
return sp.iv(d, kappa)
def _vmfKL(k, d):
return k * ((sp.iv(d / 2.0 + 1.0, k) \
+ sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
+ d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
- sp.loggamma(d / 2 + 1) - d * np.log(2) / 2
def _Constant(k, d):
    # Normalization constant of the vMF density: k^(d/2 - 1) / ((2*pi)^(d/2) * I_{d/2 - 1}(k)),
    # so the Bessel order is d/2 - 1 and its argument is the concentration k.
    return np.power(k, d / 2 - 1) / (np.power(2 * np.pi, d / 2) * mod_Bessel_first_kind(k, d / 2 - 1))
print(type(torch.ones(1) * _vmfKL(10, 200).real))
print(_Constant(0.1, 100))
```
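A small numeric check of the closed-form KL term, shown as if appended to the bottom of the file above so `_vmfKL` is in scope; the kappa values below are illustrative.
```python
# Hypothetical check (not part of the original file): print the closed-form
# vMF KL term for a few concentration values at fixed dimension d = 200.
for kappa_value in (10.0, 50.0, 100.0):
    print(kappa_value, _vmfKL(kappa_value, 200).real)
```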
#### File: NVLL/distribution/gauss.py
```python
import torch.nn as nn
import torch
import numpy as np
from NVLL.util.gpu_flag import device
class Gauss(nn.Module):
# __slots__ = ['lat_dim', 'logvar', 'mean']
def __init__(self, hid_dim, lat_dim):
super().__init__()
self.hid_dim = hid_dim
self.lat_dim = lat_dim
self.func_mean = torch.nn.Linear(hid_dim, lat_dim)
self.func_logvar = torch.nn.Linear(hid_dim, lat_dim)
# self.gate_mean = nn.Parameter(torch.rand(1))
# self.gate_var = nn.Parameter(torch.rand(1))
def estimate_param(self, latent_code):
mean = self.func_mean(latent_code)
logvar = self.func_logvar(latent_code)
return {'mean': mean, 'logvar': logvar}
def compute_KLD(self, tup):
mean = tup['mean']
logvar = tup['logvar']
kld = -0.5 * torch.sum(1 - torch.mul(mean, mean) +
2 * logvar - torch.exp(2 * logvar), dim=1)
return kld
def sample_cell(self, batch_size):
eps = torch.autograd.Variable(torch.normal(torch.zeros((batch_size, self.lat_dim))))
        eps = eps.to(device)  # .to() is not in-place, so reassign to actually move eps onto the device
# if torch.cuda.is_available() and GPU_FLAG:
# eps = eps.cuda()
return eps.unsqueeze(0)
def build_bow_rep(self, lat_code, n_sample):
batch_sz = lat_code.size()[0]
tup = self.estimate_param(latent_code=lat_code)
mean = tup['mean']
logvar = tup['logvar']
kld = self.compute_KLD(tup)
if n_sample == 1:
eps = self.sample_cell(batch_size=batch_sz)
vec = torch.mul(torch.exp(logvar), eps) + mean
return tup, kld, vec
vecs = []
for ns in range(n_sample):
eps = self.sample_cell(batch_size=batch_sz)
vec = torch.mul(torch.exp(logvar), eps) + mean
vecs.append(vec)
vecs = torch.cat(vecs, dim=0)
return tup, kld, vecs
def get_aux_loss_term(self, tup):
return torch.from_numpy(np.zeros([1]))
```
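Note that `logvar` in `Gauss` behaves as a log standard deviation: sampling uses `exp(logvar) * eps` and the KL term uses `exp(2 * logvar)` as the variance. A self-contained check of the KL expression with made-up values:
```python
import torch

# For unit variance (log_std = 0) the KL against N(0, I) reduces to 0.5 * ||mean||^2.
mean = torch.tensor([[1.0, 0.0]])
log_std = torch.tensor([[0.0, 0.0]])
kld = -0.5 * torch.sum(1 - mean * mean + 2 * log_std - torch.exp(2 * log_std), dim=1)
print(kld)  # tensor([0.5000])
```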
#### File: NVLL/distribution/vmf_unif.py
```python
import numpy as np
import torch
from scipy import special as sp
from NVLL.util.util import GVar
class unif_vMF(torch.nn.Module):
def __init__(self, hid_dim, lat_dim, kappa=1, norm_max=2, norm_func=True):
super().__init__()
self.hid_dim = hid_dim
self.lat_dim = lat_dim
self.kappa = kappa
# self.func_kappa = torch.nn.Linear(hid_dim, lat_dim)
self.func_mu = torch.nn.Linear(hid_dim, lat_dim)
self.func_norm = torch.nn.Linear(hid_dim, 1)
# self.noise_scaler = kappa
self.norm_eps = 1
self.norm_max = norm_max
self.norm_clip = torch.nn.Hardtanh(0.00001, self.norm_max - self.norm_eps)
self.norm_func = norm_func
# KLD accounts for both VMF and uniform parts
kld_value = unif_vMF._vmf_kld(kappa, lat_dim) \
+ unif_vMF._uniform_kld(0., self.norm_eps, 0., self.norm_max)
self.kld = GVar(torch.from_numpy(np.array([kld_value])).float())
print('KLD: {}'.format(self.kld.data[0]))
def estimate_param(self, latent_code):
"""
Compute z_dir and z_norm for vMF.
norm_func means using another NN to compute the norm (batchsz, 1)
:param latent_code: batchsz, hidden size
:return: dict with kappa, mu(batchsz, lat_dim), norm (duplicate in row) (batchsz, lat_dim), (opt)redundant_norm
"""
ret_dict = {}
ret_dict['kappa'] = self.kappa
mu = self.func_mu(latent_code)
# Use additional function to compute z_norm
mu = mu / torch.norm(mu, p=2, dim=1, keepdim=True)
ret_dict['mu'] = mu
norm = self.func_norm(latent_code) # TODO guarantee norm>0?
clipped_norm = self.norm_clip(norm)
redundant_norm = torch.max(norm - clipped_norm, torch.zeros_like(norm))
ret_dict['norm'] = clipped_norm.expand_as(mu)
ret_dict['redundant_norm'] = redundant_norm
return ret_dict
def compute_KLD(self, tup, batch_sz):
return self.kld.expand(batch_sz)
@staticmethod
def _vmf_kld(k, d):
tmp = (k * ((sp.iv(d / 2.0 + 1.0, k) + sp.iv(d / 2.0, k) * d / (2.0 * k)) / sp.iv(d / 2.0, k) - d / (2.0 * k)) \
+ d * np.log(k) / 2.0 - np.log(sp.iv(d / 2.0, k)) \
- sp.loggamma(d / 2 + 1) - d * np.log(2) / 2).real
return tmp
@staticmethod
# KL divergence of Unix([x1,x2]) || Unif([y1,y2]), where [x1,x2] should be a subset of [y1,y2]
def _uniform_kld(x1, x2, y1, y2):
if x1 < y1 or x2 > y2:
raise Exception(
"KLD is infinite: Unif([" + repr(x1) + "," + repr(x2) + "])||Unif([" + repr(y1) + "," + repr(y2) + "])")
return np.log((y2 - y1) / (x2 - x1))
def build_bow_rep(self, lat_code, n_sample):
batch_sz = lat_code.size()[0]
tup = self.estimate_param(latent_code=lat_code)
mu = tup['mu']
norm = tup['norm']
kappa = tup['kappa']
kld = self.compute_KLD(tup, batch_sz)
vecs = []
if n_sample == 1:
return tup, kld, self.sample_cell(mu, norm, kappa)
for n in range(n_sample):
sample = self.sample_cell(mu, norm, kappa)
vecs.append(sample)
vecs = torch.cat(vecs, dim=0)
return tup, kld, vecs
def sample_cell(self, mu, norm, kappa):
"""
:param mu: z_dir (batchsz, lat_dim) . ALREADY normed.
:param norm: z_norm (batchsz, lat_dim).
:param kappa: scalar
:return:
"""
"""vMF sampler in pytorch.
http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
Args:
mu (Tensor): of shape (batch_size, 2*word_dim)
kappa (Float): controls dispersion. kappa of zero is no dispersion.
"""
batch_sz, lat_dim = mu.size()
# Unif VMF
norm_with_noise = self.add_norm_noise_batch(norm, self.norm_eps)
# Unif VMF
w = self._sample_weight_batch(kappa, lat_dim, batch_sz)
w = w.unsqueeze(1)
w_var = GVar(w * torch.ones(batch_sz, lat_dim))
v = self._sample_ortho_batch(mu, lat_dim)
scale_factr = torch.sqrt(
GVar(torch.ones(batch_sz, lat_dim)) - torch.pow(w_var, 2))
orth_term = v * scale_factr
muscale = mu * w_var
sampled_vec = (orth_term + muscale) * norm_with_noise
return sampled_vec.unsqueeze(0)
#
# result_list = []
# for i in range(batch_size):
#
# norm_with_noise = self.add_norm_noise(norm[i], self.norm_eps)
#
# if float(mu[i].norm().data.cpu().numpy()) > 1e-10:
# # sample offset from center (on sphere) with spread kappa
# w = self._sample_weight(kappa, id_dim)
# wtorch = GVar(w * torch.ones(id_dim))
#
# # sample a point v on the unit sphere that's orthogonal to mu
# v = self._sample_orthonormal_to(mu[i], id_dim)
#
# # compute new point
# scale_factr = torch.sqrt(GVar(torch.ones(id_dim)) - torch.pow(wtorch, 2))
# orth_term = v * scale_factr
# muscale = mu[i] * wtorch
# sampled_vec = (orth_term + muscale) * norm_with_noise
# else:
# rand_draw = GVar(torch.randn(id_dim))
# rand_draw = rand_draw / torch.norm(rand_draw, p=2).expand(id_dim)
# rand_norms = (torch.rand(1) * self.norm_eps).expand(id_dim)
# sampled_vec = rand_draw * GVar(rand_norms) # mu[i]
# result_list.append(sampled_vec)
#
# return torch.stack(result_list, 0).unsqueeze(0)
def _sample_weight(self, kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2) # dim * (kdiv *x + np.log(1-x**2))
while True:
z = np.random.beta(dim / 2., dim / 2.) # concentrates towards 0.5 as d-> inf
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
u): # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
return w
def _sample_orthonormal_to(self, mu, dim):
"""Sample point on sphere orthogonal to mu.
"""
v = GVar(torch.randn(dim))
rescale_value = mu.dot(v) / mu.norm()
proj_mu_v = mu * rescale_value.expand(dim)
ortho = v - proj_mu_v
ortho_norm = torch.norm(ortho)
return ortho / ortho_norm.expand_as(ortho)
def add_norm_noise(self, munorm, eps):
"""
KL loss is - log(maxvalue/eps)
cut at maxvalue-eps, and add [0,eps] noise.
"""
# if np.random.rand()<0.05:
# print(munorm[0])
trand = torch.rand(1).expand(munorm.size()) * eps
return munorm + GVar(trand)
def add_norm_noise_batch(self, mu_norm, eps):
batch_sz, lat_dim = mu_norm.size()
noise = GVar(torch.FloatTensor(batch_sz, lat_dim).uniform_(0, eps))
noised_norm = noise + mu_norm
return noised_norm
def _sample_weight_batch(self, kappa, dim, batch_sz=1):
result = torch.FloatTensor((batch_sz))
for b in range(batch_sz):
result[b] = self._sample_weight(kappa, dim)
return result
def _sample_ortho_batch(self, mu, dim):
"""
:param mu: Variable, [batch size, latent dim]
        :param dim: scalar, equal to the latent dim
:return:
"""
_batch_sz, _lat_dim = mu.size()
assert _lat_dim == dim
squeezed_mu = mu.unsqueeze(1)
v = GVar(torch.randn(_batch_sz, dim, 1)) # TODO random
# v = GVar(torch.linspace(-1, 1, steps=dim))
# v = v.expand(_batch_sz, dim).unsqueeze(2)
rescale_val = torch.bmm(squeezed_mu, v).squeeze(2)
proj_mu_v = mu * rescale_val
ortho = v.squeeze() - proj_mu_v
ortho_norm = torch.norm(ortho, p=2, dim=1, keepdim=True)
y = ortho / ortho_norm
return y
def _sample_orthonormal_to(self, mu, dim):
"""Sample point on sphere orthogonal to mu.
"""
v = GVar(torch.randn(dim)) # TODO random
# v = GVar(torch.linspace(-1,1,steps=dim))
rescale_value = mu.dot(v) / mu.norm()
proj_mu_v = mu * rescale_value.expand(dim)
ortho = v - proj_mu_v
ortho_norm = torch.norm(ortho)
return ortho / ortho_norm.expand_as(ortho)
```
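A self-contained sketch of the norm handling in `estimate_param` and `sample_cell`: the raw norm is clipped to `[1e-5, norm_max - norm_eps]` and uniform noise in `[0, norm_eps)` is added, so the final latent norm stays inside `(0, norm_max)`. The input values below are illustrative.
```python
import torch

# norm_max = 2 and norm_eps = 1 mirror the class defaults above.
norm_max, norm_eps = 2.0, 1.0
norm_clip = torch.nn.Hardtanh(0.00001, norm_max - norm_eps)
raw_norm = torch.tensor([[0.3], [1.7], [5.0]])
noised_norm = norm_clip(raw_norm) + torch.rand_like(raw_norm) * norm_eps
print(noised_norm)  # every entry lies strictly inside (0, 2)
```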
#### File: NVLL/framework/eval_nvrnn.py
```python
"Dataptb_Distnor_Modelnvrnn_Emb400_Hid400_lat200_lr0.001_drop0.2"
import os
import random
import time
import numpy
import scipy
import torch
from NVLL.data.lm import DataLM
from NVLL.framework.train_eval_nvrnn import Runner
from NVLL.model.nvrnn import RNNVAE
from NVLL.util.util import GVar, swap_by_batch, replace_by_batch
class PlayNVRNN():
def __init__(self, load_path, load_name, data_path, swap, replace, mix_unk):
self.args = self.load_args(load_path, load_name)
print(swap, replace, mix_unk)
if swap is not None:
self.args.swap = swap
if replace is not None:
self.args.replace = replace
if mix_unk is not None:
self.args.mix_unk = mix_unk
self.data = self.load_data(data_path)
self.model = self.load_model(load_path, load_name)
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=0)
self.detail_crit = torch.nn.CrossEntropyLoss(ignore_index=0, reduce=False)
def load_data(self, data_path):
data = DataLM(data_path, self.args.batch_size, self.args.eval_batch_size)
return data
def load_args(self, path, name):
from NVLL.argparser import parse_arg
with open(os.path.join(path, name + '.args'), 'rb') as f:
args = torch.load(f)
return args
def load_model(self, path, name):
model = RNNVAE(self.args, self.args.enc_type, len(self.data.dictionary), self.args.emsize,
self.args.nhid, self.args.lat_dim, self.args.nlayers,
dropout=self.args.dropout, tie_weights=self.args.tied,
input_z=self.args.input_z, mix_unk=self.args.mix_unk,
condition=(self.args.cd_bit or self.args.cd_bow),
input_cd_bow=self.args.cd_bow, input_cd_bit=self.args.cd_bit)
model.load_state_dict(torch.load(os.path.join(path, name + '.model')))
model = model.cuda()
return model
def eva(self):
# Load the best saved model.
cur_loss, cur_kl, test_loss = self.evaluate(self.args, self.model,
self.data.test)
Runner.log_eval(None, 0, cur_loss, cur_kl, test_loss, True)
return cur_loss, cur_kl, test_loss
def evaluate(self, args, model, dev_batches):
# Turn on training mode which enables dropout.
model.eval()
model.FLAG_train = False
acc_loss = 0
acc_kl_loss = 0
acc_aux_loss = 0
acc_avg_cos = 0
acc_avg_norm = 0
batch_cnt = 0
all_cnt = 0
cnt = 0
start_time = time.time()
for idx, batch in enumerate(dev_batches):
feed = self.data.get_feed(batch)
target = GVar(batch)
seq_len, batch_sz = batch.size()
if self.args.swap > 0.00001:
feed = swap_by_batch(feed, self.args.swap)
if self.args.replace > 0.00001:
feed = replace_by_batch(feed, self.args.replace, self.model.ntoken)
recon_loss, kld, aux_loss, tup, vecs = model(feed, target)
acc_loss += recon_loss.data * seq_len * batch_sz
acc_kl_loss += torch.sum(kld).data
acc_aux_loss += torch.sum(aux_loss).data
acc_avg_cos += tup['avg_cos'].data
acc_avg_norm += tup['avg_norm'].data
cnt += 1
batch_cnt += batch_sz
all_cnt += batch_sz * seq_len
cur_loss = acc_loss[0] / all_cnt
cur_kl = acc_kl_loss[0] / all_cnt
cur_aux_loss = acc_aux_loss[0] / all_cnt
cur_avg_cos = acc_avg_cos[0] / cnt
cur_avg_norm = acc_avg_norm[0] / cnt
cur_real_loss = cur_loss + cur_kl
# Runner.log_eval(print_ppl)
# print('loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format( cur_loss, cur_kl, math.exp(print_ppl)))
return cur_loss, cur_kl, cur_real_loss
def play_eval(self, args, model, train_batches, epo, epo_start_time, glob_iter):
# reveal the relation between latent space and length and loss
# reveal the distribution of latent space
model.eval()
model.FLAG_train = False
start_time = time.time()
acc_loss = 0
acc_kl_loss = 0
acc_aux_loss = 0
acc_avg_cos = 0
acc_avg_norm = 0
batch_cnt = 0
all_cnt = 0
cnt = 0
random.shuffle(train_batches)
if self.args.dist == 'nor':
vs = visual_gauss(self.data.dictionary)
elif self.args.dist == 'vmf':
vs = visual_vmf(self.data.dictionary)
for idx, batch in enumerate(train_batches):
seq_len, batch_sz = batch.size()
feed = self.data.get_feed(batch)
glob_iter += 1
target = GVar(batch)
recon_loss, kld, aux_loss, tup, vecs = model(feed, target)
acc_loss += recon_loss.data * seq_len * batch_sz
acc_kl_loss += torch.sum(kld).data
acc_aux_loss += torch.sum(aux_loss).data
acc_avg_cos += tup['avg_cos'].data
acc_avg_norm += tup['avg_norm'].data
cnt += 1
batch_cnt += batch_sz
all_cnt += batch_sz * seq_len
vs.add_batch(target.data, tup, kld.data)
cur_loss = acc_loss[0] / all_cnt
cur_kl = acc_kl_loss[0] / all_cnt
cur_aux_loss = acc_aux_loss[0] / all_cnt
cur_avg_cos = acc_avg_cos[0] / cnt
cur_avg_norm = acc_avg_norm[0] / cnt
cur_real_loss = cur_loss + cur_kl
Runner.log_instant(None, self.args, glob_iter, epo, start_time, cur_avg_cos, cur_avg_norm,
cur_loss
, cur_kl, cur_aux_loss,
cur_real_loss)
vs.write_log()
class visual_gauss():
def __init__(self, d):
self.logs = []
self.dict = d
def add_batch(self, target, tup, kld, loss):
seq_len, batch_sz = loss.size()
_seq_len, _batch_sz = target.size()
__batch = kld.size()[0]
assert seq_len == _seq_len
assert batch_sz == _batch_sz == __batch
mean = tup['mean']
logvar = tup['logvar']
# print(target.size())
# print(batch_sz)
for b in range(batch_sz):
this_target = target[:, b]
this_mean = mean[b]
this_logvar = logvar[b]
this_kld = kld[b]
this_loss = loss[:, b]
self.add_single(this_target, this_mean, this_logvar,
this_kld, this_loss)
def add_single(self, target, mean, logvar, kld, loss):
norm_mean = torch.norm(mean).data[0]
norm_var = torch.norm(torch.exp(logvar)).data[0]
length = len(target)
seq = ''
for t in target:
seq += self.dict.idx2word[t] + '_'
self.logs.append("{}\t{}\t{}\t{}\t{}\t{}".format(norm_mean, norm_var, kld, torch.mean(loss)
, length, seq))
def write_log(self):
with open('vslog.txt', 'w') as f:
f.write('\n'.join(self.logs))
class visual_vmf():
def __init__(self, d):
self.logs = []
self.dict = d
def add_batch(self, target, tup, kld):
_seq_len, _batch_sz = target.size()
# __batch = kld.size()[0]
mu = tup['mu']
# print(target.size())
# print(batch_sz)
for b in range(_batch_sz):
this_target = target[:, b]
this_mu = mu[b]
self.add_single(this_target, this_mu)
def add_single(self, target, mu):
thismu = mu.data
length = len(target)
seq = ''
for t in target:
seq += self.dict.idx2word[t] + '_'
# self.logs.append("{}\t{}\t{}\t{}\t{}\t{}".format(norm_mean,kld,torch.mean(loss)
# ,length, seq))
tmp = []
for i in thismu:
tmp.append(str(i))
s = '\t'.join(tmp)
self.logs.append(s)
def write_log(self):
with open('vh.txt', 'w') as f:
f.write('\n'.join(self.logs))
# with open('vu.txt', 'w') as f:
# f.write('\n'.join(self.logs))
def query(word):
with open('/home/jcxu/vae_txt/data/ptb/test.txt', 'r') as f:
lines = f.read().splitlines()
bag = []
for l in lines:
if word in l:
bag.append(l)
with open('/home/jcxu/vae_txt/data/ptb/test_' + word + '.txt', 'w') as f:
f.write('\n'.join(bag))
import scipy.spatial.distance as ds
def compute_cos(files):
bags = []
for fname in files:
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
bag = []
for l in lines:
nums = []
tabs = l.split('\t')
for t in tabs:
nums.append(float(t))
x = torch.FloatTensor(numpy.asarray(nums))
bag.append(x)
bags.append(bag)
def _mean_of_bag(bag):
x = 0
for b in range(len(bag)):
x += bag[b]
tmp = x / len(bag)
# print('avg of bag {}'.format(tmp))
return tmp
def comp_cos(a, b):
return (torch.sum(a * b) / (torch.norm(a) * torch.norm(b)))
A = bags[0] # h
B = bags[1] # j
print(comp_cos(_mean_of_bag(A), _mean_of_bag(B)))
print('-' * 50)
arec = []
for idx, aa in enumerate(A):
for jdx in range(idx, len(A)):
print('{}\t{}\t{}'.format(idx, jdx, comp_cos(aa, A[jdx])))
arec.append(comp_cos(aa, A[jdx]))
print(sum(arec) / float(len(arec)))
print('-' * 50)
brec = []
for idx, aa in enumerate(B):
for jdx in range(idx, len(B)):
print("{}\t{}\t{}".format(idx, jdx, comp_cos(aa, B[jdx])))
brec.append(comp_cos(aa, B[jdx]))
print(sum(brec) / float(len(brec)))
if __name__ == '__main__':
# bag = []
# for swap in [0.,0.25,0.5,1]:
# for replace in [0.,0.25,0.5,1]:
# for unk in [0.,0.25,0.5,1]:
#
#
# player = PlayNVRNN('/backup2/jcxu/exp-nvrnn',
# 'Dataptb_Distnor_Modelnvrnn_Emb100_Hid400_lat32_lr0.1_drop0.7_kappa16.0_auxw0.0_normfFalse_nlay1_mixunk1.0_inpzTrue'
# , '/home/jcxu/vae_txt/data/ptb',swap=swap,replace=replace,mix_unk=unk)
# cur_loss, cur_kl, test_loss = player.eva()
# s = '{}\t{}\t{}\t{}\t{}\t{}'.format(swap, replace, unk, cur_loss,cur_kl,cur_loss)
# bag.append(s)
# print(bag)
# for b in bag:
# print(b)
player = PlayNVRNN('/backup2/jcxu/exp-nvrnn',
'Dataptb_Distvmf_Modelnvrnn_Emb100_Hid800_lat32_lr10.0_drop0.5_kappa64.0_auxw0.01_normfFalse_nlay1_mixunk1.0_inpzTrue'
, '/home/jcxu/vae_txt/data/ptb', swap=0, replace=0, mix_unk=1)
cur_loss, cur_kl, test_loss = player.eva()
# player.play_eval(player.args, player.model, player.data.demo_h, 0, 0, 0)
# os.chdir('/home/jcxu/vae_txt/NVLL/framework')
# compute_cos(['vu.txt', 've.txt'])
```
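A tiny standalone illustration of the `comp_cos` helper defined inside `compute_cos` (cosine similarity written as a dot product divided by the product of norms):
```python
import torch

a = torch.tensor([1.0, 0.0, 1.0])
b = torch.tensor([1.0, 1.0, 0.0])
# Same expression as comp_cos(a, b) above.
print(torch.sum(a * b) / (torch.norm(a) * torch.norm(b)))  # tensor(0.5000)
```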
#### File: NVLL/framework/train_eval_nvdm.py
```python
import logging
import math
import os
import random
import time
import torch
from NVLL.util.gpu_flag import device
from NVLL.data.ng import DataNg
# from NVLL.util.util import schedule, GVar, maybe_cuda
from NVLL.util.util import schedule, GVar
random.seed(2018)
class Runner():
def __init__(self, args, model, data, writer):
self.args = args
self.data = data
self.model = model
self.writer = writer
self.args.cur_lr = self.args.lr
if args.optim == 'sgd':
self.optim = torch.optim.SGD(model.parameters(), lr=self.args.lr)
elif args.optim == 'adam':
self.optim = torch.optim.Adam(model.parameters(), lr=self.args.lr)
else:
raise NotImplementedError
self.dead_cnt = 0
self.glob_iter = 0
self.best_val_loss = None
def start(self):
print("Model {}".format(self.model))
logging.info("Model {}".format(self.model))
try:
for epoch in range(1, self.args.epochs + 1):
# self.args.kl_weight = schedule(epoch)
epoch_start_time = time.time()
self.data.set_train_batches(self.args)
self.train_epo(self.args, self.model, self.data.train_batches, epoch,
epoch_start_time)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
@staticmethod
def write_board(args, cur_loss, cur_kl, test_loss):
from datetime import datetime
with open(os.path.join(args.exp_path, args.board), 'a') as fd:
part_id = str(datetime.utcnow()) + "\t"
for k, v in vars(args).items():
part_id += str(k) + ":\t" + str(v) + "\t"
part_loss = "{}\t{}\t{}\t{}\n".format(
cur_loss, cur_kl, test_loss, math.exp(test_loss))
print(part_id + part_loss)
fd.write(part_id + part_loss)
def end(self):
# Load the best saved model.
if self.args.data_name == '20ng':
from NVLL.model.nvdm import BowVAE
elif self.args.data_name == 'rcv':
from NVLL.model.nvdm import BowVAE
else:
raise NotImplementedError
model = BowVAE(self.args, vocab_size=self.data.vocab_size, n_hidden=self.args.nhid,
n_lat=self.args.lat_dim,
n_sample=3, dist=self.args.dist)
model.load_state_dict(torch.load(self.args.save_name + '.model'),
strict=False)
model.to(device)
# if torch.cuda.is_available() and GPU_FLAG:
# model = model.cuda()
print(model)
print(self.args)
model = model.eval()
cur_loss, cur_kl, test_loss = self.evaluate(self.args, model,
self.data.test[0], self.data.test[1], self.data.test_batches)
Runner.log_eval(self.writer, None, cur_loss, cur_kl, test_loss, True)
cur_loss = cur_loss.item()
cur_kl = cur_kl.item()
test_loss = test_loss.item()
os.rename(self.args.save_name + '.model', self.args.save_name + '_' + str(test_loss) + '.model')
os.rename(self.args.save_name + '.args', self.args.save_name + '_' + str(test_loss) + '.args')
self.write_board(self.args, cur_loss, cur_kl, test_loss)
self.writer.close()
@staticmethod
def log_eval(writer, glob_iter, recon_loss, kl_loss, loss, is_test=False):
recon_loss = recon_loss.item()
kl_loss = kl_loss.item()
loss = loss.item()
# print('=' * 89)
if is_test:
print(
'| End of training | Recon Loss {:5.2f} | KL Loss {:5.2f} | Test Loss {:5.2f} | Test PPL {:8.2f} |'.format(
recon_loss, kl_loss, loss, math.exp(loss)))
if writer is not None:
writer.add_scalars('test', {'recon_loss': recon_loss, 'kl_loss': kl_loss,
'val_loss': loss,
'ppl': math.exp(loss)
})
else:
print(
'| EVAL | Step: {} | Recon Loss {:5.2f} | KL Loss {:5.2f} | Eval Loss {:5.2f} | Eval PPL {:8.2f} |'.format(
glob_iter, recon_loss, kl_loss, loss, math.exp(loss)))
writer.add_scalars('eval', {'recon_loss': recon_loss, 'kl_loss': kl_loss,
'val_loss': loss,
'ppl': math.exp(loss)
}, global_step=glob_iter)
print('=' * 89)
@staticmethod
def log_instant(writer, args, glob_iter, epoch, epoch_start_time, cur_avg_cos, cur_avg_norm,
recon_loss, kl_loss, aux_loss, val_loss):
try:
print(
'| epoch {:3d} | time: {:5.2f}s | KL Weight {:5.2f} | AvgCos {:5.2f} | AvgNorm {:5.2f} |Recon Loss {:5.2f} | KL Loss {:5.2f} | Aux '
'loss: {:5.2f} | Total Loss {:5.2f} | PPL {:8.2f}'.format(
epoch, (time.time() - epoch_start_time), args.kl_weight, cur_avg_cos, cur_avg_norm,
recon_loss, kl_loss, aux_loss, val_loss, math.exp(val_loss)))
if writer is not None:
writer.add_scalars('train', {'lr': args.lr, 'kl_weight': args.kl_weight, 'cur_avg_cos': cur_avg_cos,
'cur_avg_norm': cur_avg_norm, 'recon_loss': recon_loss, 'kl_loss': kl_loss,
'aux_loss': aux_loss,
'val_loss': val_loss,
'ppl': math.exp(val_loss)
}, global_step=glob_iter)
except OverflowError:
print('Overflow')
def train_epo(self, args, model, train_batches, epo, epo_start_time):
model.train()
start_time = time.time()
if self.args.optim == 'sgd':
self.optim = torch.optim.SGD(model.parameters(), lr=self.args.cur_lr)
else:
raise NotImplementedError
acc_loss = 0
acc_kl_loss = 0
acc_aux_loss = 0
acc_avg_cos = 0
acc_avg_norm = 0
# acc_real_loss = 0
word_cnt = 0
doc_cnt = 0
cnt = 0
random.shuffle(train_batches)
for idx, batch in enumerate(train_batches):
self.optim.zero_grad()
self.glob_iter += 1
data_batch, count_batch = DataNg.fetch_data(
self.data.train[0], self.data.train[1], batch, self.data.vocab_size)
model.zero_grad()
data_batch = GVar(torch.FloatTensor(data_batch))
recon_loss, kld, aux_loss, tup, vecs = model(data_batch)
# print("Recon: {}\t KL: {}".format(recon_loss,kld))
# total_loss = torch.mean(recon_loss + kld * args.kl_weight)
total_loss = torch.mean(recon_loss + kld * args.kl_weight + aux_loss * args.aux_weight)
total_loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
self.optim.step()
count_batch = GVar(torch.FloatTensor(count_batch))
doc_num = len(count_batch)
# real_loss = torch.div((recon_loss + kld).data, count_batch)
# acc_real_loss += torch.sum(real_loss)
acc_loss += torch.sum(recon_loss).item()
acc_kl_loss += torch.sum(kld).item()
acc_aux_loss += torch.sum(aux_loss).item()
acc_avg_cos += tup['avg_cos'].item()
acc_avg_norm += tup['avg_norm'].item()
cnt += 1
count_batch = count_batch + 1e-12
word_cnt += torch.sum(count_batch).item()
doc_cnt += doc_num
if idx % args.log_interval == 0 and idx > 0:
cur_loss = acc_loss / word_cnt # word loss
cur_kl = acc_kl_loss / word_cnt
cur_aux_loss = acc_aux_loss / word_cnt
cur_avg_cos = acc_avg_cos / cnt
cur_avg_norm = acc_avg_norm / cnt
# cur_real_loss = acc_real_loss / doc_cnt
cur_real_loss = cur_loss + cur_kl
# if cur_kl < 0.14 or cur_kl > 1.2:
# raise KeyboardInterrupt
Runner.log_instant(self.writer, self.args, self.glob_iter, epo, start_time,
cur_avg_cos, cur_avg_norm, cur_loss
, cur_kl, cur_aux_loss,
cur_real_loss)
acc_loss = 0
acc_kl_loss = 0
acc_aux_loss = 0
acc_avg_cos = 0
acc_avg_norm = 0
word_cnt = 0
doc_cnt = 0
cnt = 0
if idx % (3 * args.log_interval) == 0 and idx > 0:
with torch.no_grad():
self.eval_interface()
def eval_interface(self):
cur_loss, cur_kl, val_loss = self.evaluate(self.args, self.model,
self.data.dev[0], self.data.dev[1], self.data.dev_batches)
Runner.log_eval(self.writer, self.glob_iter, cur_loss, cur_kl, val_loss, False)
print(self.args.save_name)
val_loss = val_loss.item()
if not self.best_val_loss or val_loss < self.best_val_loss:
with open(self.args.save_name + ".model", 'wb') as f:
torch.save(self.model.state_dict(), f)
with open(self.args.save_name + ".args", 'wb') as f:
torch.save(self.args, f)
self.best_val_loss = val_loss
self.dead_cnt = 0
else:
self.dead_cnt += 1
self.args.cur_lr /= 1.1
# if self.glob_iter > 1000:
# raise KeyboardInterrupt
if self.dead_cnt == 15:
raise KeyboardInterrupt
def evaluate(self, args, model, corpus_dev, corpus_dev_cnt, dev_batches):
# Turn on training mode which enables dropout.
model.eval()
acc_loss = 0
acc_kl_loss = 0
acc_real_loss = 0
word_cnt = 0
doc_cnt = 0
start_time = time.time()
ntokens = self.data.vocab_size
for idx, batch in enumerate(dev_batches):
data_batch, count_batch = self.data.fetch_data(
corpus_dev, corpus_dev_cnt, batch, ntokens)
data_batch = GVar(torch.FloatTensor(data_batch))
recon_loss, kld, aux_loss, tup, vecs = model(data_batch)
count_batch = GVar(torch.FloatTensor(count_batch))
# real_loss = torch.div((recon_loss + kld).data, count_batch)
doc_num = len(count_batch)
# remove nan
# for n in real_loss:
# if n == n:
# acc_real_loss += n
# acc_real_ppl += torch.sum(real_ppl)
acc_loss += torch.sum(recon_loss).item() #
acc_kl_loss += torch.sum(kld).item()
count_batch = count_batch + 1e-12
word_cnt += torch.sum(count_batch)
doc_cnt += doc_num
# word ppl
cur_loss = acc_loss / word_cnt # word loss
cur_kl = acc_kl_loss / word_cnt
# cur_real_loss = acc_real_loss / doc_cnt
cur_real_loss = cur_loss + cur_kl
elapsed = time.time() - start_time
# Runner.log_eval(print_ppl)
# print('loss {:5.2f} | KL {:5.2f} | ppl {:8.2f}'.format( cur_loss, cur_kl, math.exp(print_ppl)))
return cur_loss, cur_kl, cur_real_loss
```
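The runner reports perplexity as `exp()` of the per-word loss, where the per-word loss is the reconstruction term plus the KL term, each normalized by the total word count. A one-line numeric illustration with made-up values:
```python
import math

# Made-up per-word losses, only to show the reporting convention used above.
per_word_recon, per_word_kl = 4.8, 0.6
print(math.exp(per_word_recon + per_word_kl))  # perplexity of about 221.4
```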
#### File: NVLL/util/try.py
```python
import time
import torch
from NVLL.util.util import GVar
from NVLL.util.gpu_flag import device
print(device)
#
# start = time.time()
# hard = torch.nn.Hardtanh()
# softmax = torch.nn.Softmax()
# for i in range(100):
# x = torch.zeros(100000).cuda()
# y = torch.rand(100000).cuda()
# z = y * y * y
# c = y * y / (y + y)
# d = c * c + c
# m = y + z + y
# m = GVar(m)
#
# for j in range(1000):
# k = hard(m)
# e = softmax(m + m)
# q = softmax(m)
#
# print(time.time() - start)
import numpy as np
def _sample_weight(kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa) # b= 1/(sqrt(4.* kdiv**2 + 1) + 2 * kdiv)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2) # dim * (kdiv *x + np.log(1-x**2))
cnt = 0
while True:
cnt += 1
z = np.random.beta(dim / 2., dim / 2.) # concentrates towards 0.5 as d-> inf
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(
u): # thresh is dim *(kdiv * (w-x) + log(1-x*w) -log(1-x**2))
return w, cnt
kappa = [32, 64, 128]
# for k in kappa:
# for d in kappa:
# l = []
# for _ in range(1000):
# _, cnt =_sample_weight(k,d)
# l.append(cnt)
# print("{}\t{}\t{}".format(k,d,sum(l)/len(l)))
# Small example weights so this scratch check actually runs; an empty
# FloatTensor would make torch.multinomial raise a RuntimeError.
weights = torch.FloatTensor([0.1, 0.2, 0.3, 0.4])
print(torch.multinomial(weights, 1, replacement=False))
```
#### File: vmf_vae_nlp/sample/sampler.py
```python
from utils import *
class Sampler():
def __init__(self, n_words=[5,10], corpus='ptb',
cluster_path='IARPA_200clusters.csv', strict_stop=False,
sample_path='samples/sample_data.json'):
self.n_words = n_words
self.corpus = corpus
self.cluster_path = cluster_path
self.strict_stop = strict_stop
self.sample_path = sample_path
if corpus == 'ptb':
self.corpus_path = '../data/ptb.zip'
elif corpus == 'wiki':
self.corpus_path = '../data/WestburyLab.Wikipedia.Corpus.txt.bz2'
else:
raise NameError('Only ptb and wiki are currently supported')
self.make_cluster_dict()
def make_cluster_dict(self):
'''
Returns dictionary where each key is a target word, and its value
is a list of 20 related words.
'''
import pandas as pd
df = pd.read_csv(self.cluster_path,
usecols=['target word', '20 related words'])
cluster_dict = df.set_index('target word').T.to_dict('records')[0]
cluster_dict = {t : w.split(' ') for t,w in cluster_dict.items()}
self.clusters = cluster_dict
def read_corpus(self):
'''
Returns list of sentences, delimited by <delim>,
from self.corpus_path. The test.txt file is loaded by default.
'''
if self.corpus == 'ptb':
delim = '\n'
fname = 'test.txt'
data = read_from_archive(self.corpus_path,
'{}/{}'.format(self.corpus, fname))
sentences = data.split(delim)
elif self.corpus == 'wiki':
data = read_from_archive(self.corpus_path)
sentences = texts_to_sentences(data)
print(len(sentences))
return sentences
def pick_sentence(self, target, n, strict_stop):
'''
Returns a sentence (str) from self.corpus with length <n>
that contains one of the 20 words related to <target>.
'''
# get list of 20 related words
rel_words = self.clusters[target]
# get sentence of specified length that contains one of the related words
sentences_iter = iter([s for s in self.sentences_n[n]
if any([w in self.tokens[s] for w in rel_words])])
        # if strict_stop, raise an error when no sentence is found
if strict_stop:
try:
sentence = next(sentences_iter)
except StopIteration:
err_str = 'No sentence found: n={}, target={}.'.format(n, target)
raise NameError(err_str)
# otherwise, return None
else:
sentence = next(sentences_iter, None)
return sentence
def get_sample(self):
'''
Returns nested dictionary where <targets> are the keys. The values are
dictionaries keyed by lengths from <n_words_list>, with sentences
as values.
'''
print('Reading corpus at {}\nThis may take a while...'.format(self.corpus_path))
sentences = self.read_corpus()
print('Tokenizing...')
self.tokens = {
s : word_tokenize(s) for s in sentences
}
print('Getting sentences of length {}...'.format(self.n_words))
# get sentences of specified length (number of NLTK tokens)
self.sentences_n = {
n : sentences_of_length_n(n, self.tokens) for n in self.n_words
}
print('Generating sample...')
sample = {
t : {n : self.pick_sentence(t, n, self.strict_stop)
for n in self.n_words}
for t in self.clusters.keys()
}
return sample
def write_sample(self, sample):
'''
Writes sample dictionary to .json file.
'''
data_dict = {
'corpus_path' : self.corpus_path,
'cluster_path' : self.cluster_path,
'sample': sample
}
write_to_json(data_dict, self.sample_path)
print('Wrote sample to {}'.format(self.sample_path))
```
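A hypothetical end-to-end usage sketch, shown as if appended to the file above; the constructor arguments simply repeat the defaults, and it assumes the corpus archive, the cluster CSV, and the helpers in `utils` are available.
```python
# Hypothetical usage; the heavy lifting (reading the archive, NLTK
# tokenization, length bucketing) happens inside get_sample().
sampler = Sampler(n_words=[5, 10], corpus='ptb',
                  cluster_path='IARPA_200clusters.csv', strict_stop=False,
                  sample_path='samples/sample_data.json')
sample = sampler.get_sample()   # {target word: {length: sentence or None}}
sampler.write_sample(sample)
```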
|
{
"source": "jenni4j/algo-practice",
"score": 4
}
|
#### File: jenni4j/algo-practice/sort.py
```python
import random
# Quick Sort
def quicksort(array):
qs(array, 0, len(array)-1)
return array
def qs(array, left, right):
if left >= right:
return
pivot = array[random.randint(left,right)]
partition = partition_arr(array, left, right, pivot)
qs(array, left, partition-1)
qs(array, partition, right)
def partition_arr(array, left, right, pivot):
while left <= right:
while array[left] < pivot:
left+=1
while array[right] > pivot:
right-=1
if left <= right:
temp = array[right]
array[right] = array[left]
array[left] = temp
left+=1
right-=1
return left
test = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
print(quicksort(test))
```
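A quick sanity check, shown as if appended to the file above: the quicksort should agree with Python's built-in `sorted` on random input.
```python
# random is already imported at the top of the file.
data = [random.randint(0, 100) for _ in range(50)]
assert quicksort(list(data)) == sorted(data)
print("quicksort matches sorted()")
```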
|
{
"source": "jenni4j/Climate_Dreamer_Pros",
"score": 3
}
|
#### File: jenni4j/Climate_Dreamer_Pros/word_freak.py
```python
import functools
import operator
import ignored
def frequency(some_df, column):
word_list_block = []
answers = list(some_df[column])
for i in range(len(answers)):
answers[i] = answers[i].lower().split()
for word in answers:
# print(word)
word_list_block.append(word)
Words_list = functools.reduce(operator.iconcat, word_list_block, [])
# return Words
unique_words = {}
prep = ignored.ignore_these_words()
for W in Words_list:
if W in prep:
pass
else:
if W in unique_words:
unique_words[W] += 1
else:
unique_words[W] = 1
for key, value in sorted(unique_words.items(), key=operator.itemgetter(1)):
print(key, value)
```
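A toy, self-contained version of what `frequency` does; the real function also drops the stop words returned by `ignored.ignore_these_words()`, which is not reproduced here.
```python
import pandas as pd

# Toy survey answers; the real call is frequency(df, column) on a DataFrame column.
df = pd.DataFrame({"answers": ["climate change is real", "change is coming"]})
words = " ".join(df["answers"]).lower().split()
counts = {}
for w in words:
    counts[w] = counts.get(w, 0) + 1
for word, n in sorted(counts.items(), key=lambda kv: kv[1]):
    print(word, n)
```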
|
{
"source": "jennie6151/inwiththeold",
"score": 2
}
|
#### File: inwiththeold/antiqueProjectApp/test_forms.py
```python
from django.test import TestCase
from .forms import AntiquePurchaseForm
from antiqueProjectApp.models import Antique
# Create your tests here.
class TestAntiquePurchaseForm(TestCase):
#Test to check that the payment form cannot be submitted by just customerFirstName
def test_cannot_make_a_payment_with_just_a_name(self):
form = AntiquePurchaseForm({'customerFirstName': 'Create Tests'})
self.assertFalse(form.is_valid())
#Test to check that the payment form can only be submitted if all fields are complete
def test_can_only_make_a_payment_with_all_details(self):
Antique.objects.create(AntiqueName='Test')
pk=Antique.objects.get(AntiqueName='Test').pk
form = AntiquePurchaseForm({'antique':pk, 'customerFirstName': 'Create Tests', 'customerLastName': 'Create Tests', 'customerAddressLine1': 'Create Tests', 'customerAddressLine2': 'Create Tests', 'customerAddressCity': 'Create Tests', 'customerAddressCounty': 'Create Tests', 'customerAddressPostcode': 'Create Tests', 'customerEmail': 'Create Tests', 'customerTelephone': 'Create Tests'})
self.assertTrue(form.is_valid())
```
#### File: inwiththeold/antiqueProjectApp/test_models.py
```python
from django.test import TestCase
from .forms import AntiquePurchaseForm
from antiqueProjectApp.models import Antique
class TestAntique(TestCase):
#Test to check that the price of an item is correctly multiplied by 100
def test_price_is_correct_when_times_100(self):
antique = Antique(Price=10.00)
self.assertEqual(antique.price_in_pence(), antique.Price*100)
self.assertNotEqual(antique.price_in_pence(),10)
```
|
{
"source": "jenniebrown/health-equity-tracker",
"score": 3
}
|
#### File: python/data_server/dataset_cache.py
```python
import threading
import cachetools
from data_server import gcs_utils
class DatasetCache():
"""DatasetCache manages and stores datasets accessed through GCS.
DatasetCache is a thin, thread-safe wrapper around cachetools.TTLCache."""
def __init__(self, max_cache_size=8, cache_ttl=2 * 3600):
"""max_cache_size: Max number of cache elements. Default 8.
cache_ttl: TTL per object in seconds. Default 2 hours."""
self.cache = cachetools.TTLCache(maxsize=max_cache_size, ttl=cache_ttl)
self.cache_lock = threading.Lock()
def clear(self):
"""Clears entries from the cache. Mostly useful for tests."""
with self.cache_lock:
self.cache.clear()
def getDataset(self, gcs_bucket: str, table_id: str):
"""Returns the given dataset identified by table_id as bytes.
getDataset will return the dataset from memory if it exists in the
cache. Otherwise, it will request the file from GCS and update the
cache on success.
gcs_bucket: Name of GCS bucket where the dataset is stored.
table_id: Name of the data set file to access.
Returns: Bytes object containing the dataset if successful. Throws
NotFoundError on failure."""
with self.cache_lock:
item = self.cache.get(table_id)
if item is not None:
return item
# Release the lock while performing IO.
blob_str = gcs_utils.download_blob_as_bytes(gcs_bucket, table_id)
# If this has been updated since we last checked, it's still okay to
# overwrite since it will only affect freshness.
with self.cache_lock:
self.cache[table_id] = blob_str
return blob_str
```
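A standalone illustration of the `cachetools.TTLCache` behaviour the wrapper relies on: entries expire after `ttl` seconds, at which point `getDataset` falls back to downloading from GCS again.
```python
import time
import cachetools

cache = cachetools.TTLCache(maxsize=2, ttl=1)   # tiny TTL just for the demo
cache["table_id"] = b"dataset bytes"
print(cache.get("table_id"))  # b'dataset bytes'
time.sleep(1.1)
print(cache.get("table_id"))  # None, so DatasetCache would re-download the blob
```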
#### File: python/datasources/data_source.py
```python
from abc import ABC
import re
from ingestion import url_file_to_gcs, gcs_to_bq_util
# Abstract base class for all data sources ingested by the Health Equity
# Tracker. This also includes default implementations for each of the
# ingestion methods.
class DataSource(ABC):
@staticmethod
def get_id():
"""Returns the data source's unique id. """
pass
@staticmethod
def get_table_name():
"""Returns the BigQuery base table name where the data source's data will
stored. """
pass
def get_historical_table_name(self):
"""Returns the BigQuery historical data table name."""
return self.get_table_name() + "_historical"
def get_latest_table_name(self):
"""Returns the BigQuery latest data table name."""
return self.get_table_name() + "_latest"
def get_attr(self, attributes, key):
attr = attributes.get(key)
if attr is None:
raise RuntimeError(
"Attribute: {} not found on payload".format(key))
return attr
def upload_to_gcs(self, gcs_bucket, **attrs):
"""
Attempts to download a file from a url and upload as a
blob to the given GCS bucket.
Parameters:
gcs_bucket: Name of the GCS bucket to upload to (without gs://).
attrs: Additional message attributes such as url and filename that
are needed for this data source.
Returns: A boolean indication of a file diff.
In the case that there are many files to download, this will
return true if there is at least one file that is different.
"""
return url_file_to_gcs.url_file_to_gcs(
self.get_attr(attrs, 'url'), None, gcs_bucket,
self.get_attr(attrs, 'filename'))
def write_to_bq(self, dataset, gcs_bucket, **attrs):
"""Writes source data from GCS bucket to BigQuery
dataset: The BigQuery dataset to write to
gcs_bucket: The name of the gcs bucket to read the data from
attrs: Additional message attributes such as url and filename that are
needed for this data source."""
self.write_to_bq_table(dataset, gcs_bucket,
self.get_attr(attrs, 'filename'),
self.get_table_name())
def write_to_bq_table(self, dataset: str, gcs_bucket: str,
filename: str, table_name: str, project=None):
"""Writes source data from GCS bucket to BigQuery
dataset: The BigQuery dataset to write to
gcs_bucket: The name of the gcs bucket to read the data from
filename: The name of the file in the gcs bucket to read from
table_name: The name of the BigQuery table to write to"""
chunked_frame = gcs_to_bq_util.load_csv_as_dataframe(
gcs_bucket, filename, chunksize=1000)
# For the very first chunk, we set the mode to overwrite to clear the
# previous table. For subsequent chunks we append.
overwrite = True
for chunk in chunked_frame:
self.clean_frame_column_names(chunk)
gcs_to_bq_util.add_dataframe_to_bq(
chunk, dataset, table_name, project=project,
overwrite=overwrite)
overwrite = False
def clean_frame_column_names(self, frame):
""" Replaces unfitting BigQuery characters and
makes all coumn names lower case.
frame: The pandas dataframe with unclean columns
"""
frame.rename(columns=lambda col: (
re.sub('[^0-9a-zA-Z_=%]+', '_', col)
.lower()
.replace('=', 'eq')
.replace('%', 'pct')
), inplace=True)
def export_to_gcs(self):
# TODO: Implement
pass
```
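A toy demonstration of the renaming rule in `clean_frame_column_names`: any run of characters outside `[0-9a-zA-Z_=%]` collapses to a single underscore, then the name is lower-cased and `=` / `%` are spelled out.
```python
import re
import pandas as pd

# Same renaming rule as clean_frame_column_names, applied to made-up headers.
frame = pd.DataFrame(columns=["Some Col%", "A=B", "Total (2020)"])
frame.rename(columns=lambda col: (
    re.sub('[^0-9a-zA-Z_=%]+', '_', col)
    .lower()
    .replace('=', 'eq')
    .replace('%', 'pct')
), inplace=True)
print(list(frame.columns))  # ['some_colpct', 'aeqb', 'total_2020_']
```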
#### File: python/ingestion/url_file_to_gcs.py
```python
import logging
import os
from google.cloud import storage
import google.cloud.exceptions
import requests
import filecmp
def local_file_path(filename):
return '/tmp/{}'.format(filename)
def url_file_to_gcs(url, url_params, gcs_bucket, dest_filename):
"""
Attempts to download a file from a url and upload as a
blob to the given GCS bucket.
Parameters:
url: The URL of the file to download.
url_params: URL parameters to be passed to requests.get().
gcs_bucket: Name of the GCS bucket to upload to (without gs://).
dest_filename: What to name the downloaded file in GCS.
Include the file extension.
Returns: A boolean indication of a file diff
"""
return download_first_url_to_gcs(
[url], gcs_bucket, dest_filename, url_params)
def get_first_response(url_list, url_params):
for url in url_list:
try:
file_from_url = requests.get(url, params=url_params)
file_from_url.raise_for_status()
return file_from_url
except requests.HTTPError as err:
logging.error("HTTP error for url %s: %s", url, err)
return None
def download_first_url_to_gcs(url_list, gcs_bucket, dest_filename,
url_params={}):
"""
    Iterates over the list of potential URLs that may point to the data
    source until one of the URLs succeeds in downloading. If no URL succeeds,
    the method logs an error and returns None.
Parameters:
url_list: List of URLs where the file may be found.
gcs_bucket: Name of the GCS bucket to upload to (without gs://).
dest_filename: What to name the downloaded file in GCS.
Include the file extension.
url_params: URL parameters to be passed to requests.get().
Returns:
files_are_diff: A boolean indication of a file diff
"""
# Establish connection to valid GCS bucket
try:
storage_client = storage.Client()
bucket = storage_client.get_bucket(gcs_bucket)
except google.cloud.exceptions.NotFound:
logging.error("GCS Bucket %s not found", gcs_bucket)
return
# Find a valid file in the URL list or exit
file_from_url = get_first_response(url_list, url_params)
if file_from_url is None:
logging.error(
"No file could be found for intended destination: %s",
dest_filename)
return
# Download the contents of the URL to a local file
new_file_local_path = local_file_path(dest_filename)
with file_from_url, open(new_file_local_path, 'wb') as new_file:
new_file.write(file_from_url.content)
# Downloads the current file in GCS to a local file
old_file_local_path = local_file_path("gcs_local_file")
with open(old_file_local_path, "wb") as old_file:
try:
bucket.blob(dest_filename).download_to_file(old_file)
except google.cloud.exceptions.NotFound:
files_are_diff = True
else:
# Compare the file contents for a diff
files_are_diff = not filecmp.cmp(
old_file_local_path, new_file_local_path)
# Only update the bucket if the files are diff
if files_are_diff:
# Upload the contents to the bucket
bucket.blob(dest_filename).upload_from_filename(new_file_local_path)
print(f'Uploading to Gcs_Bucket: {gcs_bucket}, FileName: {dest_filename}')
# Remove local files
os.remove(new_file_local_path)
os.remove(old_file_local_path)
return files_are_diff
```
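The upload decision hinges on a `filecmp` comparison of the freshly downloaded file against the copy currently in GCS. A minimal standalone sketch of that diff check with throwaway files of different length:
```python
import filecmp
import os
import tempfile

# Two throwaway files standing in for the GCS copy and the new download;
# files_are_diff=True is the condition that triggers an upload above.
with tempfile.TemporaryDirectory() as tmp:
    old_path = os.path.join(tmp, "gcs_local_file")
    new_path = os.path.join(tmp, "downloaded_file")
    with open(old_path, "wb") as f:
        f.write(b"old contents")
    with open(new_path, "wb") as f:
        f.write(b"new, longer contents")
    files_are_diff = not filecmp.cmp(old_path, new_path)
    print(files_are_diff)  # True
```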
|
{
"source": "jenniekone/Item-Catalog",
"score": 3
}
|
#### File: jenniekone/Item-Catalog/database_setup.py
```python
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
email = Column(String(250), nullable=False)
picture = Column(String(250))
class Category(Base):
__tablename__ = 'category'
id = Column(Integer, primary_key = True)
name = Column(String(80), nullable = False)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'id': self.id,
'name': self.name,
}
class CategoryItem(Base):
__tablename__ = 'category_item'
name = Column(String(80), nullable = False)
description = Column(String(250))
id = Column(Integer, primary_key = True)
category_id = Column(Integer, ForeignKey('category.id'))
category = relationship(Category)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'category': self.category.name,
'description': self.description,
'name': self.name,
}
engine = create_engine('sqlite:///itemcatalog.db')
Base.metadata.create_all(engine)
```
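A hypothetical usage sketch, shown as if appended to the file above: open a session on the SQLite database that `create_all` just created and insert one user with a category.
```python
from sqlalchemy.orm import sessionmaker

# `engine`, `User` and `Category` are defined above; this block is illustrative
# and not part of the original file.
Session = sessionmaker(bind=engine)
session = Session()
user = User(name='Test User', email='test@example.com')
session.add(user)
session.commit()
session.add(Category(name='Books', user=user))
session.commit()
print(session.query(Category).count())
```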
|
{
"source": "jennie-n/fitbitDataVisualization",
"score": 3
}
|
#### File: jennie-n/fitbitDataVisualization/calories.py
```python
import requests
import csv
from pprint import pprint
# CALORIES BURNED FROM A SPECIFIC TIME PERIOD
def get_calories(user_id, access_token):
calories = requests.get('https://api.fitbit.com/1/user/' + user_id + '/activities/tracker/calories/date/2020-08-16/2020-08-31.json',
headers={'Authorization': 'Bearer ' + access_token})
if (calories.status_code != 200):
print('Error fetching calories request. Need a new access token')
else:
# pprint(calories.json()) # print out the json response of the fetched data
# pprint(calories.json()['activities-tracker-calories']) # print out more specific part of the response
data = calories.json()['activities-tracker-calories']
# extract calories values to new csv file
with open("./csv/calories.csv", "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in data:
# print(line['value'])
writer.writerow(line.values())
```
#### File: jennie-n/fitbitDataVisualization/generate_token.py
```python
import requests
import time
import oauth2 as oauth2
from pprint import pprint
import json
from dotenv import load_dotenv
load_dotenv()
import os
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
REFRESH_TOKEN = os.getenv('REFRESH_TOKEN')
AUTH = os.getenv('AUTH')
USER_ID = os.getenv('USER_ID')
def get_new_refresh_token():
headers = {'Authorization': f'Basic {AUTH}', 'Content-Type': 'application/x-www-form-urlencoded'}
params = { 'grant_type': 'refresh_token', 'refresh_token': REFRESH_TOKEN }
new_token_object = requests.post('https://api.fitbit.com/oauth2/token', headers=headers, params=params)
# print(f'New Token: {new_token_object}')
# print('JSON ACCESS TOKEN: ' + new_token_object.json()['access_token'])
# print('JSON REFRESH TOKEN: ' + new_token_object.json()['refresh_token'])
new_access_token = new_token_object.json()['access_token']
new_refresh_token = new_token_object.json()['refresh_token']
return [new_access_token, new_refresh_token]
def overwrite_tokens(new_tokens):
a = open('.env', 'r+')
lines = a.readlines() # read all the lines from the file into an array
offset = 0 # used to keep track of the offset for overwriting new .env values
# for loop to change ONLY the access token and refresh token values in the .env file
for x in range(2):
value = lines[x].find('=') # .find() will return first index of the symbol, otherwise -1 if not in string
        a.seek(offset + value + 1) # set the file's current pointer to where we will start overwriting the new value
a.write(f'\'{new_tokens[x]}\'')
offset = offset + len(lines[x]) # add the length of the current line to the offset
a.close()
new_tokens = get_new_refresh_token()
overwrite_tokens(new_tokens)
```
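The seek-and-write approach in `overwrite_tokens` relies on the new token strings having the same length as the old ones. A hedged alternative sketch, shown as if appended to the file above, using python-dotenv's `set_key` to rewrite the two keys instead:
```python
# Alternative sketch, not the project's code: set_key rewrites a single key in
# the .env file and avoids the manual seek()/write() bookkeeping.
from dotenv import set_key

new_access_token, new_refresh_token = get_new_refresh_token()
set_key('.env', 'ACCESS_TOKEN', new_access_token)
set_key('.env', 'REFRESH_TOKEN', new_refresh_token)
```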
#### File: jennie-n/fitbitDataVisualization/steps.py
```python
import requests
import csv
from pprint import pprint
# STEPS FOR TODAY'S DATE, IF APPLICABLE
def get_steps_today(user_id, access_token):
# STEPS ON THIS DAY
activity_request = requests.get('https://api.fitbit.com/1/user/' + user_id + '/activities/steps/date/2020-09-12/2020-09-12.json',
headers={'Authorization': 'Bearer ' + access_token})
# print(activity_request.status_code)
# pprint(activity_request.json()) # print out the json response of the fetched data
# pprint(activity_request.json()['activities-steps']) # print out more specific part of the response
# STEPS FROM A SPECIFIC TIME PERIOD
def get_steps(user_id, access_token):
steps = requests.get('https://api.fitbit.com/1/user/' + user_id + '/activities/steps/date/2020-08-16/2020-08-31.json', headers={'Authorization': 'Bearer ' + access_token})
if (steps.status_code != 200):
print('Error fetching steps request. Need a new access token')
else:
# pprint(steps.json())
# pprint(steps.json()['activities-steps'])
data = steps.json()['activities-steps']
# extract steps values to new csv file
with open("./csv/steps.csv", "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in data:
# print(line['value'])
writer.writerow(line.values())
```
|
{
"source": "jenniening/Frag20_prepare",
"score": 3
}
|
#### File: Frag20_prepare/DataGen/analysis.py
```python
import os
import rdkit
from rdkit import Chem
import rdkit.Chem.Descriptors as Descriptors
import pandas as pd
from DataGen.util import convert_logtosdf
class getInfor:
"""Get SMILES and InChI
Args:
infile1: the initial SMILES
infile2: the sdf file for MMFF optimized structure
infile3: the sdf file for QM optimized structure
    infor holds the input SMILES plus the InChI and SMILES strings for the initial, MMFF, and QM structures
"""
def __init__(self,infile1, infile2, infile3, isomericSmiles):
self._infile = [infile1, infile2, infile3]
self._ftype = ["smiles", "sdf", "sdf"]
self._isomericSmiles = isomericSmiles
self._infor = []
self._inforname = ["SMILES", "initial_InChI", "initial_SMILES", "MMFF_InChI", "MMFF_SMILES", "QM_InChI", "QM_SMILES"]
@property
def infile(self):
return self._infile
@property
def ftype(self):
return self._ftype
@property
def isomericSmiles(self):
return self._isomericSmiles
@property
def inforname(self):
return self._inforname
@property
def infor(self):
if self._infor == []:
self._getAll3()
return self._infor
def _getAll3(self):
"""
        Get information for all three files; if extraction fails for a file, its two fields are recorded as "None", "None"
"""
self._infor.append(self._infile[0])
for idx, infile in enumerate(self._infile):
try:
self._infor.extend(self._getInfor(infile, self._ftype[idx], True, self._isomericSmiles))
except:
self._infor.extend(["None","None"])
def _getInfor(self, infile, file_type, removeHs, isomericSmiles):
"""
Generate SMILES and InChI strings
"""
if file_type == "sdf":
mol = Chem.SDMolSupplier(infile, removeHs=removeHs)[0]
else:
mol = Chem.MolFromSmiles(infile)
Inchi = Chem.MolToInchi(mol)
Smiles = Chem.MolToSmiles(mol, isomericSmiles=isomericSmiles)
return [Inchi, Smiles]
class check:
"""
    Analyze data after QM calculations
"""
def __init__(self, datadir, index_list, outdir, confs=False):
"""
Args:
datadir: directory for all data files including MMFF optimized sdf file and QM calculated log file
outdir: directory for all output data index files
            index_list: indexes of all successfully calculated files, e.g. 1.opt.log, 2.opt.log --> index_list = [1, 2]
            confs: True if the calculated files come from different conformations, otherwise False. Defaults to False
"""
self.datadir = os.path.abspath(datadir)
self.outdir = os.path.abspath(outdir)
self.olddir = os.getcwd()
self.index_list = index_list
self.confs = confs
if self.confs:
self.suffix = ".sdf"
else:
self.suffix = "_min.sdf"
self.smiles_list, self.index_init_list = self.__get_relation__()
def __get_relation__(self):
"""
Get initial index list and smiles list for each calculated file
"""
os.chdir(self.datadir)
smiles_list = []
init_index_list = []
for i in self.index_list:
with open(str(i) + self.suffix) as infile:
lines = infile.readlines()
index_init = lines[-9].rstrip()
smiles_init = lines[-15].rstrip()
smiles_list.append(smiles_init)
init_index_list.append(index_init)
os.chdir(self.olddir)
return smiles_list, init_index_list
def update_list(self,infile):
"""Update current index_list and initial_index_list based on provided file
Args:
infile: data index file
"""
data = pd.read_csv(os.path.join(self.outdir,infile))
self.index_list = list(data["index"])
if "initial_index" in data.columns:
            self.index_init_list = list(data["initial_index"])  # keep the attribute name consistent with __init__ and built_initialdata
def built_initialdata(self):
"""
Build initial information data
"""
os.chdir(self.datadir)
out = open(os.path.join(self.outdir,"initial_dataset.csv"), "w")
out.write("index initial_index SMILES initial_InChI initial_SMILES MMFF_InChI MMFF_SMILES QM_InChI QM_SMILES\n")
for idx, i in enumerate(self.index_list):
infile1 = self.smiles_list[idx]
infile2 = str(i) + self.suffix
infile3 = str(i) + ".opt.sdf"
if not os.path.exists(infile3):
convert_logtosdf(self.datadir, i)
infile = getInfor(infile1, infile2, infile3, isomericSmiles=False)
out.write(str(i) + " " + self.index_init_list[idx] + " " + " ".join(infile.infor) + "\n")
out.close()
os.chdir(self.olddir)
def check_consistency(self, rule):
"""
Check consistency of initial data, MMFF optimized data, and QM optimized data.
Remove structures which are not consistent based on rule
Args:
rule: "strict" or "loose"
"""
data = pd.read_csv(os.path.join(self.outdir, "initial_dataset.csv"), sep=" ")
if rule == "strict":
data = data[(data["initial_SMILES"] == data["MMFF_SMILES"]) & (data["MMFF_SMILES"] == data["QM_SMILES"])]
data.to_csv(os.path.join(self.outdir, "data_consistent_strict.csv"), index = False)
elif rule == "loose":
data_initial = data[(data["initial_SMILES"] == data["MMFF_SMILES"]) | (data["initial_InChI"] == data["MMFF_InChI"])]
data = data_initial[(data_initial["MMFF_SMILES"] == data_initial["QM_SMILES"]) | (data_initial["MMFF_InChI"] == data_initial["QM_InChI"])]
data.to_csv(os.path.join(self.outdir, "data_consistent_loose.csv"), index = False)
def check_others(self, infile):
"""
        Check for radicals and non-zero formal charges; the MMFF-optimized structure file is used for the check
Args:
infile: data index file
"""
update_list = []
os.chdir(self.datadir)
data = pd.read_csv(os.path.join(self.outdir, infile))
index_list = list(data["index"])
for i in index_list:
mol = Chem.SDMolSupplier(str(i) + self.suffix)[0]
if Descriptors.NumRadicalElectrons(mol) == 0:
if len([atom for atom in mol.GetAtoms() if atom.GetFormalCharge() != 0]) == 0:
update_list.append(i)
data = data[data["index"].isin(update_list)]
data.to_csv(os.path.join(self.outdir, infile.split(".")[0] + "_rmrpc.csv"), index = False)
os.chdir(self.olddir)
```
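A sketch of how the `check` workflow above is meant to be driven end to end; the directories and index list here are placeholders.
```python
# Hypothetical end-to-end use of the check class defined above.
checker = check(datadir="./calc", index_list=[1, 2, 3], outdir="./index", confs=False)
checker.built_initialdata()                          # writes initial_dataset.csv
checker.check_consistency(rule="loose")              # writes data_consistent_loose.csv
checker.check_others("data_consistent_loose.csv")    # drops radicals / charged species
```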
#### File: Frag20_prepare/DataGen/genconfs.py
```python
import os, sys
from rdkit import Chem
from rdkit.Chem import AllChem, TorsionFingerprints
from rdkit.ML.Cluster import Butina
from rdkit.Chem import PandasTools
import pandas as pd
from tqdm import tqdm
def gen_conformers(mol, numConfs=1):
"""Generate conformation with local minimization"""
### generate conf using ETKDG method ###
ps = AllChem.ETKDG()
ps.maxAttempts = 1000
ps.randomSeed = 1
ps.pruneRmsThresh = 0.1
ps.numThreads = 0
ids = AllChem.EmbedMultipleConfs(mol, numConfs, ps)
### Check MMFF parms ###
if AllChem.MMFFHasAllMoleculeParams(mol):
### MMFF optimize ###
method = "MMFF"
for cid in ids:
_ = AllChem.MMFFOptimizeMolecule(mol, confId=cid)
else:
### UFF optimize ###
method = "UFF"
for cid in ids:
_ = AllChem.UFFOptimizeMolecule(mol, confId=cid)
return list(ids), method
def cluster_conformers(mol, mode="RMSD", threshold=0.2):
"""
    Cluster conformers based on heavy-atom RMSD,
    then use the Butina algorithm for clustering
"""
### get heavy atom idx ###
heavyatomidx = []
for a in mol.GetAtoms():
if a.GetAtomicNum() != 1:
heavyatomidx.append(a.GetIdx())
### align on heavy atom for each pair and get dmat ###
n = mol.GetNumConformers()
dmat = []
for i in range(n):
for j in range(i):
dmat.append(Chem.rdMolAlign.AlignMol(mol, mol, i, j, atomMap=[(k, k) for k in heavyatomidx]))
### clustering ###
rms_clusters = Butina.ClusterData(dmat, mol.GetNumConformers(), threshold, isDistData=True, reordering=True)
return rms_clusters
def calc_energy(idx, mol, conformerId, method, minimizeIts=0):
"""
    Set minimizeIts to 0 to turn off minimization,
    since MMFF optimization has already been done before.
    Here, it is used to get the MMFF energy.
"""
if method == "MMFF":
try:
mp = AllChem.MMFFGetMoleculeProperties(mol)
ff = AllChem.MMFFGetMoleculeForceField(mol, mp, confId=conformerId)
results = {}
            ### default is not to minimize; since we already did MMFF optimization, conducting minimization here or not doesn't matter ###
if minimizeIts > 0:
ff.Initialize()
ff.Minimize(maxIts=minimizeIts)
ff = AllChem.MMFFGetMoleculeForceField(mol, mp, confId=conformerId)
results["energy_abs"] = ff.CalcEnergy()
except:
### for some molecules, such as HF, they can't be minimized ###
results = {}
results["energy_abs"] = None
else:
try:
            ff = AllChem.UFFGetMoleculeForceField(mol, confId=conformerId)
results = {}
if minimizeIts > 0:
ff.Initialize()
ff.Minimize(maxIts=minimizeIts)
                ff = AllChem.UFFGetMoleculeForceField(mol, confId=conformerId)
results["energy_abs"] = ff.CalcEnergy()
except:
### for some molecules, such as HF, they can't be minimized ###
results = {}
results["energy_abs"] = None
return results
def runGenerator(index_list, smiles_list, source_data_name, datadir, structure_dir=None, numConfs=300,
clusterMethod="RMSD", clusterThreshold=0.2):
"""Generate conformation as sdf for all smiles in input
Args:
index_list: the list of initial indexes from source data
smiles_list: the rdkit smiles generated using initial source data SMILES
source_data_name: the source data name, such as ccdc, zinc, and pubchem, which is used to record the initial index in that source data
datadir: directory for outputfile
structure_dir: the directory used to find the 3D structure file, if it is None, the numConfs should not be None
        numConfs: the number of conformations generated before clustering, defaults to 300; if numConfs=None, we conduct local MMFF minimization of the provided structure instead
        clusterMethod: the distance calculation method used in clustering, defaults to RMSD
        clusterThreshold: the clustering threshold, defaults to 0.2
        notice: we only conduct conformation generation once for each SMILES, but when numConfs=None, we conduct local optimization for each structure
    Returns:
        saved *_confors.sdf for conformation generation or *_min.sdf for local minimization with MMFF; returns Failed_list
"""
Failed_list = []
Used_smiles_list = []
for idx, smiles in enumerate(tqdm(smiles_list)):
# print(idx, smiles)
if numConfs:
### check SMILES to make sure only conduct one time conformation generation for each SMILES ###
if smiles in Used_smiles_list:
continue
if structure_dir:
if str(index_list[idx]) + ".sdf" in os.listdir(structure_dir):
print("Use 3D Structure as Reference:", str(index_list[idx]) + ".sdf")
mol = Chem.SDMolSupplier(os.path.join(structure_dir, str(index_list[idx]) + ".sdf"), removeHs=False)[0]
                if mol is None:
                    print("Wrong 3D Structure!")
                    continue
            else:
                print("Need 3D Structure File Provided")
                continue  # without a structure file there is nothing to process for this entry
else:
            if numConfs is None:
                print("Can't Conduct Structure Local Minimization without Structure!")
continue
mol = Chem.MolFromSmiles(smiles)
if mol != None:
### add H to initial SMILES ###
mol = Chem.AddHs(mol)
else:
print("Wrong SMILES!")
continue
if mol != None:
if numConfs:
### generated conformations are saved in *_confors.sdf file ###
w = Chem.SDWriter(os.path.join(datadir, str(index_list[idx]) + "_confors.sdf"))
try:
conformerIds, method = gen_conformers(mol, numConfs=numConfs)
except:
### failed cases have been captured by Failed_list ###
                    ### situation 1: conformation generation process failed ###
Failed_list.append(idx)
continue
if conformerIds == []:
### situation 2: no conformation has been generated ###
Failed_list.append(idx)
continue
### cluster conformations ###
rmsClusters = cluster_conformers(mol, clusterMethod, clusterThreshold)
conformerPropsDict = {}
n = 0
for clusterId in rmsClusters:
n = n + 1
### each cluster, we only keep the centroid ###
for conformerId in clusterId[:1]:
conformerPropsDict[conformerId] = {}
### structure minimization (optional) and energy calculation ###
conformerPropsDict[conformerId]["energy_abs"] = calc_energy(idx, mol, conformerId, method)[
"energy_abs"]
### situation 3: no minimized energy ###
if conformerPropsDict[conformerId]["energy_abs"] == None:
Failed_list.append(idx)
continue
conformerPropsDict[conformerId]["SMILES"] = smiles
conformerPropsDict[conformerId]["cluster_no"] = n
conformerPropsDict[conformerId][source_data_name + "_id"] = index_list[idx]
conformerPropsDict[conformerId]["initial_conformation_id"] = conformerId
conformerPropsDict[conformerId]["minimize_method"] = method
for key in conformerPropsDict[conformerId].keys():
mol.SetProp(key, str(conformerPropsDict[conformerId][key]))
w.write(mol, confId=conformerId)
# print("The total number of conformers after clustring: " + str(n))
### only append smiles in the Used_smiles_list after the successful conformation generation ###
Used_smiles_list.append(smiles)
else:
### no conformation generation, just minimization of structure ###
### local minimized structure is saved in *_min.sdf ###
w = Chem.SDWriter(os.path.join(datadir, str(index_list[idx]) + "_min.sdf"))
conformerPropsDict = {}
conformerId = 0
method = "MMFF"
conformerPropsDict[conformerId] = {}
                ### here, we need to conduct minimization using calc_energy function, which is controlled by minimizeIts=200 ###
conformerPropsDict[conformerId]["energy_abs"] = \
calc_energy(idx, mol, conformerId, method, minimizeIts=200)["energy_abs"]
if conformerPropsDict[conformerId]["energy_abs"] == None:
Failed_list.append(idx)
continue
conformerPropsDict[conformerId]["SMILES"] = smiles
conformerPropsDict[conformerId][source_data_name + "_id"] = index_list[idx]
conformerPropsDict[conformerId]["initial_conformation_id"] = conformerId
conformerPropsDict[conformerId]["minimize_method"] = method
for key in conformerPropsDict[conformerId].keys():
mol.SetProp(key, str(conformerPropsDict[conformerId][key]))
w.write(mol, confId=conformerId)
print("Finish Local Minimization with MMFF")
else:
print("Wrong Structure!")
w.flush()
w.close()
return Failed_list
def get_index(infile, smiles_name, index_name):
"""Get index and SMILES list
Args:
smiles_name: column name of SMILES
index_name: column name of index
"""
infile = pd.read_csv(infile)
smiles = infile[smiles_name].tolist()
index = infile[index_name].tolist()
return smiles, index
if __name__ == "__main__":
size = "20"
if size + "_confs" not in os.listdir("."):
os.mkdir(size + "_confs")
os.chdir(size + "_confs")
index_list = []
smiles_list = []
for size in ["20"]:
        smiles, indexs = get_index("../" + size + ".csv", smiles_name="SMILES", index_name="index")  # index file name and column names are assumed here
index_list.extend(indexs)
smiles_list.extend(smiles)
index_list_redo = []
smiles_list_redo = []
problem_list = [int(i.rstrip()) for i in open("../redoconfs.csv")]
for idx, i in enumerate(index_list):
if i in problem_list:
index_list_redo.append(i)
smiles_list_redo.append(smiles_list[idx])
print(len(index_list_redo))
structure_dir = "/Users/jianinglu1/Documents/Python_API_2019/my_code/ccdc_structures"
    runGenerator(index_list_redo, smiles_list_redo, source_data_name="ccdc", datadir=".",
                 structure_dir=structure_dir, numConfs=1000)
```
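A sketch of the intended call sequence for `runGenerator`; the index CSV name and its column names are assumptions for illustration.
```python
# Hypothetical driver; "frag20_index.csv" and its column names are placeholders.
smiles_list, index_list = get_index("frag20_index.csv", smiles_name="SMILES", index_name="index")
failed = runGenerator(index_list, smiles_list,
                      source_data_name="ccdc",   # recorded on each conformer as "ccdc_id"
                      datadir="./confs",
                      numConfs=300)
print("Failed indexes:", failed)
```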
|
{
"source": "jenniew/analytics-zoo",
"score": 2
}
|
#### File: zoo/common/nncontext.py
```python
from bigdl.util.common import *
def get_nncontext(conf=None):
"""
Gets a SparkContext with optimized configuration for BigDL performance. The method
will also initialize the BigDL engine.
Note: if you use spark-shell or Jupyter notebook, as the Spark context is created
before your code, you have to set Spark conf values through command line options
or properties file, and init BigDL engine manually.
:param conf: User defined Spark conf
"""
sc = get_spark_context(conf)
redire_spark_logs()
show_bigdl_info_logs()
init_engine()
return sc
```
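A minimal sketch of typical use, assuming BigDL's `create_spark_conf` helper for building the SparkConf.
```python
from bigdl.util.common import create_spark_conf
from zoo.common.nncontext import get_nncontext

conf = create_spark_conf().setAppName("zoo-app").setMaster("local[4]")
sc = get_nncontext(conf)   # SparkContext with the BigDL engine initialized
```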
#### File: examples/textclassification/news20.py
```python
import tarfile
import zipfile
from bigdl.dataset import base
from bigdl.util.common import *
NEWS20_URL = 'http://qwone.com/~jason/20Newsgroups/20news-18828.tar.gz'
GLOVE_URL = 'http://nlp.stanford.edu/data/glove.6B.zip'
CLASS_NUM = 20
def get_news20(base_dir="./data/news20"):
"""
Parse 20 Newsgroup dataset and return a list of (tokens, label).
The dataset will be downloaded automatically if not found in the target base_dir.
"""
news20_dir = base_dir + "/20news-18828/"
if not os.path.isdir(news20_dir):
download_news20(base_dir)
texts = []
label_id = 0
for category in sorted(os.listdir(news20_dir)):
category_dir = os.path.join(news20_dir, category)
label_id += 1
if os.path.isdir(category_dir):
for text_file in sorted(os.listdir(category_dir)):
if text_file.isdigit():
text_file_path = os.path.join(category_dir, text_file)
if sys.version_info < (3,):
f = open(text_file_path)
else:
f = open(text_file_path, encoding='latin-1')
content = f.read()
texts.append((content, label_id))
f.close()
print('Found %s texts.' % len(texts))
return texts
def get_glove(base_dir="./data/news20", dim=100):
"""
Parse the pre-trained glove6B word2vec and return a dict mapping from word to vector,
given the dim of a vector.
The word embeddings will be downloaded automatically if not found in the target base_dir.
"""
glove_dir = base_dir + "/glove.6B"
if not os.path.isdir(glove_dir):
download_glove(base_dir)
glove_path = os.path.join(glove_dir, "glove.6B.%sd.txt" % dim)
if sys.version_info < (3,):
w2v_f = open(glove_path)
else:
w2v_f = open(glove_path, encoding='latin-1')
pre_w2v = {}
for line in w2v_f.readlines():
items = line.split(" ")
pre_w2v[items[0]] = [float(i) for i in items[1:]]
w2v_f.close()
return pre_w2v
def download_news20(dest_dir):
news20 = "20news-18828.tar.gz"
news20_path = base.maybe_download(news20, dest_dir, NEWS20_URL)
tar = tarfile.open(news20_path, "r:gz")
news20_dir = os.path.join(dest_dir, "20news-18828")
if not os.path.exists(news20_dir):
print("Extracting %s to %s" % (news20_path, news20_dir))
tar.extractall(dest_dir)
tar.close()
def download_glove(dest_dir):
glove = "glove.6B.zip"
glove_path = base.maybe_download(glove, dest_dir, GLOVE_URL)
zip_ref = zipfile.ZipFile(glove_path, 'r')
glove_dir = os.path.join(dest_dir, "glove.6B")
if not os.path.exists(glove_dir):
print("Extracting %s to %s" % (glove_path, glove_dir))
zip_ref.extractall(glove_dir)
zip_ref.close()
```
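A sketch of how the two loaders combine into training pairs with an embedding lookup; the naive whitespace tokenizer is an assumption for illustration.
```python
# Hypothetical glue code pairing each text with its GloVe vectors.
texts = get_news20("./data/news20")          # list of (content, label_id)
glove = get_glove("./data/news20", dim=100)  # dict: word -> 100-d vector

samples = []
for content, label in texts:
    vectors = [glove[w] for w in content.lower().split() if w in glove]
    samples.append((vectors, label))
print("Prepared %d samples" % len(samples))
```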
#### File: keras/engine/topology.py
```python
from bigdl.nn.keras.layer import KerasLayer
from bigdl.nn.layer import Node
from zoo.pipeline.api.keras.utils import *
if sys.version >= '3':
long = int
unicode = str
class ZooKerasCreator(JavaValue):
def jvm_class_constructor(self):
name = "createZooKeras" + self.__class__.__name__
print("creating: " + name)
return name
class ZooKerasLayer(ZooKerasCreator, KerasLayer):
@classmethod
def of(cls, jvalue, bigdl_type="float"):
return KerasLayer(jvalue, bigdl_type)
class KerasNet(ZooKerasLayer):
def compile(self, optimizer, loss, metrics=None):
"""
Configure the learning process. It MUST be called before fit or evaluate.
# Arguments
optimizer: Optimization method to be used. One can alternatively pass in the corresponding
string representation, such as 'sgd'.
loss: Criterion to be used. One can alternatively pass in the corresponding string
representation, such as 'mse'.
metrics: List of validation methods to be used. Default is None if no validation is needed.
One can alternatively use ['accuracy'].
"""
if isinstance(optimizer, six.string_types):
optimizer = to_bigdl_optim_method(optimizer)
if isinstance(loss, six.string_types):
loss = to_bigdl_criterion(loss)
if callable(loss):
from zoo.pipeline.api.autograd import CustomLoss
loss = CustomLoss(loss, self.get_output_shape()[1:])
if metrics and all(isinstance(metric, six.string_types) for metric in metrics):
metrics = to_bigdl_metrics(metrics)
callBigDlFunc(self.bigdl_type, "zooCompile",
self.value,
optimizer,
loss,
metrics)
def set_tensorboard(self, log_dir, app_name):
"""
Set summary information during the training process for visualization purposes.
Saved summary can be viewed via TensorBoard.
In order to take effect, it needs to be called before fit.
Training summary will be saved to 'log_dir/app_name/train'
and validation summary (if any) will be saved to 'log_dir/app_name/validation'.
# Arguments
log_dir: The base directory path to store training and validation logs.
app_name: The name of the application.
"""
callBigDlFunc(self.bigdl_type, "zooSetTensorBoard",
self.value,
log_dir,
app_name)
def set_checkpoint(self, path, over_write=True):
"""
Configure checkpoint settings to write snapshots every epoch during the training process.
In order to take effect, it needs to be called before fit.
# Arguments
path: The path to save snapshots. Make sure this path exists beforehand.
over_write: Whether to overwrite existing snapshots in the given path. Default is True.
"""
callBigDlFunc(self.bigdl_type, "zooSetCheckpoint",
self.value,
path,
over_write)
def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True):
"""
Train a model for a fixed number of epochs on a dataset.
# Arguments
x: Input data. A Numpy array or RDD of Sample or Image DataSet.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet.
batch_size: Number of samples per gradient update.
        nb_epoch: Number of epochs to train.
validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays.
Or RDD of Sample. Default is None if no validation is involved.
distributed: Boolean. Whether to train the model in distributed mode or local mode.
Default is True. In local mode, x and y must both be Numpy arrays.
"""
if distributed:
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
training_data = to_sample_rdd(x, y)
if validation_data:
validation_data = to_sample_rdd(*validation_data)
elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y:
training_data = x
else:
raise TypeError("Unsupported training data type: %s" % type(x))
callBigDlFunc(self.bigdl_type, "zooFit",
self.value,
training_data,
batch_size,
nb_epoch,
validation_data)
else:
if validation_data:
val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
val_y = JTensor.from_ndarray(validation_data[1])
else:
val_x, val_y = None, None
callBigDlFunc(self.bigdl_type, "zooFit",
self.value,
[JTensor.from_ndarray(x) for x in to_list(x)],
JTensor.from_ndarray(y),
batch_size,
nb_epoch,
val_x,
val_y)
def evaluate(self, x, y=None, batch_size=32):
"""
Evaluate a model on a given dataset in distributed mode.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
y: Labels. A Numpy array. Default is None if x is already RDD of Sample.
batch_size: Number of samples per gradient update.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
evaluation_data = to_sample_rdd(x, y)
elif isinstance(x, RDD) and not y:
evaluation_data = x
else:
raise TypeError("Unsupported evaluation data type: %s" % type(x))
return callBigDlFunc(self.bigdl_type, "zooEvaluate",
self.value,
evaluation_data,
batch_size)
def predict(self, x, distributed=True):
"""
Use a model to do prediction.
# Arguments
x: Input data. A Numpy array or RDD of Sample.
distributed: Boolean. Whether to do prediction in distributed mode or local mode.
Default is True. In local mode, x must be a Numpy array.
"""
        if distributed:
if isinstance(x, np.ndarray):
features = to_sample_rdd(x, np.zeros([x.shape[0]]))
elif isinstance(x, RDD):
features = x
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
return self.predict_distributed(features)
else:
if isinstance(x, np.ndarray):
return self.predict_local(x)
else:
raise TypeError("Unsupported prediction data type: %s" % type(x))
class Input(ZooKerasCreator, Node):
"""
Used to instantiate an input node.
# Arguments
shape: A shape tuple, not including batch.
    name: String to set the name of the input node. If not specified, its name will by default be a generated string.
>>> input = Input(name="input1", shape=(3, 5))
creating: createZooKerasInput
"""
def __init__(self, shape=None, name=None, bigdl_type="float"):
super(Input, self).__init__(None, bigdl_type,
name,
list(shape) if shape else None)
class InputLayer(ZooKerasLayer):
"""
Used as an entry point into a model.
# Arguments
input_shape: A shape tuple, not including batch.
    name: String to set the name of the input layer. If not specified, its name will by default be a generated string.
>>> inputlayer = InputLayer(input_shape=(3, 5), name="input1")
creating: createZooKerasInputLayer
"""
def __init__(self, input_shape=None, **kwargs):
super(InputLayer, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Merge(ZooKerasLayer):
"""
Used to merge a list of inputs into a single output, following some merge mode.
Merge must have at least two input layers.
When using this layer as the first layer in a model, you need to provide the argument
input_shape for input layers (a list of shape tuples, does not include the batch dimension).
# Arguments
layers: A list of layer instances. Must be more than one layer.
mode: Merge mode. String, must be one of: 'sum', 'mul', 'concat', 'ave', 'cos',
'dot', 'max', 'sub', 'div', 'min'. Default is 'sum'.
concat_axis: Int, axis to use when concatenating layers. Only specify this when merge mode is 'concat'.
Default is -1, meaning the last axis of the input.
input_shape: A list of shape tuples, each not including batch.
    name: String to set the name of the layer. If not specified, its name will by default be a generated string.
>>> l1 = InputLayer(input_shape=(3, 5))
creating: createZooKerasInputLayer
>>> l2 = InputLayer(input_shape=(3, 5))
creating: createZooKerasInputLayer
>>> merge = Merge(layers=[l1, l2], mode='sum', name="merge1")
creating: createZooKerasMerge
"""
def __init__(self, layers=None, mode="sum", concat_axis=-1,
input_shape=None, **kwargs):
super(Merge, self).__init__(None,
list(layers) if layers else None,
mode,
concat_axis,
input_shape,
**kwargs)
def merge(inputs, mode="sum", concat_axis=-1, name=None):
"""
Functional merge. Only use this method if you are defining a graph model.
Used to merge a list of input nodes into a single output node (NOT layers!),
following some merge mode.
# Arguments
inputs: A list of node instances. Must be more than one node.
mode: Merge mode. String, must be one of: 'sum', 'mul', 'concat', 'ave', 'cos',
'dot', 'max', 'sub', 'div', 'min'. Default is 'sum'.
concat_axis: Int, axis to use when concatenating nodes. Only specify this when merge mode is 'concat'.
Default is -1, meaning the last axis of the input.
        name: String to set the name of the merge. If not specified, its name will by default be a generated string.
"""
return Merge(mode=mode, concat_axis=concat_axis, name=name)(list(inputs))
```
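A sketch of the compile/fit/predict flow that `KerasNet` exposes; the `Sequential`/`Dense` classes are assumed to come from the zoo Keras-style API (they are not defined in this file), and the import paths below are assumptions.
```python
import numpy as np
from zoo.pipeline.api.keras.models import Sequential   # assumed import path
from zoo.pipeline.api.keras.layers import Dense        # assumed import path

model = Sequential()
model.add(Dense(32, activation="relu", input_shape=(10,)))
model.add(Dense(1))
model.compile(optimizer="sgd", loss="mse")

x = np.random.rand(64, 10).astype("float32")
y = np.random.rand(64, 1).astype("float32")
model.fit(x, y, batch_size=16, nb_epoch=2, distributed=False)   # local mode needs Numpy arrays
pred = model.predict(x, distributed=False)
```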
#### File: pipeline/nnframes/nn_classifier.py
```python
from pyspark.ml.param.shared import *
from pyspark.ml.wrapper import JavaModel, JavaEstimator, JavaTransformer
from bigdl.optim.optimizer import SGD
from bigdl.util.common import *
from zoo.feature.common import *
if sys.version >= '3':
long = int
unicode = str
class HasBatchSize(Params):
"""
Mixin for param batchSize: batch size.
"""
# a placeholder to make it appear in the generated doc
batchSize = Param(Params._dummy(), "batchSize", "batchSize (>= 0).")
def __init__(self):
super(HasBatchSize, self).__init__()
#: param for batch size.
self.batchSize = Param(self, "batchSize", "batchSize")
self._setDefault(batchSize=1)
def setBatchSize(self, val):
"""
Sets the value of :py:attr:`batchSize`.
"""
self._paramMap[self.batchSize] = val
return self
def getBatchSize(self):
"""
Gets the value of batchSize or its default value.
"""
return self.getOrDefault(self.batchSize)
class HasOptimMethod:
optimMethod = SGD()
def __init__(self):
super(HasOptimMethod, self).__init__()
def setOptimMethod(self, val):
"""
Sets optimization method. E.g. SGD, Adam, LBFGS etc. from bigdl.optim.optimizer.
default: SGD()
"""
pythonBigDL_method_name = "setOptimMethod"
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
self.optimMethod = val
return self
def getOptimMethod(self):
"""
Gets the optimization method
"""
return self.optimMethod
class NNEstimator(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasBatchSize,
HasOptimMethod, JavaValue):
"""
NNEstimator extends org.apache.spark.ml.Estimator and supports training a BigDL model with
    Spark DataFrame data. It can be integrated into a standard Spark ML Pipeline to enable
    combined usage with Spark MLlib.
    NNEstimator supports different feature and label data types through operations defined in
    Preprocessing. We provide pre-defined Preprocessing for popular data types like Array
    or Vector in package zoo.feature, while users can also develop a customized Preprocessing
    which extends from feature.common.Preprocessing. During fit, NNEstimator
will extract feature and label data from input DataFrame and use the Preprocessing to prepare
data for the model.
Using the Preprocessing allows NNEstimator to cache only the raw data and decrease the
memory consumption during feature conversion and training.
More concrete examples are available in package com.intel.analytics.zoo.examples.nnframes
"""
def __init__(self, model, criterion, sample_preprocessing, jvalue=None, bigdl_type="float"):
"""
Construct a NNEstimator with BigDL model, criterion and a sample_preprocessing that
transform a (feature, Option[label]) tuple to a BigDL Sample. This constructor is only
recommended for the expert users. Most users should use class method
NNEstimator.create.
:param model: BigDL Model to be trained.
:param criterion: BigDL criterion.
:param sample_preprocessing: Expert param. A Preprocessing that transforms the (feature,
Option[label]) tuple to a BigDL Sample[T], where T is decided by the BigDL model.
Note that sample_preprocessing should be able to handle the case that label = None.
During fit, NNEstimator will extract (feature, Option[label]) tuple from input
DataFrame and use sample_preprocessing to transform the tuple into BigDL Sample
to be ingested by the model. If Label column is not available, (feature, None)
will be sent to sample_preprocessing.
The sample_preprocessing will also be copied to the generated NNModel and applied
to feature column during transform, where (feature, None) will be passed to the
sample_preprocessing.
:param jvalue: Java object create by Py4j
:param bigdl_type: optional parameter. data type of model, "float"(default) or "double".
"""
super(NNEstimator, self).__init__()
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), model, criterion, sample_preprocessing)
self.bigdl_type = bigdl_type
self._java_obj = self.value
self.maxEpoch = Param(self, "maxEpoch", "number of max Epoch")
self.learningRate = Param(self, "learningRate", "learning rate")
self._setDefault(maxEpoch=50, learningRate=1e-3, batchSize=1)
self.sample_preprocessing = sample_preprocessing
self.train_summary = None
self.validation_config = None
self.validation_summary = None
@classmethod
def create(cls, model, criterion, feature_preprocessing, label_preprocessing,
jvalue=None, bigdl_type="float"):
"""
Construct a NNEstimator with a feature_preprocessing and a label_Preprocessing, which
convert the data in feature column and label column to Tensors (Multi-dimension array)
        for model. This is the recommended constructor for most users.
The feature_preprocessing will be copied to the fitted NNModel, and apply to feature
column data during transform.
:param model: BigDL Model to be trained.
:param criterion: BigDL criterion.
:param feature_preprocessing: A Preprocessing that transforms the feature data to a
Tensor[T]. Some pre-defined Preprocessing are provided in package
zoo.feature. E.g.
ArrayToTensor is used to transform Array[_] in DataFrame to Tensor. For a feature
column that contains 576 floats in an Array, Users can set
ArrayToTensor(Array(28, 28)) as feature_preprocessing, which will convert the feature
data into Tensors with dimension 28 * 28 to be processed by a convolution Model.
For a simple linear model, user may just use ArrayToTensor(Array(576)), which will
convert the data into Tensors with single dimension (576).
MLlibVectorToTensor is used to transform org.apache.spark.mllib.linalg.Vector
to a Tensor.
ScalarToTensor transform a number to a Tensor with single dimension of length 1.
Multiple Preprocessing can be combined as a ChainedPreprocessing.
:param label_preprocessing: similar to feature_preprocessing, but applies to Label data.
:param jvalue: Java object create by Py4j
:param bigdl_type: optional parameter. data type of model, "float"(default) or "double".
"""
return cls(model, criterion,
FeatureLabelPreprocessing(feature_preprocessing, label_preprocessing),
jvalue, bigdl_type)
@classmethod
def createWithSize(cls, model, criterion, feature_size, label_size,
jvalue=None, bigdl_type="float"):
"""
Construct a NNEstimator with a feature size and label size. The constructor is useful
when the feature column and label column contains the following data types:
Float, Double, Int, Array[Float], Array[Double], Array[Int] and MLlib Vector. The
feature and label data are converted to Tensors with the specified sizes before sending
to the model.
:param model: BigDL Model to be trained.
:param criterion: BigDL criterion.
:param feature_size: The size (Tensor dimensions) of the feature data. e.g. an image
may be with width * height = 28 * 28, featureSize = Array(28, 28).
:param label_size: The size (Tensor dimensions) of the label data.
:param jvalue: Java object create by Py4j
:param bigdl_type: optional parameter. data type of model, "float"(default) or "double".
"""
return cls(model, criterion,
FeatureLabelPreprocessing(SeqToTensor(feature_size), SeqToTensor(label_size)),
jvalue, bigdl_type)
def setMaxEpoch(self, val):
"""
Sets the value of :py:attr:`maxEpoch`.
"""
self._paramMap[self.maxEpoch] = val
return self
def getMaxEpoch(self):
"""
Gets the value of maxEpoch or its default value.
"""
return self.getOrDefault(self.maxEpoch)
def setLearningRate(self, val):
"""
Sets the value of :py:attr:`learningRate`.
"""
self._paramMap[self.learningRate] = val
return self
def getLearningRate(self):
"""
Gets the value of learningRate or its default value.
"""
return self.getOrDefault(self.learningRate)
def setTrainSummary(self, val):
"""
Statistics (LearningRate, Loss, Throughput, Parameters) collected during training for the
training data, which can be used for visualization via Tensorboard.
Use setTrainSummary to enable train logger. Then the log will be saved to
logDir/appName/train as specified by the parameters of TrainSummary.
Default: Not enabled
:param summary: a TrainSummary object
"""
pythonBigDL_method_name = "setTrainSummary"
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
self.train_summary = val
return self
def getTrainSummary(self):
"""
Gets the train summary
"""
return self.train_summary
def setValidationSummary(self, val):
"""
Statistics (LearningRate, Loss, Throughput, Parameters) collected during training for the
validation data if validation data is set, which can be used for visualization via
Tensorboard. Use setValidationSummary to enable validation logger. Then the log will be
saved to logDir/appName/ as specified by the parameters of validationSummary.
Default: None
"""
pythonBigDL_method_name = "setValidationSummary"
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value, val)
self.validation_summary = val
return self
def getValidationSummary(self):
"""
Gets the Validation summary
"""
return self.validation_summary
def setValidation(self, trigger, val_df, val_method, batch_size):
"""
Set a validate evaluation during training
:param trigger: validation interval
:param val_df: validation dataset
:param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
:param batch_size: validation batch size
"""
pythonBigDL_method_name = "setValidation"
callBigDlFunc(self.bigdl_type, pythonBigDL_method_name, self.value,
trigger, val_df, val_method, batch_size)
self.validation_config = [trigger, val_df, val_method, batch_size]
return self
def getValidation(self):
"""
Gets the validate configuration. If validation config has been set, getValidation will
return a List of [ValidationTrigger, Validation data, Array[ValidationMethod[T]],
batchsize]
"""
return self.validation_config
def _create_model(self, java_model):
nnModel = NNModel.of(java_model, FeatureToTupleAdapter(self.sample_preprocessing),
self.bigdl_type)
nnModel.setFeaturesCol(self.getFeaturesCol()) \
.setPredictionCol(self.getPredictionCol()) \
.setBatchSize(self.getBatchSize())
return nnModel
class NNModel(JavaTransformer, HasFeaturesCol, HasPredictionCol, HasBatchSize, JavaValue):
"""
NNModel extends Spark ML Transformer and supports BigDL model with Spark DataFrame.
NNModel supports different feature data type through Preprocessing. Some common
Preprocessing have been defined in com.intel.analytics.zoo.feature.
After transform, the prediction column contains the output of the model as Array[T], where
T (Double or Float) is decided by the model type.
"""
def __init__(self, model, sample_preprocessing, jvalue=None, bigdl_type="float"):
"""
create a NNModel with a BigDL model
:param model: trained BigDL model to use in prediction.
:param sample_preprocessing: A Preprocessing that transforms the feature data to a
Sample[T].
:param jvalue: Java object create by Py4j
:param bigdl_type: optional parameter. data type of model, "float"(default) or "double".
"""
super(NNModel, self).__init__()
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, self.jvm_class_constructor(), model, sample_preprocessing)
self._java_obj = self.value
self.bigdl_type = bigdl_type
@classmethod
def create(cls, model, feature_preprocessing, jvalue=None, bigdl_type="float"):
"""
Construct NNModel with a BigDL model and a feature-to-tensor Preprocessing
        :param model: trained BigDL model to use in prediction.
:param feature_preprocessing: A Preprocessing that transforms the feature data to a
Tensor[T]. Some pre-defined Preprocessing are provided in package
zoo.feature. E.g.
ArrayToTensor is used to transform Array[_] in DataFrame to Tensor. For a feature
column that contains 576 floats in an Array, Users can set
ArrayToTensor(Array(28, 28)) as feature_preprocessing, which will convert the feature
data into Tensors with dimension 28 * 28 to be processed by a convolution Model.
For a simple linear model, user may just use ArrayToTensor(Array(576)), which will
convert the data into Tensors with single dimension (576).
MLlibVectorToTensor is used to transform org.apache.spark.mllib.linalg.Vector
to a Tensor.
ScalarToTensor transform a number to a Tensor with single dimension of length 1.
Multiple Preprocessing can be combined as a ChainedPreprocessing.
:param jvalue: Java object create by Py4j
:param bigdl_type(optional): Data type of BigDL model, "float"(default) or "double".
:return:
"""
chained_transformer = ChainedPreprocessing([feature_preprocessing, TensorToSample()])
return NNModel(model, chained_transformer, jvalue, bigdl_type)
@classmethod
def of(self, jvalue, sample_preprocessing=None, bigdl_type="float"):
model = NNModel(model=None, sample_preprocessing=sample_preprocessing, jvalue=jvalue,
bigdl_type=bigdl_type)
return model
class NNClassifier(NNEstimator):
"""
NNClassifier is a specialized NNEstimator that simplifies the data format for
classification tasks. It only supports label column of DoubleType, and the fitted
NNClassifierModel will have the prediction column of DoubleType.
"""
def __init__(self, model, criterion, sample_preprocessing, jvalue=None, bigdl_type="float"):
"""
:param model: BigDL module to be optimized
:param criterion: BigDL criterion method
:param sample_preprocessing: Expert param. A Preprocessing that transforms the (feature,
Option[label]) tuple to a BigDL Sample[T], where T is decided by the BigDL model.
Note that sample_preprocessing should be able to handle the case that label = None.
During fit, NNEstimator will extract (feature, Option[label]) tuple from input
DataFrame and use sample_preprocessing to transform the tuple into BigDL Sample
to be ingested by the model. If Label column is not available, (feature, None)
will be sent to sample_preprocessing.
The sample_preprocessing will also be copied to the generated NNModel and applied
to feature column during transform, where (feature, None) will be passed to the
sample_preprocessing.
Multiple Preprocessing can be combined as a ChainedPreprocessing.
:param bigdl_type(optional): Data type of BigDL model, "float"(default) or "double".
"""
super(NNClassifier, self).__init__(
model, criterion, sample_preprocessing, jvalue, bigdl_type)
@classmethod
def create(cls, model, criterion, feature_preprocessing, jvalue=None, bigdl_type="float"):
"""
Construct a NNEstimator with a feature_preprocessing and a label_Preprocessing, which
convert the data in feature column and label column to Tensors (Multi-dimension array)
        for model. This is the recommended constructor for most users.
:param model: BigDL module to be optimized
:param criterion: BigDL criterion method
        :param feature_preprocessing: A Preprocessing that transforms the feature data to a
Tensor[T]. Some pre-defined Preprocessing are provided in package
zoo.feature. E.g.
ArrayToTensor is used to transform Array[_] in DataFrame to Tensor. For a feature
column that contains 576 floats in an Array, Users can set
ArrayToTensor(Array(28, 28)) as feature_Preprocessing, which will convert the feature
data into Tensors with dimension 28 * 28 to be processed by a convolution Model.
For a simple linear model, user may just use ArrayToTensor(Array(576)), which will
convert the data into Tensors with single dimension (576).
MLlibVectorToTensor is used to transform org.apache.spark.mllib.linalg.Vector
to a Tensor.
ScalarToTensor transform a number to a Tensor with single dimension of length 1.
Multiple Preprocessing can be combined as a ChainedPreprocessing.
"""
return NNClassifier(model, criterion,
FeatureLabelPreprocessing(feature_preprocessing, ScalarToTensor()),
jvalue, bigdl_type)
@classmethod
def createWithSize(cls, model, criterion, feature_size, jvalue=None, bigdl_type="float"):
"""
Construct a NNClassifier with a feature size. The constructor is useful
when the feature column contains the following data types:
Float, Double, Int, Array[Float], Array[Double], Array[Int] and MLlib Vector. The
feature data are converted to Tensors with the specified sizes before sending
to the model.
:param model: BigDL Model to be trained.
:param criterion: BigDL criterion.
:param feature_size: The size (Tensor dimensions) of the feature data. e.g. an image
may be with width * height = 28 * 28, featureSize = Array(28, 28).
:param jvalue: Java object create by Py4j
:param bigdl_type: optional parameter. data type of model, "float"(default) or "double".
"""
return cls(model, criterion,
FeatureLabelPreprocessing(SeqToTensor(feature_size), ScalarToTensor()),
jvalue, bigdl_type)
class NNClassifierModel(NNModel):
"""
NNClassifierModel is a specialized [[NNModel]] for classification tasks. The prediction
column will have the datatype of Double.
"""
def __init__(self, model, feature_preprocessing, jvalue=None, bigdl_type="float"):
"""
:param model: trained BigDL model to use in prediction.
        :param feature_preprocessing: A Preprocessing that transforms the feature data to a
Tensor[T]. Some pre-defined Preprocessing are provided in package
zoo.feature. E.g.
ArrayToTensor is used to transform Array[_] in DataFrame to Tensor. For a feature
column that contains 576 floats in an Array, Users can set
ArrayToTensor(Array(28, 28)) as feature_Preprocessing, which will convert the feature
data into Tensors with dimension 28 * 28 to be processed by a convolution Model.
For a simple linear model, user may just use ArrayToTensor(Array(576)), which will
convert the data into Tensors with single dimension (576).
MLlibVectorToTensor is used to transform org.apache.spark.mllib.linalg.Vector
to a Tensor.
ScalarToTensor transform a number to a Tensor with single dimension of length 1.
Multiple Preprocessing can be combined as a ChainedPreprocessing.
:param jvalue: Java object create by Py4j
:param bigdl_type(optional): Data type of BigDL model, "float"(default) or "double".
"""
super(NNClassifierModel, self).__init__(model, feature_preprocessing, jvalue, bigdl_type)
@classmethod
def of(self, jvalue, feaTran=None, bigdl_type="float"):
model = NNClassifierModel(
model=None, feature_preprocessing=feaTran, jvalue=jvalue, bigdl_type=bigdl_type)
return model
```
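A sketch of wiring `NNClassifier` into a Spark DataFrame workflow; the toy model and the `train_df`/`test_df` DataFrames (with an Array[Float] "features" column and a Double "label" column) are illustrative assumptions.
```python
# Hypothetical pipeline around NNClassifier.createWithSize.
from bigdl.nn.layer import Sequential, Linear, LogSoftMax
from bigdl.nn.criterion import ClassNLLCriterion

model = Sequential().add(Linear(10, 2)).add(LogSoftMax())
classifier = NNClassifier.createWithSize(model, ClassNLLCriterion(), [10]) \
    .setBatchSize(32).setMaxEpoch(5).setLearningRate(0.01)

nn_model = classifier.fit(train_df)         # train_df: assumed Spark DataFrame with "features"/"label"
predictions = nn_model.transform(test_df)   # adds a Double "prediction" column
```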
|
{
"source": "jenniew/BigDL",
"score": 2
}
|
#### File: bigdl/keras/test_load_model.py
```python
from __future__ import print_function
import numpy as np
import pytest
from numpy.testing import assert_allclose
import bigdl.nn.layer as BLayer
from bigdl.keras.converter import WeightLoader
from bigdl.keras.converter import DefinitionLoader
np.random.seed(1337) # for reproducibility
from test.bigdl.test_utils import BigDLTestCase, TestModels
class TestLoadModel(BigDLTestCase):
def __kmodel_load_def_weight_test(self, kmodel, input_data):
keras_model_path_json, keras_model_path_hdf5 = self._dump_keras(kmodel, dump_weights=True)
bmodel = DefinitionLoader.from_json_path(keras_model_path_json)
WeightLoader.load_weights_from_hdf5(bmodel,
kmodel,
keras_model_path_hdf5)
bmodel.training(False)
boutput = bmodel.forward(input_data)
koutput = kmodel.predict(input_data)
assert_allclose(boutput, koutput, rtol=1e-5)
def test_load_api_with_hdf5(self):
kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
keras_model_json_path, keras_model_hdf5_path = self._dump_keras(kmodel, dump_weights=True)
bmodel = BLayer.Model.load_keras(keras_model_json_path, keras_model_hdf5_path)
self.assert_allclose(kmodel.predict(input_data),
bmodel.forward(input_data))
def test_load_api_no_hdf5(self):
kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
keras_model_json_path, keras_model_hdf5_path = self._dump_keras(kmodel, dump_weights=True)
bmodel = BLayer.Model.load_keras(keras_model_json_path)
def test_load_def_weights_graph_1_layer(self):
kmodel, input_data, output_data = TestModels.kmodel_graph_1_layer()
self.__kmodel_load_def_weight_test(kmodel, input_data)
def test_load_def_weights_graph_activation(self):
kmodel, input_data, output_data = TestModels.kmodel_graph_activation_is_layer()
self.__kmodel_load_def_weight_test(kmodel, input_data)
def test_load_def_weights_kmodel_seq_lenet_mnist(self):
kmodel, input_data, output_data = TestModels.kmodel_seq_lenet_mnist()
self.__kmodel_load_def_weight_test(kmodel, input_data)
if __name__ == "__main__":
pytest.main([__file__])
```
|
{
"source": "jenniew/IntelCaffeOnSpark_mirror",
"score": 2
}
|
#### File: ml/caffe/DataSource.py
```python
from ConversionUtil import wrapClass
from RegisterContext import registerContext
class DataSource:
"""Base class for various data sources.
Each subclass must have a constructor with the following signature: (conf: Config, layerId: Int, isTrain: Boolean).
This is required by CaffeOnSpark at startup.
:ivar SparkContext sc: The spark context of the current spark session
"""
def __init__(self,sc):
registerContext(sc)
self.dataSource=wrapClass("com.yahoo.ml.caffe.DataSource")
def getSource(self, conf, isTraining):
"""Returns a DataSource which can be used to train, test or extract features
:param Config conf: Config object with details of datasource file location, devices, model file path and other relevant configurations
:param Boolean isTraining: True for training and False for Test or feature extraction
:rtype: DataSource
:returns: a DataSource object
"""
return self.dataSource.getSource(conf.__dict__['config'],isTraining)
```
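A minimal sketch of obtaining a training source; the `Config` wrapper is assumed to live in the same CaffeOnSpark Python package as `DataSource`, and its constructor arguments are assumptions here.
```python
# Hypothetical usage; Config and its setup are assumptions, not confirmed by this file.
from pyspark import SparkContext
from Config import Config

sc = SparkContext(appName="caffe-on-spark")
conf = Config(sc)                                      # solver/model paths would be set on this object
train_source = DataSource(sc).getSource(conf, True)    # True -> training source
```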
|
{
"source": "jenniexie/happy",
"score": 2
}
|
#### File: happy/happy/HappyNodeJoin.py
```python
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
def option():
return options.copy()
class HappyNodeJoin(HappyLink, HappyNode, HappyNetwork):
"""
Assigns a virtual node to a specific network.
happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>]
[-c --customizedeui64 <CUST_EUI64>] [-p --tap]
-i --id Required. Node to be added to a network. Find using
happy-node-list or happy-state.
-n --network Required. Network to add the node to. Find using
happy-network-list or happy-state.
-m --mac The MAC hardware address for the node.
-c --customizedeui64 The EUI64 address for the node.
-p --tap Configure the link between the node and the network as an
L2 TAP device with a virtual bridge. Omit this parameter to
default to an L3 TUN configuration for normal IP routing.
Example:
$ happy-node-join ThreadNode HomeThread
Adds the ThreadNode node to the HomeThread network.
$ happy-node-join -i onhub -n HomeWiFi -m 5
Adds the onhub node to the HomeWiFi network with a MAC hardware address of
00:00:00:00:00:05.
$ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05
Adds the onhub node to the HomeWiFi network with an EUI64 address of
fc00:db20:35b:7399::5.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNetwork.__init__(self)
HappyNode.__init__(self)
HappyLink.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap = opts["tap"]
self.network_id = opts["network_id"]
self.fix_hw_addr = opts["fix_hw_addr"]
self.customized_eui64 = opts["customized_eui64"]
if not self.fix_hw_addr and opts["customized_eui64"]:
self.fix_hw_addr = self.customized_eui64[6:]
self.customized_eui64 = self.customized_eui64.replace(':', '-')
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if the name of the network is given
if not self.network_id:
emsg = "Missing name of the virtual network that be joined by a virtual node."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if network exists
if not self._networkExists():
emsg = "virtual network %s does not exist." % (self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if node already joined that network
if self.network_id in self.getNodeNetworkIds():
emsg = "virtual node %s is already part of %s network." % (self.node_id, self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
self.fix_hw_addr = self.fixHwAddr(self.fix_hw_addr)
# Check if HW MAC address is valid
if self.fix_hw_addr is not None and self.fix_hw_addr.count(":") != 5:
emsg = "virtual node %s get invalid MAC HW address %s." % (self.node_id, self.fix_hw_addr)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __create_link(self):
options = happy.HappyLinkAdd.option()
options["quiet"] = self.quiet
options["type"] = self.getNetworkType()
options["tap"] = self.tap
link = happy.HappyLinkAdd.HappyLinkAdd(options)
ret = link.run()
self.link_id = ret.Data()
self.readState()
def __post_check_1(self):
# Ensure that the link is saved in the state
if self.link_id not in self.getLinkIds():
emsg = "Link %s does not exist." % (self.link_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __get_node_interface_info(self):
self.link_type = self.getLinkType(self.link_id)
self.link_network_end = self.getLinkNetworkEnd(self.link_id)
self.link_node_end = self.getLinkNodeEnd(self.link_id)
self.node_interface_name = self.getNodeInterfaceName(self.node_id, self.link_type)
def __connect_to_network(self):
self.moveInterfaceToNamespace(self.link_network_end, self.network_id)
# Attach to bridge
cmd = "brctl addif " + self.uniquePrefix(self.network_id) + " " + self.link_network_end
cmd = self.runAsRoot(cmd)
ret = self.CallAtNetwork(self.network_id, cmd)
def __connect_to_node(self):
if not self.isNodeLocal(self.node_id):
if self.getLinkTap(self.link_id):
self.moveLwipInterfaceToNamespace(self.link_id, self.node_id)
else:
self.moveInterfaceToNamespace(self.link_node_end, self.node_id)
cmd = "ip link set " + self.link_node_end
cmd += " name " + self.node_interface_name
if self.fix_hw_addr is not None:
cmd += " address " + self.fix_hw_addr
cmd = self.runAsRoot(cmd)
ret = self.CallAtNode(self.node_id, cmd)
def __nmconf(self):
if not self.isNodeLocal(self.node_id):
return
if not self.tap:
cmd = "nmcli dev disconnect iface " + self.node_interface_name
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
def __check_node_hw_addr(self):
hw_addr = self.getHwAddress(self.node_interface_name, self.node_id)
hw_addr_int = IP.mac48_string_to_int(hw_addr)
if (hw_addr_int & (1 << 41)):
hw_addr_int = hw_addr_int & ~(1 << 41)
            new_hw_addr = IP.mac48_int_to_string(hw_addr_int)  # convert the cleared bits back to a MAC string (assumes this inverse helper exists in happy.utils.IP)
cmd = "ip link set " + self.node_interface_name + " address " + str(new_hw_addr)
cmd = self.runAsRoot(cmd)
r = self.CallAtNode(self.node_id, cmd)
def __post_check_2(self):
return
def __bring_up_interface(self):
self.bringLinkUp(self.link_id, self.node_interface_name, self.node_id, self.network_id)
def __add_new_interface_state(self):
self.setLinkNetworkNodeHw(self.link_id, self.network_id, self.node_id, self.fix_hw_addr)
new_network_interface = {}
self.setNetworkLink(self.network_id, self.link_id, new_network_interface)
new_node_interface = {}
new_node_interface["link"] = self.link_id
new_node_interface["type"] = self.link_type
new_node_interface["ip"] = {}
if self.customized_eui64:
new_node_interface["customized_eui64"] = self.customized_eui64
self.setNodeInterface(self.node_id, self.node_interface_name, new_node_interface)
def __assign_network_addresses(self):
network_prefixes = self.getNetworkPrefixes(self.network_id)
for prefix in network_prefixes:
options = happy.HappyNodeAddress.option()
options["quiet"] = self.quiet
options["node_id"] = self.node_id
options["interface"] = self.node_interface_name
if IP.isIpv6(prefix):
nid = self.getInterfaceId(self.node_interface_name, self.node_id)
else:
nid = self.getNextNetworkIPv4Id(prefix, self.network_id)
options["address"] = self.getNodeAddressOnPrefix(prefix, nid)
options["add"] = True
addrctrl = happy.HappyNodeAddress.HappyNodeAddress(options)
ret = addrctrl.run()
def __load_network_routes(self):
routes = self.getNetworkRoutes(self.network_id)
for route_to in routes.keys():
route_record = self.getNetworkRoute(route_to, self.network_id)
options = happy.HappyNodeRoute.option()
options["quiet"] = self.quiet
options["add"] = True
options["node_id"] = self.node_id
options["to"] = route_to
options["via"] = route_record["via"]
options["prefix"] = route_record["prefix"]
noder = happy.HappyNodeRoute.HappyNodeRoute(options)
ret = noder.run()
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__create_link()
self.__post_check_1()
self.__get_node_interface_info()
self.__connect_to_network()
self.__connect_to_node()
self.__nmconf()
self.__check_node_hw_addr()
self.__bring_up_interface()
self.__post_check_2()
self.__add_new_interface_state()
self.writeState()
self.__assign_network_addresses()
self.__load_network_routes()
return ReturnMsg(0)
```
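The same join can be driven programmatically by following the options-dict pattern the class itself uses for `HappyLinkAdd`; a minimal sketch:
```python
# Programmatic equivalent of "happy-node-join -i ThreadNode -n HomeThread".
import happy.HappyNodeJoin

options = happy.HappyNodeJoin.option()
options["node_id"] = "ThreadNode"
options["network_id"] = "HomeThread"
ret = happy.HappyNodeJoin.HappyNodeJoin(options).run()   # ReturnMsg(0) on success
```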
#### File: happy/happy/HappyProcess.py
```python
import os
import psutil
import sys
import time
import math
from happy.Utils import *
from happy.HappyHost import HappyHost
import happy.HappyLinkDelete
class HappyProcess(HappyHost):
def __init__(self, node_id=None):
HappyHost.__init__(self)
def GetProcessByPID(self, pid, create_time):
"""A helper method for finding the process by PID and creation time. Returns a
        psutil.Process object if there is a process matching the (PID, creation
        time) tuple, or None if no such process exists.
"""
if pid is None or create_time is None:
return None
p = psutil.Process(pid)
try:
p_create_time = p.create_time()
except Exception:
p_create_time = p.create_time
return create_time == p_create_time and p or None
def processExists(self, tag, node_id=None):
if node_id is None:
node_id = self.node_id
pid = self.getNodeProcessPID(tag, node_id)
create_time = self.getNodeProcessCreateTime(tag, node_id)
if pid is None:
return False
try:
p = self.GetProcessByPID(pid, create_time)
return p is not None and p.is_running() and p.status not in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]
except Exception:
return False
def BlockOnProcessPID(self, pid, create_time, timeout=None):
if pid is None:
return
p = None
try:
p = self.GetProcessByPID(pid, create_time)
if p is not None and p.is_running() and p.status not in [psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD]:
val = p.wait(timeout)
if val is None:
self.logger.debug("Process is terminated ")
else:
self.logger.debug("Process is terminated, possibly by os ")
except psutil.TimeoutExpired:
self.logger.info("TimeoutExpired happens")
if p is not None:
self.logger.info("kill process")
self.TerminateProcessTree(pid, create_time)
except Exception:
self.logger.debug("Process is terminated for unknown reasons")
pass
return
def BlockOnProcess(self, tag, node_id=None, timeout=None):
if node_id is None:
node_id = self.node_id
pid = self.getNodeProcessPID(tag, node_id)
create_time = self.getNodeProcessCreateTime(tag, node_id)
if pid is None:
return
self.BlockOnProcessPID(pid, create_time, timeout)
def GetProcessTreeAsList(self, pid, create_time):
try:
p = self.GetProcessByPID(pid, create_time)
if p is None:
return []
# python psutil 2.x and later expose Process.children() method; the
# equivalent functionality in psutil 1.2.1 was called get_children()
try:
childs = p.children(recursive=True)
except AttributeError:
childs = p.get_children(recursive=True)
# At the time of this writing, get_children returns a list of the
# children in breadth-first order. All leaves
# are at the end of the list.
return [p] + childs
except Exception:
return []
def __wait_procs(self, procs, timeout):
before = time.time()
after = before
alive = procs
# (old versions of psutil have a bug and return too soon)
while alive and (after - before) < timeout:
next_timeout = math.ceil(timeout - (after - before))
gone, alive = psutil.wait_procs(alive, timeout=next_timeout)
after = time.time()
if after < before:
after = before
return alive
def __signal_procs(self, procs, signal):
for c in procs:
try:
# We sudo, in case we don't own the process
cmd = "kill -" + signal + " " + str(c.pid)
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
if (ret != 0):
emsg = "Failed to send %s to process with PID %s." % (signal, str(c.pid))
self.logger.debug("[%s] HappyProcessStop: %s" % (self.node_id, emsg))
except Exception:
emsg = "Failed to send %s to process with PID %s." % (signal, str(c.pid))
self.logger.debug("[%s] HappyProcessStop: %s" % (self.node_id, emsg))
pass
def TerminateProcessTree(self, pid, create_time):
# HappyProcessStart creates a tree of processes.
# For example, if a normal user types "happy-process-start node01 ping ...",
# ps reports:
# root 141987 0.1 0.0 88764 5480 pts/43 S 19:37 0:00 sudo ip netns exec happy000 sudo -u andreello ping 127.0.0.1
# root 141988 0.1 0.1 124400 42524 pts/43 S 19:37 0:00 \_ sudo -u andreello ping 127.0.0.1
# andreel+ 141989 0.0 0.0 6500 628 pts/43 S 19:37 0:00 \_ ping 127.0.0.1
# But in some cases it will create only one process.
# If the command above is entered by root, ps shows:
#
# root 142652 0.0 0.0 6500 628 pts/43 S 19:41 0:00 ping 127.0.0.1
#
# Note that HappyProcessStart stores the pid of the oldest parent.
#
        # The goal is to send a SIGUSR1 to the actual process (in the example above, 'ping 127.0.0.1').
# If the process has not registered a handler for SIGUSR1, it will be terminated.
# Otherwise, the test process should handle the signal by cleaning up and exiting gracefully.
# All processes up the hierarchy should then exit naturally.
#
# So, it should be sufficient to send a SIGUSR1 to the youngest child process.
# But, we want to support the case of a process that itself spawns several children.
# For that reason, the code below sends a SIGUSR1 to all children of the main process
# and to the main process itself without checking if a process is a leaf of the tree or not.
# Note that sudo relays the signals it receives to its child process, so we're potentially
# sending the same signal twice to some of the children.
#
# Note that sending signals to different processes is not atomic, and so we don't know
# in which order the processes will actually exit. Also, PIDs can be reused, and so
# while looping through the process list and sending signals, there is no hard guarantee
# that the PID we're sending a signal to is still the same process.
# We do know that the PID stored in the happy state still refers to the right process because
# we also store and double check the create_time attribute.
# psutil also checks timestamps between invocations, and so psutil.wait_procs() won't get
# fooled by a new process having the same PID as one of the PIDs in procs.
# If we wanted to send signals using psutil we'd need to be root as most of the
# Happy processes belong to root.
#
        # If the processes have not terminated after 30 seconds, they are sent a SIGTERM,
# and then a SIGKILL.
# The timeouts are set at 30 seconds in case many Happy instances are run in parallel
# and the system is under heavy load.
#
procs = self.GetProcessTreeAsList(pid, create_time)
self.TerminateProcesses(procs)
def TerminateProcesses(self, procs):
# first send SIGUSR1
self.__signal_procs(procs, "SIGUSR1")
alive = self.__wait_procs(procs, 30)
if alive:
# if process ignored SIGUSR1, try sending terminate
self.__signal_procs(alive, "SIGTERM")
alive = self.__wait_procs(alive, 30)
if alive:
# if process is still around, just kill it
self.__signal_procs(alive, "SIGKILL")
    def GetProcessByName(self, name):
        # TODO: Sometimes pid is there, but psutil cannot get this process, need to fix it.
        processlist = []
        for pid in psutil.get_pid_list():
            try:
                p = psutil.Process(pid)
                # psutil 2.x exposes name() as a method; psutil 1.x as an attribute
                try:
                    p_name = p.name()
                except Exception:
                    p_name = p.name
                if p_name == name:
                    processlist.append(p)
            except Exception:
                pass
        return processlist
```
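The escalation sequence implemented by `TerminateProcesses` above (SIGUSR1 first, wait up to 30 seconds, then SIGTERM, wait again, then SIGKILL) can be illustrated with plain `psutil` calls. The sketch below is only a minimal illustration of that pattern, assuming psutil 2.x or later and that the caller owns the target processes; the Happy code instead shells out with `sudo kill` because most of its processes belong to root.
```python
import signal
import psutil

def terminate_tree(pid, timeout=30):
    """Sketch of the SIGUSR1 -> SIGTERM -> SIGKILL escalation over a process tree."""
    try:
        root = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    procs = [root] + root.children(recursive=True)
    for sig in (signal.SIGUSR1, signal.SIGTERM, signal.SIGKILL):
        for p in procs:
            try:
                p.send_signal(sig)
            except psutil.NoSuchProcess:
                pass  # the process exited between listing and signaling
        # wait_procs checks creation times internally, so PID reuse cannot fool it
        gone, procs = psutil.wait_procs(procs, timeout=timeout)
        if not procs:
            break  # everything exited; no need to escalate further
```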
#### File: happy/happy/HappyState.py
```python
import itertools
import json
import os
import sys
from happy.Utils import *
from happy.State import State
import happy.HappyNodeStatus
import happy.HappyNetworkStatus
try:
import networkx as nx
has_networkx = True
except Exception:
has_networkx = False
try:
import matplotlib.pyplot as plt
has_matplotlib = True
except Exception:
has_matplotlib = False
options = {}
options["quiet"] = False
options["save"] = None
options["graph"] = None
options["log"] = False
options["json"] = False
options["unlock"] = False
options["id"] = False
options["all"] = False
def option():
return options.copy()
class HappyState(State):
"""
Displays the state of the Happy network topology.
happy-state [-h --help] [-q --quiet] [-s --save <JSON_FILE>] [-g --graph]
[-l --logs] [-j --json] [-u --unlock] [-i --id] [-a --all]
-s --save Saves the current network topology state in a JSON file.
-g --graph Generates a network topology graph.
-l --logs Display Happy run-time logs. Run in a separate terminal
window to observe logs while using Happy.
-j --json Display the current state in JSON format.
-u --unlock Force unlock the Happy state file (~/.happy_state.json).
-i --id Displays all known state IDs.
-a --all Displays the network topology state for all known states.
Examples:
$ happy-state
Displays the current network topology state.
$ happy-state -s mystate.json
Saves the current network topology state in mystate.json.
$ happy-state -a -l
Displays Happy run-time logs for all known states.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
State.__init__(self)
self.quiet = opts["quiet"]
self.save = opts["save"]
self.graph = opts["graph"]
self.log = opts["log"]
self.json = opts["json"]
self.unlock_state = opts["unlock"]
self.show_id = opts["id"]
self.all = opts["all"]
def __pre_check(self):
pass
def __print_data_state(self):
if self.quiet or self.graph or self.save or self.log or self.json or \
self.unlock_state or self.show_id:
return
self.__print_own_state()
if self.all:
states = self.__get_state_ids()
this_state = self.getStateId()
og_state = os.environ.get(self.state_environ, None)
for state in states:
if state == this_state:
continue
os.environ[self.state_environ] = state
cmd = HappyState(option())
cmd.run()
if og_state is None:
os.environ.pop(self.state_environ)
else:
os.environ[self.state_environ] = og_state
def __print_own_state(self):
print
print "State Name: ",
print self.getStateId()
print
options = happy.HappyNetworkStatus.option()
options["quiet"] = self.quiet
nets = happy.HappyNetworkStatus.HappyNetworkStatus(options)
nets.run()
print
options = happy.HappyNodeStatus.option()
options["quiet"] = self.quiet
nodes = happy.HappyNodeStatus.HappyNodeStatus(options)
nodes.run()
print
def __print_json_state(self):
if self.json:
json_data = json.dumps(self.state, sort_keys=True, indent=4)
print
print json_data
print
def __save_state(self):
if self.save is None:
return
if self.save.split(".")[-1] != "json":
self.save = self.save + ".json"
try:
json_data = json.dumps(self.state, sort_keys=True, indent=4)
except Exception:
print "Failed to save state file: %s" % (self.save)
self.logger.error("calls self.exit()")
self.exit()
with open(self.save, 'w') as jfile:
jfile.write(json_data)
def __graph_state(self):
if self.graph is None:
return
if not has_networkx:
emsg = "Cannot generate graph. Localhost is missing networkx libraries."
self.logger.warning("[localhost] HappyState: %s" % (emsg))
print hyellow(emsg)
extra_msg = "Try, apt-get install python-networkx"
print hyellow(extra_msg)
return
if not has_matplotlib:
emsg = "Cannot generate graph. Localhost is missing matplotlib libraries."
self.logger.warning("[localhost] HappyState: %s" % (emsg))
print hyellow(emsg)
extra_msg = "Try, apt-get install python-matplotlib"
print hyellow(extra_msg)
return
G = nx.Graph()
node_points = []
node_colors = []
node_sizes = []
node_labels = {}
for node_id in self.getNodeIds():
node_points.append(node_id)
node_colors.append("#1FBCE0")
node_sizes.append(3000)
node_labels[node_id] = node_id
edge_points = []
edge_weights = []
edge_colors = []
for network_id in self.getNetworkIds():
color = None
weight = None
network_type = self.getNetworkType(network_id)
if network_type == "thread":
color = "green"
weight = 1
if network_type == "wifi":
color = "blue"
weight = 1.5
if network_type == "wan":
color = "red"
weigth = 2
if network_type == "cellular":
color = "black"
weigth = 2
if network_type == "out-of-band":
color = "yellow"
weigth = 2
if color is None or weight is None:
continue
nodes = []
for interface_id in self.getNetworkLinkIds(network_id):
nodes.append(self.getLinkNode(interface_id))
# cartesian product assuming everybody is connected
points = list(itertools.product(nodes, nodes))
edge_points += points
edge_colors += [color] * len(points)
edge_weights += [weight] * len(points)
G.add_nodes_from(node_points)
G.add_edges_from(edge_points)
        pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, nodelist=node_points, node_color=node_colors, node_size=node_sizes)
nx.draw_networkx_edges(G, pos, edgelist=edge_points, width=edge_weights, edge_color=edge_colors)
nx.draw_networkx_labels(G, pos, node_labels, font_size=14)
plt.axis('off')
        if self.save:
            if self.save.split(".")[-1] != "png":
                self.save = self.save + ".png"
            plt.savefig(self.save)
        else:
            plt.show()
def __post_check(self):
pass
def __try_unlock(self):
if self.unlock_state:
lock_manager = self.getStateLockManager()
lock_manager.break_lock()
def __get_state_ids(self):
states = []
files = os.listdir(os.path.dirname(os.path.expanduser(self.state_file_prefix)))
for f in files:
if f.endswith(self.state_file_suffix):
                s = f[:-len(self.state_file_suffix)]
s = s.lstrip(".")
states.append(s)
return states
def __show_state_id(self):
if self.show_id:
states = self.__get_state_ids()
this_state = self.getStateId()
print this_state + " <"
for s in states:
if s == this_state:
continue
print s
def __get_log_path(self):
file_path = None
if "handlers" in self.log_conf.keys():
if "file" in self.log_conf["handlers"].keys():
if "filename" in self.log_conf["handlers"]["file"].keys():
file_path = self.log_conf["handlers"]["file"]["filename"]
return file_path
def __show_logs(self):
if not self.log:
return
if self.all:
states = self.__get_state_ids()
og_state = os.environ.get(self.state_environ, None)
logs = ['tail']
for state in states:
os.environ[self.state_environ] = state
hs = HappyState(option())
log_file = hs.__get_log_path()
if log_file:
logs.append(log_file)
if og_state is None:
os.environ.pop(self.state_environ)
else:
os.environ[self.state_environ] = og_state
cmd = ' -f '.join(logs)
else:
file_path = self.__get_log_path()
if file_path is None:
emsg = "Happy aggregated logs file is unknown."
self.logger.warning("[localhost] HappyState: %s" % (emsg))
print hyellow(emsg)
return
cmd = "tail -n 100 -f " + file_path
print hgreen("Happy Runtime Logs. Press <Ctrl-C> to exit.")
os.system(cmd)
def run(self):
self.__pre_check()
self.__print_data_state()
self.__print_json_state()
self.__try_unlock()
self.__show_state_id()
self.__save_state()
self.__graph_state()
self.__show_logs()
self.__post_check()
```
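Like the other Happy commands it drives (`HappyNodeStatus`, `HappyNetworkStatus`), `HappyState` follows the module's option-dict convention: copy the defaults with `option()`, override the fields you need, construct the class, and call `run()`. A minimal driver sketch, assuming the `happy` package is importable and a state file already exists:
```python
# Usage sketch only; equivalent to running `happy-state -j` from the shell.
import happy.HappyState

opts = happy.HappyState.option()   # copy of the module-level defaults
opts["json"] = True                # dump the current topology state as JSON
happy.HappyState.HappyState(opts).run()
```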
#### File: happy/happy/State.py
```python
import logging
from happy.utils.IP import IP
from happy.Driver import Driver
class State(Driver):
def __init__(self):
Driver.__init__(self)
if not bool(self.isp_state):
self.isp_state = {}
if not bool(self.state):
self.state = {}
def isStateEmpty(self, state=None):
state = self.getState(state)
if self.getNodeIds():
return False
if self.getNetworkIds():
return False
if self.getLinkIds():
return False
return True
def getState(self, state=None):
if state is None:
state = self.state
return state
def getIspState(self, state=None):
if state is None:
state = self.isp_state
return state
def getNetNS(self, state=None):
state = self.getState(state)
if "netns" not in state.keys():
state["netns"] = {}
return state["netns"]
def getNetNSIds(self, state=None):
netns = self.getNetNS(state)
ids = netns.keys()
ids.sort()
return ids
def getIdentifiers(self, state=None):
state = self.getState(state)
if "identifiers" not in state.keys():
state["identifiers"] = {}
return state["identifiers"]
def getIdentifierByNodeId(self, node_id, state=None):
state = self.getState(state)
if "netns" not in state.keys():
return None
if node_id not in state["netns"]:
return None
return state["netns"][node_id]
def getNodes(self, state=None):
state = self.getState(state)
if "node" not in state.keys():
state["node"] = {}
return state["node"]
def getNodeIds(self, state=None):
state = self.getState(state)
if "node" not in state.keys():
state["node"] = {}
ids = state["node"].keys()
ids.sort()
return ids
def getNetworks(self, state=None):
state = self.getState(state)
if "network" not in state.keys():
state["network"] = {}
return state["network"]
def getNetworkIds(self, state=None):
state = self.getState(state)
if "network" not in state.keys():
state["network"] = {}
ids = state["network"].keys()
ids.sort()
return ids
def getLinks(self, state=None):
state = self.getState(state)
if "link" not in state.keys():
state["link"] = {}
return state["link"]
def getLinkIds(self, state=None):
state = self.getState(state)
if "link" not in state.keys():
state["link"] = {}
ids = state["link"].keys()
ids.sort()
return ids
def getGlobal(self, state=None):
state = self.getState(state)
if "global" not in state.keys():
state["global"] = {}
return state["global"]
def getGlobalIsp(self, state=None):
state = self.getIspState(state)
if "global_isp" not in state.keys():
state["global_isp"] = {}
return state["global_isp"]
# Retrieve Node interface information
def getNode(self, node_id=None, state=None):
if node_id is None:
node_id = self.node_id
if node_id not in self.getNodeIds(state):
return {}
nodes = self.getNodes(state)
return nodes[node_id]
def getNodeInterfaces(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "interface" not in node_record.keys():
return {}
return node_record["interface"]
def getNodeInterfaceIds(self, node_id=None, state=None):
node_interfaces = self.getNodeInterfaces(node_id, state)
return node_interfaces.keys()
def getNodeInterface(self, interface_id, node_id=None, state=None):
node_interfaces = self.getNodeInterfaces(node_id, state)
if interface_id not in node_interfaces.keys():
return {}
return node_interfaces[interface_id]
def getNodeInterfaceType(self, interface_id, node_id=None, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface == {}:
return ""
return node_interface["type"]
def getNodeInterfaceAddresses(self, interface_id, node_id=None, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface == {}:
return []
return node_interface["ip"].keys()
def getNodeInterfaceAddressInfo(self, interface_id, addr, node_id=None, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface == {}:
return {}
if addr not in self.getNodeInterfaceAddresses(interface_id, node_id, state):
return {}
return node_interface["ip"][addr]
def getNodeInterfaceAddressMask(self, interface_id, addr, node_id=None, state=None):
node_address_info = self.getNodeInterfaceAddressInfo(interface_id, addr, node_id, state)
if node_address_info == {}:
return None
return node_address_info["mask"]
def getNodeInterfaceLinkId(self, interface_id, node_id=None, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface == {}:
return []
return node_interface["link"]
def getNodeAddresses(self, node_id=None, state=None):
node_interfaces = self.getNodeInterfaceIds(node_id, state)
addrs = []
for interface_id in node_interfaces:
addr = self.getNodeInterfaceAddresses(interface_id, node_id, state)
addrs += addr
return addrs
def getNodePublicIPv4Address(self, node_id=None, state=None):
node_public_interfaces = self.getNodePublicInterfaces(node_id, state)
for interface_id in node_public_interfaces:
addresses = self.getNodeInterfaceAddresses(interface_id, node_id, state)
for addr in addresses:
if IP.isIpv4(addr):
return addr
return None
def getNodePublicInterfaces(self, node_id=None, state=None):
public_interfaces = []
node_interfaces = self.getNodeInterfaces(node_id, state)
for interface_id in node_interfaces.keys():
interface_type = self.getNodeInterfaceType(interface_id, node_id, state)
if interface_type == self.network_type["wan"]:
public_interfaces.append(interface_id)
return public_interfaces
def getNodeIdFromAddress(self, addr, state=None):
for node_id in self.getNodeIds(state):
node_addresses = self.getNodeAddresses(node_id, state)
if addr in node_addresses:
return node_id
return None
def getNodeInterfaceAddressMask(self, interface_id, addr, node_id=None, state=None):
interface_addresses = self.getNodeInterfaceAddresses(interface_id, node_id, state)
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface == {}:
return 0
if addr not in interface_addresses:
return 0
return node_interface["ip"][addr]["mask"]
def getNodeInterfacePrefixes(self, interface_id, node_id=None, state=None):
if node_id is None:
node_id = self.node_id
if node_id is None:
return []
if node_id not in self.getNodeIds(state):
return []
if interface_id not in self.getNodeInterfaceIds(node_id, state):
return []
prefixes = []
for addr in self.getNodeInterfaceAddresses(interface_id, node_id, state):
mask = self.getNodeInterfaceAddressMask(interface_id, addr, node_id, state)
prefix = IP.getPrefix(addr, mask)
prefixes.append(prefix)
return prefixes
def getNodeRoutes(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "route" not in node_record.keys():
return {}
return node_record["route"]
def getNodeRouteIds(self, node_id=None, state=None):
node_routes = self.getNodeRoutes(node_id, state)
return node_routes.keys()
def getNodeRoute(self, route_to, node_id=None, state=None):
node_routes = self.getNodeRoutes(node_id, state)
if route_to not in node_routes.keys():
return {}
return node_routes[route_to]
def getNodeLinkIds(self, node_id=None, state=None):
node_interfaces = self.getNodeInterfaces(node_id, state)
links = []
for interface_id in node_interfaces.keys():
links.append(node_interfaces[interface_id]["link"])
links.sort()
return links
def getNodeLinkFromInterface(self, interface_id, node_id=None, state=None):
for interface in self.getNodeInterfaceIds(node_id, state):
if interface == interface_id:
node_interface = self.getNodeInterface(interface_id, node_id, state)
return node_interface["link"]
return None
def getNodeInterfaceFromLink(self, link_id, node_id=None, state=None):
for interface_id in self.getNodeInterfaceIds(node_id, state):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface["link"] == link_id:
return interface_id
return None
def getNodeTmuxSessionIds(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "tmux" not in node_record.keys():
return []
return node_record["tmux"].keys()
def getNodeTmuxSession(self, session_id, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "tmux" not in node_record.keys():
return {}
if session_id not in self.getNodeTmuxSessionIds(node_id, state):
return {}
return node_record["tmux"][session_id]
def getNodeTmuxSessionUser(self, session_id, node_id=None, state=None):
tmux_session = self.getNodeTmuxSession(session_id, node_id, state)
if tmux_session == {}:
return None
if "run_as_user" in tmux_session.keys():
return tmux_session["run_as_user"]
else:
return None
def getNodeProcesses(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "process" not in node_record.keys():
return {}
return node_record["process"]
def getNodeProcessIds(self, node_id=None, state=None):
node_processes = self.getNodeProcesses(node_id, state)
return node_processes.keys()
def getNodeProcess(self, tag, node_id=None, state=None):
node_processes = self.getNodeProcesses(node_id, state)
if tag not in node_processes.keys():
return {}
return node_processes[tag]
def getNodeType(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "type" not in node_record.keys():
return None
return node_record["type"]
def getNodeProcessPID(self, tag=None, node_id=None, state=None):
process_record = self.getNodeProcess(tag, node_id, state)
if "pid" not in process_record.keys():
return None
return process_record["pid"]
def getNodeProcessCreateTime(self, tag=None, node_id=None, state=None):
process_record = self.getNodeProcess(tag, node_id, state)
if "create_time" not in process_record.keys():
return None
return process_record["create_time"]
def getNodeProcessOutputFile(self, tag=None, node_id=None, state=None):
process_record = self.getNodeProcess(tag, node_id, state)
if "out" not in process_record.keys():
return None
return process_record["out"]
def getNodeProcessStraceFile(self, tag=None, node_id=None, state=None):
process_record = self.getNodeProcess(tag, node_id, state)
if "strace" not in process_record.keys():
return None
return process_record["strace"]
def getNodeProcessCommand(self, tag=None, node_id=None, state=None):
process_record = self.getNodeProcess(tag, node_id, state)
if "command" not in process_record.keys():
return None
return process_record["command"]
def getNodeNetNS(self, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "netns" not in node_record.keys():
return None
return node_record["netns"]
# Retrieve Node network information
def getNetwork(self, network_id=None, state=None):
if network_id is None:
network_id = self.network_id
if network_id not in self.getNetworkIds(state):
return {}
networks = self.getNetworks(state)
return networks[network_id]
def getNetworkNetNS(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "netns" not in network_record.keys():
return None
return network_record["netns"]
def getNodeNetworkIds(self, node_id=None, state=None):
ids = []
for link_id in self.getNodeLinkIds(node_id, state):
link_record = self.getLink(link_id, state)
if link_record != {}:
ids.append(link_record["network"])
ids.sort()
return ids
def getNodeInterfacesOnNetwork(self, network_id, node_id=None, state=None):
network_links = self.getNetworkLinkIds(network_id, state)
node_links = self.getNodeLinkIds(node_id, state)
common_links = set.intersection(set(network_links), set(node_links))
common_links = list(common_links)
node_interfaces = []
for interface_id in self.getNodeInterfaceIds(node_id, state):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface["link"] in common_links:
node_interfaces.append(interface_id)
return node_interfaces
def getNodeAddressesOnNetwork(self, network_id, node_id=None, state=None):
addrs = []
interfaces = self.getNodeInterfacesOnNetwork(network_id, node_id, state)
for interface in interfaces:
a = self.getNodeInterfaceAddresses(interface, node_id, state)
addrs += a
addrs.sort(key=len, reverse=True)
return addrs
def getNodeAddressesOnPrefix(self, prefix, node_id=None, state=None):
addrs = self.getNodeAddresses(node_id, state)
res = []
for addr in addrs:
if IP.prefixMatchAddress(prefix, addr):
res.append(addr)
return res
def getNodeAddressesOnNetworkOnPrefix(self, network_id, prefix, node_id=None, state=None):
addrs_on_network = self.getNodeAddressesOnNetwork(network_id, node_id, state)
addrs_on_prefix = self.getNodeAddressesOnPrefix(prefix, node_id, state)
addrs = list(set(addrs_on_network).intersection(addrs_on_prefix))
return addrs
# Retrieve network interface information
def getNetworkNodesIds(self, network_id=None, state=None):
if network_id not in self.getNetworkIds(state):
return []
ids = []
for link_id in self.getNetworkLinkIds(network_id, state):
ids.append(self.getLinkNode(link_id, state))
ids.sort()
return ids
def getNetworkLinks(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "interface" not in network_record.keys():
return {}
return network_record["interface"]
def getNetworkLinkIds(self, network_id=None, state=None):
network_links = self.getNetworkLinks(network_id, state)
ids = network_links.keys()
ids.sort()
return ids
def getNetworkLink(self, interface_id, network_id=None, state=None):
network_links = self.getNetworkLinks(network_id, state)
if interface_id not in network_links.keys():
return {}
return network_links[interface_id]
def getNetworkType(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "type" not in network_record.keys():
return None
return network_record["type"]
def getNetworkState(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "state" not in network_record.keys():
return None
return network_record["state"]
def getNetworkPrefixRecords(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "prefix" not in network_record.keys():
return {}
return network_record["prefix"]
def getNetworkPrefixes(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "prefix" not in network_record.keys():
return []
return network_record["prefix"].keys()
def getNetworkPrefixMask(self, prefix, network_id=None, state=None):
network_prefixes = self.getNetworkPrefixRecords(network_id, state)
if prefix not in self.getNetworkPrefixes(network_id, state):
return None
if "mask" not in network_prefixes[prefix].keys():
return None
return network_prefixes[prefix]["mask"]
def getNetworkRoutes(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "route" not in network_record.keys():
return {}
return network_record["route"]
def getNetworkRouteIds(self, network_id=None, state=None):
network_record = self.getNetwork(network_id, state)
if "route" not in network_record.keys():
return []
return network_record["route"].keys()
def getNetworkRoute(self, route_to, network_id=None, state=None):
network_routes = self.getNetworkRoutes(network_id, state)
if route_to not in network_routes.keys():
return {}
return network_routes[route_to]
# Retrieve Link information
def getLink(self, link_id=None, state=None):
if link_id is None:
link_id = self.link_id
if link_id not in self.getLinkIds(state):
return {}
links = self.getLinks(state)
return links[link_id]
def getLinkNode(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "node" not in link.keys():
return None
return link["node"]
def getLinkNetwork(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "network" not in link.keys():
return None
return link["network"]
def getLinkType(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "type" not in link.keys():
return None
return link["type"]
def getLinkNumber(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "number" not in link.keys():
return None
return link["number"]
def getLinkTap(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "tap" not in link.keys():
return None
return link["tap"]
def getLinkNodeEnd(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "node_end" not in link.keys():
return None
return link["node_end"]
def getLinkNetworkEnd(self, link_id=None, state=None):
link = self.getLink(link_id, state)
if "network_end" not in link.keys():
return None
return link["network_end"]
def getInternet(self, state=None):
global_record = self.getGlobal(state)
if "internet" not in global_record.keys():
return {}
return global_record["internet"]
def getIsp(self, state=None):
global_isp_record = self.getGlobalIsp(state)
if "isp" not in global_isp_record.keys():
return {}
return global_isp_record["isp"]
def getDNS(self, state=None):
global_record = self.getGlobal(state)
if "DNS" not in global_record.keys():
return None
return global_record["DNS"]
    def getInternetHostLinkId(self, isp_id, state=None):
        internet_record = self.getInternet(state)
        if isp_id not in internet_record.keys() or "host_link" not in internet_record[isp_id]:
            return None
        return internet_record[isp_id]["host_link"]
    def getInternetNodeLinkId(self, isp_id, state=None):
        internet_record = self.getInternet(state)
        if isp_id not in internet_record.keys() or "node_link" not in internet_record[isp_id]:
            return None
        return internet_record[isp_id]["node_link"]
    def getInternetNodeId(self, isp_id, state=None):
        internet_record = self.getInternet(state)
        if isp_id not in internet_record.keys() or "node_id" not in internet_record[isp_id]:
            return None
        return internet_record[isp_id]["node_id"]
    def getInternetIspAddr(self, isp_id, state=None):
        internet_record = self.getInternet(state)
        if isp_id not in internet_record.keys() or "isp_addr" not in internet_record[isp_id]:
            return None
        return internet_record[isp_id]["isp_addr"]
    def getInternetIspIndex(self, isp_id, state=None):
        internet_record = self.getInternet(state)
        if isp_id not in internet_record.keys() or "isp_index" not in internet_record[isp_id]:
            return None
        return internet_record[isp_id]["isp_index"]
def getIspAvailable(self, state=None):
isp_record = self.getIsp(state)
available_ip_pool = filter(lambda s: not s["occupy"], isp_record)
return available_ip_pool
def getIspAvailableIndex(self, state=None):
isp_record = self.getIsp(state)
available_ip_pool = filter(lambda s: not s["occupy"], isp_record)
return int(available_ip_pool[0]['isp_index']) - 1
def getIspAddr(self, index, state=None):
isp_record = self.getIsp(state)
if index >= len(isp_record) or index < 0:
return None
if "isp_addr" not in isp_record[index].keys():
return None
return isp_record[index]["isp_addr"]
def getIspHostLinkId(self, index, state=None):
isp_record = self.getIsp(state)
        if index >= len(isp_record) or index < 0:
return None
if "isp_host_end" not in isp_record[index].keys():
return None
return isp_record[index]["isp_host_end"]
def getIspNodeLinkId(self, index, state=None):
isp_record = self.getIsp(state)
if index >= len(isp_record) or index < 0:
return None
if "isp_node_end" not in isp_record[index].keys():
return None
return isp_record[index]["isp_node_end"]
def getIspIndex(self, index, state=None):
isp_record = self.getIsp(state)
if index >= len(isp_record) or index < 0:
return None
if "isp_index" not in isp_record[index].keys():
return None
return isp_record[index]["isp_index"]
def setNodeProcess(self, process, tag, node_id=None, state=None):
node_record = self.getNode(node_id, state)
if "process" not in node_record.keys():
node_record["process"] = {}
node_record["process"][tag] = process
def setLink(self, link_id, link, state=None):
links = self.getLinks(state)
if links is not None:
links[link_id] = link
def setLinkNetworkNodeHw(self, link_id, network_id, node_id, hw_addr, state=None):
links = self.getLinks(state)
if links is not None:
if link_id not in links.keys():
links[link_id] = {}
links[link_id]["network"] = network_id
links[link_id]["node"] = node_id
links[link_id]["fix_hw_addr"] = hw_addr
def setNode(self, node_id, node, state=None):
nodes = self.getNodes(state)
if nodes is not None:
nodes[node_id] = node
def setNetwork(self, network_id, network, state=None):
networks = self.getNetworks(state)
if networks is not None:
networks[network_id] = network
def setNodeIpAddress(self, node_id, interface_id, ip_address, record, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if node_interface is not None:
if "ip" not in node_interface.keys():
node_interface["ip"] = {}
node_interface["ip"][ip_address] = record
def setNodeTmux(self, node_id, session_id, record, state=None):
node_record = self.getNode(node_id, state)
if "tmux" not in node_record.keys():
node_record["tmux"] = {}
node_record["tmux"][session_id] = record
def setNodeInterface(self, node_id, interface_id, record, state=None):
node_record = self.getNode(node_id, state)
if node_record is not None:
node_record["interface"][interface_id] = record
def setNodeRoute(self, node_id, to, record, state=None):
node_record = self.getNode(node_id, state)
if node_record is not None:
if "route" not in node_record.keys():
node_record["route"] = {}
if ("via" in record.keys() and IP.isIpv6(record["via"])) or \
("prefix" in record.keys() and IP.isIpv6(record["prefix"])):
to = to + "_v6"
else:
to = to + "_v4"
node_record["route"][to] = record
def setNetworkState(self, network_id, network_state, state=None):
network_record = self.getNetwork(network_id, state)
if network_record is not None:
network_record["state"] = network_state
def setNetworkLink(self, network_id, link_id, record, state=None):
network_record = self.getNetwork(network_id, state)
if network_record is not None:
network_record["interface"][link_id] = record
def setNetworkRoute(self, network_id, to, record, state=None):
network_record = self.getNetwork(network_id, state)
if network_record is not None:
if "route" not in network_record.keys():
network_record["route"] = {}
if ("via" in record.keys() and IP.isIpv6(record["via"])) or \
("prefix" in record.keys() and IP.isIpv6(record["prefix"])):
to = to + "_v6"
else:
to = to + "_v4"
network_record["route"][to] = record
def setNetworkPrefix(self, network_id, prefix, record, state=None):
network_record = self.getNetwork(network_id, state)
if network_record is not None:
if "prefix" not in network_record.keys():
network_record["prefix"] = {}
network_record["prefix"][prefix] = record
def setGlobalInternet(self, record, state=None):
global_record = self.getGlobal(state)
global_record["internet"] = record
def setGlobalIsp(self, record, state=None):
global_isp_record = self.getGlobalIsp(state)
global_isp_record["isp"] = record
def setIspOccupancy(self, index, value, state=None):
isp_record = self.getIsp(state)
if index >= len(isp_record) or index < 0:
return None
isp_record[index]["occupy"] = value
def setGlobalDNS(self, record, state=None):
global_record = self.getGlobal(state)
global_record["DNS"] = record
def removeLink(self, link_id, state=None):
links = self.getLinks(state)
if link_id in self.getLinkIds(state):
del links[link_id]
def removeNode(self, node_id, state=None):
nodes = self.getNodes(state)
if node_id in self.getNodeIds(state):
del nodes[node_id]
def removeNodeNetNsMap(self, node_id, state=None):
netns = self.getNetNS(state)
if node_id in self.getNetNSIds(state):
del netns[node_id]
def removeIdentifiersMap(self, node_id, state=None):
identifiers = self.getIdentifiers(state)
identifier = self.getIdentifierByNodeId(node_id)
if identifier in identifiers:
del identifiers[identifier]
def removeNetwork(self, network_id, state=None):
networks = self.getNetworks(state)
if network_id in self.getNetworkIds(state):
del networks[network_id]
def removeNodeInterface(self, node_id, interface_id, state=None):
node_interfaces = self.getNodeInterfaces(node_id, state)
if interface_id in self.getNodeInterfaceIds(node_id, state):
del node_interfaces[interface_id]
def removeNodeTmux(self, node_id, session_id, state=None):
node_record = self.getNode(node_id, state)
if "tmux" in node_record.keys():
if session_id in node_record["tmux"].keys():
del node_record["tmux"][session_id]
def removeNodeRoute(self, node_id, to, state=None):
node_record = self.getNode(node_id, state)
if "route" in node_record.keys():
if to in node_record["route"].keys():
del node_record["route"][to]
def removeNodeInterfaceAddress(self, node_id, interface_id, ip_address, state=None):
node_interface = self.getNodeInterface(interface_id, node_id, state)
if ip_address in self.getNodeInterfaceAddresses(interface_id, node_id, state):
del node_interface["ip"][ip_address]
def removeNetworkLink(self, network_id, link_id, state=None):
network_links = self.getNetworkLinks(network_id, state)
if link_id in self.getNetworkLinkIds(network_id, state):
del network_links[link_id]
def removeNetworkRoute(self, network_id, to, state=None):
network_record = self.getNetwork(network_id, state)
if "route" in network_record.keys():
if to in network_record["route"].keys():
del network_record["route"][to]
def removeNetworkPrefix(self, network_id, prefix, state=None):
network_record = self.getNetwork(network_id, state)
if "prefix" in network_record.keys():
if prefix in network_record["prefix"].keys():
del network_record["prefix"][prefix]
def removeGlobalInternet(self, isp_id, state=None):
global_record = self.getGlobal(state)
if "internet" in global_record.keys() and isp_id in global_record['internet'].keys():
del global_record["internet"][isp_id]
if not bool(global_record['internet']):
del global_record["internet"]
def removeGlobalIsp(self, state=None):
global_record = self.getGlobalIsp(state)
if "isp" in global_record.keys():
del global_record["isp"]
def removeGlobalDNS(self, state=None):
global_record = self.getGlobal(state)
if "DNS" in global_record.keys():
del global_record["DNS"]
```
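All of the getters and setters above navigate one nested dictionary that Happy persists as JSON (the `~/.happy_state.json` file mentioned in the `happy-state` help text). The shape below is an illustrative reconstruction inferred only from the accessor names, with made-up identifiers; it is not the authoritative schema.
```python
# Inferred, illustrative shape of the Happy state dict (identifiers are hypothetical).
example_state = {
    "netns": {"node01": "happy000"},
    "identifiers": {},
    "node": {
        "node01": {
            "type": "local",
            "interface": {
                "wlan0": {
                    "type": "wifi",
                    "link": "link01",
                    "ip": {"192.168.1.2": {"mask": 24}},
                },
            },
            "route": {},
            "process": {},
            "tmux": {},
        },
    },
    "network": {
        "net01": {
            "type": "wifi",
            "state": "UP",
            "interface": {"link01": {}},
            "prefix": {"192.168.1.0": {"mask": 24}},
            "route": {},
        },
    },
    "link": {
        "link01": {"node": "node01", "network": "net01", "type": "wifi",
                   "node_end": "wlan0", "network_end": "link01.net"},
    },
    "global": {},
}
```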
|
{
"source": "jenniexie/openweave-core",
"score": 2
}
|
#### File: service/wdmNext/test_weave_wdm_next_service_update_faults.py
```python
import getopt
import sys
import unittest
import itertools
import copy
from happy.Utils import *
from weave_wdm_next_test_service_base import weave_wdm_next_test_service_base
import WeaveUtilities
gScenarios = [ "UpdateResponseDelay", "UpdateResponseTimeout", "UpdateResponseBusyAndFailed", "CondUpdateBadVersion",
"UpdateRequestSendError", "UpdateRequestSendErrorInline", "UpdateRequestBadProfile", "UpdateRequestBadProfileBeforeSub",
"PartialUpdateRequestSendError", "PartialUpdateRequestSendErrorInline", "DiscardUpdatesOnNoResponse", "DiscardUpdatesOnStatusReport",
"UpdateResponseMultipleTimeout", "UpdateResponseMultipleTimeoutBeforeSub",
"FailBindingBeforeSub", "PathStoreFullOnSendError", "PathStoreFullOnSendErrorInline"]
gConditionalities = [ "Conditional", "Unconditional", "Mixed" ]
gFaultopts = WeaveUtilities.FaultInjectionOptions()
gOpts = { "conditionality" : None,
"scenario" : None
}
class test_weave_wdm_next_service_update_faults(weave_wdm_next_test_service_base):
def configure_test(self, scenario, conditionality):
wdm_next_args = self.wdm_next_args
# By default, empty the arrays of strings to look for in the logs; rely on the default check for "Good Iteration"
wdm_next_args['client_log_check'] = []
wdm_next_args['server_log_check'] = []
# Usually we run 3 iterations; the first one or two can fail because of the fault being injected.
# The third one should always succeed; the second one fails only because a fault that
# was supposed to be triggered at the end of the first one ends up hitting the beginning
# of the second one instead.
# But since running tests against the service is very slow, just run 1 iteration for now and check
# that the failure is handled; to properly test the success-on-retry we need to improve the base class so
# we can specify a different log_check per iteration.
wdm_next_args['test_client_iterations'] = 1
fault_config = None
if scenario == "UpdateResponseDelay":
fault_config = "Weave_WDMDelayUpdateResponse_s0_f1"
# Check that everything succeeds as usual
client_log_check = copy.copy(self.happy_path_log_check)
# Check that the notifications received before the StatusReport cause the PotentialDataLoss flag to be set;
# it is then cleared by the StatusReport.
client_log_check += [("Potential data loss set for traitDataHandle", 2),
("Potential data loss cleared for traitDataHandle", 2)]
if scenario == "UpdateResponseTimeout":
fault_config = "Weave_WDMUpdateRequestTimeout_s0_f1"
if conditionality == "Conditional":
client_log_check = [
# The UpdateRequest timeout:
("Update: path failed: Weave Error 4050: Timeout", 2),
# But the service has received it and processed it;
# The first notification causes a PotentialDataLoss, and purges the udpate on the trait
("Potential data loss set for traitDataHandle", 1),
("MarkFailedPendingPaths", 1),
("Update: path failed: Weave Error 4176: The conditional update of a WDM path failed for a version mismatch", 1),
# The other path is retried, but it fails with VersionMismatch in the StatusReport
("Update: path failed: Weave Error 4044:.*:37", 1)]
elif conditionality == "Unconditional":
client_log_check = [
# The UpdateRequest timeout:
("Update: path failed: Weave Error 4050: Timeout", 2),
# The notifications are received and they mark data loss
("Potential data loss set for traitDataHandle", 2),
# There is no purging
("MarkFailedPendingPaths", 0),
# Finally the update succeeds:
('Update: path result: success', 2)]
elif conditionality == "Mixed":
client_log_check = [
# The UpdateRequest timeout:
("Update: path failed: Weave Error 4050: Timeout", 2),
# But the service has received it and processed it;
# The notification on the conditional trait causes its update to be purged and the subscription to go down
("Potential data loss set for traitDataHandle", 1),
("MarkFailedPendingPaths", 1),
# The unconditional update succeeds on retry
('Update: path result: success', 1)]
if scenario == "UpdateResponseMultipleTimeout":
num_failures = 2
fault_config = "Weave_WDMUpdateRequestTimeout_s0_f" + str(num_failures)
if conditionality == "Conditional":
client_log_check = [
# The UpdateRequest timeout: there are 3 because it retries 2 times, but one
# of the two paths gets purged at the first notification
("Update: path failed: Weave Error 4050: Timeout", 2*num_failures -1),
# The service receives the message, and the first notification purges one path and causes the subscription
# to go down.
# Everything else depends on how quickly the subscription is re-established (service will send kStatus_Busy a couple of times)
# and how the notifications interleave with the update attempts.
('Update: path result: success', 0)]
elif conditionality == "Unconditional":
client_log_check = [
# The UpdateRequest timeout:
("Update: path failed: Weave Error 4050: Timeout", 2*num_failures),
                    # We receive notifications back because the requests reach the service:
("Potential data loss set for traitDataHandle", 2),
("MarkFailedPendingPaths", 0),
("The conditional update of a WDM path failed for a version mismatch", 0),
# Finally the update succeeds:
('Update: path result: success', 2)]
else:
client_log_check = [
# only the unconditional one goes through eventually
('Update: path result: success', 1)]
if scenario == "UpdateResponseMultipleTimeoutBeforeSub":
num_failures = 2
fault_config = "Weave_WDMUpdateRequestTimeout_s0_f" + str(num_failures)
wdm_next_args['client_update_timing'] = "BeforeSub"
if conditionality == "Unconditional":
client_log_check = [
# The UpdateRequest timeout:
("Update: path failed: Weave Error 4050: Timeout", 2*num_failures),
# It's hard to guess what happens with multiple timeouts and the
# subscription being established.
# Finally the update succeeds:
('Update: path result: success', 2)]
else:
return False
if scenario == "UpdateRequestSendError":
fault_config = "Weave_WDMUpdateRequestSendErrorAsync_s0_f1"
# In this case, there is no significant difference between Conditional and Unconditional
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Weave Error 4099: Message not acknowledged", 2),
# The update succeeds after retrying:
('Update: path result: success', 2),
# when resubscribing, the notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
# After the notification, no pending paths are purged
("MarkFailedPendingPaths", 0),
("The conditional update of a WDM path failed for a version mismatch", 0)]
if scenario == "PathStoreFullOnSendError":
# Inject a SendError, and a PathStoreFull late enough that it hits when the SendError
# is processed.
fault_config = "Weave_WDMUpdateRequestSendErrorAsync_s0_f1:Weave_WDMPathStoreFull_s5_f1"
# In this case, there is no significant difference between Conditional and Unconditional
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Weave Error 4099: Message not acknowledged.*will retry", 2),
# The PATH_STORE_FULL errors
("Update: path failed: Weave Error 4181: A WDM TraitPath store is full.*will not retry", 2),
# The update never succeeds
('Update: path result: success', 0)]
if scenario == "PathStoreFullOnSendErrorInline":
# Inject a SendError inline, and a PathStoreFull late enough that it hits when the SendError
# is processed.
fault_config = "Weave_WDMUpdateRequestSendErrorInline_s0_f1:Weave_WDMPathStoreFull_s5_f1"
# In this case, there is no significant difference between Conditional and Unconditional
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Inet Error.*will retry", 2),
# The PATH_STORE_FULL errors
("Update: path failed: Weave Error 4181: A WDM TraitPath store is full.*will not retry", 2),
# The update never succeeds
('Update: path result: success', 0)]
if scenario == "UpdateRequestSendErrorInline":
fault_config = "Weave_WDMUpdateRequestSendErrorInline_s0_f1"
# In this case, there is no significant difference between Conditional and Unconditional
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Inet Error", 2),
# The update succeeds after retrying:
('Update: path result: success', 2),
# when resubscribing, the notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
# After the notification, no pending paths are purged
("MarkFailedPendingPaths", 0),
("The conditional update of a WDM path failed for a version mismatch", 0)]
if scenario == "DiscardUpdatesOnNoResponse":
# Inject a SendError and make the initiator discard the updates
wdm_next_args['client_update_mutation'] = "FewDictionaryItems"
wdm_next_args['client_update_discard_on_error'] = True
fault_config = "Weave_WDMUpdateRequestSendErrorAsync_s0_f1"
# In this case, there is no significant difference between Conditional and WholeDictionary
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
# Note that there is only one of these instead of 5 (4 paths in one trait and 1 in the other)
# because the application discards the paths at the first error.
("Update: path failed: Weave Error 4099: Message not acknowledged", 1),
# Also, AbortUpdates sees all 5 paths to discard because the one triggering the notification
# would have been retried.
("Discarded 0 pending and 5 inProgress paths", 1),
# The update is not retried
('Update: path result: success', 0),
# when resubscribing, the notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
# After the notification, no pending paths are purged
("MarkFailedPendingPaths", 0)]
if scenario == "DiscardUpdatesOnStatusReport":
# Inject a SendError and make the initiator discard the updates
wdm_next_args['client_update_mutation'] = "FewDictionaryItems"
wdm_next_args['client_update_discard_on_error'] = True
fault_config = "Weave_WDMSendUpdateBadVersion_s0_f1"
if conditionality == "Conditional":
client_log_check = [
# Note that there is one of these instead of 5 (4 paths in one trait and 1 in the other)
# because the application discards the paths at the first error.
("Update: path failed: Weave Error 4044: Status Report received from peer, \[ WDM\(0000000B\):37 \]", 1),
# Also, AbortUpdates only sees 4 paths to discard because the one triggering the notification
# has been deleted already (it was not going to be retried).
("Discarded 0 pending and 4 inProgress paths", 1),
# The update is not retried
('Update: path result: success', 0),
# when resubscribing, the notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
# No resubscription from the UpdateResponse handler
("UpdateResponse: triggering resubscription", 0),
# After the notification, no pending paths are purged
("MarkFailedPendingPaths", 0)]
elif conditionality == "Mixed":
client_log_check = [
# Like above, only the first trait is conditional and fails...
("Update: path failed: Weave Error 4044: Status Report received from peer, \[ WDM\(0000000B\):37 \]", 1),
# Also, AbortUpdates only sees 4 paths to discard because the one triggering the notification
# has been deleted already (it was not going to be retried).
("Discarded 0 pending and 4 inProgress paths", 1),
# ... but the application calls DiscardUpdates, and so it does not get notified of the success either.
('Update: path result: success', 0),
# No resubscription from the UpdateResponse handler
("UpdateResponse: triggering resubscription", 0),
# when resubscribing, the notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
# After the notification, no pending paths are purged
("MarkFailedPendingPaths", 0)]
else:
                # Can't inject a bad version in a scenario without conditional updates
return False
if scenario == "PartialUpdateRequestSendError":
fault_config = "Weave_WDMUpdateRequestSendErrorAsync_s0_f1"
wdm_next_args['client_update_mutation'] = "WholeLargeDictionary"
wdm_next_args['client_update_num_traits'] = 1
# In this case, there is no significant difference between Conditional and Unconditional
# One trait with two paths: a leaf and a huge dictionary.
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Weave Error 4099: Message not acknowledged", 2),
# The update succeeds after retrying:
('Update: path result: success', 2),
# The notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
("MarkFailedPendingPaths", 0),
("The conditional update of a WDM path failed for a version mismatch", 0)]
if scenario == "PartialUpdateRequestSendErrorInline":
fault_config = "Weave_WDMUpdateRequestSendErrorInline_s0_f1"
wdm_next_args['client_update_mutation'] = "WholeLargeDictionary"
wdm_next_args['client_update_num_traits'] = 1
# In this case, there is no significant difference between Conditional and Unconditional
# One trait with two paths: a leaf and a huge dictionary.
client_log_check = [
# The UpdateRequest SendError (note: the responder does not receive the request):
("Update: path failed: Inet Error", 2),
# The update succeeds after retrying:
('Update: path result: success', 2),
# The notification does not trigger PotentialDataLoss:
("Potential data loss set for traitDataHandle", 0),
("MarkFailedPendingPaths", 0),
("The conditional update of a WDM path failed for a version mismatch", 0)]
if scenario == "CondUpdateBadVersion":
fault_config = "Weave_WDMSendUpdateBadVersion_s0_f1"
if conditionality == "Conditional":
client_log_check = [
# Status report "Multiple failures"
("Received StatusReport .*WDM.0000000B.:43", 1),
# Both paths fail (even if the second trait has a good version - the service should really treat them separately)
# and the application is notified:
("Update: path failed: Weave Error 4044: Status Report received from peer.*WDM.0000000B.:37", 2),
# The UpdateResponse handler decides to resubscribe
("UpdateResponse: triggering resubscription", 1),
                    # In this case there are no notifications while paths are pending/in-progress:
("Potential data loss set for traitDataHandle", 0)]
elif conditionality == "Mixed":
client_log_check = [
# Status report "Multiple failures"
("Received StatusReport .*WDM.0000000B.:43", 1),
# The conditional path fails and the application is notified:
("Update: path failed: Weave Error 4044: Status Report received from peer.*WDM.0000000B.:37", 1),
# The unconditional path succeeds:
('Update: path result: success', 1),
# The UpdateResponse handler decides to resubscribe because of the failed conditional update:
("UpdateResponse: triggering resubscription", 1),
                    # In this case there are no notifications while paths are pending/in-progress:
("Potential data loss set for traitDataHandle", 0)]
else:
                # Can't inject a bad version in a scenario without conditional updates
return False
if scenario == "UpdateResponseBusyAndFailed":
# The first fault makes the service reply with an InternalError status.
# Then, the second fault overrides the second StatusElement with a Busy code
fault_config = "Weave_WDMUpdateRequestBadProfile_s0_f1:Weave_WDMUpdateResponseBusy_s1_f1"
# Weave holds on to the metadata of the busy one and retries the update.
client_log_check = [
# Status report "Internal error"
("Received StatusReport.*Common.*Internal error", 1),
# The paths fail and the application is notified:
("Update: path failed: Weave Error 4044: Status Report received from peer, .*Internal error", 1),
("Update: path failed: Weave Error 4044: Status Report received from peer, .*Sender busy", 1),
# The update that failed with BUSY is tried again and it succeeds
('Update: path result: success', 1),
                # There are no notifications while paths are pending/in-progress:
("Potential data loss set for traitDataHandle", 0)
]
if scenario == "UpdateRequestBadProfile":
fault_config = "Weave_WDMUpdateRequestBadProfile_s0_f1"
# This fault makes the service reply with an InternalError status.
# Internal Error means the update should not be retried, and so there is no success.
client_log_check = [
# Status report "Internal error"
("Received StatusReport.*Common.*Internal error", 1),
# The paths fail and the application is notified:
("Update: path failed: Weave Error 4044: Status Report received from peer, .*Internal error", 2),
# The update is not tried again because Internal Error is fatal
('Update: path result: success', 0),
                # There are no notifications while paths are pending/in-progress:
("Potential data loss set for traitDataHandle", 0)
]
if scenario == "UpdateRequestBadProfileBeforeSub":
fault_config = "Weave_WDMUpdateRequestBadProfile_s0_f1"
# This fault makes the service reply with an InternalError status.
# Internal Error means the update should not be retried, and so there is no success.
wdm_next_args['client_update_timing'] = "BeforeSub"
if conditionality == "Unconditional":
client_log_check = [
("Received StatusReport.*Common.*Internal error", 1),
('Update: path result: success', 0),
]
else:
return False
if scenario == "FailBindingBeforeSub":
fault_config = "Weave_CASEKeyConfirm_s0_f2"
wdm_next_args['client_update_timing'] = "BeforeSub"
if conditionality == "Unconditional":
client_log_check = [
('Update: path result: success', 2),
]
else:
return False
wdm_next_args['test_tag'] = self.base_test_tag + "_" + str(self.num_tests) + "_" + scenario + "_" + conditionality + "_" + fault_config
print wdm_next_args['test_tag']
wdm_next_args['client_faults'] = fault_config
wdm_next_args['client_update_conditionality'] = conditionality
# In all cases, at the end the application should be notified that there are
# no more pending updates.
client_log_check.append(('Update: no more pending updates', 1))
wdm_next_args['client_log_check'] = client_log_check
return True
def get_default_options(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['final_client_status'] = 0
wdm_next_args['enable_client_flip'] = 1
wdm_next_args['test_client_iterations'] = 1
wdm_next_args['test_client_delay'] = 4000
wdm_next_args['timer_client_period'] = 4000
wdm_next_args['client_clear_state_between_iterations'] = False
wdm_next_args['test_client_case'] = 10 # kTestCase_TestUpdatableTraits
wdm_next_args['total_client_count'] = 1
wdm_next_args['enable_retry'] = True
wdm_next_args['client_update_mutation'] = "OneLeaf"
wdm_next_args['client_update_conditionality'] = gOpts["conditionality"]
wdm_next_args['client_update_num_traits'] = 2
wdm_next_args['client_update_num_mutations'] = 1
wdm_next_args['test_case_name'] = ['WDM Update sequence for fault-injection']
self.base_test_tag = 'test_weave_wdm_next_service_update_faults'
wdm_next_args['test_tag'] = self.base_test_tag
wdm_next_args['client_faults'] = None
wdm_next_args['server_faults'] = None
return wdm_next_args
def test_weave_wdm_next_service_update_faults(self):
self.happy_path_log_check = [('Mutual: Good Iteration', 1),
('Update: path result: success', 2),
('Update: no more pending updates', 1),
('Update: path failed', 0),
('Need to resubscribe', 0)]
if not (gOpts["conditionality"] or gOpts["scenario"]):
# Run a clean sequence once; usually we do this to profile it and know which faults
# to inject. In this case it's just to know that the happy path works: we only
# execute a few specific fault handling cases, for performance reasons.
# Also, send a "WholeDictionary" update so we reset the test traits to something
# normal.
self.wdm_next_args = self.get_default_options()
wdm_next_args = self.wdm_next_args
wdm_next_args['client_log_check'] = self.happy_path_log_check
wdm_next_args['client_update_mutation'] = "WholeDictionary"
node = 'client'
print 'test file: ' + self.__class__.__name__
print "weave-wdm-next test update with faults"
super(test_weave_wdm_next_service_update_faults, self).weave_wdm_next_test_service_base(wdm_next_args)
self.num_tests = 0
fault_configs = []
scenarios = [ gOpts["scenario"] ]
conditionalities = [ gOpts["conditionality"] ]
if (conditionalities[0] == None):
conditionalities = gConditionalities
if (scenarios[0] == None):
scenarios = gScenarios
print "scenarios: " + str(scenarios)
print "conditionalities: " + str(conditionalities)
results = []
num_failures = 0
for (scenario, conditionality) in itertools.product(scenarios, conditionalities):
            # restore defaults, and then configure for this particular test
wdm_next_args = self.get_default_options()
self.wdm_next_args = wdm_next_args
if not self.configure_test(scenario, conditionality):
continue
print "Testing: " + scenario + " " + conditionality
self.assertTrue(len(self.wdm_next_args["client_log_check"]) > 0, "will not run a test without log checks")
result = "Success"
try:
super(test_weave_wdm_next_service_update_faults, self).weave_wdm_next_test_service_base(wdm_next_args)
except:
result = "Failure"
num_failures = num_failures + 1
pass
results.append(result + " " + scenario + " " + conditionality)
print results[-1]
self.num_tests += 1
print "Executed " + str(self.num_tests) + " tests; " + str(num_failures) + " failures"
if (results):
print "\n".join(results)
if __name__ == "__main__":
help_str = """usage:
--help Print this usage info and exit
--scenario { """ + """, """.join(gScenarios) + """ } (default: all of them)
--conditionality { """ + """, """.join(gConditionalities) + """ } (default: all of them)
"""
longopts = ["help", "conditionality=", "scenario="]
try:
opts, args = getopt.getopt(sys.argv[1:], "h", longopts)
except getopt.GetoptError as err:
print help_str
print hred(str(err))
sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print help_str
sys.exit(0)
if o in ("--conditionality"):
if not (a in gConditionalities):
print help_str
sys.exit(0)
gOpts["conditionality"] = a
if o in ("--scenario"):
if not (a in gScenarios):
print help_str
sys.exit(0)
gOpts["scenario"] = a
sys.argv = [sys.argv[0]]
WeaveUtilities.run_unittest()
```
|
{
"source": "jennifer19-meet/y2s18-python_review",
"score": 4
}
|
#### File: y2s18-python_review/exercises/functions.py
```python
def is_prime(x):
    num = int(x)
    if num < 2:
        print("not prime!!!")
        return "not prime!!!"
    for i in range(2, num):
        if num % i == 0:
            print("not prime!!!")
            return "not prime!!!"
    print("its a prime!!!")
    return "its a prime!!!"
a = is_prime(101)
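# Hedged sanity checks (not part of the original exercise): 101 has no divisor
# between 2 and 100, so is_prime(101) reports a prime, while is_prime(100)
# reports not prime because 100 % 2 == 0.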
```
|
{
"source": "jennifer-admin/pytest-learning",
"score": 2
}
|
#### File: jennifer-admin/pytest-learning/pytest_jennifer_plugin.py
```python
from collections import defaultdict
import pytest
durations = defaultdict(dict)
slow = 3.0
def pytest_addoption(parser):
parser.addoption(
"--slow",
action="store",
default="N",
help="'Default 'No' for slow, option: Y or N"
)
def pytest_configure(config):
durations.update(
config.cache.get("cache/case_duration", defaultdict(dict))
)
def pytest_runtest_logreport(report):
durations[report.nodeid][report.when] = report.duration
@pytest.mark.tryfirst
def pytest_collection_modifyitems( session, config, items):
if config.getoption("--slow") == "Y":
for item in items:
duration = sum(durations[item.nodeid].values())
if duration > slow:
item.add_marker(pytest.mark.slow)
def pytest_sessionfinish(session):
session.config.cache.set("cache/case_duration", durations)
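# Hedged usage note (an assumption about intent, not stated in the plugin itself):
# once a first run has populated cache/case_duration, invoking `pytest --slow Y`
# adds the `slow` marker to tests whose cached duration exceeded 3.0 seconds,
# so they can be deselected with `pytest --slow Y -m "not slow"`.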
```
|
{
"source": "Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction",
"score": 2
}
|
#### File: Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction/dataset.py
```python
import os
import numpy as np
import torch
import torchvision
from torchvision import transforms, utils
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
from action_predict import *
from jaad_data import *
from PIL import Image, ImageDraw
from torch.utils.data import DataLoader
from utils import *
class JAADDataset(torch.utils.data.Dataset):
def __init__(self, data_type, model_name):
self.data_type = data_type # whether it is train or test
self.data_raw = None # all data info, including images, ped_ids, bbox, crossing...
self.mode_name = model_name
self.config_file = 'config_files/ours/' + model_name + '.yaml' # config file path
self.configs_default ='config_files/configs_default.yaml' # default config file path
self.configs = None
self.model_configs = None
self.imdb = JAAD(data_path='./JAAD/')
self.method_class = None
# get data sequence
self.readConfigFile()
beh_seq_train = self.imdb.generate_data_trajectory_sequence(self.data_type, **self.configs['data_opts'])
self.method_class = action_prediction(self.configs['model_opts']['model'])(**self.configs['net_opts'])
self.data_raw = self.get_data(self.data_type, beh_seq_train, {**self.configs['model_opts'], 'batch_size': 2})
self.poses = get_pose(self.data_raw['data']['image'],
self.data_raw['data']['ped_id'],
data_type=self.data_type,
file_path='data/features/jaad/poses',
dataset='jaad')
# use bounding box crop data
self.transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __getitem__(self, idx):
"""
Args:
idx: id of video sequence to get
Return:
A dictionary containing a video sequence of index idx
"""
img_paths = self.data_raw['data']['image'][idx] # (16,1)
ped_ids = self.data_raw['data']['ped_id'][idx]
labels = self.data_raw['data']['crossing'][idx]
bbox = self.data_raw['data']['box_org'][idx] # 'bbox': list([x1, y1, x2, y2])
poses = self.poses[idx]
poses = np.reshape(poses, (poses.shape[0], 18, 2))
speed = self.data_raw['data']['speed'][idx] # (16,1)
# cordinates = bbox[0]
# bw, bh = cordinates[2] - cordinates[0], cordinates[3] - cordinates[1]
# print(bbox)
# fig, ax = plt.subplots()
# imp = img_paths[0]
# im = Image.open(imp)
# im_crop = im.crop(cordinates)
# img = self.transform(im_crop)
# img = img.cpu().detach().numpy()
# img = img.transpose(1,2,0)
# ax.imshow(img)
# # rect = patches.Rectangle((cordinates[0], cordinates[1]), bw, bh, linewidth=1, edgecolor='r', facecolor='none')
# # ax.add_patch(rect)
# # plot pose
# pose = poses[0]
# plt.scatter(pose[:, 0] * 224, pose[:, 1] * 224)
# plt.show()
# read img from paths and transform img path to image of size (3,224,224)
img_seq = []
for imp, coordinates in zip(img_paths, bbox): # img = './JAAD/images/video_0001/00491.png
img = Image.open(imp)
img = img.crop(coordinates)
img = self.transform(img)
            img = torch.squeeze(img, dim=0)
img = img.detach().numpy()
img_seq.append(img)
img_seq = torch.Tensor(img_seq) # tensor (16,3,224,224)
labels = torch.Tensor(labels)
poses = torch.Tensor(poses)
speed = torch.Tensor(speed)
sigmoid = torch.nn.Sigmoid()
speed = sigmoid(speed)
return img_seq, labels, poses, speed
def __len__(self):
return self.data_raw['data']['crossing'].shape[0] # 2134
def readConfigFile(self):
print(self.config_file)
# Read default Config file
with open(self.configs_default, 'r') as f:
self.configs = yaml.safe_load(f)
with open(self.config_file, 'r') as f:
self.model_configs = yaml.safe_load(f)
# Update configs based on the model configs
for k in ['model_opts', 'net_opts']:
if k in self.model_configs:
self.configs[k].update(self.model_configs[k])
# Calculate min track size
tte = self.configs['model_opts']['time_to_event'] if isinstance(self.configs['model_opts']['time_to_event'], int) else \
self.configs['model_opts']['time_to_event'][1]
self.configs['data_opts']['min_track_size'] = self.configs['model_opts']['obs_length'] + tte
# update model and training options from the config file
dataset = self.model_configs['exp_opts']['datasets']
self.configs['data_opts']['sample_type'] = 'beh' if 'beh' in dataset else 'all'
self.configs['model_opts']['overlap'] = 0.6 if 'pie' in dataset else 0.8
self.configs['model_opts']['dataset'] = dataset.split('_')[0]
self.configs['train_opts']['batch_size'] = self.model_configs['exp_opts']['batch_size']
self.configs['train_opts']['lr'] = self.model_configs['exp_opts']['lr']
self.configs['train_opts']['epochs'] = self.model_configs['exp_opts']['epochs']
model_name = self.configs['model_opts']['model']
# Remove speed in case the dataset is jaad
if 'RNN' in model_name and 'jaad' in dataset:
self.configs['model_opts']['obs_input_type'] = self.configs['model_opts']['obs_input_type']
for k, v in self.configs.items():
print(k,v)
# set batch size
if model_name in ['ConvLSTM']:
self.configs['train_opts']['batch_size'] = 2
if model_name in ['C3D', 'I3D']:
self.configs['train_opts']['batch_size'] = 4
if model_name in ['PCPA']:
self.configs['train_opts']['batch_size'] = 1
if 'MultiRNN' in model_name:
self.configs['train_opts']['batch_size'] = 8
if model_name in ['TwoStream']:
self.configs['train_opts']['batch_size'] = 16
# if self.configs['model_opts']['dataset'] == 'pie':
# pass
# # imdb = PIE(data_path=os.environ.copy()['PIE_PATH'])
# elif self.configs['model_opts']['dataset'] == 'jaad':
# # if use docker:
# # imdb = JAAD(data_path=os.environ.copy()['JAAD_PATH'])
# # if use local path
# self.imdb = JAAD(data_path='./JAAD/')
def get_data(self, data_type, data_raw, model_opts):
"""
Generates data train/test/val data
Args:
data_type: Split type of data, whether it is train, test or val
data_raw: Raw tracks from the dataset
model_opts: Model options for generating data
Returns:
A dictionary containing, data, data parameters used for model generation,
        effective dimension of data (the number of rgb images to be used, calculated according
to the length of optical flow window) and negative and positive sample counts
"""
print('Enter MASK_PCPA_4_2D get_data')
assert model_opts['obs_length'] == 16
model_opts['normalize_boxes'] = False
# self._generator = model_opts.get('generator', False)
# data_type_sizes_dict = {}
# process = model_opts.get('process', True)
data, neg_count, pos_count = self.method_class.get_data_sequence(data_type, data_raw, model_opts)
return {'data': data,
'ped_id': data['ped_id'],
'image': data['image'],
'tte': data['tte'],
'count': {'neg_count': neg_count, 'pos_count': pos_count}}
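# Hedged shape note (inferred from __getitem__ above, not stated in the original file):
# with batch_size=4 the DataLoader below stacks samples, so img_seq batches to
# (4, 16, 3, 224, 224), and labels, poses and speed each gain a leading batch dimension of 4.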
if __name__ == "__main__":
train_dataset = JAADDataset('train', 'MASK_PCPA_jaad_2d')
train_dataloader = DataLoader(train_dataset, batch_size=4, shuffle=True)
img_seq, labels, poses, speed = train_dataset.__getitem__(30)
print(img_seq.shape)
print(labels.shape)
print(poses.shape)
print(speed)
# train_dataset.__getitem__(0)
# poses = get_pose(train_dataset.data_raw['data']['image'],
# train_dataset.data_raw['data']['ped_id'],
# data_type='train',
# file_path='data/features/jaad/poses',
# dataset='jaad')
# print(poses.shape)
```
#### File: Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction/my_train_test.py
```python
from dataset import JAADDataset
from PIL import Image
from torchvision import transforms, utils
from action_predict import *
from jaad_data import *
import torch
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from my_model import MyModel
import pdb
def train_test(epoch):
train_dataset = JAADDataset('train', 'MASK_PCPA_jaad_2d')
train_dataloader = DataLoader(train_dataset, batch_size=4, shuffle=True, drop_last=True)
test_dataset = JAADDataset('test', 'MASK_PCPA_jaad_2d')
test_dataloader = DataLoader(test_dataset, batch_size=4, shuffle=True, drop_last=True)
model = MyModel().cuda()
weight = torch.Tensor([1760.0/2134.0, 1-1760.0/2134.0]).cuda()
label_criterion = nn.CrossEntropyLoss(weight=weight)
pose_criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
_lambda = 1
for e in range(epoch):
train_running_loss = 0.0
train_running_acc = 0.0
test_running_loss = 0.0
test_running_acc = 0.0
model.train()
for i, data in enumerate(train_dataloader):
            train_img_seq, train_labels, train_poses, train_speed = data # dataset returns (img_seq, labels, poses, speed); speed is unused here
train_img_seq = train_img_seq.cuda()
train_labels = train_labels.cuda().long().squeeze()
train_poses = train_poses.cuda()
optimizer.zero_grad()
h0 = torch.zeros(2,4,512).cuda() # (n_layers * n_directions, batch_size, hidden_size)
train_outputs, train_predicted_poses = model(train_img_seq, h0) # + pose prediction
prediction = torch.softmax(train_outputs.detach(), dim=1)[:,1] > 0.5
prediction = prediction * 1.0
# pdb.set_trace()
correct = (prediction == train_labels.float()) * 1.0
loss_labels = label_criterion(train_outputs, train_labels)
loss_poses = pose_criterion(train_predicted_poses, train_poses)
loss = loss_labels + _lambda * loss_poses
print('pose loss: ', loss_poses)
acc = correct.sum() / train_labels.shape[0]
train_running_loss += loss.item()
train_running_acc += acc.item()
loss.backward()
optimizer.step()
if (i + 1) % 10 == 0:
print('Train loss: ', train_running_loss / (10 * ((i + 1) / 10)))
print('Train acc: ', train_running_acc / (10 * ((i + 1) / 10)))
model.eval()
for i, data in enumerate(test_dataloader):
            test_img_seq, test_labels, test_poses, test_speed = data # speed is returned by the dataset but unused here
test_img_seq = test_img_seq.cuda()
test_labels = test_labels.cuda().long().squeeze()
test_poses = test_poses.cuda()
h0 = torch.zeros(2,4,512).cuda()
test_outputs, test_predicted_poses = model(test_img_seq, h0)
prediction = torch.softmax(test_outputs.detach(), dim=1)[:,1] > 0.5
prediction = prediction * 1.0
# pdb.set_trace()
correct = (prediction == test_labels.float()) * 1.0
loss_labels = label_criterion(test_outputs, test_labels)
loss_poses = pose_criterion(test_predicted_poses, test_poses)
loss = loss_labels + _lambda * loss_poses
acc = correct.sum() / test_labels.shape[0]
test_running_loss += loss.item()
test_running_acc += acc.item()
if (i + 1) % 10 == 0:
print('Test loss: ', test_running_loss / (10 * ((i + 1) / 10)))
print('Test acc: ', test_running_acc / (10 * ((i + 1) / 10)))
avg_train_loss = train_running_loss / len(train_dataloader)
avg_train_acc = train_running_acc / len(train_dataloader)
avg_test_loss = test_running_loss / len(test_dataloader)
avg_test_acc = test_running_acc / len(test_dataloader)
print("Train loss: ", avg_train_loss)
print("Train accuracy: ", avg_train_acc)
print("Test loss: ", avg_test_loss)
print("Test accuracy: ", avg_test_acc)
if __name__ == "__main__":
train_test(epoch=2)
```
|
{
"source": "Jennifer-Daniel/GAN_N3",
"score": 3
}
|
#### File: Jennifer-Daniel/GAN_N3/model.py
```python
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Activation, ZeroPadding2D, LeakyReLU, UpSampling2D, Conv2D, Conv2DTranspose, Embedding, Concatenate
from tensorflow.keras.layers import Input, Reshape, Dropout, Dense, Flatten, BatchNormalization, ReLU
# define the standalone discriminator model
def define_cond_discriminator(args):
"""
    define_cond_discriminator: This function defines the architecture of the discriminator model. The class
                               label is embedded and projected to the same spatial dimensions as the image,
                               then concatenated with the image as an extra channel. The model stacks four
                               convolutional layers with an increasing number of filters (64, 128, 256, 512).
Arguments:
args: Parser which contains all the variables and paths.
Returns:
model: It returns the architecture of the model.
"""
n_classes = args.NUMBER_OF_CLASSES
in_shape = (args.IMAGE_SIZE, args.IMAGE_SIZE, 3)
# label input
in_label = Input(shape = (1,), name = "Input_Label")
li = Embedding( n_classes , 50, name="Embedding_D")(in_label)
n_nodes = in_shape[0] * in_shape[1] * 1 # 128*128*1
li = Dense(n_nodes, name="Cond_D_Dense_1")(li)
li = Reshape((in_shape[0], in_shape[1], 1), name="Cond_D_Reshape_1")(li)
# image input
in_image = Input(shape=in_shape, name="Cond_D_Input_Image")
# concat label as a channel
merge = Concatenate(name="Cond_D_Concatenate_1")([in_image, li])
fe = Conv2D(64, (3,3), strides=(2,2), padding='same', name="Cond_D_Conv_1")(merge)
fe = LeakyReLU(alpha=0.2, name="Cond_D_LeakyRelu_1")(fe)
fe = Conv2D(128, (3,3), strides=(2,2), padding='same', name="Cond_D_conv_2")(fe)
fe = LeakyReLU(alpha=0.2, name="Cond_D_LeakyRelu_2")(fe)
fe = Conv2D(256, (3,3), strides=(2,2), padding='same', name="Cond_D_conv_3")(fe)
fe = LeakyReLU(alpha=0.2, name="Cond_D_LeakyRelu_3")(fe)
fe = Conv2D(512, (3,3), strides=(2,2), padding='same', name="Cond_D_conv_4")(fe)
fe = LeakyReLU(alpha=0.2, name="Cond_D_LeakyRelu_4")(fe)
fe = Flatten(name="Cond_D_Flatten_1")(fe)
fe = Dropout(0.3, name="Cond_D_Dropout_1")(fe)
out_layer = Dense(1, activation='sigmoid', name="Cond_D_Dense_2")(fe)
model = Model([in_image, in_label], out_layer)
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# define the standalone generator model
def define_cond_generator(latent_dim, args):
"""
    define_cond_generator: This function defines the architecture of the generator model. The class label
                           is embedded and reshaped to match the spatial dimensions of the latent feature
                           map, then concatenated with it as an extra channel. The model upsamples through
                           four transposed-convolution layers with a decreasing number of filters
                           (1024, 512, 256, 128), followed by a final convolution down to 3 RGB channels.
                           The generator is not trained on its own, but jointly with the discriminator.
Arguments:
latent_dim: Array of random input noise of length = 100.
args: Parser which contains all the variables and paths.
Returns:
model: It returns the architecture of the model.
"""
n_classes = args.NUMBER_OF_CLASSES
# label input
in_label = Input(shape=(1,))
li = Embedding(n_classes, 50)(in_label)
n_nodes = 8*8*1
li = Dense(n_nodes)(li)
li = Reshape((8, 8, 1), name="Cond_G_Reshape_2")(li)
# image generator input
in_lat = Input(shape=(latent_dim,))
n_nodes = 3*8*8 # since 3 channels
gen = Dense(n_nodes)(in_lat)
gen = ReLU()(gen)
gen = Reshape((8, 8, 3), name="Cond_G_Reshape_3")(gen)
merge = Concatenate()([gen, li])
# 16x16
gen = Conv2DTranspose(1024, (4,4), strides=(2,2), padding='same')(merge)
gen = ReLU()(gen)
# 32x32
gen = Conv2DTranspose(512, (4,4), strides=(2,2), padding='same')(gen)
gen = ReLU()(gen)
# 64x64
gen = Conv2DTranspose(256, (4,4), strides=(2,2), padding='same')(gen)
gen = ReLU()(gen)
# 128x128
gen = Conv2DTranspose(128, (4,4), strides=(2,2), padding='same')(gen)
gen = ReLU()(gen)
# 1X1 conv, reduce channels to 3 - rgb
out_layer = Conv2D(3, (7, 7), activation='tanh', padding='same')(gen) # or 128, 128
model = Model([in_lat, in_label], out_layer)
return model
# define the combined generator and discriminator model, for updating the generator
def define_cond_gan(g_model, d_model):
"""
    define_cond_gan: This function defines the architecture of the composite GAN model, chaining the
                     generator into the discriminator. Only the generator is trained through this
                     combined model; the discriminator's weights are frozen here.
Arguments:
g_model: Instance of Generator model.
d_model: Instance of discriminator model.
Returns:
model: It returns the architecture of the model.
"""
d_model.trainable = False
gen_noise, gen_label = g_model.input
gen_output = g_model.output
# connect image output and label input from generator as inputs to discriminator
gan_output = d_model([gen_output, gen_label])
# define gan model as taking noise and label and outputting a classification
model = Model([gen_noise, gen_label], gan_output)
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
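# Hedged usage sketch (not part of the original repo): assuming an `args` object that
# provides NUMBER_OF_CLASSES and IMAGE_SIZE, and a latent dimension of 100, the three
# builders compose as follows:
#   d_model = define_cond_discriminator(args)
#   g_model = define_cond_generator(100, args)
#   gan_model = define_cond_gan(g_model, d_model)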
```
|
{
"source": "jennifereldiaz/fly-tnbc",
"score": 3
}
|
#### File: jennifereldiaz/fly-tnbc/BRCA_2getCNVs2_update_withtumorpurity.py
```python
import csv
import math
import numpy as np
#import scipy
#from scipy import stats
#import matplotlib.pyplot as plt
import math
import itertools
from itertools import zip_longest
import pandas as pd
#function to transpose
def transpose(mylist):
return [list(i) for i in zip(*mylist)]
#function for significant digits
from math import log10, floor
def round_to_2(x):
digits = -int(floor(log10(x))-1)
digit_str = '.' + str(digits) + 'f'
return float(format(x, digit_str))
#function for testing if a string is a number
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
#function to pull out a copy number
def get_copy_number(sample_list, gene_list):
#sample_CNV_file is the file for that sample ID. gene is a list from the candidate file
copy_number = None
for row in sample_list:
if str.isdigit(row[1]):
if str.isdigit(gene_list[2]):
if int(row[1]) == int(gene_list[2]):
start_CNV = int(row[2])
stop_CNV = int(row[3])
start_gene = int(gene_list[3])
stop_gene = int(gene_list[4])
seg_tuple_CNV = (start_CNV, stop_CNV)
seg_tuple_gene = (start_gene, stop_gene) #this figures out if the known CNV is entirely contained in the CNV in the sample
overlap = min(seg_tuple_CNV[1], seg_tuple_gene[1]) - max(seg_tuple_CNV[0], seg_tuple_gene[0])
seg_gene = seg_tuple_gene[1] - seg_tuple_gene[0]
if overlap >= seg_gene:
copy_number = math.pow(2,float(row[5]))
return copy_number
elif row[1] =='X':
if gene_list[2]=='X':
if row[1] == gene_list[2]:
start_CNV = int(row[2])
stop_CNV = int(row[3])
start_gene = int(gene_list[3])
stop_gene = int(gene_list[4])
seg_tuple_CNV = (start_CNV, stop_CNV)
seg_tuple_gene = (start_gene, stop_gene) #this figures out if the known CNV is entirely contained in the CNV in the sample
overlap = min(seg_tuple_CNV[1], seg_tuple_gene[1]) - max(seg_tuple_CNV[0], seg_tuple_gene[0])
seg_gene = seg_tuple_gene[1] - seg_tuple_gene[0]
if overlap >= seg_gene:
copy_number = math.pow(2,float(row[5]))
return copy_number
#list of candidate genes
cand_genes = []
tncand = pd.read_csv('new_TN_CNV_run/CNV_candidates.csv',header=0)
ercand = pd.read_csv('ER+_CNV_run/CNV_candidates.csv',header=0)
cand = pd.concat([tncand,ercand])
cand.drop_duplicates(inplace=True)
cand.reset_index(drop=True,inplace=True)
##cand = cand.ix[:4]
cand_genes= cand.Symbol.unique().tolist()
print('Initial Gene List:')
print(len(cand_genes))
##import consensus tumor purity estimate
pur = pd.read_excel('purity_aran2015-s2.xlsx',sheetname='Supp Data 1',skiprows = 3,header=0,index_col='Sample ID')
#generate summary files of candidate amplifications and deletions for all tumors by original barcode, adjusted for tumor purity from aran 2015
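#Worked example of the purity correction applied below (an illustration, not from the original script):
#with observedCN = trueCN*purity + germlineCN*(1-purity), the reported fold change is
#(observedCN + (purity-1)*germlineCN)/(purity*germlineCN); e.g. observedCN = 3.0,
#germlineCN = 2.0 and purity = 0.5 give (3.0 - 1.0)/1.0 = 2.0.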
with open('BRCA_CNVs_genes_foldchange.csv', 'w+') as CNV_file:
CNVs = csv.writer(CNV_file)
with open('BRCA_pathology_2014.csv', 'r') as path_file:
path = csv.reader(path_file)
path = list(path)
secondrow = ['TCGA_ID',]
firstrow = ['HUGO-->',]
firstrow += cand.Symbol.tolist()
secondrow += cand.Synonym.tolist()
secondrow.append('tumor')
secondrow.append('normal')
firstrow.append('tumor')
firstrow.append('normal')
CNVs.writerow(firstrow)
CNVs.writerow(secondrow)
with open('CNV_data/FILE_SAMPLE_MAP.txt', 'r') as file_map:
file_map = csv.reader(file_map, delimiter='\t')
file_map = list(file_map)
for item in file_map:
if len(item) <1:
file_map.remove(item)
for tumorID in path:
print(tumorID[0])
if len(tumorID[0]) > 0:
primary = tumorID[0] + '-01' #note that all tumors in my final file are primary, and none were excluded on the basis of having a
##met sample.
met = tumorID[0] + '-06'
blood = tumorID[0] + '-10'
solid = tumorID[0] + '-11'
sample_CNVs_list = []
germline = ''
tumor = ''
if any(primary in row[1] for row in file_map):
purityid = primary+'A'
if purityid in pur.index.tolist():
purity = float(pur.ix[purityid].CPE)
else:
purity = np.nan
#print(primary)
for row in file_map:
if len(row) > 0:
if primary in row[1]:#adds all the CNVs for the tumor sample
if 'nocnv_hg19' in row[0]:
tumor = 'primary'
if sample_CNVs_list == []:
sample_CNVs_list.append(tumorID[0])
file_name = 'CNV_data/Level_3/' + row[0]
with open(file_name, 'r') as sample_CNVs:
sample_CNVs = csv.reader(sample_CNVs, delimiter='\t')
sample_CNVs = list(sample_CNVs)
for i,row in cand.iterrows():
if len(sample_CNVs_list) <= i:
sample_CNVs_list.append('')
line = row.tolist()
copy_number_primary = get_copy_number(sample_CNVs, line)
if copy_number_primary != None:
if any(blood in row[1] for row in file_map):
for row2 in file_map:
if len(row2) > 0:
if blood in row2[1]:#subtracts the CNVs from the germline
if 'nocnv_hg19' in row2[0]:
file_name2 = 'CNV_data/Level_3/' + row2[0]
with open(file_name2, 'r') as sample_CNVs_germ:
sample_CNVs_germ = csv.reader(sample_CNVs_germ, delimiter='\t')
sample_CNVs_germ = list(sample_CNVs_germ)
copy_number_germline = get_copy_number(sample_CNVs_germ, line)
if copy_number_germline != None:
if np.isnan(purity): #formula to correct for tumor purity: observedCN = trueCN*purity + germlineCN*(1-purity)
new_copy_number = copy_number_primary/copy_number_germline #fold change over germline
else:
new_copy_number = (copy_number_primary+(purity-1)*copy_number_germline)/(purity*copy_number_germline)
sample_CNVs_list.append(new_copy_number)
else:
if np.isnan(purity):
sample_CNVs_list.append(copy_number_primary)
else:
new_copy_number = (copy_number_primary+(purity-1))/purity
sample_CNVs_list.append(new_copy_number)
germline = 'blood'
elif any(solid in row[1] for row in file_map):
for row2 in file_map:
if len(row2) > 0:
if solid in row2[1]:
if 'nocnv_hg19' in row2[0]:
file_name2 = 'CNV_data/Level_3/' + row2[0]
with open(file_name2, 'r') as sample_CNVs_germ_solid:
sample_CNVs_germ_solid = csv.reader(sample_CNVs_germ_solid, delimiter='\t')
sample_CNVs_germ_solid = list(sample_CNVs_germ_solid)
copy_number_germline = get_copy_number(sample_CNVs_germ_solid, line)
if copy_number_germline != None:
if np.isnan(purity): #formula to correct for tumor purity
new_copy_number = copy_number_primary/copy_number_germline
else:
new_copy_number = (copy_number_primary+(purity-1)*copy_number_germline)/(purity*copy_number_germline)
sample_CNVs_list.append(new_copy_number)
else:
if np.isnan(purity):
sample_CNVs_list.append(copy_number_primary)
else:
new_copy_number = (copy_number_primary+(purity-1))/purity
sample_CNVs_list.append(new_copy_number)
germline = 'solid'
else: sample_CNVs_list.append(copy_number_primary)
else:
if any(met in row[1] for row in file_map):
purityid = met+'A'
if purityid in pur.index.tolist():
purity = float(pur.ix[purityid].CPE)
else:
purity = np.nan
#print(met)
for row in file_map:
if len(row) > 0:
if met in row[1]:#adds all the CNVs for the tumor sample
if 'nocnv_hg19' in row[0]:
tumor = 'met'
if sample_CNVs_list == []:
sample_CNVs_list.append(tumorID[0])
file_name = 'CNV_data/Level_3/' + row[0]
with open(file_name, 'r') as sample_CNVs:
sample_CNVs = csv.reader(sample_CNVs, delimiter='\t')
sample_CNVs = list(sample_CNVs)
for i,row in cand.iterrows():
if len(sample_CNVs_list) <= i:
sample_CNVs_list.append('')
line = row.tolist()
copy_number_met = get_copy_number(sample_CNVs, line)
if copy_number_met != None:
if any(blood in row[1] for row in file_map):
for row2 in file_map:
if len(row2) > 0:
if blood in row2[1]:#subtracts the CNVs from the germline
if 'nocnv_hg19' in row2[0]:
file_name2 = 'CNV_data/Level_3/' + row2[0]
with open(file_name2, 'r') as sample_CNVs_germ:
sample_CNVs_germ = csv.reader(sample_CNVs_germ, delimiter='\t')
sample_CNVs_germ = list(sample_CNVs_germ)
copy_number_germline = get_copy_number(sample_CNVs_germ, line)
if copy_number_germline != None:
if np.isnan(purity): #formula to correct for tumor purity
new_copy_number = copy_number_met/copy_number_germline
else:
new_copy_number = (copy_number_met+(purity-1)*copy_number_germline)/(purity*copy_number_germline)
sample_CNVs_list.append(new_copy_number)
else:
                                                                                    if np.isnan(purity): #no purity estimate: keep the uncorrected value (mirrors the primary-tumor branch above)
sample_CNVs_list.append(copy_number_met)
else:
new_copy_number = (copy_number_met+(purity-1))/purity
sample_CNVs_list.append(new_copy_number)
germline = 'blood'
elif any(solid in row[1] for row in file_map):
for row2 in file_map:
if len(row2) > 0:
if solid in row2[1]:
if 'nocnv_hg19' in row2[0]:
file_name2 = 'CNV_data/Level_3/' + row2[0]
with open(file_name2, 'r') as sample_CNVs_germ_solid:
sample_CNVs_germ_solid = csv.reader(sample_CNVs_germ_solid, delimiter='\t')
sample_CNVs_germ_solid = list(sample_CNVs_germ_solid)
                                                                                copy_number_germline = get_copy_number(sample_CNVs_germ_solid, line)
if copy_number_germline != None:
if np.isnan(purity): #formula to correct for tumor purity
new_copy_number = copy_number_met/copy_number_germline
else:
new_copy_number = (copy_number_met+(purity-1)*copy_number_germline)/(purity*copy_number_germline)
sample_CNVs_list.append(new_copy_number)
else:
if np.isnan(purity):
sample_CNVs_list.append(copy_number_met)
else:
new_copy_number = (copy_number_met+(purity-1))/purity
sample_CNVs_list.append(new_copy_number)
germline = 'solid'
else: sample_CNVs_list.append(copy_number_met)
#print(sample_CNVs_list)
if len(sample_CNVs_list) < len(cand)+1:
sample_CNVs_list.append('')
if len(sample_CNVs_list) > len(cand)+1:
del sample_CNVs_list[1:]
sample_CNVs_list.append('duplicates')
if len(sample_CNVs_list) == len(cand)+1:
sample_CNVs_list.append(tumor)
sample_CNVs_list.append(germline)
if len(cand)+1 < len(sample_CNVs_list) < len(cand) + 3:
sample_CNVs_list.append('')
if len(sample_CNVs_list) > 1:
CNVs.writerow(sample_CNVs_list)
```
#### File: jennifereldiaz/fly-tnbc/BRCA_6CNV_testRNA_update.py
```python
import csv
import math
import numpy as np
import scipy
from scipy import stats
import matplotlib.pyplot as plt
import math
import itertools
from itertools import zip_longest
import pandas as pd
#in order to create a candidate CNV file for a large number of genes,
#I need to automatically pull out the genomic coordinates for build hg19 for each gene
#function to transpose
def transpose(mylist):
return [list(i) for i in zip(*mylist)]
#function for significant digits
from math import log10, floor
def round_to_2(x):
digits = -int(floor(log10(x))-1)
digit_str = '.' + str(digits) + 'f'
return float(format(x, digit_str))
#function for testing if a string is a number
def isnumber(s):
try:
float(s)
return True
except ValueError:
return False
#get filtered gene list
with open('BRCA_CNVs_foldchange_TN_filtered.csv', 'r') as filtered:
filtered = csv.reader(filtered)
filtered_genelist = next(filtered)
filtered_genelist = list(filtered_genelist)[1:]
print('Initial Gene List:')
print(len(filtered_genelist),'genes') #9694
#get amps and del
dict_ampdel = {}
with open('BRCA_TN_CNVs_foldchange_parsed_counts.csv', 'r') as parsed_counts_file:
parsed_counts = csv.reader(parsed_counts_file)
parsed_counts = list(parsed_counts)
for gene in filtered_genelist:
i = parsed_counts[0].index(gene)
if float(parsed_counts[2][i]) > 50:
dict_ampdel[gene] = 'amp'
elif float(parsed_counts[3][i]) > 50:
dict_ampdel[gene] = 'del'
else:
print('PROBLEM GENE:')
print(gene, 'up:', parsed_counts[2][i], 'down:', parsed_counts[3][i])
#remove genes not differentially expressed (i.e. where the value is 0 in most samples).
#borrowing this step from Akavia et al, 2010, from dana peer's lab
RNA = pd.read_csv('BRCA_RNA_candidates.csv',header=0,index_col=0)
#print('RNA BEFORE FILTERING:')
#print(RNA.head())
RNA = RNA.T
RNA['StDev'] = RNA.std(axis=1)
RNA = RNA[RNA['StDev']>0.25]
RNA = RNA.drop('StDev',axis=1)
RNA = RNA.T
#print('RNA AFTER FILTERING:')
#print(RNA.head())
RNA.to_csv('BRCA_RNA_candidates_filtered.csv')
CNV = pd.read_csv('BRCA_CNVs_foldchange_all_filtered.csv',header=0,index_col=0)
CNV = CNV[list(RNA.columns.values)]
CNV.to_csv('BRCA_CNVs_foldchange_all_filtered2.csv')
TNcounts = pd.read_csv('BRCA_TN_CNVs_foldchange_parsed_counts.csv',header=0,index_col=0)
TNcounts = TNcounts[list(RNA.columns.values)]
TNcounts.to_csv('BRCA_TN_CNVs_foldchange_parsed_counts2.csv')
TNsum = pd.read_csv('BRCA_CNVs_foldchange_TN_filtered.csv',header=0,index_col=0)
TNsum = TNsum[list(RNA.columns.values)]
TNsum.to_csv('BRCA_CNVs_foldchange_TN_filtered2.csv')
filtered_genelist = list(TNsum.columns.values)
print('Filtered Gene List:')
print(len(filtered_genelist),'genes') #6709
#convert to z-scores
with open('BRCA_RNA_candidates_filtered.csv', 'r') as RNA:
RNA = csv.reader(RNA)
RNA = list(RNA)
RNA_tr = transpose(RNA)
z_list_tr = []
z_list_tr.append(RNA_tr[0])
for cand in range(1,len(RNA[0])):
#print(RNA[0][cand])
RNA_list = []
for i in RNA_tr[cand]:
if isnumber(i):
RNA_list.append(float(i))
normal = scipy.stats.normaltest(RNA_list)
z_array = scipy.stats.zscore(RNA_list)
z_list_cand = list(z_array)
z_list_cand.insert(0, RNA[0][cand])
z_list_tr.append(z_list_cand)
z_list = transpose(z_list_tr)
with open('BRCA_RNA_z_scores.csv','w+') as z_scores:
z_scores = csv.writer(z_scores)
for line in z_list:
z_scores.writerow(line)
print('created z-scores file')
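#Hedged note (not in the original script): scipy.stats.zscore standardizes each gene's
#expression as z = (x - mean)/std across tumors, and the t-tests further below use a
#Bonferroni-style threshold of 0.05/len(filtered_genelist), about 7.5e-6 for the
#6709 filtered genes reported above.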
#import RNA file and CNV file as list and concatenate:
with open('BRCA_CNVs_foldchange_all_filtered2.csv', 'r') as CNVs:
CNVs = csv.reader(CNVs)
CNVs = list(CNVs)
with open('BRCA_RNA_z_scores.csv', 'r') as RNA:
RNA = csv.reader(RNA)
RNA = list(RNA)
with open('BRCA_CNVs_foldchange_and_RNA.csv', 'w+') as comb:
comb = csv.writer(comb)
firstrow = ['Complete TCGA ID']
for CNV_name in CNVs[0][1:len(CNVs[0])]:
if CNV_name != '':
CNV_header = CNV_name + '-CNV'
firstrow.append(CNV_header)
# firstrow.append('tumor')
# firstrow.append('normal')
for RNA_name in RNA[0][1:len(RNA[0])]:
if RNA_name != '':
RNA_header = RNA_name + '-RNA'
firstrow.append(RNA_header)
comb.writerow(firstrow)
for sample in CNVs[1:]:
for ID in RNA[1:]:
if sample[0] == ID[0]:
sample_list = [sample[0]]
for i in sample[1:]:
sample_list.append(i)
for i in ID[1:]:
sample_list.append(i)
comb.writerow(sample_list)
##then concatenate the CNV and RNA files and do pairwise t-tests.
##set cutoff automatically by amp or del
##then output a list of genes after filtering them by p-value
datadf = pd.read_csv('BRCA_CNVs_foldchange_and_RNA.csv', header=0)
print('COMBINED FILE:')
#print(datadf.head(n=10))
print(datadf.shape)
final_genelist = []
with open('BRCA_CNV_foldchange_siggenes.csv','w+') as sig:
sig = csv.writer(sig)
sig.writerow(['Gene','CNV type','CN cutoff','Percent Altered','p-value for RNA t-test','Result','CN-RNA relationship'])
equal = 0
unequal = 0
non_equal = 0
non_unequal = 0
upnormal = False
downnormal = False
for gene in filtered_genelist:
if float((len(filtered_genelist) - filtered_genelist.index(gene))/50).is_integer():
print(str(len(filtered_genelist) - filtered_genelist.index(gene)) + ' ' + 'genes left')
#print(gene)
CNV_header = gene + '-CNV'
RNA_header = gene + '-RNA'
testdf = datadf[[CNV_header, RNA_header]]
#print(testdf.head())
#print(testdf.shape)
testdf.dropna(inplace=True)
testdf.columns = ['CNV', 'RNA']
#print(testdf.head())
#print(testdf.shape)
nodup = testdf.RNA #checking to see that there is more than one value in RNA
nodup.drop_duplicates(inplace=True)
if nodup.shape[0] > 1:
if dict_ampdel[gene] == 'amp': #test amplifications. here I will ONLY use the 1.2 cutoff.
testdf = testdf[testdf.CNV > 2.**-0.3] #remove deletions
upmask = (testdf.CNV > 2.**0.3)
upRNA = testdf[upmask].RNA
upmean = upRNA.mean()
upmedian = upRNA.median()
cutoff = 2.**0.3
downRNA = testdf[~upmask].RNA
downmean = downRNA.mean()
downmedian = downRNA.median()
perc = TNcounts[gene].ix['percent up of total']*100
elif dict_ampdel[gene] == 'del': #test deletions
testdf = testdf[testdf.CNV < 2.**0.3] #remove amplifications
upmask = (testdf.CNV > 2.**-0.3)
upRNA = testdf[upmask].RNA
upmean = upRNA.mean()
upmedian = upRNA.median()
downRNA = testdf[~upmask].RNA
downmean = downRNA.mean()
downmedian = downRNA.median()
cutoff = 2.**-0.3
perc = TNcounts[gene].ix['percent down of total']*100
if scipy.stats.normaltest(upRNA)[1] > 0.05 or len(upRNA) >=30:
upnormal = True
if scipy.stats.normaltest(downRNA)[1] > 0.05 or len(downRNA) >= 30: #using the central limit theorem to say if the sample is large enough in approximates normal.
downnormal = True
if upnormal and downnormal: #will use one-sided t tests here, because I only want those cases where upRNA > downRNA, not the other way around.
if scipy.stats.bartlett(upRNA,downRNA)[1] > 0.05:
p_value = scipy.stats.ttest_ind(upRNA, downRNA)[1]/2
equal += 1
else:
p_value = scipy.stats.ttest_ind(upRNA, downRNA,equal_var=False)[1]/2 #Welsch t-test
unequal += 1
if upmean > downmean:
relationship = '+'
else: relationship = '-'
else:
if scipy.stats.levene(upRNA,downRNA)[1] > 0.05:
p_value = scipy.stats.mannwhitneyu(upRNA, downRNA)[1]/2 #non-parametric test
non_equal += 1
else:
p_value = scipy.stats.mannwhitneyu(upRNA, downRNA)[1]/2 ##using the Mann-Whitney U test here. Not robust for unequal variances. could try transform
non_unequal += 1
if upmedian > downmedian: #can't consider means for a nonnormal samples
relationship = '+'
else: relationship = '-'
#print(p_value)
if p_value < 0.05/len(filtered_genelist):
final_genelist.append(gene)
stat = 'Significant'
else: stat = 'Non significant'
sig.writerow([gene,dict_ampdel[gene],cutoff,perc,p_value,stat,relationship])
print('Normal, equal variance:',equal) #
print('Normal, unequal variance:', unequal)#
print('Nonnormal, equal variance:',non_equal) #note that all samples were normal in this run
print('Nonnormal, unequal variance:',non_unequal)
if non_unequal > 0:
print('WARNING: Nonnormal, unequal variance samples were identified. Modify the script to show the distribution of these samples and attempt to transform the samples to achieve equal variances or normality.')
#that is, either they actually were normally distributed, or the sample size was > 30. So it is actually just extra conservative to use only the Welsch test in
#this case, as I suspect the CONEXIC people will have done. I have used either a regular t-test or a welsch t-test where appropriate.
print(len(final_genelist), 'significant genes')
print('created significant genes file')
#read in the sig genes file and output counts
siggenes = pd.read_csv('BRCA_CNV_foldchange_siggenes.csv',header=0)
sigonly = siggenes[siggenes['Result']=='Significant']
#print('Number of significant genes:', sigonly.shape[0])
posonly = sigonly[sigonly['CN-RNA relationship']=='+']
print('Significant genes with + relationship:', posonly.shape[0]) #
siggenes.set_index('Gene',inplace=True)
siggenes = siggenes[['CNV type','CN cutoff','Percent Altered','p-value for RNA t-test','Result','CN-RNA relationship']]
cands = pd.read_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step5.csv',header=0,index_col='Symbol')
sub1 = cands[cands['Has RNA data']=='yes']
rest1 = cands[~(cands['Has RNA data']=='yes')]
sub2 = sub1[sub1.index.isin(filtered_genelist)]
rest2 = sub1[~(sub1.index.isin(filtered_genelist))]
sub2['RNA differentially expressed'] = 'yes'
rest2['RNA differentially expressed'] = 'no'
rest1['RNA differentially expressed'] = ''
cands = pd.concat([sub2,rest2,rest1])
cands = cands.merge(siggenes,how='outer',left_index=True, right_index=True)
cands.to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step6.csv',index=True)
```
#### File: jennifereldiaz/fly-tnbc/BRCA_8results_NEW_neel_tndbs_update.py
```python
#alteration in the top 3
#or top quartile for small (=<12) and large segments(>12) respectively
#and in aure et al or Myc SL or breast or basal essential or in a pan-cancer analysis
#tier 3:rest of ISAR genes with significant CN-RNA p-value and frequency of
#alteration in the top 3
#or top quartile for small (=<12) and large segments(>12) respectively
#tier 4:GISTIC genes with significant CN-RNA p-value and frequency of
#alteration in the top 3
#or top quartile for small (=<12) and large segments(>12) respectively
#and in aure et al or Myc SL or breast or basal essential or in a pan-cancer analysis
#tier 5: rest of the GISTIC genes with significant CN-RNA p-value and
#frequency of alteration in the top 3
#or top quartile for small (=<12) and large segments(>12) respectively
#tier 6: rest of the ISAR genes with significant p-value for CN-RNA
#tier 7: rest of the GISTIC genes with p-value significant after multiple hypothesis correction
#First version Nov 21 2014
#Last updated 9/29/20
import pandas as pd
import numpy as np
import math
cands = pd.read_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_step7.csv',header=0) #12906 genes
#cands.rename(columns={'Symbol_x': 'Symbol'},inplace=True)
cands.set_index('Unnamed: 0',inplace=True)
cands = cands[cands.index.astype(str)!='nan']
print('has CNV data',cands[cands['CNV data exists']=='yes'].index.unique().shape) #12621 genes
##it's important to note that a lot of genes simply couldn't be put through all tests
##because I couldn't find the RNA data
print('no RNA data',cands[cands['Has RNA data']=='no'].index.unique().shape) #5606
##start with genes with skewed copy number in TN
#tn = cands[~(np.isnan(cands['Copy number skewed in TN']))]
tn = cands[cands['Copy number skewed in TN']=='yes'] #9708
print('TNBC set',tn.index.dropna().unique().shape)
#tn.set_index('Symbol',inplace=True)
##can check more of these steps with a line like this
cands[(cands['Gene in known common CNV']=='no')&(
cands['CNV data exists']=='yes')&(
cands['Copy number skewed in TN']=='yes')&(
cands['Has RNA data']=='yes')&(
cands['RNA differentially expressed']=='no')].index.unique().shape
##get amp or del for all skewed genes
parsed = pd.read_csv('BRCA_TN_CNVs_foldchange_parsed_counts.csv')
parsed.set_index('gene',inplace=True)
parsed = parsed.T
amp = parsed[parsed['percent up of altered']>50]
dl = parsed[parsed['percent down of altered']>50]
amp['CNV type'] ='amp'
amp['Percent altered'] = amp['percent up of total']
dl['CNV type'] = 'del'
dl['Percent altered'] = dl['percent down of total']
parsed = pd.concat([amp,dl])
parsed['Percent altered'] = parsed['Percent altered']*100
tn.drop('CNV type',axis=1,inplace=True)
tn = tn.merge(parsed[['Percent altered','CNV type']],how='outer',left_index=True,right_index=True)
##eliminate genes who are del and whose CN-RNA relationship is '-' (doesn't make any biological sense)
##genes that are amp and '-' could be due to chromatin remodeling/epigenetic silencing
amp = tn[tn['CNV type']=='amp']
dl = tn[tn['CNV type']=='del']
dl = dl[(dl['CN-RNA relationship']=='+')|(dl['t-test Result']=='Non significant')]
tn = pd.concat([amp,dl])
print('after removing - dels',tn.index.dropna().unique().shape)
print('TN set without RNA data',tn[tn['Has RNA data']=='no'].index.dropna().unique().shape)
print('TN set with RNA diff exp',tn[tn['RNA differentially expressed']=='yes'].index.dropna().unique().shape)##6694
#check a bunch of other databases - this time i'm putting it in a separate dataframe
db = cands[['Synonym', 'Chromosome', 'from', 'to']].copy()
#genes from pan-cancer analyses (mutations and CNVs)
#cosmic
cosmic = pd.read_excel('../COSMIC_cancer_gene_census.xls',sheetname='List')
cosmic = cosmic[['Symbol','Mutation Type']]
cosmic.set_index('Symbol',inplace=True)
cosmic.columns = ['Cosmic']
db = db.merge(cosmic,how='left',right_index=True,left_index=True)
#pan-cancer
pan = pd.read_excel('../Pan_Cancer.xlsx',sheetname='Sheet1')
pan = pan[(pan.Type=='METHYLATION')|(pan.Type=='MUTATION')]
pan = pan[['Altered Locus/Gene','Type']]
pan.columns = ['Symbol','Pan-cancer']
pan.set_index('Symbol',inplace=True)
db = db.merge(pan,how='left',right_index=True,left_index=True)
#vogelstein
vogmut = pd.read_excel('../Vogelstein-cancer-genes.xlsx',sheetname='Table S2A',
skiprows=1)
vogmut['Type'] = 'Mutation'
vogmut = vogmut[['Gene Symbol','Type']]
vogcnv = pd.read_excel('../Vogelstein-cancer-genes.xlsx',sheetname='Table S2B',
skiprows=1)
vogcnv['Type'] = 'CNA'
vogcnv = vogcnv[['Gene Symbol','Type']]
voggerm = pd.read_excel('../Vogelstein-cancer-genes.xlsx',sheetname='Table S4',
skiprows=1)
voggerm['Type'] = 'Germline'
voggerm = voggerm[['Gene Symbol','Type']]
vog = pd.concat([vogmut,vogcnv,voggerm])
vog.columns = ['Symbol','Vogelstein']
vog.set_index('Symbol',inplace=True)
db = db.merge(vog,how='left',right_index=True,left_index=True)
#civic
civ = pd.read_csv('../nightly-GeneSummaries_CiVICdb_160329.tsv',sep='\t',header=0)
civ = civ[['name','description']]
civ.set_index('name',inplace=True)
civ.columns= ['CiVIC']
db = db.merge(civ,how='left',right_index=True,left_index=True)
#fix dtypes
db[['Cosmic','Pan-cancer','Vogelstein','CiVIC']] = db[[
'Cosmic','Pan-cancer','Vogelstein','CiVIC']].astype(str)
#kessler RNAi Myc synthetic lethal screen
kessler = pd.read_csv('kesslerS1.txt',sep = ' ',header=0)
kessler.dropna(how='all',inplace=True)
kessler.dropna(axis=1,how='all',inplace=True)
kessler.drop(['v2hs','median.pair.diffs'],axis=1,inplace=True)
kessler.set_index('symbol',inplace=True)
kessler['MYC SL (Kessler et al)'] = 'yes'
db = db.merge(kessler,how='left',right_index=True,left_index=True)
#aure computational study
aure = pd.read_csv('genes_from_Aure2013.txt',header=0)
aure.dropna(how='all',inplace=True)
aure.dropna(axis=1,how='all',inplace=True)
aure['Gene in Aure et al 2013'] = 'yes'
aure.set_index('Gene',inplace=True)
db = db.merge(aure,how='left',right_index=True,left_index=True)
db1 = db.copy() #this is what I'll use to differentiate tier 4 and 5
#ben neel essential genes in cell lines
#originally I got these before they published. I only sent him genes in 'tn', not the whole brca set.
#neel = pd.read_csv('BRCA_essential_genes_Neel.csv', header=0)
#basalspec = neel[['Basal']].dropna().set_index('Basal')
#breastspec = neel[['Breast-Specific']].dropna().set_index('Breast-Specific')
#basalspec['Basal-specific essential (Neel)'] = 'yes'
#breastspec['Breast-specific essential (Neel)'] ='yes'
#
#db = db.merge(basalspec,how='left',right_index=True,left_index=True)
#db = db.merge(breastspec, how='left',right_index=True,left_index=True)
#now I'm updating to check everything they published.
for c in [1+i*4 for i in range(0,7)]:
db['Marcotte_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3A',
skiprows=2,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3A',
skiprows=4,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*4 for i in range(0,4)]:
db['Marcotte_Neve_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3B',
skiprows=2,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3B',
skiprows=4,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*4 for i in range(0,6)]:
db['Marcotte_Lehmann_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3C',
skiprows=2,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3C',
skiprows=4,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*4 for i in range(0,10)]:
db['Marcotte_Curtis_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3D',
skiprows=2,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3D',
skiprows=4,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*4 for i in range(0,7)]:
db['Marcotte_RPPA_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3E',
skiprows=2,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3E',
skiprows=4,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
db['Marcotte_basal_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc4.xlsx',sheetname='S3F',
skiprows=3,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*3 for i in range(0,45)]:
db['Marcotte_METABRIC_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc5.xlsx',sheetname='S4A',
skiprows=1,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc5.xlsx',sheetname='S4A',
skiprows=3,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
for c in [1+i*3 for i in range(0,45)]:
db['Marcotte_ISAR_'+pd.read_excel('../Marcotte_Neel_BRCA/mmc5.xlsx',sheetname='S4B',
skiprows=1,nrows=1,usecols=[c]).loc[0,'Unnamed: '+str(c)]+
'_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc5.xlsx',sheetname='S4B',
skiprows=3,usecols=list(range(c+2)),index_col='Gene').index.dropna()),
'yes',np.nan)
db['Marcotte_decreased_expression_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc7.xlsx',sheetname='S6A',
skiprows=2,index_col='Gene').index.dropna()),
'yes',np.nan)
db['Marcotte_increased_expression_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc7.xlsx',sheetname='S6B',
skiprows=3,index_col='Gene').index.dropna()),
'yes',np.nan)
db['Marcotte_heterozygous_copy_loss_essential'] = np.where(db.index.isin(
pd.read_excel('../Marcotte_Neel_BRCA/mmc7.xlsx',sheetname='S6C',
skiprows=2,index_col='Gene').index.dropna()),
'yes',np.nan)
#TN specific functional databases
db['Patel_TN_gene_addictions'] = np.where(db.index.isin(
pd.read_excel(
'patel_gene_addictions/41467_2018_3283_MOESM6_ESM.xlsx',
sheetname='Supplementary Data 4')[
'Integrated Gene List (n=37)'].dropna().tolist()),
'yes',np.nan)
db['Koedoot_TN_Hs578T_migration_drivers'] = np.where(db.index.isin(
pd.read_excel(
'koedoot_migration_drivers/41467_2019_11020_MOESM18_ESM.xlsx',
sheetname='HS hits',skiprows=2)[
'GeneSymbol'].dropna().tolist()),
'yes',np.nan)
db['Koedoot_TN_MDA231_migration_drivers'] = np.where(db.index.isin(
pd.read_excel(
'koedoot_migration_drivers/41467_2019_11020_MOESM18_ESM.xlsx',
sheetname='MDA hits',skiprows=2)[
'GeneSymbol'].dropna().tolist()),
'yes',np.nan)
db['Miao_sleeping_beauty_BrWSB'] = np.where(db.index.isin(
pd.read_excel(
'miao_sleeping_beauty/41467_2020_16936_MOESM6_ESM_fixed.xlsx',
sheetname='Sheet1',skiprows=2)[
'Candidate genes in BrWSB group'].str.upper().dropna().tolist()),
'yes',np.nan)
db['Miao_sleeping_beauty_BrMSB'] = np.where(db.index.isin(
pd.read_excel(
'miao_sleeping_beauty/41467_2020_16936_MOESM7_ESM.xlsx',
sheetname='Sheet1',skiprows=2)[
'Candidate genes in BrMSB group'].str.upper().dropna().tolist()),
'yes',np.nan)
db[(db[db.columns[28:]]=='yes').any(axis=1)].shape
#export this and use to validate genes from screen
#db.Chromosome=db.Chromosome.astype(int)
#db['from'] = db['from'].astype(int)
#next time exporting this file, replace column names:
#Cosmic (Forbes et al) Pan-cancer (Ciriello et al) Parsons et al CiVIC (Griffith et al) MYC SL (Kessler et al) Aure et al
db.drop_duplicates().fillna('').replace('nan','').sort_values(['Chromosome',
'from']).to_csv(
'BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_TNgenes_databases.csv')
#,index=False
#reset index
#tn.Chromosome=tn.Chromosome.astype(int)
#tn['from'] = tn['from'].astype(int)
#db1.Chromosome=db1.Chromosome.astype(int)
#db1['from'] = db1['from'].astype(int)
db1 = db1[db1.index.isin(tn.index)]
db1.sort_values(['Chromosome','from'],inplace=True)
db1 = db1.fillna('nan')[['Cosmic', 'Pan-cancer',
'Vogelstein', 'CiVIC', 'MYC SL (Kessler et al)',
'Gene in Aure et al 2013']]
tn.sort_values(['Chromosome','from'],inplace=True)
#db1.reindex(tn.index)#,inplace=True
tn.reset_index(drop=False,inplace=True)
tn.rename(columns ={'index':'Symbol'},inplace=True)
#db1.reset_index(drop=False,inplace=True)
#db1.rename(columns ={'index':'Symbol'},inplace=True)
def segtest(df,column):
newdf = pd.DataFrame(columns = list(df.columns.values))
#df = df[df['CNV type consistent with dataset']=='yes']
for cytoband in df['cytoband'].unique():
cytodf = df[df['cytoband'] == cytoband]
if len(cytodf[column].unique()) > 1:
#print(cytodf.columns)
cytodf.reset_index(drop=True,inplace=True)
cytodf.sort_values([column],axis=0,ascending=False,inplace=True)
#I'm going to take the top genes in each list, based on the assumption that no more than
#3-4 driver genes are in a segment - don't know if that's true
if cytodf.shape[0] > 12:
cutoff = cytodf.ix[int(math.floor(cytodf.shape[0]/4)),column] #take top quartile
last = cytodf[cytodf[column]==cutoff].drop_duplicates(
subset=column,keep='last').index.values[0]
cytodf = cytodf[:last]
elif cytodf.shape[0] > 3:
cutoff = cytodf.ix[3,column]
last = cytodf[cytodf[column]==cutoff].drop_duplicates(
subset=column,keep='last').index.values[0]
cytodf = cytodf[:last]
newdf = pd.concat([newdf,cytodf])
#newdf['Gene'] = newdf.index
#newdf = newdf[['Gene','CNV type','CN cutoff','CN-RNA relationship','Result','p-value for RNA t-test',
# column,'N','chromosome','cytoband',
# 'genes in peak','Seg p-value','small seg score','wide peak start','wide peak end']]
#newdf['small seg score'] = 1/newdf['genes in peak']
#newdf.sort(['small seg score','Seg p-value'],ascending = [False,True],inplace=True)
#newdf.drop_duplicates(cols=['Gene'],inplace=True)
return newdf
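#Hedged illustration (not part of the original analysis): for each cytoband with more than
#one distinct value of the ranking column, segtest keeps only the top-ranked genes, namely
#the top quartile when the segment holds more than 12 genes and roughly the top 3-4
#(ties included) otherwise; e.g.
#   segtest(isar.rename(columns={'ISARpeak': 'cytoband'}), 'HeliosScore')
#returns the highest-Helios genes within each ISAR peak, as done just below.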
#stratify genes picked up by isar
isar = tn[~np.isnan(tn.ISARpeak)]
isar.rename(columns={'ISARpeak':'cytoband'},inplace=True)
tier1 = segtest(isar,'HeliosScore')
tier1['tier1'] = 1 #128 genes #37 regions
tier1.set_index('Symbol',inplace=True)
tn.set_index('Symbol',inplace=True)
tn = tn.merge(tier1[['tier1']],how='outer',left_index=True,right_index=True)
tn.reset_index(drop=False,inplace=True)
isar = isar[isar['t-test Result']=='Significant']
tier2 = segtest(isar,'Percent altered')
tier3 = tier2[tier2.Symbol.isin(db1[(db1=='nan').all(axis=1)].index)]
tier2 = tier2[tier2.Symbol.isin(db1[(db1!='nan').any(axis=1)].index)]
tier2['tier2'] = 2 #35 regions #139 genes
tier3['tier2'] = 3
tier2 = pd.concat([tier2,tier3])
tier2.sort_values('tier2').drop_duplicates(
[col for col in tier2.columns if 'tier2' not in col])
tier2.set_index('Symbol',inplace=True)
tn.set_index('Symbol',inplace=True)
tn = tn.merge(tier2[['tier2']],how='outer',left_index=True,right_index=True).drop_duplicates()
tn.reset_index(drop=False,inplace=True)
#stratify genes picked up by gistic but not isar
gistic = tn[np.isnan(tn['ISARpeak'])|((tn['CNV type']=='del')&(
tn['TCGA basal del cytoband'].astype(str)!='nan')|(
tn['TCGA total del cytoband'].astype(str)!='nan'))]
##also eliminiate genes not differentially expressed
gistic = gistic[gistic['RNA differentially expressed']=='yes']
gistic = gistic[gistic['t-test Result']=='Significant']
#and consistent cnv type
gistic = gistic[gistic['CNV type consistent with dataset']=='yes']
basalamp = gistic.rename(columns={'TCGA basal amp cytoband':'cytoband'})
basalamp = segtest(basalamp,'Percent altered')
basalamp.rename(columns={'cytoband':'TCGA basal amp cytoband'},inplace=True)
basaldel = gistic.rename(columns={'TCGA basal del cytoband':'cytoband'})
basaldel = segtest(basaldel,'Percent altered')
basaldel.rename(columns={'cytoband':'TCGA basal del cytoband'},inplace=True)
totalamp = gistic.rename(columns={'TCGA total amp cytoband':'cytoband'})
totalamp = segtest(totalamp,'Percent altered')
totalamp.rename(columns={'cytoband':'TCGA total amp cytoband'},inplace=True)
totaldel = gistic.rename(columns={'TCGA total del cytoband':'cytoband'})
totaldel = segtest(totaldel,'Percent altered')
totaldel.rename(columns={'cytoband':'TCGA total del cytoband'},inplace=True)
tier4 = pd.concat([basalamp,basaldel,totalamp,totaldel]).drop_duplicates()
#tier1.5--small segments
tier1point5 = tier4[(tier4['TCGA basal amp genes in peak']<10)|(
tier4['TCGA basal del genes in peak']<10)|
(tier4['TCGA total amp genes in peak']<10)|(
tier4['TCGA total del genes in peak']<10)]
#tier5 = tier4[tier4.Symbol.isin(db1[(db1=='nan').all(axis=1)].index)&~(
# tier4.Symbol.isin(tier1point5.Symbol))]
tier4 = tier4[tier4.Symbol.isin(db1[(db1!='nan').any(axis=1)].index)&~(
tier4.Symbol.isin(tier1point5.Symbol))]
#gistic.set_index('Symbol',inplace=True)
tier5 = gistic[~gistic.Symbol.isin(tier1point5.Symbol)&~gistic.Symbol.isin(tier4.Symbol)]
tier5['tier4'] = 5
tier4['tier4'] = 4
tier1point5['tier4'] = 1.5 #16 genes #18 regions
tier4 = pd.concat([tier1point5,tier4,tier5]) #tiers 4-5: 1211 genes 68 regions
tier4.sort_values('tier4').drop_duplicates(
[col for col in tier4.columns if 'tier4' not in col])
tier4.set_index('Symbol',inplace=True)
tn.set_index('Symbol',inplace=True)
tn = tn.merge(tier4[['tier4']],how='outer',left_index=True,right_index=True).drop_duplicates()
tn.reset_index(drop=False,inplace=True)
tn['tier'] = tn[['tier1','tier2','tier4']].min(axis=1)
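#Hedged note (not in the original script): the final tier is the lowest, i.e. highest-priority,
#of the tier1/tier2/tier4 assignments, so a gene flagged as both 1.5 and 4 ends up in tier 1.5.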
#tn.drop(['tier1','tier2','tier4'],axis=1,inplace=True)
tn.drop_duplicates(inplace=True)
tn.set_index('Symbol',inplace=True)
#add tier information to full file
cands = cands.merge(tn[['tier']],how='outer',
left_index=True,right_index=True)
###tier X: Any genes NOT picked up by the TN-specific analysis - skip this for now
#othertypes = cands[cands['Copy number skewed in TN']=='no']
#TN = cands[cands['Copy number skewed in TN']=='yes']
#known = othertypes[~(othertypes.Cosmic=='nan')|~(othertypes['Pan-cancer']=='nan')
# |~(othertypes['Vogelstein']=='nan')|~(othertypes['CiVIC']=='nan')]
#known.tier = 10
#rest = othertypes[(othertypes.Cosmic=='nan')&(othertypes['Pan-cancer']=='nan')
# &(othertypes['Vogelstein']=='nan')&(othertypes['CiVIC']=='nan')]
#cands = pd.concat([TN,known,rest])
cands.sort_values(['Chromosome','from'],inplace=True)
#I selected out 'tier 0' manually based on the Helios scores because tier 1 was too much.
#also selected out some of group 2 manually by looking at the different databases.
#import these
tier0 = pd.concat([pd.read_excel('../../../CNV_screen/final_group1_linelevel.xlsx',
sheetname='Group 1I',index_col='Unnamed: 0'),
pd.read_excel('../../../CNV_screen/final_group1_linelevel.xlsx',
sheetname='Group 1G',index_col='Unnamed: 0')])
amb = pd.concat([pd.read_excel('../../../CNV_screen/final_group2_linelevel.xlsx',
sheetname='Group 2G',index_col='Unnamed: 0'),
pd.read_excel('../../../CNV_screen/final_group2_linelevel.xlsx',
sheetname='Group 2I',index_col='Unnamed: 0')])
#raise SystemExit(0)
##reset index and export
cands['group'] = np.where(cands.index.isin(tier0.index),'1I',np.where(
cands.index.isin(amb.index)&(
cands['CNV type consistent with dataset']=='yes'),
'2IG',np.where(cands.index.isin(amb.index)&(
cands['CNV type consistent with dataset']=='no'),
'2I',np.where(cands.tier==1.5,'1G',
np.where(cands.tier.isin([1,2])&(
cands['CNV type consistent with dataset']=='yes'),
'3I',np.where(
(cands.tier==4)&(
cands['CNV type consistent with dataset']=='yes'),
'3G',
np.where(~np.isnan(cands.ISARpeak),'4I',
np.where(cands.tier==5,'4G',''))))))))
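# Summary of the group codes assigned above (derived from the np.where cascade):
#   '1I' = genes in the manually selected tier-0 set; '2IG'/'2I' = genes in the manually reviewed
#   ambiguous set with/without a CNV type consistent with the dataset; '1G' = tier 1.5;
#   '3I' = tiers 1-2 with consistent CNV type; '3G' = tier 4 with consistent CNV type;
#   '4I' = remaining genes with an ISAR peak; '4G' = tier 5; '' = everything else.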
#fly orthologs
orth = pd.read_csv('New human-fy orthologs library +Diopt.csv',header=0)
#orth.set_index('Human gene',inplace=True)
homs = cands[cands.index.isin(orth['Human gene'])]
nohoms = cands[~cands.index.isin(orth['Human gene'])]
homs['Has fly ortholog'] = 'yes'
nohoms['Has fly ortholog'] = 'no'
cands = pd.concat([homs,nohoms])
cands.sort_values(['Chromosome','from'],inplace=True)
cands.reset_index(drop=False,inplace=True)
cands.rename(columns={'index':'Symbol'},inplace=True)
cands.sort_values(['Chromosome','from'],inplace=True)
cands.drop_duplicates().fillna('').to_csv(
'BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate.csv',index=False)
cands[~cands['Symbol'].str.contains('RN7')&~cands['Symbol'].str.contains(
'RNU')].drop_duplicates().fillna('').to_csv(
'BRCA_allGISTIC2.0andISARgenes_folchange_compressed_final_dbupdate_nopseudogenes.csv',index=False)
orth.set_index('Human gene',inplace=True)
homs = homs.merge(orth,how = 'left',left_index=True,right_index=True)
homs.sort_values(['Chromosome','from'],inplace=True)
homs.drop_duplicates(inplace=True)
#ok, now here is a replicate of the 6694 genes that remain after 'selection' step:
cands[(cands['RNA differentially expressed']=='yes')&(
cands['Copy number skewed in TN']=='yes')&~(
(cands['CN-RNA relationship']=='-')&(
cands['t-test Result']=='Significant')&(
cands['CNV type']=='del'))]['Symbol'].unique().shape
##here is the equivalent selection from genes with homologs:
homs[(homs['RNA differentially expressed']=='yes')&(
homs['Copy number skewed in TN']=='yes')&~(
(homs['CN-RNA relationship']=='-')&(
homs['t-test Result']=='Significant')&(
homs['CNV type']=='del'))].index.unique().shape
#4455
#so this is how many genes I have that passed selection and have orthologs.
#new breakdown by group:
#group 1:
homs[homs.group=='1I'].ISARpeak.unique().shape
homs[homs.group=='1I'].index.unique().shape
#count 1G regions manually
homs[homs.group=='1G'].index.unique().shape
#group 2:
homs[homs.group=='2I'].ISARpeak.unique().shape
homs[homs.group=='2I'].index.unique().shape
homs[homs.group=='2IG'].ISARpeak.unique().shape
homs[homs.group=='2IG'].index.unique().shape
# number genes tested, 2IG:
amb[(amb['CNV type consistent with dataset']=='yes')&~(
amb['Lethality result'].astype(str)=='nan')].index.unique().shape
# number genes tested, 2I:
amb[(amb['CNV type consistent with dataset']=='no')&~(
amb['Lethality result'].astype(str)=='nan')].index.unique().shape
# number lines tested, 2IG:
amb[(amb['CNV type consistent with dataset']=='yes')&~(
amb['Lethality result'].astype(str)=='nan')]['Stock tested'].unique().shape
# number lines tested, 2I:
amb[(amb['CNV type consistent with dataset']=='no')&~(
amb['Lethality result'].astype(str)=='nan')]['Stock tested'].unique().shape
#group 3:
homs[homs.group=='3I'].ISARpeak.unique().shape
homs[homs.group=='3I'].index.unique().shape
len(set(homs[homs.group=='3G'][['TCGA basal amp cytoband',
'TCGA basal del cytoband',
'TCGA total amp cytoband',
'TCGA total del cytoband']].values.ravel())-set([np.nan]))
homs[homs.group=='3G'].index.unique().shape
#group 4:
homs[homs.group=='4I'].ISARpeak.unique().shape
homs[homs.group=='4I'].index.unique().shape
len(set(homs[homs.group=='4G'][['TCGA basal amp cytoband',
'TCGA basal del cytoband',
'TCGA total amp cytoband',
'TCGA total del cytoband']].values.ravel())-set([np.nan]))
homs[homs.group=='4G'].index.unique().shape
#check for duplicates among the groups
groups = homs[['tier','group']]
groups['Symbol'] = groups.index
groups.reset_index(drop=True,inplace=True)
uniq = groups.drop_duplicates()
dups = uniq[uniq.duplicated('Symbol')]
uniq[uniq.Symbol.isin(dups.Symbol)]
#fill in driver group numbers
driv = pd.read_excel(
'../../../CNV_screen/BRCA_allGISTIC2.0andISARgenes_compressed_final_summary_drivers.xlsx',
sheetname='drivers',
index_col=0)
drivers = cands[cands['Symbol'].isin(driv.index)]
drivers.Chromosome = drivers.Chromosome.astype(int)
print(drivers.sort_values(['Chromosome',
'from'])[['Symbol','group']].drop_duplicates())
print(drivers.sort_values(['Chromosome',
'from'])[['Symbol','group']].drop_duplicates()[['group']].to_csv(index=False))
#driver neel and tn databases
for driver in drivers['Symbol'].unique():
print(driver)
print(', '.join([i.replace('_',' ') for i in db.loc[driver][db.loc[driver].astype(
str)!='nan'].index.tolist() if not i in ['Synonym',
'Chromosome', 'from', 'to']]))
##old breakdown by tier:
#one = homs[homs.tier==1]
#one[(one['CNV type']=='del')&((
# one['TCGA basal del cytoband'].astype(str)!='nan')|(
# one['TCGA total del cytoband'].astype(str)!='nan'))].to_csv(
# 'BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier1_ambiguousdeletions.csv')#12hum,16fly, 5reg
#one[(one['CNV type']=='del')&((
# one['TCGA basal del cytoband'].astype(str)=='nan')&(
# one['TCGA total del cytoband'].astype(str)=='nan'))].to_csv(
# 'BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier1_paradoxicaldeletions.csv')#119hum,316fly ,27reg
#one[(one['CNV type']!='del')].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier1_amps.csv')#89hum,225fly,30reg
#two = homs[homs.tier==2]
#two[two['CNV type']=='amp'].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier2.csv')#10hum,20fly,9reg
#homs[(homs.tier==3)&(
# homs['CNV type']=='amp')].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier3.csv')#32hum,63fly,15reg
#homs[homs.tier==4].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier4.csv')#191hum,475fly,51reg
#homs[homs.tier==5].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier5.csv')#807hum,2302fly,61reg
#homs[homs.tier==1.5].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tier1.5.csv')#16hum,37fly,16reg
##amps: 6hum,11fly,5regions #dels: 10hum,26fly,10regions
##homs[homs.tier==10].to_csv('BRCA_allGISTIC2.0andISARgenes_foldchange_compressed_final_dbupdate_tierX.csv')#65hum,162fly
##here's the new breakdown for the paper
#high priority: tiers 0, 1.5
#intermediate: tiers 1, 2, 4
#low priority -- intersect the remainder of genes with the selection of genes with homologs above:
remainder = homs[(homs.tier==3)|(homs.tier>4)|np.isnan(homs.tier)]
remainder.index.dropna().unique().shape
background = homs[(homs['RNA differentially expressed']=='yes')&(
homs['Copy number skewed in TN']=='yes')&~(
(homs['CN-RNA relationship']=='-')&(
homs['t-test Result']=='Significant')&(
homs['CNV type']=='del'))]
background.index.unique().shape
#human genes --> len(set(remainder.index.dropna())&set(background.index.dropna())) #4138
remainder[remainder.index.isin(background.index)].index.dropna().unique().shape
r = remainder[remainder.index.isin(background.index)]
#low priority regions:
len(set(r['TCGA basal amp cytoband'].dropna())|set(
r['TCGA basal del cytoband'].dropna())|set(
r['TCGA total amp cytoband'].dropna())|set(
r['TCGA total del cytoband'].dropna())|set(
r.ISARpeak.dropna()))
##125 regions (ISAR and GISTIC)
#old:
#everything after tier 5: 4344 human genes, 16684 fly, in 149 regions
###select 5 random genes from tier 5 for negative controls (ish)
##import random
##df = homs[homs.tier==5].reset_index(drop=False)
##for i in range(5):
## print(df.ix[random.randrange(df.shape[0]),'index'])
#selected genes: ##didn't end up using these because I had some tier 5 and lower genes on hand
##RDH14
##ARHGEF3
##KIAA1841
##NT5C3L
##VPS33A
```
|
{
"source": "JenniferHahn/data_integration_using_deep_learning",
"score": 3
}
|
#### File: LocalBusiness/Preprocessing/functions.py
```python
import pandas as pd
def open_product_zip_file(file_name):
file = pd.read_json(file_name, compression='gzip', lines=True)
return file
def files_to_df_extended(files):
df_list = []
for file in files:
df = open_product_zip_file(file)
if 'name' in df.columns:
df_reduced = pd.DataFrame({'name': df['name']})
df_reduced['origin'] = file
df_list.append(df_reduced)
df_merged = pd.concat(df_list)
df_merged = df_merged.reset_index()
return df_merged
def delete_empty_rows(df):
rows = []
for i, row in enumerate(df['name']):
if type(row) != str:
rows.append(i)
df_clean = df.drop(index=rows)
df_clean = df_clean.reset_index(drop=True)
return df_clean
def remove_punctuations(df, keep_numbers='yes'): # keep numbers as default
    if keep_numbers == 'yes':
        df["new_column"] = df['name'].str.replace(r'[^\w\s]', ' ')
    else:
        df["new_column"] = df['name'].str.replace(r'[^A-Za-z_\s]', ' ')
return df
def extract_most_similar(model, index, df):
similar_doc = model.docvecs.most_similar(str(index), topn = 10)
index_list = []
for index, similarity in similar_doc:
index = int(index)
index_list.append(index)
return df.iloc[index_list]
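# Illustrative usage of the helpers above (the file names and the trained Doc2Vec model are
# hypothetical, not part of this module):
#   df = files_to_df_extended(['localbusiness_00.json.gz', 'localbusiness_01.json.gz'])
#   df = remove_punctuations(delete_empty_rows(df))
#   similar = extract_most_similar(model, 42, df)   # 10 rows most similar to document 42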
```
|
{
"source": "jenniferhaoba/AutomationTest",
"score": 2
}
|
#### File: web/utils/Log.py
```python
import logging
from logging.handlers import TimedRotatingFileHandler
from com.web.utils.Config import LOG_PATH, Config
import os
class Logger(object):
def __init__(self, logger_name='AutoTestlog'):
self.logger = logging.getLogger(logger_name)
logging.root.setLevel(logging.NOTSET)
c = Config().get('log')
        # use the file name from the 'log' config section when present, otherwise default to 'test.log'
self.log_file_name = c.get('file_name') if c and c.get('file_name') else 'test.log'
        # number of rotated log files to keep
self.backup_count = c.get('backup_count') if c and c.get('backup_count') else 7
self.console_output_level = c.get('console_level') if c and c.get('console_level') else 'WARNING'
self.file_output_level = c.get('file_level') if c and c.get('file_level') else 'DEBUG'
pattern = c.get('pattern') if c and c.get('pattern') else '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
self.formatter = logging.Formatter(pattern)
def get_logger(self):
"""在logger中添加日志句柄并返回,如果logger已有句柄,则直接返回
这里添加两个句柄,一个输出日志到控制台,另一个输出到日志文件。
两个句柄的日志级别不同,在配置文件中可设置。
"""
        if not self.logger.handlers:  # avoid adding duplicate handlers (and duplicate log lines)
console_handler = logging.StreamHandler()
console_handler.setFormatter(self.formatter)
console_handler.setLevel(self.console_output_level)
self.logger.addHandler(console_handler)
            # roll over to a new log file every day, keeping at most backup_count old files
file_handler = TimedRotatingFileHandler(filename=os.path.join(LOG_PATH, self.log_file_name),
when='D',
                                                     interval=1,  # one day ('D' means daily rotation)
backupCount=self.backup_count,
delay=True,
encoding='utf-8'
)
file_handler.setFormatter(self.formatter)
file_handler.setLevel(self.file_output_level)
self.logger.addHandler(file_handler)
return self.logger
loggerUtils = Logger()  # instantiate the Logger first, then call the instance method
logger = loggerUtils.get_logger()
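# Illustrative usage (not part of the original module):
#   from com.web.utils.Log import logger
#   logger.info('test suite started')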
```
#### File: web/utils/Mail.py
```python
import re
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from socket import gaierror, error
import time
from smtplib import SMTPResponseException, SMTPAuthenticationError
from com.web.utils.Log import logger
"""
Email helper class used to send mail to specified recipients. Multiple recipients and attachments are supported.
Content-Disposition is an extension of the MIME protocol; it tells the MIME user agent how to display an attached file,
and it provides a default file name when the user saves the requested content as a file.
When this header is used with the application/octet-stream content type, the content is not displayed inline;
instead the client shows a "file download" dialog and the user decides whether to open or save it.
MIMEMultipart is used to send a message with several attachments; the only difference between sending an
HTML mail and a plain-text mail is setting the MIMEText _subtype to 'html' or 'plain'.
"""
class Email:
def __init__(self, server, port, sender, password, receiver, title, message=None, path=None):
"""初始化Email
:param title: 邮件标题,必填。
:param message: 邮件正文,非必填。
:param path: 附件路径,可传入list(多附件)或str(单个附件),非必填。
:param server: smtp服务器,必填。
:param sender: 发件人,必填。
:param password: 发件人密码,必填。
:param receiver: 收件人,多收件人用“;”隔开,必填。
"""
self.title = title
self.message = message
self.files = path
        self.msg = MIMEMultipart('related')  # 'related' lets the message body reference inline/embedded resources
self.server = server
self.password = password
self.receiver = receiver
self.sender = sender
self.port = port
def _attach_file(self, att_file):
"""将单个文件添加到附件列表中"""
att = MIMEText(open('%s' %att_file, 'rb').read(), 'plain', 'utf-8')##_subtype有plain,html等格式
att["Content-Type"] = 'application/octet-stream'#request header内容设置,上传附件类型设置,该类型可以上传多种附件图片,视频,文件等
file_name = re.split(r'[\\|/]', att_file) #按正则表达式设置的模式分隔附件
#打开文件时需要提示用户保存,就要利用header中Content-Disposition进行一下处理
#filename参数可以包含路径信息,但User-Agnet会忽略掉这些信息,只会把路径信息的最后一部分做为文件名file_name[-1]
att["Content-Disposition"] = 'attachment; filename="%s"' % file_name[-1]
self.msg.attach(att)
logger.info('attach file{}'.format(att_file))
def send(self):
        self.msg['Subject'] = self.title
        self.msg['From'] = self.sender
        self.msg['To'] = self.receiver
        # mail body
        if self.message:
            self.msg.attach(MIMEText(self.message))  # attach new subparts to the message by using the attach() method
        # add attachments; supports multiple attachments (a list) or a single attachment (a str)
if self.files:
if isinstance(self.files, list):
for f in self.files:
self._attach_file(f)
elif isinstance(self.files, str):
self._attach_file(self.files)
        # connect to the SMTP server and send
        try:
            smtp_server = smtplib.SMTP_SSL(self.server)  # connect to the server over SSL
            smtp_server.connect(self.server, self.port)
        except (gaierror, error) as e:
            logger.exception('Failed to send mail: could not connect to the SMTP server. '
                             'Check the network and the SMTP server settings. %s', e)
        else:
            try:
                smtp_server.login(self.sender, self.password)  # log in
                time.sleep(5)
                smtp_server.sendmail(self.sender, self.receiver.split(';'), self.msg.as_string())  # send the mail
                logger.info('Mail "{0}" sent successfully! Recipient(s): {1}. If no mail arrives, check the '
                            'spam folder and verify that the recipient address is correct'.format(self.title, self.receiver))
            except SMTPAuthenticationError as e:
                logger.exception('Username/password authentication failed! %s', e)
            except SMTPResponseException as e:
                logger.exception("send exception:%s", str(e))
            finally:
                smtp_server.quit()  # disconnect
```
|
{
"source": "jenniferliddle/iixi",
"score": 3
}
|
#### File: iixi/Jixi/commandTab.py
```python
from PyQt4 import QtGui, QtCore
class commandTab(QtGui.QWidget):
serial_send_signal = QtCore.pyqtSignal(str)
def __init__(self):
super(commandTab, self).__init__()
setHomeBtn = QtGui.QPushButton('Set Home Position')
        setHomeBtn.setStatusTip('Set the current position to be the home position')
setHomeBtn.clicked.connect(self.setHomePosition)
goHomeBtn = QtGui.QPushButton('Return Home')
goHomeBtn.setStatusTip('Return to the home position')
goHomeBtn.clicked.connect(self.goHomePosition)
stepw = QtGui.QDoubleSpinBox()
stepw.setRange(0.1,10.0)
stepw.setSingleStep(0.1)
stepw.setSuffix('mm')
# make buttons fixed size to make them square, rather than default to rectangular
xplus = QtGui.QPushButton('X+')
xplus.setFixedSize(50,50)
xminus = QtGui.QPushButton('X-')
xminus.setFixedSize(50,50)
yplus = QtGui.QPushButton('Y+')
yplus.setFixedSize(50,50)
yminus = QtGui.QPushButton('Y-')
yminus.setFixedSize(50,50)
zplus = QtGui.QPushButton('Z+')
zplus.setFixedSize(50,50)
zminus = QtGui.QPushButton('Z-')
zminus.setFixedSize(50,50)
self.grid = QtGui.QGridLayout()
maxcol = 14
self.grid.addWidget(setHomeBtn,0,0,1,3)
self.grid.addWidget(goHomeBtn,1,0,1,3)
self.grid.addWidget(yplus, 2, maxcol-5, 2, 2)
self.grid.addWidget(xminus, 3, maxcol-7, 2, 2)
self.grid.addWidget(xplus, 3, maxcol-3, 2, 2)
self.grid.addWidget(yminus, 5, maxcol-5, 2, 2)
self.grid.addWidget(zplus, 2, maxcol-1, 2, 2)
self.grid.addWidget(zminus, 5, maxcol-1, 2, 2)
self.grid.addWidget(QtGui.QLabel('Step size'),0,maxcol-3, 1, 2)
self.grid.addWidget(stepw,0,maxcol-1, 1, 2)
self.setLayout(self.grid)
def setHomePosition(self):
self.serial_send_signal.emit('?')
return True
def goHomePosition(self):
return True
```
#### File: iixi/Jixi/fileTab.py
```python
from PyQt4 import QtGui, QtCore
from Jixi.jFileThread import jFileThread
import os
import time
import Jixi.jStatus
import shlex, subprocess
class fileTab(QtGui.QWidget):
serial_send_signal = QtCore.pyqtSignal(str)
def __init__(self):
super(fileTab, self).__init__()
btn = QtGui.QPushButton('Select File')
btn.setStatusTip('Select g-code file')
btn.clicked.connect(self.showDialog)
self.sendBtn = QtGui.QPushButton('Send')
self.sendBtn.setStatusTip('Send file to X-Carve')
self.sendBtn.setEnabled(False)
self.sendBtn.clicked.connect(self.sendFile)
self.pauseBtn = QtGui.QPushButton('Pause')
self.pauseBtn.setStatusTip('Pause carving')
self.pauseBtn.setEnabled(False)
self.pauseBtn.clicked.connect(self.pauseFile)
self.cancelBtn = QtGui.QPushButton('Cancel')
self.cancelBtn.setStatusTip('Cancel carving')
self.cancelBtn.setEnabled(False)
self.cancelBtn.clicked.connect(self.cancelFile)
self.visualizeBtn = QtGui.QPushButton('Visualize')
self.visualizeBtn.setStatusTip('Visualize file')
self.visualizeBtn.setEnabled(False)
self.visualizeBtn.clicked.connect(self.visualizeFile)
self.fnamew = QtGui.QLabel()
self.fstatw = QtGui.QLabel()
self.durationw = QtGui.QLabel()
self.estimationw = QtGui.QLabel()
self.grid = QtGui.QGridLayout()
self.grid.addWidget(QtGui.QLabel('Filename:'),0,0)
self.grid.addWidget(btn,0,4)
self.grid.addWidget(self.fnamew,0,1,1,2)
self.grid.addWidget(self.fstatw,1,0,1,3)
self.grid.addWidget(self.visualizeBtn,1,4)
self.grid.addWidget(self.sendBtn,2,0)
self.grid.addWidget(self.pauseBtn,2,1)
self.grid.addWidget(self.cancelBtn,2,2)
self.grid.addWidget(QtGui.QLabel('Duration'),3,0)
self.grid.addWidget(self.durationw,3,1)
self.grid.addWidget(QtGui.QLabel('Remaining'),4,0)
self.grid.addWidget(self.estimationw,4,1)
self.setLayout(self.grid)
self.filethread = jFileThread()
self.filethread.serial_send_signal.connect(self.serial_send_signal)
def showDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Select file')
if (fname):
self.showFileDetails(fname)
self.sendBtn.setEnabled(True)
self.pauseBtn.setEnabled(True)
self.cancelBtn.setEnabled(True)
self.visualizeBtn.setEnabled(True)
def sendFile(self):
Jixi.jStatus.msg('sending')
self.filethread.setFilename(self.fnamew.text())
self.filethread.open()
self.filethread.setStatus('send')
self.filethread.start()
def pauseFile(self):
Jixi.jStatus.msg('pausing')
self.filethread.setStatus('pause')
def cancelFile(self):
Jixi.jStatus.msg('cancelling')
self.filethread.cancel()
def visualizeFile(self):
Jixi.jStatus.msg('visualizing')
config = QtCore.QCoreApplication.instance().config
program = config.get('Visualizer','program')
args = shlex.split(program + ' ' + str(self.fnamew.text()))
subprocess.Popen(args)
def showFileDetails(self, fname):
self.fnamew.setText(fname)
fs = os.stat(fname)
txt = 'Size: '
txt += '<b>' + str(fs.st_size/1024) + ' Kb</b>'
txt += ' Date: '
txt += '<b>' + time.ctime(fs.st_mtime) + '</b>'
self.fstatw.setText(txt)
```
#### File: iixi/Jixi/jComPort.py
```python
from PyQt4 import QtCore
from PyQt4.QtCore import SIGNAL
import serial
import Jixi.jStatus
class jComPort(QtCore.QObject):
connected = QtCore.pyqtSignal(bool)
def __init__(self):
self.serial = serial.Serial()
#serial.Serial.__init__(self)
QtCore.QObject.__init__(self)
def __del__(self):
if self.isOpen(): self.serial.close()
def open(self):
self.serial.port = 'jport'
try:
#super(jComPort,self).open()
self.serial.open()
except serial.SerialException as e:
Jixi.jStatus.error(e.message)
except ValueError as e:
Jixi.jStatus.error(e.message)
# send signal up to dialog to show status
self.connected.emit(self.isOpen())
def read(self):
if (not self.isOpen()): self.open()
c = ''
try:
c = self.serial.read()
except serial.SerialException as e:
Jixi.jStatus.error(e.message)
except ValueError as e:
Jixi.jStatus.error(e.message)
return c
def write(self,txt):
if (not self.isOpen()): self.open()
#super(jComPort,self).write(txt)
self.serial.write(txt)
def isOpen(self):
return self.serial.isOpen()
```
#### File: iixi/Jixi/jFieldset.py
```python
from PyQt4 import QtGui, QtCore
class jFieldset(QtGui.QFrame):
def __init__(self, title=''):
super(jFieldset,self).__init__()
self.setFrameShadow(QtGui.QFrame.Sunken)
self.setFrameShape(QtGui.QFrame.Box)
self.setMidLineWidth(3)
vbox = QtGui.QVBoxLayout()
if (title):
font = QtGui.QFont("Helvetica [Cronyx]", 8)
label = QtGui.QLabel(title)
label.setFont(font)
#label.setStyleSheet("QLabel { background-color : red; color : blue; }")
vbox.addWidget(label, alignment = QtCore.Qt.AlignTop)
self.grid = QtGui.QGridLayout()
vbox.addLayout(self.grid)
self.setLayout(vbox)
```
|
{
"source": "jenniferliddle/jsquared",
"score": 3
}
|
#### File: jenniferliddle/jsquared/j2monitor.py
```python
import smtplib
import sys
import re
import subprocess
import urllib
import urllib2
''' Send email to <EMAIL> with new IP address'''
def sendMail(newip):
s = smtplib.SMTP('localhost')
msg = "Subject: New IP address for jtwo\nIP address for jtwo is: "+newip
s.sendmail("<EMAIL>",["<EMAIL>"],msg)
''' Update single DNS record '''
def updateDNS(values):
url = 'https://dnsapi.mythic-beasts.com/'
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
''' Update DNS records for jtwo.org and hydranet.co.uk
using the Mythic Beasts API
'''
def updateAllDNS(newip, password):
values = {'domain' : 'jtwo.org', 'password' : password, 'command' : 'REPLACE @ 86400 A '+newip }
updateDNS(values)
values = {'domain' : 'jtwo.org', 'password' : password, 'command' : 'REPLACE www 86400 A '+newip }
updateDNS(values)
values = {'domain' : 'hydranet.co.uk', 'password' : password, 'command' : 'REPLACE @ 86400 A '+newip }
updateDNS(values)
values = {'domain' : 'hydranet.co.uk', 'password' : password, 'command' : 'REPLACE www 86400 A '+newip }
updateDNS(values)
values = {'domain' : 'hydranet.co.uk', 'password' : password, 'command' : 'REPLACE dev 86400 A '+newip }
updateDNS(values)
'''
Parse syslog to extract the IP address of jtwo.org, and see if it has changed
'''
password = sys.argv[1]
verbose = (len(sys.argv) > 2 and sys.argv[2] == '-v')
txt = subprocess.check_output(["tail","-800","/var/log/syslog"])
m=re.search('\n.*root@jtwo.*\n(.*)\n',txt)
if (m and len(m.groups()) > 0):
x = m.group(0)
if verbose: print "Found : \n", x
m=re.search("client_address=(\d+\.\d+\.\d+\.\d+)",x)
if (m and len(m.groups()) > 0):
newip = m.group(1)
if verbose: print "Found newip:", newip
oldip = subprocess.check_output(["cat","/etc/jtwo_ip"])
if (oldip != newip):
sendMail(newip)
updateAllDNS(newip,password)
f = open("/etc/jtwo_ip","w")
f.write(newip)
f.close()
```
#### File: jenniferliddle/jsquared/timelapse.py
```python
import datetime
from time import sleep
from astral.sun import sun
from astral import LocationInfo
import picamera
import sys
import getopt
destDir = '.'
nightMode = False
def displayHelp():
print('timelapse.py: take photographs used for timelapse photography')
print('Usage: timelapse.py [options]')
print('Where [options] are one or more of:')
print(' -h --help Display this help message and exit.')
print(' -d --destDir Directory to store pictures.')
print(' Default is the current directory.')
print(' -n --night Include pictures taken at night.')
print(' Default is not to take pictures')
print(' between the hours of dusk to dawn')
print('')
try:
opts, args = getopt.getopt(sys.argv[1:],"hd:n",["destdir=","night"])
except getopt.GetoptError:
displayHelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '-?', '--help'):
displayHelp()
sys.exit(0)
    if opt in ('-d', '--destdir', '--destDir'):
destDir = arg
if opt in ('-n', '--night'):
nightMode = True
now = datetime.datetime.now()
filename = destDir + '/' + now.strftime("%Y%m%d_%H%M%S") + '.jpg'
# Find dusk and dawn
city = LocationInfo("London", "England", "Europe/London", 51.5, -0.116)
s = sun(city.observer, date=datetime.date.today())
dusk = s['dusk']
dawn = s['dawn']
now = datetime.datetime.now(dawn.tzinfo)
# is it dark?
dark = (now < dawn) or (now > dusk)
if not dark or nightMode:
c = picamera.PiCamera()
c.annotate_background = picamera.Color('black')
c.annotate_text = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sleep(5)
c.capture(filename)
c.close()
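# Illustrative invocation (not part of the original script; the path is hypothetical), e.g. from cron every minute:
#   * * * * * python3 timelapse.py -d /home/pi/timelapse -n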
```
|
{
"source": "jenniferlim07/dynamic-programming",
"score": 4
}
|
#### File: dynamic-programming/lib/max_subarray.py
```python
def max_sub_array(nums):
""" Returns the max subarray of the given list of numbers.
Returns 0 if nums is None or an empty list.
Time Complexity: O(n)
    Space Complexity: O(1)
"""
if not nums:
return 0
max_so_far = nums[0]
current_max = nums[0]
for i in range(1, len(nums)):
current_max = max(nums[i], current_max + nums[i])
max_so_far = max(max_so_far, current_max)
print(max_so_far)
return max_so_far
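# Example: max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) returns 6 (the subarray [4, -1, 2, 1]).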
```
|
{
"source": "JenniferMah/dx-automator",
"score": 2
}
|
#### File: dx-automator/examples/closed_items.py
```python
from functools import lru_cache
from common.admins import ADMINS
from common.issue import get_date_time, get_issues, Issue
from common.repos import ALL_REPOS
class ClosedItemsCollector:
def __init__(self):
self.community_issues = 0
self.community_prs = 0
self.admin_issues = 0
self.admin_prs = 0
def run(self, start_date: str, end_date: str) -> None:
for org in ALL_REPOS:
for repo in ALL_REPOS[org]:
self.process_repo(org, repo, start_date, end_date)
print(f'Closed community issues: {self.community_issues}')
print(f'Merged community PRs: {self.community_prs}')
print(f'Closed admin issues: {self.admin_issues}')
print(f'Merged admin PRs: {self.admin_prs}')
def process_repo(self, org: str, repo: str,
start_date: str, end_date: str) -> None:
start_date = get_date_time(start_date)
end_date = get_date_time(end_date)
issues = get_closed_items(org, repo)
for issue_json in issues:
issue = Issue(issue_json, end_date=end_date)
issue.process_events()
close_event = issue.merged if issue.is_pr else issue.closed
if close_event and start_date <= close_event['createdAt'] < end_date:
if issue.author in ADMINS:
if issue.is_pr:
self.admin_prs = self.admin_prs + 1
else:
self.admin_issues = self.admin_issues + 1
else:
if issue.is_pr:
self.community_prs = self.community_prs + 1
else:
self.community_issues = self.community_issues + 1
@lru_cache(maxsize=None)
def get_closed_items(org: str, repo: str):
fragments = """
... on Issue {
author {
login
}
createdAt
url
timelineItems(first: 100, itemTypes: [CLOSED_EVENT]) {
nodes {
__typename
... on ClosedEvent {
createdAt
}
}
}
}
... on PullRequest {
author {
login
}
createdAt
url
timelineItems(first: 100, itemTypes: [MERGED_EVENT]) {
nodes {
__typename
... on MergedEvent {
createdAt
}
}
}
}"""
return list(get_issues(org, repo, fragments, issue_state='closed'))
if __name__ == '__main__':
ClosedItemsCollector().run(start_date='2019-10-01',
end_date='2020-01-01')
ClosedItemsCollector().run(start_date='2020-01-01',
end_date='2020-04-01')
```
#### File: examples/common/git_hub_api.py
```python
import json
import os
import re
from typing import Iterator, Dict
import backoff
import github
import requests
from requests import Response
GRAPH_QL_URL = 'https://api.github.com/graphql'
def get_client() -> github.Github:
github_token = os.environ['GITHUB_TOKEN']
return github.Github(github_token)
def submit_graphql_query(query: str) -> Dict:
github_token = os.environ['GITHUB_TOKEN']
headers = {'Authorization': f'token {github_token}'}
response = post(GRAPH_QL_URL, json={'query': query}, headers=headers)
response = response.json()
if 'data' not in response:
print(json.dumps(response, indent=2))
return response['data']
def submit_graphql_search_query(query: str) -> Iterator[Dict]:
cursor = None
while True:
paged_query = substitute(query, {'cursor': f'after: "{cursor}"' if cursor else ''})
data = submit_graphql_query(paged_query)
search = data['search']
for node in search['nodes']:
yield node
page_info = search['pageInfo']
cursor = page_info['endCursor']
if not page_info['hasNextPage']:
break
@backoff.on_exception(backoff.expo,
requests.exceptions.RequestException,
# Keep trying on 403s which indicate rate-limiting.
giveup=lambda e: e.response.status_code != 403)
def post(url, **kwargs) -> Response:
response = requests.post(url, **kwargs)
response.raise_for_status()
return response
def substitute(target: str, values: Dict) -> str:
for name, value in values.items():
value = ' '.join(value) if isinstance(value, list) else value
target = target.replace(f'%{name}%', value)
return re.sub('%[^%]*%', '', target)
```
#### File: dx-automator/examples/sonar_cloud_metrics.py
```python
import argparse
from datetime import datetime
from typing import Any, Dict, Iterator, List
from datadog_api_client.v1.model.point import Point
from datadog_api_client.v1.model.series import Series
from common.datadog_api import DatadogApi
from common.repos import ALL_REPOS, Repo, get_repos
from common.sonar_cloud_api import Metrics, SonarCloudApi
from metrics import DatadogSeriesType
METRICS = [Metrics.LINES_TO_COVER, Metrics.UNCOVERED_LINES, Metrics.BRANCH_COVERAGE]
class SonarCloudMetricCollector:
def __init__(self, sonar_cloud_api: SonarCloudApi, datadog_api: DatadogApi):
self.sonar_cloud_api = sonar_cloud_api
self.datadog_api = datadog_api
def run(self, repos: List[Repo]) -> None:
series = []
for repo in repos:
measures = self.sonar_cloud_api.get_component_measures(repo.org, repo.name, METRICS)
if measures:
series += self.get_series(repo, measures)
print("Series data:", series)
self.datadog_api.submit_metrics(series)
def get_series(self, repo: Repo, measures: List[Dict[str, Any]]) -> Iterator[Series]:
for metric in METRICS:
measure = next((measure for measure in measures if measure['metric'] == metric), None)
if not measure:
print(f'Failed to find metric "{metric}" in measures for {repo}')
continue
yield Series(
metric=f'sonar_cloud.measures.{metric}',
type=f'{DatadogSeriesType.GAUGE}',
points=[Point([datetime.now().timestamp(), float(measure['value'])])],
tags=[f'org:{repo.org}', f'repo:{repo.org}/{repo.name}'],
)
def parse_args(command_args: List[str] = None) -> Dict[str, Any]:
parser = argparse.ArgumentParser(description='sonar-cloud-metrics')
parser.add_argument('--org', '-o', nargs='*',
help='if none specified, runs on all orgs',
default=[],
choices=ALL_REPOS.keys())
parser.add_argument('--include', '-i', nargs='*',
help='repos to include',
default=[],
choices=[repo.name for repo in get_repos()])
parser.add_argument('--exclude', '-e', nargs='*',
help='repos to exclude',
default=[],
choices=[repo.name for repo in get_repos()])
return vars(parser.parse_args(command_args))
if __name__ == '__main__':
parsed_args = parse_args()
collector = SonarCloudMetricCollector(SonarCloudApi(), DatadogApi())
collector.run(get_repos(parsed_args['org'], parsed_args['include'], parsed_args['exclude']))
```
#### File: dx-automator/examples/update_git_hub_labels.py
```python
from typing import List, Dict
from github import Github, Label as GitHubLabel
from github.Repository import Repository as GitHubRepository
from common.git_hub_api import get_client, submit_graphql_query
from common.labels import Label, get_labels
from common.repos import Repo, get_repos
class GitHubLabelManager:
def __init__(self, git_hub_client: Github):
self.client = git_hub_client
def update_github_labels(self, repos: List[Repo], labels: Dict[str, Label]) -> None:
error = None
for repo in repos:
full_repo_name = f'{repo.org}/{repo.name}'
print('--------------------------------------------------')
print('Checking labels: ' + full_repo_name)
github_repo = self.client.get_repo(full_repo_name, lazy=True)
try:
self.update_repo_labels(github_repo, labels)
except RuntimeError as err:
error = err
if error:
raise error
def update_repo_labels(self, github_repo: GitHubRepository, labels: Dict[str, Label]) -> None:
expected_labels = labels.copy()
labels_with_issues = []
# Wrap the labels in a list so all pages get pulled. Prevents pagination
# issues if we delete labels mid-iteration.
actual_labels = list(github_repo.get_labels())
for actual_label in actual_labels:
if actual_label.name in expected_labels:
self.update_label(actual_label, expected_labels)
else:
if not self.delete_label(actual_label, github_repo):
labels_with_issues.append(actual_label)
if labels_with_issues:
# Don't attempt to create new labels if we have existing labels that we
# can't delete; they may just need to be renamed.
raise RuntimeError('found unexpected labels with issues')
for label in expected_labels.values():
print('Creating label: ' + label.name)
github_repo.create_label(label.name, label.color, label.description)
def update_label(self, actual_label: GitHubLabel, expected_labels: Dict[str, Label]) -> None:
# Remove the found label from the expected list, so we don't try to
# create it later.
expected_label = expected_labels.pop(actual_label.name)
if not self.are_labels_equal(actual_label, expected_label):
print('Updating label: ' + actual_label.name)
actual_label.edit(expected_label.name, expected_label.color, expected_label.description)
def delete_label(self, actual_label: GitHubLabel, git_hub_repo: GitHubRepository) -> bool:
# If the label is attached to any issues (or PRs), we don't want to
# delete it.
if self.get_label_issue_count(git_hub_repo, actual_label) > 0:
print('Cannot delete label with issues: ' + actual_label.name)
return False
print('Deleting unused label: ' + actual_label.name)
actual_label.delete()
return True
def get_label_issue_count(self, git_hub_repo: GitHubRepository, label: GitHubLabel) -> int:
response = submit_graphql_query(f"""
query{{
repository(owner: "{git_hub_repo.organization.login}",
name: "{git_hub_repo.name}") {{
label(name: "{label.name}") {{
issues {{
totalCount
}}
pullRequests {{
totalCount
}}
}}
}}
}}""")
response_label = response['repository']['label']
return (response_label['issues']['totalCount'] +
response_label['pullRequests']['totalCount'])
def are_labels_equal(self, actual_label: GitHubLabel, expected_label: Label) -> bool:
return (actual_label.name == expected_label.name and
actual_label.color == expected_label.color and
actual_label.description == expected_label.description)
if __name__ == '__main__':
manager = GitHubLabelManager(get_client())
manager.update_github_labels(get_repos(), get_labels())
```
#### File: dx-automator/tests/test_repos.py
```python
import unittest
from examples.common.repos import ALL_REPOS, get_repos, is_repo_included
class TestRepos(unittest.TestCase):
def test_is_repo_included(self):
self.assertTrue(is_repo_included('twilio', 'twilio-cli', [], [], []))
self.assertTrue(is_repo_included('twilio', 'twilio-cli', ['twilio'], [], []))
self.assertTrue(is_repo_included('twilio', 'twilio-cli', [], ['twilio-cli'], []))
self.assertTrue(is_repo_included('twilio', 'twilio-cli', [], [], ['twilio-csharp']))
self.assertTrue(is_repo_included('twilio', 'twilio-cli', ['sendgrid'], ['twilio-cli'], []))
self.assertFalse(is_repo_included('twilio', 'twilio-cli', ['sendgrid'], [], []))
self.assertFalse(is_repo_included('twilio', 'twilio-cli', [], ['twilio-csharp'], []))
self.assertFalse(is_repo_included('twilio', 'twilio-cli', [], [], ['twilio-cli']))
def test_get_repos(self):
total_repos_length = len([repo for org in ALL_REPOS for repo in ALL_REPOS[org]])
self.assertEqual(total_repos_length, len(get_repos()))
self.assertEqual(1, len(get_repos(include_repos=['twilio-node'])))
self.assertTrue(all(repo.org == 'sendgrid' for repo in get_repos(include_orgs=['sendgrid'])))
```
|
{
"source": "jennifermarsman/AIforEarth-API-Development",
"score": 2
}
|
#### File: ai4e_api_tools/task_management/api_task.py
```python
import random, datetime, sys
print("Creating task manager.")
class TaskManager:
def __init__(self):
self.status_dict = {}
def GetTaskId(self):
id = str(random.randint(1, 10000))
while id in self.status_dict:
id = str(random.randint(1, 10000))
return id
def AddTask(self, request):
id = self.GetTaskId()
self.status_dict[id] = ('created', datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"), 'task')
ret = {}
ret['TaskId'] = id
ret['Status'] = self.status_dict[id][0]
ret['Timestamp'] = self.status_dict[id][1]
ret['Endpoint'] = self.status_dict[id][2]
return(ret)
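        # Example return value (illustrative): {'TaskId': '4821', 'Status': 'created',
        #                                       'Timestamp': '2020-01-01 12:00:00', 'Endpoint': 'task'}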
    def UpdateTaskStatus(self, taskId, status):
        if (taskId in self.status_dict):
            stat = self.status_dict[taskId]
            self.status_dict[taskId] = (status, stat[1], stat[2])
        else:
            # unknown task: record it with a fresh timestamp and the default endpoint
            self.status_dict[taskId] = (status, datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"), 'task')
    def AddPipelineTask(self, taskId, organization_moniker, version, api_name, body):
        next_url = organization_moniker + '/' + version + '/' + api_name
        self.UpdateTaskStatus(taskId, "Pipelining is not supported in a single node deployment, but the next service is: " + next_url)
        return "Pipelining is not supported in a single node deployment, but the next service is: " + next_url
def CompleteTask(self, taskId, status):
self.UpdateTaskStatus(taskId, status)
def FailTask(self, taskId, status):
self.UpdateTaskStatus(taskId, status)
def GetTaskStatus(self, taskId):
try:
if taskId in self.status_dict:
return self.status_dict[taskId]
else:
return "not found"
except:
print(sys.exc_info()[0])
```
|
{
"source": "jennifermf/towing-capacity",
"score": 4
}
|
#### File: jennifermf/towing-capacity/towcalculator.py
```python
def payloadcheck(availablepayload):
if availablepayload >= 0:
print("Available payload: {}".format(availablepayload))
return True
else:
print("Exceeded Payload! Over payload capacity by {}".format(availablepayload))
return False
# check if over/under GCVWR
def gcvwcheck(towcapacity, newgcvw):
if newgcvw <= towcapacity:
print("Acceptable! With combined weight of {}".format(newgcvw))
print("You are under your GCVWR by {}".format(towcapacity - newgcvw))
return True
else:
print("Exceeded GCVWR! Over capacity by {}".format(newgcvw - towcapacity))
return False
def getdata():
vehicle = input("What vehicle will be doing the towing? ")
# dry weight, or curb weight = unloaded vehicle weight, WITHOUT driver, passengers, or cargo
truckdrywt = int(input("Curb weight: "))
# gvwr = maximum allowed weight of a fully loaded vehicle (dry wt + driver + passengers + cargo)
gvwr = int(input("Gross Vehicle Weight Rating (GVWR): "))
# gcvw = curb weight + allowable payload + passenger weight + trailer weight
gcvwr = int(input("Gross Combined Vehicle Weight Rating (GCVWR): "))
# payload = maximum allowed weight of passengers + cargo + hitch
payload = int(input("Payload: "))
# tow capacity = defined by manufacturer, listed in owners manual or a manufacturer's tow guide
towcapacity = int(input("Tow capacity: "))
# passenger weight = driver + all human and non-human riders
passengerwt = int(input("Combined driver and passenger weight: "))
# cargo = everything in the vehicle that isn't sentient
cargo = int(input("Cargo weight inside the vehicle: "))
trailer = input("What trailer are you towing? ")
# trailer dry weight, or curb weight = unloaded weight, can be determined by a sticker on the trailer or by taking an unloaded trailer to a truck scale. Published unloaded weights may or may not include water/waste/propane
trailerdrywt = int(input("Trailer curb weight: "))
# trailer gcvw = dry weight + everything inside it
trailergvwr = int(input("Trailer Gross Vehicle Weight Rating (GVWR): "))
# hitch weight = defined by manufacturer, listed in manual or sticker on the trailer
hitchwt = int(input("Trailer hitch weight: "))
actualpayload = cargo + passengerwt + hitchwt
availablepayload = payload - actualpayload
newgvw = truckdrywt + actualpayload
newgcvw = trailergvwr + truckdrywt + actualpayload
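    # Worked example with hypothetical numbers: payload=1800, cargo=200, passengerwt=350, hitchwt=450
    # -> actualpayload = 1000, availablepayload = 800; with truckdrywt=5500 and trailergvwr=7000,
    # newgcvw = 7000 + 5500 + 1000 = 13500, which is then compared against towcapacity.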
print("\nWith the {} towing the {}: ".format(vehicle, trailer))
payloadcheck(availablepayload)
gcvwcheck(towcapacity, newgcvw)
def main():
print("==================================================")
print("Towing calculator.")
print("It does not matter if you use pounds or kilograms, just be consistent. \n")
getdata()
print("\nThis does not account for tires or gross axle weight ratings (GAWR).")
print("==================================================")
main()
''' inspiration for this came from Marc Leach's Excel worksheet here: http://www.keepyourdaydream.com/payload/ '''
```
|
{
"source": "jennifernazario/Mock_ATM_Machine",
"score": 4
}
|
#### File: jennifernazario/Mock_ATM_Machine/database.py
```python
import os
import validation
user_db_path = "data/user_record/"
def create(user_account_number, first_name, last_name, email_address, password):
# create a file
# name of the file will be account_number.txt
# add the user details to the file
# return true
# if saving to file fails, then delete created file
user_data = first_name + "," + last_name + "," + email_address + "," + password + "," + str(0)
if account_number_exists(user_account_number):
return False
if email_exists(email_address):
print("user already exists")
return False
    completion_state = False
    f = None
    try:
        # "x" mode creates the file and raises FileExistsError if it already exists
        f = open(user_db_path + str(user_account_number) + ".txt", "x")
    except FileExistsError:
        # the account file already exists; if it holds no data it is an empty leftover, so remove it
        file_contains_data = read(user_account_number)
        if not file_contains_data:
            delete(user_account_number)
    else:
        # if the file is created and there are no errors, add the user details to the file
        f.write(str(user_data))
        completion_state = True
    # always runs
    finally:
        if f:
            f.close()
    return completion_state
def read(user_account_number):
# find user with account number
# fetch content of the file
valid_account_number = validation.account_number_validation(user_account_number)
try:
        # a valid 10-digit account number: build the record file name from it
        if valid_account_number:
            f = open(user_db_path + str(user_account_number) + ".txt", "r")
        # otherwise treat the argument as a file name inside the user record directory
        else:
            f = open(user_db_path + user_account_number, "r")
    except FileNotFoundError:
print("user not found")
except FileExistsError:
print("user does not exist")
except TypeError:
print("Invalid account number format")
else:
# if there are no errors, read the file and send back to the user
return f.readline()
return False
def update(user_account_number):
print("update user record")
# find user with account number
# fetch the content of the file
# update the content of the file
# save the file
# return true
def delete(user_account_number):
# if the user exists, delete user account from the database
# find user with account number
# delete the user record (file)
# return true
successfully_deleted = False
if os.path.exists(user_db_path + str(user_account_number) + ".txt"):
try:
os.remove(user_db_path + str(user_account_number) + ".txt")
successfully_deleted = True
except FileNotFoundError:
print("User not found.")
finally:
return successfully_deleted
# finds user based on account number
def email_exists(email):
# lists every user in the database
all_users = os.listdir(user_db_path)
for user in all_users:
user_list = str.split(read(user), ",")
if email in user_list:
return True
return False
def account_number_exists(account_number):
# lists every user in the database
all_users = os.listdir(user_db_path)
for user in all_users:
if user == str(account_number) + ".txt":
return True
return False
def authenticated_user(account_number, password):
if account_number_exists(account_number):
user = str.split(read(account_number), ",")
if password == user[3]:
return user
return False
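# Illustrative example: authenticated_user(1234567890, 'secret') returns the stored record list
# [first_name, last_name, email, password, balance] when the password matches, otherwise False.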
print(account_number_exists(9054003195))
# print(email_exists("<EMAIL>"))
# print(read({'one': 'two'}))
```
#### File: jennifernazario/Mock_ATM_Machine/validation.py
```python
def account_number_validation(account_number):
# Check to see if the account number is not empty.
# Check to see if the account number is 10 digits.
# Check if the account number is an integer.
# check if account number exists
if account_number:
# casting: converts account number into a string because we cannot check the length of an integer
# if account number exists, checks data type, and then check if account number is 10 strings long
try:
# checks if datatype is accurate
# casting: tries to convert account number string to integer
int(account_number)
if len(str(account_number)) == 10:
# if we successfully convert the string to an integer, return true
return True
# if we are unable to convert into an integer, we catch the error
except ValueError:
return False
except TypeError:
return False
return False
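# Illustrative examples: account_number_validation(1234567890) -> True (10-digit integer),
# account_number_validation("123") -> False (wrong length), account_number_validation("abc") -> False.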
# def registration_input_validation():
# check if there is a list of inputs
# check each item in the list and make sure they are the correct data types
```
|
{
"source": "jennifernolan/FYP-Development-Navigation-Assistant",
"score": 3
}
|
#### File: Final_Dissertation_Code/Unit_Tests/TestObjectScores.py
```python
import unittest
import numpy as np
import ObjectDetection as ObjectClass
class TestObjectScores(unittest.TestCase):
obj = ObjectClass.ObjectDetection()
def setUp(self):
pass
def tearDown(self):
pass
def test_counter_zero(self):
classes = []
num = np.array([0.])
scores = [[0.56, 0 ]]
boxes = []
self.obj.instructions(classes, num, 0, scores, boxes)
def test_counter_one(self):
classes = []
num = np.array([0.])
scores = [[0.56, 0]]
boxes = []
self.obj.instructions(classes, num, 1, scores, boxes)
def test_score_less_than_70(self):
classes = []
num = np.array([1.])
scores = [[0.56, 0]]
boxes = []
self.obj.instructions(classes, num, 1, scores, boxes)
def test_score_greater_than_70(self):
classes = [[1]]
num = np.array([1.])
scores = [[0.86, 0]]
boxes = [[[0., 0., 0., 0.]]]
self.obj.instructions(classes, num, 1, scores, boxes)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jennifernolan/Software-for-the-Global-Market",
"score": 3
}
|
#### File: lab_3/boards/tests.py
```python
from django.core.urlresolvers import reverse
from django.urls import resolve
from django.test import TestCase
from .views import home
class HomeTests(TestCase):
def test_home_view_status_code(self):
url = reverse('home')
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
#we are making use of the resolve function.
#Django uses it to match a requested URL with a list of URLs listed in the urls.py module.
#This test will make sure the URL /, which is the root URL, is returning the home view.
def test_home_url_resolves_home_view(self):
view = resolve('/')
self.assertEquals(view.func, home)
```
#### File: assignmentproj/templatetags/form_tags.py
```python
from django import template
register = template.Library()
@register.filter
def field_type(bound_field):
return bound_field.field.widget.__class__.__name__
@register.filter
def input_class(bound_field):
    css_class = ''
    if bound_field.form.is_bound:
        if bound_field.errors:
            css_class = 'is-invalid'
        elif field_type(bound_field) != 'PasswordInput':
            css_class = 'is-valid'
    return 'form-control {}'.format(css_class)
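# Illustrative template usage (not part of the original file; 'form.username' is a hypothetical field):
#   {% load form_tags %}
#   <input type="text" class="{{ form.username|input_class }}">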
```
|
{
"source": "jenniferplusplus/pynamo-factories",
"score": 3
}
|
#### File: pynamo-factories/pynamodb_factories/fields.py
```python
class Use:
def __init__(self, call, *args, **kwargs):
self.call = call
self.args = args
self.kwargs = kwargs
def to_value(self):
return self.call(*self.args, **self.kwargs)
pass
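# Illustrative construction (not part of the original module): Use(random.randint, 1, 10) defers the call,
# and to_value() later evaluates random.randint(1, 10), presumably when the factory resolves the field.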
class Required:
pass
class Ignored:
pass
```
#### File: pynamo-factories/tests/test_factory.py
```python
from datetime import datetime
from pytest import mark
from pynamodb.models import Model
from pynamodb_factories.factory import PynamoModelFactory
from tests.test_models.models import EmptyModel, NumberModel, BinaryModel, BooleanModel, UnicodeModel, JsonModel, \
VersionModel, TtlModel, DateModel, NullModel, MapModel, ListModel, ComplexMap, MapListMapModel
t = int(datetime.utcnow().timestamp())
@mark.parametrize('seed', range(t, t + 20))
# @mark.parametrize('seed', [1647351215])
class TestFactories:
def test_model(self, seed):
class EmptyFactory(PynamoModelFactory):
__model__ = EmptyModel
pass
EmptyFactory.set_random_seed(seed)
actual = EmptyFactory.build()
assert actual
assert actual.serialize() is not None
assert isinstance(actual, Model)
assert isinstance(actual, EmptyModel)
pass
def test_number_attributes(self, seed):
class NumberFactory(PynamoModelFactory):
__model__ = NumberModel
pass
NumberFactory.set_random_seed(seed)
actual = NumberFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, "num")
assert isinstance(actual.num, int)
assert hasattr(actual, "nums")
for n in actual.nums if actual.nums else range(0):
assert isinstance(n, int)
pass
def test_binary_attributes(self, seed):
class BinaryFactory(PynamoModelFactory):
__model__ = BinaryModel
pass
BinaryFactory.set_random_seed(seed)
actual = BinaryFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, "bin")
assert isinstance(actual.bin, bytes)
assert hasattr(actual, 'bins')
for b in actual.bins if actual.bins else range(0):
assert isinstance(b, bytes)
pass
def test_boolean_attributes(self, seed):
class BoolFactory(PynamoModelFactory):
__model__ = BooleanModel
pass
BoolFactory.set_random_seed(seed)
actual = BoolFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, "boolean")
assert isinstance(actual.boolean, bool)
pass
def test_unicode_attributes(self, seed):
class UnicodeFactory(PynamoModelFactory):
__model__ = UnicodeModel
pass
UnicodeFactory.set_random_seed(seed)
actual = UnicodeFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, "line")
assert isinstance(actual.line, str)
assert hasattr(actual, "lines")
for line in actual.lines if actual.lines else range(0):
assert isinstance(line, str)
pass
def test_json_attributes(self, seed):
class JsonFactory(PynamoModelFactory):
__model__ = JsonModel
pass
JsonFactory.set_random_seed(seed)
actual = JsonFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, "json")
assert isinstance(actual.json, dict)
pass
def test_version_attribute(self, seed):
class VersionFactory(PynamoModelFactory):
__model__ = VersionModel
pass
VersionFactory.set_random_seed(seed)
actual = VersionFactory.build()
assert actual
assert actual.serialize()
assert hasattr(actual, 'ver')
assert isinstance(actual.ver, int)
pass
def test_ttl_attributes(self, seed):
class TtlFactory(PynamoModelFactory):
__model__ = TtlModel
pass
TtlFactory.set_random_seed(seed)
actual = TtlFactory.build()
assert actual
assert actual.serialize()
assert hasattr(actual, 'ttl')
assert isinstance(actual.ttl, datetime)
pass
def test_date_attributes(self, seed):
class DateFactory(PynamoModelFactory):
__model__ = DateModel
pass
DateFactory.set_random_seed(seed)
actual = DateFactory.build()
assert actual
assert actual.serialize()
assert hasattr(actual, 'date')
assert isinstance(actual.date, datetime)
pass
def test_null_attributes(self, seed):
class NullFactory(PynamoModelFactory):
__model__ = NullModel
pass
NullFactory.set_random_seed(seed)
actual = NullFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, 'null')
assert actual.null is None
pass
def test_map_attributes(self, seed):
class MapFactory(PynamoModelFactory):
__model__ = MapModel
pass
MapFactory.set_random_seed(seed)
actual = MapFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, 'map')
assert actual.map is not None
assert hasattr(actual, 'dyn_map')
assert actual.dyn_map is not None
assert hasattr(actual, 'map_of')
assert isinstance(actual.map_of, ComplexMap)
pass
def test_list_attributes(self, seed):
class ListFactory(PynamoModelFactory):
__model__ = ListModel
pass
ListFactory.set_random_seed(seed)
actual = ListFactory.build()
assert actual
assert actual.serialize() is not None
assert hasattr(actual, 'list')
assert actual.list is not None
assert hasattr(actual, 'list_of')
for each in actual.list_of if actual.list_of else range(0):
assert isinstance(each, ComplexMap)
pass
def test_build_args(self, seed):
class MapFactory(PynamoModelFactory):
__model__ = MapModel
pass
build_args = {
'map_of': {
'name': '<NAME>',
'email': '<EMAIL>',
'birthday': datetime(1990, 1, 1, 12, 0, 0)
}
}
MapFactory.set_random_seed(seed)
actual = MapFactory.build(**build_args)
assert actual.serialize() is not None
assert actual.map_of.name == '<NAME>'
assert actual.map_of.email == '<EMAIL>'
assert actual.map_of.birthday == datetime(1990, 1, 1, 12, 0, 0)
def test_nested_build_args(self, seed):
class MapListMapFactory(PynamoModelFactory):
__model__ = MapListMapModel
pass
build_args = {
'map': {'arr': [
{'name': 'one', 'birthday': datetime(1990, 1, 1, 12, 0, 0)},
{'name': 'two', 'email': '<EMAIL>'},
]}
}
MapListMapFactory.set_random_seed(seed)
actual = MapListMapFactory.build(**build_args)
assert actual.serialize() is not None
def test_extra_build_args(self, seed):
class MapFactory(PynamoModelFactory):
__model__ = UnicodeModel
pass
build_args = {
'dne': 'some value'
}
MapFactory.set_random_seed(seed)
actual = MapFactory.build(**build_args)
assert actual.serialize() is not None
```
|
{
"source": "jennifer-richberg/jennifer-richberg.githhub.io",
"score": 4
}
|
#### File: jennifer-richberg/jennifer-richberg.githhub.io/Text-Base Adventure Game.py
```python
def scene1():
    import time
    print("""Enter in story""")  # placeholder: scene 1 story text
    c1 = input()
    time.sleep(2)
    ans = 'incorrect'
    while ans == 'incorrect':
        if c1.upper() == "ENTER CHOICE 1":  # placeholder: first choice
            print("Continue story")  # placeholder
            ans = 'correct'
        elif c1.upper() == "ENTER CHOICE 2":  # placeholder: second choice
            print("Continue story")  # placeholder
            ans = 'correct'
            scene2()
        else:
            print("Enter the correct choice!")
            c1 = input()

def scene2():
    import time
    print("""Enter in story""")  # placeholder: scene 2 story text
    c2 = input()
    time.sleep(2)
    ans = 'incorrect'
    while ans == 'incorrect':
        if c2.upper() == "ENTER CHOICE 1":  # placeholder: first choice
            print("Continue story")  # placeholder
            ans = 'correct'
        elif c2.upper() == "ENTER CHOICE 2":  # placeholder: second choice
            print("Continue story")  # placeholder
            ans = 'correct'
            scene2()
        else:
            print("Enter the correct choice!")
            c2 = input()

def scene3():
    import time
    print("""Enter in story""")  # placeholder: scene 3 story text
    c3 = input()
    time.sleep(2)
    ans = 'incorrect'
    while ans == 'incorrect':
        if c3.upper() == "ENTER CHOICE 1":  # placeholder: first choice
            print("Continue story")  # placeholder
            ans = 'correct'
        elif c3.upper() == "ENTER CHOICE 2":  # placeholder: second choice
            print("Continue story")  # placeholder
            ans = 'correct'
            scene2()
        else:
            print("Enter the correct choice!")
            c3 = input()
    time.sleep(2)
    pick = 'True'   # placeholder value; the logic that should set 'pick' from the player's choices is not filled in
    scene4(pick)    # assumed hand-off to scene4, which expects a pick_value argument

def scene4(pick_value):
    import time
    print("Enter story")  # placeholder: scene 4 story text
    if pick_value == 'True':
        time.sleep(2)
        print("Enter story")  # placeholder
        time.sleep(2)
        print("Enter story")  # placeholder
    elif pick_value == 'False':
        print("Enter story")  # placeholder
```
|
{
"source": "JenniferStamm/EvaP",
"score": 2
}
|
#### File: contributor/tests/test_views.py
```python
from model_mommy import mommy
from evap.evaluation.models import Course, UserProfile
from evap.evaluation.tests.tools import ViewTest, course_with_responsible_and_editor
TESTING_COURSE_ID = 2
class TestContributorView(ViewTest):
url = '/contributor/'
test_users = ['editor', 'responsible']
@classmethod
def setUpTestData(cls):
course_with_responsible_and_editor()
class TestContributorSettingsView(ViewTest):
url = '/contributor/settings'
test_users = ['editor', 'responsible']
@classmethod
def setUpTestData(cls):
course_with_responsible_and_editor()
def test_save_settings(self):
user = mommy.make(UserProfile)
page = self.get_assert_200(self.url, "responsible")
form = page.forms["settings-form"]
form["delegates"] = [user.pk]
form.submit()
self.assertEquals(list(UserProfile.objects.get(username='responsible').delegates.all()), [user])
class TestContributorCourseView(ViewTest):
url = '/contributor/course/%s' % TESTING_COURSE_ID
test_users = ['editor', 'responsible']
@classmethod
def setUpTestData(cls):
cls.course = course_with_responsible_and_editor(course_id=TESTING_COURSE_ID)
def test_wrong_state(self):
self.course.revert_to_new()
self.course.save()
self.get_assert_403(self.url, 'responsible')
class TestContributorCoursePreviewView(ViewTest):
url = '/contributor/course/%s/preview' % TESTING_COURSE_ID
test_users = ['editor', 'responsible']
@classmethod
def setUpTestData(cls):
cls.course = course_with_responsible_and_editor(course_id=TESTING_COURSE_ID)
def setUp(self):
self.course = Course.objects.get(pk=TESTING_COURSE_ID)
def test_wrong_state(self):
self.course.revert_to_new()
self.course.save()
self.get_assert_403(self.url, 'responsible')
class TestContributorCourseEditView(ViewTest):
url = '/contributor/course/%s/edit' % TESTING_COURSE_ID
test_users = ['editor', 'responsible']
@classmethod
def setUpTestData(cls):
cls.course = course_with_responsible_and_editor(course_id=TESTING_COURSE_ID)
def setUp(self):
self.course = Course.objects.get(pk=TESTING_COURSE_ID)
def test_not_authenticated(self):
"""
Asserts that an unauthorized user gets redirected to the login page.
"""
response = self.app.get(self.url)
self.assertRedirects(response, '/?next=/contributor/course/%s/edit' % TESTING_COURSE_ID)
def test_wrong_usergroup(self):
"""
Asserts that a user who is not part of the usergroup
that is required for a specific view gets a 403.
Regression test for #483
"""
self.get_assert_403(self.url, 'student')
def test_wrong_state(self):
"""
Asserts that a contributor attempting to edit a course
that is in a state where editing is not allowed gets a 403.
"""
self.course.editor_approve()
self.course.save()
self.get_assert_403(self.url, 'responsible')
def test_contributor_course_edit(self):
"""
Tests whether the "save" button in the contributor's course edit view does not
change the course's state, and that the "approve" button does that.
"""
page = self.get_assert_200(self.url, user="responsible")
form = page.forms["course-form"]
form["vote_start_datetime"] = "2098-01-01 11:43:12"
form["vote_end_date"] = "2099-01-01"
form.submit(name="operation", value="save")
self.course = Course.objects.get(pk=self.course.pk)
self.assertEqual(self.course.state, "prepared")
form.submit(name="operation", value="approve")
self.course = Course.objects.get(pk=self.course.pk)
self.assertEqual(self.course.state, "editor_approved")
# test what happens if the operation is not specified correctly
response = form.submit(expect_errors=True)
self.assertEqual(response.status_code, 403)
def test_contributor_course_edit_preview(self):
"""
Asserts that the preview button either renders a preview or shows an error.
"""
page = self.app.get(self.url, user="responsible")
form = page.forms["course-form"]
form["vote_start_datetime"] = "2099-01-01 11:43:12"
form["vote_end_date"] = "2098-01-01"
response = form.submit(name="operation", value="preview")
self.assertNotIn("preview_modal", response)
self.assertIn("The preview could not be rendered", response)
form["vote_start_datetime"] = "2098-01-01 11:43:12"
form["vote_end_date"] = "2099-01-01"
response = form.submit(name="operation", value="preview")
self.assertIn("preview_modal", response)
self.assertNotIn("The preview could not be rendered", response)
```
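The tests above rely on helpers such as `self.app`, `get_assert_200` and `get_assert_403` from the `ViewTest` base class imported from `evap.evaluation.tests.tools`, whose implementation is not part of this excerpt. A minimal sketch of what such a base class could look like on top of django-webtest, offered purely as an assumption for readability:
```python
# Hypothetical sketch of the ViewTest base class used above; the real class
# lives in evap.evaluation.tests.tools and may differ in detail.
from django_webtest import WebTest


class ViewTest(WebTest):
    url = '/'
    test_users = []

    def test_check_response_code_200(self):
        # every configured user should be able to open the page
        for user in self.test_users:
            self.get_assert_200(self.url, user)

    def get_assert_200(self, url, user):
        response = self.app.get(url, user=user)
        self.assertEqual(response.status_code, 200)
        return response

    def get_assert_403(self, url, user):
        self.app.get(url, user=user, status=403)
```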
#### File: management/commands/refresh_results_cache.py
```python
from django.core.management.base import BaseCommand
from django.core.serializers.base import ProgressBar
from django.core.cache import cache
from evap.evaluation.models import Course
from evap.results.tools import calculate_results
class Command(BaseCommand):
args = ''
help = 'Clears the cache and pre-warms it with the results of all courses'
requires_migrations_checks = True
def handle(self, *args, **options):
self.stdout.write("Clearing cache...")
cache.clear()
total_count = Course.objects.count()
self.stdout.write("Calculating results for all courses...")
self.stdout.ending = None
progress_bar = ProgressBar(self.stdout, total_count)
for counter, course in enumerate(Course.objects.all()):
progress_bar.update(counter + 1)
calculate_results(course)
self.stdout.write("Results cache has been refreshed.\n")
```
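This command is normally run as `python manage.py refresh_results_cache`, for example from a cronjob or a deployment script. It can also be triggered from Python code via Django's standard management API; a small sketch:
```python
# Sketch: refreshing the results cache programmatically, e.g. in a deploy hook.
from django.core.management import call_command

call_command('refresh_results_cache')
```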
#### File: evaluation/migrations/0002_initial_data.py
```python
from django.db import models, migrations
from django.contrib.auth.models import Group
def insert_emailtemplates(apps, schema_editor):
emailtemplates = [
("Lecturer Review Notice", "[EvaP] New Course ready for approval"),
("Student Reminder", "[EvaP] Evaluation period is ending"),
("Publishing Notice", "[EvaP] A course has been published"),
("Login Key Created", "[EvaP] A login key was created"),
]
EmailTemplate = apps.get_model("evaluation", "EmailTemplate")
for name, subject in emailtemplates:
if not EmailTemplate.objects.filter(name=name).exists():
EmailTemplate.objects.create(name=name, subject=subject, body="")
Group.objects.create(name="Staff")
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0001_initial'),
]
operations = [
migrations.RunPython(insert_emailtemplates),
]
```
#### File: evaluation/migrations/0032_populate_rating_answer_counters.py
```python
from __future__ import unicode_literals
from django.db import models, migrations
def populateRatingAnswerCounters(apps, schema_editor):
LikertAnswerCounter = apps.get_model('evaluation', 'LikertAnswerCounter')
GradeAnswerCounter = apps.get_model('evaluation', 'GradeAnswerCounter')
RatingAnswerCounter = apps.get_model('evaluation', 'RatingAnswerCounter')
for counter in list(LikertAnswerCounter.objects.all()) + list(GradeAnswerCounter.objects.all()):
RatingAnswerCounter.objects.create(question=counter.question, contribution=counter.contribution, answer=counter.answer, count=counter.count)
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0031_add_rating_answer_counter'),
]
operations = [
migrations.RunPython(populateRatingAnswerCounters),
]
```
#### File: evaluation/migrations/0045_populate_course_types.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def populateCourseTypes(apps, schema_editor):
Course = apps.get_model('evaluation', 'Course')
CourseType = apps.get_model('evaluation', 'CourseType')
for course in Course.objects.all():
course.type = CourseType.objects.get(name_de=course.type_old)
course.save()
def revertCourseTypes(apps, schema_editor):
Course = apps.get_model('evaluation', 'Course')
CourseType = apps.get_model('evaluation', 'CourseType')
for course in Course.objects.all():
course.type_old = course.type.name_de
course.save()
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0044_add_course_type_model'),
]
operations = [
migrations.RunPython(populateCourseTypes, reverse_code=revertCourseTypes),
]
```
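Because the migration defines `reverse_code=revertCourseTypes`, it can be unapplied cleanly. A short sketch of rolling the `evaluation` app back to the preceding migration (name taken from the `dependencies` list above), which runs the revert function:
```python
# Sketch: unapplying 0045 so revertCourseTypes() copies the type names back
# into the old field. Equivalent to
# "python manage.py migrate evaluation 0044_add_course_type_model".
from django.core.management import call_command

call_command('migrate', 'evaluation', '0044_add_course_type_model')
```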
#### File: evaluation/migrations/0055_reviewer_group.py
```python
from django.contrib.auth.models import Group
from django.db import models, migrations
def add_group(apps, schema_editor):
Group.objects.create(name="Reviewer")
def delete_group(apps, schema_editor):
Group.objects.get(name="Reviewer").delete()
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0054_userprofile_language'),
]
operations = [
migrations.RunPython(add_group, reverse_code=delete_group),
]
```
#### File: evap/evaluation/models.py
```python
from datetime import datetime, date, timedelta
import logging
import random
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group, PermissionsMixin
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.db import models, transaction
from django.db.models import Count, Q
from django.dispatch import Signal, receiver
from django.template import Context, Template
from django.template.base import TemplateEncodingError, TemplateSyntaxError
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django_fsm import FSMField, transition
from django_fsm.signals import post_transition
# see evaluation.meta for the use of Translate in this file
from evap.evaluation.meta import LocalizeModelBase, Translate
from evap.evaluation.tools import date_to_datetime
from evap.settings import EVALUATION_END_OFFSET_HOURS
logger = logging.getLogger(__name__)
# for converting state into student_state
STUDENT_STATES_NAMES = {
'new': 'upcoming',
'prepared': 'upcoming',
'editor_approved': 'upcoming',
'approved': 'upcoming',
'in_evaluation': 'in_evaluation',
'evaluated': 'evaluationFinished',
'reviewed': 'evaluationFinished',
'published': 'published'
}
class NotArchiveable(Exception):
"""An attempt has been made to archive something that is not archiveable."""
pass
class Semester(models.Model, metaclass=LocalizeModelBase):
"""Represents a semester, e.g. the winter term of 2011/2012."""
name_de = models.CharField(max_length=1024, unique=True, verbose_name=_("name (german)"))
name_en = models.CharField(max_length=1024, unique=True, verbose_name=_("name (english)"))
name = Translate
is_archived = models.BooleanField(default=False, verbose_name=_("is archived"))
created_at = models.DateField(verbose_name=_("created at"), auto_now_add=True)
class Meta:
ordering = ('-created_at', 'name_de')
verbose_name = _("semester")
verbose_name_plural = _("semesters")
def __str__(self):
return self.name
@property
def can_staff_delete(self):
return all(course.can_staff_delete for course in self.course_set.all())
@property
def is_archiveable(self):
return not self.is_archived and all(course.is_archiveable for course in self.course_set.all())
@transaction.atomic
def archive(self):
if not self.is_archiveable:
raise NotArchiveable()
for course in self.course_set.all():
course._archive()
self.is_archived = True
self.save()
@classmethod
def get_all_with_published_courses(cls):
return cls.objects.filter(course__state="published").distinct()
@classmethod
def active_semester(cls):
return cls.objects.order_by("created_at").last()
class Questionnaire(models.Model, metaclass=LocalizeModelBase):
"""A named collection of questions."""
name_de = models.CharField(max_length=1024, unique=True, verbose_name=_("name (german)"))
name_en = models.CharField(max_length=1024, unique=True, verbose_name=_("name (english)"))
name = Translate
description_de = models.TextField(verbose_name=_("description (german)"), blank=True, null=True)
description_en = models.TextField(verbose_name=_("description (english)"), blank=True, null=True)
description = Translate
public_name_de = models.CharField(max_length=1024, verbose_name=_("display name (german)"))
public_name_en = models.CharField(max_length=1024, verbose_name=_("display name (english)"))
public_name = Translate
teaser_de = models.TextField(verbose_name=_("teaser (german)"), blank=True, null=True)
teaser_en = models.TextField(verbose_name=_("teaser (english)"), blank=True, null=True)
teaser = Translate
index = models.IntegerField(verbose_name=_("ordering index"), default=0)
is_for_contributors = models.BooleanField(verbose_name=_("is for contributors"), default=False)
staff_only = models.BooleanField(verbose_name=_("display for staff only"), default=False)
obsolete = models.BooleanField(verbose_name=_("obsolete"), default=False)
class Meta:
ordering = ('is_for_contributors', 'index', 'name_de')
verbose_name = _("questionnaire")
verbose_name_plural = _("questionnaires")
def __str__(self):
return self.name
def __lt__(self, other):
return (self.is_for_contributors, self.index) < (other.is_for_contributors, other.index)
def __gt__(self, other):
return (self.is_for_contributors, self.index) > (other.is_for_contributors, other.index)
@property
def can_staff_edit(self):
return not self.contributions.exclude(course__state='new').exists()
@property
def can_staff_delete(self):
return not self.contributions.exists()
@property
def text_questions(self):
return [question for question in self.question_set.all() if question.is_text_question]
@property
def rating_questions(self):
return [question for question in self.question_set.all() if question.is_rating_question]
SINGLE_RESULT_QUESTIONNAIRE_NAME = "Single result"
@classmethod
def single_result_questionnaire(cls):
return cls.objects.get(name_en=cls.SINGLE_RESULT_QUESTIONNAIRE_NAME)
class Degree(models.Model, metaclass=LocalizeModelBase):
name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"), unique=True)
name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"), unique=True)
name = Translate
order = models.IntegerField(verbose_name=_("degree order"), default=-1)
class Meta:
ordering = ['order', ]
def __str__(self):
return self.name
def can_staff_delete(self):
if self.pk is None:
return True
return not self.courses.all().exists()
class CourseType(models.Model, metaclass=LocalizeModelBase):
"""Model for the type of a course, e.g. a lecture"""
name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"), unique=True)
name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"), unique=True)
name = Translate
class Meta:
ordering = ['name_de', ]
def __str__(self):
return self.name
def __lt__(self, other):
return self.name_de < other.name_de
def can_staff_delete(self):
if not self.pk:
return True
return not self.courses.all().exists()
class Course(models.Model, metaclass=LocalizeModelBase):
"""Models a single course, e.g. the Math 101 course of 2002."""
state = FSMField(default='new', protected=True)
semester = models.ForeignKey(Semester, models.PROTECT, verbose_name=_("semester"))
name_de = models.CharField(max_length=1024, verbose_name=_("name (german)"))
name_en = models.CharField(max_length=1024, verbose_name=_("name (english)"))
name = Translate
# type of course: lecture, seminar, project
type = models.ForeignKey(CourseType, models.PROTECT, verbose_name=_("course type"), related_name="courses")
# e.g. Bachelor, Master
degrees = models.ManyToManyField(Degree, verbose_name=_("degrees"), related_name="courses")
# default is True as that's the more restrictive option
is_graded = models.BooleanField(verbose_name=_("is graded"), default=True)
# defines whether results can only be seen by contributors and participants
is_private = models.BooleanField(verbose_name=_("is private"), default=False)
# graders can set this to True, then the course will be handled as if final grades have already been uploaded
gets_no_grade_documents = models.BooleanField(verbose_name=_("gets no grade documents"), default=False)
# whether participants must vote to qualify for reward points
is_required_for_reward = models.BooleanField(verbose_name=_("is required for reward"), default=True)
# students that are allowed to vote
participants = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_("participants"), blank=True, related_name='courses_participating_in')
_participant_count = models.IntegerField(verbose_name=_("participant count"), blank=True, null=True, default=None)
# students that already voted
voters = models.ManyToManyField(settings.AUTH_USER_MODEL, verbose_name=_("voters"), blank=True, related_name='courses_voted_for')
_voter_count = models.IntegerField(verbose_name=_("voter count"), blank=True, null=True, default=None)
# when the evaluation takes place
vote_start_datetime = models.DateTimeField(verbose_name=_("start of evaluation"))
vote_end_date = models.DateField(verbose_name=_("last day of evaluation"))
# who last modified this course
last_modified_time = models.DateTimeField(auto_now=True)
last_modified_user = models.ForeignKey(settings.AUTH_USER_MODEL, models.SET_NULL, null=True, blank=True, related_name="course_last_modified_user+")
course_evaluated = Signal(providing_args=['request', 'semester'])
class Meta:
ordering = ('name_de',)
unique_together = (
('semester', 'name_de'),
('semester', 'name_en'),
)
verbose_name = _("course")
verbose_name_plural = _("courses")
def __str__(self):
return self.name
def save(self, *args, **kw):
super().save(*args, **kw)
# make sure there is a general contribution
if not self.general_contribution:
self.contributions.create(contributor=None)
del self.general_contribution # invalidate cached property
        assert self.vote_end_date >= self.vote_start_datetime.date()
@property
def is_fully_reviewed(self):
return not self.open_textanswer_set.exists()
@property
def vote_end_datetime(self):
# The evaluation ends at EVALUATION_END_OFFSET_HOURS:00 of the day AFTER self.vote_end_date.
return date_to_datetime(self.vote_end_date) + timedelta(hours=24 + EVALUATION_END_OFFSET_HOURS)
@property
def is_in_evaluation_period(self):
now = datetime.now()
return self.vote_start_datetime <= now <= self.vote_end_datetime
@property
def general_contribution_has_questionnaires(self):
return self.general_contribution and (self.is_single_result or self.general_contribution.questionnaires.count() > 0)
@property
def all_contributions_have_questionnaires(self):
return self.general_contribution and (self.is_single_result or all(self.contributions.annotate(Count('questionnaires')).values_list("questionnaires__count", flat=True)))
def can_user_vote(self, user):
"""Returns whether the user is allowed to vote on this course."""
return (self.state == "in_evaluation"
and self.is_in_evaluation_period
and user in self.participants.all()
and user not in self.voters.all())
def can_user_see_course(self, user):
if user.is_reviewer:
return True
if self.is_user_contributor_or_delegate(user):
return True
if self.is_private and user not in self.participants.all():
return False
return True
def can_user_see_results(self, user):
if user.is_reviewer:
return True
if self.state == 'published':
if self.is_user_contributor_or_delegate(user):
return True
if not self.can_publish_grades:
return False
return self.can_user_see_course(user)
return False
@property
def is_single_result(self):
# early return to save some queries
if self.vote_start_datetime.date() != self.vote_end_date:
return False
return self.contributions.filter(responsible=True, questionnaires__name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME).exists()
@property
def can_staff_edit(self):
return not self.is_archived and self.state in ['new', 'prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']
@property
def can_staff_delete(self):
return self.can_staff_edit and (not self.num_voters > 0 or self.is_single_result)
@property
def can_publish_grades(self):
from evap.results.tools import get_sum_of_answer_counters
if self.is_single_result:
return get_sum_of_answer_counters(self.ratinganswer_counters) > 0
return self.num_voters >= settings.MIN_ANSWER_COUNT and float(self.num_voters) / self.num_participants >= settings.MIN_ANSWER_PERCENTAGE
@transition(field=state, source=['new', 'editor_approved'], target='prepared')
def ready_for_editors(self):
pass
@transition(field=state, source='prepared', target='editor_approved')
def editor_approve(self):
pass
@transition(field=state, source=['new', 'prepared', 'editor_approved'], target='approved', conditions=[lambda self: self.general_contribution_has_questionnaires])
def staff_approve(self):
pass
@transition(field=state, source=['prepared', 'approved'], target='new')
def revert_to_new(self):
pass
@transition(field=state, source='approved', target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])
def evaluation_begin(self):
pass
@transition(field=state, source=['evaluated', 'reviewed'], target='in_evaluation', conditions=[lambda self: self.is_in_evaluation_period])
def reopen_evaluation(self):
pass
@transition(field=state, source='in_evaluation', target='evaluated')
def evaluation_end(self):
pass
@transition(field=state, source='evaluated', target='reviewed', conditions=[lambda self: self.is_fully_reviewed])
def review_finished(self):
pass
@transition(field=state, source=['new', 'reviewed'], target='reviewed', conditions=[lambda self: self.is_single_result])
def single_result_created(self):
pass
@transition(field=state, source='reviewed', target='evaluated', conditions=[lambda self: not self.is_fully_reviewed])
def reopen_review(self):
pass
@transition(field=state, source='reviewed', target='published')
def publish(self):
pass
@transition(field=state, source='published', target='reviewed')
def unpublish(self):
pass
@property
def student_state(self):
return STUDENT_STATES_NAMES[self.state]
@cached_property
def general_contribution(self):
try:
return self.contributions.get(contributor=None)
except Contribution.DoesNotExist:
return None
@cached_property
def num_participants(self):
if self._participant_count is not None:
return self._participant_count
return self.participants.count()
@cached_property
def num_voters(self):
if self._voter_count is not None:
return self._voter_count
return self.voters.count()
@property
def due_participants(self):
return self.participants.exclude(pk__in=self.voters.all())
@cached_property
def responsible_contributors(self):
return UserProfile.objects.filter(contributions__course=self, contributions__responsible=True).order_by('contributions__order')
@property
def days_left_for_evaluation(self):
return (self.vote_end_date - date.today()).days
@property
def days_until_evaluation(self):
return (self.vote_start_datetime.date() - date.today()).days
def is_user_editor_or_delegate(self, user):
if self.contributions.filter(can_edit=True, contributor=user).exists():
return True
represented_users = user.represented_users.all()
if self.contributions.filter(can_edit=True, contributor__in=represented_users).exists():
return True
return False
def is_user_contributor_or_delegate(self, user):
if self.contributions.filter(contributor=user).exists():
return True
represented_users = user.represented_users.all()
if self.contributions.filter(contributor__in=represented_users).exists():
return True
return False
def warnings(self):
result = []
if self.state in ['new', 'prepared', 'editor_approved'] and not self.all_contributions_have_questionnaires:
if not self.general_contribution_has_questionnaires:
result.append(_("Course has no questionnaires"))
else:
result.append(_("Not all contributors have questionnaires"))
if self.state in ['in_evaluation', 'evaluated', 'reviewed', 'published'] and not self.can_publish_grades:
result.append(_("Not enough participants to publish results"))
return result
@property
def textanswer_set(self):
"""Pseudo relationship to all text answers for this course"""
return TextAnswer.objects.filter(contribution__course=self)
@cached_property
def num_textanswers(self):
return self.textanswer_set.count()
@property
def open_textanswer_set(self):
"""Pseudo relationship to all text answers for this course"""
return self.textanswer_set.filter(state=TextAnswer.NOT_REVIEWED)
@property
def reviewed_textanswer_set(self):
"""Pseudo relationship to all text answers for this course"""
return self.textanswer_set.exclude(state=TextAnswer.NOT_REVIEWED)
@cached_property
def num_reviewed_textanswers(self):
return self.reviewed_textanswer_set.count()
@property
def ratinganswer_counters(self):
"""Pseudo relationship to all rating answers for this course"""
return RatingAnswerCounter.objects.filter(contribution__course=self)
def _archive(self):
"""Should be called only via Semester.archive"""
if not self.is_archiveable:
raise NotArchiveable()
self._participant_count = self.num_participants
self._voter_count = self.num_voters
self.save()
@property
def is_archived(self):
semester_is_archived = self.semester.is_archived
if semester_is_archived:
assert self._participant_count is not None and self._voter_count is not None
return semester_is_archived
@property
def is_archiveable(self):
return not self.is_archived and self.state in ["new", "published"]
@property
def final_grade_documents(self):
from evap.grades.models import GradeDocument
return self.grade_documents.filter(type=GradeDocument.FINAL_GRADES)
@property
def midterm_grade_documents(self):
from evap.grades.models import GradeDocument
return self.grade_documents.filter(type=GradeDocument.MIDTERM_GRADES)
@property
def grades_activated(self):
from evap.grades.tools import are_grades_activated
return are_grades_activated(self.semester)
@classmethod
def update_courses(cls):
logger.info("update_courses called. Processing courses now.")
from evap.evaluation.tools import send_publish_notifications
courses_new_in_evaluation = []
evaluation_results_courses = []
for course in cls.objects.all():
try:
if course.state == "approved" and course.vote_start_datetime <= datetime.now():
course.evaluation_begin()
course.last_modified_user = UserProfile.objects.cronjob_user()
course.save()
courses_new_in_evaluation.append(course)
elif course.state == "in_evaluation" and datetime.now() >= course.vote_end_datetime:
course.evaluation_end()
if course.is_fully_reviewed:
course.review_finished()
if not course.is_graded or course.final_grade_documents.exists() or course.gets_no_grade_documents:
course.publish()
evaluation_results_courses.append(course)
course.last_modified_user = UserProfile.objects.cronjob_user()
course.save()
except Exception:
                logger.exception('An error occurred when updating the state of course "{}" (id {}).'.format(course, course.id))
template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
EmailTemplate.send_to_users_in_courses(template, courses_new_in_evaluation, [EmailTemplate.ALL_PARTICIPANTS], use_cc=False, request=None)
send_publish_notifications(evaluation_results_courses)
logger.info("update_courses finished.")
@receiver(post_transition, sender=Course)
def log_state_transition(sender, **kwargs):
course = kwargs['instance']
transition_name = kwargs['name']
source_state = kwargs['source']
target_state = kwargs['target']
logger.info('Course "{}" (id {}) moved from state "{}" to state "{}", caused by transition "{}".'.format(course, course.id, source_state, target_state, transition_name))
class Contribution(models.Model):
"""A contributor who is assigned to a course and his questionnaires."""
OWN_COMMENTS = 'OWN'
COURSE_COMMENTS = 'COURSE'
ALL_COMMENTS = 'ALL'
COMMENT_VISIBILITY_CHOICES = (
(OWN_COMMENTS, _('Own')),
(COURSE_COMMENTS, _('Course')),
(ALL_COMMENTS, _('All')),
)
IS_CONTRIBUTOR = 'CONTRIBUTOR'
IS_EDITOR = 'EDITOR'
IS_RESPONSIBLE = 'RESPONSIBLE'
RESPONSIBILITY_CHOICES = (
(IS_CONTRIBUTOR, _('Contributor')),
(IS_EDITOR, _('Editor')),
(IS_RESPONSIBLE, _('Responsible')),
)
course = models.ForeignKey(Course, models.CASCADE, verbose_name=_("course"), related_name='contributions')
contributor = models.ForeignKey(settings.AUTH_USER_MODEL, models.PROTECT, verbose_name=_("contributor"), blank=True, null=True, related_name='contributions')
questionnaires = models.ManyToManyField(Questionnaire, verbose_name=_("questionnaires"), blank=True, related_name="contributions")
responsible = models.BooleanField(verbose_name=_("responsible"), default=False)
can_edit = models.BooleanField(verbose_name=_("can edit"), default=False)
comment_visibility = models.CharField(max_length=10, choices=COMMENT_VISIBILITY_CHOICES, verbose_name=_('comment visibility'), default=OWN_COMMENTS)
label = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("label"))
order = models.IntegerField(verbose_name=_("contribution order"), default=-1)
class Meta:
unique_together = (
('course', 'contributor'),
)
ordering = ['order', ]
def clean(self):
# responsible contributors can always edit
if self.responsible:
self.can_edit = True
def save(self, *args, **kw):
super().save(*args, **kw)
if self.responsible and not self.course.is_single_result:
assert self.can_edit and self.comment_visibility == self.ALL_COMMENTS
@property
def is_general(self):
return self.contributor is None
class Question(models.Model, metaclass=LocalizeModelBase):
"""A question including a type."""
QUESTION_TYPES = (
("T", _("Text Question")),
("L", _("Likert Question")),
("G", _("Grade Question")),
("P", _("Positive Yes-No Question")),
("N", _("Negative Yes-No Question")),
)
order = models.IntegerField(verbose_name=_("question order"), default=-1)
questionnaire = models.ForeignKey(Questionnaire, models.CASCADE)
text_de = models.TextField(verbose_name=_("question text (german)"))
text_en = models.TextField(verbose_name=_("question text (english)"))
type = models.CharField(max_length=1, choices=QUESTION_TYPES, verbose_name=_("question type"))
text = Translate
class Meta:
ordering = ['order', ]
verbose_name = _("question")
verbose_name_plural = _("questions")
@property
def answer_class(self):
if self.is_text_question:
return TextAnswer
elif self.is_rating_question:
return RatingAnswerCounter
else:
raise Exception("Unknown answer type: %r" % self.type)
@property
def is_likert_question(self):
return self.type == "L"
@property
def is_text_question(self):
return self.type == "T"
@property
def is_grade_question(self):
return self.type == "G"
@property
def is_positive_yes_no_question(self):
return self.type == "P"
@property
def is_negative_yes_no_question(self):
return self.type == "N"
@property
def is_yes_no_question(self):
return self.is_positive_yes_no_question or self.is_negative_yes_no_question
@property
def is_rating_question(self):
return self.is_grade_question or self.is_likert_question or self.is_yes_no_question
class Answer(models.Model):
"""An abstract answer to a question. For anonymity purposes, the answering
    user is not stored in the object. Concrete subclasses are `RatingAnswerCounter`,
and `TextAnswer`."""
question = models.ForeignKey(Question, models.PROTECT)
contribution = models.ForeignKey(Contribution, models.PROTECT, related_name="%(class)s_set")
class Meta:
abstract = True
verbose_name = _("answer")
verbose_name_plural = _("answers")
class RatingAnswerCounter(Answer):
"""A rating answer counter to a question. A lower answer is better or indicates more agreement."""
answer = models.IntegerField(verbose_name=_("answer"))
count = models.IntegerField(verbose_name=_("count"), default=0)
class Meta:
unique_together = (
('question', 'contribution', 'answer'),
)
verbose_name = _("rating answer")
verbose_name_plural = _("rating answers")
def add_vote(self):
self.count += 1
class TextAnswer(Answer):
"""A free-form text answer to a question (usually a comment about a course
or a contributor)."""
reviewed_answer = models.TextField(verbose_name=_("reviewed answer"), blank=True, null=True)
original_answer = models.TextField(verbose_name=_("original answer"), blank=True)
HIDDEN = 'HI'
PUBLISHED = 'PU'
PRIVATE = 'PR'
NOT_REVIEWED = 'NR'
TEXT_ANSWER_STATES = (
(HIDDEN, _('hidden')),
(PUBLISHED, _('published')),
(PRIVATE, _('private')),
(NOT_REVIEWED, _('not reviewed')),
)
state = models.CharField(max_length=2, choices=TEXT_ANSWER_STATES, verbose_name=_('state of answer'), default=NOT_REVIEWED)
class Meta:
verbose_name = _("text answer")
verbose_name_plural = _("text answers")
@property
def is_hidden(self):
return self.state == self.HIDDEN
@property
def is_private(self):
return self.state == self.PRIVATE
@property
def is_published(self):
return self.state == self.PUBLISHED
@property
def answer(self):
return self.reviewed_answer or self.original_answer
@answer.setter
def answer(self, value):
self.original_answer = value
self.reviewed_answer = None
def publish(self):
self.state = self.PUBLISHED
def hide(self):
self.state = self.HIDDEN
def make_private(self):
self.state = self.PRIVATE
def unreview(self):
self.state = self.NOT_REVIEWED
class FaqSection(models.Model, metaclass=LocalizeModelBase):
"""Section in the frequently asked questions"""
order = models.IntegerField(verbose_name=_("section order"), default=-1)
title_de = models.TextField(verbose_name=_("section title (german)"))
title_en = models.TextField(verbose_name=_("section title (english)"))
title = Translate
class Meta:
ordering = ['order', ]
verbose_name = _("section")
verbose_name_plural = _("sections")
class FaqQuestion(models.Model, metaclass=LocalizeModelBase):
"""Question and answer in the frequently asked questions"""
section = models.ForeignKey(FaqSection, models.CASCADE, related_name="questions")
order = models.IntegerField(verbose_name=_("question order"), default=-1)
question_de = models.TextField(verbose_name=_("question (german)"))
question_en = models.TextField(verbose_name=_("question (english)"))
question = Translate
answer_de = models.TextField(verbose_name=_("answer (german)"))
    answer_en = models.TextField(verbose_name=_("answer (english)"))
answer = Translate
class Meta:
ordering = ['order', ]
verbose_name = _("question")
verbose_name_plural = _("questions")
class UserProfileManager(BaseUserManager):
def get_queryset(self):
return super().get_queryset().exclude(username=UserProfile.CRONJOB_USER_USERNAME)
def cronjob_user(self):
return super().get_queryset().get(username=UserProfile.CRONJOB_USER_USERNAME)
def exclude_inactive_users(self):
return self.get_queryset().exclude(is_active=False)
    def create_user(self, username, password=None, email=None, first_name=None, last_name=None):
if not username:
raise ValueError(_('Users must have a username'))
user = self.model(
username=username,
email=self.normalize_email(email),
first_name=first_name,
last_name=last_name
)
user.set_password(password)
user.save()
return user
def create_superuser(self, username, password, email=None, first_name=None, last_name=None):
user = self.create_user(
username=username,
password=password,
email=email,
first_name=first_name,
last_name=last_name
)
user.is_superuser = True
user.save()
user.groups.add(Group.objects.get(name="Staff"))
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
username = models.CharField(max_length=255, unique=True, verbose_name=_('username'))
# null=True because users created through kerberos logins and certain external users don't have an address.
email = models.EmailField(max_length=255, unique=True, blank=True, null=True, verbose_name=_('email address'))
title = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("Title"))
first_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("first name"))
last_name = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("last name"))
language = models.CharField(max_length=8, blank=True, null=True, verbose_name=_("language"))
# delegates of the user, which can also manage their courses
delegates = models.ManyToManyField("UserProfile", verbose_name=_("Delegates"), related_name="represented_users", blank=True)
# users to which all emails should be sent in cc without giving them delegate rights
cc_users = models.ManyToManyField("UserProfile", verbose_name=_("CC Users"), related_name="ccing_users", blank=True)
# key for url based login of this user
MAX_LOGIN_KEY = 2**31 - 1
login_key = models.IntegerField(verbose_name=_("Login Key"), unique=True, blank=True, null=True)
login_key_valid_until = models.DateField(verbose_name=_("Login Key Validity"), blank=True, null=True)
is_active = models.BooleanField(default=True, verbose_name=_("active"))
class Meta:
ordering = ('last_name', 'first_name', 'username')
verbose_name = _('user')
verbose_name_plural = _('users')
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = []
objects = UserProfileManager()
# needed e.g. for compatibility with contrib.auth.admin
def get_full_name(self):
return self.full_name
# needed e.g. for compatibility with contrib.auth.admin
def get_short_name(self):
if self.first_name:
return self.first_name
return self.username
@property
def full_name(self):
if self.last_name:
name = self.last_name
if self.first_name:
name = self.first_name + " " + name
if self.title:
name = self.title + " " + name
return name
else:
return self.username
@property
def full_name_with_username(self):
name = self.full_name
if self.username not in name:
name += " (" + self.username + ")"
return name
def __str__(self):
return self.full_name
@cached_property
def is_staff(self):
return self.groups.filter(name='Staff').exists()
@cached_property
def is_reviewer(self):
return self.is_staff or self.groups.filter(name='Reviewer').exists()
@cached_property
def is_grade_publisher(self):
return self.groups.filter(name='Grade publisher').exists()
CRONJOB_USER_USERNAME = "cronjob"
@property
def can_staff_mark_inactive(self):
if self.is_reviewer or self.is_grade_publisher or self.is_staff or self.is_superuser:
return False
if any(not course.is_archived for course in self.courses_participating_in.all()):
return False
return True
@property
def can_staff_delete(self):
states_with_votes = ["in_evaluation", "reviewed", "evaluated", "published"]
if any(course.state in states_with_votes and not course.is_archived for course in self.courses_participating_in.all()):
return False
if self.is_contributor or self.is_reviewer or self.is_grade_publisher or self.is_staff or self.is_superuser:
return False
if any(not user.can_staff_delete for user in self.represented_users.all()):
return False
if any(not user.can_staff_delete for user in self.ccing_users.all()):
return False
return True
@property
def is_participant(self):
return self.courses_participating_in.exists()
@property
def is_student(self):
"""
A UserProfile is not considered to be a student anymore if the
newest contribution is newer than the newest participation.
"""
if not self.is_participant:
return False
if not self.is_contributor:
return True
last_semester_participated = Semester.objects.filter(course__participants=self).order_by("-created_at").first()
last_semester_contributed = Semester.objects.filter(course__contributions__contributor=self).order_by("-created_at").first()
return last_semester_participated.created_at >= last_semester_contributed.created_at
@property
def is_contributor(self):
return self.contributions.exists()
@property
def is_editor(self):
return self.contributions.filter(can_edit=True).exists()
@property
def is_responsible(self):
# in the user list, self.user.contributions is prefetched, therefore use it directly and don't filter it
return any(contribution.responsible for contribution in self.contributions.all())
@property
def is_delegate(self):
return self.represented_users.exists()
@property
def is_editor_or_delegate(self):
return self.is_editor or self.is_delegate
@property
def is_contributor_or_delegate(self):
return self.is_contributor or self.is_delegate
@property
def is_external(self):
# do the import here to prevent a circular import
from evap.evaluation.tools import is_external_email
if not self.email:
return True
return is_external_email(self.email)
@property
def can_download_grades(self):
return not self.is_external
@classmethod
def email_needs_login_key(cls, email):
# do the import here to prevent a circular import
from evap.evaluation.tools import is_external_email
return is_external_email(email)
@property
def needs_login_key(self):
return UserProfile.email_needs_login_key(self.email)
def generate_login_key(self):
while True:
key = random.randrange(0, UserProfile.MAX_LOGIN_KEY)
if not UserProfile.objects.filter(login_key=key).exists():
# key not yet used
self.login_key = key
break
self.refresh_login_key()
def refresh_login_key(self):
self.login_key_valid_until = date.today() + timedelta(settings.LOGIN_KEY_VALIDITY)
self.save()
@property
def login_url(self):
if not self.needs_login_key:
return ""
return settings.PAGE_URL + "?loginkey=" + str(self.login_key)
def get_sorted_contributions(self):
return self.contributions.order_by('course__semester__created_at', 'course__name_de')
def get_sorted_courses_participating_in(self):
return self.courses_participating_in.order_by('semester__created_at', 'name_de')
def get_sorted_courses_voted_for(self):
return self.courses_voted_for.order_by('semester__created_at', 'name_de')
def validate_template(value):
"""Field validator which ensures that the value can be compiled into a
Django Template."""
try:
Template(value)
except (TemplateSyntaxError, TemplateEncodingError) as e:
raise ValidationError(str(e))
class EmailTemplate(models.Model):
name = models.CharField(max_length=1024, unique=True, verbose_name=_("Name"))
subject = models.CharField(max_length=1024, verbose_name=_("Subject"), validators=[validate_template])
body = models.TextField(verbose_name=_("Body"), validators=[validate_template])
EDITOR_REVIEW_NOTICE = "Editor Review Notice"
STUDENT_REMINDER = "Student Reminder"
PUBLISHING_NOTICE = "Publishing Notice"
LOGIN_KEY_CREATED = "Login Key Created"
EVALUATION_STARTED = "Evaluation Started"
ALL_PARTICIPANTS = 'all_participants'
DUE_PARTICIPANTS = 'due_participants'
RESPONSIBLE = 'responsible'
EDITORS = 'editors'
CONTRIBUTORS = 'contributors'
EMAIL_RECIPIENTS = (
(ALL_PARTICIPANTS, _('all participants')),
(DUE_PARTICIPANTS, _('due participants')),
(RESPONSIBLE, _('responsible person')),
(EDITORS, _('all editors')),
(CONTRIBUTORS, _('all contributors'))
)
@classmethod
def recipient_list_for_course(cls, course, recipient_groups, filter_users_in_cc):
recipients = []
if cls.CONTRIBUTORS in recipient_groups:
recipients += UserProfile.objects.filter(contributions__course=course)
elif cls.EDITORS in recipient_groups:
recipients += UserProfile.objects.filter(contributions__course=course, contributions__can_edit=True)
elif cls.RESPONSIBLE in recipient_groups:
recipients += course.responsible_contributors
if cls.ALL_PARTICIPANTS in recipient_groups:
recipients += course.participants.all()
elif cls.DUE_PARTICIPANTS in recipient_groups:
recipients += course.due_participants
if filter_users_in_cc:
# remove delegates and CC users of recipients from the recipient list
# so they won't get the exact same email twice
users_excluded = UserProfile.objects.filter(Q(represented_users__in=recipients) | Q(ccing_users__in=recipients))
# but do so only if they have no delegates/cc_users, because otherwise
# those won't get the email at all. consequently, some "edge case users"
# will get the email twice, but there is no satisfying way around that.
users_excluded = users_excluded.filter(delegates=None, cc_users=None)
recipients = list(set(recipients) - set(users_excluded))
return recipients
@classmethod
def __render_string(cls, text, dictionary):
return Template(text).render(Context(dictionary, autoescape=False))
@classmethod
def send_to_users_in_courses(cls, template, courses, recipient_groups, use_cc, request):
user_course_map = {}
for course in courses:
recipients = cls.recipient_list_for_course(course, recipient_groups, filter_users_in_cc=use_cc)
for user in recipients:
user_course_map.setdefault(user, []).append(course)
for user, courses in user_course_map.items():
subject_params = {}
body_params = {'user': user, 'courses': courses}
cls.send_to_user(user, template, subject_params, body_params, use_cc=use_cc, request=request)
@classmethod
def send_to_user(cls, user, template, subject_params, body_params, use_cc, request=None):
if not user.email:
warning_message = "{} has no email address defined. Could not send email.".format(user.username)
# If this method is triggered by a cronjob changing course states, the request is None.
# In this case warnings should be sent to the admins via email (configured in the settings for logger.error).
# If a request exists, the page is displayed in the browser and the message can be shown on the page (messages.warning).
if request is not None:
logger.warning(warning_message)
messages.warning(request, _(warning_message))
else:
logger.error(warning_message)
return
if use_cc:
cc_users = set(user.delegates.all() | user.cc_users.all())
cc_addresses = [p.email for p in cc_users if p.email]
else:
cc_addresses = []
send_separate_login_url = False
body_params['login_url'] = ""
if user.needs_login_key:
user.generate_login_key()
if not cc_addresses:
body_params['login_url'] = user.login_url
else:
send_separate_login_url = True
subject = cls.__render_string(template.subject, subject_params)
body = cls.__render_string(template.body, body_params)
mail = EmailMessage(
subject=subject,
body=body,
to=[user.email],
cc=cc_addresses,
bcc=[a[1] for a in settings.MANAGERS],
headers={'Reply-To': settings.REPLY_TO_EMAIL})
try:
mail.send(False)
logger.info(('Sent email "{}" to {}.').format(subject, user.username))
if send_separate_login_url:
cls.send_login_url_to_user(user)
except Exception:
logger.exception('An exception occurred when sending the following email to user "{}":\n{}\n'.format(user.username, mail.message()))
@classmethod
def send_reminder_to_user(cls, user, first_due_in_days, due_courses):
template = cls.objects.get(name=cls.STUDENT_REMINDER)
subject_params = {'user': user, 'first_due_in_days': first_due_in_days}
body_params = {'user': user, 'first_due_in_days': first_due_in_days, 'due_courses': due_courses}
cls.send_to_user(user, template, subject_params, body_params, use_cc=False)
@classmethod
def send_login_url_to_user(cls, user):
template = cls.objects.get(name=cls.LOGIN_KEY_CREATED)
subject_params = {}
body_params = {'user': user, 'login_url': user.login_url}
cls.send_to_user(user, template, subject_params, body_params, use_cc=False)
logger.info(('Sent login url to {}.').format(user.username))
```
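Since `Course.state` is a protected `FSMField`, the decorated transition methods above are the only way to move a course through its lifecycle. A short sketch of the manual part of the happy path, assuming `course` is a saved `Course` whose general contribution already has questionnaires:
```python
# Sketch: driving a course through the first FSM transitions defined above.
course.ready_for_editors()   # new -> prepared
course.editor_approve()      # prepared -> editor_approved
course.staff_approve()       # editor_approved -> approved (requires questionnaires)
course.save()                # the protected FSMField is only persisted via save()

# evaluation_begin(), evaluation_end() and publish() are normally triggered by
# Course.update_courses(), which a cronjob runs once the evaluation period
# starts or ends.
```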
#### File: evaluation/tests/test_models.py
```python
from datetime import datetime, timedelta, date
from unittest.mock import patch, Mock
from django.test import TestCase, override_settings
from django.core.cache import cache
from django.core import mail
from model_mommy import mommy
from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, NotArchiveable, Questionnaire,
RatingAnswerCounter, Semester, UserProfile)
from evap.results.tools import calculate_average_grades_and_deviation
@override_settings(EVALUATION_END_OFFSET_HOURS=0)
class TestCourses(TestCase):
def test_approved_to_in_evaluation(self):
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now())
with patch('evap.evaluation.models.EmailTemplate.send_to_users_in_courses') as mock:
Course.update_courses()
template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
mock.assert_called_once_with(template, [course], [EmailTemplate.ALL_PARTICIPANTS],
use_cc=False, request=None)
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'in_evaluation')
def test_in_evaluation_to_evaluated(self):
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
with patch('evap.evaluation.models.Course.is_fully_reviewed') as mock:
mock.__get__ = Mock(return_value=False)
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'evaluated')
def test_in_evaluation_to_reviewed(self):
# Course is "fully reviewed" as no open text_answers are present by default,
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'reviewed')
def test_in_evaluation_to_published(self):
# Course is "fully reviewed" and not graded, thus gets published immediately.
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1),
is_graded=False)
with patch('evap.evaluation.tools.send_publish_notifications') as mock:
Course.update_courses()
mock.assert_called_once_with([course])
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'published')
def test_evaluation_ended(self):
# Course is out of evaluation period.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1), is_graded=False)
# This course is not.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today(), is_graded=False)
with patch('evap.evaluation.models.Course.evaluation_end') as mock:
Course.update_courses()
self.assertEqual(mock.call_count, 1)
def test_approved_to_in_evaluation_sends_emails(self):
""" Regression test for #945 """
participant = mommy.make(UserProfile, email='<EMAIL>')
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now(), participants=[participant])
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(course.state, 'in_evaluation')
def test_has_enough_questionnaires(self):
# manually circumvent Course's save() method to have a Course without a general contribution
# the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258
Course.objects.bulk_create([mommy.prepare(Course, semester=mommy.make(Semester), type=mommy.make(CourseType))])
course = Course.objects.get()
self.assertEqual(course.contributions.count(), 0)
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution = mommy.make(
Contribution, course=course, contributor=mommy.make(UserProfile),
responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
general_contribution = mommy.make(Contribution, course=course, contributor=None)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
questionnaire = mommy.make(Questionnaire)
general_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertTrue(course.all_contributions_have_questionnaires)
def test_deleting_last_modified_user_does_not_delete_course(self):
user = mommy.make(UserProfile)
course = mommy.make(Course, last_modified_user=user)
user.delete()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
def test_responsible_contributors_ordering(self):
course = mommy.make(Course)
responsible1 = mommy.make(UserProfile)
responsible2 = mommy.make(UserProfile)
contribution1 = mommy.make(Contribution, course=course, contributor=responsible1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=0)
mommy.make(Contribution, course=course, contributor=responsible2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=1)
self.assertEqual(list(course.responsible_contributors), [responsible1, responsible2])
contribution1.order = 2
contribution1.save()
course = Course.objects.get(pk=course.pk)
self.assertEqual(list(course.responsible_contributors), [responsible2, responsible1])
def test_single_result_can_be_deleted_only_in_reviewed(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, semester=mommy.make(Semester))
contribution = mommy.make(Contribution,
course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS,
questionnaires=[Questionnaire.single_result_questionnaire()]
)
course.single_result_created()
course.publish()
course.save()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
self.assertFalse(course.can_staff_delete)
course.unpublish()
self.assertTrue(course.can_staff_delete)
RatingAnswerCounter.objects.filter(contribution__course=course).delete()
course.delete()
self.assertFalse(Course.objects.filter(pk=course.pk).exists())
class TestUserProfile(TestCase):
def test_is_student(self):
some_user = mommy.make(UserProfile)
self.assertFalse(some_user.is_student)
student = mommy.make(UserProfile, courses_participating_in=[mommy.make(Course)])
self.assertTrue(student.is_student)
contributor = mommy.make(UserProfile, contributions=[mommy.make(Contribution)])
self.assertFalse(contributor.is_student)
semester_contributed_to = mommy.make(Semester, created_at=date.today())
semester_participated_in = mommy.make(Semester, created_at=date.today())
course_contributed_to = mommy.make(Course, semester=semester_contributed_to)
course_participated_in = mommy.make(Course, semester=semester_participated_in)
contribution = mommy.make(Contribution, course=course_contributed_to)
user = mommy.make(UserProfile, contributions=[contribution], courses_participating_in=[course_participated_in])
self.assertTrue(user.is_student)
semester_contributed_to.created_at = date.today() - timedelta(days=1)
semester_contributed_to.save()
self.assertTrue(user.is_student)
semester_participated_in.created_at = date.today() - timedelta(days=2)
semester_participated_in.save()
self.assertFalse(user.is_student)
def test_can_staff_delete(self):
user = mommy.make(UserProfile)
mommy.make(Course, participants=[user], state="new")
self.assertTrue(user.can_staff_delete)
user2 = mommy.make(UserProfile)
mommy.make(Course, participants=[user2], state="in_evaluation")
self.assertFalse(user2.can_staff_delete)
contributor = mommy.make(UserProfile)
mommy.make(Contribution, contributor=contributor)
self.assertFalse(contributor.can_staff_delete)
def test_inactive_users_hidden(self):
active_user = mommy.make(UserProfile)
mommy.make(UserProfile, is_active=False)
self.assertEqual(list(UserProfile.objects.exclude_inactive_users().all()), [active_user])
def test_inactive_users_shown(self):
active_user = mommy.make(UserProfile)
inactive_user = mommy.make(UserProfile, is_active=False)
user_list = list(UserProfile.objects.all())
self.assertIn(active_user, user_list)
self.assertIn(inactive_user, user_list)
class ArchivingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.semester = mommy.make(Semester)
cls.course = mommy.make(Course, pk=7, state="published", semester=cls.semester)
users = mommy.make(UserProfile, _quantity=3)
cls.course.participants.set(users)
cls.course.voters.set(users[:2])
def refresh_course(self):
""" refresh_from_db does not work with courses"""
self.course = self.semester.course_set.first()
def setUp(self):
self.semester.refresh_from_db()
self.refresh_course()
def test_counts_dont_change(self):
"""
        Asserts that course.num_voters and course.num_participants don't change after archiving.
"""
voter_count = self.course.num_voters
participant_count = self.course.num_participants
self.semester.archive()
self.refresh_course()
self.assertEqual(voter_count, self.course.num_voters)
self.assertEqual(participant_count, self.course.num_participants)
def test_participants_do_not_loose_courses(self):
"""
Asserts that participants still participate in their courses after they get archived.
"""
some_participant = self.course.participants.first()
self.semester.archive()
self.assertEqual(list(some_participant.courses_participating_in.all()), [self.course])
def test_is_archived(self):
"""
Tests whether is_archived returns True on archived semesters and courses.
"""
self.assertFalse(self.course.is_archived)
self.semester.archive()
self.refresh_course()
self.assertTrue(self.course.is_archived)
def test_archiving_does_not_change_results(self):
results = calculate_average_grades_and_deviation(self.course)
self.semester.archive()
self.refresh_course()
cache.clear()
self.assertEqual(calculate_average_grades_and_deviation(self.course), results)
def test_archiving_twice_raises_exception(self):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.course_set.first()._archive()
def test_course_is_not_archived_if_participant_count_is_set(self):
course = mommy.make(Course, state="published", _participant_count=1, _voter_count=1)
self.assertFalse(course.is_archived)
self.assertTrue(course.is_archiveable)
def test_archiving_doesnt_change_single_results_participant_count(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, state="published")
contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
contribution.questionnaires.add(Questionnaire.single_result_questionnaire())
self.assertTrue(course.is_single_result)
course._participant_count = 5
course._voter_count = 5
course.save()
course._archive()
self.assertEqual(course._participant_count, 5)
self.assertEqual(course._voter_count, 5)
class TestLoginUrlEmail(TestCase):
@classmethod
def setUpTestData(cls):
cls.other_user = mommy.make(UserProfile, email="<EMAIL>")
cls.user = mommy.make(UserProfile, email="<EMAIL>")
cls.user.generate_login_key()
cls.course = mommy.make(Course)
mommy.make(Contribution, course=cls.course, contributor=cls.user, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
cls.template = mommy.make(EmailTemplate, body="{{ login_url }}")
EmailTemplate.objects.filter(name="Login Key Created").update(body="{{ user.login_url }}")
def test_no_login_url_when_delegates_in_cc(self):
self.user.delegates.add(self.other_user)
        EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_no_login_url_when_cc_users_in_cc(self):
self.user.cc_users.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_login_url_when_nobody_in_cc(self):
# message is not sent to others in cc
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
def test_login_url_when_use_cc_is_false(self):
# message is not sent to others in cc
self.user.delegates.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=False, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
class TestEmailTemplate(TestCase):
def test_missing_email_address(self):
"""
Tests that __send_to_user behaves when the user has no email address.
        Regression test for https://github.com/fsr-itse/EvaP/issues/825
"""
user = mommy.make(UserProfile, email=None)
template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER)
EmailTemplate.send_to_user(user, template, {}, {}, False, None)
class TestEmailRecipientList(TestCase):
def test_recipient_list(self):
course = mommy.make(Course)
responsible = mommy.make(UserProfile)
editor = mommy.make(UserProfile)
contributor = mommy.make(UserProfile)
mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=course, contributor=editor, can_edit=True)
mommy.make(Contribution, course=course, contributor=contributor)
participant1 = mommy.make(UserProfile, courses_participating_in=[course])
participant2 = mommy.make(UserProfile, courses_participating_in=[course])
course.voters.set([participant1])
recipient_list = EmailTemplate.recipient_list_for_course(course, [], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.RESPONSIBLE], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.EDITORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible, editor])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible, editor, contributor])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.ALL_PARTICIPANTS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [participant1, participant2])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.DUE_PARTICIPANTS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [participant2])
def test_recipient_list_filtering(self):
course = mommy.make(Course)
contributor1 = mommy.make(UserProfile)
contributor2 = mommy.make(UserProfile, delegates=[contributor1])
mommy.make(Contribution, course=course, contributor=contributor1)
mommy.make(Contribution, course=course, contributor=contributor2)
# no-one should get filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [contributor1, contributor2])
# contributor1 is in cc of contributor2 and gets filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
self.assertCountEqual(recipient_list, [contributor2])
contributor3 = mommy.make(UserProfile, delegates=[contributor2])
mommy.make(Contribution, course=course, contributor=contributor3)
# again, no-one should get filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [contributor1, contributor2, contributor3])
# contributor1 is in cc of contributor2 and gets filtered.
# contributor2 is in cc of contributor3 but is not filtered since contributor1 wouldn't get an email at all then.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
self.assertCountEqual(recipient_list, [contributor2, contributor3])
```
#### File: evap/evaluation/tools.py
```python
from collections import OrderedDict, defaultdict
from datetime import datetime
from django.conf import settings
from django.contrib.auth import user_logged_in
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.utils import translation
from django.utils.translation import LANGUAGE_SESSION_KEY, get_language
LIKERT_NAMES = {
1: _("Strongly agree"),
2: _("Agree"),
3: _("Neither agree nor disagree"),
4: _("Disagree"),
5: _("Strongly disagree"),
6: _("no answer"),
}
GRADE_NAMES = {
1: _("1"),
2: _("2"),
3: _("3"),
4: _("4"),
5: _("5"),
6: _("no answer"),
}
POSITIVE_YES_NO_NAMES = {
1: _("Yes"),
5: _("No"),
6: _("no answer"),
}
NEGATIVE_YES_NO_NAMES = {
1: _("No"),
5: _("Yes"),
6: _("no answer"),
}
# the names used for contributors and staff
STATES_ORDERED = OrderedDict((
('new', _('new')),
('prepared', _('prepared')),
('editor_approved', _('lecturer approved')),
('approved', _('approved')),
('in_evaluation', _('in evaluation')),
('evaluated', _('evaluated')),
('reviewed', _('reviewed')),
('published', _('published'))
))
# the descriptions used in tooltips for contributors
STATE_DESCRIPTIONS = OrderedDict((
('new', _('The course was newly created and will be prepared by the student representatives.')),
    ('prepared', _('The course was prepared by the student representatives and is now available for editing by the responsible person.')),
('editor_approved', _('The course was approved by a lecturer and will now be checked by the student representatives.')),
('approved', _('All preparations are finished. The evaluation will begin once the defined start date is reached.')),
('in_evaluation', _('The course is currently in evaluation until the defined end date is reached.')),
('evaluated', _('The course was fully evaluated and will now be reviewed by the student representatives.')),
('reviewed', _('The course was fully evaluated and reviewed by the student representatives. You will receive an email when its results are published.')),
('published', _('The results for this course have been published.'))
))
# the names used for students
STUDENT_STATES_ORDERED = OrderedDict((
('in_evaluation', _('in evaluation')),
('upcoming', _('upcoming')),
('evaluationFinished', _('evaluation finished')),
('published', _('published'))
))
def questionnaires_and_contributions(course):
"""Yields tuples of (questionnaire, contribution) for the given course."""
result = []
for contribution in course.contributions.all():
for questionnaire in contribution.questionnaires.all():
result.append((questionnaire, contribution))
# sort questionnaires for general contributions first
result.sort(key=lambda t: not t[1].is_general)
return result
def is_external_email(email):
return not any([email.endswith("@" + domain) for domain in settings.INSTITUTION_EMAIL_DOMAINS])
def send_publish_notifications(courses, template=None):
from evap.evaluation.models import EmailTemplate
publish_notifications = defaultdict(set)
if not template:
template = EmailTemplate.objects.get(name=EmailTemplate.PUBLISHING_NOTICE)
for course in courses:
# for published courses all contributors and participants get a notification
if course.can_publish_grades:
for participant in course.participants.all():
publish_notifications[participant].add(course)
for contribution in course.contributions.all():
if contribution.contributor:
publish_notifications[contribution.contributor].add(course)
        # if a course was not published, notifications are only sent to contributors who can see comments
elif len(course.textanswer_set) > 0:
for textanswer in course.textanswer_set:
if textanswer.contribution.contributor:
publish_notifications[textanswer.contribution.contributor].add(course)
for contributor in course.responsible_contributors:
publish_notifications[contributor].add(course)
for user, course_set in publish_notifications.items():
body_params = {'user': user, 'courses': list(course_set)}
EmailTemplate.send_to_user(user, template, {}, body_params, use_cc=True)
def sort_formset(request, formset):
if request.POST: # if not, there will be no cleaned_data and the models should already be sorted anyways
formset.is_valid() # make sure all forms have cleaned_data
formset.forms.sort(key=lambda f: f.cleaned_data.get("order", 9001))
def course_types_in_semester(semester):
from evap.evaluation.models import Course
return Course.objects.filter(semester=semester).values_list('type', flat=True).order_by().distinct()
def date_to_datetime(date):
return datetime(year=date.year, month=date.month, day=date.day)
@receiver(user_logged_in)
def set_or_get_language(sender, user, request, **kwargs):
if user.language:
request.session[LANGUAGE_SESSION_KEY] = user.language
translation.activate(user.language)
else:
user.language = get_language()
user.save()
```
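As a side note, `is_external_email` above only compares the address suffix against the configured institution domains. Below is a minimal standalone sketch of the same check, using made-up domain values instead of `settings.INSTITUTION_EMAIL_DOMAINS`:
```python
# Hypothetical stand-in for settings.INSTITUTION_EMAIL_DOMAINS.
INSTITUTION_EMAIL_DOMAINS = ["example.edu", "institution.example"]

def is_external_email(email):
    # An address counts as internal if it ends with "@" + any configured domain.
    return not any(email.endswith("@" + domain) for domain in INSTITUTION_EMAIL_DOMAINS)

assert is_external_email("visitor@gmail.com") is True
assert is_external_email("student@example.edu") is False
```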
#### File: evap/results/tools.py
```python
from collections import namedtuple, defaultdict, OrderedDict
from functools import partial
from math import ceil
from statistics import pstdev, median
from django.conf import settings
from django.core.cache import cache
from django.db.models import Sum
from evap.evaluation.models import TextAnswer, Contribution, RatingAnswerCounter
from evap.evaluation.tools import questionnaires_and_contributions
GRADE_COLORS = {
1: (136, 191, 74),
2: (187, 209, 84),
3: (239, 226, 88),
4: (242, 158, 88),
5: (235, 89, 90),
}
# see calculate_results
ResultSection = namedtuple('ResultSection', ('questionnaire', 'contributor', 'label', 'results', 'warning'))
CommentSection = namedtuple('CommentSection', ('questionnaire', 'contributor', 'label', 'is_responsible', 'results'))
RatingResult = namedtuple('RatingResult', ('question', 'total_count', 'average', 'deviation', 'counts', 'warning'))
YesNoResult = namedtuple('YesNoResult', ('question', 'total_count', 'average', 'deviation', 'counts', 'warning', 'approval_count'))
TextResult = namedtuple('TextResult', ('question', 'answers'))
def avg(iterable):
"""Simple arithmetic average function. Returns `None` if the length of
`iterable` is 0 or no items except None exist."""
items = [item for item in iterable if item is not None]
if len(items) == 0:
return None
return float(sum(items)) / len(items)
def mix(a, b, alpha):
if a is None and b is None:
return None
if a is None:
return b
if b is None:
return a
return alpha * a + (1 - alpha) * b
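# For example, mix(2.0, 3.0, 0.25) == 0.25 * 2.0 + 0.75 * 3.0 == 2.75,
# while mix(None, 3.0, 0.25) falls back to 3.0 because one operand is missing.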
def get_answers(contribution, question):
return question.answer_class.objects.filter(contribution=contribution, question=question)
def get_number_of_answers(contribution, question):
answers = get_answers(contribution, question)
if question.is_rating_question:
return get_sum_of_answer_counters(answers)
else:
return len(answers)
def get_sum_of_answer_counters(answer_counters):
return answer_counters.aggregate(total_count=Sum('count'))['total_count'] or 0
def get_answers_from_answer_counters(answer_counters):
answers = []
for answer_counter in answer_counters:
for __ in range(0, answer_counter.count):
answers.append(answer_counter.answer)
return answers
def get_textanswers(contribution, question, filter_states=None):
assert question.is_text_question
answers = get_answers(contribution, question)
if filter_states is not None:
answers = answers.filter(state__in=filter_states)
return answers
def get_counts(question, answer_counters):
counts = OrderedDict()
possible_answers = range(1, 6)
if question.is_yes_no_question:
possible_answers = [1, 5]
# ensure ordering of answers
for answer in possible_answers:
counts[answer] = 0
for answer_counter in answer_counters:
counts[answer_counter.answer] = answer_counter.count
return counts
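# For example (hypothetical counters): three answers of "1" and one answer of "4" on a grade
# question yield OrderedDict([(1, 3), (2, 0), (3, 0), (4, 1), (5, 0)]), so templates can always
# iterate over the full answer range even when some values were never chosen.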
def calculate_results(course, force_recalculation=False):
if course.state != "published":
return _calculate_results_impl(course)
cache_key = 'evap.staff.results.tools.calculate_results-{:d}'.format(course.id)
if force_recalculation:
cache.delete(cache_key)
return cache.get_or_set(cache_key, partial(_calculate_results_impl, course), None)
def _calculate_results_impl(course):
"""Calculates the result data for a single course. Returns a list of
`ResultSection` tuples. Each of those tuples contains the questionnaire, the
contributor (or None), a list of single result elements, the average grade and
deviation for that section (or None). The result elements are either
`RatingResult` or `TextResult` instances."""
    # there will be one section per relevant questionnaire/contributor pair
sections = []
# calculate the median values of how many people answered a questionnaire type (lecturer, tutor, ...)
questionnaire_med_answers = defaultdict(list)
questionnaire_max_answers = {}
questionnaire_warning_thresholds = {}
for questionnaire, contribution in questionnaires_and_contributions(course):
max_answers = max([get_number_of_answers(contribution, question) for question in questionnaire.rating_questions], default=0)
questionnaire_max_answers[(questionnaire, contribution)] = max_answers
questionnaire_med_answers[questionnaire].append(max_answers)
for questionnaire, max_answers in questionnaire_med_answers.items():
questionnaire_warning_thresholds[questionnaire] = max(settings.RESULTS_WARNING_PERCENTAGE * median(max_answers), settings.RESULTS_WARNING_COUNT)
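    # Illustration with made-up settings values: with RESULTS_WARNING_PERCENTAGE = 0.2 and
    # RESULTS_WARNING_COUNT = 4, a questionnaire whose median participation is 30 answers
    # gets a warning threshold of max(0.2 * 30, 4) = 6.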
for questionnaire, contribution in questionnaires_and_contributions(course):
# will contain one object per question
results = []
for question in questionnaire.question_set.all():
if question.is_rating_question:
answer_counters = get_answers(contribution, question)
answers = get_answers_from_answer_counters(answer_counters)
total_count = len(answers)
average = avg(answers) if total_count > 0 else None
deviation = pstdev(answers, average) if total_count > 0 else None
counts = get_counts(question, answer_counters)
warning = total_count > 0 and total_count < questionnaire_warning_thresholds[questionnaire]
if question.is_yes_no_question:
if question.is_positive_yes_no_question:
approval_count = counts[1]
else:
approval_count = counts[5]
results.append(YesNoResult(question, total_count, average, deviation, counts, warning, approval_count))
else:
results.append(RatingResult(question, total_count, average, deviation, counts, warning))
elif question.is_text_question:
allowed_states = [TextAnswer.PRIVATE, TextAnswer.PUBLISHED]
answers = get_textanswers(contribution, question, allowed_states)
results.append(TextResult(question=question, answers=answers))
section_warning = questionnaire_max_answers[(questionnaire, contribution)] < questionnaire_warning_thresholds[questionnaire]
sections.append(ResultSection(questionnaire, contribution.contributor, contribution.label, results, section_warning))
return sections
def calculate_average_grades_and_deviation(course):
"""Determines the final average grade and deviation for a course."""
avg_generic_likert = []
avg_contribution_likert = []
dev_generic_likert = []
dev_contribution_likert = []
avg_generic_grade = []
avg_contribution_grade = []
dev_generic_grade = []
dev_contribution_grade = []
for __, contributor, __, results, __ in calculate_results(course):
average_likert = avg([result.average for result in results if result.question.is_likert_question])
deviation_likert = avg([result.deviation for result in results if result.question.is_likert_question])
average_grade = avg([result.average for result in results if result.question.is_grade_question])
deviation_grade = avg([result.deviation for result in results if result.question.is_grade_question])
(avg_contribution_likert if contributor else avg_generic_likert).append(average_likert)
(dev_contribution_likert if contributor else dev_generic_likert).append(deviation_likert)
(avg_contribution_grade if contributor else avg_generic_grade).append(average_grade)
(dev_contribution_grade if contributor else dev_generic_grade).append(deviation_grade)
# the final total grade will be calculated by the following formula (GP = GRADE_PERCENTAGE, CP = CONTRIBUTION_PERCENTAGE):
# final_likert = CP * likert_answers_about_persons + (1-CP) * likert_answers_about_courses
# final_grade = CP * grade_answers_about_persons + (1-CP) * grade_answers_about_courses
# final = GP * final_grade + (1-GP) * final_likert
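    # Worked example with illustrative numbers (CP = 0.375, GP = 0.8; the real values come from settings):
    #   final_grade_avg  = 0.375 * 2.0 + 0.625 * 3.0 = 2.625
    #   final_likert_avg = 0.375 * 2.4 + 0.625 * 2.0 = 2.15
    #   final_avg        = 0.8 * 2.625 + 0.2 * 2.15  = 2.53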
final_likert_avg = mix(avg(avg_contribution_likert), avg(avg_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_likert_dev = mix(avg(dev_contribution_likert), avg(dev_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_grade_avg = mix(avg(avg_contribution_grade), avg(avg_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_grade_dev = mix(avg(dev_contribution_grade), avg(dev_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_avg = mix(final_grade_avg, final_likert_avg, settings.GRADE_PERCENTAGE)
final_dev = mix(final_grade_dev, final_likert_dev, settings.GRADE_PERCENTAGE)
return final_avg, final_dev
def has_no_rating_answers(course, contributor, questionnaire):
questions = questionnaire.rating_questions
contribution = Contribution.objects.get(course=course, contributor=contributor)
return RatingAnswerCounter.objects.filter(question__in=questions, contribution=contribution).count() == 0
def color_mix(color1, color2, fraction):
return tuple(
int(round(color1[i] * (1 - fraction) + color2[i] * fraction)) for i in range(3)
)
def get_grade_color(grade):
    # Can happen if no one leaves any grades. Return white because it's the least likely to cause problems.
if grade is None:
return (255, 255, 255)
grade = round(grade, 1)
next_lower = int(grade)
next_higher = int(ceil(grade))
return color_mix(GRADE_COLORS[next_lower], GRADE_COLORS[next_higher], grade - next_lower)
def get_deviation_color(deviation):
if deviation is None:
return (255, 255, 255)
capped_deviation = min(deviation, 2.0) # values above that are very uncommon in practice
val = int(255 - capped_deviation * 60) # tweaked to look good
return (val, val, val)
```
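To see the grade-to-color interpolation from `get_grade_color` in isolation, here is a self-contained sketch that copies the `GRADE_COLORS` table and the two helpers above (duplicated rather than imported, so it runs without a Django setup):
```python
from math import ceil

GRADE_COLORS = {
    1: (136, 191, 74),
    2: (187, 209, 84),
    3: (239, 226, 88),
    4: (242, 158, 88),
    5: (235, 89, 90),
}

def color_mix(color1, color2, fraction):
    # Linear interpolation per RGB channel.
    return tuple(int(round(color1[i] * (1 - fraction) + color2[i] * fraction)) for i in range(3))

def get_grade_color(grade):
    if grade is None:
        return (255, 255, 255)
    grade = round(grade, 1)
    next_lower = int(grade)
    next_higher = int(ceil(grade))
    return color_mix(GRADE_COLORS[next_lower], GRADE_COLORS[next_higher], grade - next_lower)

# A grade of 2.4 sits 40% of the way between the colors for 2 and 3.
print(get_grade_color(2.4))  # (208, 216, 86)
```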
#### File: evap/results/views.py
```python
from collections import OrderedDict, namedtuple
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render
from django.contrib.auth.decorators import login_required
from evap.evaluation.models import Semester, Degree, Contribution
from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult
@login_required
def index(request):
semesters = Semester.get_all_with_published_courses()
return render(request, "results_index.html", dict(semesters=semesters))
@login_required
def semester_detail(request, semester_id):
semester = get_object_or_404(Semester, id=semester_id)
if request.user.is_reviewer:
courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees"))
else:
courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
courses = [course for course in courses if course.can_user_see_course(request.user)]
# Annotate each course object with its grades.
for course in courses:
course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
courses_by_degree = OrderedDict()
for degree in Degree.objects.all():
courses_by_degree[degree] = CourseTuple([], [])
for course in courses:
if course.is_single_result:
for degree in course.degrees.all():
section = calculate_results(course)[0]
result = section.results[0]
courses_by_degree[degree].single_results.append((course, result))
else:
for degree in course.degrees.all():
courses_by_degree[degree].courses.append(course)
template_data = dict(semester=semester, courses_by_degree=courses_by_degree)
return render(request, "results_semester_detail.html", template_data)
@login_required
def course_detail(request, semester_id, course_id):
semester = get_object_or_404(Semester, id=semester_id)
course = get_object_or_404(semester.course_set, id=course_id, semester=semester)
if not course.can_user_see_results(request.user):
raise PermissionDenied
sections = calculate_results(course)
public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
# filter text answers
for section in sections:
results = []
for result in section.results:
if isinstance(result, TextResult):
answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
if answers:
results.append(TextResult(question=result.question, answers=answers))
else:
results.append(result)
section.results[:] = results
# remove empty sections
sections = [section for section in sections if section.results]
# group by contributor
course_sections = []
contributor_sections = OrderedDict()
for section in sections:
if not section.results:
continue
if section.contributor is None:
course_sections.append(section)
else:
contributor_sections.setdefault(section.contributor,
{'total_votes': 0, 'sections': []})['sections'].append(section)
            # Sum up the total votes over all sections for this contributor.
            # If a result is not a RatingResult, add 1 since we assume it is a
            # TextResult or something similar that should be displayed.
contributor_sections[section.contributor]['total_votes'] +=\
sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
# Show a warning if course is still in evaluation (for reviewer preview).
evaluation_warning = course.state != 'published'
# Results for a course might not be visible because there are not enough answers
# but it can still be "published" e.g. to show the comment results to contributors.
# Users who can open the results page see a warning message in this case.
sufficient_votes_warning = not course.can_publish_grades
show_grades = request.user.is_reviewer or course.can_publish_grades
course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
template_data = dict(
course=course,
course_sections=course_sections,
contributor_sections=contributor_sections,
evaluation_warning=evaluation_warning,
sufficient_votes_warning=sufficient_votes_warning,
show_grades=show_grades,
reviewer=request.user.is_reviewer,
contributor=course.is_user_contributor_or_delegate(request.user),
can_download_grades=request.user.can_download_grades,
public_view=public_view)
return render(request, "results_course_detail.html", template_data)
def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
if public_view:
return False
if user.is_reviewer:
return True
contributor = text_answer.contribution.contributor
if text_answer.is_private:
return contributor == user
if text_answer.is_published:
if contributor in represented_users:
return True
if text_answer.contribution.course.contributions.filter(
contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
return True
if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
return True
return False
```
#### File: evap/staff/forms.py
```python
import logging
from django import forms
from django.contrib.auth.models import Group
from django.core.exceptions import SuspiciousOperation, ValidationError
from django.db.models import Q
from django.forms.models import BaseInlineFormSet
from django.forms.widgets import CheckboxSelectMultiple
from django.http.request import QueryDict
from django.utils.text import normalize_newlines
from django.utils.translation import ugettext_lazy as _
from evap.evaluation.forms import UserModelChoiceField, UserModelMultipleChoiceField
from evap.evaluation.models import (Contribution, Course, CourseType, Degree, EmailTemplate, FaqQuestion, FaqSection, Question, Questionnaire,
RatingAnswerCounter, Semester, TextAnswer, UserProfile)
from evap.evaluation.tools import date_to_datetime
logger = logging.getLogger(__name__)
def disable_all_fields(form):
for field in form.fields.values():
field.disabled = True
class ImportForm(forms.Form):
vote_start_datetime = forms.DateTimeField(label=_("Start of evaluation"), localize=True, required=False)
vote_end_date = forms.DateField(label=_("End of evaluation"), localize=True, required=False)
excel_file = forms.FileField(label=_("Excel file"), required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.excel_file_required = False
self.vote_dates_required = False
def clean(self):
if self.excel_file_required and self.cleaned_data['excel_file'] is None:
raise ValidationError(_("Please select an Excel file."))
if self.vote_dates_required:
if self.cleaned_data['vote_start_datetime'] is None or self.cleaned_data['vote_end_date'] is None:
raise ValidationError(_("Please enter an evaluation period."))
class UserImportForm(forms.Form):
excel_file = forms.FileField(label=_("Import from Excel file"), required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.excel_file_required = False
def clean(self):
if self.excel_file_required and self.cleaned_data['excel_file'] is None:
raise ValidationError(_("Please select an Excel file."))
class CourseParticipantCopyForm(forms.Form):
course = forms.ModelChoiceField(Course.objects.all(), empty_label='<empty>', required=False, label=_("Copy from Course"))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.course_selection_required = False
        # Here we split the courses by semester and create option groups (one per semester). We also make sure to include an empty option.
choices = [('', '<empty>')]
for semester in Semester.objects.all():
course_choices = [(course.pk, course.name) for course in Course.objects.filter(semester=semester)]
if course_choices:
choices += [(semester.name, course_choices)]
self.fields['course'].choices = choices
def clean(self):
if self.course_selection_required and self.cleaned_data['course'] is None:
raise ValidationError(_("Please select a course from the dropdown menu."))
class UserBulkDeleteForm(forms.Form):
username_file = forms.FileField(label=_("Username file"))
class SemesterForm(forms.ModelForm):
class Meta:
model = Semester
fields = ("name_de", "name_en")
class DegreeForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["name_de"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["name_en"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["order"].widget = forms.HiddenInput()
class Meta:
model = Degree
fields = "__all__"
def clean(self):
super().clean()
if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:
raise SuspiciousOperation("Deleting degree not allowed")
class CourseTypeForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["name_de"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["name_en"].widget = forms.TextInput(attrs={'class': 'form-control'})
class Meta:
model = CourseType
fields = "__all__"
def clean(self):
super().clean()
if self.cleaned_data.get('DELETE') and not self.instance.can_staff_delete:
raise SuspiciousOperation("Deleting course type not allowed")
class CourseTypeMergeSelectionForm(forms.Form):
main_type = forms.ModelChoiceField(CourseType.objects.all())
other_type = forms.ModelChoiceField(CourseType.objects.all())
def clean(self):
super().clean()
if self.cleaned_data.get('main_type') == self.cleaned_data.get('other_type'):
raise ValidationError(_("You must select two different course types."))
class CourseForm(forms.ModelForm):
general_questions = forms.ModelMultipleChoiceField(
Questionnaire.objects.filter(is_for_contributors=False, obsolete=False),
widget=CheckboxSelectMultiple,
label=_("Questions about the course")
)
semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
    # the following field is needed because auto_now=True on last_modified_time makes the corresponding
    # model field uneditable, so it cannot be displayed in the model form
    # see https://docs.djangoproject.com/en/dev/ref/models/fields/#datefield for details
last_modified_time_2 = forms.DateTimeField(label=_("Last modified"), required=False, localize=True, disabled=True)
    # last_modified_user would usually get a select widget but should be displayed here as a read-only CharField instead
last_modified_user_2 = forms.CharField(label=_("Last modified by"), required=False, disabled=True)
class Meta:
model = Course
fields = ('name_de', 'name_en', 'type', 'degrees', 'is_graded', 'is_private', 'is_required_for_reward', 'vote_start_datetime',
'vote_end_date', 'participants', 'general_questions', 'last_modified_time_2', 'last_modified_user_2', 'semester')
localized_fields = ('vote_start_datetime', 'vote_end_date')
field_classes = {
'participants': UserModelMultipleChoiceField,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['general_questions'].queryset = Questionnaire.objects.filter(is_for_contributors=False).filter(
Q(obsolete=False) | Q(contributions__course=self.instance)).distinct()
self.fields['participants'].queryset = UserProfile.objects.exclude_inactive_users()
if self.instance.general_contribution:
self.fields['general_questions'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]
self.fields['last_modified_time_2'].initial = self.instance.last_modified_time
if self.instance.last_modified_user:
self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name
if self.instance.state in ['in_evaluation', 'evaluated', 'reviewed']:
self.fields['vote_start_datetime'].disabled = True
if not self.instance.can_staff_edit:
# form is used as read-only course view
disable_all_fields(self)
def validate_unique(self):
super().validate_unique()
# name_xy and semester are unique together. This will be treated as a non-field-error since two
# fields are involved. Since we only show the name_xy field to the user, assign that error to this
# field. This hack is not documented, so it might be broken when you are reading this.
for e in self.non_field_errors().as_data():
if e.code == "unique_together" and "unique_check" in e.params:
if "semester" in e.params["unique_check"]:
# The order of the fields is probably determined by the unique_together constraints in the Course class.
name_field = e.params["unique_check"][1]
self.add_error(name_field, e)
def clean(self):
super().clean()
vote_start_datetime = self.cleaned_data.get('vote_start_datetime')
vote_end_date = self.cleaned_data.get('vote_end_date')
if vote_start_datetime and vote_end_date:
if vote_start_datetime.date() > vote_end_date:
self.add_error("vote_start_datetime", "")
self.add_error("vote_end_date", _("The first day of evaluation must be before the last one."))
def save(self, user, *args, **kw):
self.instance.last_modified_user = user
super().save(*args, **kw)
self.instance.general_contribution.questionnaires.set(self.cleaned_data.get('general_questions'))
logger.info('Course "{}" (id {}) was edited by staff member {}.'.format(self.instance, self.instance.id, user.username))
class SingleResultForm(forms.ModelForm):
semester = forms.ModelChoiceField(Semester.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
last_modified_time_2 = forms.DateTimeField(label=_("Last modified"), required=False, localize=True, disabled=True)
last_modified_user_2 = forms.CharField(label=_("Last modified by"), required=False, disabled=True)
event_date = forms.DateField(label=_("Event date"), localize=True)
responsible = UserModelChoiceField(label=_("Responsible"), queryset=UserProfile.objects.exclude_inactive_users())
answer_1 = forms.IntegerField(label=_("# very good"), initial=0)
answer_2 = forms.IntegerField(label=_("# good"), initial=0)
answer_3 = forms.IntegerField(label=_("# neutral"), initial=0)
answer_4 = forms.IntegerField(label=_("# bad"), initial=0)
answer_5 = forms.IntegerField(label=_("# very bad"), initial=0)
class Meta:
model = Course
fields = ('name_de', 'name_en', 'type', 'degrees', 'event_date', 'responsible', 'answer_1', 'answer_2', 'answer_3', 'answer_4', 'answer_5',
'last_modified_time_2', 'last_modified_user_2', 'semester')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['last_modified_time_2'].initial = self.instance.last_modified_time
if self.instance.last_modified_user:
self.fields['last_modified_user_2'].initial = self.instance.last_modified_user.full_name
if self.instance.vote_start_datetime:
self.fields['event_date'].initial = self.instance.vote_start_datetime
if not self.instance.can_staff_edit:
disable_all_fields(self)
if self.instance.pk:
self.fields['responsible'].initial = self.instance.responsible_contributors[0]
answer_counts = dict()
for answer_counter in self.instance.ratinganswer_counters:
answer_counts[answer_counter.answer] = answer_counter.count
for i in range(1, 6):
self.fields['answer_' + str(i)].initial = answer_counts[i]
def save(self, *args, **kw):
user = kw.pop("user")
self.instance.last_modified_user = user
event_date = self.cleaned_data['event_date']
self.instance.vote_start_datetime = date_to_datetime(event_date)
self.instance.vote_end_date = event_date
self.instance.is_graded = False
super().save(*args, **kw)
single_result_questionnaire = Questionnaire.single_result_questionnaire()
single_result_question = single_result_questionnaire.question_set.first()
contribution, created = Contribution.objects.get_or_create(course=self.instance, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
contribution.contributor = self.cleaned_data['responsible']
if created:
contribution.questionnaires.add(single_result_questionnaire)
contribution.save()
# set answers
contribution = Contribution.objects.get(course=self.instance, responsible=True)
total_votes = 0
for i in range(1, 6):
count = self.cleaned_data['answer_' + str(i)]
total_votes += count
RatingAnswerCounter.objects.update_or_create(contribution=contribution, question=single_result_question, answer=i, defaults={'count': count})
self.instance._participant_count = total_votes
self.instance._voter_count = total_votes
# change state to "reviewed"
# works only for single_results so the course and its contribution must be saved first
self.instance.single_result_created()
self.instance.save()
class ContributionForm(forms.ModelForm):
contributor = forms.ModelChoiceField(queryset=UserProfile.objects.exclude_inactive_users())
responsibility = forms.ChoiceField(widget=forms.RadioSelect(), choices=Contribution.RESPONSIBILITY_CHOICES)
course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
questionnaires = forms.ModelMultipleChoiceField(
Questionnaire.objects.filter(is_for_contributors=True, obsolete=False),
required=False,
widget=CheckboxSelectMultiple,
label=_("Questionnaires")
)
does_not_contribute = forms.BooleanField(required=False, label=_("Does not contribute to course"))
class Meta:
model = Contribution
fields = ('course', 'contributor', 'questionnaires', 'order', 'responsibility', 'comment_visibility', 'label')
widgets = {'order': forms.HiddenInput(), 'comment_visibility': forms.RadioSelect(choices=Contribution.COMMENT_VISIBILITY_CHOICES)}
field_classes = {
'contributor': UserModelChoiceField,
}
def __init__(self, *args, course=None, **kwargs):
self.course = course
# work around https://code.djangoproject.com/ticket/25880
if self.course is None:
assert 'instance' in kwargs
self.course = kwargs['instance'].course
super().__init__(*args, **kwargs)
self.fields['contributor'].widget.attrs['class'] = 'form-control'
self.fields['label'].widget.attrs['class'] = 'form-control'
if self.instance.responsible:
self.fields['responsibility'].initial = Contribution.IS_RESPONSIBLE
elif self.instance.can_edit:
self.fields['responsibility'].initial = Contribution.IS_EDITOR
else:
self.fields['responsibility'].initial = Contribution.IS_CONTRIBUTOR
self.fields['questionnaires'].queryset = Questionnaire.objects.filter(is_for_contributors=True).filter(
Q(obsolete=False) | Q(contributions__course=self.course)).distinct()
if self.instance.pk:
self.fields['does_not_contribute'].initial = not self.instance.questionnaires.exists()
if not self.course.can_staff_edit:
# form is used as read-only course view
disable_all_fields(self)
def clean(self):
if not self.cleaned_data.get('does_not_contribute') and not self.cleaned_data.get('questionnaires'):
self.add_error('does_not_contribute', _("Select either this option or at least one questionnaire!"))
def save(self, *args, **kwargs):
responsibility = self.cleaned_data['responsibility']
is_responsible = responsibility == Contribution.IS_RESPONSIBLE
is_editor = responsibility == Contribution.IS_EDITOR
self.instance.responsible = is_responsible
self.instance.can_edit = is_responsible or is_editor
if is_responsible:
self.instance.comment_visibility = Contribution.ALL_COMMENTS
return super().save(*args, **kwargs)
class CourseEmailForm(forms.Form):
recipients = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(), choices=EmailTemplate.EMAIL_RECIPIENTS, label=_("Send email to"))
subject = forms.CharField(label=_("Subject"))
body = forms.CharField(widget=forms.Textarea(), label=_("Message"))
def __init__(self, *args, course, export=False, **kwargs):
super().__init__(*args, **kwargs)
self.template = EmailTemplate()
self.course = course
self.fields['subject'].required = not export
self.fields['body'].required = not export
def clean(self):
self.recipient_groups = self.cleaned_data.get('recipients')
if not self.recipient_groups:
raise forms.ValidationError(_("No recipient selected. Choose at least one group of recipients."))
return self.cleaned_data
def email_addresses(self):
recipients = self.template.recipient_list_for_course(self.course, self.recipient_groups, filter_users_in_cc=False)
return set(user.email for user in recipients if user.email)
def send(self, request):
self.template.subject = self.cleaned_data.get('subject')
self.template.body = self.cleaned_data.get('body')
EmailTemplate.send_to_users_in_courses(self.template, [self.course], self.recipient_groups, use_cc=True, request=request)
class QuestionnaireForm(forms.ModelForm):
class Meta:
model = Questionnaire
exclude = ()
widgets = {'index': forms.HiddenInput()}
class AtLeastOneFormSet(BaseInlineFormSet):
def clean(self):
super().clean()
count = 0
for form in self.forms:
if form.cleaned_data and not form.cleaned_data.get('DELETE', False):
count += 1
if count < 1:
raise forms.ValidationError(_('You must have at least one of these.'))
class ContributionFormSet(AtLeastOneFormSet):
def __init__(self, data=None, *args, **kwargs):
data = self.handle_moved_contributors(data, **kwargs)
super().__init__(data, *args, **kwargs)
self.queryset = self.instance.contributions.exclude(contributor=None)
def handle_deleted_and_added_contributions(self):
"""
        If a contributor got removed and added in the same formset, Django would usually complain
when validating the added form, as it does not check whether the existing contribution was deleted.
This method works around that.
"""
for form_with_errors in self.forms:
if not form_with_errors.errors:
continue
for deleted_form in self.forms:
if not deleted_form.cleaned_data or not deleted_form.cleaned_data.get('DELETE'):
continue
if not deleted_form.cleaned_data['contributor'] == form_with_errors.cleaned_data['contributor']:
continue
form_with_errors.instance = deleted_form.instance
# we modified the form, so we have to force re-validation
form_with_errors.full_clean()
def handle_moved_contributors(self, data, **kwargs):
"""
Work around https://code.djangoproject.com/ticket/25139
Basically, if the user assigns a contributor who already has a contribution to a new contribution,
this moves the contributor (and all the data of the new form they got assigned to) back to the original contribution.
"""
if data is None or 'instance' not in kwargs:
return data
course = kwargs['instance']
total_forms = int(data['contributions-TOTAL_FORMS'])
for i in range(0, total_forms):
prefix = "contributions-" + str(i) + "-"
current_id = data.get(prefix + 'id', '')
contributor = data.get(prefix + 'contributor', '')
if contributor == '':
continue
# find the contribution that the contributor had before the user messed with it
try:
previous_id = str(Contribution.objects.get(contributor=contributor, course=course).id)
except Contribution.DoesNotExist:
continue
if current_id == previous_id:
continue
# find the form with that previous contribution and then swap the contributions
for j in range(0, total_forms):
other_prefix = "contributions-" + str(j) + "-"
other_id = data[other_prefix + 'id']
if other_id == previous_id:
# swap all the data. the contribution's ids stay in place.
data2 = data.copy()
data = QueryDict(mutable=True)
for key, value in data2.lists():
if not key.endswith('-id'):
key = key.replace(prefix, '%temp%').replace(other_prefix, prefix).replace('%temp%', other_prefix)
data.setlist(key, value)
break
return data
def clean(self):
self.handle_deleted_and_added_contributions()
super().clean()
found_contributor = set()
count_responsible = 0
for form in self.forms:
if not form.cleaned_data or form.cleaned_data.get('DELETE'):
continue
contributor = form.cleaned_data.get('contributor')
if contributor is None:
raise forms.ValidationError(_('Please select the name of each added contributor. Remove empty rows if necessary.'))
if contributor and contributor in found_contributor:
raise forms.ValidationError(_('Duplicate contributor found. Each contributor should only be used once.'))
elif contributor:
found_contributor.add(contributor)
if form.cleaned_data.get('responsibility') == 'RESPONSIBLE':
count_responsible += 1
if count_responsible < 1:
raise forms.ValidationError(_('No responsible contributors found.'))
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["order"].widget = forms.HiddenInput()
self.fields['text_de'].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields['text_en'].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields['type'].widget.attrs['class'] = 'form-control'
class QuestionnairesAssignForm(forms.Form):
def __init__(self, *args, course_types, **kwargs):
super().__init__(*args, **kwargs)
for course_type in course_types:
self.fields[course_type.name] = forms.ModelMultipleChoiceField(required=False, queryset=Questionnaire.objects.filter(obsolete=False, is_for_contributors=False))
contributor_questionnaires = Questionnaire.objects.filter(obsolete=False, is_for_contributors=True)
self.fields['Responsible contributor'] = forms.ModelMultipleChoiceField(label=_('Responsible contributor'), required=False, queryset=contributor_questionnaires)
class UserForm(forms.ModelForm):
is_staff = forms.BooleanField(required=False, label=_("Staff user"))
is_grade_publisher = forms.BooleanField(required=False, label=_("Grade publisher"))
is_reviewer = forms.BooleanField(required=False, label=_("Reviewer"))
is_inactive = forms.BooleanField(required=False, label=_("Inactive"))
courses_participating_in = forms.ModelMultipleChoiceField(None, required=False, label=_("Courses participating in (active semester)"))
class Meta:
model = UserProfile
fields = ('username', 'title', 'first_name', 'last_name', 'email', 'delegates', 'cc_users')
field_classes = {
'delegates': UserModelMultipleChoiceField,
'cc_users': UserModelMultipleChoiceField,
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
courses_in_active_semester = Course.objects.filter(semester=Semester.active_semester())
excludes = [x.id for x in courses_in_active_semester if x.is_single_result]
courses_in_active_semester = courses_in_active_semester.exclude(id__in=excludes)
self.fields['courses_participating_in'].queryset = courses_in_active_semester
if self.instance.pk:
self.fields['courses_participating_in'].initial = courses_in_active_semester.filter(participants=self.instance)
self.fields['is_staff'].initial = self.instance.is_staff
self.fields['is_grade_publisher'].initial = self.instance.is_grade_publisher
self.fields['is_reviewer'].initial = self.instance.is_reviewer
self.fields['is_inactive'].initial = not self.instance.is_active
def clean_username(self):
username = self.cleaned_data.get('username')
user_with_same_name = UserProfile.objects.filter(username__iexact=username)
# make sure we don't take the instance itself into account
if self.instance and self.instance.pk:
user_with_same_name = user_with_same_name.exclude(pk=self.instance.pk)
if user_with_same_name.exists():
raise forms.ValidationError(_("A user with the username '%s' already exists") % username)
return username.lower()
def clean_email(self):
email = self.cleaned_data.get('email')
if email is None:
return
user_with_same_email = UserProfile.objects.filter(email__iexact=email)
# make sure we don't take the instance itself into account
if self.instance and self.instance.pk:
user_with_same_email = user_with_same_email.exclude(pk=self.instance.pk)
if user_with_same_email.exists():
raise forms.ValidationError(_("A user with the email '%s' already exists") % email)
return email.lower()
def save(self, *args, **kw):
super().save(*args, **kw)
new_course_list = list(self.instance.courses_participating_in.exclude(semester=Semester.active_semester())) + list(self.cleaned_data.get('courses_participating_in'))
self.instance.courses_participating_in.set(new_course_list)
staff_group = Group.objects.get(name="Staff")
grade_publisher_group = Group.objects.get(name="Grade publisher")
reviewer_group = Group.objects.get(name="Reviewer")
if self.cleaned_data.get('is_staff'):
self.instance.groups.add(staff_group)
else:
self.instance.groups.remove(staff_group)
if self.cleaned_data.get('is_grade_publisher'):
self.instance.groups.add(grade_publisher_group)
else:
self.instance.groups.remove(grade_publisher_group)
if self.cleaned_data.get('is_reviewer') and not self.cleaned_data.get('is_staff'):
self.instance.groups.add(reviewer_group)
else:
self.instance.groups.remove(reviewer_group)
self.instance.is_active = not self.cleaned_data.get('is_inactive')
self.instance.save()
class UserMergeSelectionForm(forms.Form):
main_user = UserModelChoiceField(UserProfile.objects.all())
other_user = UserModelChoiceField(UserProfile.objects.all())
class LotteryForm(forms.Form):
number_of_winners = forms.IntegerField(label=_("Number of Winners"), initial=3)
class EmailTemplateForm(forms.ModelForm):
class Meta:
model = EmailTemplate
exclude = ("name", )
class FaqSectionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["title_de"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["title_en"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["order"].widget = forms.HiddenInput()
class Meta:
model = FaqSection
exclude = ()
class FaqQuestionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["question_de"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["question_en"].widget = forms.TextInput(attrs={'class': 'form-control'})
self.fields["answer_de"].widget.attrs['class'] = 'form-control'
self.fields["answer_en"].widget.attrs['class'] = 'form-control'
self.fields["order"].widget = forms.HiddenInput()
class Meta:
model = FaqQuestion
exclude = ("section",)
class TextAnswerForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['original_answer'].disabled = True
class Meta:
model = TextAnswer
fields = ("original_answer", "reviewed_answer",)
def clean_reviewed_answer(self):
reviewed_answer = normalize_newlines(self.cleaned_data.get('reviewed_answer'))
if reviewed_answer == normalize_newlines(self.instance.original_answer) or reviewed_answer == '':
return None
return reviewed_answer
class ExportSheetForm(forms.Form):
def __init__(self, semester, *args, **kwargs):
super(ExportSheetForm, self).__init__(*args, **kwargs)
course_types = CourseType.objects.filter(courses__semester=semester).distinct()
course_type_tuples = [(ct.pk, ct.name) for ct in course_types]
self.fields['selected_course_types'] = forms.MultipleChoiceField(
choices=course_type_tuples,
required=True,
widget=forms.CheckboxSelectMultiple(),
label=_("Course types")
)
```
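The prefix swapping inside `ContributionFormSet.handle_moved_contributors` is easier to follow on plain dictionaries. Below is a simplified sketch of the same three-step replacement, using a plain `dict` instead of Django's `QueryDict` and hypothetical form data:
```python
def swap_form_prefixes(data, prefix, other_prefix):
    """Move every non-id key from one form prefix to the other, mirroring the
    '%temp%' trick above so that the two renames cannot collide."""
    swapped = {}
    for key, value in data.items():
        if not key.endswith('-id'):
            key = (key.replace(prefix, '%temp%')
                      .replace(other_prefix, prefix)
                      .replace('%temp%', other_prefix))
        swapped[key] = value
    return swapped

data = {
    'contributions-0-id': '10', 'contributions-0-contributor': '7',
    'contributions-1-id': '11', 'contributions-1-contributor': '8',
}
print(swap_form_prefixes(data, 'contributions-0-', 'contributions-1-'))
# The contributor values change places while both '-id' keys stay where they are.
```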
#### File: staff/tests/test_views.py
```python
import datetime
import os
import glob
from django.conf import settings
from django.contrib.auth.models import Group
from django.core import mail
from django.urls import reverse
from model_mommy import mommy
import xlrd
from evap.evaluation.models import Semester, UserProfile, Course, CourseType, TextAnswer, Contribution, \
Questionnaire, Question, EmailTemplate, Degree, FaqSection, FaqQuestion, \
RatingAnswerCounter
from evap.evaluation.tests.tools import FuzzyInt, WebTest, ViewTest
from evap.staff.tools import generate_import_filename
def helper_delete_all_import_files(user_id):
file_filter = generate_import_filename(user_id, "*")
for filename in glob.glob(file_filter):
os.remove(filename)
# Staff - Sample Files View
class TestDownloadSampleXlsView(ViewTest):
test_users = ['staff']
url = '/staff/download_sample_xls/sample.xls'
email_placeholder = "institution.com"
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_sample_file_correctness(self):
page = self.app.get(self.url, user='staff')
found_institution_domain = False
book = xlrd.open_workbook(file_contents=page.body)
for sheet in book.sheets():
for row in sheet.get_rows():
for cell in row:
value = cell.value
self.assertNotIn(self.email_placeholder, value)
if settings.INSTITUTION_EMAIL_DOMAINS[0] in value:
found_institution_domain = True
self.assertTrue(found_institution_domain)
# Staff - Root View
class TestStaffIndexView(ViewTest):
test_users = ['staff']
url = '/staff/'
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
# Staff - FAQ View
class TestStaffFAQView(ViewTest):
url = '/staff/faq/'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
class TestStaffFAQEditView(ViewTest):
url = '/staff/faq/1'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
section = mommy.make(FaqSection, pk=1)
mommy.make(FaqQuestion, section=section)
# Staff - User Views
class TestUserIndexView(ViewTest):
url = '/staff/user/'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_num_queries_is_constant(self):
"""
        Ensures that the number of queries in the user list is constant
        and not linear in the number of users.
"""
num_users = 50
semester = mommy.make(Semester, is_archived=True)
course = mommy.make(Course, state="published", semester=semester, _participant_count=1, _voter_count=1) # this triggers more checks in UserProfile.can_staff_delete
mommy.make(UserProfile, _quantity=num_users, courses_participating_in=[course])
with self.assertNumQueries(FuzzyInt(0, num_users - 1)):
self.app.get(self.url, user="staff")
class TestUserCreateView(ViewTest):
url = "/staff/user/create"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_user_is_created(self):
page = self.get_assert_200(self.url, "staff")
form = page.forms["user-form"]
form["username"] = "mflkd862xmnbo5"
form["first_name"] = "asd"
form["last_name"] = "asd"
form["email"] = "<EMAIL>"
form.submit()
self.assertEqual(UserProfile.objects.order_by("pk").last().username, "mflkd862xmnbo5")
class TestUserEditView(ViewTest):
url = "/staff/user/3/edit"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(UserProfile, pk=3)
    def test_user_edit(self):
page = self.get_assert_200(self.url, "staff")
form = page.forms["user-form"]
form["username"] = "lfo9e7bmxp1xi"
form.submit()
self.assertTrue(UserProfile.objects.filter(username='lfo9e7bmxp1xi').exists())
class TestUserMergeSelectionView(ViewTest):
url = "/staff/user/merge"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(UserProfile)
class TestUserMergeView(ViewTest):
url = "/staff/user/3/merge/4"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(UserProfile, pk=3)
mommy.make(UserProfile, pk=4)
class TestUserBulkDeleteView(ViewTest):
url = '/staff/user/bulk_delete'
test_users = ['staff']
filename = os.path.join(settings.BASE_DIR, 'staff/fixtures/test_user_bulk_delete_file.txt')
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_testrun_deletes_no_users(self):
page = self.app.get(self.url, user='staff')
form = page.forms['user-bulk-delete-form']
form['username_file'] = (self.filename,)
mommy.make(UserProfile, is_active=False)
users_before = UserProfile.objects.count()
reply = form.submit(name='operation', value='test')
# Not getting redirected after.
self.assertEqual(reply.status_code, 200)
# No user got deleted.
self.assertEqual(users_before, UserProfile.objects.count())
def test_deletes_users(self):
mommy.make(UserProfile, username='testuser1')
mommy.make(UserProfile, username='testuser2')
contribution = mommy.make(Contribution)
mommy.make(UserProfile, username='contributor', contributions=[contribution])
page = self.app.get(self.url, user='staff')
form = page.forms["user-bulk-delete-form"]
form["username_file"] = (self.filename,)
user_count_before = UserProfile.objects.count()
reply = form.submit(name="operation", value="bulk_delete")
# Getting redirected after.
self.assertEqual(reply.status_code, 302)
# Assert only one user got deleted and one was marked inactive
self.assertTrue(UserProfile.objects.filter(username='testuser1').exists())
self.assertFalse(UserProfile.objects.filter(username='testuser2').exists())
self.assertTrue(UserProfile.objects.filter(username='staff').exists())
self.assertTrue(UserProfile.objects.filter(username='contributor').exists())
self.assertFalse(UserProfile.objects.exclude_inactive_users().filter(username='contributor').exists())
self.assertEqual(UserProfile.objects.count(), user_count_before - 1)
self.assertEqual(UserProfile.objects.exclude_inactive_users().count(), user_count_before - 2)
class TestUserImportView(ViewTest):
url = "/staff/user/import"
test_users = ["staff"]
filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/valid_user_import.xls")
filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_user_import.xls")
filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random")
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
def test_success_handling(self):
"""
        Tests whether a valid Excel file is correctly tested and imported and whether the success messages are displayed
"""
page = self.app.get(self.url, user='staff')
form = page.forms["user-import-form"]
form["excel_file"] = (self.filename_valid,)
page = form.submit(name="operation", value="test")
self.assertContains(page, 'The import run will create 2 user(s):<br><NAME> (lucilia.manilium)<br><NAME> (bastius.quid.ext)')
self.assertContains(page, 'Import previously uploaded file')
form = page.forms["user-import-form"]
form.submit(name="operation", value="import")
page = self.app.get(self.url, user='staff')
self.assertNotContains(page, 'Import previously uploaded file')
def test_error_handling(self):
"""
        Tests whether errors reported by the importer are displayed
"""
page = self.app.get(self.url, user='staff')
original_user_count = UserProfile.objects.count()
form = page.forms["user-import-form"]
form["excel_file"] = (self.filename_invalid,)
reply = form.submit(name="operation", value="test")
self.assertContains(reply, 'Sheet "Sheet1", row 2: Email address is missing.')
self.assertContains(reply, 'Errors occurred while parsing the input data. No data was imported.')
self.assertNotContains(reply, 'Import previously uploaded file')
self.assertEqual(UserProfile.objects.count(), original_user_count)
def test_warning_handling(self):
"""
        Tests whether warnings reported by the importer are displayed
"""
mommy.make(UserProfile, email="<EMAIL>", username="lucilia.manilium")
page = self.app.get(self.url, user='staff')
form = page.forms["user-import-form"]
form["excel_file"] = (self.filename_valid,)
reply = form.submit(name="operation", value="test")
self.assertContains(reply, "The existing user would be overwritten with the following data:<br>"
" - lucilia.manilium ( None None, <EMAIL>) (existing)<br>"
" - lucilia.manilium ( <NAME>, <EMAIL>) (new)")
def test_suspicious_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["user-import-form"]
form["excel_file"] = (self.filename_valid,)
# Should throw SuspiciousOperation Exception.
reply = form.submit(name="operation", value="hackit", expect_errors=True)
self.assertEqual(reply.status_code, 400)
def test_invalid_upload_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["user-import-form"]
page = form.submit(name="operation", value="test")
self.assertContains(page, 'Please select an Excel file')
self.assertNotContains(page, 'Import previously uploaded file')
def test_invalid_import_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["user-import-form"]
reply = form.submit(name="operation", value="import", expect_errors=True)
self.assertEqual(reply.status_code, 400)
# Staff - Semester Views
class TestSemesterView(ViewTest):
url = '/staff/semester/1'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1)
cls.course1 = mommy.make(Course, name_de="A - Course 1", name_en="B - Course 1", semester=cls.semester)
cls.course2 = mommy.make(Course, name_de="B - Course 2", name_en="A - Course 2", semester=cls.semester)
mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
def test_view_list_sorting(self):
page = self.app.get(self.url, user='staff', extra_environ={'HTTP_ACCEPT_LANGUAGE': 'en'}).body.decode("utf-8")
position_course1 = page.find("Course 1")
position_course2 = page.find("Course 2")
self.assertGreater(position_course1, position_course2)
page = self.app.get(self.url, user='staff', extra_environ={'HTTP_ACCEPT_LANGUAGE': 'de'}).body.decode("utf-8")
position_course1 = page.find("Course 1")
position_course2 = page.find("Course 2")
self.assertLess(position_course1, position_course2)
class TestSemesterCreateView(ViewTest):
url = '/staff/semester/create'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_create(self):
name_de = 'name_de'
name_en = 'name_en'
response = self.app.get(self.url, user='staff')
form = response.forms['semester-form']
form['name_de'] = name_de
form['name_en'] = name_en
form.submit()
self.assertEqual(Semester.objects.filter(name_de=name_de, name_en=name_en).count(), 1)
class TestSemesterEditView(ViewTest):
url = '/staff/semester/1/edit'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1, name_de='old_name', name_en='old_name')
def test_name_change(self):
new_name_de = 'new_name_de'
new_name_en = 'new_name_en'
self.assertNotEqual(self.semester.name_de, new_name_de)
self.assertNotEqual(self.semester.name_en, new_name_en)
response = self.app.get(self.url, user='staff')
form = response.forms['semester-form']
form['name_de'] = new_name_de
form['name_en'] = new_name_en
form.submit()
self.semester.refresh_from_db()
self.assertEqual(self.semester.name_de, new_name_de)
self.assertEqual(self.semester.name_en, new_name_en)
class TestSemesterDeleteView(ViewTest):
url = '/staff/semester/delete'
csrf_checks = False
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_failure(self):
semester = mommy.make(Semester, pk=1)
mommy.make(Course, semester=semester, state='in_evaluation', voters=[mommy.make(UserProfile)])
self.assertFalse(semester.can_staff_delete)
response = self.app.post(self.url, params={'semester_id': 1}, user='staff', expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue(Semester.objects.filter(pk=1).exists())
def test_success(self):
semester = mommy.make(Semester, pk=1)
self.assertTrue(semester.can_staff_delete)
response = self.app.post(self.url, params={'semester_id': 1}, user='staff')
self.assertEqual(response.status_code, 200)
self.assertFalse(Semester.objects.filter(pk=1).exists())
class TestSemesterLotteryView(ViewTest):
url = '/staff/semester/1/lottery'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(Semester, pk=1)
class TestSemesterAssignView(ViewTest):
url = '/staff/semester/1/assign'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1)
lecture_type = mommy.make(CourseType, name_de="Vorlesung", name_en="Lecture")
seminar_type = mommy.make(CourseType, name_de="Seminar", name_en="Seminar")
cls.questionnaire = mommy.make(Questionnaire)
course1 = mommy.make(Course, semester=cls.semester, type=seminar_type)
mommy.make(Contribution, contributor=mommy.make(UserProfile), course=course1,
responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
course2 = mommy.make(Course, semester=cls.semester, type=lecture_type)
mommy.make(Contribution, contributor=mommy.make(UserProfile), course=course2,
responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
def test_assign_questionnaires(self):
page = self.app.get(self.url, user="staff")
assign_form = page.forms["questionnaire-assign-form"]
assign_form['Seminar'] = [self.questionnaire.pk]
assign_form['Lecture'] = [self.questionnaire.pk]
page = assign_form.submit().follow()
for course in self.semester.course_set.all():
self.assertEqual(course.general_contribution.questionnaires.count(), 1)
self.assertEqual(course.general_contribution.questionnaires.get(), self.questionnaire)
class TestSemesterTodoView(ViewTest):
url = '/staff/semester/1/todo'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1)
def test_todo(self):
course = mommy.make(Course, semester=self.semester, state='prepared', name_en='name_to_find', name_de='name_to_find')
user = mommy.make(UserProfile, username='user_to_find')
mommy.make(Contribution, course=course, contributor=user, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
response = self.app.get(self.url, user='staff')
self.assertContains(response, 'user_to_find')
self.assertContains(response, 'name_to_find')
class TestSemesterImportView(ViewTest):
url = "/staff/semester/1/import"
test_users = ["staff"]
filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/test_enrollment_data.xls")
filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_enrollment_data.xls")
filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random")
@classmethod
def setUpTestData(cls):
mommy.make(Semester, pk=1)
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
def test_import_valid_file(self):
mommy.make(CourseType, name_de="Vorlesung", name_en="Vorlesung")
mommy.make(CourseType, name_de="Seminar", name_en="Seminar")
original_user_count = UserProfile.objects.count()
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
form["excel_file"] = (self.filename_valid,)
page = form.submit(name="operation", value="test")
self.assertEqual(UserProfile.objects.count(), original_user_count)
form = page.forms["semester-import-form"]
form['vote_start_datetime'] = "2000-01-01 00:00:00"
form['vote_end_date'] = "2012-01-01"
form.submit(name="operation", value="import")
self.assertEqual(UserProfile.objects.count(), original_user_count + 23)
courses = Course.objects.all()
self.assertEqual(len(courses), 23)
for course in courses:
responsibles_count = Contribution.objects.filter(course=course, responsible=True).count()
self.assertEqual(responsibles_count, 1)
check_student = UserProfile.objects.get(username="diam.synephebos")
self.assertEqual(check_student.first_name, "Diam")
self.assertEqual(check_student.email, "<EMAIL>")
check_contributor = UserProfile.objects.get(username="sanctus.aliquyam.ext")
self.assertEqual(check_contributor.first_name, "Sanctus")
self.assertEqual(check_contributor.last_name, "Aliquyam")
self.assertEqual(check_contributor.email, "<EMAIL>")
def test_error_handling(self):
"""
Tests whether errors given from the importer are displayed
"""
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
form["excel_file"] = (self.filename_invalid,)
reply = form.submit(name="operation", value="test")
        self.assertContains(reply, 'Sheet "MA Belegungen", row 3: The users\'s data (email: <EMAIL>) differs from it\'s data in a previous row.')
self.assertContains(reply, 'Sheet "MA Belegungen", row 7: Email address is missing.')
self.assertContains(reply, 'Sheet "MA Belegungen", row 10: Email address is missing.')
self.assertContains(reply, 'The imported data contains two email addresses with the same username')
self.assertContains(reply, 'Errors occurred while parsing the input data. No data was imported.')
self.assertNotContains(page, 'Import previously uploaded file')
def test_warning_handling(self):
"""
Tests whether warnings given from the importer are displayed
"""
mommy.make(UserProfile, email="<EMAIL>", username="lucilia.manilium")
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
form["excel_file"] = (self.filename_valid,)
reply = form.submit(name="operation", value="test")
self.assertContains(reply, "The existing user would be overwritten with the following data:<br>"
" - lucilia.manilium ( None None, <EMAIL>) (existing)<br>"
" - lucilia.manilium ( <NAME>, <EMAIL>) (new)")
def test_suspicious_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
form["excel_file"] = (self.filename_valid,)
# Should throw SuspiciousOperation Exception.
reply = form.submit(name="operation", value="hackit", expect_errors=True)
self.assertEqual(reply.status_code, 400)
def test_invalid_upload_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
page = form.submit(name="operation", value="test")
self.assertContains(page, 'Please select an Excel file')
self.assertNotContains(page, 'Import previously uploaded file')
def test_invalid_import_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
# invalid because no file has been uploaded previously (and the button doesn't even exist)
reply = form.submit(name="operation", value="import", expect_errors=True)
self.assertEqual(reply.status_code, 400)
def test_missing_evaluation_period(self):
mommy.make(CourseType, name_de="Vorlesung", name_en="Vorlesung")
mommy.make(CourseType, name_de="Seminar", name_en="Seminar")
page = self.app.get(self.url, user='staff')
form = page.forms["semester-import-form"]
form["excel_file"] = (self.filename_valid,)
page = form.submit(name="operation", value="test")
form = page.forms["semester-import-form"]
page = form.submit(name="operation", value="import")
self.assertContains(page, 'Please enter an evaluation period')
self.assertContains(page, 'Import previously uploaded file')
class TestSemesterExportView(ViewTest):
url = '/staff/semester/1/export'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1)
cls.course_type = mommy.make(CourseType)
cls.course = mommy.make(Course, type=cls.course_type, semester=cls.semester)
def test_view_excel_file_sorted(self):
course1 = mommy.make(Course, state='published', type=self.course_type,
name_de='A - Course1', name_en='B - Course1', semester=self.semester)
course2 = mommy.make(Course, state='published', type=self.course_type,
name_de='B - Course2', name_en='A - Course2', semester=self.semester)
mommy.make(Contribution, course=course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
page = self.app.get(self.url, user='staff')
form = page.forms["semester-export-form"]
form.set('form-0-selected_course_types', 'id_form-0-selected_course_types_0')
form.set('include_not_enough_answers', 'on')
response_de = form.submit(extra_environ={'HTTP_ACCEPT_LANGUAGE': 'de'})
response_en = form.submit(extra_environ={'HTTP_ACCEPT_LANGUAGE': 'en'})
# Load responses as Excel files and check for correct sorting
workbook = xlrd.open_workbook(file_contents=response_de.content)
self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A - Course1")
self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B - Course2")
workbook = xlrd.open_workbook(file_contents=response_en.content)
self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A - Course2")
self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B - Course1")
def test_view_downloads_excel_file(self):
page = self.app.get(self.url, user='staff')
form = page.forms["semester-export-form"]
# Check one course type.
form.set('form-0-selected_course_types', 'id_form-0-selected_course_types_0')
response = form.submit()
# Load response as Excel file and check its heading for correctness.
workbook = xlrd.open_workbook(file_contents=response.content)
self.assertEqual(workbook.sheets()[0].row_values(0)[0],
'Evaluation {0}\n\n{1}'.format(self.semester.name, ", ".join([self.course_type.name])))
class TestSemesterRawDataExportView(ViewTest):
url = '/staff/semester/1/raw_export'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.student_user = mommy.make(UserProfile, username='student')
cls.semester = mommy.make(Semester, pk=1)
cls.course_type = mommy.make(CourseType, name_en="Type")
cls.course1 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],
voters=[cls.student_user], name_de="Veranstaltung 1", name_en="Course 1")
cls.course2 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],
name_de="Veranstaltung 2", name_en="Course 2")
mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
def test_view_downloads_csv_file(self):
response = self.app.get(self.url, user='staff')
expected_content = (
"Name;Degrees;Type;Single result;State;#Voters;#Participants;#Comments;Average grade\r\n"
"Course 1;;Type;False;new;1;1;0;\r\n"
"Course 2;;Type;False;new;0;1;0;\r\n"
)
self.assertEqual(response.content, expected_content.encode("utf-8"))
class TestSemesterParticipationDataExportView(ViewTest):
url = '/staff/semester/1/participation_export'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.student_user = mommy.make(UserProfile, username='student')
cls.semester = mommy.make(Semester, pk=1)
cls.course_type = mommy.make(CourseType, name_en="Type")
cls.course1 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],
voters=[cls.student_user], name_de="Veranstaltung 1", name_en="Course 1", is_required_for_reward=True)
cls.course2 = mommy.make(Course, type=cls.course_type, semester=cls.semester, participants=[cls.student_user],
name_de="Veranstaltung 2", name_en="Course 2", is_required_for_reward=False)
mommy.make(Contribution, course=cls.course1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=cls.course2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
def test_view_downloads_csv_file(self):
response = self.app.get(self.url, user='staff')
expected_content = (
"Username;Can use reward points;#Required courses voted for;#Required courses;#Optional courses voted for;"
"#Optional courses;Earned reward points\r\n"
"student;False;1;1;0;1;False\r\n")
self.assertEqual(response.content, expected_content.encode("utf-8"))
class TestCourseOperationView(ViewTest):
url = '/staff/semester/1/courseoperation'
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.semester = mommy.make(Semester, pk=1)
def helper_semester_state_views(self, course, old_state, new_state):
operation = old_state + "->" + new_state
page = self.app.get("/staff/semester/1", user="staff")
form = page.forms["form_" + old_state]
self.assertIn(course.state, old_state)
form['course'] = course.pk
response = form.submit('operation', value=operation)
form = response.forms["course-operation-form"]
response = form.submit()
self.assertIn("Successfully", str(response))
self.assertEqual(Course.objects.get(pk=course.pk).state, new_state)
"""
The following tests make sure the course state transitions are triggerable via the UI.
"""
def test_semester_publish(self):
course = mommy.make(Course, semester=self.semester, state='reviewed')
self.helper_semester_state_views(course, "reviewed", "published")
def test_semester_reset_1(self):
course = mommy.make(Course, semester=self.semester, state='prepared')
self.helper_semester_state_views(course, "prepared", "new")
def test_semester_reset_2(self):
course = mommy.make(Course, semester=self.semester, state='approved')
self.helper_semester_state_views(course, "approved", "new")
def test_semester_approve_1(self):
        course = mommy.make(Course, semester=self.semester, state='new')
course.general_contribution.questionnaires = [mommy.make(Questionnaire)]
self.helper_semester_state_views(course, "new", "approved")
def test_semester_approve_2(self):
course = mommy.make(Course, semester=self.semester, state='prepared')
course.general_contribution.questionnaires = [mommy.make(Questionnaire)]
self.helper_semester_state_views(course, "prepared", "approved")
def test_semester_approve_3(self):
course = mommy.make(Course, semester=self.semester, state='editor_approved')
course.general_contribution.questionnaires = [mommy.make(Questionnaire)]
self.helper_semester_state_views(course, "editor_approved", "approved")
def test_semester_contributor_ready_1(self):
course = mommy.make(Course, semester=self.semester, state='new')
self.helper_semester_state_views(course, "new", "prepared")
def test_semester_contributor_ready_2(self):
course = mommy.make(Course, semester=self.semester, state='editor_approved')
self.helper_semester_state_views(course, "editor_approved", "prepared")
def test_semester_unpublish(self):
course = mommy.make(Course, semester=self.semester, state='published')
self.helper_semester_state_views(course, "published", "reviewed")
def test_operation_start_evaluation(self):
urloptions = '?course=1&operation=approved->in_evaluation'
course = mommy.make(Course, state='approved', semester=self.semester)
response = self.app.get(self.url + urloptions, user='staff')
self.assertEqual(response.status_code, 200, 'url "{}" failed with user "staff"'.format(self.url))
form = response.forms['course-operation-form']
form.submit()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'in_evaluation')
def test_operation_prepare(self):
urloptions = '?course=1&operation=new->prepared'
course = mommy.make(Course, state='new', semester=self.semester)
response = self.app.get(self.url + urloptions, user='staff')
self.assertEqual(response.status_code, 200, 'url "{}" failed with user "staff"'.format(self.url))
form = response.forms['course-operation-form']
form.submit()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'prepared')
class TestSingleResultCreateView(ViewTest):
url = '/staff/semester/1/singleresult/create'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
cls.staff_user = mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(Semester, pk=1)
mommy.make(CourseType, pk=1)
def test_single_result_create(self):
"""
Tests the single result creation view with one valid and one invalid input dataset.
"""
response = self.get_assert_200(self.url, "staff")
form = response.forms["single-result-form"]
form["name_de"] = "qwertz"
form["name_en"] = "qwertz"
form["type"] = 1
form["degrees"] = ["1"]
form["event_date"] = "2014-01-01"
form["answer_1"] = 6
form["answer_3"] = 2
# missing responsible to get a validation error
form.submit()
self.assertFalse(Course.objects.exists())
form["responsible"] = self.staff_user.pk # now do it right
form.submit()
self.assertEqual(Course.objects.get().name_de, "qwertz")
# Staff - Semester - Course Views
class TestCourseCreateView(ViewTest):
url = '/staff/semester/1/course/create'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
cls.staff_user = mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(Semester, pk=1)
mommy.make(CourseType, pk=1)
mommy.make(Questionnaire, pk=1, is_for_contributors=False)
mommy.make(Questionnaire, pk=2, is_for_contributors=True)
def test_course_create(self):
"""
Tests the course creation view with one valid and one invalid input dataset.
"""
response = self.get_assert_200("/staff/semester/1/course/create", "staff")
form = response.forms["course-form"]
        form["name_de"] = "lfo9e7bmxp1xi"
form["name_en"] = "asdf"
form["type"] = 1
form["degrees"] = ["1"]
form["vote_start_datetime"] = "2099-01-01 00:00:00"
form["vote_end_date"] = "2014-01-01" # wrong order to get the validation error
form["general_questions"] = ["1"]
form['contributions-TOTAL_FORMS'] = 1
form['contributions-INITIAL_FORMS'] = 0
form['contributions-MAX_NUM_FORMS'] = 5
form['contributions-0-course'] = ''
form['contributions-0-contributor'] = self.staff_user.pk
form['contributions-0-questionnaires'] = [2]
form['contributions-0-order'] = 0
form['contributions-0-responsibility'] = "RESPONSIBLE"
form['contributions-0-comment_visibility'] = "ALL"
form.submit()
self.assertFalse(Course.objects.exists())
form["vote_start_datetime"] = "2014-01-01 00:00:00"
form["vote_end_date"] = "2099-01-01" # now do it right
form.submit()
self.assertEqual(Course.objects.get().name_de, "lfo9e7bmxp1xi")
class TestCourseEditView(ViewTest):
url = '/staff/semester/1/course/1/edit'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
degree = mommy.make(Degree)
cls.course = mommy.make(Course, semester=semester, pk=1, degrees=[degree])
mommy.make(Questionnaire, question_set=[mommy.make(Question)])
cls.course.general_contribution.questionnaires = [mommy.make(Questionnaire)]
# This is necessary so that the call to is_single_result does not fail.
responsible = mommy.make(UserProfile)
cls.contribution = mommy.make(Contribution, course=cls.course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
def test_edit_course(self):
user = mommy.make(UserProfile)
page = self.app.get(self.url, user="staff")
        # change the responsible contributor
form = page.forms["course-form"]
form['contributions-0-contributor'] = user.pk
form['contributions-0-responsibility'] = "RESPONSIBLE"
page = form.submit("operation", value="save")
self.assertEqual(list(self.course.responsible_contributors), [user])
def test_remove_responsibility(self):
page = self.app.get(self.url, user="staff")
# remove responsibility
form = page.forms["course-form"]
form['contributions-0-responsibility'] = "CONTRIBUTOR"
page = form.submit("operation", value="save")
self.assertIn("No responsible contributors found", page)
class TestSingleResultEditView(ViewTest):
url = '/staff/semester/1/course/1/edit'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
course = mommy.make(Course, semester=semester, pk=1)
responsible = mommy.make(UserProfile)
contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True,
comment_visibility=Contribution.ALL_COMMENTS, questionnaires=[Questionnaire.single_result_questionnaire()])
question = Questionnaire.single_result_questionnaire().question_set.get()
mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=1, count=5)
mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=2, count=15)
mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=3, count=40)
mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=4, count=60)
mommy.make(RatingAnswerCounter, question=question, contribution=contribution, answer=5, count=30)
class TestCoursePreviewView(ViewTest):
url = '/staff/semester/1/course/1/preview'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
course = mommy.make(Course, semester=semester, pk=1)
course.general_contribution.questionnaires.set([mommy.make(Questionnaire)])
class TestCourseImportPersonsView(ViewTest):
url = "/staff/semester/1/course/1/person_import"
test_users = ["staff"]
filename_valid = os.path.join(settings.BASE_DIR, "staff/fixtures/valid_user_import.xls")
filename_invalid = os.path.join(settings.BASE_DIR, "staff/fixtures/invalid_user_import.xls")
filename_random = os.path.join(settings.BASE_DIR, "staff/fixtures/random.random")
@classmethod
def setUpTestData(cls):
semester = mommy.make(Semester, pk=1)
cls.staff_user = mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
cls.course = mommy.make(Course, pk=1, semester=semester)
profiles = mommy.make(UserProfile, _quantity=42)
cls.course2 = mommy.make(Course, pk=2, semester=semester, participants=profiles)
@classmethod
def tearDown(cls):
# delete the uploaded file again so other tests can start with no file guaranteed
helper_delete_all_import_files(cls.staff_user.id)
def test_import_valid_participants_file(self):
page = self.app.get(self.url, user='staff')
original_participant_count = self.course.participants.count()
form = page.forms["participant-import-form"]
form["excel_file"] = (self.filename_valid,)
page = form.submit(name="operation", value="test-participants")
self.assertContains(page, 'Import previously uploaded file')
self.assertEqual(self.course.participants.count(), original_participant_count)
form = page.forms["participant-import-form"]
form.submit(name="operation", value="import-participants")
self.assertEqual(self.course.participants.count(), original_participant_count + 2)
page = self.app.get(self.url, user='staff')
self.assertNotContains(page, 'Import previously uploaded file')
def test_copy_participants(self):
page = self.app.get(self.url, user='staff')
original_participant_count = self.course.participants.count()
form = page.forms["participant-copy-form"]
form["course"] = str(self.course2.pk)
page = form.submit(name="operation", value="copy-participants")
self.assertEqual(self.course.participants.count(), original_participant_count + self.course2.participants.count())
def test_import_valid_contributors_file(self):
page = self.app.get(self.url, user='staff')
original_contributor_count = UserProfile.objects.filter(contributions__course=self.course).count()
form = page.forms["contributor-import-form"]
form["excel_file"] = (self.filename_valid,)
page = form.submit(name="operation", value="test-contributors")
self.assertContains(page, 'Import previously uploaded file')
self.assertEqual(UserProfile.objects.filter(contributions__course=self.course).count(), original_contributor_count)
form = page.forms["contributor-import-form"]
form.submit(name="operation", value="import-contributors")
self.assertEqual(UserProfile.objects.filter(contributions__course=self.course).count(), original_contributor_count + 2)
page = self.app.get(self.url, user='staff')
self.assertNotContains(page, 'Import previously uploaded file')
def test_copy_contributors(self):
page = self.app.get(self.url, user='staff')
original_contributor_count = UserProfile.objects.filter(contributions__course=self.course).count()
form = page.forms["contributor-copy-form"]
form["course"] = str(self.course2.pk)
page = form.submit(name="operation", value="copy-contributors")
new_contributor_count = UserProfile.objects.filter(contributions__course=self.course).count()
self.assertEqual(new_contributor_count, original_contributor_count + UserProfile.objects.filter(contributions__course=self.course2).count())
def test_import_participants_error_handling(self):
"""
Tests whether errors given from the importer are displayed
"""
page = self.app.get(self.url, user='staff')
form = page.forms["participant-import-form"]
form["excel_file"] = (self.filename_invalid,)
reply = form.submit(name="operation", value="test-participants")
self.assertContains(reply, 'Sheet "Sheet1", row 2: Email address is missing.')
self.assertContains(reply, 'Errors occurred while parsing the input data. No data was imported.')
self.assertNotContains(reply, 'Import previously uploaded file')
def test_import_participants_warning_handling(self):
"""
Tests whether warnings given from the importer are displayed
"""
mommy.make(UserProfile, email="<EMAIL>", username="lucilia.manilium")
page = self.app.get(self.url, user='staff')
form = page.forms["participant-import-form"]
form["excel_file"] = (self.filename_valid,)
reply = form.submit(name="operation", value="test-participants")
self.assertContains(reply, "The existing user would be overwritten with the following data:<br>"
" - lucilia.manilium ( None None, <EMAIL>) (existing)<br>"
" - lucilia.manilium ( <NAME>, <EMAIL>) (new)")
def test_import_contributors_error_handling(self):
"""
Tests whether errors given from the importer are displayed
"""
page = self.app.get(self.url, user='staff')
form = page.forms["contributor-import-form"]
form["excel_file"] = (self.filename_invalid,)
reply = form.submit(name="operation", value="test-contributors")
self.assertContains(reply, 'Sheet "Sheet1", row 2: Email address is missing.')
self.assertContains(reply, 'Errors occurred while parsing the input data. No data was imported.')
self.assertNotContains(reply, 'Import previously uploaded file')
def test_import_contributors_warning_handling(self):
"""
Tests whether warnings given from the importer are displayed
"""
mommy.make(UserProfile, email="<EMAIL>", username="lucilia.manilium")
page = self.app.get(self.url, user='staff')
form = page.forms["contributor-import-form"]
form["excel_file"] = (self.filename_valid,)
reply = form.submit(name="operation", value="test-contributors")
self.assertContains(reply, "The existing user would be overwritten with the following data:<br>"
" - lucilia.manilium ( None None, <EMAIL>) (existing)<br>"
" - lucilia.manilium ( <NAME>, <EMAIL>@institution.example.com) (new)")
def test_suspicious_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["participant-import-form"]
form["excel_file"] = (self.filename_valid,)
# Should throw SuspiciousOperation Exception.
reply = form.submit(name="operation", value="hackit", expect_errors=True)
self.assertEqual(reply.status_code, 400)
def test_invalid_contributor_upload_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["contributor-import-form"]
page = form.submit(name="operation", value="test-contributors")
self.assertContains(page, 'Please select an Excel file')
self.assertNotContains(page, 'Import previously uploaded file')
def test_invalid_participant_upload_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["participant-import-form"]
page = form.submit(name="operation", value="test-participants")
self.assertContains(page, 'Please select an Excel file')
self.assertNotContains(page, 'Import previously uploaded file')
def test_invalid_contributor_import_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["contributor-import-form"]
# invalid because no file has been uploaded previously (and the button doesn't even exist)
reply = form.submit(name="operation", value="import-contributors", expect_errors=True)
self.assertEqual(reply.status_code, 400)
def test_invalid_participant_import_operation(self):
page = self.app.get(self.url, user='staff')
form = page.forms["participant-import-form"]
# invalid because no file has been uploaded previously (and the button doesn't even exist)
reply = form.submit(name="operation", value="import-participants", expect_errors=True)
self.assertEqual(reply.status_code, 400)
class TestCourseEmailView(ViewTest):
url = '/staff/semester/1/course/1/email'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
participant1 = mommy.make(UserProfile, email="<EMAIL>")
participant2 = mommy.make(UserProfile, email="<EMAIL>")
mommy.make(Course, pk=1, semester=semester, participants=[participant1, participant2])
def test_emails_are_sent(self):
page = self.get_assert_200(self.url, user="staff")
form = page.forms["course-email-form"]
form.get("recipients", index=0).checked = True # send to all participants
form["subject"] = "asdf"
form["body"] = "asdf"
form.submit()
self.assertEqual(len(mail.outbox), 2)
class TestCourseCommentView(ViewTest):
url = '/staff/semester/1/course/1/comments'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
cls.course = mommy.make(Course, pk=1, semester=semester)
def test_comments_showing_up(self):
questionnaire = mommy.make(Questionnaire)
question = mommy.make(Question, questionnaire=questionnaire, type='T')
contribution = mommy.make(Contribution, course=self.course, contributor=mommy.make(UserProfile), questionnaires=[questionnaire])
mommy.make(TextAnswer, contribution=contribution, question=question, original_answer='should show up')
response = self.app.get(self.url, user='staff')
self.assertContains(response, 'should show up')
class TestCourseCommentEditView(ViewTest):
url = '/staff/semester/1/course/1/comment/1/edit'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
semester = mommy.make(Semester, pk=1)
course = mommy.make(Course, semester=semester, pk=1)
questionnaire = mommy.make(Questionnaire)
question = mommy.make(Question, questionnaire=questionnaire, type='T')
contribution = mommy.make(Contribution, course=course, contributor=mommy.make(UserProfile), questionnaires=[questionnaire])
mommy.make(TextAnswer, contribution=contribution, question=question, original_answer='test answer text', pk=1)
def test_comments_showing_up(self):
response = self.app.get(self.url, user='staff')
form = response.forms['comment-edit-form']
self.assertEqual(form['original_answer'].value, 'test answer text')
form['reviewed_answer'] = 'edited answer text'
form.submit()
answer = TextAnswer.objects.get(pk=1)
self.assertEqual(answer.reviewed_answer, 'edited answer text')
# Staff Questionnaire Views
class TestQuestionnaireNewVersionView(ViewTest):
url = '/staff/questionnaire/2/new_version'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
cls.name_de_orig = 'kurzer name'
cls.name_en_orig = 'short name'
questionnaire = mommy.make(Questionnaire, id=2, name_de=cls.name_de_orig, name_en=cls.name_en_orig)
mommy.make(Question, questionnaire=questionnaire)
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
def test_changes_old_title(self):
page = self.app.get(url=self.url, user='staff')
form = page.forms['questionnaire-form']
form.submit()
timestamp = datetime.date.today()
new_name_de = '{} (until {})'.format(self.name_de_orig, str(timestamp))
new_name_en = '{} (until {})'.format(self.name_en_orig, str(timestamp))
self.assertTrue(Questionnaire.objects.filter(name_de=self.name_de_orig, name_en=self.name_en_orig).exists())
self.assertTrue(Questionnaire.objects.filter(name_de=new_name_de, name_en=new_name_en).exists())
def test_no_second_update(self):
# First save.
page = self.app.get(url=self.url, user='staff')
form = page.forms['questionnaire-form']
form.submit()
# Second try.
new_questionnaire = Questionnaire.objects.get(name_de=self.name_de_orig)
page = self.app.get(url='/staff/questionnaire/{}/new_version'.format(new_questionnaire.id), user='staff')
# We should get redirected back to the questionnaire index.
self.assertEqual(page.status_code, 302) # REDIRECT
self.assertEqual(page.location, '/staff/questionnaire/')
class TestQuestionnaireCreateView(ViewTest):
url = "/staff/questionnaire/create"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_create_questionnaire(self):
page = self.app.get(self.url, user="staff")
questionnaire_form = page.forms["questionnaire-form"]
        questionnaire_form['name_de'] = "Test Fragebogen"
questionnaire_form['name_en'] = "test questionnaire"
questionnaire_form['public_name_de'] = "Oeffent<NAME>"
questionnaire_form['public_name_en'] = "Public Test Questionnaire"
questionnaire_form['question_set-0-text_de'] = "Frage 1"
questionnaire_form['question_set-0-text_en'] = "Question 1"
questionnaire_form['question_set-0-type'] = "T"
questionnaire_form['index'] = 0
questionnaire_form.submit().follow()
# retrieve new questionnaire
questionnaire = Questionnaire.objects.get(name_de="Test Fragebogen", name_en="test questionnaire")
self.assertEqual(questionnaire.question_set.count(), 1)
def test_create_empty_questionnaire(self):
page = self.app.get(self.url, user="staff")
questionnaire_form = page.forms["questionnaire-form"]
questionnaire_form['name_de'] = "Test Fragebogen"
questionnaire_form['name_en'] = "test questionnaire"
questionnaire_form['public_name_de'] = "Oeffent<NAME>"
questionnaire_form['public_name_en'] = "Public Test Questionnaire"
questionnaire_form['index'] = 0
page = questionnaire_form.submit()
self.assertIn("You must have at least one of these", page)
self.assertFalse(Questionnaire.objects.filter(name_de="Test Fragebogen", name_en="test questionnaire").exists())
class TestQuestionnaireIndexView(ViewTest):
url = "/staff/questionnaire/"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
mommy.make(Questionnaire, is_for_contributors=True)
mommy.make(Questionnaire, is_for_contributors=False)
class TestQuestionnaireEditView(ViewTest):
url = '/staff/questionnaire/2/edit'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
questionnaire = mommy.make(Questionnaire, id=2)
mommy.make(Question, questionnaire=questionnaire)
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
class TestQuestionnaireViewView(ViewTest):
url = '/staff/questionnaire/2'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
questionnaire = mommy.make(Questionnaire, id=2)
mommy.make(Question, questionnaire=questionnaire, type='T')
mommy.make(Question, questionnaire=questionnaire, type='G')
mommy.make(Question, questionnaire=questionnaire, type='L')
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
class TestQuestionnaireCopyView(ViewTest):
url = '/staff/questionnaire/2/copy'
test_users = ['staff']
@classmethod
def setUpTestData(cls):
questionnaire = mommy.make(Questionnaire, id=2)
mommy.make(Question, questionnaire=questionnaire)
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
def test_not_changing_name_fails(self):
response = self.get_submit_assert_200(self.url, "staff")
self.assertIn("already exists", response)
def test_copy_questionnaire(self):
page = self.app.get(self.url, user="staff")
questionnaire_form = page.forms["questionnaire-form"]
questionnaire_form['name_de'] = "Test Fragebogen (kopiert)"
questionnaire_form['name_en'] = "test questionnaire (copied)"
questionnaire_form['public_name_de'] = "Oeffentlicher Test Fragebogen (kopiert)"
questionnaire_form['public_name_en'] = "Public Test Questionnaire (copied)"
page = questionnaire_form.submit().follow()
questionnaire = Questionnaire.objects.get(name_de="Test Fragebogen (kopiert)", name_en="test questionnaire (copied)")
self.assertEqual(questionnaire.question_set.count(), 1)
class TestQuestionnaireDeletionView(WebTest):
url = "/staff/questionnaire/delete"
csrf_checks = False
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
questionnaire1 = mommy.make(Questionnaire, pk=1)
mommy.make(Questionnaire, pk=2)
mommy.make(Contribution, questionnaires=[questionnaire1])
def test_questionnaire_deletion(self):
"""
        Tries to delete two questionnaires via the respective post requests;
        only the second attempt should succeed.
"""
self.assertFalse(Questionnaire.objects.get(pk=1).can_staff_delete)
response = self.app.post("/staff/questionnaire/delete", params={"questionnaire_id": 1}, user="staff", expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue(Questionnaire.objects.filter(pk=1).exists())
self.assertTrue(Questionnaire.objects.get(pk=2).can_staff_delete)
response = self.app.post("/staff/questionnaire/delete", params={"questionnaire_id": 2}, user="staff")
self.assertEqual(response.status_code, 200)
self.assertFalse(Questionnaire.objects.filter(pk=2).exists())
# Staff Course Types Views
class TestCourseTypeView(ViewTest):
url = "/staff/course_types/"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_page_displays_something(self):
CourseType.objects.create(name_de='uZJcsl0rNc', name_en='uZJcsl0rNc')
page = self.get_assert_200(self.url, user="staff")
self.assertIn('uZJcsl0rNc', page)
def test_course_type_form(self):
"""
Adds a course type via the staff form and verifies that the type was created in the db.
"""
page = self.get_assert_200(self.url, user="staff")
form = page.forms["course-type-form"]
last_form_id = int(form["form-TOTAL_FORMS"].value) - 1
form["form-" + str(last_form_id) + "-name_de"].value = "Test"
form["form-" + str(last_form_id) + "-name_en"].value = "Test"
response = form.submit()
self.assertIn("Successfully", str(response))
self.assertTrue(CourseType.objects.filter(name_de="Test", name_en="Test").exists())
class TestCourseTypeMergeSelectionView(ViewTest):
url = "/staff/course_types/merge"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.main_type = mommy.make(CourseType, pk=1, name_en="A course type")
cls.other_type = mommy.make(CourseType, pk=2, name_en="Obsolete course type")
def test_same_course_fails(self):
page = self.get_assert_200(self.url, user="staff")
form = page.forms["course-type-merge-selection-form"]
form["main_type"] = 1
form["other_type"] = 1
response = form.submit()
self.assertIn("You must select two different course types", str(response))
class TestCourseTypeMergeView(ViewTest):
url = "/staff/course_types/1/merge/2"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
cls.main_type = mommy.make(CourseType, pk=1, name_en="A course type")
cls.other_type = mommy.make(CourseType, pk=2, name_en="Obsolete course type")
mommy.make(Course, type=cls.main_type)
mommy.make(Course, type=cls.other_type)
def test_merge_works(self):
page = self.get_assert_200(self.url, user="staff")
form = page.forms["course-type-merge-form"]
response = form.submit()
self.assertIn("Successfully", str(response))
self.assertFalse(CourseType.objects.filter(name_en="Obsolete course type").exists())
self.assertEqual(Course.objects.filter(type=self.main_type).count(), 2)
for course in Course.objects.all():
self.assertTrue(course.type == self.main_type)
# Other Views
class TestCourseCommentsUpdatePublishView(WebTest):
url = reverse("staff:course_comments_update_publish")
csrf_checks = False
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username="staff.user", groups=[Group.objects.get(name="Staff")])
mommy.make(Course, pk=1)
def helper(self, old_state, expected_new_state, action):
textanswer = mommy.make(TextAnswer, state=old_state)
response = self.app.post(self.url, params={"id": textanswer.id, "action": action, "course_id": 1}, user="staff.user")
self.assertEqual(response.status_code, 200)
textanswer.refresh_from_db()
self.assertEqual(textanswer.state, expected_new_state)
def test_review_actions(self):
self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.PUBLISHED, "publish")
self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.HIDDEN, "hide")
self.helper(TextAnswer.NOT_REVIEWED, TextAnswer.PRIVATE, "make_private")
self.helper(TextAnswer.PUBLISHED, TextAnswer.NOT_REVIEWED, "unreview")
class ArchivingTests(WebTest):
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username="staff", groups=[Group.objects.get(name="Staff")])
def test_raise_403(self):
"""
Tests whether inaccessible views on archived semesters/courses correctly raise a 403.
"""
semester = mommy.make(Semester, is_archived=True)
semester_url = "/staff/semester/{}/".format(semester.pk)
self.get_assert_403(semester_url + "import", "staff")
self.get_assert_403(semester_url + "assign", "staff")
self.get_assert_403(semester_url + "course/create", "staff")
self.get_assert_403(semester_url + "courseoperation", "staff")
class TestTemplateEditView(ViewTest):
url = "/staff/template/1"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_emailtemplate(self):
"""
        Tests the emailtemplate view with one valid and one invalid input dataset.
"""
page = self.get_assert_200(self.url, "staff")
form = page.forms["template-form"]
form["subject"] = "subject: mflkd862xmnbo5"
form["body"] = "body: mflkd862xmnbo5"
form.submit()
self.assertEqual(EmailTemplate.objects.get(pk=1).body, "body: mflkd862xmnbo5")
form["body"] = " invalid tag: {{}}"
form.submit()
self.assertEqual(EmailTemplate.objects.get(pk=1).body, "body: mflkd862xmnbo5")
class TestDegreeView(ViewTest):
url = "/staff/degrees/"
test_users = ['staff']
@classmethod
def setUpTestData(cls):
mommy.make(UserProfile, username='staff', groups=[Group.objects.get(name='Staff')])
def test_degree_form(self):
"""
Adds a degree via the staff form and verifies that the degree was created in the db.
"""
page = self.get_assert_200(self.url, user="staff")
form = page.forms["degree-form"]
last_form_id = int(form["form-TOTAL_FORMS"].value) - 1
form["form-" + str(last_form_id) + "-name_de"].value = "Test"
form["form-" + str(last_form_id) + "-name_en"].value = "Test"
response = form.submit()
self.assertIn("Successfully", str(response))
self.assertTrue(Degree.objects.filter(name_de="Test", name_en="Test").exists())
```
#### File: evap/student/tools.py
```python
def make_form_identifier(contribution, questionnaire, question):
"""Generates a form field identifier for voting forms using the given
parameters."""
return "question_%s_%s_%s" % (
contribution.id,
questionnaire.id,
question.id)
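# Usage sketch (illustrative only; the objects below are hypothetical stand-ins,
# not part of this module). Any objects exposing an ``id`` attribute work:
#
#   from types import SimpleNamespace
#   contribution = SimpleNamespace(id=7)
#   questionnaire = SimpleNamespace(id=3)
#   question = SimpleNamespace(id=12)
#   make_form_identifier(contribution, questionnaire, question)  # -> "question_7_3_12"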
```
|
{
"source": "JenniferToops/CodingDojo",
"score": 4
}
|
#### File: python/xanderyzwich/stack_abuse.py
```python
from unittest import TestCase
class AbuseStack:
def __init__(self, elements=[]):
self.values = list(elements)
def push(self, element):
self.values.append(element)
def pop(self, count=1):
results = AbuseStack()
for _ in range(count):
results.push(self.values[-1])
del self.values[-1]
return results
def __str__(self):
return '.'.join(self.values)
def __add__(self, element):
"""
This is an abuse of this operator
        Its design is to borrow increment syntax
stack += element is the same as stack.push(element)
:param element: this will be pushed onto the stack
:return: self
"""
self.push(element)
return self
def __sub__(self, count):
"""
This is an abuse of this operator
        Its design is to borrow decrement as a means of removing elements
        stack -= 1 is the same as _ = stack.pop()
:param count: number of elements to remove from stack
:return: Self
"""
self.pop(count)
return self
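# Usage sketch (illustrative only): with the operator abuse above, pushes and
# pops can be written as augmented assignments.
#
#   stack = AbuseStack(['fu'])
#   stack += 'bar'   # same as stack.push('bar'); str(stack) == 'fu.bar'
#   stack -= 1       # same as discarding stack.pop(1); str(stack) == 'fu'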
class TestAbuseStack(TestCase):
def test_init(self):
stack = AbuseStack()
print(stack)
assert str(stack) == ''
stack = AbuseStack(['fu', 'bar', 'baz'])
assert str(stack) == 'fu.bar.baz'
def test_push(self):
stack = AbuseStack()
stack.push('fu')
assert str(stack) == 'fu'
stack.push('bar')
assert str(stack) == 'fu.bar'
stack.push('baz')
assert str(stack) == 'fu.bar.baz'
def test_pop(self):
stack = AbuseStack(['fu', 'bar', 'baz'])
assert str(stack) == 'fu.bar.baz'
stack.pop(1)
assert str(stack) == 'fu.bar'
stack.pop(2)
assert str(stack) == ''
def test_increment(self):
stack = AbuseStack([])
print(stack)
assert str(stack) == ''
stack += 'fu'
assert str(stack) == 'fu'
stack.push('bar')
assert str(stack) == 'fu.bar'
stack.push('baz')
assert str(stack) == 'fu.bar.baz'
def test_decrement(self):
stack = AbuseStack(['fu', 'bar', 'baz'])
stack -= 1
assert str(stack) == 'fu.bar'
stack -= 2
assert str(stack) == ''
```
#### File: python/laurelin/laurelin_test.py
```python
import laurelin
def test_1_brackets():
output = laurelin.balanced_brackets("[[]]({}[])")
assert output == True
def test_2_brackets():
output = laurelin.balanced_brackets("[[({}[])")
assert output == False
def test_3_brackets():
output = laurelin.balanced_brackets("")
assert output == True
def test_4_brackets():
output = laurelin.balanced_brackets("(5 * 3) + [10 / {2}]")
assert output == True
def test_5_brackets():
output = laurelin.balanced_brackets(")]})]}")
assert output == False
def test_6_brackets():
output = laurelin.balanced_brackets("([{(((")
assert output == False
def test_7_brackets():
output = laurelin.balanced_brackets("no brackets at all")
assert output == True
def test_8_brackets():
output = laurelin.balanced_brackets(">>> (<> are not brackets) >>>")
assert output == True
def test_9_brackets():
output = laurelin.balanced_brackets("[///\\|||]")
assert output == True
def test_10_brackets():
output = laurelin.balanced_brackets("!@#$%%^&*(;',.<>?/\|~`'")
assert output == False
```
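The `laurelin` module exercised by these tests is not part of this dump, so its behaviour can only be inferred from the assertions above. As a hedged sketch consistent with those assertions (not the author's actual implementation), a stack-based checker that ignores every character other than `()`, `[]` and `{}` might look like this:

```python
def balanced_brackets(text):
    """Return True if the (), [] and {} brackets in text are balanced.
    Sketch only, inferred from the tests; <> and all other characters are ignored."""
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for char in text:
        if char in '([{':
            stack.append(char)
        elif char in pairs:
            # a closing bracket must match the most recently opened one
            if not stack or stack.pop() != pairs[char]:
                return False
    # any opener left on the stack is unmatched
    return not stack
```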
#### File: drkennetz/test/test_zeropoint.py
```python
import unittest
from zeropoint import LinkedList, zeropoint
class TestZeroPoint(unittest.TestCase):
def setUp(self):
self.tests = [[1, 2, -2, 3], [3, 4, -7, 5, -6, 6], [1, 0, 1], [1, 2, 3, 4, -4, -3, 5],
[2, 3, 4, 5, -9], [1, -1, -2, 3], [1, -10, 5, 4], [5]]
self.expected = [[1, 3], [5], [1, 1], [1, 2, 5], [2, 3], [-2, 3], [None], [5]]
def test_zeropoint(self):
total_spaces = 100
s = len("Input:")
a = len("Actual:")
e = len("Expected:")
pad = (total_spaces - a - e - s)//2
print("Input:", " "*pad, "Actual:", " "*pad, "Expected:")
for test, result in zip(self.tests, self.expected):
x = LinkedList.construct_dll_from_list(test)
dll, exp = LinkedList.construct_dll_from_list(test), LinkedList.construct_dll_from_list(result)
zerod = zeropoint.remove_zero_sequences_inplace(dll)
act = zerod.print_list()
expected = exp.print_list()
d = len(x.print_list())
a = len(act)
e = len(expected)
pad = (total_spaces - a - e - d)//2
print(x.print_list(), " "*pad, act, " "*pad, expected)
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
```
#### File: python/xanderyzwich/solution.py
```python
from unittest import TestCase
def who_wins(move_list):
# setup
move_count = len(move_list)
full_board = (move_count == 9)
board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# Fill the board with marks
for i in range(0, move_count, 2):
a_move = move_list[i]
board[a_move[0]][a_move[1]] = 'A'
if move_count > i+1: # confirm b move exists
b_move = move_list[i + 1]
board[b_move[0]][b_move[1]] = 'B'
# Check for winner
winning_arrangements = [
[(0, 0), (0, 1), (0, 2)], [(1, 0), (1, 1), (1, 2)], [(2, 0), (2, 1), (2, 2)], # Rows
[(0, 0), (1, 0), (2, 0)], [(0, 1), (1, 1), (2, 1)], [(0, 2), (1, 2), (2, 2)], # Columns
[(0, 0), (1, 1), (2, 2)], [(2, 0), (1, 1), (0, 2)], # Diagonals
]
for left, middle, right in winning_arrangements:
values = [board[row][col] for row, col in [left, middle, right]] # get values
nonzero = values[0] != 0
match = len(set(values)) == 1
if match and nonzero:
return values[0]
# evaluate non-win
return 'Draw' if full_board else 'Pending'
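# Worked example (illustrative only): moves alternate A, B, A, ... so for
# move_list = [[0, 0], [2, 0], [1, 1], [2, 1], [2, 2]] the board fills as
#
#   A . .
#   . A .
#   B B A    -> 'A' wins on the (0,0)-(1,1)-(2,2) diagonal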
class TestWhoWins(TestCase):
# can be run from this directory with "python3 -m unittest solution.py"
def test_downhill_diagonal(self):
moves = [[0, 0], [2, 0], [1, 1], [2, 1], [2, 2]]
expected = 'A'
assert who_wins(moves) == expected
def test_uphill_diagonal(self):
moves = [[0, 0], [1, 1], [0, 1], [0, 2], [1, 0], [2, 0]]
expected = 'B'
assert who_wins(moves) == expected
def test_draw(self):
moves = [[0, 0], [1, 1], [2, 0], [1, 0], [1, 2], [2, 1], [0, 1], [0, 2], [2, 2]]
expected = 'Draw'
assert who_wins(moves) == expected
def test_pending_too_short(self):
moves = [[0, 0], [1, 1]]
expected = 'Pending'
assert who_wins(moves) == expected
def test_pending_with_enough(self):
moves = [[1, 1], [0, 0], [1, 2], [1, 0], [2, 0], [0, 2]]
expected = 'Pending'
assert who_wins(moves) == expected
def test_horizontal(self):
moves = [[1, 1], [0, 0], [1, 0], [0, 1], [1, 2]]
expected = 'A'
assert who_wins(moves) == expected
def test_vertical(self):
moves = [[1, 1], [0, 2], [0, 0], [2, 2], [1, 0], [1, 2]]
expected = 'B'
assert who_wins(moves) == expected
```
#### File: solutions/mob/mob.py
```python
import unittest
"""
# The 24 Game
The `24` game is played as follows. You are given a list of four integers in a fixed order. By
placing the operators +, -, *, and / between the numbers, and grouping them with parentheses, determine whether it is possible to reach the value `24`.
For example, given the input [5, 2, 7, 8], you should return True, since (5 * 2 - 7) * 8 = 24.
Write a function that plays the `24` game.
## Business Rules/Errata
- Your input will always consist of an array of four integers. These integers do not all need to be positive.
- Your function should return a boolean value indicating whether the input can be combined to produce `24`. You do not need to produce the formula that yields `24`.
- The results of any division operation should be rounded to the nearest integer. So, `3 / 2 = 2`, not `3 / 2 = 1`.
- The result of division by zero should be zero, not undefined.
## Examples
```
play([5, 2, 7, 8]); // True -> (5 * 2 - 7) * 8 = 24
play([2, 4, 8, 10]); // True -> 2 + 4 + 8 + 10 = 24
play([5, 0, 4, 4]); // True -> (5 + 0) * 4 + 4 = 24
play([47, 2, 0, 0]); // True -> (47 / 2) + 0 + 0 = 24
play([1, 5, 7, 19]); // False, no combinations yield 24
```
"""
# depth first search through a tree
# use a STACK - last in/first out
# Keep track of:
# The stack of lists to operate on
def play(int_list):
# Create a STACK (aka list)
stack = [int_list]
# While the stack is not empty
while stack:
# Pop an item (sublist) off the stack
sublist = stack.pop()
# If your sublist is length 1:
if len(sublist) == 1:
# if the only number in sublist is 24, return True.
if sublist[0] == 24:
return True
# Otherwise, throw out sublist and go back to the start of the while loop
continue
# For each pair of items in sublist
for i in range(len(sublist) - 1):
# Do all four operations on the pair of items, collect in a list called RESULTS
first, second = sublist[i], sublist[i + 1]
results = [first + second, first - second, first * second]
results.append(0 if second == 0 else round(first / second))
# For each result in RESULTS
for result in results:
# Create a list (from sublist) replacing the two input numbers with result
current = sublist[:i] + [result] + sublist[i + 2:]
# add the new, smaller list to the stack
stack.append(current)
# Return False
return False
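# Worked example (illustrative only) of one branch the search above can take
# for [5, 2, 7, 8]:
#
#   [5, 2, 7, 8]  -- combine 5 and 2 with '*' -->  [10, 7, 8]
#   [10, 7, 8]    -- combine 10 and 7 with '-' -->  [3, 8]
#   [3, 8]        -- combine 3 and 8 with '*' -->  [24]  -> length 1 and equal to 24 -> True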
class TestPlay(unittest.TestCase):
def setUp(self):
self.prod_sub_prod = [5, 2, 7, 8]
self.addition = [2, 4, 8, 10]
self.subtraction = [27, 1, 1, 1]
self.add_prod_add = [5, 0, 4, 4]
self.div_roundup = [47, 2, 0, 0]
self.div_rounddown = [1, 1, 73, 3]
self.fail = [1, 5, 7, 19]
def test_prod_sub_prod(self):
self.assertTrue(play(self.prod_sub_prod),
"(5 * 2 - 7) * 8 = 24 -> True")
def test_addition(self):
self.assertTrue(play(self.addition), "2 + 4 + 8 + 10 = 24 -> True")
def test_subtraction(self):
self.assertTrue(play(self.subtraction), "27 - 1 - 1 - 1 = 24 -> True")
def test_add_prod_add(self):
self.assertTrue(play(self.add_prod_add),
"(5 + 0) * 4 + 4 = 24 -> True")
def test_div_roundup(self):
self.assertTrue(play(self.div_roundup),
"47 / 2 + 0 + 0 = 23.5 -> 24 -> True")
def test_div_rounddown(self):
self.assertTrue(play(self.div_rounddown),
"1 - 1 + (73 / 3) = 24.33 -> 24 -> True")
def test_fail(self):
self.assertFalse(play(self.fail), "1 ? 5 ? 7 ? 19 != 24 -> False")
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jennifertramsu/MinervaScraper",
"score": 3
}
|
#### File: MinervaScraper/Minerva/scraper.py
```python
import os
import sys
import json
import pandas as pd
from dotenv import load_dotenv
# scrapers
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from msedge.selenium_tools import EdgeOptions, Edge
# email stuff
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def load_browser(browser):
''' Loads the appropriate driver as indicated in the .env file.
Parameters
----------
browser : str, {CHROME, EDGE, FIREFOX}
String argument indicating the browser of preference.
Returns
-------
driver : WebDriver
Controls the Driver and allows you to drive the browser.
'''
if browser == "CHROME":
# initialize Chrome driver
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.headless = True
driver = webdriver.Chrome(executable_path='../chromedriver.exe', options=options)
elif browser == "EDGE":
# initialize MsEdge driver
options = EdgeOptions()
options.use_chromium = True
options.add_argument("--headless")
options.add_argument("disable-gpu")
driver = Edge(executable_path = '../msedgedriver.exe', options=options)
elif browser == "FIREFOX":
# initialize Firefox driver
options = webdriver.FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(executable_path='../geckodriver.exe', options=options, service_log_path=os.devnull)
else:
raise ValueError("Incompatible browser! Start a GitHub issue to request this browser.")
return driver
def load_page():
""" Loads the unofficial transcript in Minerva using Selenium and returns the transcript_table which will be used for scraping.
Returns
-------
driver : WebDriver
Controls the Driver and allows you to drive the browser.
transcript_table : WebElement
Object containing HTML code that describes the Minerva unofficial transcript.
"""
# loading Minerva credentials
load_dotenv()
# choosing login method --> 1 : ID login, 0 : email login
login_by_ID = int(os.getenv('LOGIN'))
if login_by_ID:
sid = os.getenv('MCGILLID')
pin = os.getenv('MINERVAPIN')
else:
username = os.getenv('MCGILLUSERNAME')
password = os.getenv('<PASSWORD>')
# initialize driver
browser = os.getenv("BROWSER")
driver = load_browser(browser)
# heading to Minerva login page
URL = 'https://horizon.mcgill.ca/pban1/twbkwbis.P_WWWLogin'
driver.get(URL)
# wait for page to load
WebDriverWait(driver=driver, timeout=10).until(EC.presence_of_element_located((By.ID, "UserID")))
if login_by_ID:
# retrieving username field and sending username
driver.find_element_by_id("UserID").send_keys(sid)
# retrieving password field and sending password
driver.find_element_by_id("PIN").send_keys(pin)
# clicking login button
driver.find_element_by_id("mcg_id_submit").click()
else:
# retrieving username field and sending username
driver.find_element_by_id("mcg_un").send_keys(username)
# retrieving password field and sending password
driver.find_element_by_id("mcg_pw").send_keys(password)
# clicking login button
driver.find_element_by_id("mcg_un_submit").click()
# dealing with incorrect credentials (verifying successful login)
# waiting for page to load --> execute_script executes JS code
# --> waits until the JS code returns True when page is loaded
WebDriverWait(driver=driver, timeout=10).until(
lambda x : x.execute_script("return document.readyState === 'complete'")
)
try:
    driver.find_element_by_name("web_stop")
except: # "web_stop" error element not found -> login successful
    print("Login successful!\n")
else:
    raise ValueError("Login failed.\n")
# navigate to Unofficial Transcript
main = 'https://horizon.mcgill.ca/pban1/twbkwbis.P_GenMenu?name=bmenu.P_StuMainMnu'
records = 'https://horizon.mcgill.ca/pban1/twbkwbis.P_GenMenu?name=bmenu.P_AdminMnu'
transcript = 'https://horizon.mcgill.ca/pban1/bzsktran.P_Display_Form?user_type=S&tran_type=V'
navigation = [main, records, transcript]
for link in navigation:
driver.get(link)
WebDriverWait(driver=driver, timeout=10).until(lambda x : x.execute_script("return document.readyState === 'complete'"))
# scrape for grades
transcript_table = driver.find_elements_by_class_name("dedefault")
return driver, transcript_table
def minervascrape(values, term, year, transcript_table, terms, file):
""" This is the main scraper function. Given the inputted terms (optional), the function will scrape through the user's
unofficial transcript on Minerva and write the output (Course Code, Grade, Course Average, Term GPA) to a text file.
Parameters
----------
values : list
List of system arguments (term + year) inputted through the command-line. If no arguments are given,
function will scrape for all terms.
term : list
List of specified terms represented by the first letter ('f', 'w', 's').
year : list
List of years corresponding to the specified terms.
transcript_table : list
List of Selenium.element objects that contains all the text to be parsed and scraped.
terms : dict
Dictionary that maps the elements in term to the corresponding term name ('Fall', 'Winter', 'Summer').
file : file-type
File-type object to which the function writes.
Examples
--------
>>> import os
>>> os.system("python minervascraper.py f2019 w2020 S2020")
"""
k = 0
d = [] # list collecting one dict per course
for i in range(len(transcript_table)):
if len(values) != 0:
if (term[k] not in transcript_table[i].text) or (year[k] not in transcript_table[i].text):
continue
else:
t = term[k] + " " + year[k]
print("Scraping " + t + "...\n")
else: # no arguments, scrape all terms
if (terms['F'] not in transcript_table[i].text) and (terms['W'] not in transcript_table[i].text) and (terms['S'] not in transcript_table[i].text):
continue
else:
sem = transcript_table[i].text.split()
if len(sem) == 2:
t = sem[0] + " " + sem[1]
print("Scraping " + t + "...\n")
else:
continue
# in block of desired term and year
j = i + 5
if j >= len(transcript_table):
break
while "Winter" not in transcript_table[j].text and "Fall" not in transcript_table[j].text and "Summer" not in transcript_table[j].text: # loop per line
if "Advanced" in transcript_table[j].text:
# grab term gpa
l = j
table = transcript_table[l].find_elements_by_class_name("dedefault")
for m in range(len(table)):
while "TERM GPA" not in table[m].text:
m += 1
term_gpa = table[m + 1].text
c = {"Term GPA" : term_gpa}
#d.append(c)
break
break
course_code = transcript_table[j].text
if "RW" in transcript_table[j - 1].text:
c = {"Term" : t, "Course Code" : course_code, "Grade" : "Not released.", "Course Average" : "Not released."}
d.append(c)
else:
grade = transcript_table[j + 5].text
course_avg = transcript_table[j + 9].text
if len(course_avg.strip()) == 0:
c = {"Term" : t, "Course Code" : course_code, "Grade" : grade, "Course Average" : "Not released."}
d.append(c)
else:
c = {"Term" : t, "Course Code" : course_code, "Grade" : grade, "Course Average" : course_avg}
d.append(c)
j += 11 # move to next course code
if j >= len(transcript_table):
break
i = j
k += 1
if len(values) != 0 and k >= len(term):
break
# writing to json file
j = json.dumps(d)
file.write(j)
def json2excel(file):
''' Converts json file to a stacked Excel file.
Parameters
----------
file : str
Filepath to json file.
Returns
-------
df : Pandas.DataFrame
Dataframe with multi-indexing that can be exported as Excel file.
'''
df = pd.read_json(file)
df.set_index(['Term', 'Course Code'], inplace=True)
return df
def extract_difference(old, new):
''' Returns a Pandas DataFrame containing the difference between the two inputs.
Parameters
----------
old, new : Pandas.DataFrame
Dataframes to be compared.
Returns
-------
changes : Pandas.DataFrame
DataFrame containing all transcript changes.
'''
df = old.compare(new, keep_equal=True).reset_index("Term", drop=True) # rows containing changes, Term index dropped
courses = df.index.to_list()
changes = new.reset_index("Term", drop=True).loc[courses, :]
return changes
def minervaupdate(values, term, year, transcript_table, terms):
""" If flagged through the command-line, this function will scrape for all terms and compare with the existing Scraped_Transcript_All_Terms.txt text file.
Parameters
----------
values : list
List of system arguments (term + year) inputted through the command-line. If no arguments are given,
function will scrape for all terms.
term : list
List of specified terms represented by the first letter ('f', 'w', 's').
year : list
List of years corresponding to the specified terms.
transcript_table : list
List of Selenium.element objects that contains all the text to be parsed and scraped.
terms : dict
Dictionary that maps the elements in term to the corresponding term name ('Fall', 'Winter', 'Summer').
Returns
-------
change : bool
True if transcript has updated, otherwise False.
changes : Pandas.DataFrame
DataFrame containing all transcript changes.
"""
with open("Updated_Scraped_Transcript.json", "w") as file:
minervascrape(values, term, year, transcript_table, terms, file)
old = json2excel("Scraped_Transcript_All_Terms.json")
new = json2excel("Updated_Scraped_Transcript.json")
if old.equals(new):
changes = None
change = False
else:
if not old['Grade'].equals(new['Grade']) or not old['Course Average'].equals(new['Course Average']):
changes = extract_difference(old, new)
change = True
else:
changes = None
change = False
if change:
# replacing old json with new json
old_json = open("Scraped_Transcript_All_Terms.json", "w")
new_json = open("Updated_Scraped_Transcript.json")
old_json.write(new_json.read())
old_json.close()
new_json.close()
os.remove("Updated_Scraped_Transcript.json")
return change, changes
def generate_html(df):
''' Generates an HTML table based on the information from the passed Pandas DataFrame.
Parameters
----------
df : Pandas.DataFrame
Dataframe containing transcript changes.
Returns
-------
html : str
HTML code to be embedded in email.
'''
load_dotenv()
name = os.getenv("NAME")
html = f'''<html>
<head></head>
<body>
<p> Hi {name}, </p>
</br>
<p> Your transcript has updated on Minerva! View changes below: </p>
</br>
<table border="2">
<tr>
<th> Course Code </th>
<th> Grade </th>
<th> Course Average </th>
</tr>'''
table = ""
for col, item in df.iterrows():
table += "<tr>"
table += "<th> " + col + " </th>"
for i in item:
table += "<th> " + i + " </th>"
table += "</tr>"
html += table
html += '''</table></body></html>'''
return html
def send_email(changes):
''' Sends the email to notify transcript changes. Attaches an HTML formatted table containing all changes.
Parameters
----------
changes : Pandas.DataFrame
DataFrame containing transcript changes to be added to HTML table.
'''
load_dotenv()
port = 465
smtp_server = "smtp.gmail.com"
sender_email = os.getenv("EMAIL")
receiver_email = os.getenv("MYEMAIL")
sender_email_password = os.getenv("PASS")
message = MIMEMultipart("alternative")
message["Subject"] = "Minerva Transcript Update"
message["From"] = sender_email
message["To"] = receiver_email
html = generate_html(changes)
message.attach(MIMEText(html, 'html'))
context = ssl.create_default_context()
if int(sys.version[0]) > 2: # version 3
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, sender_email_password)
server.sendmail(sender_email, receiver_email, message.as_string())
else:
server = smtplib.SMTP_SSL(smtp_server, port)
server.login(sender_email, sender_email_password)
server.sendmail(sender_email, receiver_email, message.as_string())
server.quit()
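# Illustrative end-to-end flow (a sketch, not part of the original module; the
# command-line driver script is assumed to supply values/term/year/terms):
#   driver, transcript_table = load_page()
#   with open("Scraped_Transcript_All_Terms.json", "w") as f:
#       minervascrape(values, term, year, transcript_table, terms, f)
#   changed, changes = minervaupdate(values, term, year, transcript_table, terms)
#   if changed:
#       send_email(changes)
#   driver.quit()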
##### ARCHIVE #####
def add_drop_remove(df):
''' For the sake of comparison, this function removes rows where both grade and course average are empty. We are only concerned with
courses where the grade or course average has updated (i.e. are present). '''
df = df[df['Grade'] != "Not released."] # if grade is not present, then course average is definitely not present
return df
```
|
{
"source": "jennifertran/SearchEngineAward",
"score": 3
}
|
#### File: jennifertran/SearchEngineAward/Award.py
```python
class Award:
count = 0
def __init__(self, url, number, name, type, application, restrictions, renewable, value, due, description,
sequence):
self.url = url
self.number = number
self.name = name
self.type = type
self.application = application
self.restrictions = restrictions
self.renewable = renewable
self.value = value
self.due = due
self.description = description
self.sequence = sequence
Award.count += 1
def displayCount(self):
return "There are a total of %d awards" % Award.count
def displayAward(self):
return "Award Number: ", self.number, ", Name: ", self.name, ", Type: ", self.type, ", Application: ", self.application, ", Restrictions: ", self.restrictions, ", Renewable: ", self.renewable, ", Value: ", self.value, ", Due Day: ", self.due, ", description: ", self.description, ", URL: ", self.url
```
|
{
"source": "jenniferxkuo/ultramisc",
"score": 3
}
|
#### File: scripts/dim-reduction/suzhou-pca-lda.py
```python
import os, sys, glob, re
import argparse
import numpy as np
import pandas as pd
from hashlib import sha1
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import matplotlib.pyplot as plt
# TODO make sure this is assigning a label to everything
def coart_class(row):
# figure out how to make training words just be "training"
if row['pron'] in test_no_coart_words:
return "no_fric"
elif row['pron'] in apical_words:
return "apical"
else:
return "fric"
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
parser.add_argument("--pca_dim", "-p", help="Number of principal components to retain")
parser.add_argument("--lda_dim", "-l", help="Number of linear discriminants to use")
#parser.add_argument("-v", "--visualize", help="Produce plots of PC loadings on fan",action="store_true")
args = parser.parse_args()
try:
expdir = args.directory
except IndexError:
print("\tDirectory provided doesn't exist")
parser.print_usage()
parser.print_help()
sys.exit(2)
n_components = int(args.pca_dim)
n_lds = int(args.lda_dim)
pct_out = "percent_classified.txt"
pct_out_path = os.path.join(expdir,pct_out)
pct_out_head = "\t".join(["subj", "test", "coart", "classified_as", "pct_class"])
with open(pct_out_path, "w") as out:
out.write(pct_out_head + "\n")
# label test case by word type in metadata - coart from fric or not (or apical, or test)
test_no_coart_words = ["IZ", "BIZX", "YZ"] # mark as "no_coart"
apical_words = ["SZ", "SZW"] # third level "apical"; useless for comparisons
for root,directories,files in os.walk(expdir):
for d in directories:
if d.startswith("."): # don't run on MAC OS hidden directories
continue
subject = re.sub("[^0-9]","",d)
data_in = os.path.join(root,d,"frames_proc.npy")
data = np.load(data_in)
metadata_in = os.path.join(root,d,'frames_proc_metadata.pickle')
md_pre = pd.read_pickle(metadata_in)
# some sanity checks on data checksums
assert(len(md_pre) == data.shape[0]) # make sure one md row for each frame
assert(md_pre.loc[0, 'sha1_filt'] == sha1(data[0].ravel()).hexdigest()) # checksums
assert(md_pre.loc[len(md_pre)-1,'sha1_filt'] == sha1(data[-1].ravel()).hexdigest())
# get rid of hash-related columns after checking
md = md_pre.iloc[:,0:11].copy()
image_shape = data[0].shape
# reshape 2D frame data into 1D vectors and fit PCA
frames_reshaped = data.reshape([
data.shape[0],
data.shape[1] * data.shape[2]
])
pca = PCA(n_components=n_components)
pca.fit(frames_reshaped)
total_var_exp = sum(pca.explained_variance_ratio_)
pcvar = pca.explained_variance_ratio_
# output PC loading plots
if n_components < 6:
n_output_pcs = n_components
else:
n_output_pcs = 6
# output
for n in range(0,n_output_pcs):
dd = pca.components_[n].reshape(image_shape)
mag = np.max(dd) - np.min(dd)
pc_load = (dd-np.min(dd))/mag*255
# conversion would happen here if images weren't converted already
plt.title("PC{:} min/max loadings, subj {:}".format((n+1),subject))
plt.imshow(pc_load, cmap="Greys_r")
file_ending = "subj{:}-pc{:}-filt.pdf".format(subject, (n+1))
savepath = os.path.join(root,d,file_ending)
plt.savefig(savepath)
# save scree plots
plt.title("Scree plot, subj. {:}".format(subject))
plt.plot(np.cumsum(pcvar) * 100)
scree_ending = "subj{:}-scree.pdf".format(subject)
screepath = os.path.join(root,d,scree_ending)
plt.savefig(screepath)
# print some info
print("\tSubj.{}: PCA with {} PCs explains {} of variation".format(subject, str(n_components),
round(total_var_exp,4)
))
pca_out = pca.transform(frames_reshaped)
# output PC scores
pc_filestring = "suzh{:}_pcs.csv".format(subject)
pc_savepath = os.path.join(root,d,pc_filestring)
pc_headers = ["pc"+str(i+1) for i in range(0,n_components)]
meta_headers = md.columns.values
headers = list(meta_headers) + pc_headers
metadata = md.values # md.as_matrix(columns = md.columns[0:11])
out_df = np.row_stack((headers,
np.column_stack((metadata, pca_out))
))
np.savetxt(pc_savepath, out_df, fmt="%s", delimiter = ',')
# subset PCA'ed data into training set and testing sets
training_list = ["IY1", "SH", "S"]
test_list = ["IZ1", "YZ1", "ZZ1", "ZW1"]
test_coart_words = ["XIZ", "SIZ", "XYZ"] # mark as "coart"
test_no_coart_words = ["IZ", "BIZX", "YZ"] # mark as "no_coart"
apical_words = ["SZ", "SZW"]
training_mask = md['phone'].isin(training_list)
training_mask = training_mask.values # .as_matrix()
training_md = md[training_mask].copy()
training_data = pca_out[training_mask].copy()
test_mask = md['phone'].isin(test_list)
test_mask = test_mask.values # .as_matrix()
test_md = md[test_mask].copy()
test_data = pca_out[test_mask].copy()
# train LDA on training data
labs = np.array(training_md.phone) # expand dims?
train_lda = LDA(n_components = int(n_lds))
train_lda.fit(training_data, labs) # train the model on the data
train_lda_out = train_lda.transform(training_data)
# score and/or categorize test data according to trained LDA model
test_lda_out = train_lda.transform(test_data)
# LDA data for csv: training on top of test
ld = pd.DataFrame(np.vstack([train_lda_out, test_lda_out]))
ld = ld.rename(columns = {0:'LD1', 1:'LD2'})
# a subject column for csv
subject_lab = [subject] * ld.shape[0]
subject_column = pd.DataFrame(subject_lab)
subject_column = subject_column.rename(columns = {0:'subj'})
# TODO get pandas to shut up about these two lines
training_md["coart_class"] = ["training"] * training_md.shape[0]
test_md["coart_class"] = test_md.apply(lambda row: coart_class (row),axis=1)
# metadata that was read in earlier for csv: training on top of test
md = pd.concat([training_md, test_md], axis=0, ignore_index=True)
# classification results: training on top of test
cls = pd.concat(
[pd.DataFrame(train_lda.predict(training_data)),
pd.DataFrame(train_lda.predict(test_data))],
axis=0,
ignore_index=True
)
cls = cls.rename(columns = {0:'cls'})
# combine all of the above into a DataFrame object
ld_md = pd.concat([subject_column, ld, cls, md], axis=1)
# add range-normalized linear discriminant values to DataFrame
# not good for 2-LD models since the 2 Ds might not be the same.
# ld_range = max(ld_md.LD) - min(ld_md.LD)
# ld_md = ld_md.assign(normLD = (ld_md.LD - min(ld_md.LD)) / ld_range )
# save analysis data for the current subject as csv
lda_savepath = os.path.join(root,"suzh_{:}_ldas.csv".format(subject))
ld_md.to_csv(lda_savepath, index=False)
# print LDA accuracy
class_score = train_lda.score(training_data,labs)
print("\tLDA accuracy on training data is {:}".format(class_score))
# output classification results
laminal_list = ["IZ1", "YZ1"]
apical_list = ["ZZ1", "ZW1"]
train_labels = list(np.unique(training_md.phone))
test_labels = list(np.unique(test_md.phone))
coart_types = list(np.unique(ld_md.coart_class))
# fricative vowel classification by training category and coarticulatory class
rows_laminal = ld_md.loc[(ld_md.phone == "IZ1") | (ld_md.phone == "YZ1")]
for c in coart_types:
if c not in ["fric", "no_fric"]:
continue
rows_by_co = rows_laminal.loc[rows_laminal.coart_class == c]
for t in train_labels:
rows_by_clco = rows_by_co.loc[rows_by_co.cls == t]
prop_class = round(rows_by_clco.shape[0]/rows_by_co.shape[0], 4)
print("\t{}, coart {} \t classified as {} -- {}".format("laminal",c,t,prop_class))
with open(pct_out_path, "a") as out:
out.write("\t".join([subject,"laminal",c,t,str(prop_class)]) + "\n")
print("\t---")
# apical vowel classification by training category (all same coarticulatory class)
rows_apical = ld_md.loc[(ld_md.phone == "ZZ1") | (ld_md.phone == "ZW1")]
for t in train_labels:
rows_by_class = rows_apical.loc[rows_apical.cls == t]
prop_class = round(rows_by_class.shape[0]/rows_apical.shape[0], 4)
print("\t{}, coart {} \t classified as {} -- {}".format("apical","fric",t,prop_class))
with open(pct_out_path, "a") as out:
out.write("\t".join([subject,"apical","fric",t,str(prop_class)]) + "\n")
print("\t---")
# overall classification by training category
#for tt in test_labels:
# rows_by_test = ld_md.loc[ld_md.phone == tt]
# for t in train_labels:
# rows_by_class = rows_by_test.loc[rows_by_test.cls == t]
# prop_class = round(rows_by_class.shape[0]/rows_by_test.shape[0], 4)
# print("\t{} classified as {} -- {}".format(tt,t,prop_class))
#with open(pct_out, "a") as out:
# out.write("\t".join([subject, tt, t, str(prop_class)]) + "\n")
# print("\t---")
# gather and open all LDA csv files in directory, then put together into one csv file
big_ld_list = []
for root,directories,files in os.walk(expdir):
for f in files:
if f.endswith("ldas.csv"):
csv_back_in = os.path.join(root,f)
one_subj = pd.read_csv(csv_back_in)
big_ld_list.append(one_subj)
big_ld = pd.concat(big_ld_list, axis=0)
big_ld_csv_path = os.path.join(expdir,"suzhou_all_subj_ldas.csv")
big_ld.to_csv(big_ld_csv_path, index=False)
# TODO do the same for PCs
big_pc_list = []
for root,directories,files in os.walk(expdir):
for f in files:
if f.endswith("pcs.csv"):
csv_back_in = os.path.join(root,f)
one_subj = pd.read_csv(csv_back_in)
big_pc_list.append(one_subj)
big_pc = pd.concat(big_pc_list, axis=0)
big_pc_csv_path = os.path.join(expdir,"suzhou_all_subj_pcs.csv")
big_pc.to_csv(big_pc_csv_path, index=False)
```
#### File: scripts/ssanova/con-checker.py
```python
# Authors: <NAME> (<EMAIL>) Copyright (c) 2018
# Last modified 11-2018
import os, sys
def usage():
sys.exit(__doc__)
try:
basedir = os.path.abspath(sys.argv[1])
except IndexError:
usage()
sys.exit(2)
missing_files = 0
# generate the rest of the output file
for dirs, subdirs, files in os.walk(basedir):
for textgrid in files:
# only check for .con files for which a .ch1.TextGrid file exists
if not '.ch1.textgrid' in textgrid.lower():
continue
# get related file names
if 'bpr' in textgrid.lower():
basename = textgrid.split('.')[0] + '.bpr'
else:
basename = textgrid.split('.')[0]
con_file = os.path.join(dirs, str(basename + '.con'))
con_file = con_file.replace('.bpr', '')
if os.path.isfile(con_file):
continue
# TODO check here for other files ending in .con
else:
print("\tNo .con file in {}".format(basename))
missing_files += 1
# TODO check for .con files whose basenames don't match
#elif # another file ends in .con that isn't con_file
# TODO check for multiple .con files
# print out some encouragement
if missing_files == 0:
print("Congratulations, you've finished work in {}!".format(basedir))
else:
print("Almost there! You have a total of {} missing files.".format(missing_files))
```
|
{
"source": "jenninglim/model-comparison-test",
"score": 3
}
|
#### File: model-comparison-test/reltest/psi.py
```python
import numpy as np
import logging
from scipy.stats import truncnorm
def selection(Z):
"""
Characterising selecting the top K Models from vector Z as a linear
combination.
input
Z : "Feature vector" with a normal distribution.
K : Number of selections.
return
ind_sel: Selected index.
A,b : The linear combination of the selection event Az < b.
"""
N = np.shape(Z)[0]
## Sorted list of Z
ind_sorted = np.argsort(Z)
## Pick the best (smallest-valued) entry
ind_sel = ind_sorted[0]
A = np.zeros((N-1,N))
for i in range(N-1):
A[i, ind_sorted[0]] = 1
A[i, ind_sorted[i+1]] = -1
b = np.zeros((N-1))
assert np.sum(np.matmul(A,Z) > 0) ==0, "Assumption error"
return ind_sel, A, b
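# Example (illustrative, not part of the original code): with Z = [0.3, 0.1, 0.5],
# argsort gives [1, 0, 2], so ind_sel = 1 and the two rows of A encode
# Z[1] - Z[0] <= 0 and Z[1] - Z[2] <= 0 with b = [0, 0]; the selection event
# A Z <= b states that the selected index attains the smallest value of Z.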
def psi_inf(A,b,eta, mu, cov, z):
"""
Returns the p-value of the truncated normal. The mean,
variance, and truncated points [a,b] is determined by Lee et al 2016.
"""
l_thres, u_thres= calculate_threshold(z, A, b, eta, cov)
sigma2 = np.matmul(eta,np.matmul(cov,eta))
scale = np.sqrt(sigma2)
params = {"u_thres":u_thres,
"l_thres":l_thres,
"mean": np.matmul(eta,mu),
"scale":scale,
}
ppf = lambda x: truncnorm_ppf(x,
l_thres,
u_thres,
loc=np.matmul(eta,mu),
scale=scale)
sf = lambda x: truncnorm.sf(x, l_thres/scale, u_thres/scale, scale=scale)
return ppf, sf
def calculate_threshold(z, A, b, eta, cov):
"""
Calculates the respective threshold for the method PSI_Inf.
"""
etaz = eta.dot(z)
Az = A.dot(z)
Sigma_eta = cov.dot(eta)
deno = Sigma_eta.dot(eta)
alpha = A.dot(Sigma_eta)/deno
assert(np.shape(A)[0] == np.shape(alpha)[0])
pos_alpha_ind = np.argwhere(alpha>0).flatten()
neg_alpha_ind = np.argwhere(alpha<0).flatten()
acc = (b - np.matmul(A,z))/alpha+np.matmul(eta,z)
if (np.shape(neg_alpha_ind)[0] > 0):
l_thres = np.max(acc[neg_alpha_ind])
else:
l_thres = -10.0**10
if (np.shape(pos_alpha_ind)[0] > 0):
u_thres = np.min(acc[pos_alpha_ind])
else:
u_thres= 10**10
return l_thres, u_thres
def test_significance(A, b, eta, mu, cov, z, alpha):
"""
Compute the p-value of a one-tailed (right-tail, as implemented) test of eta^T z.
Returns a tuple (reject_h0, p_value).
"""
ppf, sf = psi_inf(A, b, eta, mu, cov, z)
stat = np.matmul(eta,z) ## Test statistic
sigma = np.sqrt(np.matmul(eta,np.matmul(cov,eta)))
## If the std dev is < 0 or undefined, do not reject the hypothesis.
if np.isnan(sigma) or not np.isreal(sigma):
logging.warning("Scale is not real or negative, test reject")
return False, 1.
threshold = ppf(1.-alpha)
pval = sf(stat)
return stat > threshold, pval
def generateEta(ind_sel, n_models):
"""
Generate multiple etas corresponding to testing
within the selected indices.
"""
etas = np.zeros((n_models-1, n_models))
for i in range(n_models-1):
index = i if i < ind_sel else i +1
etas[i,ind_sel] = -1
etas[i,index]=1
return etas
def truncnorm_ppf(x, a, b,loc=0., scale=1.):
"""
Approximate percent point (quantile) function of the truncated normal,
usable in the tail regions where the standard SciPy function may return NaN or inf.
"""
thres = truncnorm.ppf(x,(a-loc)/scale,(b-loc)/scale,loc=loc, scale=scale)
if np.any(np.isnan(thres)) or np.any(np.isinf(thres)):
logging.info("Threshold is Nan using approximations.")
thres = loc+scale*quantile_tn(x,(a-loc)/scale,(b-loc)/scale)
return thres
def quantile_tn(u,a,b,threshold=0.0005):
"""
Approximate quantile function in the tail region
https://www.iro.umontreal.ca/~lecuyer/myftp/papers/truncated-normal-book-chapter.pdf
"""
def q(x, r=10):
"""
Helper function.
"""
acc=0
for i in range(r):
acc = acc + (2*i-1)/((-1)**i*x**(2*i+1))
return 1/x + acc
q_a = q(a)
q_b = q(b)
c =q_a * (1- u) + q_b * u * np.exp((a**2 - b**2)/2)
d_x = 100
z = 1 - u + u * np.exp((a**2 - b**2)/2)
x = np.sqrt(a**2 - 2 * np.log(z))
while d_x > threshold and not np.isnan(d_x):
z = z - x * (z * q(x) - c)
x_new = np.sqrt(a**2 - 2 * np.log(z))
d_x = np.abs(x_new - x)/x
x = x_new
return x
```
#### File: model-comparison-test/reproduce-results/appen-l-models-fpr-tpr-mean-shift.py
```python
import numpy as np
import reltest
from reltest.mctest import MCTestPSI, MCTestCorr
import reltest.mmd as mmd
import reltest.ksd as ksd
from reltest import kernel
import logging
from ex_models import generateLGauss
from helper import summary
import sys
import os
dim = 10
n_models = 10
n_same = 9
model_params = {'mu0':0.5, 'sig0':1, # Model 0 Parameters
'muR':0, 'sigR':1 # Reference Parameters
}
n_samples = int(sys.argv[1])
src = generateLGauss(model_params, dim, n_models,n_same)
res = src.sample(n_samples)
n_trials =300
models = res['models']
Q = res['ref']
def independent_test(n_samples, n_trials, src, setting):
res_psi= {'mmd_u':[],
'mmd_lin':[],
'ksd_u':[],
'ksd_lin':[],
}
res_cor= {
'ksd_u_bh':[],
'ksd_u_by':[],
'ksd_u_bn':[],
'mmd_u_bh':[],
'mmd_u_by':[],
'mmd_u_bn':[],
}
model_dens = src.get_densities()
for j in range(n_trials):
samples = src.sample(n_samples, seed=j)
models = samples['models']
Q = samples['ref']
psiTest = MCTestPSI(Q.data())
corrtest = MCTestCorr(Q.data())
mmd_med = mmd.med_heuristic([i.data() for i in models], Q.data(),
subsample=1000)
ksd_med = ksd.med_heuristic(Q.data(),
subsample=1000)
mmd_kernel, ksd_kernel = kernel.KGauss(mmd_med), kernel.KGauss(ksd_med)
mmd_u = mmd.MMD_U(mmd_kernel)
mmd_lin = mmd.MMD_Linear(mmd_kernel)
ksd_u = ksd.KSD_U(ksd_kernel)
ksd_lin = ksd.KSD_Linear(ksd_kernel)
model_samples = [i.data() for i in models]
## PSI Based Test
res_psi['ksd_u'].append(psiTest.perform_tests(model_dens, ksd_u))
res_psi['ksd_lin'].append(psiTest.perform_tests(model_dens, ksd_lin))
res_psi['mmd_u'].append(psiTest.perform_tests(model_samples, mmd_u))
res_psi['mmd_lin'].append(psiTest.perform_tests(model_samples, mmd_lin))
## Correction Based Test
res_cor['mmd_u_bh'].append(corrtest.perform_tests(model_samples, mmd_u, split=0.5, density=False, correction=0))
res_cor['mmd_u_by'].append(corrtest.perform_tests(model_samples, mmd_u, split=0.5, density=False, correction=1))
res_cor['mmd_u_bn'].append(corrtest.perform_tests(model_samples, mmd_u, split=0.5, density=False, correction=3))
res_cor['ksd_u_bh'].append(corrtest.perform_tests(model_dens, ksd_u, split=0.5, density=True, correction=0))
res_cor['ksd_u_by'].append(corrtest.perform_tests(model_dens, ksd_u, split=0.5, density=True, correction=1))
res_cor['ksd_u_bn'].append(corrtest.perform_tests(model_dens, ksd_u, split=0.5, density=True, correction=3))
return res_psi,res_cor
setting = {'n':n_models,
'dim':dim}
res_psi,res_cor = independent_test(n_samples,n_trials,src,setting)
np.save(SAVE_DIR+"PSI"+str(n_samples),res_psi)
np.save(SAVE_DIR+"COR"+str(n_samples),res_cor)
```
#### File: model-comparison-test/reproduce-results/helper.py
```python
import numpy as np
import matplotlib.pyplot as plt
import logging
import sys
from scipy import linalg
import reltest.util as util
from reltest.mctest import MCTestPSI
from reltest.mmd import MMD_Linear, MMD_U
from reltest.ksd import KSD_U, KSD_Linear
from reltest import kernel
from kmod.mctest import SC_MMD
from kgof import glo
from rej import *
def two_model_rej_samp(source, l_samples, n_trials, eta, n_selected =1):
"""
Rejection rate of PSIMMD_Bloc, PSIMMD_Inc, RelMMD for a given
range of sample sizes determined by l_samples
"""
res_psi_mmd_lin = np.zeros((len(l_samples),1))
res_psi_mmd_inc = np.zeros((len(l_samples),1))
res_psi_mmd_bloc = np.zeros((len(l_samples),1))
res_psi_mmd_u = np.zeros((len(l_samples),1))
res_psi_ksd_u = np.zeros((len(l_samples),1))
res_psi_ksd_lin = np.zeros((len(l_samples),1))
res_rel_mmd = np.zeros((len(l_samples),1))
res_rel_ksd = np.zeros((len(l_samples),1))
## Average P-Value over difference seed
for j in range(len(l_samples)):
logging.info("Testing for %d samples" % l_samples[j])
n_samples = l_samples[j]
block_size = int(np.sqrt(n_samples))
one_res = two_model_rej(source, n_samples, n_trials, eta, offset=j)
res_psi_mmd_lin[j] = one_res['PSI_mmd_lin']
res_psi_mmd_u[j] = one_res['PSI_mmd_u']
res_psi_ksd_u[j] = one_res['PSI_ksd_u']
res_psi_ksd_lin[j] = one_res['PSI_ksd_lin']
res_rel_mmd[j] = one_res['RelMMD']
res_rel_ksd[j] = one_res['RelKSD']
results = {
'PSI_mmd_lin':res_psi_mmd_lin,
'PSI_mmd_u':res_psi_mmd_u,
'PSI_ksd_lin':res_psi_ksd_lin,
'PSI_ksd_u':res_psi_ksd_u,
'RelMMD' :res_rel_mmd,
'RelKSD' :res_rel_ksd}
return results
def neg_log_likelihood(log_ds, samples):
return [-np.mean(log_d(samples)) for log_d in log_ds]
def filter_crimetype(data, type = None):
if type is None:
data = data
else:
data = data[data[:,0] == type]
if len(data) == 1:
print("No Crime Type found")
else:
loc = data[:,1:].astype(float)
loc = np.nan_to_num(loc)
loc = loc[loc[:,0] != 0]
#Set City bound
loc = loc[loc[:,0] >-89]
loc = loc[loc[:,1] > 40]
return loc
def load_crime_dataset(c_type, size, return_transform=False):
## Take in consideration the mean and std
import os
dd = np.load(glo.data_file('/is/ei/jlim/Documents/n-relative-testing/data/chicago_crime_loc_with_type2016.npz'))['data']
loc = filter_crimetype(dd, c_type)
## Standardise
shift, scale = np.mean(loc,axis=0), np.std(loc,axis=0)
loc = loc - shift
loc = loc/scale
loc_train, loc_test = loc[:size,:], loc[size:,:]
def init(loc_test):
def sample_test_data(size, seed):
with util.NumpySeedContext(seed=seed):
sample_test = np.random.permutation(loc_test)
return sample_test[:size,:]
return sample_test_data
if return_transform:
return loc_train,init(loc_test), shift, scale
else:
return loc_train,init(loc_test)
def summary(results, n_models):
"""
Return a summary of the results:
av_rej : average rejection rate per model
av_sel : average selection frequency per model
av_time : average runtime in seconds
"""
av_rej = np.zeros(n_models)
av_sel = np.zeros(n_models)
av_time = 0
for result in results:
av_rej = av_rej+result['h0_rejected']/len(results)
av_sel[result['ind_sel']] += 1./len(results)
av_time = av_time+result['time_secs']/len(results)
summary = {'av_rej': av_rej,
'av_sel':av_sel,
'av_time':av_time}
return summary
def download_to(url, file_path):
"""
Download the file specified by the URL and save it to the file specified
by the file_path. Overwrite the file if exist.
"""
# see https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
import urllib.request
import shutil
# Download the file from `url` and save it locally under `file_name`:
with urllib.request.urlopen(url) as response, \
open(file_path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
########################
#based on https://github.com/mbinkowski/MMD-GAN
#and https://github.com/wittawatj/kernel-mod/blob/master/kmod/ex/exutil.py
###############################
def fid_score(codes_g, codes_r, eps=1e-6, output=sys.stdout, **split_args):
splits_g = get_splits(**split_args)
splits_r = get_splits(**split_args)
assert len(splits_g) == len(splits_r)
d = codes_g.shape[1]
assert codes_r.shape[1] == d
scores = np.zeros(len(splits_g))
for i, (w_g, w_r) in enumerate(zip(splits_g, splits_r)):
part_g = codes_g[w_g]
part_r = codes_r[w_r]
mn_g = part_g.mean(axis=0)
mn_r = part_r.mean(axis=0)
cov_g = np.cov(part_g, rowvar=False)
cov_r = np.cov(part_r, rowvar=False)
covmean, _ = linalg.sqrtm(cov_g.dot(cov_r), disp=False)
if not np.isfinite(covmean).all():
cov_g[range(d), range(d)] += eps
cov_r[range(d), range(d)] += eps
covmean = linalg.sqrtm(cov_g.dot(cov_r))
scores[i] = np.sum((mn_g - mn_r) ** 2) + (
np.trace(cov_g) + np.trace(cov_r) - 2 * np.trace(covmean))
return np.real(scores)
def get_splits(n, splits=10, split_method='openai'):
if split_method == 'openai':
return [slice(i * n // splits, (i + 1) * n // splits)
for i in range(splits)]
elif split_method == 'bootstrap':
return [np.random.choice(n, n) for _ in range(splits)]
elif 'copy':
return [np.arange(n) for _ in range(splits)]
else:
raise ValueError("bad split_method {}".format(split_method))
def fid(X, Z):
"""
Compute the FID between the two samples X and Z.
The bootstrap estimator from Binkowski et al. 2018 is used.
The number of bootstrap resamplings can be specified by the variable splits
below. For the non-bootstrap version, see the method
met_fid_nbstrp.
"""
# keeping it the same as the comparison in the MMD GAN paper, 10 bootstrap resamplings
splits = 10
split_size = X.shape[0]
assert X.shape == Z.shape
split_method = 'bootstrap'
split_args = {'splits': splits, 'n': split_size, 'split_method': split_method}
with util.ContextTimer() as t:
fid_scores_xz = fid_score(X, Z, **split_args)
fid_score_xz = np.mean(fid_scores_xz)
return fid_score_xz
```
#### File: reproduce-results/tf_models/flow.py
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import re
tfb = tfp.bijectors
tfd = tfp.distributions
### INVERTIBLE FLOW MODELS
class OneGaussMAF():
def __init__(self, n_layers, use_batchnorm=False, name="default"):
base_dist=tfd.MultivariateNormalDiag(
loc=[0.,0.])
with tf.variable_scope(name, dtype=tf.float32):
self.model = tfd.TransformedDistribution(
distribution= base_dist,
bijector= AffineNonlinear(n_layers,name, use_batchnorm)
)
self.name = name
self.n_layers =n_layers
def loss(self, x):
return -tf.reduce_mean(self.model.log_prob(x))
def sample(self, n):
return self.model.sample(n)
'''
def ploss(self, x, beta):
loss = -tf.reduce_mean(self.model.log_prob(x))
weights = []
for var in tf.trainable_variables(self.name):
if re.match("{0}_1/dense/kernel:*".format(self.name), var.name):
weights.append(var)
penalty = tf.reduce_sum([tf.nn.l2_loss(weight) for weight in weights])
return loss + beta*penalty
'''
def log_prob(self,x):
return self.model.log_prob(x)
class FivGaussMAF():
def __init__(self, n_layers, use_batchnorm=False, name="default"):
n_gauss = 5.
base_dist=tfd.Mixture(
cat=tfd.Categorical(probs=[0.2,0.2,0.2,0.2,0.2]),
components=[
tfd.MultivariateNormalDiag(loc=[1., -1.]),
tfd.MultivariateNormalDiag(loc=[-1., 1.]),
tfd.MultivariateNormalDiag(loc=[0., 0.]),
tfd.MultivariateNormalDiag(loc=[1., 1.]),
tfd.MultivariateNormalDiag(loc=[-1., -1.]),
])
with tf.variable_scope(name, dtype=tf.float32):
self.model = tfd.TransformedDistribution(
distribution= base_dist,
bijector= AffineNonlinear(n_layers,name, use_batchnorm)
)
self.name = name
self.n_layers =n_layers
def loss(self, x):
return -tf.reduce_mean(self.model.log_prob(x))
'''
def ploss(self, x, beta):
loss = -tf.reduce_mean(self.model.log_prob(x))
weights = []
for var in tf.trainable_variables(self.name):
if re.match("{0}_1/dense/kernel:*".format(self.name), var.name):
weights.append(var)
penalty = tf.reduce_sum([(weight**2) for weight in weights])
return loss + beta*penalty
'''
def sample(self, n):
return self.model.sample(n)
def log_prob(self,x):
return self.model.log_prob(x)
def AffineNonlinear(n_layers,name,use_batchnorm):
# Init variables
layers = []
for i in range(n_layers):
layers.append(tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
name=name,
activation=tf.nn.leaky_relu,
hidden_layers=[512, 512])))
if use_batchnorm and i % 2 == 0:
layers.append(tfb.BatchNormalization())
layers.append(tfb.Permute(permutation=[1, 0]))
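# Note (added for clarity): layers[:-1] below drops the trailing Permute so the
# chained bijector ends on a MAF (or BatchNormalization) layer rather than a
# final, redundant permutation.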
return tfb.Chain(layers[:-1])
```
|
{
"source": "jenninglim/multiscale-features",
"score": 3
}
|
#### File: multiscale-features/mskernel/kernel.py
```python
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.signal as sig
from mskernel import util
class Kernel(object):
"""Abstract class for kernels"""
__metaclass__ = ABCMeta
@abstractmethod
def eval(self, X1, X2):
"""Evalute the kernel on data X1 and X2 """
pass
@abstractmethod
def pair_eval(self, X, Y):
"""Evaluate k(x1, y1), k(x2, y2), ..."""
pass
class KHoPoly(Kernel):
"""Homogeneous polynomial kernel of the form
(x.dot(y))**d
"""
def __init__(self, degree):
assert degree > 0
self.degree = degree
def eval(self, X1, X2):
return X1.dot(X2.T)**self.degree
def pair_eval(self, X, Y):
return np.sum(X*Y, 1)**self.degree
def __str__(self):
return 'KHoPoly(d=%d)'%self.degree
class KLinear(Kernel):
def eval(self, X1, X2):
return X1.dot(X2.T)
def pair_eval(self, X, Y):
return np.sum(X*Y, 1)
def __str__(self):
return "KLinear()"
class KGauss(Kernel):
def __init__(self, sigma2):
assert sigma2 > 0, 'sigma2 must be > 0. Was %s'%str(sigma2)
self.sigma2 = sigma2
def eval(self, X1, X2):
"""
Evaluate the Gaussian kernel on the two 2d numpy arrays.
Parameters
----------
X1 : n1 x d numpy array
X2 : n2 x d numpy array
Return
------
K : a n1 x n2 Gram matrix.
"""
(n1, d1) = X1.shape
(n2, d2) = X2.shape
assert d1==d2, 'Dimensions of the two inputs must be the same'
D2 = np.sum(X1**2, 1)[:, np.newaxis] - 2*X1.dot(X2.T) + np.sum(X2**2, 1)
K = np.exp(-D2/self.sigma2)
return K
def pair_eval(self, X, Y):
"""
Evaluate k(x1, y1), k(x2, y2), ...
Parameters
----------
X, Y : n x d numpy array
Return
-------
a numpy array with length n
"""
(n1, d1) = X.shape
(n2, d2) = Y.shape
assert n1==n2, 'Two inputs must have the same number of instances'
assert d1==d2, 'Two inputs must have the same dimension'
D2 = np.sum( (X-Y)**2, 1)
Kvec = np.exp(-D2/self.sigma2)
return Kvec
def __str__(self):
return "KGauss(%.3f)"%self.sigma2
class KTriangle(Kernel):
"""
A triangular kernel defined on 1D. k(x, y) = B_1((x-y)/width) where B_1 is the
B-spline function of order 1 (i.e., triangular function).
"""
def __init__(self, width):
assert width > 0, 'width must be > 0'
self.width = width
def eval(self, X1, X2):
"""
Evaluate the triangular kernel on the two 2d numpy arrays.
Parameters
----------
X1 : n1 x 1 numpy array
X2 : n2 x 1 numpy array
Return
------
K : a n1 x n2 Gram matrix.
"""
(n1, d1) = X1.shape
(n2, d2) = X2.shape
assert d1==1, 'd1 must be 1'
assert d2==1, 'd2 must be 1'
diff = (X1-X2.T)/self.width
K = sig.bspline( diff , 1)
return K
def pair_eval(self, X, Y):
"""
Evaluate k(x1, y1), k(x2, y2), ...
Parameters
----------
X, Y : n x 1 numpy array
Return
-------
a numpy array with length n
"""
(n1, d1) = X.shape
(n2, d2) = Y.shape
assert d1==1, 'd1 must be 1'
assert d2==1, 'd2 must be 1'
diff = (X-Y)/self.width
Kvec = sig.bspline( diff , 1)
return Kvec
def __str__(self):
return "KTriangle(w=%.3f)"%self.width
class KIMQ(Kernel):
"""
The inverse multiquadric (IMQ) kernel studied in
Measure Sample Quality with Kernels
<NAME>, <NAME>
k(x,y) = (c^2 + ||x-y||^2)^b
where c > 0 and b < 0. Following a theorem in the paper, this kernel is
convergence-determining only when -1 < b < 0. In the experiments,
the paper sets b = -1/2 and c = 1.
"""
def __init__(self, b=-0.5, c=1.0):
if not b < 0:
raise ValueError('b has to be negative. Was {}'.format(b))
if not c > 0:
raise ValueError('c has to be positive. Was {}'.format(c))
self.b = b
self.c = c
def eval(self, X, Y):
"""Evalute the kernel on data X and Y """
b = self.b
c = self.c
D2 = util.dist2_matrix(X, Y)
K = (c**2 + D2)**b
return K
def pair_eval(self, X, Y):
"""Evaluate k(x1, y1), k(x2, y2), ...
"""
assert X.shape[0] == Y.shape[0]
b = self.b
c = self.c
return (c**2 + np.sum((X-Y)**2, 1))**b
def gradX_Y(self, X, Y, dim):
"""
Compute the gradient with respect to the dimension dim of X in k(X, Y).
X: nx x d
Y: ny x d
Return a numpy array of size nx x ny.
"""
D2 = util.dist2_matrix(X, Y)
# 1d array of length nx
Xi = X[:, dim]
# 1d array of length ny
Yi = Y[:, dim]
# nx x ny
dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]
b = self.b
c = self.c
Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
assert Gdim.shape[0] == X.shape[0]
assert Gdim.shape[1] == Y.shape[0]
return Gdim
def gradY_X(self, X, Y, dim):
"""
Compute the gradient with respect to the dimension dim of Y in k(X, Y).
X: nx x d
Y: ny x d
Return a numpy array of size nx x ny.
"""
return -self.gradX_Y(X, Y, dim)
def gradXY_sum(self, X, Y):
"""
Compute
\sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
evaluated at each x_i in X, and y_i in Y.
X: nx x d numpy array.
Y: ny x d numpy array.
Return a nx x ny numpy array of the derivatives.
"""
b = self.b
c = self.c
D2 = util.dist2_matrix(X, Y)
# d = input dimension
d = X.shape[1]
c2D2 = c**2 + D2
T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
T2 = -2.0*b*d*c2D2**(b-1)
return T1 + T2
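# Derivation check (added note): with k(x, y) = (c^2 + D2)^b and D2 = ||x - y||^2,
# d/dy_i k = -2 b (x_i - y_i) (c^2 + D2)^(b-1), so
# d^2 k / dx_i dy_i = -2 b (c^2 + D2)^(b-1) - 4 b (b-1) (x_i - y_i)^2 (c^2 + D2)^(b-2);
# summing over i = 1..d gives exactly T2 + T1 above.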
```
|
{
"source": "JenningsL/frustum-pointnets",
"score": 2
}
|
#### File: frustum-pointnets/avod_prop/kitti_object_avod.py
```python
import os
import sys
import numpy as np
import cv2
from PIL import Image
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'mayavi'))
sys.path.append(os.path.join(BASE_DIR, '../kitti'))
import kitti_util as utils
# import cPickle as pickle
import pickle
from kitti_object import *
def non_max_suppression(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes are integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by their confidence score (column 4)
area = (x2 - x1) * (y2 - y1)
idxs = np.argsort(boxes[:,4])
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
# WARNING: (x1, y1) must be the relatively small point
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have an overlap above the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return pick
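# Usage sketch (illustrative): boxes is an N x 5 array of [x1, y1, x2, y2, score];
# non_max_suppression(boxes, 0.3) returns the indices of the boxes kept after
# discarding any box whose overlap with an already-picked box exceeds 0.3.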
class ProposalObject(object):
def __init__(self, box_3d, score=0.0, type='Car', roi_features=None):
# [x, y, z, l, w, h, ry]
self.t = box_3d[0:3]
self.l = box_3d[3]
self.w = box_3d[4]
self.h = box_3d[5]
self.ry = box_3d[6]
self.score = score
self.type = type
self.roi_features = roi_features
class kitti_object_avod(kitti_object):
def __init__(self, root_dir, split='training'):
'''root_dir contains training and testing folders'''
self.root_dir = root_dir
self.split = split
self.split_dir = os.path.join(root_dir, split)
if split == 'training':
self.num_samples = 7481
elif split == 'testing':
self.num_samples = 7518
else:
print('Unknown split: %s' % (split))
exit(-1)
# if split not in ['training', 'testing']:
# print('Unknown split: %s' % (split))
# exit(-1)
self.image_dir = os.path.join(self.split_dir, 'image_2')
self.calib_dir = os.path.join(self.split_dir, 'calib')
self.lidar_dir = os.path.join(self.split_dir, 'velodyne')
self.label_dir = os.path.join(self.split_dir, 'label_2')
self.proposal_dir = os.path.join(self.split_dir, 'proposal')
# self.num_samples = len(os.listdir(self.image_dir))
# print(self.num_samples)
def np_read_lines(self, filename, lines):
arr = []
with open(filename, 'rb') as fp:
for i, line in enumerate(fp):
if i in lines:
arr.append(np.fromstring(line, dtype=float, sep=' '))
return np.array(arr)
def get_proposals(self, idx, rpn_score_threshold=0.1, nms_iou_thres=0.3):
assert(idx<self.num_samples)
proposals_file_path = os.path.join(self.proposal_dir, '%06d.txt'%(idx))
roi_file_path = os.path.join(self.proposal_dir, '%06d_roi.txt'%(idx))
proposals_and_scores = np.loadtxt(proposals_file_path)
keep_idxs = np.arange(0, len(proposals_and_scores))
proposal_boxes_3d = proposals_and_scores[:, 0:7]
proposal_scores = proposals_and_scores[:, 7]
# Apply score mask to proposals
score_mask = proposal_scores > rpn_score_threshold
# 3D box in the format [x, y, z, l, w, h, ry]
proposal_boxes_3d = proposal_boxes_3d[score_mask]
keep_idxs = keep_idxs[score_mask]
proposal_objs = \
[ProposalObject(box_3d) for box_3d in proposal_boxes_3d]
boxes = []
box_scores = []
calib = self.get_calibration(idx)
for obj in proposal_objs:
_, corners = utils.compute_box_3d(obj, calib.P)
# corners_velo = calib.project_rect_to_velo(corners)
# boxes.append(corners_velo)
boxes.append(corners)
box_scores.append(obj.score)
#bev_boxes = list(map(lambda bs: [np.amin(bs[0],axis=0)[0], np.amin(bs[0], axis=0)[2], np.amax(bs[0], axis=0)[0], np.amax(bs[0], axis=0)[2], bs[1]], zip(boxes, box_scores)))
#bev_boxes = np.array(bev_boxes)
# print('before nms: {0}'.format(len(bev_boxes)))
#nms_idxs = non_max_suppression(bev_boxes, nms_iou_thres)
# print('after nms: {0}'.format(len(nms_idxs)))
# boxes = [boxes[i] for i in nms_idxs]
#keep_idxs = keep_idxs[nms_idxs]
proposals_roi_features = self.np_read_lines(roi_file_path, keep_idxs)
proposal_scores = proposal_scores[keep_idxs]
# proposal_objs = [proposal_objs[i] for i in nms_idxs]
for obj, score, feat in zip(proposal_objs, proposal_scores, proposals_roi_features):
obj.score = score
obj.roi_features = feat
return proposal_objs
```
#### File: JenningsL/frustum-pointnets/viz_detection.py
```python
import sys
import os
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
import matplotlib.patheffects as patheffects
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'mayavi'))
sys.path.append(os.path.join(BASE_DIR, 'kitti'))
sys.path.append(os.path.join(BASE_DIR, 'train'))
sys.path.append(os.path.join(BASE_DIR, 'models'))
from model_util import type_whitelist
import kitti_util as utils
from kitti_object import *
#import mayavi.mlab as mlab
#from viz_util import draw_lidar, draw_gt_boxes3d
from wavedata.tools.visualization import vis_utils
BOX_COLOUR_SCHEME = {
'Car': '#00FF00', # Green
'Pedestrian': '#00FFFF', # Teal
'Cyclist': '#FFFF00' # Yellow
}
class DetectObject(object):
def __init__(self, h,w,l,tx,ty,tz,ry, frame_id, type_label, score, box_2d=None, box_3d=None):
self.t = [tx,ty,tz]
self.ry = ry
self.h = h
self.w = w
self.l = l
self.frame_id = frame_id
self.type_label = type_label
self.score = score
self.box_2d = box_2d
self.box_3d = box_3d # corners
def load_result(dataset, fname, data_idx):
objs = []
# calib = dataset.get_calibration(int(data_idx))
with open(fname, 'r') as fin:
for line in fin:
cols = line.split()
type_label = cols[0]
h,w,l = (float(cols[8]), float(cols[9]), float(cols[10]))
tx,ty,tz = (float(cols[11]), float(cols[12]), float(cols[13]))
ry = float(cols[14])
score = float(cols[15])
obj = DetectObject(h,w,l,tx,ty,tz,ry,data_idx,type_label,score)
objs.append(obj)
return objs
def draw_boxes(objects, calib, plot_axes):
all_corners = []
for obj in objects:
if hasattr(obj, 'type_label'):
obj.obj_type = obj.type_label
else:
obj.obj_type = obj.type
if not hasattr(obj, 'truncation'):
obj.truncation = 0
if not hasattr(obj, 'occlusion'):
obj.occlusion = 0
if not hasattr(obj, 'score'):
obj.score = 1
if obj.obj_type not in type_whitelist:
continue
vis_utils.draw_box_3d(plot_axes, obj, calib.P,
show_orientation=False,
color_table=['r', 'y', 'r', 'w'],
line_width=2,
double_line=False)
box3d_pts_2d, corners = utils.compute_box_3d(obj, calib.P)
if box3d_pts_2d is None:
continue
all_corners.append(corners)
# draw text info
x1 = np.amin(box3d_pts_2d, axis=0)[0]
y1 = np.amin(box3d_pts_2d, axis=0)[1]
x2 = np.amax(box3d_pts_2d, axis=0)[0]
y2 = np.amax(box3d_pts_2d, axis=0)[1]
text_x = (x1 + x2) / 2
text_y = y1
text = "{}\n{:.2f}".format(obj.obj_type, obj.score)
plot_axes.text(text_x, text_y - 4,
text,
verticalalignment='bottom',
horizontalalignment='center',
color=BOX_COLOUR_SCHEME[obj.obj_type],
fontsize=10,
fontweight='bold',
path_effects=[
patheffects.withStroke(linewidth=2,
foreground='black')])
return all_corners
def visualize(dataset, frame_id, prediction, show_3d=False, output_dir=None):
fig_size = (10, 6.1)
pred_fig, pred_2d_axes, pred_3d_axes = \
vis_utils.visualization(dataset.image_dir,
int(frame_id),
display=False,
fig_size=fig_size)
calib = dataset.get_calibration(frame_id) # 3 by 4 matrix
# 2d visualization
# draw groundtruth
labels = dataset.get_label_objects(frame_id)
draw_boxes(labels, calib, pred_2d_axes)
# draw prediction on second image
pred_corners = draw_boxes(prediction, calib, pred_3d_axes)
if output_dir:
filename = os.path.join(output_dir, 'result_2d_image/%06d.png' % frame_id)
plt.savefig(filename)
plt.close(pred_fig)
else:
plt.show()
#input()
if show_3d:
# 3d visualization
pc_velo = dataset.get_lidar(frame_id)
boxes3d_velo = []
for corners in pred_corners:
pts_velo = calib.project_rect_to_velo(corners)
boxes3d_velo.append(pts_velo)
fig = draw_lidar(pc_velo)
fig = draw_gt_boxes3d(boxes3d_velo, fig, draw_text=False, color=(1, 1, 1))
#input()
if output_dir:
filename = os.path.join(output_dir, 'result_3d_image/%06d.png' % frame_id)
mlab.savefig(filename, figure=fig)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--kitti_path',
type=str,
dest='kitti_path',
required=True,
help='kitti_path')
parser.add_argument('--detection_path',
type=str,
dest='detection_path',
required=True,
help='detection_path')
parser.add_argument('--output_dir',
type=str,
dest='output_dir',
help='output_dir')
args = parser.parse_args()
dataset = kitti_object(args.kitti_path, 'training')
if args.output_dir:
if not os.path.exists(os.path.join(args.output_dir, 'result_2d_image')):
os.makedirs(os.path.join(args.output_dir, 'result_2d_image'))
if not os.path.exists(os.path.join(args.output_dir, 'result_3d_image')):
os.makedirs(os.path.join(args.output_dir, 'result_3d_image'))
for f in os.listdir(args.detection_path):
print('processing %s' % f)
data_idx = f.replace('.txt', '')
fname = os.path.join(args.detection_path, f)
objs = load_result(dataset, fname, data_idx)
visualize(dataset, int(data_idx), objs, show_3d=False, output_dir=args.output_dir)
```
|
{
"source": "jenningsm42/mini-mmo-server",
"score": 3
}
|
#### File: mini-mmo-server/server/message.py
```python
import struct
class MessageError(Exception):
pass
class Message:
def __init__(self, data=None, message_type=None, message=None):
if data:
self.data = data
if len(self.data) < 4:
raise MessageError('The message is too short')
self.message_type = struct.unpack('!H', self.data[:2])[0]
elif message_type and message:
self.message_type = message_type
serialized_message = message.SerializeToString()
if len(serialized_message) > 2**16 - 1:
raise MessageError('The message is too long')
self.data = struct.pack('!H', self.message_type)
self.data += struct.pack('!H', len(serialized_message))
self.data += serialized_message
else:
raise MessageError('Invalid parameters passed to __init__')
def __repr__(self):
return f'<Message type={self.message_type} data={self.data}>'
@property
def serialized_message(self):
return self.data[4:]
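# Wire format implied above (for reference): bytes 0-1 hold the message type and
# bytes 2-3 the payload length, both packed as big-endian unsigned shorts ('!H'),
# followed by the serialized protobuf payload; serialized_message skips this
# 4-byte header.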
```
#### File: server/module/players.py
```python
from server.server import register_handler
from server.message_type import MessageType
from server.message import Message
from server.proto.PlayerJoin_pb2 import (
PlayersResponse, JoinRequest, PlayerJoin)
from server.service.player import PlayerService
from server.service.character import CharacterService
@register_handler(MessageType.join_request)
async def player_join(message, client, server):
info = JoinRequest()
info.ParseFromString(message.serialized_message)
with CharacterService() as service:
character = service.get(info.character_id)
with PlayerService() as service:
character = service.session.merge(character)
service.create(character)
server.players.update_all_positions()
players_response = PlayersResponse()
for other_character in server.players.characters.values():
player_info = players_response.players.add()
player_info.player_id = other_character.id
player_info.character.x = other_character.last_x
player_info.character.y = other_character.last_y
player_info.velocity_x = other_character.velocity_x
player_info.velocity_y = other_character.velocity_y
player_info.character.body_color = other_character.body_color
player_info.character.shirt_color = other_character.shirt_color
player_info.character.legs_color = other_character.legs_color
player_info.character.name = other_character.name
client.player_id = info.character_id
server.players.add(client, character)
await client.send(Message(
message_type=MessageType.players_response,
message=players_response))
player_join = PlayerJoin()
player_join.player_id = client.player_id
player_join.character.x = character.last_x
player_join.character.y = character.last_y
player_join.character.body_color = character.body_color
player_join.character.shirt_color = character.shirt_color
player_join.character.legs_color = character.legs_color
player_join.character.name = character.name
await server.broadcast(Message(
message_type=MessageType.player_join,
message=player_join),
exclude=client)
@register_handler(MessageType.players_request)
async def players_state(message, client, server):
if not client.player_id:
raise Exception('Received players_request event for invalid player!')
server.players.update_all_positions()
players_response = PlayersResponse()
for character in server.players.players.values():
if character.id == client.player_id:
continue
player_info = players_response.players.add()
player_info.player_id = character.id
player_info.x = character.last_x
player_info.y = character.last_y
player_info.velocity_x = character.velocity_x
player_info.velocity_y = character.velocity_y
await client.send(Message(
message_type=MessageType.players_response,
message=players_response))
```
#### File: server/module/register.py
```python
from server.server import register_handler
from server.message_type import MessageType
from server.message import Message
from server.proto.Register_pb2 import RegisterRequest, RegisterResponse
from server.service.account import AccountService, AccountAlreadyExistsError
@register_handler(MessageType.register_request)
async def register_account(message, client, server):
request = RegisterRequest()
request.ParseFromString(message.serialized_message)
response = RegisterResponse()
with AccountService() as service:
try:
service.create(request.username, request.password)
except AccountAlreadyExistsError:
response.success = False
response.error_message = 'Username already in use'
else:
response.success = True
await client.send(Message(
message_type=MessageType.register_response,
message=response))
```
#### File: server/module/tick_keep_alive.py
```python
from datetime import datetime
import asyncio
import logging
from server.server import register_slow_tick_event
logger = logging.getLogger(__name__)
KEEP_ALIVE_DURATION = 600
@register_slow_tick_event
async def tick_keep_alive(server):
# Disconnect players after some amount of inactivity
now = datetime.now()
coros = []
for client, _ in server.players:
if client.disconnecting:
continue
last_message_duration = (
now - client.last_message_time).total_seconds()
if last_message_duration > KEEP_ALIVE_DURATION:
            logger.info(f'{client} has gone past keep alive duration')
coros.append(server.disconnect_user(client))
if coros:
await asyncio.gather(*coros)
```
#### File: mini-mmo-server/server/server.py
```python
from collections import defaultdict
from datetime import datetime
from os.path import join
import asyncio
import logging
import uuid
from .client import Client, DisconnectError
from .map import Map
from .message import Message
from .message_type import MessageType
from .player_collection import PlayerCollection
from .proto.PlayerJoin_pb2 import PlayerLeave
from .service.account import AccountService
from .service.player import PlayerService
logger = logging.getLogger(__name__)
def register_handler(message_type):
def decorator(func):
Server.HANDLERS[message_type] = func
return func
return decorator
def register_slow_tick_event(func):
Server.TICK_EVENTS[Server.SLOW_TICK_DELAY].append(func)
return func
def register_fast_tick_event(func):
Server.TICK_EVENTS[Server.FAST_TICK_DELAY].append(func)
return func
class Server:
HANDLERS = {}
TICK_EVENTS = defaultdict(list)
SLOW_TICK_DELAY = 500
FAST_TICK_DELAY = 50
def __init__(self, port=1337):
self.port = port
self.clients = []
self.map = Map(join('data', 'map'), join('data', 'objects.yml'))
self.players = PlayerCollection(self.map)
async def tick(self, delay):
''' Run registered tick events every delay milliseconds '''
while True:
coros = [event(self) for event in Server.TICK_EVENTS[delay]]
await asyncio.gather(*coros)
await asyncio.sleep(delay / 1000)
async def handle_client(self, reader, writer):
client = Client(reader, writer, uuid.uuid4())
self.clients.append(client)
logger.info(f'{client} has connected')
while True:
try:
m = await client.recv()
except DisconnectError:
await self.disconnect_user(client)
break
client.set_last_message_time(datetime.now())
try:
message_type = MessageType(m.message_type)
except ValueError:
logger.info('Received invalid message type %s',
m.message_type)
continue
handler = Server.HANDLERS.get(message_type)
if not handler:
logger.info('No handler for %s', message_type)
continue
try:
await handler(m, client, self)
except DisconnectError:
try:
await self.disconnect_user(client)
except Exception:
logger.exception('Exception while disconnecting user')
break
except Exception:
logger.exception('Handler raised exception')
logger.info(f'{client} has disconnected')
writer.close()
async def broadcast(self, message, exclude=None):
''' Broadcasts given message to all connected clients '''
coros = [client.send(message)
for client in self.clients
if client != exclude]
await asyncio.gather(*coros)
async def broadcast_in_range(self, message, center, radius, exclude=None):
''' Broadcasts given message to players within the given range '''
coros = [client.send(message)
for client in self.players.get_clients_in_range(
center, radius)
if client != exclude]
await asyncio.gather(*coros)
async def disconnect_user(self, client):
if client.disconnecting:
return
client.disconnecting = True
client.writer.close()
self.clients.remove(client)
# Never logged in - no need to log out
if client.username is None:
return
self.players.remove(client)
with AccountService() as service:
service.logout(client.username)
if client.player_id is None:
return
player_leave_message = PlayerLeave()
player_leave_message.player_id = client.player_id
with PlayerService() as service:
service.remove(client.player_id)
await self.broadcast(Message(
message_type=MessageType.player_leave,
message=player_leave_message))
```
|
{
"source": "jennings/project-euler",
"score": 4
}
|
#### File: project-euler/python/002-sum-of-even-fibonacci-numbers.py
```python
def fib_sum(limit):
return fib_calc(limit,0,1,0)
def fib_calc(limit, a, b, accum):
if b > limit:
return accum
else:
if b % 2 == 0:
accum += b
return fib_calc(limit, b, (a+b), accum)
if __name__ == "__main__":
print(fib_sum(4000000))
```
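The accumulator-style recursion above is correct, but CPython does not optimise tail calls, so a very large limit would eventually hit the recursion limit. A small equivalent iterative sketch producing the same result:
```python
def fib_sum_iter(limit):
    # Same even-Fibonacci sum as fib_sum, but with a plain loop
    a, b, total = 0, 1, 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert fib_sum_iter(10) == 10  # 2 + 8
```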
#### File: project-euler/python/003-largest-prime-factor.py
```python
from math import ceil
def gcf(n):
ceiling = int(ceil(n ** 0.5))
candidates = range(ceiling, 2, -1)
for candidate in candidates:
if (n % candidate == 0):
if (is_prime(candidate)):
return candidate
    # If we get here, we have no prime factors greater than 1
return 1
def is_prime(n):
ceiling = int(ceil(n ** 0.5))
if (n % 2 == 0):
return False
    for candidate in range(3, ceiling + 1, 2):  # include the square root itself
if (n % candidate == 0):
return False
return True
if __name__ == "__main__":
print(gcf(600851475143))
pass
```
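An alternative sketch that avoids the separate primality test: divide factors out from smallest to largest, so whatever survives (or the last factor removed) is necessarily the largest prime factor. This is only an illustration of the idea, not a change to the solution above.
```python
def largest_prime_factor(n):
    # Strip factors in increasing order; anything left at the end is prime
    factor = 2
    last = 1
    while factor * factor <= n:
        while n % factor == 0:
            n //= factor
            last = factor
        factor += 1
    return n if n > 1 else last

assert largest_prime_factor(13195) == 29
```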
#### File: project-euler/python/012-highly-divisible-triangular-number.py
```python
import math
from time import time
def triangle_numbers(n = 0, agg = 0):
while True:
n += 1
agg += n
yield agg
def number_of_divisors(n):
num_factors = 2 # 1 and n
sq = int(n ** 0.5)
    for candidate in range(2, sq + 1):  # include the square root itself
if n % candidate == 0:
num_factors += 2
if sq * sq == n:
num_factors -= 1
return num_factors
if __name__ == "__main__":
current_magnitude = 10
start = time()
for num in triangle_numbers():
if number_of_divisors(num) >= 500:
print("Answer: " + str(num))
print("Found in " + str(round(time() - start, 2)) + " seconds")
exit(0)
if num > current_magnitude:
current_magnitude *= 10
print("Trying: " + str(num))
```
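Counting divisors by trial division up to the square root works, but a prime-factorisation sketch is often faster when many numbers are tested: if n = p1^a1 · … · pk^ak, then the divisor count is (a1 + 1) · … · (ak + 1). A standalone illustration of that formula:
```python
def count_divisors(n):
    # Divisor count via prime factorisation: product of (exponent + 1)
    count = 1
    p = 2
    while p * p <= n:
        exponent = 0
        while n % p == 0:
            n //= p
            exponent += 1
        count *= exponent + 1
        p += 1
    if n > 1:              # one leftover prime factor
        count *= 2
    return count

assert count_divisors(28) == 6    # 1, 2, 4, 7, 14, 28
assert count_divisors(36) == 9
```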
|
{
"source": "jennings/solari-board",
"score": 3
}
|
#### File: solari-board/example/postJsonp.py
```python
from __future__ import print_function
import json
import sys, os
import cgi
def cgi_callback():
data = [ {'sDate':'today','sTime':'13:30','sDeparture':'<EMAIL>','nStatus':1,'nTrack':17, 'fLight':True},
{'sDate':'yesterday','sTime':'16:00','sDeparture':'<EMAIL>','nStatus':2,'nTrack':19, 'fLight':False},
{'sDate':'July 8th, 2013','sTime':'16:30','sDeparture':'<EMAIL>','nStatus':2,'nTrack':23, 'fLight':False}
]
params = cgi.parse_qs(os.environ['QUERY_STRING'])
print("Content-Type: application/json", end='\n\n')
print ("%s(%s);" % (params['callback'][0], json.dumps(data, sys.stdout)))
if __name__ == '__main__':
cgi_callback()
```
|
{
"source": "jennirinker/OpenMDAO",
"score": 2
}
|
#### File: experimental_source/core/experimental_driver.py
```python
from __future__ import print_function
from collections import OrderedDict
import warnings
from six import iteritems, itervalues
import numpy as np
from openmdao.recorders.recording_manager import RecordingManager
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.utils.record_util import create_local_meta, check_path
from openmdao.utils.mpi import MPI
from openmdao.utils.options_dictionary import OptionsDictionary
class ExperimentalDriver(object):
"""
A fake driver class used for doc generation testing.
Attributes
----------
fail : bool
Reports whether the driver ran successfully.
iter_count : int
Keep track of iterations for case recording.
options : <OptionsDictionary>
Dictionary with general pyoptsparse options.
recording_options : <OptionsDictionary>
Dictionary with driver recording options.
cite : str
Listing of relevant citations that should be referenced when
publishing work that uses this class.
_problem : <Problem>
Pointer to the containing problem.
supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
_designvars : dict
Contains all design variable info.
_cons : dict
Contains all constraint info.
_objs : dict
Contains all objective info.
_responses : dict
Contains all response info.
_rec_mgr : <RecordingManager>
Object that manages all recorders added to this driver.
_vars_to_record: dict
Dict of lists of var names indicating what to record
_model_viewer_data : dict
Structure of model, used to make n2 diagram.
_remote_dvs : dict
Dict of design variables that are remote on at least one proc. Values are
(owning rank, size).
_remote_cons : dict
Dict of constraints that are remote on at least one proc. Values are
(owning rank, size).
_remote_objs : dict
Dict of objectives that are remote on at least one proc. Values are
(owning rank, size).
_remote_responses : dict
A combined dict containing entries from _remote_cons and _remote_objs.
_simul_coloring_info : tuple of dicts
A data structure describing coloring for simultaneous derivs.
_res_jacs : dict
Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
"""
def __init__(self):
"""
Initialize the driver.
"""
self._rec_mgr = RecordingManager()
self._vars_to_record = {
'desvarnames': set(),
'responsenames': set(),
'objectivenames': set(),
'constraintnames': set(),
'sysinclnames': set(),
}
self._problem = None
self._designvars = None
self._cons = None
self._objs = None
self._responses = None
self.options = OptionsDictionary()
self.recording_options = OptionsDictionary()
###########################
self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',
default=True)
self.recording_options.declare('record_desvars', types=bool, default=True,
desc='Set to True to record design variables at the \
driver level')
self.recording_options.declare('record_responses', types=bool, default=False,
desc='Set to True to record responses at the driver level')
self.recording_options.declare('record_objectives', types=bool, default=True,
desc='Set to True to record objectives at the \
driver level')
self.recording_options.declare('record_constraints', types=bool, default=True,
desc='Set to True to record constraints at the \
driver level')
self.recording_options.declare('includes', types=list, default=[],
desc='Patterns for variables to include in recording')
self.recording_options.declare('excludes', types=list, default=[],
desc='Patterns for vars to exclude in recording '
'(processed post-includes)')
self.recording_options.declare('record_derivatives', types=bool, default=False,
desc='Set to True to record derivatives at the driver \
level')
###########################
# What the driver supports.
self.supports = OptionsDictionary()
self.supports.declare('inequality_constraints', types=bool, default=False)
self.supports.declare('equality_constraints', types=bool, default=False)
self.supports.declare('linear_constraints', types=bool, default=False)
self.supports.declare('two_sided_constraints', types=bool, default=False)
self.supports.declare('multiple_objectives', types=bool, default=False)
self.supports.declare('integer_design_vars', types=bool, default=False)
self.supports.declare('gradients', types=bool, default=False)
self.supports.declare('active_set', types=bool, default=False)
self.supports.declare('simultaneous_derivatives', types=bool, default=False)
self.iter_count = 0
self.options = None
self._model_viewer_data = None
self.cite = ""
# TODO, support these in OpenMDAO
self.supports.declare('integer_design_vars', types=bool, default=False)
self._simul_coloring_info = None
self._res_jacs = {}
self.fail = False
def add_recorder(self, recorder):
"""
Add a recorder to the driver.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
self._rec_mgr.append(recorder)
def cleanup(self):
"""
Clean up resources prior to exit.
"""
self._rec_mgr.close()
def _setup_driver(self, problem):
"""
Prepare the driver for execution.
This is the final thing to run during setup.
Parameters
----------
problem : <Problem>
Pointer to the containing problem.
"""
self._problem = problem
model = problem.model
self._objs = objs = OrderedDict()
self._cons = cons = OrderedDict()
self._responses = model.get_responses(recurse=True)
response_size = 0
for name, data in iteritems(self._responses):
if data['type'] == 'con':
cons[name] = data
else:
objs[name] = data
response_size += data['size']
# Gather up the information for design vars.
self._designvars = model.get_design_vars(recurse=True)
desvar_size = np.sum(data['size'] for data in itervalues(self._designvars))
if ((problem._mode == 'fwd' and desvar_size > response_size) or
(problem._mode == 'rev' and response_size > desvar_size)):
warnings.warn("Inefficient choice of derivative mode. You chose '%s' for a "
"problem with %d design variables and %d response variables "
"(objectives and constraints)." %
(problem._mode, desvar_size, response_size), RuntimeWarning)
self._has_scaling = (
np.any([r['scaler'] is not None for r in self._responses.values()]) or
np.any([dv['scaler'] is not None for dv in self._designvars.values()])
)
con_set = set()
obj_set = set()
dv_set = set()
self._remote_dvs = dv_dict = {}
self._remote_cons = con_dict = {}
self._remote_objs = obj_dict = {}
# Now determine if later we'll need to allgather cons, objs, or desvars.
if model.comm.size > 1 and model._subsystems_allprocs:
local_out_vars = set(model._outputs._views)
remote_dvs = set(self._designvars) - local_out_vars
remote_cons = set(self._cons) - local_out_vars
remote_objs = set(self._objs) - local_out_vars
all_remote_vois = model.comm.allgather(
(remote_dvs, remote_cons, remote_objs))
for rem_dvs, rem_cons, rem_objs in all_remote_vois:
con_set.update(rem_cons)
obj_set.update(rem_objs)
dv_set.update(rem_dvs)
# If we have remote VOIs, pick an owning rank for each and use that
# to bcast to others later
owning_ranks = model._owning_rank['output']
sizes = model._var_sizes['nonlinear']['output']
for i, vname in enumerate(model._var_allprocs_abs_names['output']):
owner = owning_ranks[vname]
if vname in dv_set:
dv_dict[vname] = (owner, sizes[owner, i])
if vname in con_set:
con_dict[vname] = (owner, sizes[owner, i])
if vname in obj_set:
obj_dict[vname] = (owner, sizes[owner, i])
self._remote_responses = self._remote_cons.copy()
self._remote_responses.update(self._remote_objs)
# Case recording setup
mydesvars = myobjectives = myconstraints = myresponses = set()
mysystem_outputs = set()
incl = self.recording_options['includes']
excl = self.recording_options['excludes']
rec_desvars = self.recording_options['record_desvars']
rec_objectives = self.recording_options['record_objectives']
rec_constraints = self.recording_options['record_constraints']
rec_responses = self.recording_options['record_responses']
# includes and excludes for outputs are specified using promoted names
# NOTE: only local var names are in abs2prom, all will be gathered later
abs2prom = model._var_abs2prom['output']
all_desvars = {n for n in self._designvars
if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
all_objectives = {n for n in self._objs
if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
all_constraints = {n for n in self._cons
if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
if rec_desvars:
mydesvars = all_desvars
if rec_objectives:
myobjectives = all_objectives
if rec_constraints:
myconstraints = all_constraints
if rec_responses:
myresponses = {n for n in self._responses
if n in abs2prom and check_path(abs2prom[n], incl, excl, True)}
# get the includes that were requested for this Driver recording
if incl:
prob = self._problem
root = prob.model
# The my* variables are sets
# First gather all of the desired outputs
# The following might only be the local vars if MPI
mysystem_outputs = {n for n in root._outputs
if n in abs2prom and check_path(abs2prom[n], incl, excl)}
# If MPI, and on rank 0, need to gather up all the variables
# even those not local to rank 0
if MPI:
all_vars = root.comm.gather(mysystem_outputs, root=0)
if MPI.COMM_WORLD.rank == 0:
mysystem_outputs = all_vars[-1]
for d in all_vars[:-1]:
mysystem_outputs.update(d)
# de-duplicate mysystem_outputs
mysystem_outputs = mysystem_outputs.difference(all_desvars, all_objectives,
all_constraints)
if MPI: # filter based on who owns the variables
# TODO Eventually, we think we can get rid of this next check. But to be safe,
# we are leaving it in there.
if not model.is_active():
raise RuntimeError(
"RecordingManager.startup should never be called when "
"running in parallel on an inactive System")
rrank = self._problem.comm.rank # root ( aka model ) rank.
rowned = model._owning_rank['output']
mydesvars = [n for n in mydesvars if rrank == rowned[n]]
myresponses = [n for n in myresponses if rrank == rowned[n]]
myobjectives = [n for n in myobjectives if rrank == rowned[n]]
myconstraints = [n for n in myconstraints if rrank == rowned[n]]
mysystem_outputs = [n for n in mysystem_outputs if rrank == rowned[n]]
self._filtered_vars_to_record = {
'des': mydesvars,
'obj': myobjectives,
'con': myconstraints,
'res': myresponses,
'sys': mysystem_outputs,
}
self._rec_mgr.startup(self)
# set up simultaneous deriv coloring
if self._simul_coloring_info and self.supports['simultaneous_derivatives']:
if problem._mode == 'fwd':
self._setup_simul_coloring()
else:
raise RuntimeError("simultaneous derivs are currently not supported in rev mode.")
def _get_voi_val(self, name, meta, remote_vois):
"""
Get the value of a variable of interest (objective, constraint, or design var).
This will retrieve the value if the VOI is remote.
Parameters
----------
name : str
Name of the variable of interest.
meta : dict
Metadata for the variable of interest.
remote_vois : dict
Dict containing (owning_rank, size) for all remote vois of a particular
type (design var, constraint, or objective).
Returns
-------
float or ndarray
The value of the named variable of interest.
"""
model = self._problem.model
comm = model.comm
vec = model._outputs._views_flat
indices = meta['indices']
if name in remote_vois:
owner, size = remote_vois[name]
if owner == comm.rank:
if indices is None:
val = vec[name].copy()
else:
val = vec[name][indices]
else:
if indices is not None:
size = len(indices)
val = np.empty(size)
comm.Bcast(val, root=owner)
else:
if indices is None:
val = vec[name].copy()
else:
val = vec[name][indices]
if self._has_scaling:
# Scale design variable values
adder = meta['adder']
if adder is not None:
val += adder
scaler = meta['scaler']
if scaler is not None:
val *= scaler
return val
def get_design_var_values(self, filter=None):
"""
Return the design variable values.
This is called to gather the initial design variable state.
Parameters
----------
filter : list
List of desvar names used by recorders.
Returns
-------
dict
Dictionary containing values of each design variable.
"""
if filter:
dvs = filter
else:
# use all the designvars
dvs = self._designvars
return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs) for n in dvs}
def set_design_var(self, name, value):
"""
Set the value of a design variable.
Parameters
----------
name : str
Global pathname of the design variable.
value : float or ndarray
Value for the design variable.
"""
if (name in self._remote_dvs and
self._problem.model._owning_rank['output'][name] != self._problem.comm.rank):
return
meta = self._designvars[name]
indices = meta['indices']
if indices is None:
indices = slice(None)
desvar = self._problem.model._outputs._views_flat[name]
desvar[indices] = value
if self._has_scaling:
# Scale design variable values
scaler = meta['scaler']
if scaler is not None:
desvar[indices] *= 1.0 / scaler
adder = meta['adder']
if adder is not None:
desvar[indices] -= adder
def get_response_values(self, filter=None):
"""
Return response values.
Parameters
----------
filter : list
List of response names used by recorders.
Returns
-------
dict
Dictionary containing values of each response.
"""
if filter:
resps = filter
else:
resps = self._responses
return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}
def get_objective_values(self, filter=None):
"""
Return objective values.
Parameters
----------
filter : list
List of objective names used by recorders.
Returns
-------
dict
Dictionary containing values of each objective.
"""
if filter:
objs = filter
else:
objs = self._objs
return {n: self._get_voi_val(n, self._objs[n], self._remote_objs) for n in objs}
def get_constraint_values(self, ctype='all', lintype='all', filter=None):
"""
Return constraint values.
Parameters
----------
ctype : string
Default is 'all'. Optionally return just the inequality constraints
with 'ineq' or the equality constraints with 'eq'.
lintype : string
Default is 'all'. Optionally return just the linear constraints
with 'linear' or the nonlinear constraints with 'nonlinear'.
filter : list
List of constraint names used by recorders.
Returns
-------
dict
Dictionary containing values of each constraint.
"""
if filter is not None:
cons = filter
else:
cons = self._cons
con_dict = {}
for name in cons:
meta = self._cons[name]
if lintype == 'linear' and not meta['linear']:
continue
if lintype == 'nonlinear' and meta['linear']:
continue
if ctype == 'eq' and meta['equals'] is None:
continue
if ctype == 'ineq' and meta['equals'] is not None:
continue
con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)
return con_dict
def run(self):
"""
Execute this driver.
The base `Driver` just runs the model. All other drivers overload
this method.
Returns
-------
boolean
Failure flag; True if failed to converge, False is successful.
"""
with Recording(self._get_name(), self.iter_count, self) as rec:
self._problem.model._solve_nonlinear()
self.iter_count += 1
return False
def _dict2array_jac(self, derivs):
osize = 0
isize = 0
do_wrt = True
islices = {}
oslices = {}
for okey, oval in iteritems(derivs):
if do_wrt:
for ikey, val in iteritems(oval):
istart = isize
isize += val.shape[1]
islices[ikey] = slice(istart, isize)
do_wrt = False
ostart = osize
osize += oval[ikey].shape[0]
oslices[okey] = slice(ostart, osize)
new_derivs = np.zeros((osize, isize))
relevant = self._problem.model._relevant
for okey, odict in iteritems(derivs):
for ikey, val in iteritems(odict):
if okey in relevant[ikey] or ikey in relevant[okey]:
new_derivs[oslices[okey], islices[ikey]] = val
return new_derivs
def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', global_names=True):
"""
Compute derivatives of desired quantities with respect to desired inputs.
All derivatives are returned using driver scaling.
Parameters
----------
of : list of variable name strings or None
Variables whose derivatives will be computed. Default is None, which
uses the driver's objectives and constraints.
wrt : list of variable name strings or None
Variables with respect to which the derivatives will be computed.
Default is None, which uses the driver's desvars.
return_format : string
Format to return the derivatives. Default is a 'flat_dict', which
returns them in a dictionary whose keys are tuples of form (of, wrt). For
the scipy optimizer, 'array' is also supported.
global_names : bool
Set to True when passing in global names to skip some translation steps.
Returns
-------
derivs : object
Derivatives in form requested by 'return_format'.
"""
prob = self._problem
# Compute the derivatives in dict format...
if prob.model._owns_approx_jac:
derivs = prob._compute_totals_approx(of=of, wrt=wrt, return_format='dict',
global_names=global_names)
else:
derivs = prob._compute_totals(of=of, wrt=wrt, return_format='dict',
global_names=global_names)
# ... then convert to whatever the driver needs.
if return_format in ('dict', 'array'):
if self._has_scaling:
for okey, odict in iteritems(derivs):
for ikey, val in iteritems(odict):
iscaler = self._designvars[ikey]['scaler']
oscaler = self._responses[okey]['scaler']
# Scale response side
if oscaler is not None:
val[:] = (oscaler * val.T).T
# Scale design var side
if iscaler is not None:
val *= 1.0 / iscaler
else:
raise RuntimeError("Derivative scaling by the driver only supports the 'dict' and "
"'array' formats at present.")
if return_format == 'array':
derivs = self._dict2array_jac(derivs)
return derivs
def record_iteration(self):
"""
Record an iteration of the current Driver.
"""
if not self._rec_mgr._recorders:
return
metadata = create_local_meta(self._get_name())
# Get the data to record
data = {}
if self.recording_options['record_desvars']:
# collective call that gets across all ranks
desvars = self.get_design_var_values()
else:
desvars = {}
if self.recording_options['record_responses']:
# responses = self.get_response_values() # not really working yet
responses = {}
else:
responses = {}
if self.recording_options['record_objectives']:
objectives = self.get_objective_values()
else:
objectives = {}
if self.recording_options['record_constraints']:
constraints = self.get_constraint_values()
else:
constraints = {}
desvars = {name: desvars[name]
for name in self._filtered_vars_to_record['des']}
# responses not working yet
# responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
objectives = {name: objectives[name]
for name in self._filtered_vars_to_record['obj']}
constraints = {name: constraints[name]
for name in self._filtered_vars_to_record['con']}
if self.recording_options['includes']:
root = self._problem.model
outputs = root._outputs
# outputsinputs, outputs, residuals = root.get_nonlinear_vectors()
sysvars = {}
for name, value in iteritems(outputs._names):
if name in self._filtered_vars_to_record['sys']:
sysvars[name] = value
else:
sysvars = {}
if MPI:
root = self._problem.model
desvars = self._gather_vars(root, desvars)
responses = self._gather_vars(root, responses)
objectives = self._gather_vars(root, objectives)
constraints = self._gather_vars(root, constraints)
sysvars = self._gather_vars(root, sysvars)
data['des'] = desvars
data['res'] = responses
data['obj'] = objectives
data['con'] = constraints
data['sys'] = sysvars
self._rec_mgr.record_iteration(self, data, metadata)
def _gather_vars(self, root, local_vars):
"""
Gather and return only variables listed in `local_vars` from the `root` System.
Parameters
----------
root : <System>
the root System for the Problem
local_vars : dict
local variable names and values
Returns
-------
dct : dict
variable names and values.
"""
# if trace:
# debug("gathering vars for recording in %s" % root.pathname)
all_vars = root.comm.gather(local_vars, root=0)
# if trace:
# debug("DONE gathering rec vars for %s" % root.pathname)
if root.comm.rank == 0:
dct = all_vars[-1]
for d in all_vars[:-1]:
dct.update(d)
return dct
def _get_name(self):
"""
Get name of current Driver.
Returns
-------
str
Name of current Driver.
"""
return "Driver"
def set_simul_deriv_color(self, simul_info):
"""
Set the coloring for simultaneous derivatives.
Parameters
----------
simul_info : ({dv1: colors, ...}, {resp1: {dv1: {0: [res_idxs, dv_idxs]} ...} ...})
Information about simultaneous coloring for design vars and responses.
"""
if self.supports['simultaneous_derivatives']:
self._simul_coloring_info = simul_info
else:
raise RuntimeError("Driver '%s' does not support simultaneous derivatives." %
self._get_name())
```
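The scaling logic in `_get_voi_val` and `set_design_var` is symmetric: reading a variable applies `(value + adder) * scaler`, while writing applies `value / scaler - adder`. A small round-trip sketch with hypothetical scaler/adder values, using plain numpy and no OpenMDAO API:
```python
import numpy as np

def to_driver(x, adder, scaler):
    # reading a design variable or response: shift, then scale
    return (x + adder) * scaler

def to_model(x_scaled, adder, scaler):
    # writing a design variable: unscale, then unshift
    return x_scaled / scaler - adder

x = np.array([3.0, -1.5])
adder, scaler = 2.0, 0.5          # hypothetical scaling values
assert np.allclose(to_model(to_driver(x, adder, scaler), adder, scaler), x)
```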
#### File: test_suite/components/sellar.py
```python
import numpy as np
import inspect
from openmdao.components.exec_comp import ExecComp
from openmdao.core.indepvarcomp import IndepVarComp
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.core.group import Group
from openmdao.core.problem import Problem
from openmdao.solvers.nonlinear.nonlinear_block_gs import NonlinearBlockGS
from openmdao.solvers.linear.scipy_iter_solver import ScipyKrylov
from openmdao.solvers.nonlinear.newton import NewtonSolver
class SellarDis1(ExplicitComponent):
"""
Component containing Discipline 1 -- no derivatives version.
"""
def __init__(self, units=None, scaling=None):
super(SellarDis1, self).__init__()
self.execution_count = 0
self._units = units
self._do_scaling = scaling
def setup(self):
if self._units:
units = 'ft'
else:
units = None
if self._do_scaling:
ref = .1
else:
ref = 1.
# Global Design Variable
self.add_input('z', val=np.zeros(2), units=units)
# Local Design Variable
self.add_input('x', val=0., units=units)
# Coupling parameter
self.add_input('y2', val=1.0, units=units)
# Coupling output
self.add_output('y1', val=1.0, lower=0.1, upper=1000., units=units, ref=ref)
self._do_declares()
def _do_declares(self):
# Finite difference everything
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
"""
Evaluates the equation
y1 = z1**2 + z2 + x1 - 0.2*y2
"""
z1 = inputs['z'][0]
z2 = inputs['z'][1]
x1 = inputs['x']
y2 = inputs['y2']
outputs['y1'] = z1**2 + z2 + x1 - 0.2*y2
self.execution_count += 1
class SellarDis1withDerivatives(SellarDis1):
"""
Component containing Discipline 1 -- derivatives version.
"""
def _do_declares(self):
# Analytic Derivs
self.declare_partials(of='*', wrt='*')
def compute_partials(self, inputs, partials):
"""
Jacobian for Sellar discipline 1.
"""
partials['y1', 'y2'] = -0.2
partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0]])
partials['y1', 'x'] = 1.0
class SellarDis1CS(SellarDis1):
"""
Component containing Discipline 1 -- complex step version.
"""
def _do_declares(self):
# Analytic Derivs
self.declare_partials(of='*', wrt='*', method='cs')
class SellarDis2(ExplicitComponent):
"""
Component containing Discipline 2 -- no derivatives version.
"""
def __init__(self, units=None, scaling=None):
super(SellarDis2, self).__init__()
self.execution_count = 0
self._units = units
self._do_scaling = scaling
def setup(self):
if self._units:
units = 'inch'
else:
units = None
if self._do_scaling:
ref = .18
else:
ref = 1.
# Global Design Variable
self.add_input('z', val=np.zeros(2), units=units)
# Coupling parameter
self.add_input('y1', val=1.0, units=units)
# Coupling output
self.add_output('y2', val=1.0, lower=0.1, upper=1000., units=units, ref=ref)
self._do_declares()
def _do_declares(self):
# Finite difference everything
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
"""
Evaluates the equation
y2 = y1**(.5) + z1 + z2
"""
z1 = inputs['z'][0]
z2 = inputs['z'][1]
y1 = inputs['y1']
# Note: this may cause some issues. However, y1 is constrained to be
# above 3.16, so lets just let it converge, and the optimizer will
# throw it out
if y1.real < 0.0:
y1 *= -1
outputs['y2'] = y1**.5 + z1 + z2
self.execution_count += 1
class SellarDis2withDerivatives(SellarDis2):
"""
Component containing Discipline 2 -- derivatives version.
"""
def _do_declares(self):
# Analytic Derivs
self.declare_partials(of='*', wrt='*')
def compute_partials(self, inputs, J):
"""
Jacobian for Sellar discipline 2.
"""
y1 = inputs['y1']
if y1.real < 0.0:
y1 *= -1
if y1.real < 1e-8:
y1 = 1e-8
J['y2', 'y1'] = .5*y1**-.5
J['y2', 'z'] = np.array([[1.0, 1.0]])
class SellarDis2CS(SellarDis2):
"""
Component containing Discipline 2 -- complex step version.
"""
def _do_declares(self):
# Analytic Derivs
self.declare_partials(of='*', wrt='*', method='cs')
class SellarNoDerivatives(Group):
"""
Group containing the Sellar MDA. This version uses the disciplines without derivatives.
"""
def initialize(self):
self.options.declare('nonlinear_solver', default=NonlinearBlockGS(),
desc='Nonlinear solver for Sellar MDA')
self.options.declare('nl_atol', default=None,
desc='User-specified atol for nonlinear solver.')
self.options.declare('nl_maxiter', default=None,
desc='Iteration limit for nonlinear solver.')
self.options.declare('linear_solver', default=ScipyKrylov(),
desc='Linear solver')
self.options.declare('ln_atol', default=None,
desc='User-specified atol for linear solver.')
self.options.declare('ln_maxiter', default=None,
desc='Iteration limit for linear solver.')
def setup(self):
self.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])
self.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
cycle = self.add_subsystem('cycle', Group(), promotes=['x', 'z', 'y1', 'y2'])
cycle.add_subsystem('d1', SellarDis1(), promotes=['x', 'z', 'y1', 'y2'])
cycle.add_subsystem('d2', SellarDis2(), promotes=['z', 'y1', 'y2'])
self.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['x', 'z', 'y1', 'y2', 'obj'])
self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
self.nonlinear_solver = self.options['nonlinear_solver']
if self.options['nl_atol']:
self.nonlinear_solver.options['atol'] = self.options['nl_atol']
if self.options['nl_maxiter']:
self.nonlinear_solver.options['maxiter'] = self.options['nl_maxiter']
def configure(self):
self.cycle.linear_solver = self.options['linear_solver']
if self.options['ln_atol']:
self.cycle.linear_solver.options['atol'] = self.options['ln_atol']
if self.options['ln_maxiter']:
self.cycle.linear_solver.options['maxiter'] = self.options['ln_maxiter']
class SellarDerivatives(Group):
"""
Group containing the Sellar MDA. This version uses the disciplines with derivatives.
"""
def initialize(self):
self.options.declare('nonlinear_solver', default=NonlinearBlockGS,
desc='Nonlinear solver (class or instance) for Sellar MDA')
self.options.declare('nl_atol', default=None,
desc='User-specified atol for nonlinear solver.')
self.options.declare('nl_maxiter', default=None,
desc='Iteration limit for nonlinear solver.')
self.options.declare('linear_solver', default=ScipyKrylov,
desc='Linear solver (class or instance)')
self.options.declare('ln_atol', default=None,
desc='User-specified atol for linear solver.')
self.options.declare('ln_maxiter', default=None,
desc='Iteration limit for linear solver.')
def setup(self):
self.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])
self.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
self.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
self.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
self.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)', obj=0.0,
x=0.0, z=np.array([0.0, 0.0]), y1=0.0, y2=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1', con1=0.0, y1=0.0),
promotes=['con1', 'y1'])
self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0', con2=0.0, y2=0.0),
promotes=['con2', 'y2'])
nl = self.options['nonlinear_solver']
self.nonlinear_solver = nl() if inspect.isclass(nl) else nl
if self.options['nl_atol']:
self.nonlinear_solver.options['atol'] = self.options['nl_atol']
if self.options['nl_maxiter']:
self.nonlinear_solver.options['maxiter'] = self.options['nl_maxiter']
ln = self.options['linear_solver']
self.linear_solver = ln() if inspect.isclass(ln) else ln
if self.options['ln_atol']:
self.linear_solver.options['atol'] = self.options['ln_atol']
if self.options['ln_maxiter']:
self.linear_solver.options['maxiter'] = self.options['ln_maxiter']
class SellarDerivativesConnected(Group):
"""
Group containing the Sellar MDA. This version uses the disciplines with derivatives.
"""
def setup(self):
self.add_subsystem('px', IndepVarComp('x', 1.0))
self.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])))
self.add_subsystem('d1', SellarDis1withDerivatives())
self.add_subsystem('d2', SellarDis2withDerivatives())
self.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0))
self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'))
self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'))
self.connect('px.x', ['d1.x', 'obj_cmp.x'])
self.connect('pz.z', ['d1.z', 'd2.z', 'obj_cmp.z'])
self.connect('d1.y1', ['d2.y1', 'obj_cmp.y1', 'con_cmp1.y1'])
self.connect('d2.y2', ['d1.y2', 'obj_cmp.y2', 'con_cmp2.y2'])
self.nonlinear_solver = NonlinearBlockGS()
self.linear_solver = ScipyKrylov()
class SellarDerivativesGrouped(Group):
"""
Group containing the Sellar MDA. This version uses the disciplines with derivatives.
"""
def initialize(self):
self.options.declare('nonlinear_solver', default=NonlinearBlockGS,
desc='Nonlinear solver (class or instance) for Sellar MDA')
self.options.declare('nl_atol', default=None,
desc='User-specified atol for nonlinear solver.')
self.options.declare('nl_maxiter', default=None,
desc='Iteration limit for nonlinear solver.')
self.options.declare('linear_solver', default=ScipyKrylov,
desc='Linear solver (class or instance)')
self.options.declare('ln_atol', default=None,
desc='User-specified atol for linear solver.')
self.options.declare('ln_maxiter', default=None,
desc='Iteration limit for linear solver.')
def setup(self):
self.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])
self.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
self.mda = mda = self.add_subsystem('mda', Group(), promotes=['x', 'z', 'y1', 'y2'])
mda.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
mda.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
self.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
nl = self.options['nonlinear_solver']
self.nonlinear_solver = nl() if inspect.isclass(nl) else nl
if self.options['nl_atol']:
self.nonlinear_solver.options['atol'] = self.options['nl_atol']
if self.options['nl_maxiter']:
self.nonlinear_solver.options['maxiter'] = self.options['nl_maxiter']
ln = self.options['linear_solver']
self.linear_solver = ln() if inspect.isclass(ln) else ln
if self.options['ln_atol']:
self.linear_solver.options['atol'] = self.options['ln_atol']
if self.options['ln_maxiter']:
self.linear_solver.options['maxiter'] = self.options['ln_maxiter']
def configure(self):
self.mda.linear_solver = ScipyKrylov()
self.mda.nonlinear_solver = NonlinearBlockGS()
class StateConnection(ImplicitComponent):
"""
Define connection with an explicit equation.
"""
def setup(self):
# Inputs
self.add_input('y2_actual', 1.0)
# States
self.add_output('y2_command', val=1.0)
# Declare derivatives
self.declare_partials(of='*', wrt='*')
def apply_nonlinear(self, inputs, outputs, residuals):
"""
Don't solve; just calculate the residual.
"""
y2_actual = inputs['y2_actual']
y2_command = outputs['y2_command']
residuals['y2_command'] = y2_actual - y2_command
def compute(self, inputs, outputs):
"""
This is a dummy comp that doesn't modify its state.
"""
pass
def linearize(self, inputs, outputs, J):
"""
Analytical derivatives.
"""
# State equation
J[('y2_command', 'y2_command')] = -1.0
J[('y2_command', 'y2_actual')] = 1.0
class SellarStateConnection(Group):
"""
Group containing the Sellar MDA. This version uses the disciplines with derivatives.
"""
def initialize(self):
self.options.declare('nonlinear_solver', default=NewtonSolver,
desc='Nonlinear solver (class or instance) for Sellar MDA')
self.options.declare('nl_atol', default=None,
desc='User-specified atol for nonlinear solver.')
self.options.declare('nl_maxiter', default=None,
desc='Iteration limit for nonlinear solver.')
self.options.declare('linear_solver', default=ScipyKrylov,
desc='Linear solver (class or instance)')
self.options.declare('ln_atol', default=None,
desc='User-specified atol for linear solver.')
self.options.declare('ln_maxiter', default=None,
desc='Iteration limit for linear solver.')
def setup(self):
self.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])
self.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
sub = self.add_subsystem('sub', Group(),
promotes=['x', 'z', 'y1',
'state_eq.y2_actual', 'state_eq.y2_command',
'd1.y2', 'd2.y2'])
subgrp = sub.add_subsystem('state_eq_group', Group(),
promotes=['state_eq.y2_actual', 'state_eq.y2_command'])
subgrp.add_subsystem('state_eq', StateConnection())
sub.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1'])
sub.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1'])
self.connect('state_eq.y2_command', 'd1.y2')
self.connect('d2.y2', 'state_eq.y2_actual')
self.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
promotes=['x', 'z', 'y1', 'obj'])
self.connect('d2.y2', 'obj_cmp.y2')
self.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
self.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2'])
self.connect('d2.y2', 'con_cmp2.y2')
nl = self.options['nonlinear_solver']
self.nonlinear_solver = nl() if inspect.isclass(nl) else nl
if self.options['nl_atol']:
self.nonlinear_solver.options['atol'] = self.options['nl_atol']
if self.options['nl_maxiter']:
self.nonlinear_solver.options['maxiter'] = self.options['nl_maxiter']
ln = self.options['linear_solver']
self.linear_solver = ln() if inspect.isclass(ln) else ln
if self.options['ln_atol']:
self.linear_solver.options['atol'] = self.options['ln_atol']
if self.options['ln_maxiter']:
self.linear_solver.options['maxiter'] = self.options['ln_maxiter']
def configure(self):
self.sub.linear_solver = ScipyKrylov()
self.sub.state_eq_group.linear_solver = ScipyKrylov()
class SellarImplicitDis1(ImplicitComponent):
"""
Component containing Discipline 1 -- no derivatives version.
"""
def __init__(self, units=None, scaling=None):
super(SellarImplicitDis1, self).__init__()
self.execution_count = 0
self._units = units
self._do_scaling = scaling
def setup(self):
if self._units:
units = 'ft'
else:
units = None
if self._do_scaling is None:
ref = 1.
else:
ref = .1
# Global Design Variable
self.add_input('z', val=np.zeros(2), units=units)
# Local Design Variable
self.add_input('x', val=0., units=units)
# Coupling parameter
self.add_input('y2', val=1.0, units=units)
# Coupling output
self.add_output('y1', val=1.0, lower=-0.1, upper=1000, units=units, ref=ref)
# Derivatives
self.declare_partials('*', '*')
def apply_nonlinear(self, inputs, outputs, resids):
"""
Evaluates the equation
y1 = z1**2 + z2 + x1 - 0.2*y2
"""
z1 = inputs['z'][0]
z2 = inputs['z'][1]
x1 = inputs['x']
y2 = inputs['y2']
y1 = outputs['y1']
resids['y1'] = -(z1**2 + z2 + x1 - 0.2*y2 - y1)
def linearize(self, inputs, outputs, J):
"""
Jacobian for Sellar discipline 1.
"""
J['y1', 'y2'] = 0.2
J['y1', 'z'] = -np.array([[2.0 * inputs['z'][0], 1.0]])
J['y1', 'x'] = -1.0
J['y1', 'y1'] = 1.0
class SellarImplicitDis2(ImplicitComponent):
"""
Component containing Discipline 2 -- implicit version.
"""
def __init__(self, units=None, scaling=None):
super(SellarImplicitDis2, self).__init__()
self.execution_count = 0
self._units = units
self._do_scaling = scaling
def setup(self):
if self._units:
units = 'inch'
else:
units = None
if self._do_scaling is None:
ref = 1.0
else:
ref = .18
# Global Design Variable
self.add_input('z', val=np.zeros(2), units=units)
# Coupling parameter
self.add_input('y1', val=1.0, units=units)
# Coupling output
self.add_output('y2', val=1.0, lower=0.1, upper=1000., units=units, ref=ref)
# Derivatives
self.declare_partials('*', '*')
def apply_nonlinear(self, inputs, outputs, resids):
"""
Evaluates the equation
y2 = y1**(.5) + z1 + z2
"""
z1 = inputs['z'][0]
z2 = inputs['z'][1]
y1 = inputs['y1']
y2 = outputs['y2']
# Note: this may cause some issues. However, y1 is constrained to be
# above 3.16, so lets just let it converge, and the optimizer will
# throw it out
if y1.real < 0.0:
y1 *= -1
resids['y2'] = -(y1**.5 + z1 + z2 - y2)
def linearize(self, inputs, outputs, J):
"""
Jacobian for Sellar discipline 2.
"""
y1 = inputs['y1']
if y1.real < 0.0:
y1 *= -1
if y1.real < 1e-8:
y1 = 1e-8
J['y2', 'y1'] = -.5*y1**-.5
J['y2', 'z'] = -np.array([[1.0, 1.0]])
J['y2', 'y2'] = 1.0
class SellarProblem(Problem):
"""
The Sellar problem with configurable model class.
"""
def __init__(self, model_class=SellarDerivatives, **kwargs):
super(SellarProblem, self).__init__(model_class(**kwargs))
model = self.model
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
# default to non-verbose
self.set_solver_print(0)
class SellarProblemWithArrays(Problem):
"""
The Sellar problem with ndarray variable options
"""
def __init__(self, model_class=SellarDerivatives, **kwargs):
super(SellarProblemWithArrays, self).__init__(model_class(**kwargs))
model = self.model
model.add_design_var('z', lower=np.array([-10.0, 0.0]),
upper=np.array([10.0, 10.0]), indices=np.arange(2, dtype=int))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', equals=np.zeros(1))
model.add_constraint('con2', upper=0.0)
# default to non-verbose
self.set_solver_print(0)
```
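The coupled Sellar equations in the discipline docstrings above (y1 = z1² + z2 + x − 0.2·y2 and y2 = √y1 + z1 + z2) can be converged with a plain Gauss–Seidel loop, which is essentially what `NonlinearBlockGS` does over the cycle group. A standalone sketch with no OpenMDAO dependency; the clamp on y1 is a simplification of the sign handling in `SellarDis2`, and for the default inputs the loop settles at roughly y1 ≈ 25.59, y2 ≈ 12.06:
```python
import math

def sellar_mda(x=1.0, z=(5.0, 2.0), tol=1e-10, maxiter=100):
    # Fixed-point (Gauss-Seidel) iteration of the two Sellar disciplines
    z1, z2 = z
    y1, y2 = 1.0, 1.0
    for _ in range(maxiter):
        y1_new = z1 ** 2 + z2 + x - 0.2 * y2
        y2_new = math.sqrt(max(y1_new, 0.0)) + z1 + z2
        converged = abs(y1_new - y1) < tol and abs(y2_new - y2) < tol
        y1, y2 = y1_new, y2_new
        if converged:
            break
    obj = x ** 2 + z2 + y1 + math.exp(-y2)
    return y1, y2, obj

print(sellar_mda())   # roughly (25.588, 12.058, 28.588)
```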
|
{
"source": "jennis0/monster-wizard",
"score": 3
}
|
#### File: monster-wizard/extractor/annotators.py
```python
import configparser
import re
import numpy as np
import logging
import configparser
from typing import List
import extractor.constants as constants
from utils.datatypes import Line, Section
class LineAnnotator(object):
    '''The LineAnnotator is fairly self-explanatory: it applies relatively noisy labels to individual lines that can
    then be used by later processing stages to assign traits to paragraphs/line collections.'''
def __init__(self, config: configparser.ConfigParser, logger: logging.Logger):
self.config = config
self.logger = logger.getChild("lineanno")
# self.standard_height = config.getfloat("line_annotator", "line_height", fallback=0.04)
# self.logger.debug("Configured LineAnnotator with config:")
# self.logger.debug("\tStandard Height={}".format(self.standard_height))
self.__compile_regexes()
banned_words = [
"town",
"settlement",
"stone fort",
"cave system",
"true form",
"prone",
"restrained",
"attack",
"DC",
"grappled"
]
def __is_race_type_string(self, line: Line) -> bool:
# If it's anything else, skip
if len(line.attributes) > 0:
return False
race_type_match = self.race_type_regex.findall(line.text.strip())
# Must have at least size
if len(race_type_match) == 0 or race_type_match[0] == '':
return False
        # First word must be capitalised
if line.text.strip()[0] != line.text.strip()[0].upper():
return False
        # Size must be capitalised
if race_type_match[0][0][0] != race_type_match[0][0][0].upper():
return False
# It should either be short
if len(line.text.split(" ")) < 6 and not (len(race_type_match) < 2 or race_type_match[0][1] != '' or race_type_match[0][2] != ''): #Either short text or it contains at least one of creature type or alignment
return False
for b in LineAnnotator.banned_words:
if b in line.text.lower():
return False
return True
def __compile_regexes(self):
'''Pregenerate regexes used for annotating lines'''
### Pregenerate Regexes
race_type_str = "^\s*({})\s*({})?,?\s*({})?".format(
"|".join(constants.enum_values(constants.SIZES)),
"|".join(constants.enum_values(constants.CREATURE_TYPES)),
"|".join(constants.enum_values(constants.ALIGNMENTS)))
self.race_type_regex = re.compile(race_type_str, re.IGNORECASE)
signatures_strs = [
("Challenge \d+", "cr"),
("\d+d\d+", "dice_roll"),
("Senses\s[\w\s]+\d+\s*ft", "senses"),
("Damage\s[iI]mmunities", "dam_immunities"),
("Damage\s[rR]esistances", "resistances"),
("Damage\s[vV]ulnerabilities", "vulnerabilities"),
("Condition\sImmunities", "con_immunities"),
("^Armor Class\s\d+", "ac"),
("^Hit Points\s\d+", "hp"),
("^Speed\s\d+\s*ft", "speed"),
("Melee\sWeapon\sAttack:", "melee_attack"),
("Ranged\sWeapon\sAttack:", "ranged_attack"),
("DC\s\d+\s", "check"),
("\d+/(day|minute|hour)", "counter"),
("^[Ss]kills\s.*[+-]\d", "skills"),
("^Legendary Action", "legendary_action_title"),
("Costs \d+ actions", "legendary_action_cost"),
("Recharge \d+-\d+", "recharge"),
("(\d+\s*\([+-]?\d+\)\s+){2,6}", "array_values"),
("^Languages?", "languages"),
("^[sS]aves\s+", "saves"),
("^Saving [tT]hrows\s+", "saves"),
("^Senses\s+", "senses"),
("^(1st|2nd|3rd|[4-9]th)\s*level\s*\([0-9]+\s*slots\)?:", "spellcasting"),
("^[cC]antrip (\(at will\))?", "spellcasting"),
("^([sS]pellcasting|[iI]nnate [sS]pellcasting).", 'spellcasting'),
("Proficiency Bonus", "proficiency"),
("Hit [dD]ice", "hitdice"),
("Hit.\s*\d+\s*\(\d+", 'in_attack')
]
uncased_signatures = [
("^STR\s+DEX\s+CON\s+INT\s+WIS\s+CHA", "array_title"),
("^Actions?$", "action_header"),
("^Legendary Actions?$", "legendary_header"),
("^Mythic Actions?$", "mythic_header"),
("^Lair Actions?$", "lair_header"),
("^\s*Reactions?\s*$", 'reaction_header'),
("recharges?\s*after\s*a\s*(short|short or long|long)\s*(?:rest)?", 'recharge'),
("proofreader", 'proofreader'),
("^Credits$", 'credits'),
]
self.signatures = []
for ss in signatures_strs:
self.signatures.append((re.compile(ss[0]), ss[1]))
for ss in uncased_signatures:
self.signatures.append((re.compile(ss[0], re.IGNORECASE), ss[1]))
def annotate(self, lines: List[Line]) -> List[Line]:
'''Applies annotations to passed lines based on their content, and lines directly before/after them'''
for i,line in enumerate(lines):
for r, tag in self.signatures:
matches = r.findall(line.text.strip())
if len(matches) > 0:
line.attributes.append(tag)
if self.__is_race_type_string(line):
line.attributes.append("race_type_header")
j = i - 1
while j >= 0:
if lines[j].text.strip() != "":
if abs(lines[j].bound.left - lines[i].bound.left) < 0.05:
if abs(lines[j].bound.top - lines[i].bound.top) < 0.1:
lines[j].attributes.append("statblock_title")
if 'text_title' in lines[j].attributes:
lines[j].attributes.remove('text_title')
break
j -= 1
if "." in line.text and line.text[0].isupper() and len(line.text.split('.')[0].split()) < 5:
line.attributes.append("block_title")
# If it is large text we've not otherwise accounted for, it's probably actual text so we want
# to skip it. Note this attribute comes from the text loading stage
if line.attributes == ["very_large"]:
line.attributes.append("text_title")
return lines
class LineAnnotationTypes:
defence_annotations = [
"hp",
"ac",
"speed"
]
trait_annotations = [
"languages",
"saves",
"skills",
"challenge",
"senses",
"dam_immunities",
"resistances",
"vulnerabilities",
"con_immunities",
"cr"
]
feature_annotations = [
"spellcasting"
]
action_annotations = [
"action_header",
"action_title",
"melee_attack",
"ranged_attack",
]
legendary_annotations = [
"legendary_action_title",
"legendary_action_cost",
"legendary_header"
]
mythic_annotations = [
"mythic_header"
]
lair_annotations = [
"lair_header"
]
reaction_annotations = [
"reaction_header"
]
generic_annotations = [
"dice_roll",
"check",
"recharge",
"counter",
"spellcasting"
]
weak_generic_annotations = [
"block_title"
]
#These are annotations we are certain are NOT part of a statblock
anti_annotations = [
"text_title",
"credits",
'proofreader'
]
class SectionAnnotator(object):
def __init__(self, config: configparser.ConfigParser, logger: logging.Logger):
self.config = config
self.logger = logger.getChild("clusanno")
self.logger.debug("Configured SectionAnnotator")
def annotate(self, sections: List[Section]) -> List[Section]:
self.logger.debug("Annotating {} Sections".format(len(sections)))
for c in sections:
line_annotations = c.get_line_attributes()
if "statblock_title" in line_annotations:
c.attributes.append("sb_start")
if "race_type_header" in line_annotations:
c.attributes.append("sb_header")
for df in LineAnnotationTypes.defence_annotations:
if df in line_annotations:
c.attributes.append("sb_defence_block")
break
if "array_title" in line_annotations:
c.attributes.append("sb_array_title")
if "array_values" in line_annotations:
c.attributes.append("sb_array_value")
for df in LineAnnotationTypes.trait_annotations:
if df in line_annotations:
c.attributes.append("sb_flavour_block")
break
for df in LineAnnotationTypes.feature_annotations:
if df in line_annotations:
c.attributes.append("sb_feature_block")
break
for af in LineAnnotationTypes.action_annotations:
if af in line_annotations:
c.attributes.append("sb_action_block")
break
for lf in LineAnnotationTypes.legendary_annotations:
if lf in line_annotations:
c.attributes.append("sb_legendary_action_block")
break
            for rf in LineAnnotationTypes.reaction_annotations:
                if rf in line_annotations:
                    c.attributes.append("sb_reaction_block")
                    break
for gf in LineAnnotationTypes.generic_annotations:
if gf in line_annotations:
c.attributes.append("sb_part")
for gf in LineAnnotationTypes.anti_annotations:
if gf in line_annotations:
c.attributes.append("sb_skip")
num_generic = 0
for la in line_annotations:
if la in LineAnnotationTypes.weak_generic_annotations:
num_generic += 1
if num_generic > 0.1 * len(c.lines):
c.attributes.append("sb_part_weak")
#Add some annotations to mark the start and end of each column
sections[0].attributes.append("col_start")
sections[-1].attributes.append("col_end")
return sections
```
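The line signatures in `LineAnnotator` are plain (regex, tag) pairs, so the core of `annotate` can be illustrated without the project's `Line`/`Section` datatypes. A trimmed sketch using a few of the patterns above on raw strings:
```python
import re

# A handful of the (pattern, tag) signatures from LineAnnotator
signatures = [
    (re.compile(r"^Armor Class\s\d+"), "ac"),
    (re.compile(r"^Hit Points\s\d+"), "hp"),
    (re.compile(r"Melee\sWeapon\sAttack:"), "melee_attack"),
    (re.compile(r"^STR\s+DEX\s+CON\s+INT\s+WIS\s+CHA", re.IGNORECASE), "array_title"),
]

def annotate(text):
    # Return every tag whose pattern matches the stripped line of text
    return [tag for regex, tag in signatures if regex.search(text.strip())]

print(annotate("Armor Class 15 (natural armor)"))   # ['ac']
print(annotate("Melee Weapon Attack: +4 to hit"))    # ['melee_attack']
```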
#### File: monster-wizard/extractor/constants.py
```python
from enum import Enum, auto
import re
from typing import Any, List
def is_in_enum(candidate: str, enum: Enum) -> bool:
'''Helper function to check membership of string in enum'''
return candidate in enum.__members__
def enum_values(enum: Enum) -> List[str]:
'''Helper function to return list of names'''
return list(enum.__members__.keys())
class SIZES(Enum):
tiny = auto()
small = auto()
medium = auto()
large = auto()
huge = auto()
gargantuan = auto()
class DAMAGE_TYPES(Enum):
acid = auto()
cold = auto()
necrotic = auto()
fire = auto()
thunder = auto()
bludgeoning = auto()
slashing = auto()
piercing = auto()
force = auto()
lightning = auto()
psychic = auto()
radiant = auto()
poison = auto()
class CONDITIONS(Enum):
stunned = auto()
paralyzed = auto()
blinded = auto()
grappled = auto()
restrained = auto()
immobilized = auto()
petrified = auto()
prone = auto()
charmed = auto()
deafened = auto()
exhausted = auto()
incapacitated = auto()
invisible = auto()
surprised = auto()
unconscious = auto()
exhaustion = auto()
frightened = auto()
poisoned = auto()
class CREATURE_TYPES(Enum):
aberration = auto()
beast = auto()
celestial = auto()
construct = auto()
dragon = auto()
elemental = auto()
fey = auto()
fiend = auto()
giant = auto()
humanoid = auto()
monstrosity = auto()
ooze = auto()
plant = auto()
undead = auto()
swarm = auto()
class CREATURE_TYPE_PLURALS(Enum):
aberrations = auto()
beasts = auto()
celestials = auto()
constructs = auto()
dragons = auto()
elementals = auto()
fey = auto()
fiends = auto()
giants = auto()
humanoids = auto()
monstrosities = auto()
oozes = auto()
plants = auto()
undead = auto()
def to_singular(self) -> CREATURE_TYPES:
if self == CREATURE_TYPE_PLURALS.monstrosities:
return CREATURE_TYPES.monstrosity
elif self == CREATURE_TYPE_PLURALS.fey:
return CREATURE_TYPES.fey
elif self == CREATURE_TYPE_PLURALS.undead:
return CREATURE_TYPES.undead
return CREATURE_TYPES[self.name[:-1]]
class ABILITIES(Enum):
strength = auto()
dexterity = auto()
constitution = auto()
intelligence = auto()
wisdom = auto()
charisma = auto()
class SHORT_ABILITIES(Enum):
str = auto()
dex = auto()
con = auto()
int = auto()
wis = auto()
cha = auto()
class MOVEMENT_TYPES(Enum):
walk = auto()
burrow = auto()
climb = auto()
fly = auto()
swim = auto()
class SENSES(Enum):
blindsight = auto()
darkvision = auto()
tremorsense = auto()
truesight = auto()
class SKILLS(Enum):
acrobatics = auto()
animal_handling = auto()
arcana = auto()
athletics = auto()
deception = auto()
history = auto()
insight = auto()
intimidation = auto()
investigation = auto()
medicine = auto()
nature = auto()
perception = auto()
performance = auto()
persuasion = auto()
religion = auto()
sleight_of_hand = auto()
stealth = auto()
survival = auto()
class MEASURES(Enum):
ft = auto()
mi = auto()
@staticmethod
def normalise(text: str) -> str:
'''Replace any expanded instances of the measures by the shortened ones'''
text = re.sub("[\s\.,:;]feet[\s\.,:;]","ft", text)
return re.sub("[\s\.,:;]miles[\s\.,:;]","mi", text)
class TIME_MEASURES(Enum):
round = auto()
seconds = auto()
minute = auto()
hour = auto()
day = auto()
week = auto()
year = auto()
class SPELL_FREQUENCIES(Enum):
will = auto()
encounter = auto()
daily = auto()
rest = auto()
weekly = auto()
cantrip = auto()
levelled = auto()
class ACTION_TYPES(Enum):
action = auto()
bonus = auto()
reaction = auto()
free = auto()
legendary = auto()
mythic = auto()
lair = auto()
class ALIGNMENTS(Enum):
lawful = auto()
chaotic = auto()
good = auto()
evil = auto()
neutral = auto()
XP_BY_CR = {
"0": 0,
"1/8": 25,
"1/4": 50,
"1/2": 100,
"1": 200,
"2": 450,
"3": 700,
"4": 1100,
"5": 1800,
"6": 2300,
"7": 2900,
"8": 3900,
"9": 5000,
"10": 5900,
"11": 7200,
"12": 8400,
"13": 10000,
"14": 11500,
"15": 13000,
"16": 15000,
"17": 18000,
"18": 20000,
"19": 22000,
"20": 25000,
"21": 33000,
"22": 41000,
"23": 50000,
"24": 62000,
"25": 75000,
"26": 90000,
"27": 105000,
"28": 120000,
"29": 135000,
"30": 155000,
}
```
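A minimal usage sketch for the constants module above; the `extractor.constants` import path is an assumption based on the file location.
```python
# Hypothetical usage sketch; the import path is assumed from the file location.
from extractor.constants import (
    CREATURE_TYPE_PLURALS, CREATURE_TYPES, SIZES, XP_BY_CR, is_in_enum, enum_values,
)

# Membership helper works on the enum member names.
assert is_in_enum("dragon", CREATURE_TYPES)
assert not is_in_enum("robot", CREATURE_TYPES)

# enum_values returns the names in declaration order.
print(enum_values(SIZES))  # ['tiny', 'small', 'medium', 'large', 'huge', 'gargantuan']

# Plural-to-singular mapping handles the irregular cases explicitly.
assert CREATURE_TYPE_PLURALS.monstrosities.to_singular() is CREATURE_TYPES.monstrosity
assert CREATURE_TYPE_PLURALS.beasts.to_singular() is CREATURE_TYPES.beast

# Challenge rating lookup uses string keys, including fractional CRs.
print(XP_BY_CR["1/2"], XP_BY_CR["20"])  # 100 25000
```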
#### File: monster-wizard/utils/logger.py
```python
import logging
import sys
def get_logger(debug: bool, log_path: str=None) -> logging.Logger:
'''Helper function to create a standardised logger'''
log_formatter = logging.Formatter('[%(asctime)s] %(levelname)-8s %(name)-16s %(message)s')
log_level = logging.DEBUG if debug else logging.INFO
logger = logging.getLogger('sbp')
logger.setLevel(log_level)
## Ensure we don't double handlers in a notebook environment
for hdl in logger.handlers:
logger.removeHandler(hdl)
if log_path:
logger.addHandler(logging.FileHandler(log_path, encoding='utf-8'))
for handler in logger.handlers:
handler.setFormatter(log_formatter)
return logger
```
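A short sketch of how the helper above might be used; the `utils.logger` import path and the `run.log` file name are illustrative assumptions.
```python
# Illustrative only; 'run.log' is a made-up path.
from utils.logger import get_logger  # assumed import path based on the file location

log = get_logger(debug=True, log_path="run.log")
log.debug("pipeline starting")      # emitted because debug=True
child = log.getChild("loader")      # children reuse the 'sbp' logger's handlers
child.info("loaded 3 pages")
```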
|
{
"source": "jennis0/pdf2vtt",
"score": 3
}
|
#### File: pdf2vtt/data_loaders/data_loader_interface.py
```python
import abc
from typing import List
from utils.datatypes import Source
class DataLoaderInterface(object):
@abc.abstractmethod
def get_name() -> str:
'''Returns an internal name for this loader'''
raise NotImplementedError("users must define a name for this loader")
@staticmethod
@abc.abstractmethod
def get_filetypes() -> List[str]:
'''Returns a list of file types supported by this data loader'''
raise NotImplementedError('users must define a list of supported filetypes.')
@abc.abstractmethod
def load_data_from_file(self, filepath: str) -> Source:
'''Reads file and extracts lines of texts. Returns one section per page'''
raise NotImplementedError("userers must define a function to load data from a file.")
```
#### File: pdf2vtt/data_loaders/pytesseract_loader.py
```python
from __future__ import annotations
import logging
import configparser
from typing import List, Any
from PIL import Image
import pytesseract as pyt
from utils.datatypes import Line, Bound, Section, Source
from uuid import uuid4
from data_loaders.data_loader_interface import DataLoaderInterface
import os
class TesseractLoader(DataLoaderInterface):
'''Uses Google's Tesseract to generate a set of lines and bounding boxes from an image'''
def __init__(self, config: configparser.ConfigParser, logger: logging.Logger) -> TesseractLoader:
'''Creates a TesseractLoader using the passed configuration and logger'''
self.config = config
self.logger = logger.getChild("txloader")
print("In tesseract loader")
def get_name(self) -> str:
'''Returns a human readable name for this parser'''
return "textract"
def get_filetypes(self) -> List[str]:
'''Returns list of file types accepted by this data loader'''
return ["jpg", "png", "webp"]
def load_data_from_file(self, filepath: str) -> Source:
'''Takes a path to an image and returns a Section containing extracted lines of text for each page'''
if not os.path.exists(filepath):
self.logger.error("Image {} does not exist".format(filepath))
print("Loading from Tesseract")
images = self.__load_images_from_file(filepath)
pages = self.__extract_text(images)
source = Source(
filepath=filepath,
name=os.path.basename(filepath),
pages=pages,
page_images=images,
images = None,
num_pages=len(pages),
authors=None,
url=None
)
return source
def __load_images_from_file(self, filepath: str) -> List[Any]:
'''Takes a path to an image or folder of images and returns that image'''
if not os.path.exists(filepath):
self.logger.error("Image {} does not exist".format(filepath))
images = []
if os.path.isdir(filepath):
for im_file in os.listdir(filepath):
try:
images.append(Image.open(os.path.join(filepath, im_file)))
except:
self.logger.error(f"Failed to open image file {os.path.join(filepath, im_file)}")
else:
try:
images.append(Image.open(filepath))
except:
self.logger.error(f"Failed to open image file {filepath}")
return images
def __extract_text(self, images: List[Image.Image]) -> List[Section]:
'''Use Tesseract to extract lines and bounding boxes'''
lines = []
for im in images:
image_lines = {}
boxes = pyt.image_to_data(im, lang='eng', output_type=pyt.Output.DICT)
print(boxes.keys())
n_boxes = len(boxes["left"])
for i in range(n_boxes):
bound = Bound(boxes['left'][i],
boxes['top'][i],
boxes["width"][i],
boxes['height'][i]
)
id = "{}.{}.{}.{}".format(boxes["page_num"][i], boxes["block_num"][i],boxes["par_num"][i],boxes["line_num"][i])
if id in image_lines:
image_lines[id] = Line.merge([image_lines[id], Line(uuid4().hex, boxes["text"][i], bound, [])])
else:
image_lines[id] = Line(uuid4().hex, boxes["text"][i], bound, [])
line_nums = list(image_lines.keys())
line_nums.sort()
lines.append(Section([image_lines[k] for k in line_nums]))
print(lines)
return lines
```
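The grouping step in `__extract_text` can be illustrated without an actual image: the sketch below fakes a small `image_to_data`-style dictionary and merges words that share the same (page, block, paragraph, line) key, which is the same idea the loader uses.
```python
# Standalone illustration of the word-to-line grouping used above.
# 'boxes' mimics the dictionary layout returned by pytesseract.image_to_data;
# the words themselves are invented.
boxes = {
    "page_num":  [1, 1, 1],
    "block_num": [1, 1, 1],
    "par_num":   [1, 1, 1],
    "line_num":  [1, 1, 2],
    "text":      ["Goblin", "Boss", "Armor"],
}

lines = {}
for i in range(len(boxes["text"])):
    key = "{}.{}.{}.{}".format(
        boxes["page_num"][i], boxes["block_num"][i], boxes["par_num"][i], boxes["line_num"][i]
    )
    # Words that share a key belong to the same printed line.
    lines[key] = (lines[key] + " " + boxes["text"][i]) if key in lines else boxes["text"][i]

print(lines)  # {'1.1.1.1': 'Goblin Boss', '1.1.1.2': 'Armor'}
```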
#### File: pdf2vtt/outputs/writer_interface.py
```python
import abc
from typing import List, Any
import json
from utils.datatypes import Source
class WriterInterface(object):
@staticmethod
@abc.abstractmethod
def get_long_name() -> str:
'''Returns a human readable name for this output writer'''
raise NotImplementedError("users must define a function to return the name")
@staticmethod
@abc.abstractmethod
def get_name() -> str:
'''Returns an internal name to be used for this writer'''
raise NotImplementedError("users must define a function to return the name")
@staticmethod
@abc.abstractmethod
def get_filetype() -> str:
'''Returns the default filetype written by this writer'''
raise NotImplementedError('users must define the default filetype.')
@abc.abstractmethod
def write(self, filename: str, source: Source, creatures: List[Any], append: bool=None) -> bool:
'''Writes the creatures to the specified file. If append is set to true, creatures will be inserted into the existing file. Returns True if write is successful'''
raise NotImplementedError("users must define a function to write to a file.")
def write_p2v(self, out_filename: str, p2vdata: List[Any], append: bool=None) -> bool:
'''Converts from an existing p2v file into the writer format'''
### Apply configuration overrides
if append is None:
append = self.append
with open(p2vdata, 'r') as f:
data = json.load(f)
ret = True
for source in data:
source_data = source['source']
s = Source(
source_data['title'], source_data['title'], source_data['num_pages'] if 'num_pages' in source_data else None
, None, None, None, source_data['authors'] if 'authors' in source_data else None
)
ret &= super().write(out_filename, s, source['creatures'], append)
return ret
```
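A minimal sketch of what a concrete writer might look like; the class below is hypothetical, the import paths are assumed from the file locations, and the output format is only meant to illustrate the interface contract.
```python
# Hypothetical concrete writer; illustrates the interface only, not a real pdf2vtt output format.
import json
from typing import Any, List

from outputs.writer_interface import WriterInterface  # assumed import path
from utils.datatypes import Source                    # assumed import path

class JsonDumpWriter(WriterInterface):
    @staticmethod
    def get_long_name() -> str:
        return "Plain JSON dump"

    @staticmethod
    def get_name() -> str:
        return "jsondump"

    @staticmethod
    def get_filetype() -> str:
        return "json"

    def write(self, filename: str, source: Source, creatures: List[Any], append: bool = None) -> bool:
        mode = "a" if append else "w"
        with open(filename, mode) as f:
            # 'name' is assumed to be an attribute of Source, based on how the loaders build it.
            json.dump({"source": getattr(source, "name", None), "creatures": creatures}, f)
        return True
```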
#### File: pdf2vtt/preprocessing/clusterer.py
```python
import numpy as np
from typing import List, Tuple
from configparser import ConfigParser
from logging import Logger
from utils.datatypes import Line, Section
class Clusterer(object):
'''Take a set of text lines and try to figure out how they are structured in sections and paragraphs'''
def __init__(self, config: ConfigParser, logger: Logger):
self.fuzzyness = config.getfloat("clusterer", "fuzzyness", fallback=3.0)
self.min_gap = config.getfloat("clusterer", "min_gap", fallback=0.)
self.max_gap = config.getfloat("clusterer", "max_gap", fallback=1.)
self.logger = logger.getChild("clusterer")
self.logger.debug("Configured Clusterer with config:")
self.logger.debug("\tFuzzyness={}".format(self.fuzzyness))
self.logger.debug("\tMin Gap={}".format(self.min_gap))
self.logger.debug("\tMax Gap={}".format(self.max_gap))
def _estimate_distances_and_gap(self, lines: List[Line]) -> Tuple[List[float], float]:
'''Calculate the distances between each element in the cluster and returns a sensible cutoff'''
gaps = [0.] + [lines[i+1].bound.top - (lines[i].bound.bottom()) for i in range(len(lines) - 1)]
gaps = np.array(gaps)
bins = np.arange(0, .1, 0.005)
counts, edges = np.histogram(gaps, bins=bins)
lg = edges[counts.argmax() + 1]
self.logger.debug("Found reasonable threshold of {}%".format(lg))
return gaps, lg
def cluster(self, lines: List[Line]) -> List[Section]:
'''Cluster the passed lines into sections by finding lines with large gaps between them'''
clusters = []
gaps, threshold = self._estimate_distances_and_gap(lines)
threshold = min(max(self.min_gap, threshold * self.fuzzyness), self.max_gap)
self.logger.debug("Using clustering threshold of {}".format(threshold))
current_cluster = Section([], ['col_start'])
block_started = False
for i,lg in enumerate(zip(lines, gaps)):
line,gap = lg
self.logger.debug(f"{i} - {line}")
### Filter out email addresses
if "@" in line.text:
self.logger.debug(f"Throwing away line {line} due to as an email address")
continue
#Throw away anything right at the top of the page if it has no attributes besides 'text_title' and we haven't already seen a 'good' line
if not block_started:
if line.bound.top < 0.05 and len([a for a in line.attributes if a not in ['text_title']]) == 0:
self.logger.debug(f"Throwing away line {line} as it's too close to start or end of page")
continue
else:
block_started = True
current_cluster.add_line(line)
continue
#Gap is large so start a new cluster
if gap < -0.1 or gap > threshold or "statblock_title" in line.attributes or "text_title" in line.attributes:
clusters.append(current_cluster)
current_cluster = Section([line], [])
continue
else:
current_cluster.add_line(line)
if len(current_cluster.lines) > 0:
clusters.append(current_cluster)
return clusters
```
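The threshold heuristic in `_estimate_distances_and_gap` can be sketched with plain numpy, without the project's `Line`/`Section` types; the gap values below are made up.
```python
# Standalone sketch of the gap-histogram heuristic; the gaps are invented.
import numpy as np

gaps = np.array([0.0, 0.012, 0.011, 0.013, 0.012, 0.060, 0.011, 0.012])
bins = np.arange(0, .1, 0.005)
counts, edges = np.histogram(gaps, bins=bins)
typical_gap = edges[counts.argmax() + 1]   # upper edge of the most common gap bin

# Mirror the Clusterer defaults: fuzzyness=3.0, min_gap=0.0, max_gap=1.0
threshold = min(max(0.0, typical_gap * 3.0), 1.0)
print(typical_gap, threshold)              # gaps above 'threshold' would start a new Section
```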
|
{
"source": "JenniTheDev/QuaternionClass",
"score": 3
}
|
#### File: JenniTheDev/QuaternionClass/quaternion.py
```python
from math import sqrt, acos
from math import pi
from math import cos, sin, atan2
PI_2 = pi / 2.0
from vector import Vector
from matrix import Matrix
class Quaternion:
def __init__(self, w, x=0, y=0, z=0):
self.w = float(w)
self.x = float(x)
self.y = float(y)
self.z = float(z)
@classmethod
def i(cls): return Quaternion(0.0, 1.0, 0.0, 0.0)
@classmethod
def j(cls): return Quaternion(0.0, 0.0, 1.0, 0.0)
@classmethod
def k(cls): return Quaternion(0.0, 0.0, 0.0, 1.0)
@classmethod
def ii(cls): return -1
@classmethod
def jj(cls): return -1
@classmethod
def kk(cls): return -1
@classmethod
def ij(cls): return cls.k()
@classmethod
def ji(cls): return -cls.k()
@classmethod
def jk(cls): return cls.i()
@classmethod
def kj(cls): return -cls.i()
@classmethod
def ki(cls): return cls.j()
@classmethod
def ik(cls): return -cls.j()
@classmethod
def ijk(cls): return -1
def __str__(self):
s = 'Quat('
if self == Quaternion.i(): return s + 'i)'
if self == Quaternion.j(): return s + 'j)'
if self == Quaternion.k(): return s + 'k)'
if self.magnitude() == 0.0 and self.w == 0: return s + '0)'
if self.magnitude() == 1.0 and self.w == 1: return s + '1)'
if self.vector().magnitude() == 0.0: return f'{s}{self.w})'
else: return s + f'{self.w:.1f} + {self.vector()})'
def __add__(self, o):
if isinstance(o, (int, float)):
return Quaternion(self.w + o, self.x, self.y, self.z)
return Quaternion(self.w + o.w, self.x + o.x, self.y + o.y, self.z + o.z)
def __radd__(self, o): return self + o
def __sub__(self, v): return self + -v
def __rsub__(self, o): return -(o - self)
def __rmul__(self, o): return self * o
def __mul__(self, o):
if isinstance(o, Quaternion):
w1, x1, y1, z1 = self.w, self.x, self.y, self.z
w2, x2, y2, z2 = o.w, o.x, o.y, o.z
return Quaternion((w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2),
(w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2),
(w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2),
(w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2))
val = o
return Quaternion(val * self.w, val * self.x, val * self.y, val * self.z)
def __truediv__(self, val): return self * (1.0 / val)
def __neg__(self): return Quaternion(-self.w, -self.x, -self.y, -self.z)
def __eq__(self, v): return self.w == v.w and self.x == v.x and self.y == v.y and self.z == v.z
def __ne__(self, v): return not (self == v)
def vector(self): return Vector(self.x, self.y, self.z)
def scalar(self): return self.w
def unit_scalar(self): return Quaternion(1.0, 0.0, 0.0, 0.0)
def conjugate(self): return Quaternion(self.w, -self.x, -self.y, -self.z)
def inverse(self): return self.conjugate() / self.magnitude() ** 2
def unit(self): return self / self.magnitude()
def norm(self): return sqrt(self.w ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2)
def magnitude(self): return self.norm()
def dot(self, v): return self.w * v.w + self.vector().dot(v.vector())
def angle(self, v):
if not isinstance(v, Quaternion): raise TypeError
z = self.conjugate() * v
zvnorm = z.vector().norm()
zscalar = z.scalar()
angle = atan2(zvnorm, zscalar)
return angle * 180.0 / pi
def rot_matrix(self):
w, x, y, z = self.w, self.x, self.y, self.z
# print(w, x, y, z)
return Matrix(3, 3, -2*(y**2 + z**2) + 1, 2*(x*y - w*z), 2*(x*z + w*y),
2*(x*y + w*z), -2*(x**2 + z**2) + 1, 2*(y*z - w*x),
2*(x*z - w*y), 2*(y*z + w*x), -2*(x**2 + y**2)+1)
# return Matrix(3, 3, 2*(w**2 + x**2) - 1, 2*(x*y - w*z), 2*(x*z + w*y),
# 2*(x*y + w*z), 2*(w**2 + y**2) - 1, 2*(y*z - w*x),
# 2*(x*z - w*y), 2*(y*z + w*x), 2*(w**2 + z**2)-1)
@staticmethod
def rotate(pt, axis, theta): # rotates a point pt (pt.x, pt.y, pt.z) about (axis.x, axis.y, axis.z) by theta
costheta2 = cos(theta / 2.0)
sintheta2 = sin(theta / 2.0)
q = Quaternion(costheta2, axis.x * sintheta2, axis.y * sintheta2, axis.z * sintheta2)
q_star = Quaternion(q.w, -q.x, -q.y, -q.z)
p = Quaternion(0, pt.x, pt.y, pt.z)
p_rot = q * p * q_star
return Vector(p_rot.x, p_rot.y, p_rot.z)
@staticmethod
def run_tests():
a = Quaternion(1, 2, 3, 4)
b = Quaternion(4, 0, 0, 7)
c = Quaternion(0, 1, 1, 0)
d = Quaternion(0, 0, 1, 0)
e = Quaternion(0, 0, 0, 1)
f = Quaternion(0, 0, 0, 0)
g = Quaternion(1, 0, 0, 0)
h = Quaternion(3, 0, 0, 0)
print('a = ' + str(a))
print('b = ' + str(b))
print('c = ' + str(c))
print('d = ' + str(d))
print('e = ' + str(e))
print('f = ' + str(f))
print('g = ' + str(g))
print('h = ' + str(h))
print('c + d = ', str(c + d))
print('c + d + e = ', c + d + e)
print(f'5 * h is: {5.0 * h}')
print(f'h * 5 is: {h * 5.0}')
print(f'h / 3.0 is: {h / 3.0}')
print(f'h.magnitude() is: {h.magnitude()}')
print(f'h.unit() is: {h.unit()}')
print(f'g.unit() is: {g.unit()}')
print(f'a.unit() is: {a.unit()}')
print(f'a.vector() is: {a.vector()}')
print(f'a.scalar() is: {a.scalar()}')
print(f'a.conjugate() is: {a.conjugate()}')
print(f'a.inverse() is: {a.inverse()}')
print(f'a * a.inverse() is: {a * a.inverse()}')
print(f'c == d is: {c == d}')
print(f'c != d is: {c != d}')
print(f'e == e is: {e == e}')
print(f'e != e is: {e != e}')
print(f'angle between c and d is: {c.angle(d):.3f} degrees')
c_minus_d = c - d
print(f'c_minus_d is: {c_minus_d}')
rot_matrix = c_minus_d.rot_matrix()
print(f'rot_matrix of c_minus_d is: {rot_matrix}')
rad2_2 = sqrt(2)/2.0
print("SEE THIS WEBSITE for DETAILED DIAGRAMS on the TESTS of the PLANE's rotations")
print('https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/examples/index.htm')
print('# -------------- LEVEL FLIGHT -------------------')
plane = Quaternion(1)
print(f'levelflight(E) is {plane}{plane.rot_matrix()}')
plane = Quaternion(rad2_2, 0, rad2_2, 0)
print(f'levelflight(N) is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, 0, 1, 0)
print(f'levelflight(W) is {plane}{plane.rot_matrix()}')
plane = Quaternion(rad2_2, 0, -rad2_2, 0)
print(f'levelflight(S) is {plane}{plane.rot_matrix()}')
print('# ----------------------------------------------------')
print('\n\n# -------- STRAIGHT UP ---------------------')
plane = Quaternion(rad2_2, 0, 0, rad2_2)
print(f'plane_straightupE is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, 0.5, 0.5, 0.5)
print(f'plane_straightupN is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, rad2_2, rad2_2, 0)
print(f'plane_straightupW is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, -0.5, -0.5, 0.5)
print(f'plane_straightupS is {plane}{plane.rot_matrix()}')
print('# -------- end STRAIGHT UP ---------------------')
print('\n\n# -------- STRAIGHT DOWN ---------------------')
plane = Quaternion(rad2_2, 0, 0, -rad2_2)
print(f'plane_straightdownE is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, -0.5, 0.5, -0.5)
print(f'plane_straightdownN is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, -rad2_2, rad2_2, 0)
print(f'plane_straightdownW is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, 0.5, -0.5, -0.5)
print(f'plane_straightdownS is {plane}{plane.rot_matrix()}')
print('# -------- end STRAIGHT DOWN ---------------------')
print('\n\n# -------- BANK/ROLL ---------------------')
plane = Quaternion(rad2_2, rad2_2, 0, 0)
print(f'plane_E_bankLeft90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, 0.5, 0.5, -0.5)
print(f'plane_N_bankLeft90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, 0, rad2_2, -rad2_2)
print(f'plane_W_bankLeft90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, 0.5, -0.5, 0.5)
print(f'plane_S_bankLeft90 is {plane}{plane.rot_matrix()}')
print('\nBanking/Rolling 180 degrees')
plane = Quaternion(0, 1, 0, 0)
print(f'plane_E_bankLeft180 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, rad2_2, 0, -rad2_2)
print(f'plane_N_bankLeft180 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, 0, 0, 1)
print(f'plane_W_bankLeft180 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, rad2_2, 0, rad2_2)
print(f'plane_S_bankLeft180 is {plane}{plane.rot_matrix()}')
print('\nBanking/Rolling Right 90 degrees')
plane = Quaternion(rad2_2, -rad2_2, 0, 0)
print(f'plane_E_bankRight90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, -0.5, 0.5, 0.5)
print(f'plane_N_bankRight90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0, 0, rad2_2, rad2_2)
print(f'plane_W_bankRight90 is {plane}{plane.rot_matrix()}')
plane = Quaternion(0.5, -0.5, -0.5, -0.5)
print(f'plane_S_bankRight90 is {plane}{plane.rot_matrix()}')
print('# -------- end BANK/ROLL ---------------------')
print("SEE THIS WEBSITE for DETAILED DIAGRAMS on the TESTS of the PLANE's rotations")
print('https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/examples/index.htm')
def main():
# Vector.run_tests()
Quaternion.run_tests()
# Matrix.run_tests()
if __name__ == '__main__':
main()
```
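The `rotate` helper boils down to the conjugation q·p·q*; the standalone sketch below reproduces that computation for a 90-degree rotation of the point (1, 0, 0) about the z-axis, using plain tuples instead of the module's `Vector` class.
```python
# Standalone check of the q * p * q_conjugate rotation; no project imports needed.
from math import cos, sin, pi

def qmul(a, b):
    # Hamilton product of two quaternions given as (w, x, y, z) tuples.
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return (w1*w2 - x1*x2 - y1*y2 - z1*z2,
            w1*x2 + x1*w2 + y1*z2 - z1*y2,
            w1*y2 + y1*w2 + z1*x2 - x1*z2,
            w1*z2 + z1*w2 + x1*y2 - y1*x2)

theta = pi / 2                                   # 90 degrees about the z-axis
q      = (cos(theta/2), 0.0, 0.0, sin(theta/2))
q_star = (q[0], -q[1], -q[2], -q[3])
p      = (0.0, 1.0, 0.0, 0.0)                    # the point (1, 0, 0) as a pure quaternion

w, x, y, z = qmul(qmul(q, p), q_star)
print(round(x, 6), round(y, 6), round(z, 6))     # ~0.0 1.0 0.0: (1, 0, 0) maps to (0, 1, 0)
```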
|
{
"source": "jennizalwango/LaniAutomation",
"score": 2
}
|
#### File: LaniAutomation/learnTesting/test_samplecode.py
```python
from selenium import webdriver
import os
from pathlib import Path
import time
folder_path = str(Path(__file__).parents[0])
def test_my_very_first_test():
# driver = webdriver.Chrome(os.path.join(folder_path, 'chromedriver'))
driver = webdriver.Chrome('/Users/johnseremba/Work/learnTesting/chromedriver_win32/chromedriver.exe')
driver.get("http://www.google.com")
time.sleep(3)
```
|
{
"source": "jennkimerson/brew-channel-validation",
"score": 3
}
|
#### File: jennkimerson/brew-channel-validation/connection.py
```python
import os
import psycopg2 as pp
import psycopg2.extras as ppx
def get_host():
"""
If PGHOST is unset, raise SystemExit
:returns: Returns a hostname (str) that runs postgres.
"""
host = os.getenv("PGHOST")
if host is None:
raise SystemExit("Please set the PGHOST environment")
return host
def get_brew_channels():
"""
:returns: Returns a list of rows. Each row is accessible by dictionary
keys.
"""
host = get_host()
conn = pp.connect(dbname="public", host=host, port=5433)
cur = conn.cursor(cursor_factory=ppx.DictCursor)
# Select rows from brew channel table. Contains channel IDs
# and channel names
postgreSQL_select_Query = "SELECT * FROM brew.channels;"
# Execute Query
cur.execute(postgreSQL_select_Query)
# Selecting rows from brew.task.id table using cursor.fetchall
return cur.fetchall()
if __name__ == "__main__":
brew_channels = get_brew_channels()
print(brew_channels)
```
#### File: brew-channel-validation/tests/test_connection.py
```python
import pytest
from connection import get_host
def test_get_host_without_env_var(monkeypatch):
# Ensure that we get a SystemExit when we have no PGHOST env var set
monkeypatch.delenv("PGHOST")
with pytest.raises(SystemExit) as e:
get_host()
assert str(e.value) == "Please set the PGHOST environment"
def test_get_host_with_env_var(monkeypatch):
# Ensure that we get the PGHOST value
monkeypatch.setenv("PGHOST", "db.example.com")
host = get_host()
assert host == "db.example.com"
```
|
{
"source": "jennkimerson/cephci",
"score": 2
}
|
#### File: tests/ceph_ansible/switch_rpm_to_container.py
```python
import logging
logger = logging.getLogger(__name__)
log = logger
def run(**kw):
log.info("Running exec test")
ceph_nodes = kw.get('ceph_nodes')
config = kw.get('config')
build = config.get('rhbuild')
installer_node = None
ansible_dir = '/usr/share/ceph-ansible'
playbook = 'switch-from-non-containerized-to-containerized-ceph-daemons.yml'
for cnode in ceph_nodes:
if cnode.role == 'installer':
installer_node = cnode
if not build.startswith('4'):
installer_node.exec_command(sudo=True,
cmd='cd {ansible_dir}; cp {ansible_dir}/infrastructure-playbooks/{playbook} .'
.format(ansible_dir=ansible_dir, playbook=playbook))
out, err = installer_node.exec_command(cmd='cd {ansible_dir};ansible-playbook -vvvv {playbook}'
' -e ireallymeanit=yes -i hosts'
.format(ansible_dir=ansible_dir, playbook=playbook),
long_running=True)
else:
out, err = installer_node.exec_command(cmd='cd {ansible_dir};ansible-playbook -vvvv'
' infrastructure-playbooks/{playbook} -e ireallymeanit=yes -i hosts'
.format(ansible_dir=ansible_dir, playbook=playbook),
long_running=True)
if err == 0:
log.info("ansible-playbook switch-from-non-containerized-to-containerized-ceph-daemons.yml successful")
return 0
log.info("ansible-playbook switch-from-non-containerized-to-containerized-ceph-daemons.yml failed")
return 1
```
#### File: tests/cephfs/CEPH-11255_11336_fuse.py
```python
import logging
import time
import timeit
import traceback
from ceph.ceph import CommandFailed
from ceph.parallel import parallel
from ceph.utils import check_ceph_healthly
from tests.cephfs.cephfs_utils import FsUtils
logger = logging.getLogger(__name__)
log = logger
# osd
def run(ceph_cluster, **kw):
try:
start = timeit.default_timer()
tc = '11255_11336-fuse client'
dir_name = 'dir'
log.info("Running cephfs %s test case" % (tc))
config = kw.get('config')
num_of_osds = config.get('num_of_osds')
fs_util = FsUtils(ceph_cluster)
build = config.get('build', config.get('rhbuild'))
client_info, rc = fs_util.get_clients(build)
if rc == 0:
log.info("Got client info")
else:
log.error("fetching client info failed")
return 1
client1, client2, client3, client4 = ([] for _ in range(4))
client1.append(client_info['fuse_clients'][0])
client2.append(client_info['fuse_clients'][1])
client3.append(client_info['kernel_clients'][0])
client4.append(client_info['kernel_clients'][1])
rc1 = fs_util.auth_list(client1)
rc2 = fs_util.auth_list(client2)
rc3 = fs_util.auth_list(client3)
rc4 = fs_util.auth_list(client4)
print(rc1, rc2, rc3, rc4)
if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
log.info("got auth keys")
else:
log.error("creating auth failed")
return 1
rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
if rc1 == 0 and rc2 == 0:
log.info("Fuse mount passed")
else:
log.error("Fuse mount failed")
return 1
rc3 = fs_util.kernel_mount(
client3,
client_info['mounting_dir'],
client_info['mon_node_ip'])
rc4 = fs_util.kernel_mount(
client4,
client_info['mounting_dir'],
client_info['mon_node_ip'])
if rc3 == 0 and rc4 == 0:
log.info("kernel mount passed")
else:
log.error("kernel mount failed")
return 1
cluster_health_beforeIO = check_ceph_healthly(
client_info['mon_node'][0], num_of_osds, len(
client_info['mon_node']), build, None, 300)
rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
if rc == 0:
log.info("Activate multiple mdss successfully")
else:
raise CommandFailed("Activate multiple mdss failed")
rc = fs_util.standby_rank(
client_info['mds_nodes'],
client_info['mon_node'],
todo='add_rank')
if rc == 0:
log.info("Added standby ranks")
else:
log.error("Failed to add standby ranks")
return 1
client1[0].exec_command(
cmd='sudo mkdir %s%s' %
(client_info['mounting_dir'], dir_name))
if client1[0].node.exit_status == 0:
log.info("Dir created")
else:
raise CommandFailed('Dir creation failed')
rc1 = fs_util.fstab_entry(
client1,
client_info['mounting_dir'],
action='doEntry')
rc2 = fs_util.fstab_entry(
client2,
client_info['mounting_dir'],
action='doEntry')
if rc1 == 0 and rc2 == 0:
log.info("FSentry for clients are done")
else:
raise CommandFailed("FsEntry failed")
rc1 = fs_util.fstab_entry(
client3,
client_info['mounting_dir'],
action='doEntry', mon_node_ip=client_info['mon_node_ip'])
rc2 = fs_util.fstab_entry(
client4,
client_info['mounting_dir'],
action='doEntry', mon_node_ip=client_info['mon_node_ip'])
if rc1 == 0 and rc2 == 0:
log.info("FSentry for clients are done")
else:
raise CommandFailed("FsEntry failed")
with parallel() as p:
p.spawn(fs_util.read_write_IO, client1,
client_info['mounting_dir'], 'g', 'write')
p.spawn(fs_util.read_write_IO, client3,
client_info['mounting_dir'], 'g', 'read')
p.spawn(
fs_util.stress_io,
client2,
client_info['mounting_dir'],
dir_name,
0,
50,
iotype='fio')
p.spawn(
fs_util.stress_io,
client3,
client_info['mounting_dir'],
dir_name,
0,
1,
iotype='smallfile_create', fnum=1000, fsize=100)
p.spawn(
fs_util.stress_io,
client4,
client_info['mounting_dir'],
dir_name,
0,
1,
iotype='crefi')
p.spawn(fs_util.reboot, client1[0])
res = []
with parallel() as p:
for node in client_info['mds_nodes']:
p.spawn(fs_util.heartbeat_map, node)
for op in p:
res.append(op)
print(res)
with parallel() as p:
p.spawn(fs_util.read_write_IO, client1,
client_info['mounting_dir'], 'g', 'write')
p.spawn(fs_util.read_write_IO, client4,
client_info['mounting_dir'], 'g', 'read')
p.spawn(fs_util.stress_io,
client1,
client_info['mounting_dir'],
dir_name,
0,
1,
iotype='fio')
p.spawn(
fs_util.stress_io,
client3,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='dd')
p.spawn(
fs_util.stress_io,
client4,
client_info['mounting_dir'],
dir_name,
0,
500,
iotype='touch'),
p.spawn(fs_util.reboot, client2[0])
cluster_health_afterIO = check_ceph_healthly(
client_info['mon_node'][0], num_of_osds, len(
client_info['mon_node']), build, None, 300)
if cluster_health_afterIO == cluster_health_beforeIO:
log.info('cluster is healthy')
else:
log.error("cluster is not healty")
return 1
with parallel() as p:
p.spawn(
fs_util.stress_io,
client1,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='fio')
p.spawn(
fs_util.stress_io,
client2,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='dd')
p.spawn(
fs_util.stress_io,
client3,
client_info['mounting_dir'],
dir_name,
0,
500,
iotype='touch')
for node in client_info['osd_nodes']:
p.spawn(fs_util.reboot, node)
with parallel() as p:
p.spawn(
fs_util.stress_io,
client1,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='fio'),
p.spawn(
fs_util.stress_io,
client2,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='dd')
p.spawn(
fs_util.stress_io,
client3,
client_info['mounting_dir'],
dir_name,
0,
500,
iotype='touch')
for node in client_info['osd_nodes']:
fs_util.network_disconnect(node)
with parallel() as p:
p.spawn(
fs_util.stress_io,
client1,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='fio')
p.spawn(
fs_util.stress_io,
client2,
client_info['mounting_dir'],
dir_name,
0,
10,
iotype='dd')
p.spawn(
fs_util.stress_io,
client3,
client_info['mounting_dir'],
dir_name,
0,
500,
iotype='touch')
for node in client_info['osd_nodes']:
fs_util.pid_kill(node, 'osd')
time.sleep(100)
cluster_health_afterIO = check_ceph_healthly(
client_info['mon_node'][0], num_of_osds, len(
client_info['mon_node']), build, None, 300)
if cluster_health_beforeIO == cluster_health_afterIO:
log.info("Cluster is healthy")
else:
return 1
log.info('Cleaning up!-----')
if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
rc = fs_util.client_clean_up(
client_info['fuse_clients'],
client_info['kernel_clients'],
client_info['mounting_dir'],
'umount')
rc = fs_util.standby_rank(
client_info['mds_nodes'],
client_info['mon_node'],
todo='add_rank_revert')
if rc == 0:
log.info("removed standby ranks")
rc1 = fs_util.fstab_entry(
client1,
client_info['mounting_dir'],
action='revertEntry')
rc2 = fs_util.fstab_entry(
client2,
client_info['mounting_dir'],
action='revertEntry')
if rc1 == 0 and rc2 == 0:
log.info("FSentry for clients are done")
else:
return 1
else:
rc = fs_util.client_clean_up(
client_info['fuse_clients'],
'',
client_info['mounting_dir'],
'umount')
rc = fs_util.standby_rank(
client_info['mds_nodes'],
client_info['mon_node'],
todo='add_rank_revert')
if rc == 0:
log.info("removed standby ranks")
rc1 = fs_util.fstab_entry(
client1,
client_info['mounting_dir'],
action='revertEntry')
rc2 = fs_util.fstab_entry(
client2,
client_info['mounting_dir'],
action='revertEntry')
if rc1 == 0 and rc2 == 0:
log.info("FSentry for clients are done")
if rc == 0:
log.info('Cleaning up successful')
log.info("Execution of Test cases CEPH-%s ended:" % (tc))
print('Script execution time:------')
stop = timeit.default_timer()
total_time = stop - start
mins, secs = divmod(total_time, 60)
hours, mins = divmod(mins, 60)
print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
return 0
except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.info('Cleaning up!-----')
if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
rc = fs_util.client_clean_up(
client_info['fuse_clients'],
client_info['kernel_clients'],
client_info['mounting_dir'],
'umount')
else:
rc = fs_util.client_clean_up(
client_info['fuse_clients'],
'',
client_info['mounting_dir'],
'umount')
if rc == 0:
log.info('Cleaning up successful')
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
```
#### File: tests/cephfs/cephfs_basic_tests.py
```python
import logging
import random
import string
import timeit
import traceback
from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utils import FsUtils
logger = logging.getLogger(__name__)
log = logger
def run(ceph_cluster, **kw):
try:
start = timeit.default_timer()
fs_util = FsUtils(ceph_cluster)
config = kw.get('config')
build = config.get('build', config.get('rhbuild'))
client_info, rc = fs_util.get_clients(build)
if rc == 0:
log.info("Got client info")
else:
raise CommandFailed("fetching client info failed")
client1 = []
client2 = []
client3 = []
client4 = []
client1.append(client_info['fuse_clients'][0])
client2.append(client_info['fuse_clients'][1])
client3.append(client_info['kernel_clients'][0])
client4.append(client_info['kernel_clients'][1])
rc1 = fs_util.auth_list(client1)
rc2 = fs_util.auth_list(client2)
rc3 = fs_util.auth_list(client3)
rc4 = fs_util.auth_list(client4)
print(rc1, rc2, rc3, rc4)
if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
log.info("got auth keys")
else:
raise CommandFailed("auth list failed")
rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
if rc1 == 0 and rc2 == 0:
log.info("Fuse mount passed")
else:
raise CommandFailed("Fuse mount failed")
rc3 = fs_util.kernel_mount(
client3,
client_info['mounting_dir'],
client_info['mon_node_ip'])
rc4 = fs_util.kernel_mount(
client4,
client_info['mounting_dir'],
client_info['mon_node_ip'])
if rc3 == 0 and rc4 == 0:
log.info("kernel mount passed")
else:
raise CommandFailed("kernel mount failed")
tc1 = '11293'
tc2 = '11296'
tc3 = '11297'
tc4 = '11295'
dir1 = ''.join(
random.choice(
string.ascii_lowercase
+ string.digits) for _ in range(10))
dir2 = ''.join(
random.choice(
string.ascii_lowercase
+ string.digits) for _ in range(10))
dir3 = ''.join(
random.choice(
string.ascii_lowercase
+ string.digits) for _ in range(10))
results = []
return_counts = []
log.info("Create files and directories of 1000 depth and 1000 breadth")
for client in client_info['fuse_clients']:
client.exec_command(
cmd='sudo mkdir %s%s' %
(client_info['mounting_dir'], dir1))
client.exec_command(
cmd='sudo mkdir %s%s' %
(client_info['mounting_dir'], dir2))
client.exec_command(
cmd='sudo mkdir %s%s' %
(client_info['mounting_dir'], dir3))
log.info('Execution of testcase %s started' % tc1)
out, rc = client.exec_command(
cmd='sudo crefi %s%s --fop create --multi -b 1000 -d 1000 '
'-n 1 -T 5 --random --min=1K --max=10K' %
(client_info['mounting_dir'], dir1), long_running=True)
log.info('Execution of testcase %s ended' % tc1)
if client.node.exit_status == 0:
results.append("TC %s passed" % tc1)
log.info('Execution of testcase %s started' % tc2)
client.exec_command(
cmd='sudo cp -r %s%s/* %s%s/' %
(client_info['mounting_dir'], dir1,
client_info['mounting_dir'], dir2))
client.exec_command(
cmd="diff -qr %s%s %s%s/" %
(client_info['mounting_dir'], dir1,
client_info['mounting_dir'], dir2))
log.info('Execution of testcase %s ended' % tc2)
if client.node.exit_status == 0:
results.append("TC %s passed" % tc2)
log.info('Execution of testcase %s started' % tc3)
out, rc = client.exec_command(
cmd='sudo mv %s%s/* %s%s/' %
(client_info['mounting_dir'], dir1,
client_info['mounting_dir'], dir3))
log.info('Execution of testcase %s ended' % tc3)
if client.node.exit_status == 0:
results.append("TC %s passed" % tc3)
log.info('Execution of testcase %s started' % tc4)
for client in client_info['clients']:
if client.pkg_type != 'deb':
client.exec_command(
cmd='sudo dd if=/dev/zero of=%s%s.txt bs=100M '
'count=5' %
(client_info['mounting_dir'], client.node.hostname))
out1, rc1 = client.exec_command(
cmd='sudo ls -c -ltd -- %s%s.*' %
(client_info['mounting_dir'], client.node.hostname))
client.exec_command(
cmd='sudo dd if=/dev/zero of=%s%s.txt bs=200M '
'count=5' %
(client_info['mounting_dir'], client.node.hostname))
out2, rc2 = client.exec_command(
cmd='sudo ls -c -ltd -- %s%s.*' %
(client_info['mounting_dir'], client.node.hostname))
a = out1.read().decode()
print("------------")
b = out2.read().decode()
if a != b:
return_counts.append(out1.channel.recv_exit_status())
return_counts.append(out2.channel.recv_exit_status())
else:
raise CommandFailed("Metadata info command failed")
break
log.info('Execution of testcase %s ended' % tc4)
print(return_counts)
rc_set = set(return_counts)
if len(rc_set) == 1:
results.append("TC %s passed" % tc4)
print("Testcase Results:")
for res in results:
print(res)
break
log.info('Cleaning up!-----')
if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
rc = fs_util.client_clean_up(client_info['fuse_clients'],
client_info['kernel_clients'],
client_info['mounting_dir'], 'umount')
else:
rc = fs_util.client_clean_up(client_info['fuse_clients'],
'',
client_info['mounting_dir'], 'umount')
if rc == 0:
log.info('Cleaning up successful')
else:
return 1
print('Script execution time:------')
stop = timeit.default_timer()
total_time = stop - start
mins, secs = divmod(total_time, 60)
hours, mins = divmod(mins, 60)
print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
return 0
except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.info('Cleaning up!-----')
if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
rc = fs_util.client_clean_up(client_info['fuse_clients'],
client_info['kernel_clients'],
client_info['mounting_dir'], 'umount')
else:
rc = fs_util.client_clean_up(client_info['fuse_clients'],
'',
client_info['mounting_dir'], 'umount')
if rc == 0:
log.info('Cleaning up successful')
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
```
#### File: cephci/utility/lvm_utils.py
```python
def pvcreate(osd, devices):
osd.exec_command(cmd='sudo pvcreate %s' % devices)
def vgcreate(osd, vg_name, devices):
osd.exec_command(cmd='sudo vgcreate %s %s' % (vg_name, devices))
return vg_name
def lvcreate(osd, lv_name, vg_name, size):
osd.exec_command(cmd="sudo lvcreate -n %s -l %s %s " % (lv_name, size, vg_name))
return lv_name
def make_partition(osd, device, start=None, end=None, gpt=False):
osd.exec_command(cmd='sudo parted --script %s mklabel gpt' % device) if gpt \
else osd.exec_command(cmd='sudo parted --script %s mkpart primary %s %s' % (device, start, end))
def osd_scenario1(osd, devices_dict, dmcrypt=False):
"""
OSD scenario type1 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt
"""
pvcreate(osd, devices_dict.get('devices'))
vgname = vgcreate(osd, osd.LvmConfig.vg_name % '1', devices_dict.get('devices')) # all /dev/vd{b,c,d,e}
data_lv1 = lvcreate(osd,
osd.LvmConfig.data_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(20))
data_lv2 = lvcreate(osd,
osd.LvmConfig.data_lv %
'2',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(20))
data_lv3 = lvcreate(osd,
osd.LvmConfig.data_lv %
'3',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(20))
data_lv4 = lvcreate(osd,
osd.LvmConfig.data_lv %
'4',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(20))
db_lv1 = lvcreate(
osd,
osd.LvmConfig.db_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(8))
db_lv2 = lvcreate(
osd,
osd.LvmConfig.db_lv %
'2',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(8))
wal_lv1 = lvcreate(osd,
osd.LvmConfig.wal_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(2))
wal_lv2 = lvcreate(osd,
osd.LvmConfig.wal_lv %
'2',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(2))
scenario = "{{'data':'{datalv1}','data_vg':'{vg_name}'}},{{'data':'{datalv2}','data_vg':'{vg_name}'," \
"'db':'{dblv1}','db_vg':'{vg_name}'}}," \
"{{'data':'{datalv3}','data_vg':'{vg_name}','wal':'{wallv1}','wal_vg':'{vg_name}'}}," \
"{{'data':'{datalv4}','data_vg':'{vg_name}','db':'{dblv2}','db_vg':'{vg_name}','wal':'{wallv2}'," \
"'wal_vg':'{vg_name}'}}".format(vg_name=vgname, datalv1=data_lv1, datalv2=data_lv2, dblv1=db_lv1,
datalv3=data_lv3, wallv1=wal_lv1, datalv4=data_lv4,
dblv2=db_lv2, wallv2=wal_lv2)
return {'scenario': scenario, 'dmcrypt': dmcrypt}
def osd_scenario1_dmcrypt(osd, devices_dict):
"""
OSD scenario type2 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt(overridden to True)
"""
generated_sce_dict = osd_scenario1(osd, devices_dict, dmcrypt=True)
return {'scenario': generated_sce_dict.get('scenario'), 'dmcrypt': generated_sce_dict.get('dmcrypt')}
def osd_scenario2(osd, devices_dict, dmcrypt=False):
"""
OSD scenario type3 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt
"""
make_partition(osd, devices_dict.get('device1'), gpt=True)
make_partition(osd, devices_dict.get('device1'), '1', '80%')
make_partition(osd, devices_dict.get('device1'), '80%', '85%')
make_partition(osd, devices_dict.get('device1'), '85%', '90%')
make_partition(osd, devices_dict.get('device1'), '90%', '95%')
make_partition(osd, devices_dict.get('device1'), '95%', '100%')
scenario = "{{'data':'{vdb1}','db':'{vdb2}','wal':'{vdb3}'}},{{'data':'{vdc}','db':'{vdb4}','wal':'{vdb5}'}}," \
"{{'data':'{vdd}'}}".format(vdb1=devices_dict.get('device1') + '1',
vdb2=devices_dict.get('device1') + '2',
vdb3=devices_dict.get('device1') + '3', vdc=devices_dict.get('device2'),
vdb4=devices_dict.get('device1') + '4',
vdb5=devices_dict.get('device1') + '5', vdd=devices_dict.get('device3'))
return {'scenario': scenario, 'dmcrypt': dmcrypt}
def osd_scenario2_dmcrypt(osd, devices_dict):
"""
OSD scenario type4 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt(overridden to True)
"""
generated_sce_dict = osd_scenario2(osd, devices_dict, dmcrypt=True)
return {'scenario': generated_sce_dict.get('scenario'), 'dmcrypt': generated_sce_dict.get('dmcrypt')}
def osd_scenario3(osd, devices_dict, dmcrypt=False):
"""
OSD scenario type5 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt
"""
pvcreate(osd, devices_dict.get('devices'))
devs = "{a} {b}".format(a=devices_dict.get('device0'), b=devices_dict.get('device2')) # vdb vdd
vgname = vgcreate(osd, osd.LvmConfig.vg_name % '1', devs)
make_partition(osd, devices_dict.get('device3'), gpt=True) # vde
make_partition(osd, devices_dict.get('device3'), '1', '80%')
make_partition(osd, devices_dict.get('device3'), '80%', '90%')
make_partition(osd, devices_dict.get('device3'), '90%', '100%')
data_lv1 = lvcreate(osd,
osd.LvmConfig.data_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(80))
db_lv1 = lvcreate(
osd,
osd.LvmConfig.db_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(10))
wal_lv1 = lvcreate(osd,
osd.LvmConfig.wal_lv %
'1',
osd.LvmConfig.vg_name %
'1',
osd.LvmConfig.size.format(10))
# To-Do remove disk name references like /vdb /vdd to avoid confusion when more disks are added
scenario = "{{'data':'{vdb}','db':'{dblv1}','db_vg':'{vgname}','wal':'{wallv1}','wal_vg':'{vgname}'}}," \
"{{'data':'{datalv1}','data_vg':'{vgname}','db':'{vdd2}','wal':'{vdd3}'}},{{'data':'{vdd1}'}}" \
.format(vdb=devices_dict.get('device1'), dblv1=db_lv1, vgname=vgname, wallv1=wal_lv1, datalv1=data_lv1,
vdd1=devices_dict.get('device3') + '1', vdd2=devices_dict.get('device3') + '2',
vdd3=devices_dict.get('device3') + '3')
return {'scenario': scenario, 'dmcrypt': dmcrypt}
def osd_scenario3_dmcrypt(osd, devices_dict):
"""
OSD scenario type6 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt(overridden to True)
"""
generated_sce_dict = osd_scenario3(osd, devices_dict, dmcrypt=True)
return {'scenario': generated_sce_dict.get('scenario'), 'dmcrypt': generated_sce_dict.get('dmcrypt')}
def osd_scenario4(osd, devices_dict, dmcrypt=False, batch=True):
"""
OSD scenario type7 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
batch: True by default
Returns:
generated scenario, dmcrypt,batch
"""
devices = devices_dict.get('devices')
devices = devices.split()
scenario = (', '.join("'" + item + "'" for item in devices))
return {'scenario': scenario, 'dmcrypt': dmcrypt, 'batch': batch}
def osd_scenario4_dmcyrpt(osd, devices_dict):
"""
OSD scenario type8 generator
Args:
osd: osd node
devices_dict: dict of devices of the osd node supplied
dmcrypt: False by default
Returns:
generated scenario, dmcrypt(overridden to True),batch
"""
generated_sce_dict = osd_scenario4(osd, devices_dict, dmcrypt=True)
return {'scenario': generated_sce_dict.get('scenario'),
'dmcrypt': generated_sce_dict.get('dmcrypt'),
'batch': generated_sce_dict.get('batch')}
osd_scenario_list = [
osd_scenario1,
osd_scenario1_dmcrypt,
osd_scenario2,
osd_scenario2_dmcrypt,
osd_scenario3_dmcrypt,
osd_scenario3_dmcrypt,
osd_scenario4,
osd_scenario4_dmcyrpt]
# add the scenario "osd_scenario3" back to list when https://bugzilla.redhat.com/show_bug.cgi?id=1822134 is fixed,
# dint see this race condition in dmcrypt scenario "osd_scenario3_dmcrypt" will remove that too if we hit the issue
```
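The simplest of the generators, `osd_scenario4`, only needs the device string, so its output can be shown without a real OSD node; the device names below are invented.
```python
# Standalone illustration of the osd_scenario4 string building; device names are made up.
devices = "/dev/vdb /dev/vdc /dev/vdd".split()
scenario = ', '.join("'" + item + "'" for item in devices)
print(scenario)   # '/dev/vdb', '/dev/vdc', '/dev/vdd'
print({'scenario': scenario, 'dmcrypt': False, 'batch': True})
```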
|
{
"source": "jennmald/HepC-BloodBasedDetection",
"score": 3
}
|
#### File: jennmald/HepC-BloodBasedDetection/classifier.py
```python
import time
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
'''
To Do:
tables and plots
implementation of appropriate classifier
'''
'''
Missing Value Treatment
'''
def clean_data(data):
num_rows = len(data)
rows_to_drop = []
for i in range(0, num_rows):
current_row = data.iloc[i]
for value in current_row:
if value == 'NA':
rows_to_drop.append(i)
# make list unique
rows_to_drop = list(set(rows_to_drop))
clean_dataset = data.drop(rows_to_drop)
return clean_dataset
'''
Summary Statistics:
used to understand the data well and get an overall
picture of what we expect the data to look like
'''
def mean(data):
'''
Modified mean method from Naive Bayes Classifer
'''
mean_dict = {}
for (col_name, col_data) in data.iteritems():
if all ([col_name != 'Category', col_name != 'Unnamed: 0', col_name != 'Sex']):
col_data = col_data.to_numpy()
col_data = col_data.astype(np.float64)
col_mean = np.mean(col_data)
mean_dict[col_name] = col_mean
return mean_dict
def standard_dev(data):
std_dict = {}
for (col_name, col_data) in data.iteritems():
if all ([col_name != 'Category', col_name != 'Unnamed: 0', col_name != 'Sex']):
col_data = col_data.to_numpy()
col_data = col_data.astype(np.float64)
std_dev = np.std(col_data)
std_dict[col_name] = std_dev
return std_dict
def normalize(data):
'''
Using Minimum and Maximum of column data
'''
norm_dataframe = data.copy()
for (col_name, col_data) in data.iteritems():
if all ([col_name != 'Category', col_name != 'Unnamed: 0', col_name != 'Sex']):
current_col = pd.to_numeric(data[col_name])
maximum = current_col.max()
minimum = current_col.min()
norm_dataframe[col_name] = (current_col- minimum)/(maximum-minimum)
return norm_dataframe
def covariance(col_x,col_y):
col_x = col_x.to_numpy()
col_y = col_y.to_numpy()
n = len(col_x)
multiplier = 1/(n-1)
x_bar = np.mean(col_x)
y_bar = np.mean(col_y)
total_sum = 0
for i in range(0,n):
total_sum += (col_x[i]- x_bar)*(col_y[i]-y_bar)
return (multiplier*total_sum)
def correlation(col_x,col_y):
cov = covariance(col_x,col_y)
std_dev_x = np.std(col_x)
std_dev_y = np.std(col_y)
return (cov/(std_dev_x*std_dev_y))
'''
print(data_frame.head())
print(data_frame.iloc[0])
print()
print(df_train.iloc[0])
print()
print(df_train.head())
print(df_test.iloc[0])
print()
print(df_test.head())
'''
data_frame = pd.read_csv('hcvdat0.csv', keep_default_na=False)
# remove rows with missing values
data_frame = clean_data(data_frame)
# converts all males to category 1 and females to category 0
data_frame['Sex'] = data_frame['Sex'].astype('category').cat.codes
'''
Mean and Standard Deviation are very far for each column
Approach: Normalize all data in the dataset columns
that are not category, name, and sex
'''
means = mean(data_frame)
stddevs = standard_dev(data_frame)
print('Unnormed means and standard deviation')
print(means)
print(stddevs)
norm_dataframe = normalize(data_frame)
norm_means = mean(norm_dataframe)
norm_std = standard_dev(norm_dataframe)
print('Normalized means and standard deviation')
print(norm_means)
print(norm_std)
df_train = norm_dataframe.sample(frac=1/3)
df_test = norm_dataframe[~(norm_dataframe.index.isin(df_train.index))]
'''
Now all data is within the range [0,1]
Compute correlation between all numeric columns
'''
column_names = ['Age', 'ALB', 'ALP', 'ALT', 'AST', 'BIL', 'CHE', 'CHOL', 'CREA', 'GGT', 'PROT']
for i in range(0, len(column_names)):
col_i = df_train[column_names[i]]
for j in range(0, i):
col_j = df_train[column_names[j]]
current_cor = correlation(col_i,col_j)
if current_cor< -0.5 or current_cor >0.5:
print(str(column_names[i]) + ', ' + str(column_names[j]) + ': ' + str(current_cor))
'''
All correlation coefficients are around zero which means we are safe to use
KNN and Naive Bayes. No set of columns is correlated with each other.
Make a few scatter plots
for i in range(0, len(column_names)):
col_i = df_train[column_names[i]]
for j in range(0, i):
col_j = df_train[column_names[j]]
plt.scatter(col_i,col_j)
plt.yticks(np.arange(col_j.min(), col_j.max()+0.05, 0.05))
plt.title('X:' + str(column_names[i]) + ' Y:' + str(column_names[j]))
plt.show()
'''
'''
KNN Implementation
'''
# want to ignore col 0, 1, and 3
def euclid_dist(v1, v2):
'''
Computes the Euclidean Distance of two vectors v1 and v2
Ignores the name and type column in distance calculation
Params:
v1 - list of attribute values
v2 - list of attribute values
Returns the euclidean distance of the two vectors
'''
dist = 0
for vi in range(2, len(v1)):
dist += (v1[vi] - v2[vi])**2
return math.sqrt(dist)
def find_neighbors(current_vector, possible_neighbors, max_neighbors):
'''
Finds all possible neighbors by storing the distances in a dictionary,
sorting the dictionary and finding the first k neighbors with the closest distances
Params:
current_vector - the row we are checking
possible_neighbors - the set of possible rows to check against
max_neighbors - the maximum number of neighbors per cluster
'''
all_distances = {}
for row in possible_neighbors:
all_distances[euclid_dist(current_vector, row)] = row
sorted_dists = dict(sorted(all_distances.items(), key=lambda item: item[0]))
keys = list(sorted_dists.values())
return keys[:max_neighbors]
def predict(current_vector, possible_neighbors, max_neighbors):
'''
Calls find_neighbors and then slices the class labels out
Finds the maximum of the class counts and returns the label for that class
Params:
current_vector - the row we are checking
possible_neighbors - the set of possible rows to check against
max_neighbors - the maximum number of neighbors per cluster
'''
current_neighbors = find_neighbors(current_vector, possible_neighbors, max_neighbors)
classes = [row[1] for row in current_neighbors]
return max(classes, key=classes.count)
def accuracy(actual, predicted):
'''
Computes the accuracy of the given model
Params:
actual - list of actual values
predicted - list of predicted values
Returns the percent of matching values
'''
total = 0
for i in range(len(predicted)):
if predicted[i] == actual[i]:
total+=1
return (float(total)/float(len(predicted))) * 100.0
knn_df_train = df_train[:].values
knn_df_test = df_test[:].values
# loop through the number of neighbors and the rows
k_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for k in k_list:
actual_labels = []
predicted_labels = []
start_time = time.time()
for row in knn_df_train:
actual_labels.append(row[1])
output = predict(row, knn_df_test, k)
predicted_labels.append(output)
print('The accuracy for k='+ str(k)+ ' is: ' + str(accuracy(actual_labels,predicted_labels)))
print("Ran for %s seconds" % (time.time() - start_time))
'''
Naive Bayes Implementation
'''
def statistics(training_data):
'''
Computes the length, mean, and standard deviation of each class in the training data
Params:
training_data - the data we wish to analyze
Returns a dictionary with the class name and its information in a list
'''
class_stats = {}
df_class_type = training_data.groupby('Category')
# the HCV dataset has these 5 category labels
classes = ['0=Blood Donor','0s=suspect Blood Donor','1=Hepatitis','2=Fibrosis','3=Cirrhosis']
for c in classes:
# sort dataframe by class type
current_class_data = df_class_type.get_group(c)
# drop type column for statistics
current_class_data = current_class_data.drop(['Category', 'Unnamed: 0', 'Sex'], axis=1)
# store the length of the class data for later use
col_stats = [len(current_class_data)]
# loop through each attribute and find the mean and standard deviation
for (col_name, col_data) in current_class_data.iteritems():
col_data = col_data.to_numpy()
col_data = col_data.astype(float)
mean = np.mean(col_data)
std_dev = np.std(col_data)
col_stats.append([mean,std_dev])
class_stats[str(c)] = col_stats
return class_stats
def pdf(x, mean, std_dev):
'''
Computes the relative likelihood that another value
would be close to the given value
This helps in finding what class a given sample would fall into
We are using normal distribution for this classifier
Params:
x - the value we want to check the relative likelihood of
mean - mean of the class we want to check against
std_dev - standard deviation of the class we want to check against
Returns the likelihood value
'''
y = math.exp(-((x-mean)**2 / (2*std_dev**2)))
return (1/ (math.sqrt(2*math.pi)*std_dev )) * y
def compute_probability(current_row, class_statistics):
'''
Computes the class probabilities for the current row
Returns a dictionary of the probabilities for each class labeled with the class label
Params:
current_row - the row to check 'closeness' of
class_statistics - the dictionary holding the number of instances per class and the
mean and standard deviation of each column of the class training dataset
'''
class_probabilties = {}
# gets the class label and then values holds the list of [length, [mean, std], [mean,std]...]
for class_label, values in class_statistics.items():
# this computes the P(class_label) and stores it as the initial value in the dictionary
class_probabilties[class_label] = float(values[0])/total_rows
# now find the pdf of each value in the current row using the mean and standard deviation of each column
for i in range(2, len(values)):
# 1 skips the length stored in the first position
if i != 3:
mean, std_dev = values[i]
class_probabilties[class_label] = class_probabilties[class_label] * pdf(float(current_row[i]), mean, std_dev)
return class_probabilties
def naive_prediction(current_row, class_statistics):
'''
Finds the probabilites per class and find the maximum one, this becomes the class label
Params:
current_row - the row to check 'closeness' of
class_statistics - the dictionary holding the number of instances per class and the
mean and standard deviation of each column of the class training dataset
Returns the predicted class label
'''
probabilities = compute_probability(current_row, class_statistics)
class_label = max(probabilities, key=probabilities.get)
return class_label
nb_train = data_frame.sample(frac=1/3)
nb_test = data_frame[~(data_frame.index.isin(nb_train.index))]
'''
total_rows = len(nb_train)
class_stats = statistics(nb_train)
nb_test = nb_test[:].values
# empty lists for accuracy checking
naive_predictions = []
naive_actual = []
# loop through each data instance
for row in nb_test:
c = naive_prediction(row, class_stats)
naive_predictions.append(c)
naive_actual.append(row[1])
# output results
print('Naive Bayes classifier has accuracy: ' + str(accuracy(naive_actual, naive_predictions)))
'''
total_rows = len(df_train)
class_stats = statistics(df_train)
df_test = df_test[:].values
# empty lists for accuracy checking
naive_predictions = []
naive_actual = []
# loop through each data instance
for row in df_test:
c = naive_prediction(row, class_stats)
naive_predictions.append(c)
naive_actual.append(row[1])
# output results
print('Naive Bayes classifier has accuracy: ' + str(accuracy(naive_actual, naive_predictions)))
```
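Two of the building blocks above, min-max normalization and the Gaussian likelihood, are easy to sanity-check in isolation; the numbers below are illustrative only.
```python
# Standalone sanity checks for the normalization and pdf logic; values are illustrative.
import math
import numpy as np

# Min-max normalization maps a column onto [0, 1].
col = np.array([10.0, 20.0, 40.0])
normed = (col - col.min()) / (col.max() - col.min())
print(normed)                        # [0.  0.333...  1.]

# Gaussian pdf: at the mean of a standard normal the density is 1/sqrt(2*pi) ~= 0.3989.
def pdf(x, mean, std_dev):
    y = math.exp(-((x - mean) ** 2 / (2 * std_dev ** 2)))
    return (1 / (math.sqrt(2 * math.pi) * std_dev)) * y

print(round(pdf(0.0, 0.0, 1.0), 4))  # 0.3989
```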
|
{
"source": "jennmald/tiny_python_projects",
"score": 3
}
|
#### File: tiny_python_projects/12_ransom/ransom.py
```python
import argparse
import os
import random
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Ransom',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text',
metavar='text',
help='Input text or file')
parser.add_argument('-s',
'--seed',
help='Random Seed',
metavar='int',
type=int,
default=None)
args = parser.parse_args()
if os.path.isfile(args.text):
args.text = open(args.text).read().rstrip()
return args
def choose(char):
if random.choice([0,1]):
return char.upper()
else:
return char.lower()
def test_choose():
state = random.getstate()
random.seed(1)
assert choose('a') == 'a'
assert choose('b') == 'b'
assert choose('c') == 'c'
assert choose('d') == 'd'
random.setstate(state)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
random.seed(args.seed)
text = args.text
ransom_text = ''
for char in args.text:
ransom_text+=choose(char)
print(ransom_text)
# --------------------------------------------------
if __name__ == '__main__':
main()
```
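A quick standalone demonstration of the `choose` logic; the seed is arbitrary and the exact casing it produces is not asserted.
```python
# Standalone demo of the random-casing idea; the seed is arbitrary.
import random

def choose(char):
    # Same behavior as above: a coin flip decides upper or lower case.
    return char.upper() if random.choice([0, 1]) else char.lower()

random.seed(42)
print(''.join(choose(c) for c in "give us the money"))  # casing varies with the seed
```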
|
{
"source": "jennndol/scaling-octo-broccoli",
"score": 3
}
|
#### File: jennndol/scaling-octo-broccoli/recognizer.py
```python
import cv2
import sqlite3
import os
import asyncio
import websockets
import base64
async def ___main___():
async with websockets.connect(
'ws://localhost:8000') as websocket:
connection = sqlite3.connect('database.db')
query = connection.cursor()
dataset = "data/model.yml"
if not os.path.isfile(dataset):
print("run python trainer.py first")
exit(0)
frontal = cv2.CascadeClassifier('module/haarcascade_frontalface_default.xml')
profile = cv2.CascadeClassifier('module/haarcascade_profileface.xml')
cap = cv2.VideoCapture(1)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(dataset)
while True:
ret, img = cap.read()
img = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = frontal.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 3)
id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
query.execute("SELECT name FROM users WHERE id = (?);", (id,))
result = query.fetchall()
print(u'A person named ' + str(result[0][0]) + ' detected with confidence ' + str(round(confidence,2)) )
name = ''
try:
name = str(result[0][0]) + ' ' + str(round(confidence, 2))
except IndexError:
name = 'Person'
if confidence > 100:
name = 'Person'
cv2.putText(img, name, (x+2, y+h-5), cv2.FONT_HERSHEY_DUPLEX, 0.4, (0, 0, 255), 1)
else:
cv2.putText(img, name, (x+2, y+h-5), cv2.FONT_HERSHEY_DUPLEX, 0.4, (150, 255, 0), 1)
encoded, buffer = cv2.imencode('.jpg', img)
jpg_as_text = base64.b64encode(buffer)
await websocket.send(jpg_as_text)
cv2.imshow('Face Recognizer', img)
            if cv2.waitKey(30) & 0xff == 27:  # Esc key: leave the loop so the cleanup below is reachable
                break
cap.release()
cv2.destroyAllWindows()
asyncio.get_event_loop().run_until_complete(___main___())
asyncio.get_event_loop().run_forever()
```
|
{
"source": "jennnu/r-celebs",
"score": 3
}
|
#### File: jennnu/r-celebs/application.py
```python
import os.path
import tornado.ioloop
import tornado.web
import re, praw, requests, os, glob, sys
from bs4 import BeautifulSoup
def addimagetolist(submissions, image_list):
# Process all the submissions from the front page
for submission in submissions:
# Check for all the cases where we will skip a submission:
if "imgur.com/" not in submission.url:
continue # skip non-imgur submissions
if 'http://imgur.com/a/' in submission.url:
# This is an album submission.
albumId = submission.url[len('http://imgur.com/a/'):]
htmlSource = requests.get(submission.url).text
soup = BeautifulSoup(htmlSource)
matches = soup.select('.album-view-image-link a')
for match in matches:
imageUrl = match['href']
if imageUrl.endswith('gifv'):
imageUrl=imageUrl[:-4]
imageUrl=imageUrl+'webm'
image_list.append(['http:' + imageUrl, submission.title, submission])
elif 'http://i.imgur.com/' in submission.url:
if submission.url.endswith('gifv'):
submission.url=submission.url[:-4]
submission.url=submission.url+'webm'
image_list.append([submission.url, submission.title, submission])
elif submission.url.endswith(tuple(['gif','png','jpg','jpeg'])):
image_list.append([submission.url, submission.title, submission])
else:
submission.url=submission.url+'jpg'
image_list.append([submission.url, submission.title, submission])
elif 'http://imgur.com/' in submission.url:
# This is an Imgur page with a single image.
htmlSource = requests.get(submission.url).text # download the image's page
soup = BeautifulSoup(htmlSource)
imageUrl =''
for a in soup.select('.image a'):
imageUrl = a['href']
if imageUrl.startswith('//'):
# if no schema is supplied in the url, prepend 'http:' to it
imageUrl = 'http:' + imageUrl
if imageUrl.endswith('gifv'):
imageUrl=imageUrl[:-4]
imageUrl=imageUrl+'webm'
image_list.append([imageUrl, submission.title, submission])
if imageUrl.endswith(tuple(['gif','png','jpg','jpeg'])):
image_list.append([imageUrl, submission.title, submission])
else:
imageUrl=imageUrl+'jpg'
image_list.append([imageUrl, submission.title, submission])
else:
            print(submission.url)  # unhandled URL; print() as a function also works on Python 3
class MainHandler(tornado.web.RequestHandler):
def get(self):
image_list = []
imgurUrlPattern = re.compile(r'(http://i.imgur.com/(.*))(\?.*)?')
r = praw.Reddit(user_agent='test') # Note: Be sure to change the user-agent to something unique.
submissions = r.get_subreddit('funny').get_hot(limit=50)
# Or use one of these functions:
# .get_top_from_year(limit=25)
# .get_top_from_month(limit=25)
# .get_top_from_week(limit=25)
# .get_top_from_day(limit=25)
# .get_top_from_hour(limit=25)
# .get_top_from_all(limit=25)
addimagetolist(submissions, image_list)
self.render("index.html",
image_list=image_list)
class TopHandler(tornado.web.RequestHandler):
def get(self, param1):
image_list = []
imgurUrlPattern = re.compile(r'(http://i.imgur.com/(.*))(\?.*)?')
r = praw.Reddit(user_agent='test') # Note: Be sure to change the user-agent to something unique.
        if param1.isdigit() and 1 <= int(param1) <= 500:
            number = int(param1)
submissions = r.get_subreddit('funny').get_top_from_day(limit=number)
addimagetolist(submissions, image_list)
self.render("index.html",
image_list=image_list)
else:
submissions = r.get_subreddit('funny').get_top_from_day(limit=50)
addimagetolist(submissions, image_list)
self.render("index.html",
image_list=image_list)
class NewHandler(tornado.web.RequestHandler):
def get(self, param1):
image_list = []
imgurUrlPattern = re.compile(r'(http://i.imgur.com/(.*))(\?.*)?')
r = praw.Reddit(user_agent='test') # Note: Be sure to change the user-agent to something unique.
        if param1.isdigit() and 1 <= int(param1) <= 500:
            number = int(param1)
submissions = r.get_subreddit('funny').get_new(limit=number)
addimagetolist(submissions, image_list)
self.render("index.html",
image_list=image_list)
else:
submissions = r.get_subreddit('funny').get_new(limit=50)
addimagetolist(submissions, image_list)
self.render("index.html",
image_list=image_list)
handlers = [
(r"/", MainHandler),
(r"/top/(?P<param1>[^\/]+)", TopHandler),
(r"/new/(?P<param1>[^\/]+)", NewHandler)
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
application = tornado.web.Application(handlers, **settings)
if __name__ == "__main__":
    port = int(os.environ.get('PORT', 8888))  # avoid KeyError when PORT is unset
application.listen(port)
tornado.ioloop.IOLoop.instance().start()
```
|
{
"source": "jennomics/single-cell",
"score": 3
}
|
#### File: python/bin/mt_prepend_gtf.py
```python
import argparse
import gtfez
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'gtf',
type=argparse.FileType('r'),
help='location of GTF file',
)
args = parser.parse_args()
return args
def main(args):
for line in args.gtf:
if line.startswith('#'):
print(line.rstrip())
continue
record = gtfez.GTFRecord(line)
for seqname in ('MT', 'PT'):
prefix = seqname + '-'
if record.seqname.upper() == seqname.upper() and record.attributes is not None:
if ('gene_name' in record.attributes
and not record.attributes['gene_name'].upper().startswith(prefix.upper())):
this_prefix = record.seqname + '-'
record.attributes['gene_name'] = (
this_prefix + record.attributes['gene_name']
)
print(record)
# command line interface (making this a modulino)
if __name__ == '__main__':
main(parse_args())
```
#### File: python/gtfez/__init__.py
```python
from collections import OrderedDict
import re
class GTFRecord:
"""
A single line of a GTF file.
Attributes:
seqname: name of sequence (str)
source: as in GTF specification (str)
feature: as in GTF specification (str)
start: as in GTF specification (int)
end: as in GTF specification (int)
score: as in GTF specification (str)
strand: as in GTF specification (str)
frame: as in GTF specification (str)
attributes: dict mapping attribute names to their values
in the attributes field of the GTF record
"""
def __init__(self, record_string):
"""
Makes a new GTFRecord from a single line of a GTF file.
Args:
record_string (str): a single line of a GTF file
"""
fields = record_string.split('\t')
if len(fields) < 8:
            raise ValueError('GTF record must have at least 8 tab-separated fields')
self.seqname, self.source, self.feature = fields[0:3]
self.start, self.end = map(int, fields[3:5])
self.score, self.strand, self.frame = fields[5:8]
        if len(fields) >= 9:
            self.attributes = parse_attributes(fields[8])
        else:
            self.attributes = None  # keep the attribute defined so callers can check `is not None`
def __str__(self):
attributes_string = attributes_dict_to_string(self.attributes)
return '\t'.join([
self.seqname,
self.source,
self.feature,
str(self.start),
str(self.end),
self.score,
self.strand,
self.frame,
attributes_string,
])
attributes_re = re.compile(r'^(\w+) "(.+)"(?:;|$)')  # raw string; allow optional uncaptured trailing semicolon
def parse_attributes(attributes_string):
"""
Parses the contents of a GTF attributes field into a dict.
Args:
attributes_string: The attributes string of a GTF record;
i.e., the 9th field
Returns:
an OrderedDict mapping attribute keywords to their values
>>> parse_attributes('gene_name "ESR1"; gene_biotype "protein_coding";')
OrderedDict([('gene_name', 'ESR1'), ('gene_biotype', 'protein_coding')])
"""
# break attributes into individual 'key "val"' strings (assuming the GTF 2.2 standard. See the "attributes" section here: https://mblab.wustl.edu/GTF22.html)
attribute_strings = attributes_string.split('; ')
# read all of the pairs into a dictionary and return
attributes_dict = OrderedDict()
for s in attribute_strings:
match = attributes_re.match(s)
attributes_dict[match.group(1)] = match.group(2)
return attributes_dict
def attributes_dict_to_string(attributes_dict):
"""
Converts an attributes dict back into GTF string format.
Args:
attributes_dict: a dict mapping attribute keywords to their
values
Returns:
a string of the given attributes in GTF format
>>> attributes_dict_to_string({
... 'gene_name': 'ESR1',
... 'gene_biotype': 'protein_coding',
... })
'gene_name "ESR1"; gene_biotype "protein_coding";'
"""
output_strings = []
for key, value in attributes_dict.items():
output_strings.append('{} "{}";'.format(key, value))
return ' '.join(output_strings)
if __name__ == "__main__":
import doctest
doctest.testmod()
```
|
{
"source": "jennranta/pyreaclib",
"score": 4
}
|
#### File: pyreaclib/rates/rate.py
```python
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from periodictable import elements
class Tfactors(object):
""" precompute temperature factors for speed """
def __init__(self, T):
""" return the Tfactors object. Here, T is temperature in Kelvin """
self.T9 = T/1.e9
self.T9i = 1.0/self.T9
self.T913i = self.T9i**(1./3.)
self.T913 = self.T9**(1./3.)
self.T953 = self.T9**(5./3.)
self.lnT9 = np.log(self.T9)
class SingleSet(object):
""" a set in Reaclib is one piece of a rate, in the form
lambda = exp[ a_0 + sum_{i=1}^5 a_i T_9**(2i-5)/3 + a_6 log T_9]
A single rate in Reaclib can be composed of multiple sets
"""
def __init__(self, a, label=None):
"""here a is iterable (e.g., list or numpy array), storing the
coefficients, a0, ..., a6
"""
self.a = a
self.label = label
def f(self):
"""
return a function for this set -- note: Tf here is a Tfactors
object
"""
return lambda tf: np.exp(self.a[0] +
self.a[1]*tf.T9i +
self.a[2]*tf.T913i +
self.a[3]*tf.T913 +
self.a[4]*tf.T9 +
self.a[5]*tf.T953 +
self.a[6]*tf.lnT9)
def set_string(self, prefix="set", plus_equal=False):
"""
return a string containing the python code for this set
"""
if plus_equal:
string = "{} += np.exp( ".format(prefix)
else:
string = "{} = np.exp( ".format(prefix)
string += " {}".format(self.a[0])
if not self.a[1] == 0.0: string += " + {}*tf.T9i".format(self.a[1])
if not self.a[2] == 0.0: string += " + {}*tf.T913i".format(self.a[2])
if not self.a[3] == 0.0: string += " + {}*tf.T913".format(self.a[3])
if not (self.a[4] == 0.0 and self.a[5] == 0.0 and self.a[6] == 0.0):
string += "\n{} ".format(len(prefix)*" ")
if not self.a[4] == 0.0: string += " + {}*tf.T9".format(self.a[4])
if not self.a[5] == 0.0: string += " + {}*tf.T953".format(self.a[5])
if not self.a[6] == 0.0: string += " + {}*tf.lnT9".format(self.a[6])
string += ")"
return string
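# Illustrative sketch (not part of the original module; hypothetical coefficients):
# evaluating a SingleSet with only a_0 non-zero gives simply exp(a_0).
#
#   tf = Tfactors(3.e8)                                   # T = 3e8 K, i.e. T9 = 0.3
#   s = SingleSet([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], label="demo")
#   s.f()(tf)                                             # == np.exp(1.0)
#   print(s.set_string(prefix="rate"))                    # -> "rate = np.exp(  1.0)"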
class Nucleus(object):
"""
a nucleus that participates in a reaction -- we store it in a
class to hold its properties, define a sorting, and give it a
pretty printing string
"""
def __init__(self, name):
self.raw = name
# element symbol and atomic weight
if name == "p":
self.el = "H"
self.A = 1
self.short_spec_name = "h1"
elif name == "d":
self.el = "H"
self.A = 2
self.short_spec_name = "h2"
elif name == "t":
self.el = "H"
self.A = 3
self.short_spec_name = "h3"
elif name == "n":
self.el = "n"
self.A = 1
self.short_spec_name = "n"
else:
            e = re.match(r"([a-zA-Z]*)(\d*)", name)
self.el = e.group(1).title() # chemical symbol
self.A = int(e.group(2))
self.short_spec_name = name
# atomic number comes from periodtable
i = elements.isotope("{}-{}".format(self.A, self.el))
self.Z = i.number
self.N = self.A - self.Z
# long name
if i.name == 'neutron':
self.spec_name = i.name
else:
self.spec_name = '{}-{}'.format(i.name, self.A)
# latex formatted style
self.pretty = r"{{}}^{{{}}}\mathrm{{{}}}".format(self.A, self.el)
def __repr__(self):
return self.raw
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return self.raw == other.raw
def __lt__(self, other):
if not self.Z == other.Z:
return self.Z < other.Z
else:
return self.A < other.A
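# Illustrative sketch (comment only, not part of the original module): Nucleus("he4")
# parses to el="He", A=4, Z=2, N=2; sorting orders by Z then A, so
# sorted([Nucleus("he4"), Nucleus("p")]) puts the proton first.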
class Rate(object):
""" a single Reaclib rate, which can be composed of multiple sets """
def __init__(self, file):
self.file = os.path.basename(file)
self.chapter = None # the Reaclib chapter for this reaction
self.original_source = None # the contents of the original rate file
self.reactants = []
self.products = []
self.sets = []
# Tells if this rate is eligible for screening
# using screenz.f90 provided by BoxLib Microphysics.
# If not eligible for screening, set to None
# If eligible for screening, then
# Rate.ion_screen is a 2-element list of Nucleus objects for screening
self.ion_screen = None
idx = self.file.rfind("-")
self.fname = self.file[:idx].replace("--","-").replace("-","_")
self.Q = 0.0
# read in the file, parse the different sets and store them as
# SingleSet objects in sets[]
f = open(file, "r")
lines = f.readlines()
self.original_source = "".join(lines)
# first line is the chapter
self.chapter = lines[0].strip()
# catch table prescription
if self.chapter != "t":
self.chapter = int(self.chapter)
        # remove any blank lines
set_lines = [l for l in lines[1:] if not l.strip() == ""]
if self.chapter == "t":
# e1 -> e2, Tabulated
s1 = set_lines.pop(0)
s2 = set_lines.pop(0)
s3 = set_lines.pop(0)
s4 = set_lines.pop(0)
s5 = set_lines.pop(0)
f = s1.split()
self.reactants.append(Nucleus(f[0]))
self.products.append(Nucleus(f[1]))
self.table_file = s2.strip()
self.table_header_lines = int(s3.strip())
self.table_rhoy_lines = int(s4.strip())
self.table_temp_lines = int(s5.strip())
self.table_num_vars = 6 # Hard-coded number of variables in tables for now.
self.table_index_name = 'j_{}_{}'.format(self.reactants[0], self.products[0])
else:
# the rest is the sets
first = 1
while len(set_lines) > 0:
# sets are 3 lines long
s1 = set_lines.pop(0)
s2 = set_lines.pop(0)
s3 = set_lines.pop(0)
# first line of a set has up to 6 nuclei, then the label,
# and finally the Q value
f = s1.split()
Q = f.pop()
label = f.pop()
if first:
self.Q = Q
# what's left are the nuclei -- their interpretation
# depends on the chapter
if self.chapter == 1:
# e1 -> e2
self.reactants.append(Nucleus(f[0]))
self.products.append(Nucleus(f[1]))
elif self.chapter == 2:
# e1 -> e2 + e3
self.reactants.append(Nucleus(f[0]))
self.products += [Nucleus(f[1]), Nucleus(f[2])]
elif self.chapter == 3:
# e1 -> e2 + e3 + e4
self.reactants.append(Nucleus(f[0]))
self.products += [Nucleus(f[1]), Nucleus(f[2]), Nucleus(f[3])]
elif self.chapter == 4:
# e1 + e2 -> e3
self.reactants += [Nucleus(f[0]), Nucleus(f[1])]
self.products.append(Nucleus(f[2]))
elif self.chapter == 5:
# e1 + e2 -> e3 + e4
self.reactants += [Nucleus(f[0]), Nucleus(f[1])]
self.products += [Nucleus(f[2]), Nucleus(f[3])]
elif self.chapter == 6:
# e1 + e2 -> e3 + e4 + e5
self.reactants += [Nucleus(f[0]), Nucleus(f[1])]
self.products += [Nucleus(f[2]), Nucleus(f[3]), Nucleus(f[4])]
elif self.chapter == 7:
# e1 + e2 -> e3 + e4 + e5 + e6
self.reactants += [Nucleus(f[0]), Nucleus(f[1])]
self.products += [Nucleus(f[2]), Nucleus(f[3]),
Nucleus(f[4]), Nucleus(f[5])]
elif self.chapter == 8:
# e1 + e2 + e3 -> e4
self.reactants += [Nucleus(f[0]), Nucleus(f[1]), Nucleus(f[2])]
self.products.append(Nucleus(f[3]))
elif self.chapter == 9:
# e1 + e2 + e3 -> e4 + e5
self.reactants += [Nucleus(f[0]), Nucleus(f[1]), Nucleus(f[2])]
self.products += [Nucleus(f[3]), Nucleus(f[4])]
elif self.chapter == 10:
# e1 + e2 + e3 + e4 -> e5 + e6
self.reactants += [Nucleus(f[0]), Nucleus(f[1]),
Nucleus(f[2]), Nucleus(f[3])]
self.products += [Nucleus(f[4]), Nucleus(f[5])]
elif self.chapter == 11:
# e1 -> e2 + e3 + e4 + e5
self.reactants.append(Nucleus(f[0]))
self.products += [Nucleus(f[1]), Nucleus(f[2]),
Nucleus(f[3]), Nucleus(f[4])]
first = 0
# the second line contains the first 4 coefficients
# the third lines contains the final 3
# we can't just use split() here, since the fields run into one another
n = 13 # length of the field
a = [s2[i:i+n] for i in range(0, len(s2), n)]
a += [s3[i:i+n] for i in range(0, len(s3), n)]
a = [float(e) for e in a if not e.strip() == ""]
self.sets.append(SingleSet(a, label=label))
# compute self.prefactor and self.dens_exp from the reactants
self.prefactor = 1.0 # this is 1/2 for rates like a + a (double counting)
self.inv_prefactor = 1
for r in set(self.reactants):
self.inv_prefactor = self.inv_prefactor * np.math.factorial(self.reactants.count(r))
self.prefactor = self.prefactor/float(self.inv_prefactor)
self.dens_exp = len(self.reactants)-1
# determine if this rate is eligible for screening
nucz = []
for parent in self.reactants:
if parent.Z != 0:
nucz.append(parent)
if len(nucz) > 1:
nucz.sort(key=lambda x: x.Z)
self.ion_screen = []
self.ion_screen.append(nucz[0])
self.ion_screen.append(nucz[1])
self.string = ""
self.pretty_string = r"$"
# put p, n, and alpha second
treactants = []
for n in self.reactants:
if n.raw not in ["p", "he4", "n"]:
treactants.insert(0, n)
else:
treactants.append(n)
for n, r in enumerate(treactants):
self.string += "{}".format(r)
self.pretty_string += r"{}".format(r.pretty)
if not n == len(self.reactants)-1:
self.string += " + "
self.pretty_string += r" + "
self.string += " --> "
self.pretty_string += r" \rightarrow "
for n, p in enumerate(self.products):
self.string += "{}".format(p)
self.pretty_string += r"{}".format(p.pretty)
if not n == len(self.products)-1:
self.string += " + "
self.pretty_string += r" + "
self.pretty_string += r"$"
def __repr__(self):
return self.string
def eval(self, T):
""" evauate the reaction rate for temperature T """
tf = Tfactors(T)
r = 0.0
for s in self.sets:
f = s.f()
r += f(tf)
return r
def get_rate_exponent(self, T0):
"""
for a rate written as a power law, r = r_0 (T/T0)**nu, return
nu corresponding to T0
"""
# nu = dln r /dln T, so we need dr/dT
r1 = self.eval(T0)
dT = 1.e-8*T0
r2 = self.eval(T0 + dT)
drdT = (r2 - r1)/dT
return (T0/r1)*drdT
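    # Worked note (not part of the original module): for a pure power law
    # r(T) = r0*(T/T0)**nu the finite difference above recovers nu, since
    # nu = d(ln r)/d(ln T) = (T/r) * dr/dT evaluated at T0.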
def plot(self, Tmin=1.e7, Tmax=1.e10):
T = np.logspace(np.log10(Tmin), np.log10(Tmax), 100)
r = np.zeros_like(T)
for n in range(len(T)):
r[n] = self.eval(T[n])
plt.loglog(T, r)
plt.xlabel(r"$T$")
if self.dens_exp == 0:
            plt.ylabel(r"$\tau$")
elif self.dens_exp == 1:
plt.ylabel(r"$N_A <\sigma v>$")
elif self.dens_exp == 2:
plt.ylabel(r"$N_A^2 <n_a n_b n_c v>$")
plt.title(r"{}".format(self.pretty_string))
plt.show()
```
#### File: templates/sundials-cvode/plot_weak_table.py
```python
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--emission_infile',type=str, help='The emission input file to plot.')
parser.add_argument('--capture_infile',type=str, help='The capture input file to plot.')
args = parser.parse_args()
try: emt_file = open(args.emission_infile, 'r')
except: raise
try: cap_file = open(args.capture_infile, 'r')
except: raise
def read_rate_file(infile):
## Process header
d = []
# Get the number of temperature and rho*ye points
n_temp = int(infile.readline().strip())
n_rhoy = int(infile.readline().strip())
# Eat header
infile.readline()
# Get data
    for i in range(n_temp):
dt = {}
dt['log_lrhoy'] = []
dt['log_ltemp'] = 0.0
dt['rate'] = []
        for j in range(n_rhoy):
l = infile.readline().strip().split()
dt['log_lrhoy'].append(float(l[0]))
dt['log_ltemp'] = float(l[1])
dt['rate'].append(float(l[2]))
dt['log_lrhoy'] = np.array(dt['log_lrhoy'])
dt['rate'] = np.array(dt['rate'])
d.append(dt)
return d
d_emt = read_rate_file(emt_file)
d_cap = read_rate_file(cap_file)
emt_file.close()
cap_file.close()
## Plot emission and capture rates
fig = plt.figure()
ax=fig.add_subplot(111)
for dt in d_emt:
ax.plot(dt['log_lrhoy'], dt['rate'], '-.')
for dt in d_cap:
ax.plot(dt['log_lrhoy'], dt['rate'], '-')
plt.xlabel('$\\mathrm{Log_{10}~\\rho Y_e~[g~cm^{-3}]}$')
plt.ylabel('$\\mathrm{Log_{10}~\\lambda~[s^{-1}]}$')
ax.set_ylim([-25,0])
plt.tight_layout()
plt.title('A=23')
plt.savefig('output_table_rates.pdf')
```
|
{
"source": "jennur/invenio-oauthclient",
"score": 2
}
|
#### File: invenio_oauthclient/contrib/globus.py
```python
from flask import current_app, redirect, url_for
from flask_login import current_user
from invenio_db import db
from invenio_oauthclient.contrib.settings import OAuthSettingsHelper
from invenio_oauthclient.errors import OAuthResponseError
from invenio_oauthclient.handlers.rest import response_handler
from invenio_oauthclient.handlers.utils import \
require_more_than_one_external_account
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.utils import oauth_link_external_id, \
oauth_unlink_external_id
class GlobusOAuthSettingsHelper(OAuthSettingsHelper):
"""Default configuration for Globus OAuth provider."""
external_method = "globus"
def __init__(self, title=None, description=None, base_url=None,
app_key=None, precedence_mask=None):
"""Constructor."""
super().__init__(
title or "Globus",
description or "Research data management simplified.",
base_url or "https://auth.globus.org/v2/",
app_key or "GLOBUS_APP_CREDENTIALS",
request_token_params={"scope": "openid email profile"},
precedence_mask=precedence_mask,
)
def get_handlers(self):
"""Return Globus auth handlers."""
return dict(
authorized_handler='invenio_oauthclient.handlers'
':authorized_signup_handler',
disconnect_handler='invenio_oauthclient.contrib.globus'
':disconnect_handler',
signup_handler=dict(
info='invenio_oauthclient.contrib.globus:account_info',
setup='invenio_oauthclient.contrib.globus:account_setup',
view='invenio_oauthclient.handlers:signup_handler',
)
)
def get_rest_handlers(self):
"""Return Globus auth REST handlers."""
return dict(
authorized_handler='invenio_oauthclient.handlers.rest'
':authorized_signup_handler',
disconnect_handler='invenio_oauthclient.contrib.globus'
':disconnect_rest_handler',
signup_handler=dict(
info='invenio_oauthclient.contrib.globus:account_info',
setup='invenio_oauthclient.contrib.globus:account_setup',
view='invenio_oauthclient.handlers.rest:signup_handler',
),
response_handler='invenio_oauthclient.handlers.rest'
':default_remote_response_handler',
authorized_redirect_url='/',
disconnect_redirect_url='/',
signup_redirect_url='/',
error_redirect_url='/'
)
@property
def user_info_url(self):
"""Return the URL to fetch user info."""
return f"{self.base_url}oauth2/userinfo"
@property
def user_identity_url(self):
"""Return the URL to fetch user identity."""
return f"{self.base_url}api/identities"
_globus_app = GlobusOAuthSettingsHelper()
BASE_APP = _globus_app.base_app
GLOBUS_USER_INFO_URL = _globus_app.user_info_url
GLOBUS_USER_ID_URL = _globus_app.user_identity_url
GLOBUS_EXTERNAL_METHOD = _globus_app.external_method
"""Kept only for backward compat, they should not be used."""
REMOTE_APP = _globus_app.remote_app
"""Globus remote application configuration."""
REMOTE_REST_APP = _globus_app.remote_rest_app
"""Globus remote rest application configuration."""
def get_dict_from_response(response):
"""Check for errors in the response and return the resulting JSON."""
if getattr(response, '_resp') and response._resp.code > 400:
raise OAuthResponseError(
'Application mis-configuration in Globus', None, response
)
return response.data
def get_user_info(remote):
"""Get user information from Globus.
See the docs here for v2/oauth/userinfo:
https://docs.globus.org/api/auth/reference/
"""
response = remote.get(_globus_app.user_info_url)
user_info = get_dict_from_response(response)
response.data['username'] = response.data['preferred_username']
if '@' in response.data['username']:
user_info['username'], _ = response.data['username'].split('@')
return user_info
def get_user_id(remote, email):
"""Get the Globus identity for a users given email.
A Globus ID is a UUID that can uniquely identify a Globus user. See the
docs here for v2/api/identities
https://docs.globus.org/api/auth/reference/
"""
try:
url = '{}?usernames={}'.format(_globus_app.user_identity_url, email)
user_id = get_dict_from_response(remote.get(url))
return user_id['identities'][0]['id']
except KeyError:
# If we got here the response was successful but the data was invalid.
# It's likely the URL is wrong but possible the API has changed.
raise OAuthResponseError('Failed to fetch user id, likely server '
'mis-configuration', None, remote)
def account_info(remote, resp):
"""Retrieve remote account information used to find local user.
It returns a dictionary with the following structure:
.. code-block:: python
{
'user': {
'email': '...',
'profile': {
'username': '...',
'full_name': '...',
}
},
'external_id': 'globus-unique-identifier',
'external_method': 'globus',
}
Information inside the user dictionary are available for other modules.
For example, they are used from the module invenio-userprofiles to fill
the user profile.
:param remote: The remote application.
:param resp: The response.
:returns: A dictionary with the user information.
"""
info = get_user_info(remote)
return {
'user': {
'email': info['email'],
'profile': {
'username': info['username'],
'full_name': info['name']
},
},
'external_id': get_user_id(remote, info['preferred_username']),
'external_method': _globus_app.external_method
}
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
"""
info = get_user_info(remote)
user_id = get_user_id(remote, info['preferred_username'])
with db.session.begin_nested():
token.remote_account.extra_data = {
'login': info['username'],
'id': user_id}
# Create user <-> external id link.
oauth_link_external_id(
token.remote_account.user, dict(
id=user_id,
method=_globus_app.external_method)
)
@require_more_than_one_external_account
def _disconnect(remote, *args, **kwargs):
"""Handle unlinking of remote account.
:param remote: The remote application.
:returns: The HTML response.
"""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
remote_account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
external_ids = [i.id for i in current_user.external_identifiers
if i.method == _globus_app.external_method]
if external_ids:
oauth_unlink_external_id(dict(id=external_ids[0],
method=_globus_app.external_method))
if remote_account:
with db.session.begin_nested():
remote_account.delete()
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
:param remote: The remote application.
:returns: The HTML response.
"""
_disconnect(remote, *args, **kwargs)
return redirect(url_for('invenio_oauthclient_settings.index'))
def disconnect_rest_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
:param remote: The remote application.
:returns: The HTML response.
"""
_disconnect(remote, *args, **kwargs)
redirect_url = current_app.config['OAUTHCLIENT_REST_REMOTE_APPS'][
remote.name]['disconnect_redirect_url']
return response_handler(remote, redirect_url)
```
#### File: contrib/keycloak/settings.py
```python
from invenio_oauthclient.contrib.settings import OAuthSettingsHelper
class KeycloakSettingsHelper(OAuthSettingsHelper):
"""Helper for creating REMOTE_APP configuration dictionaries for Keycloak.
This class can be used to easily create a base configuration with sensible
defaults for a default-ish Keycloak setup.
It requires knowledge about the base URL of the Keycloak instance and the
realm on which the Invenio client application is configured.
Because the default endpoint URLs follow a simple schema, this information
can be used to create a simple base configuration.
The methods ``remote_app()`` and ``remote_rest_app()`` create and return
a dictionary in the form expected by Invenio-OAuthClient.
The latter can be used for providing SSO capabilities to SPAs communicating
with Invenio via the REST API.
Further, the helper provides some properties for commonly used default
endpoint URLs.
"""
def __init__(self, title, description, base_url, realm, app_key=None,
icon=None, **kwargs):
"""The constructor takes two arguments.
:param base_url: The base URL on which Keycloak is running
(e.g. "http://localhost:8080")
:param realm: Realm in which the invenio client application is defined
"""
app_key = app_key or "KEYCLOAK_APP_CREDENTIALS"
base_url = "{}/".format(base_url.rstrip("/")) # add leading `/`
self._realm_url = "{}auth/realms/{}".format(base_url, realm)
access_token_url = self.make_url(self._realm_url, "token")
authorize_url = self.make_url(self._realm_url, "auth")
self._user_info_url = self.make_url(self._realm_url, "userinfo")
super().__init__(title, description, base_url, app_key, icon=icon,
request_token_params={"scope": "openid"},
access_token_url=access_token_url,
authorize_url=authorize_url,
**kwargs)
@property
def user_info_url(self):
"""URL for the user info endpoint."""
return self._user_info_url
@property
def realm_url(self):
"""URL for the realm's endpoint."""
return self._realm_url
@staticmethod
def make_url(realm_url, endpoint):
"""Create an endpoint URL following the default Keycloak URL schema.
:param realm_url: The realm base URL
:param endpoint: The endpoint to use (e.g. "auth", "token", ...)
"""
return "{}/protocol/openid-connect/{}".format(realm_url, endpoint)
def get_handlers(self):
"""Return a dict with the auth handlers."""
return dict(
authorized_handler="invenio_oauthclient.handlers"
":authorized_signup_handler",
disconnect_handler="invenio_oauthclient.contrib.keycloak.handlers"
":disconnect_handler",
signup_handler=dict(
info="invenio_oauthclient.contrib.keycloak.handlers"
":info_handler",
setup="invenio_oauthclient.contrib.keycloak.handlers"
":setup_handler",
view="invenio_oauthclient.handlers:signup_handler"
),
)
def get_rest_handlers(self):
"""Return a dict with the auth REST handlers."""
return dict(
authorized_handler="invenio_oauthclient.handlers.rest"
":authorized_signup_handler",
disconnect_handler="invenio_oauthclient.contrib.keycloak.handlers"
":disconnect_rest_handler",
signup_handler=dict(
info="invenio_oauthclient.contrib.keycloak.handlers"
":info_handler",
setup="invenio_oauthclient.contrib.keycloak.handlers"
":setup_handler",
view="invenio_oauthclient.handlers.rest:signup_handler"
),
response_handler=(
"invenio_oauthclient.handlers.rest"
":default_remote_response_handler"
),
authorized_redirect_url="/",
disconnect_redirect_url="/",
signup_redirect_url="/",
error_redirect_url="/"
)
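# Illustrative configuration sketch (hypothetical URL, realm and credentials; not part of
# this module): the helper builds the remote-app dict consumed by Invenio-OAuthClient,
# analogous to the Globus helper above.
#
#   helper = KeycloakSettingsHelper(
#       title="My Keycloak", description="Company SSO",
#       base_url="https://keycloak.example.org", realm="invenio")
#   OAUTHCLIENT_REMOTE_APPS = dict(keycloak=helper.remote_app)
#   KEYCLOAK_APP_CREDENTIALS = dict(consumer_key="...", consumer_secret="...")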
```
#### File: invenio_oauthclient/handlers/base.py
```python
from flask import current_app, session
from flask_login import current_user
from invenio_db import db
from pkg_resources import require
from ..errors import OAuthClientAlreadyAuthorized, \
OAuthClientMustRedirectLogin, OAuthClientMustRedirectSignup, \
OAuthClientTokenNotFound, OAuthClientTokenNotSet, \
OAuthClientUnAuthorized, OAuthClientUserNotRegistered
from ..models import RemoteAccount
from ..proxies import current_oauthclient
from ..signals import account_info_received, account_setup_committed, \
account_setup_received
from ..utils import create_csrf_disabled_registrationform, fill_form, \
oauth_authenticate, oauth_get_user, oauth_register
from .utils import get_session_next_url, \
require_more_than_one_external_account, response_token_setter, \
token_getter, token_session_key, token_setter
#
# Handlers
#
def base_authorized_signup_handler(resp, remote, *args, **kwargs):
"""Handle sign-in/up functionality.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response.
"""
# Remove any previously stored auto register session key
session.pop(token_session_key(remote.name) + '_autoregister', None)
# Store token in session
# ----------------------
# Set token in session - token object only returned if
    # current_user.is_authenticated().
token = response_token_setter(remote, resp)
handlers = current_oauthclient.signup_handlers[remote.name]
# Sign-in/up user
# ---------------
if not current_user.is_authenticated:
account_info = handlers['info'](resp)
account_info_received.send(
remote, token=token, response=resp, account_info=account_info
)
user = oauth_get_user(
remote.consumer_key,
account_info=account_info,
access_token=token_getter(remote)[0],
)
if user is None:
# Auto sign-up if user not found
form = create_csrf_disabled_registrationform(remote)
form = fill_form(
form,
account_info['user']
)
remote_apps = current_app.config['OAUTHCLIENT_REMOTE_APPS']
precedence_mask = remote_apps[remote.name].get("precedence_mask")
user = oauth_register(form, account_info['user'], precedence_mask)
# if registration fails ...
if user is None:
# requires extra information
session[
token_session_key(remote.name) + '_autoregister'] = True
session[token_session_key(remote.name) +
'_account_info'] = account_info
session[token_session_key(remote.name) +
'_response'] = resp
db.session.commit()
raise OAuthClientMustRedirectSignup()
# Authenticate user
if not oauth_authenticate(remote.consumer_key, user,
require_existing_link=False):
raise OAuthClientUnAuthorized()
# Link account
# ------------
# Need to store token in database instead of only the session when
# called first time.
token = response_token_setter(remote, resp)
# Setup account
# -------------
if not token.remote_account.extra_data:
account_setup = handlers['setup'](token, resp)
account_setup_received.send(
remote, token=token, response=resp, account_setup=account_setup
)
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
db.session.commit()
# Redirect to next
next_url = get_session_next_url(remote.name)
if next_url:
return next_url
@require_more_than_one_external_account
def base_disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
This default handler will just delete the remote account link. You may
wish to extend this module to perform clean-up in the remote service
before removing the link (e.g. removing install webhooks).
:param remote: The remote application.
:returns: Redirect response.
"""
if not current_user.is_authenticated:
raise OAuthClientUnAuthorized()
with db.session.begin_nested():
account = RemoteAccount.get(
user_id=current_user.get_id(),
client_id=remote.consumer_key
)
if account:
account.delete()
db.session.commit()
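# Illustrative extension sketch (hypothetical helpers, not part of this module): a
# custom disconnect handler can do remote clean-up before delegating to the base one.
#
#   def my_disconnect_handler(remote, *args, **kwargs):
#       revoke_remote_tokens(remote)          # hypothetical call to the remote service
#       base_disconnect_handler(remote, *args, **kwargs)
#       return redirect(url_for('invenio_oauthclient_settings.index'))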
def base_signup_handler(remote, form, *args, **kwargs):
"""Handle extra signup information.
:param remote: The remote application.
:returns: Redirect response or the template rendered.
"""
# User already authenticated so move on
if current_user.is_authenticated:
raise OAuthClientAlreadyAuthorized()
# Retrieve token from session
oauth_token = token_getter(remote)
if not oauth_token:
raise OAuthClientTokenNotFound()
session_prefix = token_session_key(remote.name)
# Test to see if this is coming from on authorized request
if not session.get(session_prefix + '_autoregister', False):
raise OAuthClientMustRedirectLogin()
handlers = current_oauthclient.signup_handlers[remote.name]
if form.validate_on_submit():
account_info = session.get(session_prefix + '_account_info')
response = session.get(session_prefix + '_response')
remote_apps = current_app.config['OAUTHCLIENT_REMOTE_APPS']
precedence_mask = remote_apps[remote.name].get("precedence_mask")
# Register user
user = oauth_register(form, account_info['user'], precedence_mask)
if user is None:
raise OAuthClientUserNotRegistered()
# Remove session key
session.pop(session_prefix + '_autoregister', None)
# Link account and set session data
token = token_setter(remote, oauth_token[0], secret=oauth_token[1],
user=user)
handlers = current_oauthclient.signup_handlers[remote.name]
if token is None:
raise OAuthClientTokenNotSet()
if not token.remote_account.extra_data:
account_setup = handlers['setup'](token, response)
account_setup_received.send(
remote, token=token, response=response,
account_setup=account_setup
)
# Registration has been finished
db.session.commit()
account_setup_committed.send(remote, token=token)
else:
# Registration has been finished
db.session.commit()
# Authenticate the user
if not oauth_authenticate(remote.consumer_key, user,
require_existing_link=False):
raise OAuthClientUnAuthorized()
# Remove account info from session
session.pop(session_prefix + '_account_info', None)
session.pop(session_prefix + '_response', None)
# Redirect to next
next_url = get_session_next_url(remote.name)
if next_url:
return next_url
# Pre-fill form
account_info = session.get(session_prefix + '_account_info')
if not form.is_submitted():
form = fill_form(form, account_info['user'])
```
#### File: invenio-oauthclient/tests/test_base_handlers.py
```python
import pytest
from invenio_oauthclient.errors import OAuthResponseError
from invenio_oauthclient.handlers import response_token_setter, token_getter
from invenio_oauthclient.models import RemoteToken
from invenio_oauthclient.utils import oauth_authenticate
def test_token_setter(app, remote):
"""Test token setter on response from OAuth server."""
# OAuth1
resp_oauth1 = {
'name': '<NAME>',
'expires_in': 3599,
'oauth_token': 'test_access_token',
'oauth_token_secret': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer',
}
assert not response_token_setter(remote, resp_oauth1)
# Bad request
resp_bad = {
'invalid': 'invalid',
}
with pytest.raises(OAuthResponseError):
response_token_setter(remote, resp_bad)
def test_token_getter(remote, models_fixture, app):
"""Test token getter on response from OAuth server."""
datastore = app.extensions['invenio-accounts'].datastore
existing_email = '<EMAIL>'
user = datastore.find_user(email=existing_email)
# Missing RemoteToken
oauth_authenticate('dev', user)
assert not token_getter(remote)
# Populated RemoteToken
RemoteToken.create(user.id, 'testkey', 'mytoken', 'mysecret')
oauth_authenticate('dev', user)
assert token_getter(remote) == ('mytoken', 'mysecret')
```
#### File: invenio-oauthclient/tests/test_views_rest.py
```python
import time
import pytest
from flask import url_for
from flask_oauthlib.client import OAuth as FlaskOAuth
from helpers import check_response_redirect_url, \
check_response_redirect_url_args
from invenio_accounts.testutils import login_user_via_session
from invenio_db import db
from itsdangerous import TimedJSONWebSignatureSerializer
from mock import MagicMock
from simplejson import JSONDecodeError
from six.moves.urllib_parse import parse_qs, urlparse
from invenio_oauthclient import InvenioOAuthClientREST
from invenio_oauthclient._compat import _create_identifier
from invenio_oauthclient.handlers import token_getter
from invenio_oauthclient.models import RemoteToken
from invenio_oauthclient.views.client import rest_blueprint, serializer
def mock_response(oauth, remote_app='test', data=None):
"""Mock the oauth response to use the remote."""
# Mock oauth remote application
oauth.remote_apps[remote_app].handle_oauth2_response = MagicMock(
return_value=data or {
'access_token': 'test_access_token',
'scope': "",
'token_type': 'bearer'
}
)
def test_redirect_uri(app_rest):
"""Test redirect uri."""
with app_rest.test_client() as client:
# Test redirect
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test',
next='http://inveniosoftware.org'))
assert resp.status_code == 302
# Verify parameters
params = parse_qs(urlparse(resp.location).query)
assert params['response_type'] == ['code']
assert params['client_id'] == ['testid']
assert params['redirect_uri']
assert params['state']
# Verify next parameter in state token does not allow blanco redirects
state = serializer.loads(params['state'][0])
assert state['next'] is None
# Assert redirect uri does not have any parameters.
params = parse_qs(urlparse(params['redirect_uri'][0]).query)
assert params == {}
# Assert that local redirects are allowed
test_urls = [
'/',
'/search'
]
for url in test_urls:
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=url))
check_response_redirect_url(resp, url)
# Assert that absolute redirects are allowed only if
# `APP_ALLOWED_HOSTS` is set and includes them. Otherwise, the relative
# path of the url is extracted and returned. Note if you need to
# redirect to index page you should pass '/' as next parameter.
test_url = 'http://inveniosoftware.org/test'
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=test_url))
check_response_redirect_url(resp, urlparse(test_url).path)
app_rest.config.update({"APP_ALLOWED_HOSTS": ["inveniosoftware.org"]})
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next=test_url))
check_response_redirect_url(resp, test_url)
def test_login(app_rest):
"""Test login."""
with app_rest.test_client() as client:
# Test redirect
resp = client.get(
url_for(
'invenio_oauthclient.rest_login',
remote_app='test', next='/'))
assert resp.status_code == 302
params = parse_qs(urlparse(resp.location).query)
assert params['response_type'] == ['code']
assert params['client_id'] == ['testid']
assert params['redirect_uri']
assert params['state']
# Invalid remote
resp = client.get(
url_for('invenio_oauthclient.rest_login', remote_app='invalid'),
follow_redirects=True
)
assert resp.status_code == 404
def test_authorized(base_app, params):
"""Test login."""
handled = {}
def test_authorized_handler(resp, remote, *args, **kwargs):
"""Save configuration."""
handled['resp'] = resp
handled['remote'] = remote
handled['args'] = args
handled['kwargs'] = kwargs
return 'TEST'
def test_invalid_authorized_handler(resp, remote, *args, **kwargs):
"""Set wrong configuration."""
handled['resp'] = 1
handled['remote'] = 1
handled['args'] = 1
handled['kwargs'] = 1
base_app.config['OAUTHCLIENT_REST_REMOTE_APPS'].update(
dict(
test=dict(
authorized_handler=test_authorized_handler,
params=params('testid'),
title='MyLinkedTestAccount',
),
test_invalid=dict(
authorized_handler=test_invalid_authorized_handler,
params=params('test_invalidid'),
title='Test Invalid',
),
full=dict(
params=params('fullid'),
title='Full',
),
))
FlaskOAuth(base_app)
InvenioOAuthClientREST(base_app)
base_app.register_blueprint(rest_blueprint)
with base_app.test_client() as client:
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
mock_response(base_app.extensions['oauthlib.client'], 'test')
mock_response(base_app.extensions['oauthlib.client'], 'test_invalid')
from invenio_oauthclient.views.client import serializer
state = serializer.dumps({
'app': 'test',
'sid': _create_identifier(),
'next': None,
})
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test',
code='test',
state=state
)
)
assert resp.data == b'TEST'
assert handled['remote'].name == 'test'
assert not handled['args']
assert not handled['kwargs']
assert handled['resp']['access_token'] == 'test_access_token'
state = serializer.dumps({
'app': 'test_invalid',
'sid': _create_identifier(),
'next': None,
})
# handler should return something
# Flask>1.0 is throwing TypeError and Flask<1.0 ValueError
with pytest.raises((ValueError, TypeError)):
client.get(url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test_invalid',
code='test',
state=state,
))
def test_invalid_authorized_response(app_rest):
"""Test login."""
oauth = app_rest.extensions['oauthlib.client']
with app_rest.test_client() as client:
# Fake an authorized request
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
oauth.remote_apps['test'].handle_oauth2_response = MagicMock(
side_effect=JSONDecodeError('Expecting value', '', 0)
)
state = serializer.dumps({
'app': 'test',
'sid': _create_identifier(),
'next': None,
})
with pytest.raises(JSONDecodeError):
client.get(url_for(
'invenio_oauthclient.rest_authorized',
remote_app='test',
code='test',
state=state
))
def test_state_token(app_rest, monkeypatch):
"""Test state token."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
with app_rest.test_client() as client:
# Ensure remote apps have been loaded (due to before first
# request)
client.get(url_for(
'invenio_oauthclient.rest_login', remote_app='test'))
mock_response(app_rest.extensions['oauthlib.client'], 'test')
# Good state token
state = serializer.dumps(
{'app': 'test', 'sid': '1234', 'next': None, }
)
resp = client.get(
url_for('invenio_oauthclient.rest_authorized', remote_app='test',
code='test', state=state)
)
assert resp.status_code == 200
outdated_serializer = TimedJSONWebSignatureSerializer(
app_rest.config['SECRET_KEY'],
expires_in=0,
)
# Bad state - timeout
state1 = outdated_serializer.dumps(
{'app': 'test', 'sid': '1234', 'next': None, }
)
# Bad state - app
state2 = serializer.dumps(
# State for another existing app (test_invalid exists)
{'app': 'test_invalid', 'sid': '1234', 'next': None, }
)
# Bad state - sid
state3 = serializer.dumps(
# State for another existing app (test_invalid exists)
{'app': 'test', 'sid': 'bad', 'next': None, }
)
time.sleep(1)
for s in [state1, state2, state3]:
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized', remote_app='test',
code='test', state=s)
)
assert resp.status_code == 302
assert parse_qs(urlparse(resp.location).query)['code'][0] == '403'
def test_no_remote_app(app_rest):
"""Test no remote app."""
with app_rest.test_client() as client:
resp = client.get(
url_for(
'invenio_oauthclient.rest_authorized', remote_app='invalid')
)
assert resp.status_code == 404
resp = client.get(
url_for(
'invenio_oauthclient.rest_disconnect', remote_app='invalid')
)
assert resp.status_code == 404
resp = client.get(
url_for('invenio_oauthclient.rest_signup', remote_app='invalid')
)
assert resp.status_code == 404
def test_token_getter_setter(app_rest, monkeypatch):
"""Test token getter setter."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
oauth = app_rest.extensions['oauthlib.client']
# Mock user
user = MagicMock()
user.id = 1
user.get_id = MagicMock(return_value=1)
user.is_anonymous = False
with app_rest.test_client() as c:
login_user_via_session(c, user)
# First call login to be redirected
res = c.get(url_for('invenio_oauthclient.rest_login',
remote_app='full'))
assert res.status_code == 302
assert res.location.startswith(
oauth.remote_apps['full'].authorize_url
)
state = parse_qs(urlparse(res.location).query)['state'][0]
        # Mock the response class
mock_response(app_rest.extensions['oauthlib.client'], 'full')
# Imitate that the user authorized our request in the remote
# application.
c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state,
))
# Assert if everything is as it should be.
from flask import session as flask_session
assert flask_session['oauth_token_full'] == \
('test_access_token', '')
t = RemoteToken.get(1, 'fullid')
assert t.remote_account.client_id == 'fullid'
assert t.access_token == 'test_access_token'
assert RemoteToken.query.count() == 1
# Mock a new authorized request
mock_response(app_rest.extensions['oauthlib.client'], 'full', data={
            'access_token': 'new_access_token',
'scope': "",
'token_type': 'bearer'
})
c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state
))
t = RemoteToken.get(1, 'fullid')
assert t.access_token == 'new_access_token'
assert RemoteToken.query.count() == 1
val = token_getter(
app_rest.extensions['oauthlib.client'].remote_apps['full'])
assert val == ('new_access_token', '')
# Disconnect account
res = c.get(url_for(
'invenio_oauthclient.rest_disconnect', remote_app='full',
))
assert res.status_code == 302
expected_url_args = {
"message": "Successfully disconnected.",
"code": 200
}
check_response_redirect_url_args(res, expected_url_args)
# Assert that remote account have been removed.
t = RemoteToken.get(1, 'fullid')
assert t is None
# TODO: Figure out what is leaving session open & blocked
db.session.close()
def test_rejected(app_rest, monkeypatch):
"""Test rejected."""
# Mock session id
monkeypatch.setattr('invenio_oauthclient._compat._create_identifier',
lambda: '1234')
monkeypatch.setattr(
'invenio_oauthclient.views.client._create_identifier', lambda: '1234')
oauth = app_rest.extensions['oauthlib.client']
# Mock user id
user = MagicMock()
user.get_id = MagicMock(return_value=1)
user.is_authenticated = MagicMock(return_value=True)
with app_rest.test_client() as c:
login_user_via_session(c, user)
# First call login to be redirected
res = c.get(url_for('invenio_oauthclient.rest_login',
remote_app='full'))
assert res.status_code == 302
assert res.location.startswith(
oauth.remote_apps['full'].authorize_url
)
# Mock response to imitate an invalid response. Here, an
# example from GitHub when the code is expired.
mock_response(
app_rest.extensions['oauthlib.client'],
'full',
data=dict(
error_uri='http://developer.github.com/v3/oauth/'
'#bad-verification-code',
error_description='The code passed is '
'incorrect or expired.',
error='bad_verification_code',
)
)
# Imitate that the user authorized our request in the remote
        # application (however, the remote app will soon reply with an
# error)
state = serializer.dumps({
'app': 'full', 'sid': '1234', 'next': None,
})
res = c.get(url_for(
'invenio_oauthclient.rest_authorized', remote_app='full',
code='test', state=state
))
assert res.status_code == 302
```
|
{
"source": "jennur/invenio-rdm-records",
"score": 3
}
|
#### File: invenio_rdm_records/vocabularies/convert_to_new_vocabulary.py
```python
import csv
from collections import defaultdict
from pathlib import Path
import click
import yaml
def hierarchized_rows(dict_reader):
"""Yields filled OrderedDict rows according to csv hierarchy.
Idea is to have the csv rows:
fooA, barA-1, bazA-1
, barA-2, bazA-2
fooB, barB-1, bazB-1
, , bazB-2
map to these rows
fooA, barA-1, bazA-1
fooA, barA-2, bazA-2
fooB, barB-1, bazB-1
fooB, barB-1, bazB-2
This makes it easy for subject matter experts to fill the csv in
their spreadsheet software, while also allowing hierarchy of data
a-la yaml and extensibility for other conversions or data down the road.
"""
prev_row = defaultdict(str)
for row in dict_reader: # row is an OrderedDict in fieldnames order
current_row = row
for field in row:
if not current_row[field]:
current_row[field] = prev_row[field]
else:
break
prev_row = current_row
yield current_row
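# Illustrative sketch (hypothetical CSV content, not part of the original module):
# blank cells inherit their value from the previous row, as described in the docstring.
#
#   import csv, io
#   sample = "type,subtype\nfooA,barA-1\n,barA-2\nfooB,barB-1\n"
#   rows = list(hierarchized_rows(csv.DictReader(io.StringIO(sample))))
#   # rows[1] == {"type": "fooA", "subtype": "barA-2"}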
class ResourceTypeConverterType:
"""Converter for resource type vocabulary."""
def to_dict(self, csv_row):
"""Converts csv_row to new vocabulary dict."""
return {
"id": (csv_row["subtype"] or csv_row["type"]),
"title": {
"en": csv_row["subtype_name"] or csv_row["type_name"]
},
"props": dict(csv_row)
}
class Converter:
"""General Converter."""
def __init__(self, converter_type):
"""Constructor."""
self.converter_type = converter_type
def convert(self, filepath, dirpath):
"""Write converted filepath file to dirpath directory."""
yaml_filepath = dirpath / filepath.with_suffix(".yaml").name
with open(filepath) as csv_file:
reader = csv.DictReader(csv_file)
reader = hierarchized_rows(reader)
with open(yaml_filepath, "w") as yaml_file:
yaml.dump(
[self.converter_type.to_dict(row) for row in reader],
yaml_file
)
click.secho("Conversion done!", fg="green")
@click.command()
@click.argument('csv_filename')
@click.option('--to', default='.', help='Export destination directory.')
def convert(csv_filename, to):
"""Convert CSV_FILENAME into a new vocabulary yaml file in TO."""
csv_filepath = Path(csv_filename)
dirpath = Path(to)
if not csv_filepath.suffix == ".csv":
click.secho("Only csv conversion allowed.", fg="red")
exit()
if csv_filepath.stem == "resource_types":
converter_type = ResourceTypeConverterType()
else:
click.secho("Not supported yet!", fg="red")
exit()
converter = Converter(converter_type)
converter.convert(csv_filepath, dirpath)
if __name__ == "__main__":
convert()
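# Example invocation (hypothetical file names, not part of the original script):
#
#   python convert_to_new_vocabulary.py resource_types.csv --to vocabularies/
#
# which writes vocabularies/resource_types.yaml.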
```
#### File: services/schemas/test_reference.py
```python
import pytest
from marshmallow import ValidationError
from invenio_rdm_records.services.schemas.metadata import MetadataSchema, \
ReferenceSchema
def test_valid_reference():
"""Test references schema."""
valid_full = {
"reference": "Reference to something et al.",
"identifier": "0000 0001 1456 7559",
"scheme": "isni"
}
assert valid_full == ReferenceSchema().load(valid_full)
def test_valid_minimal_reference():
valid_minimal = {
"reference": "Reference to something et al."
}
assert valid_minimal == ReferenceSchema().load(valid_minimal)
def test_invalid_no_reference():
invalid_no_reference = {
"identifier": "0000 0001 1456 7559",
"scheme": "isni"
}
with pytest.raises(ValidationError):
data = ReferenceSchema().load(invalid_no_reference)
def test_invalid_scheme_reference():
invalid_scheme = {
"reference": "Reference to something et al.",
"identifier": "0000 0001 1456 7559",
"scheme": "Invalid"
}
    loaded = ReferenceSchema().load(invalid_scheme)
# Check the backend forced the change to the correct scheme
assert loaded["scheme"] == "isni"
def test_invalid_extra_right():
invalid_extra = {
"reference": "Reference to something et al.",
"identifier": "0000 0001 1456 7559",
"scheme": "Invalid",
"extra": "field"
}
with pytest.raises(ValidationError):
data = ReferenceSchema().load(invalid_extra)
@pytest.mark.parametrize("references", [
([]),
([{
"reference": "Reference to something et al.",
"identifier": "0000 0001 1456 7559",
"scheme": "isni"
}, {
"reference": "Reference to something et al."
}])
])
def test_valid_references(references, minimal_record, vocabulary_clear):
metadata = minimal_record['metadata']
# NOTE: this is done to get possible load transformations out of the way
metadata = MetadataSchema().load(metadata)
metadata['references'] = references
assert metadata == MetadataSchema().load(metadata)
```
#### File: services/schemas/test_subject.py
```python
import pytest
from marshmallow import ValidationError
from invenio_rdm_records.services.schemas.metadata import MetadataSchema
def test_valid_subjects(app, minimal_record):
metadata = minimal_record['metadata']
metadata['subjects'] = [{"id": "A-D000007"}, {"id": "A-D000008"}]
data = MetadataSchema().load(metadata)
assert data['subjects'] == metadata['subjects']
def test_invalid_no_list_subjects(app, minimal_record):
metadata = minimal_record['metadata']
metadata['subjects'] = {"id": "A-D000007"}
with pytest.raises(ValidationError):
data = MetadataSchema().load(metadata)
```
|
{
"source": "jennwilliams/pycortex",
"score": 3
}
|
#### File: cortex/quickflat/utils.py
```python
from six import string_types
from functools import reduce
import os
import glob
import numpy as np
import string
from .. import utils
from .. import dataset
from ..database import db
from ..options import config
def make_flatmap_image(braindata, height=1024, recache=False, **kwargs):
"""Generate flatmap image from volumetric brain data
This
Parameters
----------
braindata : one of: {cortex.Volume, cortex.Vertex, cortex.Dataview)
Object containing containing data to be plotted, subject (surface identifier),
and transform.
height : scalar
Height of image. None defaults to height of images already present in figure.
recache : boolean
Whether or not to recache intermediate files. Takes longer to plot this way, potentially
resolves some errors. Useful if you've made changes to the alignment.
    kwargs : dict
        Additional keyword arguments passed through to get_flatcache (e.g. sampler, thick, depth)
Returns
-------
    image : ndarray
        The data rendered onto the subject's flattened cortical surface
    extents : array-like
        Extents of the flatmap image, as stored in the flatmask cache
"""
mask, extents = get_flatmask(braindata.subject, height=height, recache=recache)
if not hasattr(braindata, "xfmname"):
pixmap = get_flatcache(braindata.subject,
None,
height=height,
recache=recache,
**kwargs)
if isinstance(braindata, dataset.Vertex2D):
data = braindata.raw.vertices
else:
data = braindata.vertices
else:
pixmap = get_flatcache(braindata.subject,
braindata.xfmname,
height=height,
recache=recache,
**kwargs)
if isinstance(braindata, dataset.Volume2D):
data = braindata.raw.volume
else:
data = braindata.volume
if data.shape[0] > 1:
raise ValueError("Input data was not the correct dimensionality - please provide 3D Volume or 2D Vertex data")
if data.dtype != np.uint8:
# Convert data to float to avoid image artifacts
        data = data.astype(float)  # the np.float alias was removed in newer NumPy versions
if data.dtype == np.uint8:
img = np.zeros(mask.shape+(4,), dtype=np.uint8)
img[mask] = pixmap * data.reshape(-1, 4)
return img.transpose(1,0,2)[::-1], extents
else:
badmask = np.array(pixmap.sum(1) > 0).ravel()
img = (np.nan*np.ones(mask.shape)).astype(data.dtype)
mimg = (np.nan*np.ones(badmask.shape)).astype(data.dtype)
mimg[badmask] = (pixmap*data.ravel())[badmask].astype(mimg.dtype)
img[mask] = mimg
return img.T[::-1], extents
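# Illustrative sketch (hypothetical subject/transform names, not guaranteed to exist in
# your pycortex store):
#
#   import cortex
#   vol = cortex.Volume.random("S1", "fullhead")
#   img, extents = make_flatmap_image(vol, height=1024)
#   # e.g. plt.imshow(img, extent=extents)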
def get_flatmask(subject, height=1024, recache=False):
"""
Parameters
----------
subject : str
Name of subject in pycortex store
height : int
Height in pixels to generate the image
recache : bool
Recache the intermediate files? Can resolve some issues but is slower.
"""
cachedir = db.get_cache(subject)
cachefile = os.path.join(cachedir, "flatmask_{h}.npz".format(h=height))
if not os.path.exists(cachefile) or recache:
mask, extents = _make_flatmask(subject, height=height)
np.savez(cachefile, mask=mask, extents=extents)
else:
npz = np.load(cachefile)
mask, extents = npz['mask'], npz['extents']
npz.close()
return mask, extents
def get_flatcache(subject, xfmname, pixelwise=True, thick=32, sampler='nearest',
recache=False, height=1024, depth=0.5):
"""
Parameters
----------
subject : str
Subject name in pycortex db
xfmname : str
Name of transform for subject
    pixelwise : bool
        If True (and a transform is given), build a voxel-to-pixel mapping;
        otherwise build a vertex-to-pixel mapping.
    thick : int
        Number of cortical depths sampled between the white matter and pial surfaces.
    sampler : str
        Name of the sampling function in `cortex.mapper.samplers`, e.g. 'nearest'.
    recache : bool
        Recache intermediate files? Doing so is slower but can resolve some errors.
    height : int
        Height in pixels of the image to generate.
    depth : float
        Cortical depth (0 = white matter, 1 = pial surface) used when thick == 1.
    Returns
    -------
    pixmap : scipy.sparse.csr_matrix
        Sparse matrix mapping voxels (or vertices) to flatmap pixels.
    """
cachedir = db.get_cache(subject)
cachefile = os.path.join(cachedir, "flatverts_{height}.npz").format(height=height)
if pixelwise and xfmname is not None:
cachefile = os.path.join(cachedir, "flatpixel_{xfmname}_{height}_{sampler}_{extra}.npz")
extra = "l%d"%thick if thick > 1 else "d%g"%depth
cachefile = cachefile.format(height=height, xfmname=xfmname, sampler=sampler, extra=extra)
if not os.path.exists(cachefile) or recache:
print("Generating a flatmap cache")
if pixelwise and xfmname is not None:
pixmap = _make_pixel_cache(subject, xfmname, height=height, sampler=sampler, thick=thick, depth=depth)
else:
pixmap = _make_vertex_cache(subject, height=height)
np.savez(cachefile, data=pixmap.data, indices=pixmap.indices, indptr=pixmap.indptr, shape=pixmap.shape)
else:
from scipy import sparse
npz = np.load(cachefile)
pixmap = sparse.csr_matrix((npz['data'], npz['indices'], npz['indptr']), shape=npz['shape'])
npz.close()
if not pixelwise and xfmname is not None:
from scipy import sparse
mapper = utils.get_mapper(subject, xfmname, sampler)
pixmap = pixmap * sparse.vstack(mapper.masks)
return pixmap
def _return_pixel_pairs(vert_pair_list, x_dict, y_dict):
"""Janky and probably unnecessary"""
pix_list = []
vert_pairs_valid = []
for (vert1, vert2) in vert_pair_list:
if vert1 in x_dict and vert2 in x_dict:
pix1 = np.array((x_dict[vert1], y_dict[vert1]))
pix2 = np.array((x_dict[vert2], y_dict[vert2]))
pix_list.append(np.array([pix1, pix2]))
vert_pairs_valid.append((vert1, vert2))
else:
            #These are vertex pairs not represented in the flatmap. They appear to belong to the medial brain area that is removed when the flat map is created.
pass
return np.array(pix_list), np.array(vert_pairs_valid)
### --- Hidden helper functions --- ###
def _color2hex(color):
"""Convert arbitrary color input to hex string"""
from matplotlib import colors
cc = colors.ColorConverter()
rgba = cc.to_rgba(color)
hexcol = colors.rgb2hex(rgba)
return hexcol
def _convert_svg_kwargs(kwargs):
"""Convert matplotlib-like plotting property names/values to svg object property names/values"""
svg_style_key_mapping = dict(
linewidth='stroke-width',
lw='stroke-width',
linecolor='stroke',
lc='stroke',
#labelcolor='', # FIX ME
#labelsize='', # FIX ME
linealpha='stroke-opacity',
roifill='fill',
fillcolor='fill',
fillalpha='fill-opacity',
dashes='stroke-dasharray'
#dash_capstyle # ADD ME?
#dash_joinstyle # ADD ME?
)
svg_style_value_mapping = dict(
linewidth=lambda x: x,
lw=lambda x: x,
linecolor=lambda x: _color2hex(x),
lc=lambda x: _color2hex(x),
labelcolor=lambda x: _color2hex(x),
labelsize=lambda x: x,
linealpha=lambda x: x,
roifill=lambda x: _color2hex(x),
fillcolor=lambda x: _color2hex(x),
fillalpha=lambda x: x,
dashes=lambda x: '{}, {}'.format(*x),
#dash_capstyle # ADD ME?
#dash_joinstyle # ADD ME?
)
out = dict((svg_style_key_mapping[k], svg_style_value_mapping[k](v))
for k,v in kwargs.items() if v is not None)
return out
def _parse_defaults(section):
defaults = dict(config.items(section))
for k in defaults.keys():
# Convert numbers to floating point numbers
if defaults[k][0] in string.digits + '.':
if ',' in defaults[k]:
defaults[k] = [float(x) for x in defaults[k].split(',')]
else:
defaults[k] = float(defaults[k])
# Convert 'None' to None
if defaults[k] == 'None':
defaults[k] = None
# Special case formatting
if k=='stroke' or k=='fill':
defaults[k] = _color2hex(defaults[k])
elif k=='stroke-dasharray' and isinstance(defaults[k], (list,tuple)):
defaults[k] = '{}, {}'.format(*defaults[k])
return defaults
def _get_fig_and_ax(fig):
"""Get figure and current ax. Input can be either a figure or an ax."""
import matplotlib.pyplot as plt
if isinstance(fig, plt.Axes):
ax = fig
fig = ax.figure
elif isinstance(fig, plt.Figure):
ax = fig.gca()
else:
raise ValueError("fig should be a matplotlib Figure or Axes instance.")
return fig, ax
def _get_images(fig):
"""Get all images in a given matplotlib axis"""
from matplotlib.image import AxesImage
_, ax = _get_fig_and_ax(fig)
images = dict((x.get_label(), x) for x in ax.get_children() if isinstance(x, AxesImage))
return images
def _get_extents(fig):
"""Get extents of images current in a given matplotlib figure"""
images = _get_images(fig)
if 'data' not in images:
raise ValueError("You must specify `extents` argument if you have not yet plotted a data flatmap!")
extents = images['data'].get_extent()
return extents
def _get_height(fig):
"""Get height of images in currently in a given matplotlib figure"""
images = _get_images(fig)
if 'data_cutout' in images:
raise Exception("Can't add plots once cutout has been performed! Do cutouts last!")
if 'data' in images:
height = images['data'].get_array().shape[0]
else:
# No images, revert to default
height = 1024
return height
def _make_hatch_image(hatch_data, height, sampler='nearest', hatch_space=4, recache=False):
"""Make hatch image
Parameters
----------
hatch_data : cortex.Dataview
brain data with values ranging from 0-1, specifying where to show hatch marks (data value
will be mapped to alpha value of hatch marks)
height : scalar
height of image to display
sampler : string
        pycortex sampler string, e.g. 'nearest' (see `cortex.mapper.samplers` for options)
hatch_space : scalar
space between hatch lines (in pixels)
recache : boolean
"""
dmap, _ = make_flatmap_image(hatch_data, height=height, sampler=sampler, recache=recache)
hx, hy = np.meshgrid(range(dmap.shape[1]), range(dmap.shape[0]))
hatchpat = (hx+hy)%(2*hatch_space) < 2
# Leila code that breaks:
#hatch_size = [0, 4, 4]
#hatchpat = (hx + hy + hatch_size[0])%(hatch_size[1] * hatch_space) < hatch_size[2]
hatchpat = np.logical_or(hatchpat, hatchpat[:,::-1]).astype(float)
hatchim = np.dstack([1-hatchpat]*3 + [hatchpat])
hatchim[:, : ,3] *= np.clip(dmap, 0, 1).astype(float)
return hatchim
def _make_flatmask(subject, height=1024):
from .. import polyutils
from PIL import Image, ImageDraw
pts, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
left, right = polyutils.trace_poly(polyutils.boundary_edges(polys))
aspect = (height / (pts.max(0) - pts.min(0))[1])
lpts = (pts[left] - pts.min(0)) * aspect
rpts = (pts[right] - pts.min(0)) * aspect
im = Image.new('L', (int(aspect * (pts.max(0) - pts.min(0))[0]), height))
draw = ImageDraw.Draw(im)
draw.polygon(lpts[:,:2].ravel().tolist(), fill=255)
draw.polygon(rpts[:,:2].ravel().tolist(), fill=255)
extents = np.hstack([pts.min(0), pts.max(0)])[[0,3,1,4]]
return np.array(im).T > 0, extents
def _make_vertex_cache(subject, height=1024):
from scipy import sparse
from scipy.spatial import cKDTree
flat, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
valid = np.unique(polys)
fmax, fmin = flat.max(0), flat.min(0)
size = fmax - fmin
aspect = size[0] / size[1]
width = int(aspect * height)
grid = np.mgrid[fmin[0]:fmax[0]:width*1j, fmin[1]:fmax[1]:height*1j].reshape(2,-1)
mask, extents = get_flatmask(subject, height=height)
assert mask.shape[0] == width and mask.shape[1] == height
kdt = cKDTree(flat[valid,:2])
dist, vert = kdt.query(grid.T[mask.ravel()])
dataij = (np.ones((len(vert),)), np.array([np.arange(len(vert)), valid[vert]]))
return sparse.csr_matrix(dataij, shape=(mask.sum(), len(flat)))
def _make_pixel_cache(subject, xfmname, height=1024, thick=32, depth=0.5, sampler='nearest'):
from scipy import sparse
from scipy.spatial import Delaunay
flat, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
valid = np.unique(polys)
fmax, fmin = flat.max(0), flat.min(0)
size = fmax - fmin
aspect = size[0] / size[1]
width = int(aspect * height)
grid = np.mgrid[fmin[0]:fmax[0]:width*1j, fmin[1]:fmax[1]:height*1j].reshape(2,-1)
mask, extents = get_flatmask(subject, height=height)
assert mask.shape[0] == width and mask.shape[1] == height
# Get barycentric coordinates
dl = Delaunay(flat[valid,:2])
simps = dl.find_simplex(grid.T[mask.ravel()])
missing = simps == -1
tfms = dl.transform[simps]
l1, l2 = (tfms[:,:2].transpose(1,2,0) * (grid.T[mask.ravel()] - tfms[:,2]).T).sum(1)
l3 = 1 - l1 - l2
ll = np.vstack([l1, l2, l3])
ll[:,missing] = 0
from ..mapper import samplers
xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
sampclass = getattr(samplers, sampler)
# Transform fiducial vertex locations to pixel locations using barycentric xfm
try:
pia, polys = db.get_surf(subject, "pia", merge=True, nudge=False)
wm, polys = db.get_surf(subject, "wm", merge=True, nudge=False)
piacoords = xfm((pia[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
wmcoords = xfm((wm[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
valid_p = np.array([np.all((0 <= piacoords), axis=1),
piacoords[:,0] < xfm.shape[2],
piacoords[:,1] < xfm.shape[1],
piacoords[:,2] < xfm.shape[0]])
valid_p = np.all(valid_p, axis=0)
valid_w = np.array([np.all((0 <= wmcoords), axis=1),
wmcoords[:,0] < xfm.shape[2],
wmcoords[:,1] < xfm.shape[1],
wmcoords[:,2] < xfm.shape[0]])
valid_w = np.all(valid_w, axis=0)
valid = np.logical_and(valid_p, valid_w)
vidx = np.nonzero(valid)[0]
mapper = sparse.csr_matrix((mask.sum(), np.prod(xfm.shape)))
if thick == 1:
i, j, data = sampclass(piacoords[valid]*depth + wmcoords[valid]*(1-depth), xfm.shape)
mapper = mapper + sparse.csr_matrix((data / float(thick), (vidx[i], j)),
shape=mapper.shape)
return mapper
for t in np.linspace(0, 1, thick+2)[1:-1]:
i, j, data = sampclass(piacoords[valid]*t + wmcoords[valid]*(1-t), xfm.shape)
mapper = mapper + sparse.csr_matrix((data / float(thick), (vidx[i], j)),
shape=mapper.shape)
return mapper
except IOError:
fid, polys = db.get_surf(subject, "fiducial", merge=True)
fidcoords = xfm((fid[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
valid = reduce(np.logical_and,
[reduce(np.logical_and, (0 <= fidcoords).T),
fidcoords[:, 0] < xfm.shape[2],
fidcoords[:, 1] < xfm.shape[1],
fidcoords[:, 2] < xfm.shape[0]])
vidx = np.nonzero(valid)[0]
i, j, data = sampclass(fidcoords[valid], xfm.shape)
csrshape = mask.sum(), np.prod(xfm.shape)
return sparse.csr_matrix((data, (vidx[i], j)), shape=csrshape)
def _has_cmap(dataview):
"""Checks whether a given dataview has colormap (cmap) information as an
instance or is an RGB volume and does not have a cmap.
Returns a dictionary with cmap information for non RGB volumes"""
from matplotlib import colors, cm, pyplot as plt
cmapdict = dict()
if not isinstance(dataview, (dataset.VolumeRGB, dataset.VertexRGB)):
# Get colormap from matplotlib or pycortex colormaps
## -- redundant code, here and in cortex/dataset/views.py -- ##
if isinstance(dataview.cmap, string_types):
if not dataview.cmap in cm.__dict__:
# unknown colormap, test whether it's in pycortex colormaps
cmapdir = config.get('webgl', 'colormaps')
colormaps = glob.glob(os.path.join(cmapdir, "*.png"))
colormaps = dict(((os.path.split(c)[1][:-4], c) for c in colormaps))
if not dataview.cmap in colormaps:
                    raise Exception('Unknown color map!')
I = plt.imread(colormaps[dataview.cmap])
cmap = colors.ListedColormap(np.squeeze(I))
# Register colormap while we're at it
cm.register_cmap(dataview.cmap, cmap)
else:
cmap = dataview.cmap
elif isinstance(dataview.cmap, colors.Colormap):
# Allow input of matplotlib colormap class
cmap = dataview.cmap
else:
raise TypeError('{} type not handled'.format(type(dataview.cmap)))
cmapdict.update(cmap=cmap,
vmin=dataview.vmin,
vmax=dataview.vmax)
return cmapdict
```
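A minimal usage sketch for `make_flatmap_image`, assuming a subject (here called `'S1'`) and a transform (here `'fullhead'`) already exist in the pycortex database; both names and the data shape are placeholders:
```python
import numpy as np
import matplotlib.pyplot as plt
import cortex
from cortex.quickflat.utils import make_flatmap_image

# Placeholder subject/transform names; the random volume's shape must match
# the reference volume of the chosen transform.
vol = cortex.Volume(np.random.randn(31, 100, 100), 'S1', 'fullhead')
image, extents = make_flatmap_image(vol, height=1024)

# `extents` is (xmin, xmax, ymin, ymax), which matches matplotlib's `extent` argument.
plt.imshow(image, extent=extents, cmap='viridis')
plt.axis('off')
plt.show()
```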
|
{
"source": "jennwuu/swmmio",
"score": 3
}
|
#### File: swmmio/graphics/utils.py
```python
import pandas as pd
import math
import os
from PIL import Image
def save_image(img, img_path, antialias=True, auto_open=False):
# get the size from the Image object
imgSize = (img.getbbox()[2], img.getbbox()[3])
if antialias:
size = (int(imgSize[0] * 0.5), int(imgSize[1] * 0.5))
        img.thumbnail(size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
img.save(img_path)
if auto_open:
os.startfile(img_path)
def px_to_irl_coords(df, px_width=4096.0, bbox=None, shift_ratio=None):
"""
given a dataframe with element id (as index) and X1, Y1 columns (and
optionally X2, Y2 columns), return a dataframe with the coords as pixel
locations based on the targetImgW.
"""
df = df.loc[pd.notnull(df.coords)]
if not bbox:
xs = [xy[0] for verts in df.coords.tolist() for xy in verts]
ys = [xy[1] for verts in df.coords.tolist() for xy in verts]
xmin, ymin, xmax, ymax = (min(xs), min(ys), max(xs), max(ys))
bbox = [(xmin, ymin), (xmax, ymax)]
else:
df = clip_to_box(df, bbox) # clip if necessary
xmin = float(bbox[0][0])
ymin = float(bbox[0][1])
# find the actual dimensions, use to find scale factor
height = bbox[1][1] - bbox[0][1]
width = bbox[1][0] - bbox[0][0]
if not shift_ratio:
# to scale down from coordinate to pixels
shift_ratio = float(px_width / width)
def shft_coords(row):
# parse through coords (nodes, or link) and adjust for pixel space
return [(int((xy[0] - xmin) * shift_ratio),
int((height - xy[1] + ymin) * shift_ratio))
for xy in row.coords]
# insert new column with the shifted coordinates
draw_coords = df.apply(lambda row: shft_coords(row), axis=1)
if not (draw_coords.empty and df.empty):
df = df.assign(draw_coords=draw_coords)
return df, bbox, int(height * shift_ratio), int(width * shift_ratio), shift_ratio
def circle_bbox(coordinates, radius=5):
"""the bounding box of a circle given as centriod coordinate and radius"""
x = coordinates[0]
y = coordinates[1]
r = radius
return (x - r, y - r, x + r, y + r)
def clip_to_box(df, bbox):
"""clip a dataframe with a coords column to a bounding box"""
def any_xy_in_box(row, bbox):
        # True if any vertex of this element falls inside the bounding box
return any([point_in_box(bbox, pt) for pt in row])
coords = df.coords.tolist()
result = [any_xy_in_box(p, bbox) for p in coords]
return df.loc[result]
def angle_bw_points(xy1, xy2):
dx, dy = (xy2[0] - xy1[0]), (xy2[1] - xy1[1])
angle = (math.atan(float(dx) / float(dy)) * 180 / math.pi)
if angle < 0:
angle = 270 - angle
else:
angle = 90 - angle
    # angle in degrees
return angle
def midpoint(xy1, xy2):
dx, dy = (xy2[0] + xy1[0]), (xy2[1] + xy1[1])
midpt = (int(dx / 2), int(dy / 2.0))
    # midpoint as integer coordinates
return midpt
def point_in_box(bbox, point):
"""check if a point falls with in a bounding box, bbox"""
LB = bbox[0]
RU = bbox[1]
x = point[0]
y = point[1]
if x < LB[0] or x > RU[0]:
return False
elif y < LB[1] or y > RU[1]:
return False
else:
return True
def length_bw_coords(upstreamXY, downstreamXY):
# return the distance (units based on input) between two points
x1 = float(upstreamXY[0])
x2 = float(downstreamXY[0])
y1 = float(upstreamXY[1])
y2 = float(downstreamXY[1])
return math.hypot(x2 - x1, y2 - y1)
def rotate_coord_about_point(xy, radians, origin=(0, 0)):
"""Rotate a point around a given origin
https://gist.github.com/LyleScott/e36e08bfb23b1f87af68c9051f985302
"""
x, y = xy
offset_x, offset_y = origin
adjusted_x = (x - offset_x)
adjusted_y = (y - offset_y)
cos_rad = math.cos(radians)
sin_rad = math.sin(radians)
qx = offset_x + cos_rad * adjusted_x + sin_rad * adjusted_y
qy = offset_y + -sin_rad * adjusted_x + cos_rad * adjusted_y
return qx, qy
```
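A small self-contained sketch exercising `px_to_irl_coords` and the geometry helpers above; the element coordinates are made up, and the import path follows the file header above:
```python
import math
import pandas as pd
from swmmio.graphics.utils import (px_to_irl_coords, point_in_box,
                                   rotate_coord_about_point)

# Toy model elements with real-world coordinates in a `coords` column.
df = pd.DataFrame({
    'coords': [[(0.0, 0.0), (100.0, 50.0)],  # a link with two vertices
               [(50.0, 25.0)]],              # a node with a single vertex
}, index=['C1', 'J1'])

df, bbox, height_px, width_px, ratio = px_to_irl_coords(df, px_width=400.0)
print(df.draw_coords.tolist())    # vertex positions in pixel space
print(bbox, height_px, width_px)  # [(0.0, 0.0), (100.0, 50.0)] 200 400

print(point_in_box(bbox, (50.0, 25.0)))                   # True
print(rotate_coord_about_point((1.0, 0.0), math.pi / 2))  # ~(0.0, -1.0)
```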
#### File: version_control/tests/compare_inp.py
```python
import os
def remove_comments_and_crlf(inp_path, comment_string=';', overwrite=False):
tmpfilename = os.path.splitext(os.path.basename(inp_path))[0] + '_mod.inp'
tmpfilepath = os.path.join(os.path.dirname(inp_path), tmpfilename)
with open (inp_path) as oldf:
with open(tmpfilepath, 'w') as newf:
for line in oldf:
                if comment_string in line:
#remove the comments
if line.strip()[0] == comment_string:
#skip the whole line
pass
else:
#write the line to the left of the comment
                        non_comment_line = line.split(comment_string)[0]
newf.write(non_comment_line + '\n')
elif line == '\n':
pass
else:
newf.write(line)
if overwrite:
os.remove(inp_path)
os.rename(tmpfilepath, inp_path)
def line_by_line(path1, path2, outfile):
"""
given paths to two INP files, return a text file showing where differences
occur in line-by-line fashion. If the order of elements do not match, this
will be recorded as a difference.
ignores any spaces in a file such that lines with more or less white space
having the same non-whitespace will be considered equal.
"""
#outfile =r"P:\06_Tools\v_control\Testing\cleaned\linebyline.txt"
with open(outfile, 'w') as diff_file:
with open (path1) as f1:
with open(path2) as f2:
                line1 = next(f1, '')
                line2 = next(f2, '')
                while line1 and line2:
                    #replace all white space to check only actual content
                    if line1.replace(" ", "") != line2.replace(" ", ""):
                        diff_file.write(line1)
                    #the '' default ends the loop at EOF instead of raising StopIteration
                    line1 = next(f1, '')
                    line2 = next(f2, '')
```
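A short usage sketch for the two helpers above; the INP file paths are hypothetical:
```python
# Hypothetical paths -- substitute real SWMM .inp files.
model_a = 'model_a.inp'
model_b = 'model_b.inp'

# Strip comment lines and blank lines in place so the comparison sees only content.
remove_comments_and_crlf(model_a, overwrite=True)
remove_comments_and_crlf(model_b, overwrite=True)

# Write every line of model_a that differs from model_b to a report file.
line_by_line(model_a, model_b, outfile='inp_differences.txt')
```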
|
{
"source": "jenny0322/food-safety-sample",
"score": 3
}
|
#### File: client/python/env.py
```python
import os
import pprint
import platform
import yaml
def main():
z = add(4, 5)
print("The result of add is %s" %z)
with open("config.yml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)  # safe_load: yaml.load now requires an explicit Loader
for section in cfg:
print(section)
print(cfg['iothub'])
print(cfg['other'])
print(cfg['iothub']['connectionstring'])
env_var = os.environ
print('The operating system version is ' + platform.platform())
print("User Env Variables:")
pprint.pprint(dict(env_var), width = 1)
def add(x, y):
return x + y
if __name__ == "__main__":
main()
```
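The script above expects a `config.yml` with an `iothub.connectionstring` entry and an `other` section; a sketch of writing such a file with placeholder values:
```python
import yaml

# Placeholder values only; a real connection string comes from the Azure portal.
sample_cfg = {
    'iothub': {'connectionstring': 'HostName=<your-hub>.azure-devices.net;...'},
    'other': {'note': 'free-form section read and printed by env.py'},
}
with open('config.yml', 'w') as f:
    yaml.safe_dump(sample_cfg, f)
```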
#### File: client/python/telemetry.py
```python
from applicationinsights import TelemetryClient
import sys,hashlib,os.path,re, uuid
import platform
IKEY = "<KEY>"
LANGUAGE = "Python"
DEVICE = "RaspberryPi"
PROMPT_TEXT = "\nMicrosoft would like to collect data about how users use Azure IoT " \
"samples and some problems they encounter. Microsoft uses this information to improve "\
"our tooling experience. Participation is voluntary and when you choose to participate " \
"your device automatically sends information to Microsoft about how you use Azure IoT "\
"samples. If you want to change this setting after first time, please delete the "\
"telemetry.config file and restart the program. "\
"\n\nSelect y to enable data collection (y/n, default is y). "
class Telemetry:
def __init__(self):
try:
self.telemetry = TelemetryClient(IKEY)
if os.path.exists("telemetry.config"):
config_file = open("telemetry.config", "r")
if config_file.read() == "1":
self.enable_telemetry = True
else:
self.enable_telemetry = False
else:
self.enable_telemetry = self._query_yes_no(PROMPT_TEXT)
config_file = open("telemetry.config", "w")
if self.enable_telemetry:
config_file.write("1")
self.telemetry.track_event("yes", {"device": DEVICE, "language": LANGUAGE})
else:
config_file.write("0")
self.telemetry.context.location.ip = "0.0.0.0"
self.telemetry.track_event("no", {"device": DEVICE, "language": LANGUAGE})
self.telemetry.flush()
except:
pass
def send_telemetry_data(self, iot_hub_name, event, message):
try:
if self.enable_telemetry:
hash_mac = self._get_mac_hash()
hash_iot_hub_name = hashlib.sha256(iot_hub_name.encode("utf-8")).hexdigest()
self.telemetry.track_event(event, {"iothub": hash_iot_hub_name, "message": message,
"language": LANGUAGE, "device": DEVICE, "mac": hash_mac,
"osType": platform.system(), "osPlatform": platform.dist()[0],
"osRelease": platform.dist()[1]})
self.telemetry.flush()
except:
pass
def _get_mac_hash(self):
mac = ":".join(re.findall("..", "%012x" % uuid.getnode()))
return hashlib.sha256(mac.encode("utf-8")).hexdigest()
def _query_yes_no(self, question):
global input
default = "y"
valid = {"y": True, "n": False}
prompt = " [Y/n] "
while True:
sys.stdout.write(question + prompt)
try:
input = raw_input
except NameError:
pass
choice = input().lower()
if default is not None and choice == "":
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'y' or 'n' ")
```
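A sketch of using the `Telemetry` class non-interactively by pre-seeding the consent file; the hub name, event and message are placeholders, and the module name is assumed from the file path above:
```python
from telemetry import Telemetry

# Pre-seed consent so the constructor skips the interactive prompt.
with open("telemetry.config", "w") as f:
    f.write("1")

t = Telemetry()
t.send_telemetry_data("my-iot-hub", "connected", "device started")
```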
#### File: simulator/python/SimulatedDevice.py
```python
import random
import time
# The sample connects to a device-specific MQTT endpoint on your IoT Hub.
from azure.iot.device import IoTHubDeviceClient, Message
# Define the JSON message to send to IoT Hub.
TEMPERATURE = 20.0
HUMIDITY = 60
MSG_TXT = '{{"temperature": {temperature},"humidity": {humidity}}}'
class SimulatedDevice:
def __init__(self, connection_string):
self.device = IoTHubDeviceClient.create_from_connection_string(connection_string)
def send_device_to_cloud_messages(self):
try:
while True:
# Build the message with simulated telemetry values.
temperature = TEMPERATURE + (random.random() * 15)
humidity = HUMIDITY + (random.random() * 20)
msg_txt_formatted = MSG_TXT.format(temperature=temperature, humidity=humidity)
message = Message(msg_txt_formatted)
# Add a custom application property to the message.
# An IoT hub can filter on these properties without access to the message body.
if temperature > 30:
message.custom_properties["temperatureAlert"] = "true"
else:
message.custom_properties["temperatureAlert"] = "false"
# Send the message.
print( "Sending message: {}".format(message) )
self.device.send_message(message)
print ( "Message successfully sent" )
time.sleep(1)
except KeyboardInterrupt:
print("Simulator stopped")
```
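A usage sketch for the simulator; the connection string is a placeholder for a real device connection string, and the module name is assumed from the file path above:
```python
from SimulatedDevice import SimulatedDevice

# Placeholder -- replace with a device connection string from your IoT hub.
CONNECTION_STRING = "HostName=<your-hub>.azure-devices.net;DeviceId=<device-id>;SharedAccessKey=<key>"

device = SimulatedDevice(CONNECTION_STRING)
device.send_device_to_cloud_messages()  # Ctrl-C prints "Simulator stopped" and exits
```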
|
{
"source": "jenny04155/blog_post",
"score": 3
}
|
#### File: jenny04155/blog_post/lorenz2.py
```python
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#one of the critical points
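# For the Lorenz system dx/dt = sigma*(y - x), dy/dt = x*(rho - z) - y,
# dz/dt = x*y - beta*z, the non-origin fixed points sit at
# (+/-sqrt(beta*(rho - 1)), +/-sqrt(beta*(rho - 1)), rho - 1).
# With sigma = 10, rho = 28, beta = 8/3 used below, beta*(rho - 1) = 72,
# so the critical points are (+/-sqrt(72), +/-sqrt(72), 27).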
fx, fy, fz = -np.sqrt(72),-np.sqrt(72),27
# Maximum time point and total number of time points.
N = 10000
step = N/100  # total integration time (100); with N points the time step is ~0.01
tspan = np.linspace(0, step, N)
#Lorenz equations
def lorenz(t, initial):
x, y, z = initial
dxdt = 10*(y - x)
dydt = x*(28 - z) - y
dzdt = x*y - (8/3)*z
return dxdt, dydt, dzdt
# Lorenz initial points.
initial = (1, 1, 1)
# Integrate the Lorenz equations.
soln = solve_ivp(lorenz, (tspan[0], tspan[-1]), initial,
dense_output=True)
# Interpolate solution onto the time grid, t.
x, y, z = soln.sol(tspan)
initial2 = (1.00001, 1.00001, 1.00001)
soln = solve_ivp(lorenz, (tspan[0], tspan[-1]), initial2,
dense_output=True)
# Interpolate solution onto the time grid, t.
x2, y2, z2 = soln.sol(tspan)
# Plot the Lorenz attractor using a Matplotlib 3D projection.
fig = plt.figure(facecolor='k', figsize=(10, 7.5))
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in newer matplotlib
ax.set_facecolor('w')
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
title1 = 'initial point = ' + str(initial)
title2 = 'initial point = ' + str(initial2)
ax.plot(x, y, z, color= 'red', linewidth = 0.1 , label = title1)
ax.plot(x2, y2, z2, color= 'green', linewidth = 0.1, label = title2)
ax.scatter(fx, fy, fz, c = 'blue')
ax.scatter(-fx, -fy, fz, c = 'purple')
ax.legend()
plt.savefig('lorenz.png', dpi=750)
plt.show()
```
|
{
"source": "jenny0719/pyphosphor",
"score": 2
}
|
#### File: obmc/dbuslib/bindings.py
```python
import dbus
import dbus.service
import dbus.exceptions
OBJ_PREFIX = '/xyz/openbmc_project'
def is_unique(connection):
return connection[0] == ':'
def get_dbus():
return dbus.SystemBus()
class DbusProperties(dbus.service.Object):
def __init__(self, **kw):
self.validator = kw.pop('validator', None)
super(DbusProperties, self).__init__(**kw)
self.properties = {}
self._export = False
def unmask_signals(self):
self._export = True
inst = super(DbusProperties, self)
if hasattr(inst, 'unmask_signals'):
inst.unmask_signals()
def mask_signals(self):
self._export = False
inst = super(DbusProperties, self)
if hasattr(inst, 'mask_signals'):
inst.mask_signals()
@dbus.service.method(
dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v')
def Get(self, interface_name, property_name):
d = self.GetAll(interface_name)
try:
v = d[property_name]
return v
except Exception:
raise dbus.exceptions.DBusException(
"Unknown property: '{}'".format(property_name),
name="org.freedesktop.DBus.Error.UnknownProperty")
@dbus.service.method(
dbus.PROPERTIES_IFACE,
in_signature='s', out_signature='a{sv}')
def GetAll(self, interface_name):
try:
d = self.properties[interface_name]
return d
except Exception:
raise dbus.exceptions.DBusException(
"Unknown interface: '{}'".format(interface_name),
name="org.freedesktop.DBus.Error.UnknownInterface")
@dbus.service.method(
dbus.PROPERTIES_IFACE,
in_signature='ssv')
def Set(self, interface_name, property_name, new_value):
if (interface_name not in self.properties):
self.properties[interface_name] = {}
if self.validator:
self.validator(interface_name, property_name, new_value)
try:
old_value = self.properties[interface_name][property_name]
if (old_value != new_value):
self.properties[interface_name][property_name] = new_value
if self._export:
self.PropertiesChanged(
interface_name, {property_name: new_value}, [])
except Exception:
self.properties[interface_name][property_name] = new_value
if self._export:
self.PropertiesChanged(
interface_name, {property_name: new_value}, [])
@dbus.service.method(
"org.openbmc.Object.Properties", in_signature='sa{sv}')
def SetMultiple(self, interface_name, prop_dict):
if (interface_name not in self.properties):
self.properties[interface_name] = {}
value_changed = False
for property_name in prop_dict:
new_value = prop_dict[property_name]
try:
old_value = self.properties[interface_name][property_name]
if (old_value != new_value):
self.properties[interface_name][property_name] = new_value
value_changed = True
except Exception:
self.properties[interface_name][property_name] = new_value
value_changed = True
if (value_changed is True and self._export):
self.PropertiesChanged(interface_name, prop_dict, [])
@dbus.service.signal(
dbus.PROPERTIES_IFACE, signature='sa{sv}as')
def PropertiesChanged(
self, interface_name, changed_properties, invalidated_properties):
pass
def add_interfaces_to_class(cls, ifaces):
"""
The built-in Introspect method in dbus-python doesn't find
interfaces if the @method or @signal decorators aren't used
(property-only interfaces). Use this method on a class
derived from dbus.service.Object to help the dbus-python provided
Introspect method find these interfaces.
Arguments:
cls -- The dbus.service.Object superclass to add interfaces to.
ifaces -- The property-only interfaces to add to the class.
"""
for iface in ifaces:
class_table_key = '{}.{}'.format(cls.__module__, cls.__name__)
cls._dbus_class_table[class_table_key].setdefault(iface, {})
def add_interfaces(ifaces):
"""
A class decorator for add_interfaces_to_class.
"""
def decorator(cls):
undecorated = cls.__init__
def ctor(obj, *a, **kw):
undecorated(obj, *a, **kw)
add_interfaces_to_class(cls, ifaces)
cls.__init__ = ctor
return cls
return decorator
class DbusObjectManager(dbus.service.Object):
def __init__(self, **kw):
super(DbusObjectManager, self).__init__(**kw)
self.objects = {}
self._export = False
def unmask_signals(self):
self._export = True
inst = super(DbusObjectManager, self)
if hasattr(inst, 'unmask_signals'):
inst.unmask_signals()
def mask_signals(self):
self._export = False
inst = super(DbusObjectManager, self)
if hasattr(inst, 'mask_signals'):
inst.mask_signals()
def add(self, object_path, obj):
self.objects[object_path] = obj
if self._export:
self.InterfacesAdded(object_path, obj.properties)
def remove(self, object_path):
obj = self.objects.pop(object_path, None)
obj.remove_from_connection()
if self._export:
self.InterfacesRemoved(object_path, list(obj.properties.keys()))
def get(self, object_path, default=None):
return self.objects.get(object_path, default)
@dbus.service.method(
"org.freedesktop.DBus.ObjectManager",
in_signature='', out_signature='a{oa{sa{sv}}}')
def GetManagedObjects(self):
data = {}
for objpath in list(self.objects.keys()):
data[objpath] = self.objects[objpath].properties
return data
@dbus.service.signal(
"org.freedesktop.DBus.ObjectManager", signature='oa{sa{sv}}')
def InterfacesAdded(self, object_path, properties):
pass
@dbus.service.signal(
"org.freedesktop.DBus.ObjectManager", signature='oas')
def InterfacesRemoved(self, object_path, interfaces):
pass
```
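A minimal sketch of wiring these classes together; the bus name, object path suffix and interface name are placeholders, and a running D-Bus daemon (plus permission to claim the name) is assumed:
```python
import dbus
import dbus.service
from obmc.dbuslib.bindings import (DbusProperties, DbusObjectManager,
                                   add_interfaces, get_dbus, OBJ_PREFIX)

EXAMPLE_IFACE = 'xyz.openbmc_project.Example.Sensor'  # placeholder interface name

@add_interfaces([EXAMPLE_IFACE])
class ExampleObject(DbusProperties, DbusObjectManager):
    def __init__(self, bus, path):
        super(ExampleObject, self).__init__(conn=bus, object_path=path)

bus = get_dbus()
name = dbus.service.BusName('xyz.openbmc_project.Example', bus)  # placeholder bus name
obj = ExampleObject(bus, OBJ_PREFIX + '/example')
obj.Set(EXAMPLE_IFACE, 'Value', 42)  # stored in obj.properties, no signal yet
obj.unmask_signals()                 # later Set() calls emit PropertiesChanged
```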
#### File: obmc/dbuslib/propertycacher.py
```python
import json
import os
# TODO: openbmc/openbmc#2994 remove python 2 support
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
CACHE_PATH = '/var/cache/obmc/'
def getCacheFilename(obj_path, iface_name):
name = obj_path.replace('/', '.')
filename = CACHE_PATH + name[1:] + "@" + iface_name + ".props"
return filename
def save(obj_path, iface_name, properties):
print("Caching: "+ obj_path)
filename = getCacheFilename(obj_path, iface_name)
parent = os.path.dirname(filename)
try:
if not os.path.exists(parent):
os.makedirs(parent)
with open(filename, 'wb') as output:
try:
# use json module to convert dbus datatypes
props = json.dumps(properties[iface_name])
prop_obj = json.loads(props)
pickle.dump(prop_obj, output)
except Exception as e:
print("ERROR: " + str(e))
except Exception:
print("ERROR opening cache file: " + filename)
def load(obj_path, iface_name, properties):
# overlay with pickled data
filename = getCacheFilename(obj_path, iface_name)
if (os.path.isfile(filename)):
        # start from an empty dict so the cached values fully define this interface
        properties[iface_name] = {}
        print("Loading from cache: " + filename)
        try:
            # a with-block guarantees the file is closed even if loading fails
            with open(filename, 'rb') as p:
                data = pickle.load(p)
            for prop in list(data.keys()):
                properties[iface_name][prop] = data[prop]
        except Exception as e:
            print("ERROR: Loading cache file: " + str(e))
```
#### File: pyphosphor/obmc/sensors.py
```python
import os
import subprocess
import dbus
import dbus.service
from obmc.dbuslib.bindings import DbusProperties
# Abstract class, must subclass
class SensorValue(DbusProperties):
IFACE_NAME = 'org.openbmc.SensorValue'
def __init__(self, bus, name):
self.Set(SensorValue.IFACE_NAME, 'units', "")
self.Set(SensorValue.IFACE_NAME, 'error', False)
@dbus.service.method(
IFACE_NAME, in_signature='v', out_signature='')
def setValue(self, value):
self.Set(SensorValue.IFACE_NAME, 'value', value)
@dbus.service.method(
IFACE_NAME, in_signature='', out_signature='v')
def getValue(self):
return self.Get(SensorValue.IFACE_NAME, 'value')
class VirtualSensor(SensorValue):
def __init__(self, bus, name):
DbusProperties.__init__(self)
SensorValue.__init__(self, bus, name)
dbus.service.Object.__init__(self, bus, name)
CONTROL_IFACE = 'org.openbmc.Control'
```
|
{
"source": "jennyabr/faster-rcnn.pytorch",
"score": 3
}
|
#### File: model/utils/factory_utils.py
```python
from importlib import import_module
def get_class_from_package(package_full_path, class_rel_path, abstract_class):
module_name, class_name = class_rel_path.rsplit('.', 1)
try:
class_module = import_module(package_full_path + '.' + module_name)
returned_class = getattr(class_module, class_name)
if not issubclass(returned_class, abstract_class):
raise ImportError(
"{} is not a subclass of the given abstract class.".format(returned_class))
return returned_class
except (AttributeError, ModuleNotFoundError):
raise ImportError('{} is not part of the package!'.format(class_name))
def get_optimizer_class(optimizer_name):
from torch.optim import Optimizer
module_name = optimizer_name.lower()
class_rel_path = module_name + '.' + optimizer_name
optimizer_class = get_class_from_package('torch.optim', class_rel_path, Optimizer)
return optimizer_class
```
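A quick sketch of what these helpers resolve to in practice (PyTorch installed; the import path follows the file header above):
```python
import torch
from model.utils.factory_utils import get_optimizer_class

OptimizerCls = get_optimizer_class('SGD')   # imports torch.optim.sgd and returns SGD
params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = OptimizerCls(params, lr=0.01, momentum=0.9)
print(type(optimizer))                      # <class 'torch.optim.sgd.SGD'>
```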
#### File: pipeline/faster_rcnn/faster_rcnn_visualization.py
```python
import logging
import pickle
import time
import cv2
import numpy as np
import os
logger = logging.getLogger(__name__)
def faster_rcnn_visualization(data_manager, cfg, epoch_num):
pp_preds_path = cfg.get_postprocessed_detections_path(epoch_num)
logger.info("--->>> Starting visualization, reading post-processing data from: {}.".format(pp_preds_path))
with open(pp_preds_path, 'rb') as f:
bboxes = pickle.load(f)
visualizations_dir = os.path.dirname(cfg.get_img_visualization_path(epoch_num, 0))
start_time = time.time()
for i in range(data_manager.num_images):
im = cv2.imread(data_manager.imdb.image_path_at(i))
im2show = np.copy(im)
for j in range(1, data_manager.num_classes):
cls_bboxes = bboxes[j, i]
n_bboxes_to_visualize = np.minimum(10, cls_bboxes.shape[0])
for bbox_ind in range(n_bboxes_to_visualize):
bbox_coords = tuple(int(np.round(coords)) for coords in cls_bboxes[bbox_ind, :4])
bbox_score = cls_bboxes[bbox_ind, -1]
if bbox_score > 0.3:
cv2.rectangle(im2show, bbox_coords[0:2], bbox_coords[2:4], (0, 204, 0), 2)
class_name = data_manager.imdb.classes[j]
cv2.putText(im2show,
'{0}: {1:.3f}'.format(class_name, bbox_score),
(bbox_coords[0] + 15, bbox_coords[1]),
cv2.FONT_HERSHEY_PLAIN,
1.0,
(0, 0, 255),
thickness=1)
cv2.imwrite(cfg.get_img_visualization_path(epoch_num, i), im2show)
if i % cfg.TEST.disp_interval == 0 and i > 0:
logger.info("Visualization in-progress: {}/{}.".format(i, data_manager.num_images))
end_time = time.time()
logger.info("Visualization dir path: {}.".format(visualizations_dir))
logger.info("-------------- Visualization time: {:.4f} s. --------------".format(end_time - start_time))
```
|
{
"source": "jennyb2911/chameleon-2",
"score": 2
}
|
#### File: chameleon-2/sql/face.py
```python
import psycopg2
import config as c
import numpy as np
import time
import json
from scipy.special import expit
class Face:
def __init__(self):
self.conn = psycopg2.connect("dbname=" + c.config["Db"]["dbname"] + " host=" + c.config["Db"]["Host"] + " port=" + c.config["Db"]["Port"])
self.cur = self.conn.cursor()
def get_feature_repo(self):
self.cur.execute("SELECT id,name,feature,face_img FROM " + c.config["Db"]["table"] + " WHERE status=0")
self.repo = self.cur.fetchall()
def match(self, na, score=0.85):
# Cache all database face features
self.get_feature_repo()
uid = 0
tmp = 0
most_face = ''
face_img = ''
for i in self.repo:
distance = np.linalg.norm(na - i[2])
conf = expit((6000 - distance) / 1000)
if conf > tmp and conf >= score:
uid = i[0]
tmp = conf
most_face = i[1]
face_img = i[3]
ret = {
"id": uid,
"name": most_face,
"face_img": face_img,
"attended_at": time.time(),
"score": tmp,
"status": 1
}
print(json.dumps(ret, ensure_ascii=False))
def __del__(self):
self.cur.close()
self.conn.close()
```
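A usage sketch, assuming the PostgreSQL instance configured in `config.py` is reachable and the face table holds feature vectors comparable to the query; the 128-d placeholder below stands in for a real face embedding:
```python
import numpy as np
from face import Face  # module name assumed from the file path above

f = Face()
query_feature = np.zeros(128)       # placeholder; normally produced by a face-embedding model
f.match(query_feature, score=0.85)  # prints the best match (or id 0 if none) as JSON
```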
|
{
"source": "jennyb2911/infrastructure-puppet",
"score": 2
}
|
#### File: git_self_serve/files/mirrorcron.py
```python
import os, sys, re, urllib, json, subprocess, base64  # base64 is needed by getJSON() when credentials are supplied
import time
import urllib.request
import smtplib
from email.mime.text import MIMEText
# Function for fetching JSON via HTTPS
def getJSON(url, creds = None, cookie = None):
headers = {}
if creds and len(creds) > 0:
xcreds = creds.encode(encoding='ascii', errors='replace')
auth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers = {"Content-type": "application/json",
"Accept": "*/*",
"Authorization": "Basic %s" % auth
}
request = urllib.request.Request(url, headers = headers)
result = urllib.request.urlopen(request)
return json.loads(result.read().decode('utf-8', errors = 'replace'))
# Get the current queue
js = getJSON("https://reporeq.apache.org/queue.json")
created = 0
# If queue is valid:
if js:
print("analysing %u items" % len(js))
# For each item:
# - Check that it hasn't been mirrored yet
# - Check that a repo with this name doesn't exist already
# - Check that name is valid
# - Mirror repo if all is okay
for item in js:
if not 'mirrored' in item and item['mirror'] == True and 'created' in item:
reponame = item['name']
# Check valid name
if len(reponame) < 5 or reponame.find("..") != -1 or reponame.find("/") != -1:
print("Invalid repo name!")
continue
# Set some vars
notify = item['notify']
description = item['description'] if 'description' in item else "Unknown"
# If repo doesn't already exist, create it
if not os.path.exists("/x1/git/mirrors/%s" % reponame):
print("%s is a new repo, creating it..." % reponame)
try:
inp = subprocess.check_output("/x1/git/bin/create-mirror-from-git.sh %s \"%s\"" % (reponame, description), shell = True).decode('ascii', 'replace')
except subprocess.CalledProcessError as err:
print("Borked: %s" % err.output)
continue
else:
print("Repo already exists, ignoring this request...sort of")
# Notify reporeq that we've created this repository!
print("Notifying https://reporeq.apache.org/ss.lua?mirrored=%s" % reponame)
request = urllib.request.Request("https://reporeq.apache.org/ss.lua?mirrored=%s" % reponame)
result = urllib.request.urlopen(request)
# Inform infra@ and private@$pmc that the mirror has been set up
msg = MIMEText("New repository %s was mirrored to git.a.o (and thus GitHub), as requested by %s.\nNew mirrors are available on GitHub no more than 24 hours later.\n\nWith regards,\nApache Infrastructure." % (reponame, item['requester']))
msg['Subject'] = 'New git mirror created: %s' % reponame
msg['From'] = "<EMAIL>"
msg['Reply-To'] = "<EMAIL>"
msg['To'] = "<EMAIL>, <EMAIL>" % item['pmc']
s = smtplib.SMTP(host='mail.apache.org', port=2025)
s.send_message(msg)
s.quit()
# We made a thing!
created += 1
print("All done for today! Made %u new repos" % created)
```
|
{
"source": "jennybae1024/DFGN-pytorch",
"score": 2
}
|
#### File: DFGN/tools/data_helper.py
```python
from os.path import join
import gzip
import pickle
import json
from tqdm import tqdm
from tools.data_iterator_pack import DataIteratorPack
class DataHelper:
def __init__(self, gz=True, config=None):
self.DataIterator = DataIteratorPack
self.gz = gz
self.suffix = '.pkl.gz' if gz else '.pkl'
self.data_dir = 'data'
self.subset_file = join(self.data_dir, 'subset.json')
self.__train_features__ = None
self.__dev_features__ = None
self.__train_examples__ = None
self.__dev_examples__ = None
self.__train_graphs__ = None
self.__dev_graphs__ = None
self.__train_example_dict__ = None
self.__dev_example_dict__ = None
self.config = config
@property
def sent_limit(self):
return 25
@property
def entity_limit(self):
return 80
@property
def n_type(self):
return 2
def get_feature_file(self, tag):
return join(self.data_dir, tag + '_feature' + self.suffix)
def get_example_file(self, tag):
return join(self.data_dir, tag + '_example' + self.suffix)
def get_graph_file(self, tag):
return join(self.data_dir, tag + '_graph' + self.suffix)
@property
def train_feature_file(self):
return self.get_feature_file('train')
@property
def dev_feature_file(self):
return self.get_feature_file('dev')
@property
def train_example_file(self):
return self.get_example_file('train')
@property
def dev_example_file(self):
return self.get_example_file('dev')
@property
def train_graph_file(self):
return self.get_graph_file('train')
@property
def dev_graph_file(self):
return self.get_graph_file('dev')
@staticmethod
def compress_pickle(pickle_file_name):
def abbr(obj):
obj_str = str(obj)
if len(obj_str) > 100:
return obj_str[:20] + ' ... ' + obj_str[-20:]
else:
return obj_str
def get_obj_dict(pickle_obj):
if isinstance(pickle_obj, list):
obj = pickle_obj[0]
elif isinstance(pickle_obj, dict):
obj = list(pickle_obj.values())[0]
else:
obj = pickle_obj
if isinstance(obj, dict):
return obj
else:
return obj.__dict__
pickle_obj = pickle.load(open(pickle_file_name, 'rb'))
for k, v in get_obj_dict(pickle_obj).items():
print(k, abbr(v))
with gzip.open(pickle_file_name + '.gz', 'wb') as fout:
pickle.dump(pickle_obj, fout)
pickle_obj = pickle.load(gzip.open(pickle_file_name + '.gz', 'rb'))
for k, v in get_obj_dict(pickle_obj).items():
print(k, abbr(v))
def __load__(self, file):
if file.endswith('json'):
return json.load(open(file, 'r'))
with self.get_pickle_file(file) as fin:
print('loading', file)
return pickle.load(fin)
def get_pickle_file(self, file_name):
if self.gz:
return gzip.open(file_name, 'rb')
else:
return open(file_name, 'rb')
def __get_or_load__(self, name, file):
if getattr(self, name) is None:
with self.get_pickle_file(file) as fin:
print('loading', file)
setattr(self, name, pickle.load(fin))
return getattr(self, name)
# Features
@property
def train_features(self):
return self.__get_or_load__('__train_features__', self.train_feature_file)
@property
def dev_features(self):
return self.__get_or_load__('__dev_features__', self.dev_feature_file)
# Examples
@property
def train_examples(self):
return self.__get_or_load__('__train_examples__', self.train_example_file)
@property
def dev_examples(self):
return self.__get_or_load__('__dev_examples__', self.dev_example_file)
# Graphs
@property
def train_graphs(self):
return self.__get_or_load__('__train_graphs__', self.train_graph_file)
@property
def dev_graphs(self):
return self.__get_or_load__('__dev_graphs__', self.dev_graph_file)
# Example dict
@property
def train_example_dict(self):
if self.__train_example_dict__ is None:
self.__train_example_dict__ = {e.qas_id: e for e in self.train_examples}
return self.__train_example_dict__
@property
def dev_example_dict(self):
if self.__dev_example_dict__ is None:
self.__dev_example_dict__ = {e.qas_id: e for e in self.dev_examples}
return self.__dev_example_dict__
# Feature dict
@property
def train_feature_dict(self):
return {e.qas_id: e for e in self.train_features}
@property
def dev_feature_dict(self):
return {e.qas_id: e for e in self.dev_features}
# Load
def load_dev(self):
return self.dev_features, self.dev_example_dict, self.dev_graphs
def load_train(self):
return self.train_features, self.train_example_dict, self.train_graphs
def load_train_subset(self, subset):
assert subset is not None
keylist = set(json.load(open(self.subset_file, 'r'))[subset])
train_examples = [e for e in tqdm(self.train_examples, desc='sub_ex') if e.qas_id in keylist]
train_example_dict = {e.qas_id: e for e in train_examples}
train_features = [f for f in tqdm(self.train_features, desc='sub_fe') if f.qas_id in keylist]
train_graphs = {k: self.train_graphs[k] for k in tqdm(keylist, desc='sub_graph')}
print('subset: {}, total: {}'.format(subset, len(train_graphs)))
return train_features, train_example_dict, train_graphs
@property
def dev_loader(self):
return self.DataIterator(*self.load_dev(),
bsz=self.config.batch_size,
device='cuda:{}'.format(self.config.model_gpu),
sent_limit=self.sent_limit,
entity_limit=self.entity_limit,
sequential=True,
n_layers=self.config.n_layers)
@property
def train_loader(self):
return self.DataIterator(*self.load_train(),
bsz=self.config.batch_size,
device='cuda:{}'.format(self.config.model_gpu),
sent_limit=self.sent_limit,
entity_limit=self.entity_limit,
sequential=False,
n_layers=self.config.n_layers)
@property
def train_sub_loader(self):
return self.DataIterator(*self.load_train_subset('qat'),
bsz=self.config.batch_size,
device='cuda:{}'.format(self.config.model_gpu),
sent_limit=self.sent_limit,
entity_limit=self.entity_limit,
sequential=False,
n_layers=self.config.n_layers)
```
#### File: paragraph_selection/Feature_extraction/text_to_tok.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import math
import os
import random
import pickle
from tqdm import tqdm, trange
from os.path import join
from collections import Counter
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import whitespace_tokenize, BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
class Example(object):
def __init__(self,
qas_id,
qas_type,
doc_tokens,
question_text,
sent_num,
sent_names,
sup_fact_id,
sent_start_end_position,
entity_start_end_position,
orig_answer_text=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.qas_type = qas_type
self.doc_tokens = doc_tokens
self.question_text = question_text
self.sent_num = sent_num
self.sent_names = sent_names
self.sup_fact_id = sup_fact_id
self.sent_start_end_position = sent_start_end_position
self.entity_start_end_position = entity_start_end_position
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
qas_id,
doc_tokens,
doc_input_ids,
doc_input_mask,
doc_segment_ids,
query_tokens,
query_input_ids,
query_input_mask,
query_segment_ids,
sent_spans,
entity_spans,
sup_fact_ids,
token_to_orig_map,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.doc_tokens = doc_tokens
self.doc_input_ids = doc_input_ids
self.doc_input_mask = doc_input_mask
self.doc_segment_ids = doc_segment_ids
self.query_tokens = query_tokens
self.query_input_ids = query_input_ids
self.query_input_mask = query_input_mask
self.query_segment_ids = query_segment_ids
self.sent_spans = sent_spans
self.entity_spans = entity_spans
self.sup_fact_ids = sup_fact_ids
self.token_to_orig_map = token_to_orig_map
self.start_position = start_position
self.end_position = end_position
def clean_entity(entity):
Type = entity[1]
Text = entity[0]
if Type == "DATE" and ',' in Text:
Text = Text.replace(' ,', ',')
if '?' in Text:
Text = Text.split('?')[0]
Text = Text.replace("\'\'", "\"")
Text = Text.replace("# ", "#")
return Text
def check_in_full_paras(answer, paras):
full_doc = ""
for p in paras:
full_doc += " ".join(p[1])
return answer in full_doc
def read_hotpot_examples(para_file, full_file, entity_file):
with open(para_file, 'r', encoding='utf-8') as reader:
para_data = json.load(reader)
with open(full_file, 'r', encoding='utf-8') as reader:
full_data = json.load(reader)
with open(entity_file, 'r', encoding='utf-8') as reader:
entity_data = json.load(reader)
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
# for debug
actually_in_case = 0
failed_case = 0
failed_sup_case = 0
failed_case_paranum = []
para_length = []
bert_cutted_para_length = []
for case in tqdm(full_data):
key = case['_id']
qas_type = case['type']
sup_facts = set([(sp[0], sp[1])for sp in case['supporting_facts']])
orig_answer_text = case['answer']
sent_id = 0
doc_tokens = []
sent_names = []
sup_facts_sent_id = []
sent_start_end_position = []
entity_start_end_position = []
JUDGE_FLAG = orig_answer_text == 'yes' or orig_answer_text == 'no'
FIND_FLAG = False
char_to_word_offset = [] # Accumulated along all sentences
prev_is_whitespace = True
ans_start_position = None
ans_end_position = None
# for debug
titles = set()
for paragraph in para_data[key]:
title = paragraph[0]
sents = paragraph[1]
if title in entity_data[key]:
entities = entity_data[key][title]
else:
entities = []
titles.add(title)
for local_sent_id, sent in enumerate(sents):
# Determine the global sent id for supporting facts
local_sent_name = (title, local_sent_id)
sent_names.append(local_sent_name)
if local_sent_name in sup_facts:
sup_facts_sent_id.append(sent_id)
sent_id += 1
sent += " "
sent_start_word_id = len(doc_tokens)
sent_start_char_id = len(char_to_word_offset)
for c in sent:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
sent_end_word_id = len(doc_tokens) - 1
sent_start_end_position.append((sent_start_word_id, sent_end_word_id))
# Answer char position
answer_offset = sent.find(orig_answer_text)
if not JUDGE_FLAG and not FIND_FLAG and answer_offset != -1:
FIND_FLAG = True
start_char_position = sent_start_char_id + answer_offset
end_char_position = start_char_position + len(orig_answer_text) - 1
ans_start_position = char_to_word_offset[start_char_position]
ans_end_position = char_to_word_offset[end_char_position]
# Find Entity Position
entity_pointer = 0
for entity in entities:
entity_text = clean_entity(entity)
entity_offset = sent.find(entity_text)
if entity_offset != -1:
entity_pointer += 1
start_char_position = sent_start_char_id + entity_offset
end_char_position = start_char_position + len(entity_text) - 1
ent_start_position = char_to_word_offset[start_char_position]
ent_end_position = char_to_word_offset[end_char_position]
entity_start_end_position.append((ent_start_position, ent_end_position, entity_text))
else:
break
entities = entities[entity_pointer:]
# Truncate longer document
if len(doc_tokens) > 382:
break
para_length.append(len(doc_tokens))
# for ent in entity_start_end_position:
# print(ent)
# print(" ".join(doc_tokens[ent[0]:ent[1]+1]))
# input()
example = Example(
qas_id=key,
qas_type=qas_type,
doc_tokens=doc_tokens,
question_text=case['question'],
sent_num=sent_id + 1,
sent_names=sent_names,
sup_fact_id=sup_facts_sent_id,
sent_start_end_position=sent_start_end_position,
entity_start_end_position=entity_start_end_position,
orig_answer_text=orig_answer_text,
start_position=ans_start_position,
end_position=ans_end_position)
examples.append(example)
return examples
# # Check Finding Answer position
# if not JUDGE_FLAG:
# if end_position is None:
# failed_case += 1
# failed_case_paranum.append(len(para_data[key]))
# print(orig_answer_text)
# print(key)
# print("({}, {})".format(start_position, end_position))
# actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
# cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
# if actual_text.find(cleaned_answer_text) == -1:
# print("Could not find answer:")
# print("ACTUAL: ", actual_text)
# print("CLEANED: ", cleaned_answer_text)
# print(doc_tokens)
# input()
#
# # Check finding supporting facts
# if len(sup_facts) != len(sup_facts_sent_id):
# failed_sup_case += 1
# print("gets:", titles)
# print("facts:", set([sp[0] for sp in sup_facts]))
# print("facts id:", sup_facts_sent_id)
#
# print("no answer: ", failed_case)
# print("lack sup fact: ", failed_sup_case)
# print("total cases: ", len(full_data))
# print("noanswer para num: ", Counter(failed_case_paranum))
# print("Avg bert len: ", np.average(np.array(bert_cutted_para_length)))
# print("Max bert len: ", np.max(np.array(bert_cutted_para_length)))
# np.array(bert_cutted_para_length).dump("bert_doc_len_aug.np")
def convert_examples_to_features(examples, tokenizer, max_seq_length, max_query_length):
features = []
failed = 0
ans_failed = 0
for (example_index, example) in enumerate(tqdm(examples)):
query_tokens = ["[CLS]"] + tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length - 1:
query_tokens = query_tokens[:max_query_length - 1]
query_tokens.append("[SEP]")
tok_to_orig_index = [0]
orig_to_tok_index = []
orig_to_tok_back_index = []
all_doc_tokens = ["[CLS]"]
sentence_spans = []
entity_spans = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
orig_to_tok_back_index.append(len(all_doc_tokens) - 1)
def relocate_tok_span(orig_start_position, orig_end_position, orig_text):
if orig_start_position is None:
return 0, 0
global tokenizer
nonlocal orig_to_tok_index, example, all_doc_tokens
tok_start_position = orig_to_tok_index[orig_start_position]
if orig_end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[orig_end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
# Make answer span more accurate.
return _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, orig_text)
ans_start_position, ans_end_position \
= relocate_tok_span(example.start_position, example.end_position, example.orig_answer_text)
for entity_span in example.entity_start_end_position:
ent_start_position, ent_end_position \
= relocate_tok_span(entity_span[0], entity_span[1], entity_span[2])
entity_spans.append((ent_start_position, ent_end_position, entity_span[2]))
for sent_span in example.sent_start_end_position:
if sent_span[0] >= len(orig_to_tok_index) or sent_span[0] >= sent_span[1]:
continue
sent_start_position = orig_to_tok_index[sent_span[0]]
sent_end_position = orig_to_tok_back_index[sent_span[1]]
sentence_spans.append((sent_start_position, sent_end_position))
# Padding Document
all_doc_tokens = all_doc_tokens[:max_seq_length - 1] + ["[SEP]"]
doc_input_ids = tokenizer.convert_tokens_to_ids(all_doc_tokens)
doc_input_mask = [1] * len(doc_input_ids)
doc_segment_ids = [0] * len(doc_input_ids)
while len(doc_input_ids) < max_seq_length:
doc_input_ids.append(0)
doc_input_mask.append(0)
doc_segment_ids.append(0)
# Padding Question
query_input_ids = tokenizer.convert_tokens_to_ids(query_tokens)
query_input_mask = [1] * len(query_input_ids)
query_segment_ids = [0] * len(query_input_ids)
while len(query_input_ids) < max_query_length:
query_input_ids.append(0)
query_input_mask.append(0)
query_segment_ids.append(0)
assert len(doc_input_ids) == max_seq_length
assert len(doc_input_mask) == max_seq_length
assert len(doc_segment_ids) == max_seq_length
assert len(query_input_ids) == max_query_length
assert len(query_input_mask) == max_query_length
assert len(query_segment_ids) == max_query_length
# Dropout out-of-bound span
entity_spans = entity_spans[:_largest_valid_index(entity_spans, max_seq_length)]
sentence_spans = sentence_spans[:_largest_valid_index(sentence_spans, max_seq_length)]
sup_fact_ids = example.sup_fact_id
sent_num = len(sentence_spans)
sup_fact_ids = [sent_id for sent_id in sup_fact_ids if sent_id < sent_num]
if len(sup_fact_ids) != len(example.sup_fact_id):
failed += 1
if ans_start_position >= 512:
ans_failed += 1
# print("ALL:\n", all_doc_tokens)
# print("MASK:\n", doc_input_mask)
# print("ANS:\n", all_doc_tokens[ans_start_position: ans_end_position + 1])
# print("SP_FACTS: \n", sup_fact_ids)
# print("ANSWER:\n", example.orig_answer_text)
# print("ANSWER:\n", all_doc_tokens[ans_start_position: ans_end_position + 1])
# for i, sent in enumerate(sentence_spans):
# print("sent{}:\n".format(i), all_doc_tokens[sent[0]: sent[1] + 1])
# os, oe = tok_to_orig_index[sent[0]], tok_to_orig_index[sent[1]]
# print("ORI_SENT:\n", example.doc_tokens[os: oe + 1])
# input()
# for ent in entity_spans:
# if ent[0] >= sent[0] and ent[1] <= sent[1]:
# print("ORI: ", ent[2])
# print("NEW: ", all_doc_tokens[ent[0] : ent[1] + 1])
# input()
features.append(
InputFeatures(qas_id=example.qas_id,
doc_tokens=all_doc_tokens,
doc_input_ids=doc_input_ids,
doc_input_mask=doc_input_mask,
doc_segment_ids=doc_segment_ids,
query_tokens=query_tokens,
query_input_ids=query_input_ids,
query_input_mask=query_input_mask,
query_segment_ids=query_segment_ids,
sent_spans=sentence_spans,
entity_spans=entity_spans,
sup_fact_ids=sup_fact_ids,
token_to_orig_map=tok_to_orig_index,
start_position=ans_start_position,
end_position=ans_end_position)
)
# print("Failed: ", failed)
# print("Ans_Failed: ", ans_failed)
return features
def _largest_valid_index(spans, limit):
    """Return the number of leading spans that end before `limit`."""
    for idx in range(len(spans)):
        if spans[idx][1] >= limit:
            return idx
    return len(spans)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
if __name__ == '__main__':
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--bert_model", default='bert-base-cased', type=str)
## Other parameters
parser.add_argument("--do_lower_case", default=False, action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--layers", default="-1,-2,-3", type=str)
parser.add_argument("--max_seq_length", default=512, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
"than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--batch_size", default=15, type=int, help="Batch size for predictions.")
args = parser.parse_args()
data_path = '/media/disk2/jennybae/HotpotQA/'
dev_para_path = join(data_path, 'Selected_Paras', 'dev_paras.json')
dev_full_path = join(data_path, 'hotpot_dev_distractor_v1.json')
dev_entity_path = join(data_path, 'Selected_Paras', 'dev_entity.json')
# examples = read_hotpot_examples(para_file=dev_para_path, full_file=dev_full_path, entity_file=dev_entity_path)
# examples = pickle.load(open(join(data_path, 'BERT_Features', 'examples', 'dev_example.pkl'), 'rb'))
# pickle.dump(examples, open(join(data_path, 'BERT_Features', 'examples', 'dev_example.pkl'), 'wb'))
# features = convert_examples_to_features(examples, tokenizer, max_seq_length=512, max_query_length=50)
# pickle.dump(features, open(join(data_path, 'BERT_Features', 'features', 'dev_feature.pkl'), 'wb'))
features = pickle.load(open(join(data_path, 'BERT_Features', 'features', 'dev_feature.pkl'), 'rb'))
model = BertModel.from_pretrained(args.bert_model)
model.cuda()
model = torch.nn.DataParallel(model)
model.eval()
doc_input_ids = torch.LongTensor([f.doc_input_ids for f in features])
doc_input_mask = torch.LongTensor([f.doc_input_mask for f in features])
query_input_ids = torch.LongTensor([f.query_input_ids for f in features])
query_input_mask = torch.LongTensor([f.query_input_mask for f in features])
all_example_indices = torch.arange(doc_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(doc_input_ids, doc_input_mask, query_input_ids, query_input_mask, all_example_indices)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
layer_indices = [int(x) for x in args.layers.split(",")]
output_json = dict()
out_feature_dir = join(data_path, 'BERT_Features', 'layers')
file_id = 0
for doc_ids, doc_mask, query_ids, query_mask, example_indices in tqdm(eval_dataloader):
doc_ids = doc_ids.cuda()
doc_mask = doc_mask.cuda()
all_doc_encoder_layers, _ = model(doc_ids, token_type_ids=None, attention_mask=doc_mask)
all_query_encoder_layers, _ = model(query_ids, token_type_ids=None, attention_mask=query_mask)
selected_doc_layers = [all_doc_encoder_layers[layer_index].detach().cpu().numpy() for layer_index in layer_indices]
selected_query_layers = [all_query_encoder_layers[layer_index].detach().cpu().numpy() for layer_index in layer_indices]
for b, example_index in enumerate(example_indices):
feature = features[example_index.item()]
case = dict()
case['query'] = [layer[b] for layer in selected_query_layers]
case['doc'] = [layer[b] for layer in selected_doc_layers]
output_json[feature.qas_id] = case
output_file = join(out_feature_dir, "dev_layers.pkl")
pickle.dump(output_json, open(output_file, 'wb'))
output_json = dict()
file_id += 1
```
#### File: paragraph_selection/pytorch_pretrained_bert/tokenization.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "/media/disk2/jennybae/bert-base-uncased-vocab.txt",
'bert-base-cased': "/media/disk2/jennybae/bert-base-cased-vocab.txt",
# 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
# 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
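# Typical usage sketch for the class above (assumes the 'bert-base-cased' entry in
# PRETRAINED_VOCAB_ARCHIVE_MAP points at a vocab file that exists on this machine):
#
#     tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
#     tokens = tokenizer.tokenize("Rolling Stones frontman")
#     ids = tokenizer.convert_tokens_to_ids(tokens)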
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
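# Sketch of the greedy longest-match-first behaviour with a toy vocabulary
# (illustrative only; real vocabularies come from load_vocab above):
#
#     wp = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3})
#     wp.tokenize("unaffable")   # -> ["un", "##aff", "##able"]
#     wp.tokenize("xyzzy")       # -> ["[UNK]"]  (no valid decomposition)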
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
```
|
{
"source": "jennybrown8/airflow-dbt",
"score": 2
}
|
#### File: tests/operators/test_dbt_operator.py
```python
import datetime
from unittest import TestCase, mock
from airflow import DAG, configuration
from airflow_dbt.hooks.dbt_hook import DbtCliHook
from airflow_dbt.operators.dbt_operator import (
DbtSeedOperator,
DbtSnapshotOperator,
DbtRunOperator,
DbtTestOperator
)
class TestDbtOperator(TestCase):
def setUp(self):
configuration.conf.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2020, 2, 27)
}
self.dag = DAG('test_dag_id', default_args=args)
@mock.patch.object(DbtCliHook, 'run_cli')
def test_dbt_run(self, mock_run_cli):
operator = DbtRunOperator(
task_id='run',
dag=self.dag
)
operator.execute(None)
mock_run_cli.assert_called_once_with('run')
@mock.patch.object(DbtCliHook, 'run_cli')
def test_dbt_test(self, mock_run_cli):
operator = DbtTestOperator(
task_id='test',
dag=self.dag
)
operator.execute(None)
mock_run_cli.assert_called_once_with('test')
@mock.patch.object(DbtCliHook, 'run_cli')
def test_dbt_snapshot(self, mock_run_cli):
operator = DbtSnapshotOperator(
task_id='snapshot',
dag=self.dag
)
operator.execute(None)
mock_run_cli.assert_called_once_with('snapshot')
@mock.patch.object(DbtCliHook, 'run_cli')
def test_dbt_seed(self, mock_run_cli):
operator = DbtSeedOperator(
task_id='seed',
dag=self.dag
)
operator.execute(None)
mock_run_cli.assert_called_once_with('seed')
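# Usage sketch outside the test suite (hypothetical DAG wiring, mirroring the
# operators exercised above):
#
#     with DAG('dbt_dag', default_args=args, schedule_interval='@daily') as dag:
#         dbt_seed = DbtSeedOperator(task_id='seed')
#         dbt_run = DbtRunOperator(task_id='run')
#         dbt_test = DbtTestOperator(task_id='test')
#         dbt_seed >> dbt_run >> dbt_test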
```
|
{
"source": "jennycao/crosswalk-test-suite",
"score": 2
}
|
#### File: res/GoogleMapsPlugin/google-maps-plugin.py
```python
import os
import commands
import sys
import shutil
import time
import glob
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TMP = "/tmp"
def buildHelloMap(key):
if not os.path.exists(BUILD_TMP):
os.mkdir(BUILD_TMP)
os.chdir(BUILD_TMP)
build_src = BUILD_TMP +"/HelloMap"
if os.path.exists(build_src):
shutil.rmtree(build_src)
os.system('cordova create HelloMap com.example.hellomap HelloMap')
os.chdir(build_src)
os.system('cordova platform add android')
os.system('cordova plugin add %s/../../tools/cordova-plugin-crosswalk-webview' % SCRIPT_DIR)
os.system('cordova plugin add cordova-plugin-googlemaps --variable API_KEY_FOR_ANDROID="%s"' % key)
shutil.copyfile(SCRIPT_DIR + '/index.html', build_src + '/www/index.html')
# Update android:theme="@android:style/Theme.Black.NoTitleBar" to android:theme="@android:style/Theme.Translucent.NoTitleBar" in AndroidManifest.xml
os.system('sed -i "s/%s/%s/g" %s' % ("@android:style\/Theme.Black.NoTitleBar", "@android:style\/Theme.Translucent.NoTitleBar", build_src + "/platforms/android/AndroidManifest.xml"))
# Set zOrderOnTop in config.xml
lines = open(build_src + '/config.xml', 'r').readlines()
lines.insert(-1, ' <preference name="xwalkZOrderOnTop" value="true" />\n')
f = open(build_src + '/config.xml', 'w')
f.writelines(lines)
f.close()
# Workaround for zOrderOnTop
googlemapjava = build_src + "/platforms/android/src/plugin/google/maps/GoogleMaps.java"
if os.path.exists(googlemapjava):
file = open(googlemapjava, 'r')
lines = open(googlemapjava, 'r').readlines()
        # Add new code position flags
import_pos = 0
showdialog_pos = 0
resizemap_pos = len(lines)
insert1_pos = 0
insert2_pos = 0
for (num, value) in enumerate(file):
if value.find("import com.google.android.gms.maps.model.VisibleRegion;") != -1:
import_pos = num
elif value.find("private void showDialog") != -1:
showdialog_pos = num
elif value.find("private void resizeMap") != -1:
resizemap_pos = num
            # Workaround code should be added right after GoogleMaps.this.onMapEvent("map_close") in showDialog()
elif value.find("GoogleMaps.this.onMapEvent(\"map_close\");") != -1 and num > showdialog_pos and num < resizemap_pos:
insert1_pos = num
            # Workaround code should be added right after callbackContext.success(); in showDialog()
elif value.find("callbackContext.success();") != -1 and num > showdialog_pos and num < resizemap_pos:
insert2_pos = num
        # Insert the workaround lines in descending order of position so earlier offsets stay valid
lines.insert(insert2_pos + 1, "\n XWalkCordovaView view = (XWalkCordovaView) webView.getView();\n")
lines.insert(insert2_pos + 2, " view.setZOrderOnTop(false);\n")
lines.insert(insert1_pos + 1, "\n XWalkCordovaView view = (XWalkCordovaView) webView.getView();\n")
lines.insert(insert1_pos + 2, " view.setZOrderOnTop(true);\n")
lines.insert(import_pos + 1, "import org.crosswalk.engine.XWalkCordovaView;\n")
file = open(googlemapjava, 'w')
file.writelines(lines)
file.close()
os.system('cordova build android')
time.sleep(5)
files = glob.glob(os.path.join(build_src + "/platforms/android/build/outputs/apk", "*-debug.apk"))
if len(files) == 0:
print("No apk build in %s/platforms/android/build/outputs/apk" % build_src)
return
for apk in files:
shutil.copy2(apk, SCRIPT_DIR)
def main():
try:
usage = "Usage: ./google-maps-plugin.py -k <key>"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-k",
"--key",
dest="key",
help="Google Maps API key")
global BUILD_PARAMETERS
(BUILD_PARAMETERS, args) = opts_parser.parse_args()
if not BUILD_PARAMETERS.key:
print("Google Maps API key is missing.")
sys.exit(1)
buildHelloMap(BUILD_PARAMETERS.key)
except Exception as e:
print "Got wrong options: %s, exit ..." % e
sys.exit(1)
if __name__ == '__main__':
main()
```
|
{
"source": "jennyccc99/meiduo_project",
"score": 3
}
|
#### File: apps/users/utils.py
```python
from django.contrib.auth.backends import ModelBackend
import re
from users.models import User
def get_user_by_account(account):
"""
    Get user information by account (username or mobile number)
    :param account: username or mobile number
:return: user
"""
try:
if re.match(r'^1[3-9]\d{9}$', account):
user = User.objects.get(mobile=account)
else:
user = User.objects.get(username=account)
except User.DoesNotExist:
return None
else:
return user
class UsernameMobileBackend(ModelBackend):
def authenticate(self, request, username=None, password=<PASSWORD>, **kwargs):
"""
        Override the authenticate method to support login with either username or mobile number
        :param request: request object
        :param username: username or mobile number
        :param password: password
        :param kwargs: other parameters
:return: user
"""
# search for user
user = get_user_by_account(username)
        # verify that the user exists and the password matches
if user and user.check_password(password):
return user
else:
return None
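# Sketch of how this backend would be enabled (assumed settings.py entry; the
# dotted path follows this module's location, apps/users/utils.py):
#
#     AUTHENTICATION_BACKENDS = ['users.utils.UsernameMobileBackend']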
```
|
{
"source": "JennyCCDD/epidemix",
"score": 3
}
|
#### File: epidemix/epidemix/vaccination.py
```python
import copy
import numpy as np
import networkx as nx
from math import ceil
from itertools import combinations
from cdlib import algorithms as algo
from epidemic import EpiModel
from utils.partition import *
########################################################################
class VacciModel(EpiModel):
"""docstring for VacciModel"""
def __init__(self, G, eq, num_state, params,
data_dir=None, vacci_strategy=None,
state_colors=['blue', 'red', 'green', 'orange']):
super(VacciModel, self).__init__(
G, eq, num_state, params, data_dir, state_colors)
if vacci_strategy == 'random':
self.vacci_func = self.random_vaccination
elif vacci_strategy == 'target':
self.vacci_func = self.degree_based_target
elif vacci_strategy == 'acquaintance':
self.vacci_func = self.acquaintance
elif vacci_strategy == 'hhi':
self.vacci_func = self.hhi
# initialize a certain state to probability = 1.
def initialize_nodes(self, idx_nodes, state=0):
# Listing the total states included in the epidemic model and ...
# ... loop through each state.
for s in np.arange(self.num_state):
if s == state:
self.model.initial[s * self.N + idx_nodes] = 1
else:
self.model.initial[s * self.N + idx_nodes] = 0
def community_vaccination(self, A, param=[8, 0.5]):
# Use an algo to detect communities.
communities = algo.louvain(nx.Graph(A)).communities
# Get the nodes with highest "out_community" links.
opt1 = target_nodes(communities, param[0], out_community=True)[0]
opt2 = self.select_nodes(A, state=0, nei_thres=0)
opt = np.intersect1d(opt1, opt2)
idx = np.random.choice(opt, ceil(len(opt) * param[1]), replace=False)
A[idx, :] = 0
A[:, idx] = 0
self.initialize_nodes(idx, state=0)
self.vacci_num = len(idx)
return A
def random_vaccination(self, A, ratio):
# opt = self.select_nodes(A, state=0, nei_thres=0)
opt = self.select_neighbors(A, state=1, nei_state=0, nei_thres=0)
# Randomly pick up certain selected nodes.
idx = np.random.choice(opt, ceil(len(opt) * ratio), replace=False)
A[idx, :] = 0
A[:, idx] = 0
# When the "s" nodes are separated, set the prob of "s" to 1 again.
self.initialize_nodes(idx, state=0)
self.vacci_num = len(idx)
return A
def degree_based_target(self, A, param): # param = (kc, ratio)
opt = self.select_nodes(A, state=0, nei_thres=param[0])
# opt = self.select_neighbors(A, state=1, nei_state=0, nei_thres=param[0])
idx = np.random.choice(opt, ceil(len(opt) * param[1]), replace=False)
A[idx, :] = 0
A[:, idx] = 0
# When the "s" nodes are separated, set the prob of "s" to 1 again.
self.initialize_nodes(idx, state=0)
self.vacci_num = len(idx)
return A
def acquaintance(self, A, ratio):
opt = self.select_nodes(A, state=0, nei_thres=0)
# opt = self.select_neighbors(A, state=1, nei_state=0, nei_thres=0)
# Randomly pick up certain selected nodes.
idx = np.random.choice(opt, ceil(len(opt) * ratio), replace=False)
        # Pick up only those nodes with neighbors.
neighbors = np.sum(A[idx], axis=0) > 0
A[neighbors, :] = 0
A[:, neighbors] = 0
# When the "s" nodes are separated, set the prob of "s" to 1 again.
self.initialize_nodes(idx, state=0)
self.vacci_num = np.sum(neighbors)
return A
def hhi(self, A, ratio):
opt = self.select_nodes(A, state=0, nei_thres=0)
# opt = self.select_neighbors(A, state=1, nei_state=0, nei_thres=0)
# Determine all possible combination of the selected nodes.
subset = np.array(list(combinations(opt, ceil(len(opt) * ratio))))
# Create an array to save the HHI value.
H_list = np.zeros(len(subset))
for i, idx_nodes in enumerate(subset):
# Try each combination by changing its A matrix.
A_temp = copy.deepcopy(A)
A_temp[idx_nodes.astype(np.int), :] = 0
A_temp[:, idx_nodes.astype(np.int)] = 0
# Get the corresponding graph using the A matrix.
G_temp = self.set_graph(self.state_list,
self.color_list,
G=nx.Graph(A_temp))
# Find the total nodes of each subgraphs.
subgraphs = list(nx.connected_components(G_temp))
# Count how many nodes there are for each subgraph.
node_num_list = np.array([len(z) for z in subgraphs])
# Calculate HHI value using the following formula.
H = 1 - (np.sum(node_num_list) ** 2) / (len(opt) ** 2)
# Save the value to the array.
H_list[i] = H
# Find the one combination with minimum HHI value.
idx = subset[np.argmin(H_list)].astype(np.int)
# Change the A matrix according to the selected nodes.
A[idx, :] = 0
A[:, idx] = 0
# When the "s" nodes are separated, set the prob of "s" to 1 again.
self.initialize_nodes(idx, state=0)
self.vacci_num = len(idx)
return A
########################################################################
if __name__ == '__main__':
from utils.plot import draw_probs
from equations import SI, SIS, SIR, SIRV
G = nx.watts_strogatz_graph(40, 5, 0.4)
days = np.arange(0, 10, 0.1)
colors = ['blue', 'red', 'green', 'orange']
epi = VacciModel(G, SIR, num_state=3, params=[4, 2, 0.3, 0.1],
vacci_strategy='random', state_colors=colors)
probs = epi.simulate(days)
# probs = epi.reconstruct(days, 20, param=0.5)
epi.set_propagate(0, 1, neighbor=1, update_state=False)
epi.set_propagate(1, 2, neighbor=None, update_state=False)
# status, _ = epi.propagation()
status, probs = epi.propagation(reconstruct_index=[2, 4, 7, 20],
reconstruct_param=0.5)
epi.visualize(status, np.arange(16), figsize=(15, 15), n_row=4, n_col=4)
draw_probs(probs, colors)
```
|
{
"source": "jenny-chou/Kaggle-COVID-tweet",
"score": 3
}
|
#### File: Kaggle-COVID-tweet/src/Covid-tweets.py
```python
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
import nltk
import json
import requests
import re
import string
class Log:
# Log helper class.
    LOG_LEVEL = dict((value, index) for (index, value) in
                     enumerate(["INFO", "ERROR", "FATAL"]))
LEVEL = LOG_LEVEL["INFO"]
@classmethod
def set_log_level(cls, level="INFO"):
"""Set log level.
Args:
level (str, optional): level to be set. Defaults to "INFO".
"""
cls.LEVEL = cls.LOG_LEVEL[level]
@classmethod
def print_msg(cls, level, msg):
"""Check log level and print permissible logs.
Args:
level (str): level of the message.
msg (str): message to be print.
"""
if cls.LOG_LEVEL[level] >= cls.LEVEL:
print(msg)
class Common:
# Dictionaries and lists
@staticmethod
def lemmatization_dict():
# Dictionary of words for lemmatization.
return {"shop":"store", "supermarket":"market", "paper":"toiletpaper",
"much":"many", "employee":"worker", "staff":"worker",
"global":"world", "company":"business", "consumer":"customer",
"house":"home", "grocery":"goods", "products":"goods",
"toilet":"toiletpaper"}
@staticmethod
def stemming_dict():
# Dictionary of words for stemming.
return {"buying":"buy", "bought":"buy", "working":"work",
"worked":"work", "shopping":"shop", "shopped":"shop",
"shops":"shop", "days":"day", "weeks":"week", "masks":"mask",
"got":"get", "gets":"get", "consumers":"consumer", "gets":"get",
"supermarkets":"supermarket", "says":"say", "saying":"say",
"said":"say", "getting":"get", "companies":"company", "went":"go",
"got":"get", "making":"make", "made":"make", "services":"service",
"hours":"hour", "years":"year", "products":"product", "going":"go",
"increases":"increase", "increased":"increase", "markets":"market",
"close":"closed", "needs":"need", "customers":"customer",
"stores":"store", "businesses":"business", "employees":"employee",
"workers":"worker", "staffs":"staff", "needed":"need",
"hands":"hand", }
@staticmethod
def stopword_list():
# List of stopwords.
nltk.download('stopwords')
stopwords = list(set(nltk.corpus.stopwords.words('english')))
stopwords += ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "l",
"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z"]
stopwords += ["in", "on", "at", "via", "due", "could", "would", "may",
"one", "still", "even", "also", "every", "two", "etc",
"per"]
stopwords += ["covid", "corona", "virus"]
return stopwords
@staticmethod
def create_loc_dict():
# Dictionary of locations.
loc_dict = {}
states = json.loads(requests.get("https://raw.githubusercontent.com" \
"/praneshsaminathan/country-state-city/master/"\
"states.json").text)["states"]
countries=json.loads(requests.get("https://raw.githubusercontent.com"\
"/praneshsaminathan/country-state-city/master/"\
"countries.json").text)["countries"]
cities = json.loads(requests.get("https://raw.githubusercontent.com/"\
"praneshsaminathan/country-state-city/master/"\
"cities.json").text)["cities"]
us_states = pd.read_csv("https://worldpopulationreview.com/static/"\
"states/abbr-name.csv",names=['state_code','state'])
# Assign all cities its country code
for city in cities:
state_id = int(re.findall(r'\d+', city["state_id"])[0])
country_id = states[state_id-1]["country_id"]
country_id = int(re.findall(r'\d+', country_id)[0])
country_name = countries[country_id-1]["sortname"]
loc_dict[city["name"].lower()] = country_name.lower()
# Assign all states its country code
for state in states:
country_id = int(re.findall(r'\d+', state["country_id"])[0])
country_name = countries[country_id-1]["sortname"]
loc_dict[state["name"].lower()] = country_name.lower()
# Assign all countries its country code
for country in countries:
loc_dict[country["sortname"].lower()] = country["sortname"].lower()
loc_dict[country["name"].lower()] = country["sortname"].lower()
# Assign "us" to all US locations
for index in us_states.index:
state = us_states.loc[index]
loc_dict[state.state_code.lower()] = "us"
loc_dict[state.state.lower()] = "us"
# Add missing locations
loc_dict["uk"] = "gb"
loc_dict["ny"] = "us"
loc_dict["nyc"] = "us"
loc_dict["la"] = "us"
loc_dict["sf"] = "us"
loc_dict["bc"] = "ca"
return loc_dict
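# A few entries the mapping above ends up with (sketch; the bulk of the dictionary
# depends on the remote JSON/CSV sources being reachable):
#
#     loc_dict = Common.create_loc_dict()
#     loc_dict["uk"]       # 'gb'  (explicit addition above)
#     loc_dict["nyc"]      # 'us'  (explicit addition above)
#     loc_dict["france"]   # 'fr'  (country name -> ISO code)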
class Data:
# data container
def __init__(self, train, test):
self.train = train
self.test = test
self.encoder = None
self.top_accts, self.top_hashtags = self._popular_tags(train)
self.stemming = Common.stemming_dict()
self.lemmatization = Common.lemmatization_dict()
self.stopwords = Common.stopword_list()
self.loc_dict = Common.create_loc_dict()
def _popular_tags(self, df, num_accts=10, num_hashtags=30):
"""Most mentioned accounts and popular hashtags in training set.
Args:
df (DataFrame): dataframe to be worked on.
num_accts (int): number of most tagged accounts.
num_hashtags (int): number of most popular hashtags.
Returns:
top_accts: list of most tagged accounts.
top_hashtags: list of most popular hashtags.
"""
accts = []
hashtags = []
for tweet in df.index.values:
accts += re.findall(r'@\w+', df.loc[tweet, "OriginalTweet"])
hashtags += re.findall(r'#\w+', df.loc[tweet, "OriginalTweet"])
sort = pd.DataFrame(accts).value_counts(ascending=False)
top_accts = [tag[0] for tag in sort.index[:num_accts]]
sort = pd.DataFrame(hashtags).value_counts(ascending=False)
# Exclude #covid* and #corova* because those hashtags were used
# to collect data, hence they're in most of the tweets.
top_hashtags = [tag[0] for tag in sort.index[:num_hashtags] \
if "covid" not in tag[0].lower() and
"corona" not in tag[0].lower()]
return top_accts, top_hashtags
def _remove_link(self, text):
"""Remove link from input text.
Args:
text (str)
Returns:
(str): text without link starting with http.
"""
return re.sub(r'http\S+', " ", text)
def _remove_accounts(self, text):
"""Remove accounts and only keep most mentioned accounts.
Args:
text (str)
Returns:
text (str): text without account names except most mentioned
accounts.
"""
tags = []
for tag in self.top_accts:
tags += re.findall(tag, text)
text = re.sub(r'@\S+', " ", text)
for tag in tags:
text += " " + tag
return text
def _remove_hashtags(self, text):
"""Remove hashtags and only keep most popular hashtags.
Args:
text (str)
Returns:
text (str): text without hashtags except most popular hashtags.
"""
tags = []
for tag in self.top_hashtags:
tags += re.findall(tag, text)
text = re.sub(r'#\S+', " ", text)
for tag in tags:
text += " " + tag
return text
def _remove_special_char(self, text):
"""Remove special characters and numbers.
Args:
text (str)
Returns:
text (str): text without special characters and numbers.
"""
special_char = string.punctuation + "0123456789" + "\r" + "\n" + "\t"
for ch in special_char:
text = text.replace(ch, " ")
return text
def _remove_non_english(self, text):
"""Remove non-English characters.
Args:
            text (str)
        Returns:
            (str): text containing only alphabetic English words.
"""
clean_text = ""
for word in text.split():
clean_text += str(np.where(word.isalpha(), (word + " "), ""))
return clean_text
def _remove_stopwords(self, text):
"""Remove stopwords.
Args:
text (str)
Returns:
(str): text without stopwords.
"""
return " ".join([word for word in text.split()
if word not in self.stopwords])
def _stemming(self, text):
"""Replace words by its stem.
Args:
text (str)
Returns:
(str): text with stem of words.
"""
clean_text = []
for word in text.split():
if word in self.stemming:
word = self.stemming[word]
clean_text.append(word)
return " ".join(clean_text)
def _lemmatization(self, text):
"""Replace words by its lemma.
Args:
text (str)
Returns:
(str): text with lemma of words.
"""
clean_text = []
for word in text.split():
if word in self.lemmatization:
word = self.lemmatization[word]
clean_text.append(word)
return " ".join(clean_text)
def _clean_text(self, text):
"""Process text by removing and replacing uninformative words.
Args:
text (str)
Returns:
text (str): clean text
"""
text = self._remove_link(text)
text = self._remove_accounts(text)
text = self._remove_hashtags(text)
text = self._remove_special_char(text)
text = self._remove_non_english(text)
text = self._remove_stopwords(text)
text = self._stemming(text)
text = self._lemmatization(text)
return text
def _clean_location(self, loc):
"""Process location and replace by the country code it's in
Args:
loc (str): location
Returns:
loc_clean (str): country code
"""
loc_clean = ""
if not pd.isnull(loc):
loc = self._clean_text(loc.lower())
for sub in loc.split():
if sub in self.loc_dict:
loc_clean = self.loc_dict[sub]
break
return loc_clean
def _clean_tweet(self, df):
"""Clean the Location and OriginalTweet
Args:
df (DataFrame): dataframe to be worked on.
"""
clean_tweet = []
for tweet in df.index.values:
# clean location
loc = self._clean_location(df.Location.loc[tweet])
# clean message
msg = self._clean_text(df.loc[tweet, 'OriginalTweet'].lower())
# combine location and tweet to one text
clean_tweet.append(loc + " " + msg)
df.loc[:, 'CleanTweet'] = clean_tweet
def _encode_sentiment(self):
# Encode and transform Sentiment to numerical values
self.encoder = LabelEncoder().fit(self.train.Sentiment)
self.train.Sentiment = self.encoder.transform(self.train.Sentiment)
self.test.Sentiment = self.encoder.transform(self.test.Sentiment)
def preprocessing(self):
# Preprocess data
self._clean_tweet(self.train)
self._clean_tweet(self.test)
self._encode_sentiment()
def inverse_sentiment(self, sentiment):
"""Inverse transform the sentiment from numerical values to string
Args:
sentiment (list): list of numerically encoded sentiment
Returns:
(list): list of sentiment strings
"""
return self.encoder.inverse_transform(sentiment)
class Tokenizer:
# Tokenize text
def __init__(self, num_words, oov, maxlen, truncating, padding):
self.num_words = num_words
self.oov = oov
self.maxlen = maxlen
self.truncating = truncating
self.padding = padding
self.tokenizer = None
self.vocab_size = 0
def set_config(self, **kwargs):
for (key, value) in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
def fit(self, corpus):
"""Fit corpus to tokenizer
Args:
corpus (str or list of str)
"""
self.tokenizer = tf.keras.preprocessing.text.Tokenizer( \
num_words=self.num_words, oov_token=self.oov)
self.tokenizer.fit_on_texts(corpus)
self.vocab_size = len(self.tokenizer.word_index)
Log.print_msg("INFO", self.vocab_size)
def transform(self, corpus):
"""Transform corpus to tokens.
Args:
corpus (str or list of str)
Returns:
(list of int): list of tokens padded to same length
"""
sequences = self.tokenizer.texts_to_sequences(corpus)
return tf.keras.preprocessing.sequence.pad_sequences(sequences,
maxlen=self.maxlen, truncating=self.truncating,
padding=self.padding)
class Model:
# Model container
def __init__(self):
self.seq_model = None
self.best_score = {"val_acc": 0, "model": None}
@property
def model(self):
        # Summary of the current model.
return self.seq_model.summary()
@model.setter
def model(self, model):
self.seq_model = model
@model.deleter
def model(self):
self.seq_model = None
def compile_and_fit(self, train_corpus, train_target, valid_corpus,
valid_target, optimizer='adam', metrics=['accuracy'],
loss='sparse_categorical_crossentropy', epochs=4,
batch_size=32):
"""Compile and fit to model.
Args:
train_corpus: corpus for training
train_target: sentiment for training
valid_corpus: corpus for validation
valid_target: sentiment for validation
optimizer: optimizer algorithm
metrics: classification metrics
loss: loss function
epochs (int): number of epochs
batch_size (int): size of batch
"""
self.seq_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
history = self.seq_model.fit(train_corpus, train_target,
epochs=epochs, batch_size=batch_size,
validation_data=(valid_corpus, valid_target))
self.best_model = history.history['val_accuracy']
@property
def best_model(self):
# Record best model.
return self.best_score
@best_model.setter
def best_model(self, val_acc):
if max(val_acc) > self.best_score["val_acc"]:
self.best_score["val_acc"] = max(val_acc)
self.best_score["model"] = self.seq_model
@best_model.deleter
def best_model(self):
self.best_score = {"val_acc": 0, "model": None}
def evaluate(self, test_corpus, test_target):
"""Predict sentiment of test corpus using best model.
Args:
test_corpus: corpus for evaluation
test_target: sentiment for evaluation
Returns:
(list): loss and accuracy
"""
model = self.best_model['model']
return model.evaluate(test_corpus, test_target)
if __name__ == "__main__":
# set log level
Log.set_log_level("INFO")
# load data
train_df = pd.read_csv("Corona_NLP_train.csv", encoding='latin1')
test_df = pd.read_csv("Corona_NLP_test.csv", encoding='latin1')
data = Data(train_df, test_df)
data.preprocessing()
num_words=15000
maxlen=25
oov="<OOV>"
trunc='post'
pad='post'
embd_dim = 32
batch = 32
epoch = 3
train_target = data.train.Sentiment.values
test_target = data.test.Sentiment.values
tokenizer = Tokenizer(num_words=num_words, oov=oov, maxlen=maxlen,
truncating=trunc, padding=pad)
tokenizer.fit(data.train.CleanTweet.values)
train_corpus = tokenizer.transform(data.train.CleanTweet.values)
test_corpus = tokenizer.transform(data.test.CleanTweet.values)
vocab_size = tokenizer.vocab_size
# create Model object and add models
dnn_model = Model()
# Conv1D model
dnn_model.model = tf.keras.models.Sequential([
tf.keras.layers.Embedding(vocab_size+1, embd_dim, input_length=maxlen),
tf.keras.layers.Conv1D(embd_dim*8, 5, activation=tf.nn.relu),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(5, activation=tf.nn.sigmoid)
])
print(dnn_model.model)
dnn_model.compile_and_fit(train_corpus, train_target, test_corpus, \
test_target, epochs=epoch, batch_size=batch)
# Bidirectional LSTM
dnn_model.model = tf.keras.models.Sequential([
tf.keras.layers.Embedding(vocab_size+1, embd_dim, input_length=maxlen),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embd_dim*8)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(5, activation=tf.nn.sigmoid)
])
print(dnn_model.model)
dnn_model.compile_and_fit(train_corpus, train_target, test_corpus, \
test_target, epochs=epoch, batch_size=batch)
# Bidirectional GRU
dnn_model.model = tf.keras.models.Sequential([
tf.keras.layers.Embedding(vocab_size+1, embd_dim, input_length=maxlen),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(embd_dim*8)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(5, activation=tf.nn.sigmoid)
])
print(dnn_model.model)
dnn_model.compile_and_fit(train_corpus, train_target, test_corpus, \
test_target, epochs=epoch, batch_size=batch)
# Show best model and val_accuracy
print("Best model is", dnn_model.best_model)
# Evaluate model with test corpus and show accuracy
test_pred = dnn_model.evaluate(test_corpus, test_target)
print("Test corpus accuracy is", test_pred[1])
```
|
{
"source": "jenny-chou/Kaggle-Ultrasound_Segmentation",
"score": 3
}
|
#### File: src/models/predict_model.py
```python
import os
import numpy as np
import pandas as pd
from skimage.transform import resize
import build_model
ORIG_ROW = 420
ORIG_COL = 580
def run_len_encoding(img):
"""Compress image using run-length encoding.
Args:
img: binary array of image
Returns: string of encoded image
"""
position = 0
pixel = 0
count_one = 0
previous = 0
encoded_img = []
for col in range(img.shape[1]):
for row in range(img.shape[0]):
position += 1
pixel = img[row, col]
if pixel == 1:
if pixel != previous:
encoded_img.append(str(position))
count_one += 1
elif pixel == 0 and pixel != previous:
encoded_img.append(str(count_one))
count_one = 0
previous = pixel
return " ".join(encoded_img)
def predict_mask(model, imgs, fnames):
"""Predict masks for test images.
Args:
model: best trained model.
imgs: float ndarray of images
fnames: list of names of the images
Returns: DataFrame of image names and encoded mask predictions
"""
pred = pd.DataFrame([], columns=['img', 'pixels'])
for idx, fname in enumerate(fnames):
img = np.expand_dims(imgs[idx], axis=0)
mask_pred = model.predict(img)
mask_pred = resize(mask_pred[0,:,:,0], (ORIG_ROW, ORIG_COL))
mask_pred = np.rint(mask_pred)
print(fname)
pred = pred.append(
{'img':fname,
'pixels':run_len_encoding(mask_pred)}, ignore_index=True)
return pred
def predict_masks(test_imgs_npy, best_weight_fname,
pred_mask_fname="test_masks_pred.csv"):
# Load images
    test_imgs = np.load(test_imgs_npy)  # e.g. "test_imgs.npy"
# Load model and weight
model = build_model.unet()
    model.load_weights(best_weight_fname)  # e.g. "best_weight.h5"
# Predict masks for test data
test_fnames = os.listdir(os.path.join('..', '..', 'data', 'raw', 'test'))
test_masks_pred = predict_mask(model, test_imgs, test_fnames)
test_masks_pred.to_csv(pred_mask_fname, index=False)
```
|
{
"source": "jennycs005/SecureNetwork",
"score": 2
}
|
#### File: jennycs005/SecureNetwork/BLEbroadcast.py
```python
import subprocess
import time
# broadcasting by flag = 88, channel = 0xc1
def Broadcast(data):
print "broadcasting data: ", data
subprocess.call("sudo hciconfig hci0 up",shell=True);
subprocess.call("sudo hciconfig leadv 3",shell=True);
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x000A 01",shell=True)
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 88 " + data,shell=True)
#subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 88 02 c1 11 03 c2 22 22 02 c3 33",shell=True)
# demo
subprocess.call("sudo hcitool -i hci0 cmd 0x08 0x0008 1E 02 01 88 xx c1 03 01 00",shell=True)
```
|
{
"source": "jennycs005/Skyscraper-App",
"score": 4
}
|
#### File: jennycs005/Skyscraper-App/newnew.py
```python
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import csv
import numpy as np
import pydeck as pdk
from PIL import Image
def scatterplot():
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
completion_year_List = []
meters_List = []
for row in skyscrapers_data:
# st.write(row)
completion_year = pd.to_numeric(skyscrapers_data.COMPLETION)
# print(completion_year)
completion_year_List.append(completion_year)
meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float)
meters_List.append(meters)
plt.xlabel("Completion Year",fontsize=10)
plt.ylabel("Meters",fontsize=10)
plt.title("Height & Numbers along with completion year",fontsize=13)
plt.scatter(completion_year_List, meters_List, alpha=0.3, marker=".", color="cornflowerblue")
plt.show()
#return plt
# This used to be the rank_map function; it was renamed to whole_map() to distinguish it from rank_map() below
def whole_map():
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "Latitude", "Longitude"])
sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"},inplace=True)
st.map(sky_df)
# New rank_map() function: shows the skyscrapers selected by rank
def rank_map(select_rank):
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
sky_df = pd.DataFrame(skyscrapers_data, columns=["RANK", "CITY", "Latitude", "Longitude"])
sky_df.rename(columns={"Latitude": "lat", "Longitude": "lon"}, inplace=True)
select_rank_max = select_rank + 19
rank_df = sky_df[(sky_df['RANK'] >= select_rank) & (sky_df['RANK'] <= select_rank_max)]
    # Show the selected rows as a table in the sidebar
rank_df_show = pd.DataFrame(rank_df, columns=['CITY', 'lon', 'lat'])
st.sidebar.table(rank_df_show)
    # Draw the map
st.pydeck_chart(pdk.Deck(
map_style = 'mapbox://styles/mapbox/light-v9',
initial_view_state=pdk.ViewState(
latitude=rank_df['lat'].mean(),
longitude=rank_df['lon'].mean(),
zoom=1,
pitch=0
),
layers = [
pdk.Layer(
'HexagonLayer',
data=rank_df,
get_position = '[lon, lat]',
radius = 200000,
elevation_scale = 10000,
elevation_range = [400,1000],
pickable = True,
extruded = True,
),
pdk.Layer(
'ScatterplotLayer',
data=rank_df,
get_position='[lon, lat]',
get_color = '[200, 30, 0, 160]',
get_radius = 200000,
),
],
))
# Draw a line chart of average height up to the selected completion year
def average_height_line_chart(select_year):
average_height_df =pd.DataFrame(columns=['Year', 'AverageHeight'])
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
year_df = pd.DataFrame(skyscrapers_data, columns=["COMPLETION", "Meters"])
year_df.COMPLETION = pd.to_numeric(year_df.COMPLETION)
year_df.Meters = year_df.Meters.str.replace(r'\s+m', '').astype(float)
year_df = year_df[year_df['COMPLETION']<= select_year]
year_df.sort_values(by = ['COMPLETION'], ascending = True, inplace= True)
    for year in range(1931, select_year):  # previously the range was 1931~1991, which is probably why the chart only went up to 1991
mean_height = (year_df[['Meters']][year_df['COMPLETION'] <= year]).mean()
a = {'Year': year, 'AverageHeight': mean_height}
average_height_df = average_height_df.append(a, ignore_index=True)
plt.xlabel("Completion Year",fontsize=10)
plt.ylabel("Average Height",fontsize=10)
plt.title("Average Height along with completion year",fontsize=13)
plt.plot(average_height_df.Year, average_height_df.AverageHeight)
plt.show()
def statisticchart(selection):
if selection == "By Function":
fp = open('Skyscrapers2021.csv', 'r')
reader = csv.reader(fp)
count = 0
d = {'office': 0, 'hotel': 0, 'residential': 0, 'hotel / office': 0, 'residential / office': 0,
'multifunction': 0}
for row in reader:
if count > 0:
label = row[12]
if label == 'office':
d['office'] += 1
elif label == 'hotel':
d['hotel'] += 1
elif label == 'residential':
d['residential'] += 1
elif label == 'hotel / office':
d['hotel / office'] += 1
elif label == 'residential / office':
d['residential / office'] += 1
else:
d['multifunction'] += 1
count += 1
label = []
values = []
for key in d:
label.append(key)
values.append(d[key])
EXPLODE_VALUE = 0.1
max_percentage = max(d.values())
max_percentage_index = values.index(max_percentage)
explode_values = [0] * len(label)
explode_values[max_percentage_index] = EXPLODE_VALUE
colors = ["skyblue", "cadetblue", "cornflowerblue","powderblue","steelblue","lightslategray"]
plt.pie(values, labels=label, colors=colors,explode=explode_values, autopct='%1.1f%%', startangle=90,
textprops={'fontsize': 10})
plt.show()
plt.rcParams.update({"font.size": 7})
plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0))
plt.show()
else:
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
material_description = {}
for i in skyscrapers_data["MATERIAL"]:
if i in material_description:
material_description[i] += 1
else:
material_description[i] = 1
material_Percentage_Value = material_description.values()
labels = material_description.keys()
mfunction = [x for x in material_Percentage_Value]
st.set_option('deprecation.showPyplotGlobalUse', False)
EXPLODE_VALUE = 0.1
max_percentage = max(material_Percentage_Value)
max_percentage_index = mfunction.index(max_percentage)
explode_values = [0] * len(labels)
explode_values[max_percentage_index] = EXPLODE_VALUE
colors = ["tan", "peru", "orange", "gold"]
plt.pie(mfunction, labels=labels, colors=colors, explode=explode_values, autopct='%1.1f%%', startangle=90,
textprops={'fontsize': 10})
plt.legend(loc="lower right", bbox_to_anchor=(1.5, 0))
plt.show()
return plt
def main():
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
img = Image.open("photo.jpg")
st.image(img, width=700)
st.title("Top 100 Skyscrapers around the world!")
if st.checkbox("Show DataFrame"):
st.dataframe(skyscrapers_data, width=700, height=300)
if st.checkbox("Show all 100 Skyscrapers in the map"):
whole_map()
    # Select a rank range in the sidebar
rank = st.sidebar.selectbox('Select rank:', ('1~20', '21~40', '41~60', '61~80', '81~100'))
rank_list = {'1~20': 1, '21~40': 21, '41~60': 41, '61~80': 61, '81~100': 81}
select_rank = rank_list[rank]
# 用选好的年份画map
st.write('Skyscrapers Rank ' + str(select_rank) + ' ~ ' + str(select_rank+19))
rank_map(select_rank)
    # Suppress an unimportant deprecation warning
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(scatterplot())
    # Build a pivot table
skyscrapers_data = pd.read_csv("Skyscrapers2021.csv")
skyscrapers_data.Meters = skyscrapers_data.Meters.str.replace(r'\s+m', '').astype(float)
tt = pd.pivot_table(skyscrapers_data, index=['CITY', 'COMPLETION', 'MATERIAL'],values=['RANK', 'Meters'])
st.dataframe(tt)
    # Select a year in the sidebar
select_year = st.sidebar.slider("Select years", 1931, 2020)
    # Draw the line chart for the selected year
st.pyplot(average_height_line_chart(select_year))
selection = st.sidebar.selectbox("Select an option: ",("By Function", "By Material"))
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(statisticchart(selection))
main()
```
|
{
"source": "jennydai2011/python-Yixiaohan-Showmethecode",
"score": 3
}
|
#### File: jennydai2011/0001/0001.py
```python
import random, string
forSelect = string.ascii_letters + "0123456789"
def generate(count, length):
#count=200
#length=20
for x in range(count):
Re = ""
for y in range(length):
Re += random.choice(forSelect)
print(Re)
if __name__ =="__main__":
generate(200, 20)
```
#### File: jennydai2011/0004/0004.py
```python
from collections import Counter
import re
def statistics(filename):
f = open(filename, 'r').read()
    f = re.findall(r'[\w\-\_\.]+', f)
print(len(f))
return 0
if __name__ == "__main__":
filename = 'test.txt'
statistics(filename)
```
|
{
"source": "jennydaman/nighres",
"score": 2
}
|
#### File: nighres/laminar/volumetric_layering.py
```python
import sys
import os
import numpy as np
import nibabel as nb
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
_check_topology_lut_dir, _check_available_memory
def volumetric_layering(inner_levelset, outer_levelset,
n_layers=4, topology_lut_dir=None,
save_data=False, overwrite=False, output_dir=None,
file_name=None):
'''Equivolumetric layering of the cortical sheet.
Parameters
----------
inner_levelset: niimg
Levelset representation of the inner surface, typically GM/WM surface
outer_levelset : niimg
Levelset representation of the outer surface, typically GM/CSF surface
n_layers : int, optional
        Number of layers to be created (default is 4)
topology_lut_dir: str, optional
Path to directory in which topology files are stored (default is stored
in TOPOLOGY_LUT_DIR)
save_data: bool
Save output data to file (default is False)
overwrite: bool
Overwrite existing results (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
----------
dict
Dictionary collecting outputs under the following keys
(suffix of output files in brackets)
* depth (niimg): Continuous depth from 0 (inner surface) to 1
(outer surface) (_layering-depth)
* layers (niimg): Discrete layers from 1 (bordering inner surface) to
n_layers (bordering outer surface) (_layering-layers)
* boundaries (niimg): Levelset representations of boundaries between
all layers in 4D (_layering-boundaries)
Notes
----------
Original Java module by <NAME>, <NAME> and
<NAME>. Algorithm details can be found in [1]_
References
----------
.. [1] Waehnert et al (2014) Anatomically motivated modeling of cortical
laminae. DOI: 10.1016/j.neuroimage.2013.03.078
'''
print('\nVolumetric Layering')
# check topology lut dir and set default if not given
topology_lut_dir = _check_topology_lut_dir(topology_lut_dir)
# make sure that saving related parameters are correct
if save_data:
output_dir = _output_dir_4saving(output_dir, inner_levelset)
depth_file = os.path.join(output_dir,
_fname_4saving(file_name=file_name,
rootfile=inner_levelset,
suffix='layering-depth'))
layer_file = os.path.join(output_dir,
_fname_4saving(file_name=file_name,
rootfile=inner_levelset,
suffix='layering-layers'))
boundary_file = os.path.join(output_dir,
_fname_4saving(file_name=file_name,
rootfile=inner_levelset,
suffix='layering-boundaries'))
if overwrite is False \
and os.path.isfile(depth_file) \
and os.path.isfile(layer_file) \
and os.path.isfile(boundary_file) :
print("skip computation (use existing results)")
output = {'depth': load_volume(depth_file),
'layers': load_volume(layer_file),
'boundaries': load_volume(boundary_file)}
return output
    # start virtual machine if not already running
try:
mem = _check_available_memory()
nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
except ValueError:
pass
# initate class
lamination = nighresjava.LaminarVolumetricLayering()
# load the data
inner_img = load_volume(inner_levelset)
inner_data = inner_img.get_data()
hdr = inner_img.header
aff = inner_img.affine
resolution = [x.item() for x in hdr.get_zooms()]
dimensions = inner_data.shape
outer_data = load_volume(outer_levelset).get_data()
# set parameters from input images
lamination.setDimensions(dimensions[0], dimensions[1], dimensions[2])
lamination.setResolutions(resolution[0], resolution[1], resolution[2])
lamination.setInnerDistanceImage(nighresjava.JArray('float')(
(inner_data.flatten('F')).astype(float)))
lamination.setOuterDistanceImage(nighresjava.JArray('float')(
(outer_data.flatten('F')).astype(float)))
lamination.setNumberOfLayers(n_layers)
lamination.setTopologyLUTdirectory(topology_lut_dir)
# execute class
try:
lamination.execute()
except:
# if the Java module fails, reraise the error it throws
print("\n The underlying Java code did not execute cleanly: ")
print(sys.exc_info()[0])
raise
return
# collect data
depth_data = np.reshape(np.array(lamination.getContinuousDepthMeasurement(),
dtype=np.float32), dimensions, 'F')
hdr['cal_max'] = np.nanmax(depth_data)
depth = nb.Nifti1Image(depth_data, aff, hdr)
layer_data = np.reshape(np.array(lamination.getDiscreteSampledLayers(),
dtype=np.int32), dimensions, 'F')
hdr['cal_max'] = np.nanmax(layer_data)
layers = nb.Nifti1Image(layer_data, aff, hdr)
boundary_len = lamination.getLayerBoundarySurfacesLength()
boundary_data = np.reshape(np.array(lamination.getLayerBoundarySurfaces(),
dtype=np.float32), (dimensions[0],
dimensions[1], dimensions[2], boundary_len),
'F')
hdr['cal_min'] = np.nanmin(boundary_data)
hdr['cal_max'] = np.nanmax(boundary_data)
boundaries = nb.Nifti1Image(boundary_data, aff, hdr)
if save_data:
save_volume(depth_file, depth)
save_volume(layer_file, layers)
save_volume(boundary_file, boundaries)
return {'depth': depth, 'layers': layers, 'boundaries': boundaries}
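# Minimal usage sketch (the levelset file names below are placeholders, and the
# import assumes the function is re-exported from nighres.laminar as in the
# package layout this module ships in):
#
#     from nighres.laminar import volumetric_layering
#     results = volumetric_layering('gwb_levelset.nii.gz', 'cgb_levelset.nii.gz',
#                                   n_layers=4, save_data=True,
#                                   output_dir='/tmp/layering')
#     depth_img = results['depth']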
```
|
{
"source": "jennyfothergill/foyer",
"score": 4
}
|
#### File: foyer/foyer/element.py
```python
import openmm.app.element as elem
class Element(elem.Element):
"""An Element represents a chemical element.
The openmm.app.element module contains objects for all the standard chemical elements,
such as element.hydrogen or element.carbon. You can also call the static method Element.getBySymbol() to
look up the Element with a particular chemical symbol.
Element objects should be considered immutable.
Canonical, periodic table elements will utilize openmm.element,
but custom elements will utilize this subclass foyer.element
"""
def __init__(self, number, name, symbol, mass):
"""Create a new element.
Parameters
----------
number : int
The atomic number of the element
name : string
The name of the element
symbol : string
The chemical symbol of the element
mass : float
The atomic mass of the element
"""
## The atomic number of the element
self._atomic_number = number
## The name of the element
self._name = name
## The chemical symbol of the element
self._symbol = symbol
## The atomic mass of the element
self._mass = mass
# Index this element in a global table
s = symbol.strip().upper()
## If we add a new element, we need to re-hash elements by mass
Element._elements_by_mass = None
if s in Element._elements_by_symbol:
raise ValueError("Duplicate element symbol %s" % s)
```
#### File: foyer/tests/test_gmso_forcefield.py
```python
import difflib
import glob
import os
import gmso
import mbuild as mb
import pytest
from pkg_resources import resource_filename
from foyer.exceptions import FoyerError
from foyer.general_forcefield import Forcefield
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn, register_mock_request
FF_DIR = resource_filename("foyer", "forcefields")
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, "xml/*.xml"))
RESPONSE_BIB_ETHANE_JA962170 = """@article{Jorgensen_1996,
doi = {10.1021/ja9621760},
url = {https://doi.org/10.1021%2Fja9621760},
year = 1996,
month = {jan},
publisher = {American Chemical Society ({ACS})},
volume = {118},
number = {45},
pages = {11225--11236},
author = {<NAME> and <NAME> and <NAME>},
title = {Development and Testing of the {OPLS} All-Atom Force Field on Conformational Energetics and Properties of Organic Liquids},
journal = {Journal of the American Chemical Society}
}"""
RESPONSE_BIB_ETHANE_JP0484579 = """@article{Jorgensen_2004,
doi = {10.1021/jp0484579},
url = {https://doi.org/10.1021%2Fjp0484579},
year = 2004,
month = {oct},
publisher = {American Chemical Society ({ACS})},
volume = {108},
number = {41},
pages = {16264--16270},
author = {<NAME>. Jorgensen and <NAME> and <NAME>},
title = {Free Energies of Hydration from a Generalized Born Model and an All-Atom Force Field},
journal = {The Journal of Physical Chemistry B}
}"""
class TestGeneralForcefield(BaseTest):
@pytest.fixture(scope="session")
def oplsaa(self):
return Forcefield(name="oplsaa", strict=False)
@pytest.mark.parametrize("ff_file", FORCEFIELDS)
def test_load_files(self, ff_file):
ff1 = Forcefield(forcefield_files=ff_file, strict=False)
assert len(ff1.ff.atom_types) > 0
ff2 = Forcefield(forcefield_files=ff_file, strict=False)
assert len(ff1.ff.atom_types) == len(ff2.ff.atom_types)
""" Relies on https://github.com/mosdef-hub/gmso/pull/526
def test_duplicate_type_definitions():
with pytest.raises(ValueError):
ff4 = Forcefield(name='oplsaa', forcefield_files=FORCEFIELDS, strict=False)
"""
def test_missing_type_definitions(self):
with pytest.raises(FoyerError):
FF = Forcefield()
ethane = mb.load(get_fn("ethane.mol2"), backend="parmed")
FF.apply(ethane, assert_improper_params=False)
def test_unsupported_backend(self):
with pytest.raises(FoyerError, match=r"Backend not supported"):
FF = Forcefield(name="oplsaa", backend="void")
def test_from_gmso(self, oplsaa):
mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
top = gmso.external.from_mbuild(mol2)
ethane = oplsaa.apply(top, assert_improper_params=False)
assert (
sum((1 for at in ethane.sites if at.atom_type.name == "opls_135"))
== 2
)
assert (
sum((1 for at in ethane.sites if at.atom_type.name == "opls_140"))
== 6
)
assert len(ethane.bonds) == 7
assert all(x.bond_type for x in ethane.bonds)
assert len(ethane.angles) == 12
assert all(x.angle_type for x in ethane.angles)
assert len(ethane.dihedrals) == 9
assert all(x.dihedral_type for x in ethane.dihedrals)
"""
Skip test for box information until mbuild box overhaul PR is completed
mol2 = mb.load(get_fn('ethane.mol2'), backend='parmed')
mol2.box_vectors = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
oplsaa = Forcefield(name='oplsaa', strict=False)
ethane = oplsaa.apply(mol2, assert_improper_params=False)
assert ethane.box_vectors == mol2.box_vectors
"""
def test_from_mbuild(self, oplsaa):
mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
ethane = oplsaa.apply(mol2, assert_improper_params=False)
assert (
sum((1 for at in ethane.sites if at.atom_type.name == "opls_135"))
== 2
)
assert (
sum((1 for at in ethane.sites if at.atom_type.name == "opls_140"))
== 6
)
assert len(ethane.bonds) == 7
assert all(x.bond_type for x in ethane.bonds)
assert len(ethane.angles) == 12
assert all(x.angle_type for x in ethane.angles)
assert len(ethane.dihedrals) == 9
assert all(x.dihedral_type for x in ethane.dihedrals)
@pytest.mark.parametrize("mixing_rule", ["lorentz", "geometric"])
def test_comb_rule(self, mixing_rule, oplsaa):
mol2 = mb.load(get_fn("ethane.mol2"))
ethane = oplsaa.apply(
mol2, combining_rule=mixing_rule, assert_improper_params=False
)
assert ethane.combining_rule == mixing_rule
def test_write_refs(self, requests_mock, oplsaa):
register_mock_request(
mocker=requests_mock,
url="http://api.crossref.org/",
path="works/10.1021/ja9621760/transform/application/x-bibtex",
headers={"accept": "application/x-bibtex"},
text=RESPONSE_BIB_ETHANE_JA962170,
)
mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
ethane = oplsaa.apply(
mol2, references_file="ethane.bib", assert_improper_params=False
)
assert os.path.isfile("ethane.bib")
with open(get_fn("ethane.bib")) as file1:
with open("ethane.bib") as file2:
diff = list(
difflib.unified_diff(
file1.readlines(), file2.readlines(), n=0
)
)
assert not diff
def test_write_refs_multiple(self, requests_mock):
register_mock_request(
mocker=requests_mock,
url="http://api.crossref.org/",
path="works/10.1021/ja9621760/transform/application/x-bibtex",
headers={"accept": "application/x-bibtex"},
text=RESPONSE_BIB_ETHANE_JA962170,
)
register_mock_request(
mocker=requests_mock,
url="http://api.crossref.org/",
path="works/10.1021/jp0484579/transform/application/x-bibtex",
headers={"accept": "application/x-bibtex"},
text=RESPONSE_BIB_ETHANE_JP0484579,
)
mol2 = mb.load(get_fn("ethane.mol2"))
oplsaa = Forcefield(
forcefield_files=get_fn("refs-multi.xml"), strict=False
)
ethane = oplsaa.apply(
mol2,
references_file="ethane-multi.bib",
assert_improper_params=False,
)
assert os.path.isfile("ethane-multi.bib")
with open(get_fn("ethane-multi.bib")) as file1:
with open("ethane-multi.bib") as file2:
diff = list(
difflib.unified_diff(
file1.readlines(), file2.readlines(), n=0
)
)
assert not diff
def test_write_bad_ref(self, requests_mock):
register_mock_request(
mocker=requests_mock,
url="http://api.crossref.org/",
path="works/10.1021/garbage_bad_44444444jjjj/transform/application/x-bibtex",
headers={"accept": "application/x-bibtex"},
status_code=404,
)
mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
oplsaa = Forcefield(
forcefield_files=get_fn("refs-bad.xml"), strict=False
)
with pytest.warns(UserWarning):
ethane = oplsaa.apply(
mol2, references_file="ethane.bib", assert_improper_params=False
)
"""
These XML files missed the whole nonbonded force section
def test_from_mbuild_customtype():
mol2 = mb.load(get_fn('ethane_customtype.pdb'))
customtype_ff = Forcefield(forcefield_files=get_fn('validate_customtypes.xml'), strict=False)
ethane = customtype_ff.apply(mol2, assert_improper_params=False)
assert sum((1 for at in ethane.sites if at.atom_type.name == 'C3')) == 2
assert sum((1 for at in ethane.sites if at.atom_type.name == 'Hb')) == 6
assert len(ethane.bonds) == 7
assert all(x.bond_type for x in ethane.bonds)
assert len(ethane.angles) == 12
assert all(x.angle_type for x in ethane.angles)
assert len(ethane.dihedrals) == 9
assert all(x.dihedral_type for x in ethane.dihedrals)
def test_improper_dihedral():
untyped_benzene = mb.load(get_fn('benzene.mol2'), backend='parmed')
ff_improper = Forcefield(forcefield_files=get_fn('improper_dihedral.xml'), strict=False)
benzene = ff_improper.apply(untyped_benzene, assert_dihedral_params=False, assert_improper_params=False)
assert len(benzene.dihedrals) == 18
assert len([dih for dih in benzene.dihedrals if dih.improper]) == 6
assert len([dih for dih in benzene.dihedrals if not dih.improper]) == 12
"""
def test_urey_bradley(self):
system = mb.Compound()
first = mb.Particle(name="_CTL2", pos=[-1, 0, 0])
second = mb.Particle(name="_CL", pos=[0, 0, 0])
third = mb.Particle(name="_OBL", pos=[1, 0, 0])
fourth = mb.Particle(name="_OHL", pos=[0, 1, 0])
system.add([first, second, third, fourth])
system.add_bond((first, second))
system.add_bond((second, third))
system.add_bond((second, fourth))
ff = Forcefield(
forcefield_files=[get_fn("charmm36_cooh.xml")], strict=False
)
struc = ff.apply(
system,
assert_angle_params=False,
assert_dihedral_params=False,
assert_improper_params=False,
)
assert len(struc.angles) == 3
assert len(struc.angle_types) == 3 # 1 harmonic, 2 <NAME>
def test_charmm_improper(self):
system = mb.Compound()
first = mb.Particle(name="_CTL2", pos=[-1, 0, 0])
second = mb.Particle(name="_CL", pos=[0, 0, 0])
third = mb.Particle(name="_OBL", pos=[1, 0, 0])
fourth = mb.Particle(name="_OHL", pos=[0, 1, 0])
system.add([first, second, third, fourth])
system.add_bond((first, second))
system.add_bond((second, third))
system.add_bond((second, fourth))
ff = Forcefield(
forcefield_files=[get_fn("charmm36_cooh.xml")], strict=False
)
struc = ff.apply(
system,
assert_angle_params=False,
assert_dihedral_params=False,
assert_improper_params=False,
)
assert len(struc.impropers) == 1
assert len(struc.dihedrals) == 0
''' To be implemented -> Lookup connection types with mixed atomtype-atomclass
def test_topology_precedence():
"""Test to see if topology precedence is properly adhered to.
This test uses a force field file where bond, angle, and dihedral
parameters are present with different counts of `type` definitions.
It checks that:
1. The parameters with the higher number of `type` definitions
are assigned (because they are given the highest precedence)
2. That if multiple definitions exist with the same number of
`type` definitions, that the convention from OpenMM is followed
whereby the definitions that occurs earliest in the XML is
assigned.
"""
ethane = mb.load(get_fn('ethane.mol2'), backend='parmed')
ff = Forcefield(forcefield_files=get_fn('ethane-topo-precedence.xml'), strict=False)
typed_ethane = ff.apply(ethane, assert_improper_params=False)
# Need to work on the units of these test
assert len([bond for bond in typed_ethane.bonds
if round(float(bond.bond_type.parameters['r_eq'].value), 3) == 0.115]) == 6
assert len([bond for bond in typed_ethane.bonds
if round(float(bond.bond_type.parameters['r_eq'].value), 2) == 0.16]) == 1
assert len([angle for angle in typed_ethane.angles
if round(float(angle.angle_type.parameters['theta_eq'].value), 3) == 120.321]) == 6
assert len([angle for angle in typed_ethane.angles
if round(float(angle.angle_type.parameters['theta_eq'].value), 3) == 97.403]) == 6
assert len([rb for rb in typed_ethane.dihedral
if round(float(rb.dihedral_type.parameters['c0'].value), 3) == 0.287]) == 9
'''
@pytest.mark.parametrize(
"ff_filename,kwargs",
[
("ethane-angle-typo.xml", {"assert_angle_params": False}),
("ethane-dihedral-typo.xml", {"assert_dihedral_params": False}),
],
)
def test_missing_topo_params(self, ff_filename, kwargs):
"""Test that the user is notified if not all topology parameters are found."""
ethane = mb.load(get_fn("ethane.mol2"))
oplsaa_with_typo = Forcefield(
forcefield_files=get_fn(ff_filename), strict=False
)
with pytest.raises(Exception):
ethane = oplsaa_with_typo.apply(
ethane, assert_improper_params=False
)
with pytest.warns(UserWarning):
ethane = oplsaa_with_typo.apply(
ethane, assert_improper_params=False, **kwargs
)
def test_assert_bonds(self):
ff = Forcefield(name="trappe-ua", strict=False)
derponium = mb.Compound()
at1 = mb.Particle(name="H")
at2 = mb.Particle(name="O")
at3 = mb.Particle(name="_CH4")
derponium.add([at1, at2, at3])
derponium.add_bond((at1, at2))
derponium.add_bond((at2, at3))
with pytest.raises(Exception):
ff.apply(derponium, assert_improper_params=False)
thing = ff.apply(
derponium,
assert_bond_params=False,
assert_angle_params=False,
assert_improper_params=False,
)
assert any(b.bond_type is None for b in thing.bonds)
def test_apply_subfuncs(self, oplsaa):
mol2 = mb.load(get_fn("ethane.mol2"), backend="parmed")
ethane = oplsaa.apply(mol2, assert_improper_params=False)
typemap = oplsaa._run_atomtyping(mol2, use_residue_map=False)
ethane2 = oplsaa._parametrize(
mol2, typemap=typemap, assert_improper_params=False
)
assert ethane.box == ethane2.box
        assert (ethane.positions == ethane2.positions).all()
for a1, a2 in zip(ethane.sites, ethane2.sites):
assert a1.name == a2.name
assert ethane.get_index(a1) == ethane2.get_index(a2)
assert a1.atom_type == a2.atom_type
for b1, b2 in zip(ethane.bonds, ethane2.bonds):
assert (
b1.connection_members[0].atom_type
== b2.connection_members[0].atom_type
)
assert (
b1.connection_members[1].atom_type
== b2.connection_members[1].atom_type
)
assert b1.bond_type == b2.bond_type
def test_non_zero_charge(self, oplsaa):
compound = mb.load("C1=CC=C2C(=C1)C(C3=CC=CC=C3O2)C(=O)O", smiles=True)
with pytest.warns(UserWarning):
oplsaa.apply(
compound,
assert_dihedral_params=False,
assert_improper_params=False,
)
"""
@pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
def test_write_xml(filename):
mol = mb.load(get_fn(filename), backend='parmed')
oplsaa = Forcefield(name='oplsaa', strict=False)
typed = oplsaa.apply(mol, assert_improper_params=False)
typed.write_foyer(filename='opls-snippet.xml', forcefield=oplsaa, unique=True)
oplsaa_partial = Forcefield('opls-snippet.xml', strict=False)
typed_by_partial = oplsaa_partial.apply(mol, assert_improper_params=False)
for i in range(len(typed.sites)):
atype1 = typed.sites[i].atom_type
atype2 = typed_by_partial.sites[i].atom_type
assert atype1.expression == atype2.expression
assert atype1.parameters == atype2.parameters
for i in range(len(typed.bonds)):
btype1 = typed.bonds[i].bond_type
btype2 = typed_by_partial.bonds[i].bond_type
assert btype1.expression == btype2.expression
assert btype1.parameters == btype2.parameters
# Do it again but with an XML including periodic dihedrals
mol = mb.load(get_fn(filename), backend='parmed')
oplsaa = Forcefield(get_fn('oplsaa-periodic.xml'), strict=False)
typed = oplsaa.apply(mol, assert_improper_params=False)
typed.write_foyer(filename='opls-snippet.xml', forcefield=oplsaa, unique=True)
oplsaa_partial = Forcefield('opls-snippet.xml', strict=False)
typed_by_partial = oplsaa_partial.apply(mol, assert_improper_params=False)
for i in range(len(typed.sites)):
atype1 = typed.sites[i].atom_type
atype2 = typed_by_partial.sites[i].atom_type
assert atype1.expression == atype2.expression
assert atype1.parameters == atype2.parameters
for i in range(len(typed.bonds)):
btype1 = typed.bonds[i].bond_type
btype2 = typed_by_partial.bonds[i].bond_type
assert btype1.expression == btype2.expression
assert btype1.parameters == btype2.parameters
@pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
def test_write_xml_multiple_periodictorsions(filename):
cmpd = mb.load(get_fn(filename), backend='parmed')
ff = Forcefield(forcefield_files=get_fn('oplsaa_multiperiodicitytorsion.xml'), strict=False)
typed_struc = ff.apply(cmpd, assert_dihedral_params=False, assert_improper_params=False)
typed_struc.write_foyer(filename='multi-periodictorsions.xml', forcefield=ff, unique=True)
partial_ff = Forcefield(forcefield_files='multi-periodictorsions.xml', strict=False)
typed_by_partial = partial_ff.apply(cmpd, assert_dihedral_params=False, assert_improper_params=False)
assert len(typed_struc.bonds) == len(typed_by_partial.bonds)
assert len(typed_struc.angles) == len(typed_by_partial.angles)
assert len(typed_struc.dihedrals) == len(typed_by_partial.dihedrals)
root = ET.parse('multi-periodictorsions.xml')
periodic_element = root.find('PeriodicTorsionForce')
assert 'periodicity2' in periodic_element[0].attrib
assert 'k2' in periodic_element[0].attrib
assert 'phase2' in periodic_element[0].attrib
@pytest.mark.parametrize("filename", ['ethane.mol2', 'benzene.mol2'])
def test_load_xml(filename):
mol = mb.load(get_fn(filename), backend='parmed')
if filename == 'ethane.mol2':
ff = Forcefield(get_fn('ethane-multiple.xml'), strict=False)
else:
ff = Forcefield(name='oplsaa', strict=False)
typed = ff.apply(mol, assert_improper_params=False)
typed.write_foyer(filename='snippet.xml', forcefield=ff, unique=True)
generated_ff = Forcefield('snippet.xml', strict=False)
def test_write_xml_overrides():
#Test xml_writer new overrides and comments features
mol = mb.load(get_fn('styrene.mol2'), backend='parmed')
oplsaa = Forcefield(name='oplsaa', strict=False)
typed = oplsaa.apply(mol, assert_dihedral_params=False, assert_improper_params=False)
typed.write_foyer(filename='opls-styrene.xml', forcefield=oplsaa, unique=True)
styrene = ET.parse('opls-styrene.xml')
atom_types = styrene.getroot().find('AtomTypes').findall('Type')
for item in atom_types:
attributes = item.attrib
if attributes['name'] == 'opls_145':
assert attributes['overrides'] == 'opls_142'
assert str(item.xpath('comment()')) in {'[<!--Note: original overrides="opls_141,opls_142"-->]',
'[<!--Note: original overrides="opls_142,opls_141"-->]'}
elif attributes['name'] == 'opls_146':
assert attributes['overrides'] == 'opls_144'
assert str(item.xpath('comment()')) == '[<!--Note: original overrides="opls_144"-->]'
def test_load_metadata():
lj_ff = Forcefield(get_fn('lj.xml'), strict=False)
assert lj_ff.version == '0.4.1'
assert lj_ff.name == 'LJ'
lj_ff = Forcefield(forcefield_files=[get_fn('lj.xml'), get_fn('lj2.xml')])
assert lj_ff.version == ['0.4.1', '4.8.2']
assert lj_ff.name == ['LJ', 'JL']
"""
```
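Outside of pytest, the tests above boil down to the same load-then-apply workflow. A hedged minimal sketch of that workflow follows; it assumes foyer (with its gmso backend), gmso, and mbuild are installed and that OPLS-AA covers every atom in the molecule.
```python
# Minimal usage sketch of the workflow exercised by the tests above.
import mbuild as mb
from foyer.general_forcefield import Forcefield

ethane = mb.load("CC", smiles=True)                 # build ethane from SMILES
oplsaa = Forcefield(name="oplsaa", strict=False)    # load the bundled OPLS-AA XML
typed = oplsaa.apply(ethane, assert_improper_params=False)

# The parametrized topology can then be inspected as in the tests above.
print(len(typed.sites), len(typed.bonds), len(typed.angles), len(typed.dihedrals))
```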
#### File: foyer/tests/test_smarts.py
```python
import lark
import parmed as pmd
import pytest
from foyer.exceptions import FoyerError
from foyer.forcefield import Forcefield
from foyer.smarts import SMARTS
from foyer.smarts_graph import SMARTSGraph
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
from foyer.topology_graph import TopologyGraph
class TestSMARTS(BaseTest):
@pytest.fixture(scope="session")
def rule_match(self, smarts_parser):
def _rule_match(top, typemap, smart, result):
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string=smart,
typemap=typemap,
)
assert bool(list(rule.find_matches(top, typemap))) is result
return _rule_match
@pytest.fixture(scope="session")
def rule_match_count(self, smarts_parser):
def _rule_match_count(top, typemap, smart, count):
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string=smart,
typemap=typemap,
)
            assert len(list(rule.find_matches(top, typemap))) == count
return _rule_match_count
def test_ast(self, smarts_parser):
ast = smarts_parser.parse("O([H&X1])(H)")
assert ast.data == "start"
assert ast.children[0].data == "atom"
assert ast.children[0].children[0].data == "atom_symbol"
assert str(ast.children[0].children[0].children[0]) == "O"
@pytest.mark.parametrize(
"pattern", ["[#6][#1](C)H", "[O;X2]([C;X4](F)(*)(*))[C;X4]"]
)
def test_parse(self, pattern, smarts_parser):
assert smarts_parser.parse(pattern)
def test_uniqueness(self, rule_match):
mol2 = pmd.load_file(get_fn("uniqueness_test.mol2"), structure=True)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in mol2.atoms
}
mol2_graph = TopologyGraph.from_parmed(mol2)
rule_match(mol2_graph, typemap, "[#6]1[#6][#6][#6][#6][#6]1", False)
rule_match(mol2_graph, typemap, "[#6]1[#6][#6][#6][#6]1", False)
rule_match(mol2_graph, typemap, "[#6]1[#6][#6][#6]1", True)
def test_ringness(self, rule_match):
ring_mol2 = pmd.load_file(get_fn("ring.mol2"), structure=True)
ring_mol2_graph = TopologyGraph.from_parmed(ring_mol2)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in ring_mol2.atoms
}
rule_match(ring_mol2_graph, typemap, "[#6]1[#6][#6][#6][#6][#6]1", True)
not_ring_mol2 = pmd.load_file(get_fn("not_ring.mol2"), structure=True)
not_ring_mol2_graph = TopologyGraph.from_parmed(not_ring_mol2)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in not_ring_mol2.atoms
}
rule_match(
not_ring_mol2_graph, typemap, "[#6]1[#6][#6][#6][#6][#6]1", False
)
def test_fused_ring(self, smarts_parser):
mol2 = pmd.load_file(get_fn("fused.mol2"), structure=True)
mol2_graph = TopologyGraph.from_parmed(mol2)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in mol2.atoms
}
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string="[#6]12[#6][#6][#6][#6][#6]1[#6][#6][#6][#6]2",
typemap=typemap,
)
match_indices = list(rule.find_matches(mol2_graph, typemap))
assert 3 in match_indices
assert 4 in match_indices
assert len(match_indices) == 2
def test_ring_count(self, smarts_parser):
# Two rings
fused = pmd.load_file(get_fn("fused.mol2"), structure=True)
fused_graph = TopologyGraph.from_parmed(fused)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in fused.atoms
}
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string="[#6;R2]",
typemap=typemap,
)
match_indices = list(rule.find_matches(fused_graph, typemap))
for atom_idx in (3, 4):
assert atom_idx in match_indices
assert len(match_indices) == 2
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string="[#6;R1]",
typemap=typemap,
)
match_indices = list(rule.find_matches(fused_graph, typemap))
for atom_idx in (0, 1, 2, 5, 6, 7, 8, 9):
assert atom_idx in match_indices
assert len(match_indices) == 8
# One ring
ring = pmd.load_file(get_fn("ring.mol2"), structure=True)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in ring.atoms
}
ring_graph = TopologyGraph.from_parmed(ring)
rule = SMARTSGraph(
name="test",
parser=smarts_parser,
smarts_string="[#6;R1]",
typemap=typemap,
)
match_indices = list(rule.find_matches(ring_graph, typemap))
for atom_idx in range(6):
assert atom_idx in match_indices
assert len(match_indices) == 6
def test_precedence_ast(self, smarts_parser):
ast1 = smarts_parser.parse("[C,H;O]")
ast2 = smarts_parser.parse("[O;H,C]")
assert ast1.children[0].children[0].data == "weak_and_expression"
assert ast2.children[0].children[0].data == "weak_and_expression"
assert ast1.children[0].children[0].children[0].data == "or_expression"
assert ast2.children[0].children[0].children[1].data == "or_expression"
ast1 = smarts_parser.parse("[C,H&O]")
ast2 = smarts_parser.parse("[O&H,C]")
assert ast1.children[0].children[0].data == "or_expression"
assert ast2.children[0].children[0].data == "or_expression"
assert ast1.children[0].children[0].children[1].data == "and_expression"
assert ast2.children[0].children[0].children[0].data == "and_expression"
def test_precedence(self, rule_match_count):
mol2 = pmd.load_file(get_fn("ethane.mol2"), structure=True)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in mol2.atoms
}
mol2_graph = TopologyGraph.from_parmed(mol2)
checks = {
"[C,O;C]": 2,
"[C&O;C]": 0,
"[!C;O,C]": 0,
"[!C&O,C]": 2,
}
for smart, result in checks.items():
rule_match_count(mol2_graph, typemap, smart, result)
def test_not_ast(self, smarts_parser):
checks = {
"[!C;!H]": "weak_and_expression",
"[!C&H]": "and_expression",
"[!C;H]": "weak_and_expression",
"[!C]": "not_expression",
}
for smart, grandchild in checks.items():
ast = smarts_parser.parse(smart)
assert ast.children[0].children[0].data == grandchild
illegal_nots = ["[!CH]", "[!C!H]"]
for smart in illegal_nots:
with pytest.raises(lark.UnexpectedInput):
smarts_parser.parse(smart)
def test_not(self, rule_match_count):
mol2 = pmd.load_file(get_fn("ethane.mol2"), structure=True)
typemap = {
atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
for atom in mol2.atoms
}
mol2_graph = TopologyGraph.from_parmed(mol2)
checks = {
"[!O]": 8,
"[!#5]": 8,
"[!C]": 6,
"[!#6]": 6,
"[!C&!H]": 0,
"[!C;!H]": 0,
}
for smart, result in checks.items():
rule_match_count(mol2_graph, typemap, smart, result)
def test_hexa_coordinated(self):
ff = Forcefield(forcefield_files=get_fn("pf6.xml"))
mol2 = pmd.load_file(get_fn("pf6.mol2"), structure=True)
pf6 = ff.apply(mol2)
types = [a.type for a in pf6.atoms]
assert types.count("P") == 1
assert types.count("F1") == 2
assert types.count("F2") == 2
assert types.count("F3") == 2
assert len(pf6.bonds) == 6
assert all(bond.type for bond in pf6.bonds)
assert len(pf6.angles) == 15
assert all(angle.type for angle in pf6.angles)
def test_optional_names_bad_syntax(self):
bad_optional_names = ["_C", "XXX", "C"]
with pytest.raises(FoyerError):
S = SMARTS(optional_names=bad_optional_names)
def test_optional_names_good_syntax(self):
good_optional_names = ["_C", "_CH2", "_CH"]
S = SMARTS(optional_names=good_optional_names)
def test_optional_name_parser(self):
optional_names = ["_C", "_CH2", "_CH"]
S = SMARTS(optional_names=optional_names)
ast = S.parse("_CH2_C_CH")
symbols = [a.children[0] for a in ast.find_data("atom_symbol")]
for name in optional_names:
assert name in symbols
```
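The fixtures above wrap a small amount of setup that is easy to miss: a SMARTS parser, a per-atom typemap, and a TopologyGraph built from the parmed structure. The sketch below spells that setup out for a single ring pattern; it assumes the foyer test data (ring.mol2) is available through foyer.tests.utils.get_fn and that SMARTS() can be constructed with default arguments, as the BaseTest fixture appears to do.
```python
import parmed as pmd
from foyer.smarts import SMARTS
from foyer.smarts_graph import SMARTSGraph
from foyer.tests.utils import get_fn
from foyer.topology_graph import TopologyGraph

parser = SMARTS()  # assumed default construction, mirroring the test fixture
ring = pmd.load_file(get_fn("ring.mol2"), structure=True)
typemap = {
    atom.idx: {"whitelist": set(), "blacklist": set(), "atomtype": None}
    for atom in ring.atoms
}
graph = TopologyGraph.from_parmed(ring)

# A six-membered carbon ring pattern should match every ring carbon.
rule = SMARTSGraph(
    name="ring_carbon",
    parser=parser,
    smarts_string="[#6]1[#6][#6][#6][#6][#6]1",
    typemap=typemap,
)
print(sorted(rule.find_matches(graph, typemap)))
```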
#### File: foyer/tests/test_topology_graph.py
```python
import networkx as nx
import pytest
from foyer.atomtyper import find_atomtypes
from foyer.forcefield import Forcefield
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import (
has_gmso,
has_openff_toolkit,
is_running_on_windows,
)
from foyer.topology_graph import TopologyGraph
@pytest.mark.skipif(
condition=(
is_running_on_windows() or (not (has_gmso or has_openff_toolkit))
),
reason="openff-toolkit and gmso not installed",
)
class TestTopologyGraph(BaseTest):
@pytest.fixture(scope="session")
def openff_topology_graph(self):
from openff.toolkit.topology import Molecule, Topology
openff_ethane = Molecule.from_smiles("CC")
openff_ethane = Topology.from_molecules(openff_ethane)
return TopologyGraph.from_openff_topology(openff_ethane)
@pytest.fixture(scope="session")
def gmso_topology_graph(self):
import mbuild as mb
from gmso.external import from_mbuild
ethane = mb.load("CC", smiles=True)
return TopologyGraph.from_gmso_topology(from_mbuild(ethane))
@pytest.fixture(scope="session")
def parmed_topology_graph(self):
import mbuild as mb
ethane = mb.conversion.to_parmed(mb.load("CC", smiles=True))
return TopologyGraph.from_parmed(ethane)
def test_graph_equivalence(
self, openff_topology_graph, gmso_topology_graph, parmed_topology_graph
):
assert nx.is_isomorphic(openff_topology_graph, gmso_topology_graph)
assert nx.is_isomorphic(gmso_topology_graph, parmed_topology_graph)
assert nx.is_isomorphic(openff_topology_graph, parmed_topology_graph)
def test_graph_atomdata_equivalence(
self, openff_topology_graph, gmso_topology_graph, parmed_topology_graph
):
atom_data_gmso_dict = {}
atom_data_openff_dict = {}
atom_data_parmed_dict = {}
for (
(openff_idx, atom_data_openff),
(gmso_idx, atom_data_gmso),
(parmed_idx, atom_data_parmed),
) in zip(
openff_topology_graph.atoms(data=True),
gmso_topology_graph.atoms(data=True),
parmed_topology_graph.atoms(data=True),
):
atom_data_openff_dict[openff_idx] = {
"index": atom_data_openff.index,
"element": atom_data_openff.element,
"atomic_number": atom_data_openff.atomic_number,
}
atom_data_gmso_dict[gmso_idx] = {
"index": atom_data_gmso.index,
"element": atom_data_gmso.element,
"atomic_number": atom_data_gmso.atomic_number,
}
atom_data_parmed_dict[parmed_idx] = {
"index": atom_data_parmed.index,
"element": atom_data_parmed.element,
"atomic_number": atom_data_parmed.atomic_number,
}
idx = 0
while True:
try:
assert (
atom_data_openff_dict[idx]
                    == atom_data_gmso_dict[idx]
== atom_data_parmed_dict[idx]
)
except KeyError:
break
idx += 1
def test_atom_typing(
self,
openff_topology_graph,
gmso_topology_graph,
parmed_topology_graph,
oplsaa,
):
# ToDo: More robust testing for atomtyping
openff_typemap = find_atomtypes(
openff_topology_graph, forcefield=oplsaa
)
gmso_typemap = find_atomtypes(gmso_topology_graph, forcefield=oplsaa)
parmed_typemap = find_atomtypes(
parmed_topology_graph, forcefield=oplsaa
)
assert openff_typemap
assert gmso_typemap
assert parmed_typemap
def test_from_type_error(self):
with pytest.raises(TypeError):
TopologyGraph.from_openff_topology("NonOpenFFTopology")
with pytest.raises(TypeError):
TopologyGraph.from_gmso_topology("NonGMSOTopology")
with pytest.raises(TypeError):
TopologyGraph.from_parmed("NonParmedStructure")
```
#### File: foyer/utils/external.py
```python
import requests
def get_ref(ref_url, headers):
"""Return bibtex reference for used atom-types."""
bibtex_ref = requests.get(ref_url, headers=headers)
if bibtex_ref.ok:
return bibtex_ref
else:
return None
```
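For context, get_ref is exercised by the mocked reference tests earlier in this section. The hedged sketch below shows a direct call with the same Crossref transform URL and accept header used there; it performs a live network request, which may fail offline.
```python
from foyer.utils.external import get_ref

# DOI below is the OPLS reference mocked in the tests above.
url = "http://api.crossref.org/works/10.1021/ja9621760/transform/application/x-bibtex"
response = get_ref(url, headers={"accept": "application/x-bibtex"})
if response is not None:
    print(response.text)   # BibTeX entry for the referenced force field paper
else:
    print("Reference could not be retrieved.")
```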
#### File: foyer/utils/misc.py
```python
def validate_type(iterator, type_):
"""Validate all the elements of the iterable are of a particular type."""
for item in iterator:
if not isinstance(item, type_):
raise TypeError(
f"Expected {item} to be of type {type_.__name__} but got "
f"{type(item).__name__} instead."
)
```
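A short usage sketch of validate_type, with the import path taken from the file header above; the sample lists are illustrative.
```python
from foyer.utils.misc import validate_type

validate_type([1, 2, 3], int)        # passes silently
try:
    validate_type([1, "2", 3], int)  # the string element triggers the TypeError
except TypeError as err:
    print(err)
```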
|
{
"source": "jennyfothergill/GIXStapose",
"score": 2
}
|
#### File: gixstapose/tests/test_diffract.py
```python
from pathlib import Path
from tempfile import NamedTemporaryFile
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from gixstapose.draw_scene import get_info
from gixstapose.diffractometer import Diffractometer, camera_to_rot
from gixstapose.main import camera_from_pos
path = str(Path(__file__).parent.parent.resolve())
temp = NamedTemporaryFile(suffix=".png")
def test_diffract():
d = Diffractometer()
inputfile = path + "/data/sc10.pdb"
_, _, _, positions, _, _, box = get_info(inputfile)
d.load(positions, box[:3])
rot = camera_to_rot(camera_from_pos((1,0,0)))
dp = d.diffract(rot.T)
plt.imsave(temp.name, dp, cmap="jet")
dpim = Image.open(temp.name)
dparr = np.asarray(dpim)
im = Image.open(path + "/data/sc10_camera100.png")
imarr = np.asarray(im)
assert np.allclose(dparr,imarr)
```
|
{
"source": "jennyfothergill/msibi",
"score": 3
}
|
#### File: msibi/msibi/bonds.py
```python
from msibi.utils.sorting import natural_sort
class Bond(object):
def __init__(self, type1, type2, k, r0):
self.type1, self.type2 = sorted(
[type1, type2],
key=natural_sort
)
self.name = f"{self.type1}-{self.type2}"
self.k = k
self.r0 = r0
self._states = dict()
def _add_state(self, state):
self._states[state] = {
"k": self.k,
"r0": self.r0
}
class Angle(object):
def __init__(self, type1, type2, type3, k, theta):
self.type1 = type1
self.type2 = type2
self.type3 = type3
self.name = f"{self.type1}-{self.type2}-{self.type3}"
self.k = k
self.theta = theta
self._states = dict()
def _add_state(self, state):
self._states[state] = {
"k": self.k,
"theta": self.theta
}
```
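Because Bond sorts its type pair with natural_sort, the bond name is canonical regardless of argument order, whereas Angle keeps the order it is given. The sketch below illustrates that; the import path is inferred from the file header and the type names and force-field values are hypothetical.
```python
from msibi.bonds import Angle, Bond

b1 = Bond("B", "A", k=100.0, r0=1.0)
b2 = Bond("A", "B", k=100.0, r0=1.0)
print(b1.name, b2.name)   # both expected to be "A-B" since the pair is sorted

angle = Angle("A", "B", "C", k=50.0, theta=2.0)
print(angle.name)         # "A-B-C" -- angle types keep their given order
```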
#### File: msibi/msibi/state.py
```python
import os
import shutil
import warnings
from msibi import MSIBI, utils
from msibi.utils.hoomd_run_template import (HOOMD2_HEADER, HOOMD_TABLE_ENTRY,
HOOMD_BOND_INIT, HOOMD_BOND_ENTRY, HOOMD_ANGLE_INIT, HOOMD_ANGLE_ENTRY,
HOOMD_TEMPLATE)
import cmeutils as cme
from cmeutils.structure import gsd_rdf
import gsd
import gsd.hoomd
class State(object):
"""A single state used as part of a multistate optimization.
Parameters
----------
name : str
State name used in creating state directory space and output files.
kT : float
Unitless heat energy (product of Boltzmann's constant and temperature).
traj_file : path to a gsd.hoomd.HOOMDTrajectory file
The gsd trajectory associated with this state
alpha : float, default 1.0
        The alpha value used to scale the weight of this state.
backup_trajectory : bool, default False
True if each query trajectory is backed up
Attributes
----------
name : str
State name
kT : float
Unitless heat energy (product of Boltzmann's constant and temperature).
traj_file : path
Path to the gsd trajectory associated with this state
alpha : float
        The alpha value used to scale the weight of this state.
dir : str
        Path to where the State info will be saved.
query_traj : str
Path to the query trajectory.
backup_trajectory : bool
True if each query trajectory is backed up
"""
def __init__(
self,
name,
kT,
traj_file,
alpha=1.0,
backup_trajectory=False,
_dir=None
):
self.name = name
self.kT = kT
self.traj_file = os.path.abspath(traj_file)
self._opt = None
if alpha < 0 or alpha > 1:
raise ValueError("alpha should be between 0.0 and 1.0")
self.alpha = float(alpha)
self.dir = self._setup_dir(name, kT, dir_name=_dir)
self.query_traj = os.path.join(self.dir, "query.gsd")
self.backup_trajectory = backup_trajectory
def save_runscript(
self,
n_steps,
integrator,
integrator_kwargs,
dt,
gsd_period,
table_potentials,
table_width,
bonds=None,
angles=None,
engine="hoomd",
):
"""Save the input script for the MD engine."""
script = list()
script.append(
HOOMD2_HEADER.format(self.traj_file, table_width)
)
for type1, type2, potential_file in table_potentials:
script.append(HOOMD_TABLE_ENTRY.format(**locals()))
if bonds is not None:
script.append(HOOMD_BOND_INIT)
for bond in bonds:
name = bond.name
k = bond._states[self]["k"]
r0 = bond._states[self]["r0"]
script.append(HOOMD_BOND_ENTRY.format(**locals()))
if angles is not None:
script.append(HOOMD_ANGLE_INIT)
for angle in angles:
name = angle.name
k = angle._states[self]["k"]
theta = angle._states[self]["theta"]
script.append(HOOMD_ANGLE_ENTRY.format(**locals()))
integrator_kwargs["kT"] = self.kT
script.append(HOOMD_TEMPLATE.format(**locals()))
runscript_file = os.path.join(self.dir, "run.py")
with open(runscript_file, "w") as fh:
fh.writelines(script)
def _setup_dir(self, name, kT, dir_name=None):
"""Create a state directory each time a new State is created."""
if dir_name is None:
if not os.path.isdir("states"):
os.mkdir("states")
dir_name = os.path.join("states", f"{name}_{kT}")
else:
if not os.path.isdir(
os.path.join(dir_name, "states")
):
os.mkdir(os.path.join(dir_name, "states"))
dir_name = os.path.join(dir_name, "states", f"{name}_{kT}")
try:
assert not os.path.isdir(dir_name)
os.mkdir(dir_name)
except AssertionError:
print(f"{dir_name} already exists")
raise
return os.path.abspath(dir_name)
```
#### File: msibi/tests/test_state.py
```python
import pytest
from msibi.state import State
@pytest.mark.skipif(True, reason="Needs implementing!")
def test_init():
pass
@pytest.mark.skipif(True, reason="Needs implementing!")
def test_save_runscript():
pass
```
|
{
"source": "jennyfothergill/signac-flow",
"score": 3
}
|
#### File: signac-flow/flow/aggregates.py
```python
import itertools
from abc import abstractmethod
from collections.abc import Collection, Iterable, Mapping
from hashlib import md5
def _get_unique_function_id(func):
"""Generate unique id for the provided function.
Hashing the bytecode rather than directly hashing the function allows for
the comparison of internal functions like ``self._aggregator_function``
or ``self._select`` that may have the same definitions but different
hashes simply because they are distinct objects.
It is possible for equivalent functions to have different ids if the
bytecode is not identical.
Parameters
----------
func : callable
The function to be hashed.
Returns
-------
str
The hash of the function's bytecode if possible, otherwise the hash
of the function.
"""
try:
return hash(func.__code__.co_code)
except AttributeError: # Cannot access function's compiled bytecode
return hash(func)
class aggregator:
"""Decorator for operation functions that operate on aggregates.
By default, if the ``aggregator_function`` is ``None``, an aggregate of all
jobs will be created.
Examples
--------
The code block below defines a :class:`~.FlowOperation` that prints the
total length of the provided aggregate of jobs.
.. code-block:: python
@aggregator()
@FlowProject.operation
def foo(*jobs):
print(len(jobs))
Parameters
----------
aggregator_function : callable or None
A callable that performs aggregation of jobs. It takes in a list of
jobs and can return or yield subsets of jobs as an iterable. The
default behavior is creating a single aggregate of all jobs.
sort_by : str, callable, or None
Before aggregating, sort the jobs by a given statepoint parameter. If
the argument is a string, jobs are sorted by that state point key. If
the argument is callable, this will be passed as the ``key`` argument to
:func:`sorted`. If None, no sorting is performed (Default value = None).
sort_ascending : bool
True if the jobs are to be sorted in ascending order (Default value =
True).
select : callable or None
Condition for filtering individual jobs. This is passed as the
``function`` argument to :func:`filter`. If None, no filtering is
performed (Default value = None).
"""
def __init__(
self, aggregator_function=None, sort_by=None, sort_ascending=True, select=None
):
if aggregator_function is None:
def aggregator_function(jobs):
yield tuple(jobs) if jobs else ()
if not callable(aggregator_function):
raise TypeError(
"Expected aggregator_function to be callable, got "
f"{type(aggregator_function)}"
)
if sort_by is not None and not (isinstance(sort_by, str) or callable(sort_by)):
raise TypeError(
f"Expected sort_by parameter to be str or callable, got {type(sort_by)}"
)
if select is not None and not callable(select):
raise TypeError(
f"Expected select parameter to be callable, got {type(select)}"
)
# Set the ``_is_default_aggregator`` attribute to False by default. But if
# the "non-aggregate" aggregator object i.e. aggregator.groupsof(1) is
# created using the class method, then we explicitly set the
# ``_is_default_aggregator`` attribute to True.
self._is_default_aggregator = False
self._aggregator_function = aggregator_function
self._sort_by = sort_by
self._sort_ascending = bool(sort_ascending)
self._select = select
@classmethod
def groupsof(cls, num=1, sort_by=None, sort_ascending=True, select=None):
"""Aggregate jobs into groupings of a given size.
By default, creates aggregates consisting of a single job.
If the number of jobs present in the project is not divisible by the
number provided by the user, the last aggregate will be smaller and
contain the remaining jobs. For instance, if 10 jobs are present in a
project and they are aggregated in groups of 3, then the generated
aggregates will have lengths 3, 3, 3, and 1.
Examples
--------
The code block below shows how to aggregate jobs in groups of 2.
.. code-block:: python
@aggregator.groupsof(num=2)
@FlowProject.operation
def foo(*jobs):
print(len(jobs))
Parameters
----------
num : int
The default size of aggregates. The final aggregate contains the
remaining jobs and may have fewer than ``num`` jobs.
sort_by : str, callable, or None
Before aggregating, sort the jobs by a given statepoint parameter. If
the argument is a string, jobs are sorted by that state point key. If
the argument is callable, this will be passed as the ``key`` argument to
:func:`sorted`. If None, no sorting is performed (Default value = None).
sort_ascending : bool
True if the jobs are to be sorted in ascending order (Default value
= True).
select : callable or None
Condition for filtering individual jobs. This is passed as the
``function`` argument to :func:`filter`. If None, no filtering is
performed (Default value = None).
Returns
-------
aggregator : :class:`~.aggregator`
The :meth:`~.groupsof` aggregator.
"""
try:
if num != int(num):
raise ValueError("The num parameter should be an integer")
num = int(num)
if num <= 0:
raise ValueError("The num parameter should have a value greater than 0")
except TypeError:
raise TypeError("The num parameter should be an integer")
# This method is similar to the `grouper` method documented here:
# https://docs.python.org/3/library/itertools.html#itertools.zip_longest
# However, this function does not have a fill value.
# Source of this implementation: https://stackoverflow.com/a/31185097
def aggregator_function(jobs):
iterable = iter(jobs)
return iter(lambda: tuple(itertools.islice(iterable, num)), tuple())
aggregator_instance = cls(aggregator_function, sort_by, sort_ascending, select)
if num == 1 and sort_by is None and select is None and sort_ascending:
aggregator_instance._is_default_aggregator = True
return aggregator_instance
@classmethod
def groupby(cls, key, default=None, sort_by=None, sort_ascending=True, select=None):
"""Aggregate jobs according to matching state point values.
Examples
--------
The code block below provides an example of how to aggregate jobs
by a state point parameter ``"sp"``. If the state point does not
contain the key ``"sp"``, a default value of -1 is used.
.. code-block:: python
@aggregator.groupby(key="sp", default=-1)
@FlowProject.operation
def foo(*jobs):
print(len(jobs))
Parameters
----------
key : str, Iterable[str], or callable
The method by which jobs are grouped. It may be a state point key
or an iterable of state point keys whose values define the
groupings. It may also be an arbitrary callable of
:class:`~signac.contrib.job.Job` when greater flexibility is
needed.
default : Any
Default value used for grouping if the key is missing or invalid.
If ``key`` is an iterable, the default value must be a sequence
of equal length. If ``key`` is a callable, this argument is
ignored. If None, the provided keys must exist for all jobs
(Default value = None).
sort_by : str, callable, or None
Before aggregating, sort the jobs by a given statepoint parameter. If
the argument is a string, jobs are sorted by that state point key. If
the argument is callable, this will be passed as the ``key`` argument to
:func:`sorted`. If None, no sorting is performed (Default value = None).
sort_ascending : bool
True if the jobs are to be sorted in ascending order (Default value
= True).
select : callable or None
Condition for filtering individual jobs. This is passed as the
``function`` argument to :func:`filter`. If None, no filtering is
performed (Default value = None).
Returns
-------
aggregator : :class:`~.aggregator`
The :meth:`~.groupby` aggregator.
"""
if isinstance(key, str):
if default is None:
def keyfunction(job):
return job.statepoint[key]
else:
def keyfunction(job):
return job.statepoint.get(key, default)
elif isinstance(key, Iterable):
keys = list(key)
if default is None:
def keyfunction(job):
return [job.statepoint[key] for key in keys]
else:
if isinstance(default, Iterable):
if len(default) != len(keys):
raise ValueError(
"Expected length of default argument is "
f"{len(keys)}, got {len(default)}."
)
else:
raise TypeError(
"Invalid default argument. Expected Iterable, "
f"got {type(default)}"
)
def keyfunction(job):
return [
job.statepoint.get(key, default_value)
for key, default_value in zip(keys, default)
]
elif callable(key):
keyfunction = key
else:
raise TypeError(
"Invalid key argument. Expected str, Iterable, "
f"or a callable, got {type(key)}"
)
def aggregator_function(jobs):
for key, group in itertools.groupby(
sorted(jobs, key=keyfunction), key=keyfunction
):
yield tuple(group)
return cls(aggregator_function, sort_by, sort_ascending, select)
def __eq__(self, other):
"""Test equality with another aggregator."""
if not isinstance(other, type(self)):
return NotImplemented
# It is not possible to compare aggregators, even with equivalent
# aggregator functions. Moreover, the code objects created by
# _get_unique_function_id do not account for differences in the bound
# parameters. Thus, the only meaningful comparison is whether both
# aggregators are the default aggregator (and thus equivalent).
return self._is_default_aggregator and other._is_default_aggregator
def __hash__(self):
"""Hash this aggregator."""
return hash(
(
self._is_default_aggregator,
self._sort_ascending,
_get_unique_function_id(self._sort_by),
_get_unique_function_id(self._aggregator_function),
_get_unique_function_id(self._select),
)
)
def _create_AggregateStore(self, project):
"""Create the actual collections of jobs to be sent to aggregate operations.
The :class:`aggregator` class is just a decorator that provides a
signal for operation functions that should be treated as aggregate
operations and information on how to perform the aggregation. This
function generates the classes that actually hold the aggregates
(tuples of jobs) to which aggregate operations will be applied.
Parameters
----------
project : :class:`signac.contrib.project.Project`
A signac project used to fetch jobs for creating aggregates.
Returns
-------
:class:`~._BaseAggregateStore`
The aggregate store.
"""
if self._is_default_aggregator:
return _DefaultAggregateStore(project)
else:
return _AggregateStore(self, project)
def __call__(self, func=None):
"""Add this aggregator to a provided operation.
This call operator allows the class to be used as a decorator.
Parameters
----------
func : callable
The function to decorate.
"""
if not callable(func):
raise TypeError(
"Invalid argument passed while calling the aggregate "
f"instance. Expected a callable, got {type(func)}."
)
if getattr(func, "_flow_with_job", False):
raise RuntimeError(
"The @with_job decorator cannot be used with aggregation."
)
setattr(func, "_flow_aggregate", self)
return func
class _BaseAggregateStore(Mapping):
"""Base abstract class for aggregate stores.
An aggregate store is a mapping from aggregate ids to aggregates, where
an aggregate is defined as a tuple of instances of
:class:`signac.contrib.job.Job`.
"""
def __init__(self, project):
self._project = project
def __iter__(self):
yield from self.keys()
class _AggregateStore(_BaseAggregateStore):
"""Class containing all aggregates associated with an :class:`aggregator`.
Iterating over this object yields aggregate ids, which can be used as
indices to return the corresponding aggregates.
Parameters
----------
aggregator : :class:`aggregator`
aggregator object used to generate aggregates for this store.
project : :class:`flow.FlowProject` or :class:`signac.contrib.project.Project`
A signac project containing the jobs that will be used to create
aggregates.
"""
def __init__(self, aggregator, project):
self._aggregator = aggregator
self._project = project
# We need to register the aggregates for this instance using the
# provided project. After registering, we store the aggregates mapped
# with the ids using :func:`get_aggregate_id`.
self._register_aggregates()
def __getitem__(self, id):
"""Get the aggregate corresponding to the provided id."""
try:
return self._aggregates_by_id[id]
except KeyError:
raise KeyError(f"Aggregate id {id} could not be found.")
def __contains__(self, id):
"""Return whether this instance contains an aggregate (by aggregate id).
Parameters
----------
id : str
The id of an aggregate of jobs.
Returns
-------
bool
Whether this instance contains the aggregate.
"""
return id in self._aggregates_by_id
def __len__(self):
return len(self._aggregates_by_id)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._aggregator == other._aggregator
def __hash__(self):
return hash(self._aggregator)
def keys(self):
return self._aggregates_by_id.keys()
def values(self):
return self._aggregates_by_id.values()
def items(self):
return self._aggregates_by_id.items()
def _register_aggregates(self):
"""Register aggregates from the project.
This is called at instantiation to generate and store aggregates.
Every aggregate is required to be a tuple of jobs.
"""
# Initialize the internal mapping from id to aggregate
self._aggregates_by_id = {}
for aggregate in self._generate_aggregates():
for job in aggregate:
if job not in self._project:
raise LookupError(
f"The signac job {job.get_id()} not found in {self._project}"
)
try:
stored_aggregate = tuple(aggregate)
except TypeError: # aggregate is not iterable
raise ValueError("Invalid aggregator_function provided by the user.")
# Store aggregate by id to allow searching by id
self._aggregates_by_id[
get_aggregate_id(stored_aggregate)
] = stored_aggregate
def _generate_aggregates(self):
jobs = self._project
if self._aggregator._select is not None:
jobs = filter(self._aggregator._select, jobs)
if self._aggregator._sort_by is None:
jobs = list(jobs)
else:
if callable(self._aggregator._sort_by):
sort_function = self._aggregator._sort_by
else:
def sort_function(job):
return job.statepoint[self._aggregator._sort_by]
jobs = sorted(
jobs,
key=sort_function,
reverse=not self._aggregator._sort_ascending,
)
yield from self._aggregator._aggregator_function(jobs)
class _DefaultAggregateStore(_BaseAggregateStore):
"""Aggregate storage wrapper for the default aggregator.
This class holds the information of the project associated with an
operation function using the default aggregator, i.e.
``aggregator.groupsof(1)``.
Iterating over this object yields tuples each containing one job from the project.
Parameters
----------
project : :class:`flow.FlowProject` or :class:`signac.contrib.project.Project`
A signac project used to fetch jobs for creating aggregates.
"""
def __init__(self, project):
super().__init__(project)
# Below, we store repr(project), which defines the hash and equality
# operators of this class. This class must be hashable because it is
# used as a dict key. However, when unpickling a FlowProject, this
# object's hash must be computed *before* the FlowProject is fully
# initialized. Thus, it is not possible to execute repr(project) when
# hashing the instance at the time of unpickling. This means that this
# class cannot be unpickled unless we pre-emptively compute and store
# the repr.
self._project_repr = repr(project)
def __getitem__(self, id):
"""Return an aggregate of one job from its job id.
Parameters
----------
id : str
The job id.
"""
try:
return (self._project.open_job(id=id),)
except KeyError:
raise KeyError(f"Aggregate id {id} could not be found.")
def __contains__(self, id):
"""Return whether this instance contains a job (by job id).
Parameters
----------
id : str
The job id.
"""
try:
self._project.open_job(id=id)
except KeyError:
return False
except LookupError:
raise
else:
return True
def __len__(self):
return len(self._project)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._project_repr == other._project_repr
def __hash__(self):
return hash(self._project_repr)
def keys(self):
for job in self._project:
yield job.get_id()
def values(self):
for job in self._project:
yield (job,)
def items(self):
for job in self._project:
yield (job.get_id(), (job,))
def get_aggregate_id(aggregate):
"""Generate aggregate id for an aggregate of jobs.
The aggregate id is a unique hash identifying a tuple of jobs. The
aggregate id is sensitive to the order of the jobs in the aggregate. The
id of an aggregate containing one job is that job's id (the hash of its
state point).
Parameters
----------
aggregate : tuple of :class:`~signac.contrib.job.Job`
Aggregate of signac jobs.
Returns
-------
str
The generated aggregate id.
"""
if len(aggregate) == 1:
# Return job id as it's already unique
return aggregate[0].get_id()
id_string = ",".join(job.get_id() for job in aggregate)
hash_ = md5(id_string.encode("utf-8")).hexdigest()
return f"agg-{hash_}"
class _AggregatesCursor(Collection):
"""Abstract class defining iterators over aggregates stored in a FlowProject.
Parameters
----------
project : :class:`~.FlowProject`
A FlowProject whose jobs are aggregated.
"""
@abstractmethod
def __eq__(self, other):
pass
class _AggregateStoresCursor(_AggregatesCursor):
"""Utility class to iterate over a collection of _AggregateStore instances.
Parameters
----------
project : :class:`~.FlowProject`
A FlowProject whose jobs are aggregated.
"""
def __init__(self, project):
self._stores = project._group_to_aggregate_store.inverse.keys()
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._stores == other._stores
def __contains__(self, aggregate):
aggregate_id = get_aggregate_id(aggregate)
return any(aggregate_id in aggregate_store for aggregate_store in self._stores)
def __len__(self):
# Return number of aggregates summed across all aggregate stores
return sum(len(aggregate_store) for aggregate_store in self._stores)
def __iter__(self):
for aggregate_store in self._stores:
yield from aggregate_store.values()
class _JobAggregateCursor(_AggregatesCursor):
"""Utility class to iterate over single-job aggregates in a FlowProject.
Parameters
----------
project : :class:`~.FlowProject`
A FlowProject whose jobs are aggregated.
filter : dict
A mapping of key-value pairs that all indexed job state points are
compared against (Default value = None).
doc_filter : dict
A mapping of key-value pairs that all indexed job documents are
compared against (Default value = None).
"""
def __init__(self, project, filter=None, doc_filter=None):
self._cursor = project.find_jobs(filter, doc_filter)
def __eq__(self, other):
# Cursors cannot compare equal if one is over aggregates and the other
# is over jobs.
if not isinstance(other, type(self)):
return NotImplemented
return self._cursor == other._cursor
def __contains__(self, aggregate):
return len(aggregate) == 1 and aggregate[0] in self._cursor
def __len__(self):
return len(self._cursor)
def __iter__(self):
for job in self._cursor:
yield (job,)
```
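Two implementation details above are easy to gloss over: groupsof builds aggregates with the itertools islice "grouper" idiom, and get_aggregate_id hashes the comma-joined job ids with md5 for multi-job aggregates. The standalone sketch below re-illustrates both with hypothetical job id strings; it does not depend on a signac project.
```python
import itertools
from hashlib import md5

def groups_of(items, num):
    """Yield tuples of at most ``num`` items; the last tuple may be shorter."""
    iterable = iter(items)
    # Call islice repeatedly until it returns the empty-tuple sentinel.
    return iter(lambda: tuple(itertools.islice(iterable, num)), tuple())

job_ids = [f"job{i:02d}" for i in range(10)]          # hypothetical job ids
print([len(group) for group in groups_of(job_ids, 3)])  # -> [3, 3, 3, 1]

def aggregate_id(aggregate):
    if len(aggregate) == 1:
        return aggregate[0]                  # single-job aggregates keep the job id
    digest = md5(",".join(aggregate).encode("utf-8")).hexdigest()
    return f"agg-{digest}"

print(aggregate_id(("job00",)))
print(aggregate_id(("job00", "job01")))      # order-sensitive "agg-..." hash
```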
|
{
"source": "jennyfothergill/signac",
"score": 3
}
|
#### File: signac/contrib/filterparse.py
```python
import json
import sys
from collections.abc import Mapping
def _print_err(msg=None):
"""Print the provided message to stderr.
Parameters
----------
msg : str
Error message to be printed (Default value = None).
"""
print(msg, file=sys.stderr)
def _with_message(query, file):
"""Print the interpreted filter arguments to the provided file.
Parameters
----------
query : dict
Filter arguments.
file :
The file where the filter interpretation is printed.
Returns
-------
query : dict
Filter arguments.
"""
print(f"Interpreted filter arguments as '{json.dumps(query)}'.", file=file)
return query
def _read_index(project, fn_index=None):
"""Read index from the file passed.
Parameters
----------
project : :class:`~signac.Project`
Project handle.
fn_index : str
File name of the index (Default value = None).
Returns
-------
generator
Returns the file contents, parsed as JSON-encoded lines.
"""
if fn_index is not None:
_print_err(f"Reading index from file '{fn_index}'...")
fd = open(fn_index)
return (json.loads(line) for line in fd)
def _is_json(q):
"""Check if q is JSON.
Parameters
----------
q : str
Query string.
Returns
-------
bool
True if q starts with "{" and ends with "}".
"""
return q.strip().startswith("{") and q.strip().endswith("}")
def _is_regex(q):
"""Check if q is a regular expression.
Parameters
----------
q : str
Query string.
Returns
-------
bool
True if q starts with "/" and ends with "/".
"""
return q.startswith("/") and q.endswith("/")
def _parse_json(q):
"""Parse a query argument as JSON.
Parameters
----------
q : json
Query argument.
Raises
------
JSONDecodeError
Raised if the input cannot be parsed as JSON.
"""
try:
return json.loads(q)
except json.JSONDecodeError:
_print_err(f"Failed to parse query argument. Ensure that '{q}' is valid JSON!")
raise
CAST_MAPPING = {
"true": True,
"false": False,
"null": None,
}
CAST_MAPPING_WARNING = {
"True": "true",
"False": "false",
"None": "null",
"none": "null",
}
def _cast(x):
"""Attempt to interpret x with the correct type.
Parameters
----------
x : str
The value to cast.
Returns
-------
object
Value of x, cast from a str to an appropriate type (bool, NoneType, int, float, str).
"""
try:
if x in CAST_MAPPING_WARNING:
print(f"Did you mean {CAST_MAPPING_WARNING[x]}?", file=sys.stderr)
return CAST_MAPPING[x]
except KeyError:
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return x
def _parse_single(key, value=None):
"""Parse simple search syntax.
Parameters
----------
key : str
The filter key.
value :
The filter value. If None, the filter returns
True if the provided key exists (Default value = None).
Returns
-------
dict
Parsed filter arguments.
Raises
------
ValueError
If filter arguments have an invalid key.
"""
if value is None or value == "!":
return key, {"$exists": True}
elif _is_json(value):
return key, _parse_json(value)
elif _is_regex(value):
return key, {"$regex": value[1:-1]}
elif _is_json(key):
raise ValueError(
"Please check your filter arguments. "
"Using a JSON expression as a key is not allowed: '{}'.".format(key)
)
else:
return key, _cast(value)
def parse_simple(tokens):
"""Parse a set of string tokens into a suitable filter.
Parameters
----------
tokens : Sequence[str]
A Sequence of strings composing key-value pairs.
Yields
------
tuple
A single key-value pair of input tokenized filter.
"""
for i in range(0, len(tokens), 2):
key = tokens[i]
if i + 1 < len(tokens):
value = tokens[i + 1]
else:
value = None
yield _parse_single(key, value)
def parse_filter_arg(args, file=sys.stderr):
"""Parse a series of filter arguments into a dictionary.
Parameters
----------
args : sequence of str
Filter arguments to parse.
file :
The file to write message (Default value = sys.stderr).
Returns
-------
dict
Filter arguments.
"""
if args is None or len(args) == 0:
return None
elif len(args) == 1:
if _is_json(args[0]):
return _parse_json(args[0])
else:
key, value = _parse_single(args[0])
return _with_message({key: value}, file)
else:
q = dict(parse_simple(args))
return _with_message(q, file)
def _add_prefix(prefix, filter):
"""Add desired prefix (e.g. 'sp.' or 'doc.') to a (possibly nested) filter."""
if filter:
for key, value in filter.items():
if key in ("$and", "$or"):
if isinstance(value, list) or isinstance(value, tuple):
yield key, [dict(_add_prefix(prefix, item)) for item in value]
else:
raise ValueError(
"The argument to a logical operator must be a list or a tuple!"
)
elif "." in key and key.split(".", 1)[0] in ("sp", "doc"):
yield key, value
elif key in ("sp", "doc"):
yield key, value
else:
yield prefix + key, value
def _root_keys(filter):
for key, value in filter.items():
if key in ("$and", "$or"):
assert isinstance(value, (list, tuple))
for item in value:
for key in _root_keys(item):
yield key
elif "." in key:
yield key.split(".", 1)[0]
else:
yield key
def parse_filter(filter):
"""Parse a provided sequence of filters.
Parameters
----------
filter : Sequence, Mapping, or str
A set of key, value tuples corresponding to a single filter. This
filter may itself be a compound filter containing and/or statements. The
filter may be provided as a sequence of tuples, a mapping-like object,
or a string. In the last case, the string will be parsed to generate a
valid set of filters.
Yields
------
tuple
A key value pair to be used as a filter.
"""
if isinstance(filter, str):
yield from parse_simple(filter.split())
elif isinstance(filter, Mapping):
yield from filter.items()
else:
try:
yield from filter
except TypeError:
# This type was not iterable.
raise ValueError(
f"Invalid filter type {type(filter)}. The filter must "
"be a Sequence, Mapping, or str."
)
```
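The simple key/value syntax handled above is easiest to see end to end. The hedged sketch below feeds a small token list through parse_filter_arg; the import path is taken from the file header, and the printed result reflects the rules implemented above (integer casting, /regex/ shorthand, and bare-key existence checks). Note that parse_filter_arg also prints an interpretation message to stderr.
```python
from signac.contrib.filterparse import parse_filter_arg

tokens = ["a", "42", "b.c", "/foo.*/", "flag"]
query = parse_filter_arg(tokens)
# Values are cast ("42" -> 42), "/.../" becomes a $regex clause, and a key
# without a value becomes an existence check.
print(query)
# {'a': 42, 'b.c': {'$regex': 'foo.*'}, 'flag': {'$exists': True}}
```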
|
{
"source": "jennyfuzhu/signal-interpreter-server",
"score": 2
}
|
#### File: tests/unit/test_factory.py
```python
import logging
from src.parser_factory import ParseFactory
log = logging.getLogger(__name__)
parse_factory = ParseFactory()
class MockParser:
"""Mock parser class"""
def test_set_signal_database_format():
"""Test set signal database format unit test"""
log.debug("Entering %s unit test" % test_set_signal_database_format.__name__)
parse_factory.set_signal_database_format("EXTENSION")
assert parse_factory._signal_database_format == "EXTENSION"
log.debug("Existing %s unit test" % test_set_signal_database_format.__name__)
def test_register_format():
"Test register format unit test"
log.debug("Entering %s unit test:" % test_register_format.__name__)
parse_factory.register_format("FORMAT", MockParser)
assert isinstance(parse_factory._parsers["FORMAT"], MockParser)
log.debug("Exiting %s unit test" % test_register_format.__name__)
def test_get_parser():
"""Test get parser unit test"""
log.debug("Entering %s unit test:" % test_get_parser.__name__)
parse_factory._parsers["EXTENSION"] = MockParser
parse_factory._signal_database_format = "EXTENSION"
assert parse_factory.get_parser() == MockParser
log.debug("Exiting %s unit test:" % test_get_parser.__name__)
```
#### File: tests/unit/test_json_parser.py
```python
from unittest.mock import patch, mock_open
import pytest
from src.json_parser import JsonParser
INPUT_JSON = '{"json": "json Jsonsson"}'
OUTPUT_JSON = {"json": "json Jsonsson"}
json_parser_class = JsonParser()
json_parser_class.data = {"services": [{"title": "ECU Reset", "id": "11"}, {"title": "Security Access", "id": "27"},
{"title": "Tester Present", "id": "3E"}, {"title": "None", "id": "20"}]}
def test_load_file():
"""Test load file function """
with patch("builtins.open", mock_open(read_data=INPUT_JSON)):
json_parser_class_tlf = JsonParser()
json_parser_class_tlf.load_file('random/path')
assert json_parser_class_tlf.data == OUTPUT_JSON
def test_get_signal_title():
"""Test get signal title function"""
json_parser_class_tlt = JsonParser()
json_parser_class_tlt.data = {'services': [{'title': 'ECU Reset', 'id': '11'}]}
assert json_parser_class_tlt.get_signal_title('11') == "ECU Reset"
# Need at least two signals
@pytest.mark.parametrize("item, expected_title", [
("11", "ECU Reset"),
("27", "Security Access"),
("3E", "Tester Present"),
])
def test_get_signal_title_parametrized(item, expected_title):
assert json_parser_class.get_signal_title(item) == expected_title
```
|
{
"source": "JennyHan2016/ProxyPool",
"score": 3
}
|
#### File: ProxyPool/flaskmodel/flask_book_project.py
```python
from flask import Flask, render_template, flash,request,redirect,url_for
from flask_sqlalchemy import SQLAlchemy
from flaskmodel.config import *
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
# Create the database connection
db = SQLAlchemy(app)
'''
1. Configure the database
   a. Import the SQLAlchemy extension
   b. Create the db object and configure its parameters
   c. Create the database from the terminal (see the sketch after this comment block)
2. Add the Book and Author models
   a. Inherit from db.Model
   b. __tablename__: table name
   c. Define the column names
   d. Define the relationship
3. Insert data
4. Display the query results with a template
   a. A for loop in the template is enough (I tried to do it in the .py file but it did not work)
5. Display the form with WTF
   a. Define a custom form class
   b. Render it in the template
   c. Handle the secret_key / encoding / csrf_token issues
6. Implement the add/delete logic
   a. Add data
   b. Delete a book: delete from the page; the click sends the record's ID to the delete-book route, which takes it as a parameter (uses for/else, redirect, url_for)
   c. Delete an author
'''
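# Sketch of step 1c above (assumed workflow, for illustration only): create the tables
# from a Python shell once the models below are defined.
#   >>> from flask_book_project import db
#   >>> db.create_all()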
# Configure the database URI
app.config['SQLALCHEMY_DATABASE_URI'] = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(DIALECT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'hbb'
# Custom form class
class AuthorForm(FlaskForm):
author = StringField('作者',validators=[DataRequired()])
book = StringField('书籍',validators=[DataRequired()])
submit = SubmitField('提交')
# Book and Author models
class Author(db.Model):
    # Table name
__tablename__ = 'authors'
    # Columns
id = db.Column(db.Integer,primary_key = True)
author_name = db.Column(db.String(16),unique = True)
books = db.relationship('Book',backref='author')
    # Relationship reference
# books = db.relationship()
    def __repr__(self):
return '<Author: %r>' % self.author_name
class Book(db.Model):
__tablename__ = 'books'
id = db.Column(db.Integer,primary_key=True)
book_name = db.Column(db.String(255),unique=True)
author_id = db.Column(db.Integer, db.ForeignKey('authors.id'))
    def __repr__(self):
return '<Book: %r %r>' % (self.book_name,self.author_id)
# Delete an author (remember to delete their books too)
@app.route('/delete_author/<author_id>')
def delete_author(author_id):
author = Author.query.get(author_id)
if author:
try:
Book.query.filter_by(author_id = author_id).delete()
db.session.delete(author)
db.session.commit()
except Exception as e:
flash('删除作者出错')
db.session.rollback()
else:
flash('作者找不到')
return redirect(url_for('index'))
@app.route('/delete_book/<book_id>')
def delete_book(book_id):
book = Book.query.get(book_id)
if book:
try:
db.session.delete(book)
db.session.commit()
except Exception as e:
flash('删除书籍出错')
db.session.rollback()
else:
flash('书籍找不到')
return redirect(url_for('index'))
@app.route('/',methods = ['GET','POST'])
def index():
    # Create the custom form
author_form = AuthorForm()
    # Query all authors and pass them to the template
'''
    Validation logic:
    1. Call the WTF validation function
    2. If validation passes, read the form data
    3. Check whether the author exists
    4. If the author exists, check whether the book exists; add the record if it is not a duplicate, otherwise show an error
    5. If the author does not exist, add both the author and the book
    6. If validation fails, show an error
'''
    # 1. Call the WTF validation function
if author_form.validate_on_submit():
        # 2. Validation passed; read the form data
author_name = author_form.author.data
book_name = author_form.book.data
        # 3. Check whether the author exists
author = Author.query.filter_by(author_name=author_name).first()
book = Book.query.filter_by(book_name=book_name).first()
        # 4. If the author exists
if author:
            # Check whether the book already exists; add it if there is no duplicate, otherwise show an error
if book:
                # A book with this title by this author already exists; notify the user
flash('已存在同名同作者书籍')
else:
                # No duplicate book; add the record
try:
new_book = Book(book_name = book_name,author_id = author.id)
db.session.add(new_book)
db.session.commit()
except Exception as e:
print(e)
flash('有作者时添加书籍失败')
                    db.session.rollback()  # roll back if the insert fails
else:
            # The author does not exist; check whether the book exists
if book:
                # A book with this title already exists; notify the user
flash('已存在相同的书籍')
else:
                # No duplicate book; add the author and the book
try:
new_author = Author(author_name=author_name)
db.session.add(new_author)
db.session.commit()
new_book = Book(book_name=book_name, author_id=new_author.id)
db.session.add(new_book)
db.session.commit()
except Exception as e:
print(e)
flash('无作者时添加书籍失败')
                    db.session.rollback()  # roll back if the insert fails
else:
if request.method == 'POST':
flash('参数不全!')
authors = Author.query.all()
return render_template('books.html',authors = authors,form = author_form)
if __name__ == '__main__':
# db.create_all()
# db.drop_all()
    # Insert sample data
# au1 = Author(author_name = 'hbb')
# au2 = Author(author_name = 'ry')
# au3 = Author(author_name = 'rmf')
# db.session.add_all([au1,au2,au3])
# db.session.commit()
#
# bk1 = Book(book_name = '量子史话',author_id = au1.id)
# bk2 = Book(book_name = '我们仨',author_id = au1.id)
# bk3 = Book(book_name = '管理学',author_id = au2.id)
# bk4 = Book(book_name = '玩具的学与玩',author_id = au3.id)
# bk5 = Book(book_name = '教养的迷思',author_id = au3.id)
# db.session.add_all([bk1,bk2,bk3,bk4,bk5])
# db.session.commit()
app.run(debug=True)
```
#### File: ProxyPool/flaskmodel/flasktest.py
```python
from flask import Flask,g
# 2. Create the Flask application instance
# __name__ must be passed in so Flask can determine where its resources live
app = Flask(__name__)
# 3. Define routes and view functions
# Routes in Flask are defined with decorators
# Routes only support GET by default; other methods must be listed explicitly
@app.route('/',methods=['GET','POST'])
def index():
return 'hello Flask'
# Use the same view function to show order information for different users
# <> defines a route parameter; it must be given a name inside the <>
@app.route('/order/<int:order_id>')
def get_order(order_id):
return 'order_id %s' % order_id
# Route parameters default to (unicode) strings
# To constrain a parameter, e.g. to int, declare the converter in the route and add the
# parameter name to the view function's signature so the code below can use it
# Start the application
if __name__ == '__main__':
app.run()
```
|
{
"source": "JennyLawrance/azure-cli",
"score": 2
}
|
#### File: command_modules/batch/custom.py
```python
import base64
from six.moves.urllib.parse import urlsplit # pylint: disable=import-error
from knack.log import get_logger
from msrest.exceptions import DeserializationError
from azure.mgmt.batch import BatchManagementClient
from azure.mgmt.batch.models import (BatchAccountCreateParameters,
AutoStorageBaseProperties,
ApplicationUpdateParameters)
from azure.mgmt.batch.operations import (ApplicationPackageOperations)
from azure.batch.models import (CertificateAddParameter, PoolStopResizeOptions, PoolResizeParameter,
PoolResizeOptions, JobListOptions, JobListFromJobScheduleOptions,
TaskAddParameter, TaskConstraints, PoolUpdatePropertiesParameter,
StartTask, AffinityInformation)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import get_sdk, ResourceType
from azure.cli.core._profile import Profile
from azure.cli.core.util import sdk_no_wait, get_file_json, in_cloud_console
logger = get_logger(__name__)
MAX_TASKS_PER_REQUEST = 100
def transfer_doc(source_func, *additional_source_funcs):
def _decorator(func):
func.__doc__ = source_func.__doc__
for f in additional_source_funcs:
func.__doc__ += "\n" + f.__doc__
return func
return _decorator
# Mgmt custom commands
def list_accounts(client, resource_group_name=None):
acct_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(acct_list)
@transfer_doc(AutoStorageBaseProperties)
def create_account(client,
resource_group_name, account_name, location, tags=None, storage_account=None,
keyvault=None, keyvault_url=None, no_wait=False):
properties = AutoStorageBaseProperties(storage_account_id=storage_account) \
if storage_account else None
parameters = BatchAccountCreateParameters(location=location,
tags=tags,
auto_storage=properties)
if keyvault:
parameters.key_vault_reference = {'id': keyvault, 'url': keyvault_url}
parameters.pool_allocation_mode = 'UserSubscription'
return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name,
account_name=account_name, parameters=parameters)
@transfer_doc(AutoStorageBaseProperties)
def update_account(client, resource_group_name, account_name,
tags=None, storage_account=None):
properties = AutoStorageBaseProperties(storage_account_id=storage_account) \
if storage_account else None
return client.update(resource_group_name=resource_group_name,
account_name=account_name,
tags=tags,
auto_storage=properties)
# pylint: disable=inconsistent-return-statements
def login_account(cmd, client, resource_group_name, account_name, shared_key_auth=False, show=False):
account = client.get(resource_group_name=resource_group_name,
account_name=account_name)
cmd.cli_ctx.config.set_value('batch', 'account', account.name)
cmd.cli_ctx.config.set_value('batch', 'endpoint',
'https://{}/'.format(account.account_endpoint))
if shared_key_auth:
keys = client.get_keys(resource_group_name=resource_group_name,
account_name=account_name)
cmd.cli_ctx.config.set_value('batch', 'auth_mode', 'shared_key')
cmd.cli_ctx.config.set_value('batch', 'access_key', keys.primary)
if show:
return {
'account': account.name,
'endpoint': 'https://{}/'.format(account.account_endpoint),
'primaryKey': keys.primary,
'secondaryKey': keys.secondary
}
else:
cmd.cli_ctx.config.set_value('batch', 'auth_mode', 'aad')
if show:
if in_cloud_console():
resource = cmd.cli_ctx.cloud.endpoints.active_directory_resource_id
else:
resource = cmd.cli_ctx.cloud.endpoints.batch_resource_id
profile = Profile(cli_ctx=cmd.cli_ctx)
creds, subscription, tenant = profile.get_raw_token(resource=resource)
return {
'tokenType': creds[0],
'accessToken': creds[1],
'expiresOn': creds[2]['expiresOn'],
'subscription': subscription,
'tenant': tenant,
'resource': resource
}
@transfer_doc(ApplicationUpdateParameters)
def update_application(client,
resource_group_name, account_name, application_id, allow_updates=None,
display_name=None, default_version=None):
parameters = ApplicationUpdateParameters(allow_updates=allow_updates,
display_name=display_name,
default_version=default_version)
return client.update(resource_group_name=resource_group_name,
account_name=account_name,
application_id=application_id,
parameters=parameters)
def _upload_package_blob(ctx, package_file, url):
"""Upload the location file to storage url provided by autostorage"""
BlockBlobService = get_sdk(ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
uri = urlsplit(url)
    # the URI path always starts with '/', so the container name is the second segment
pathParts = uri.path.split('/', 2)
container_name = pathParts[1]
blob_name = pathParts[2]
    # handle the case where the storage account is not in public Azure
hostParts = uri.netloc.split('.', 2)
account_name = hostParts[0]
    # the endpoint suffix must skip the 'blob' part of the host name
endpoint_suffix = hostParts[2]
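    # Hypothetical example of the split above (for illustration only):
    #   https://myaccount.blob.core.windows.net/app-packages/pkg.zip?sv=...
    #   -> container_name='app-packages', blob_name='pkg.zip',
    #      account_name='myaccount', endpoint_suffix='core.windows.net'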
sas_service = BlockBlobService(account_name=account_name,
sas_token=uri.query,
endpoint_suffix=endpoint_suffix)
sas_service.create_blob_from_path(
container_name=container_name,
blob_name=blob_name,
file_path=package_file,
)
@transfer_doc(ApplicationPackageOperations.create)
def create_application_package(cmd, client,
resource_group_name, account_name, application_id, version,
package_file):
# create application if not exist
mgmt_client = get_mgmt_service_client(cmd.cli_ctx, BatchManagementClient)
try:
mgmt_client.application.get(resource_group_name, account_name, application_id)
except Exception: # pylint:disable=broad-except
mgmt_client.application.create(resource_group_name, account_name, application_id)
result = client.create(resource_group_name, account_name, application_id, version)
# upload binary as application package
logger.info('Uploading %s to storage blob %s...', package_file, result.storage_url)
_upload_package_blob(cmd.cli_ctx, package_file, result.storage_url)
# activate the application package
client.activate(resource_group_name, account_name, application_id, version, "zip")
return client.get(resource_group_name, account_name, application_id, version)
# Data plane custom commands
@transfer_doc(CertificateAddParameter)
def create_certificate(client, certificate_file, thumbprint, password=None):
thumbprint_algorithm = 'sha1'
certificate_format = 'pfx' if password else 'cer'
with open(certificate_file, "rb") as f:
data_bytes = f.read()
data = base64.b64encode(data_bytes).decode('utf-8')
cert = CertificateAddParameter(thumbprint, thumbprint_algorithm, data,
certificate_format=certificate_format,
password=password)
client.add(cert)
return client.get(thumbprint_algorithm, thumbprint)
def delete_certificate(client, thumbprint, abort=False):
thumbprint_algorithm = 'sha1'
if abort:
return client.cancel_deletion(thumbprint_algorithm, thumbprint)
return client.delete(thumbprint_algorithm, thumbprint)
@transfer_doc(PoolResizeParameter)
def resize_pool(client, pool_id, target_dedicated_nodes=None, target_low_priority_nodes=None,
resize_timeout=None, node_deallocation_option=None,
if_match=None, if_none_match=None, if_modified_since=None,
if_unmodified_since=None, abort=False):
if abort:
stop_resize_option = PoolStopResizeOptions(if_match=if_match,
if_none_match=if_none_match,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since)
return client.stop_resize(pool_id, pool_stop_resize_options=stop_resize_option)
param = PoolResizeParameter(target_dedicated_nodes=target_dedicated_nodes,
target_low_priority_nodes=target_low_priority_nodes,
resize_timeout=resize_timeout,
node_deallocation_option=node_deallocation_option)
resize_option = PoolResizeOptions(if_match=if_match,
if_none_match=if_none_match,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since)
return client.resize(pool_id, param, pool_resize_options=resize_option)
@transfer_doc(PoolUpdatePropertiesParameter, StartTask)
def update_pool(client,
pool_id, json_file=None, start_task_command_line=None, certificate_references=None,
application_package_references=None, metadata=None,
start_task_environment_settings=None, start_task_wait_for_success=None,
start_task_max_task_retry_count=None):
if json_file:
json_obj = get_file_json(json_file)
param = None
try:
param = PoolUpdatePropertiesParameter.from_dict(json_obj)
except DeserializationError:
pass
if not param:
raise ValueError("JSON file '{}' is not in correct format.".format(json_file))
if param.certificate_references is None:
param.certificate_references = []
if param.metadata is None:
param.metadata = []
if param.application_package_references is None:
param.application_package_references = []
else:
if certificate_references is None:
certificate_references = []
if metadata is None:
metadata = []
if application_package_references is None:
application_package_references = []
param = PoolUpdatePropertiesParameter(certificate_references,
application_package_references,
metadata)
if start_task_command_line:
param.start_task = StartTask(start_task_command_line,
environment_settings=start_task_environment_settings,
wait_for_success=start_task_wait_for_success,
max_task_retry_count=start_task_max_task_retry_count)
client.update_properties(pool_id=pool_id, pool_update_properties_parameter=param)
return client.get(pool_id)
def list_job(client, job_schedule_id=None, filter=None, # pylint: disable=redefined-builtin
select=None, expand=None):
if job_schedule_id:
option1 = JobListFromJobScheduleOptions(filter=filter,
select=select,
expand=expand)
return list(client.list_from_job_schedule(job_schedule_id=job_schedule_id,
job_list_from_job_schedule_options=option1))
option2 = JobListOptions(filter=filter,
select=select,
expand=expand)
return list(client.list(job_list_options=option2))
@transfer_doc(TaskAddParameter, TaskConstraints, AffinityInformation)
def create_task(client,
job_id, json_file=None, task_id=None, command_line=None, resource_files=None,
environment_settings=None, affinity_id=None, max_wall_clock_time=None,
retention_time=None, max_task_retry_count=None,
application_package_references=None):
task = None
tasks = []
if json_file:
json_obj = get_file_json(json_file)
try:
task = TaskAddParameter.from_dict(json_obj)
except DeserializationError:
tasks = []
try:
for json_task in json_obj:
tasks.append(TaskAddParameter.from_dict(json_task))
except (DeserializationError, TypeError):
raise ValueError("JSON file '{}' is not formatted correctly.".format(json_file))
else:
if command_line is None or task_id is None:
raise ValueError("Missing required arguments.\nEither --json-file, "
"or both --task-id and --command-line must be specified.")
task = TaskAddParameter(task_id, command_line,
resource_files=resource_files,
environment_settings=environment_settings,
affinity_info=AffinityInformation(affinity_id) if affinity_id else None,
application_package_references=application_package_references)
if max_wall_clock_time is not None or retention_time is not None \
or max_task_retry_count is not None:
task.constraints = TaskConstraints(max_wall_clock_time=max_wall_clock_time,
retention_time=retention_time,
max_task_retry_count=max_task_retry_count)
if task is not None:
client.add(job_id=job_id, task=task)
return client.get(job_id=job_id, task_id=task.id)
submitted_tasks = []
for i in range(0, len(tasks), MAX_TASKS_PER_REQUEST):
submission = client.add_collection(
job_id=job_id,
value=tasks[i:i + MAX_TASKS_PER_REQUEST])
submitted_tasks.extend(submission.value) # pylint: disable=no-member
return submitted_tasks
```
#### File: command_modules/iot/_params.py
```python
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag)
from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku
from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from .custom import KeyType, SimpleAccessRights
from ._validators import validate_policy_permissions
from ._completers import get_device_id_completion_list
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
def load_arguments(self, _): # pylint: disable=too-many-statements
# Arguments for IoT DPS
with self.argument_context('iot dps') as c:
c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('iot dps create') as c:
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Provisioning Service. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotDpsSku),
help='Pricing tier for the IoT provisioning service.')
c.argument('unit', help='Units in your IoT Provisioning Service.', type=int)
for subgroup in ['access-policy', 'linked-hub', 'certificate']:
with self.argument_context('iot dps {}'.format(subgroup)) as c:
c.argument('dps_name', options_list=['--dps-name'], id_part=None)
with self.argument_context('iot dps access-policy') as c:
c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'],
help='A friendly name for DPS access policy.')
with self.argument_context('iot dps access-policy create') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps access-policy update') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps linked-hub') as c:
c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.')
with self.argument_context('iot dps linked-hub create') as c:
c.argument('connection_string', help='Connection string of the IoT hub.')
c.argument('location', get_location_type(self.cli_ctx),
help='Location of the IoT hub.')
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps linked-hub update') as c:
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the Iot hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps allocation-policy update') as c:
c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy),
help='Allocation policy for the IoT provisioning service.')
with self.argument_context('iot dps certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'],
help='A friendly name for the certificate.')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
# Arguments for IoT Hub
with self.argument_context('iot') as c:
c.argument('device_id', options_list=['--device-id', '-d'], help='Device Id.',
completer=get_device_id_completion_list)
with self.argument_context('iot hub') as c:
c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
for subgroup in ['consumer-group', 'policy', 'job', 'certificate']:
with self.argument_context('iot hub {}'.format(subgroup)) as c:
c.argument('hub_name', options_list=['--hub-name'])
with self.argument_context('iot device') as c:
c.argument('hub_name', hub_name_type)
with self.argument_context('iot hub certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.')
with self.argument_context('iot hub consumer-group') as c:
c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2',
help='Event hub consumer group name.')
c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.')
with self.argument_context('iot hub policy') as c:
c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1',
help='Shared access policy name.')
permission_values = ', '.join([x.value for x in SimpleAccessRights])
c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower,
help='Permissions of shared access policy. Use space-separated list for multiple permissions. '
'Possible values: {}'.format(permission_values))
with self.argument_context('iot hub job') as c:
c.argument('job_id', id_part='child_name_1', help='Job Id.')
with self.argument_context('iot hub create') as c:
c.argument('hub_name', completer=None)
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Hub. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotHubSku),
help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. '
'Note that only one free IoT hub instance is allowed in each '
'subscription. Exception will be thrown if free instances exceed one.')
c.argument('unit', help='Units in your IoT Hub.', type=int)
c.argument('partition_count', help='The number of partitions for device-to-cloud messages.', type=int)
with self.argument_context('iot hub show-connection-string') as c:
c.argument('policy_name', help='Shared access policy to use.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device create') as c:
c.argument('device_id', completer=None)
with self.argument_context('iot device create', arg_group='X.509 Certificate') as c:
c.argument('x509', action='store_true', help='Use X.509 certificate for device authentication.')
c.argument('primary_thumbprint', help='Primary X.509 certificate thumbprint to authenticate device.')
c.argument('secondary_thumbprint', help='Secondary X.509 certificate thumbprint to authenticate device.')
c.argument('valid_days', type=int, help='Number of days the generated self-signed X.509 certificate should be '
'valid for. Default validity is 365 days.')
c.argument('output_dir', help='Output directory for generated self-signed X.509 certificate. '
'Default is current working directory.')
with self.argument_context('iot device list') as c:
c.argument('top', help='Maximum number of device identities to return.', type=int)
with self.argument_context('iot device delete') as c:
c.argument('etag', help='ETag of the target device. It is used for the purpose of optimistic '
'concurrency. Delete operation will be performed only if the specified '
'ETag matches the value maintained by the server, indicating that the '
'device identity has not been modified since it was retrieved. Default '
'value is set to wildcard character (*) to force an unconditional '
'delete.')
with self.argument_context('iot device show-connection-string') as c:
c.argument('top', type=int, help='Maximum number of connection strings to return.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device message') as c:
c.argument('lock_token', help='Message lock token.')
with self.argument_context('iot device message send', arg_group='Messaging') as c:
c.argument('data', help='Device-to-cloud message body.')
c.argument('message_id', help='Device-to-cloud message Id.')
c.argument('correlation_id', help='Device-to-cloud message correlation Id.')
c.argument('user_id', help='Device-to-cloud message user Id.')
with self.argument_context('iot device message receive') as c:
c.argument('lock_timeout', type=int,
help='In case a message returned to this call, this specifies the amount of '
'time in seconds, the message will be invisible to other receive calls.')
with self.argument_context('iot device export') as c:
c.argument('blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
c.argument('include_keys', action='store_true',
help='If set, keys are exported normally. Otherwise, keys are set to null in '
'export output.')
with self.argument_context('iot device import') as c:
c.argument('input_blob_container_uri',
help='Blob Shared Access Signature URI with read access to a blob container.'
'This blob contains the operations to be performed on the identity '
'registry ')
c.argument('output_blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
```
|
{
"source": "JennyLeeStat/Opioid",
"score": 2
}
|
#### File: JennyLeeStat/Opioid/utils.py
```python
import pandas as pd
import os
import sys
import urllib
import logging
import zipfile
logging.basicConfig(
format= '%(levelname)s %(message)s',
stream=sys.stdout, level=logging.INFO)
def download_and_decompress(url, dest_dir):
"""
fetch the dataset from the CMS webpage
"""
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
filename = url.split('/')[ -1 ]
filepath = os.path.join(dest_dir, filename)
uncomp_filedir = filename.split('.')[ 0 ]
    uncomp_filepath = os.path.join(dest_dir, uncomp_filedir)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
if not os.path.isfile(filepath):
filepath, _ = urllib.request.urlretrieve(url, filepath,
reporthook=_progress)
statinfo = os.stat(filepath)
logging.info('Successfully downloaded {}'.format(filename))
logging.info('{} bytes.'.format(statinfo.st_size))
    if not os.path.exists(uncomp_filepath):  # extract only if not already uncompressed
logging.info("Uncompressing {}".format(filename))
zipfile.ZipFile(filepath, 'r').extractall(dest_dir)
logging.info(uncomp_filedir + ' successfully uncompressed')
print()
logging.info("Data set {}".format(filename))
logging.info("from url: {}".format(url))
logging.info("successfully downloaded and uncompressed")
def clean_txt(series):
cleaned = series.str.lower().str.strip().str.replace('/', "_").str.replace('-', "_")
cleaned = cleaned.str.replace(' ', '_').str.replace(',', '_').str.replace('__', '_')
return cleaned
def plot_us_map(df, state, code, z,
title='Overdose Deaths Per Capita',
colorbar_title=None):
scl = [ [ 0.0, 'rgb(242,240,247)' ],
[ 0.2, 'rgb(218,218,235)' ],
[ 0.4, 'rgb(188,189,220)' ],
[ 0.6, 'rgb(158,154,200)' ],
[ 0.8, 'rgb(117,107,177)' ],
[ 1.0, 'rgb(84,39,143)' ] ]
for col in df.columns:
df[ col ] = df[ col ].astype(str)
df[ 'text' ] = df[state] + '<br>' + z + df[z]
data = [ dict(
type='choropleth',
colorscale=scl,
        autocolorscale=False,  # keep the custom purple colorscale defined above
locations=df[code],
z=df[z].astype(float),
locationmode='USA-states',
text=df[ 'text' ],
marker=dict(
line=dict(
color='rgb(255,255,255)',
width=2
)),
colorbar = dict(
title=colorbar_title)
) ]
layout = dict(
title=title,
geo=dict(
scope='usa',
projection=dict(type='albers usa'),
showlakes=False,
lakecolor='rgb(255, 255, 255)'),
)
fig = dict(data=data, layout=layout)
return fig
```
|
{
"source": "jennylien/python-everywhere-webapp-guineapig",
"score": 3
}
|
#### File: jennylien/python-everywhere-webapp-guineapig/flask_app.py
```python
from flask import Flask, render_template
from processing import do_calculation
app = Flask(__name__)
app.config["DEBUG"] = True
@app.route('/')
def index():
return render_template("main_page.html")
```
|
{
"source": "JennyLouise/Mask_RCNN",
"score": 2
}
|
#### File: samples/coco/FK2018.py
```python
import os
import sys
import time
import numpy as np
import imgaug.augmenters as iaa # https://github.com/aleju/imgaug (pip3 install imgaug)
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
from skimage.measure import find_contours
import zipfile
import urllib.request
import shutil
import json
import math
import shapely.geometry
import tensorflow as tf
from tensorflow.python.client import timeline
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = "/scratch/jw22g14/results/fk2018/paperlogs/"
DEFAULT_LOGS_DIR = "./logs/"
DEFAULT_DATASET_YEAR = "2018"
VALIDATION_STEPS = 1000
############################################################
# Configurations
############################################################
class FKConfig(Config):
def __init__(self, steps_per_epoch=15, batch_size = 1, rpn_nms_threshold = 0.9, train_rois_per_image = 100, learning_momentum=0.9, learning_rate=0.001, mean_pixel = np.array([75,75,75]), std_pixel= np.array([25,25,25])):
self.STEPS_PER_EPOCH=steps_per_epoch
self.BATCH_SIZE=batch_size
self.RPN_NMS_THRESHOLD = rpn_nms_threshold
self.TRAIN_ROIS_PER_IMAGE = train_rois_per_image
self.MEAN_PIXEL = mean_pixel
self.STD_PIXEL = std_pixel
self.LEARNING_RATE = learning_rate
self.LEARNING_MOMENTUM = learning_momentum
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the FK2018 dataset.
"""
# Give the configuration a recognizable name
self.NAME = "fk2018"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
self.IMAGES_PER_GPU = 1
# Uncomment to train on 8 GPUs (default is 1)
#GPU_COUNT = 8
# Number of classes (including background)
        self.NUM_CLASSES = 6  # tunasand set has 33, second set has 15, collapsed down has 6, fish only has 7, fish_A has 6; COCO has 80 classes
self.LOSS_WEIGHTS = {
"rpn_class_loss": 2,
"rpn_bbox_loss": 1,
"mrcnn_class_loss": 1,
"mrcnn_bbox_loss": 1,
"mrcnn_mask_loss": 1
}
self.IMAGE_RESIZE_MODE = "pad_crop"
self.IMAGE_MIN_DIM = 1024
self.IMAGE_MAX_DIM = 1024
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
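        # Meta vector layout (mirrors mrcnn's compose_image_meta): image_id (1)
        # + original_image_shape (3) + image_shape (3) + window (4) + scale (1)
        # + active class ids (NUM_CLASSES)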
self.IMAGE_SHAPE = np.array(
[self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, self.IMAGE_CHANNEL_COUNT]
)
# # # Image mean (RGB)
# MEAN_PIXEL = np.array([80, 80, 80])
# STD_PIXEL = np.array([20, 20, 20])
# altitude corrected
# MEAN_PIXEL = np.array([90, 90, 90])
# STD_PIXEL = np.array([18, 20, 16])
# # greyworld corrected
#self.MEAN_PIXEL = np.array([73, 73, 72])
#self.STD_PIXEL = np.array([27, 27, 26])
# debayered
#self.MEAN_PIXEL = np.array([27, 27, 27])
#self.STD_PIXEL = np.array([15, 15, 15])
self.USE_MINI_MASK = False
############################################################
# Dataset
############################################################
class FKDataset(utils.Dataset):
def load_fk(self, dataset_dir, class_ids=None,
class_map=None, return_coco=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
class_map: TODO: Not implemented yet. Supports maping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
"""
coco = COCO("{}/annotations.json".format(dataset_dir))
image_dir = dataset_dir
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
image_ids.extend(list(coco.getImgIds(catIds=[id])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(FKDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
            mask = np.stack(instance_masks, axis=2).astype(bool)  # np.bool is deprecated; plain bool is equivalent
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(FKDataset, self).load_mask(image_id)
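    # Shape note (illustration, not from the original source): for an image with
    # three annotated instances, load_mask returns a mask of shape
    # (height, width, 3) with dtype bool and class_ids of shape (3,).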
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
return info["path"]
# if info["source"] == "coco":
# return "{}".format(info["id"])
# else:
# super(FK2018Dataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def generate_labelme_file(model, dataset, output_dir, label_file):
image_ids = dataset.image_ids
t_prediction = 0
t_start = time.time()
labels={}
with open(label_file, 'r') as f:
i=0
for class_name in f.readlines():
labels[i] = class_name.strip()
i+=1
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
imagename = dataset.image_info[image_id]['path'].split('/')[-1].split('.')[0]
print("Predicting objects in {}".format(imagename))
labelme_dict= {
"imagePath": imagename+'.jpg',
"imageData": None,
"shapes": [],
"version": "3.16.2",
"flags": {},
"fillColor": [85, 170, 0, 128],
"lineColor": [0, 255, 0, 128],
"imageWidth": dataset.image_info[image_id]['width'],
"imageHeight": dataset.image_info[image_id]['height']
}
for i in range(r['rois'].shape[0]):
class_id = r['class_ids'][i]
score = r['scores'][i]
bbox = np.around(r['rois'][i], 1)
mask = r['masks'][:, :, i]
polygon = find_contours(mask, 0.5)[0].tolist()
n = math.ceil(len(polygon)/20)
print(len(polygon))
# polygon = polygon[0::n]
polygon = shapely.geometry.Polygon(polygon)
polygon = polygon.simplify(1)
polygon = list(polygon.exterior.coords)
print(len(polygon))
for i in range(len(polygon)):
polygon[i]=[polygon[i][1], polygon[i][0]]
labelme_dict['shapes'].append({
"line_color":None,
"shape_type": "polygon",
"points": polygon,
"flags": {},
"fill_color": [ 255, 0, 0, 128 ],
"label": labels[class_id]
})
out_ann_file = output_dir +"/"+ imagename+'.json'
with open(out_ann_file, 'w') as f:
json.dump(labelme_dict, f)
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
    dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
# coco_results = coco.loadRes(results)
# # Evaluate
# cocoEval = COCOeval(coco, coco_results, eval_type)
# cocoEval.params.imgIds = coco_image_ids
# cocoEval.evaluate()
# cocoEval.accumulate()
# cocoEval.summarize()
# print("Prediction time: {}. Average {}/image".format(
# t_prediction, t_prediction / len(image_ids)))
# print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
def train_aenet(section1_epochs=10, section2_epochs=20, section3_epochs=300, learning_rate=0.01, learning_momentum=0.9,
optimiser='Adam', add_freq=0.1, add_value=(-10,10), add_pc_freq=0.5, multiply_freq=0.1,
multiply_value=(0.75,1.25), multiply_pc_freq=0.5, snp_freq=0.1, snp_p=0.05, jpeg_freq=0.1,
jpeg_compression=(1,5), gaussian_freq=0.1, gaussian_sigma=(0.01,0.7), motion_freq=0.1, motion_k=(3,10),
contrast_freq=0.1, contrast_alpha=(0.5,1.5), fliplr=0.5, flipud=0.5, affine_freq=0.1,
affine_scale=(0,0.02), transform_freq=0.1, transform_scale=(0,0.05),elastic_transformations=False, elastic_freq=0.1, elastic_sigma=(4, 6),
elastic_alpha=(0,7), rotate=1, dataset="/scratch/jw22g14/FK2018/second_set/", log_file="", separate_channel_operations=0):
config = FKConfig()
config.display()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=DEFAULT_LOGS_DIR+log_file)
model_path = COCO_MODEL_PATH
model.load_weights(model_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
dataset_train = FKDataset()
dataset_train.load_fk(dataset+"/dive1")
dataset_train.load_fk(dataset+"/dive3")
dataset_train.prepare()
# Validation dataset
dataset_val = FKDataset()
dataset_val.load_fk(dataset+"/dive2")
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = iaa.Sequential([
iaa.Sometimes(add_freq, iaa.Add(value=add_value, per_channel=add_pc_freq)),
iaa.Sometimes(multiply_freq, iaa.Multiply(mul=multiply_value, per_channel=multiply_pc_freq)),
iaa.Sometimes(snp_freq, iaa.SaltAndPepper(snp_p)),
iaa.Sometimes(jpeg_freq, iaa.JpegCompression(compression=jpeg_compression)),
iaa.Sometimes(gaussian_freq, iaa.GaussianBlur(sigma=gaussian_sigma)),
iaa.Sometimes(motion_freq, iaa.MotionBlur(k=motion_k)),
iaa.Sometimes(contrast_freq, iaa.LinearContrast(alpha=contrast_alpha)),
iaa.Fliplr(fliplr),
iaa.Flipud(flipud),
iaa.Sometimes(affine_freq, iaa.PiecewiseAffine(scale=affine_scale, nb_rows=8, nb_cols=8,polygon_recoverer='auto')),
iaa.Sometimes(transform_freq, iaa.PerspectiveTransform(scale=transform_scale, keep_size=True)),
iaa.Sometimes(elastic_freq, iaa.ElasticTransformation(sigma=elastic_sigma, alpha=elastic_alpha)),
iaa.Sometimes(rotate, iaa.Rot90([0,1,2,3]))
], random_order=True)
auglist=[
#iaa.Add(value=add_value, per_channel=separate_channel_operations),
#iaa.Multiply(mul=multiply_value, per_channel=separate_channel_operations),
#iaa.SaltAndPepper(snp_p),
#iaa.JpegCompression(compression=jpeg_compression),
#iaa.GaussianBlur(sigma=gaussian_sigma),
#iaa.MotionBlur(k=motion_k),
#iaa.LinearContrast(alpha=contrast_alpha),
#iaa.Fliplr(fliplr),
#iaa.Flipud(flipud),
iaa.PiecewiseAffine(scale=affine_scale, nb_rows=8, nb_cols=8,polygon_recoverer='auto'),
iaa.PerspectiveTransform(scale=transform_scale, keep_size=True),
#iaa.Rot90([0,1,2,3]),
]
if(elastic_transformations):
auglist.append(iaa.ElasticTransformation(sigma=elastic_sigma, alpha=elastic_alpha))
augmentation = iaa.SomeOf((0, 5), auglist, random_order=True)
print("Training RPN")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=5, #40
layers='rpn',
augmentation=augmentation
)
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=section1_epochs, #40
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=section2_epochs, #120
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate/10,
epochs=section3_epochs, #160
layers='all',
augmentation=augmentation,
)
def train_nnet(section1_epochs=10, section2_epochs=20, section3_epochs=300, learning_rate=0.01, learning_momentum=0.9, steps_per_epoch=15, batch_size = 1,
optimiser='Adam', add_freq=0.1, add_value=(-5,5),
multiply_value=(0.9,1.1), snp_p=0.03,
jpeg_compression=(1,5), gaussian_sigma=(0.01,0.5), motion_k=(3,10),
contrast_alpha=(0.5,1.5), fliplr=0.5, flipud=0.5,
affine_scale=(0,0.02), transform_scale=(0,0.05),elastic_transformations=False, flip_rotate=False, extras=False, elastic_sigma=(4, 6),
elastic_alpha=(0,7), train_dataset="/scratch/jw22g14/FK2018/second_set/", val_dataset="", log_file="", separate_channel_operations=0,
mean_pixel=np.array([75, 75, 75]), std_pixel=np.array([25,25,25])):
config = FKConfig(learning_rate=learning_rate, learning_momentum=learning_momentum, steps_per_epoch=steps_per_epoch, batch_size=batch_size, mean_pixel=mean_pixel, std_pixel=std_pixel)
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=log_file)
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
# with tf.InteractiveSession(config=config).as_default():
model_path = COCO_MODEL_PATH
model.load_weights(model_path, by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
dataset_train = FKDataset()
dataset_train.load_fk(train_dataset)
dataset_train.prepare()
# Validation dataset
dataset_val = FKDataset()
dataset_val.load_fk(val_dataset)
dataset_val.prepare()
auglist=[
iaa.Add(value=add_value, per_channel=separate_channel_operations),
iaa.Multiply(mul=multiply_value, per_channel=separate_channel_operations),
iaa.SaltAndPepper(snp_p),
iaa.JpegCompression(compression=jpeg_compression),
iaa.GaussianBlur(sigma=gaussian_sigma),
iaa.MotionBlur(k=motion_k),
iaa.LinearContrast(alpha=contrast_alpha),
]
auglist= [iaa.Flipud(1), iaa.Fliplr(1), iaa.Rot90([1, 2, 3]), iaa.GaussianBlur(sigma=(1, 2)), iaa.JpegCompression(compression=(25,50))]
if(elastic_transformations):
auglist.extend([
iaa.ElasticTransformation(sigma=elastic_sigma, alpha=elastic_alpha),
iaa.PiecewiseAffine(scale=affine_scale, nb_rows=8, nb_cols=8,polygon_recoverer='auto'),
iaa.PerspectiveTransform(scale=transform_scale, keep_size=True),
])
if(extras):
auglist.extend([
iaa.Add(value=add_value, per_channel=separate_channel_operations),
iaa.Multiply(mul=multiply_value, per_channel=separate_channel_operations),
iaa.SaltAndPepper(snp_p),
iaa.MotionBlur(k=motion_k),
iaa.LinearContrast(alpha=contrast_alpha),
])
augmentation = iaa.SomeOf((0, len(auglist)), auglist, random_order=True) #originally 0, 5, trying smaller number for fewer_augs run
print("Training RPN")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=5, #40
layers='rpn',
augmentation=augmentation
)
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=section1_epochs, #40
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate,
epochs=section2_epochs, #120
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=learning_rate/10,
epochs=section3_epochs, #160
layers='all',
augmentation=augmentation,
)
if __name__ == '__main__':
#old_main()
train_nnet(dataset="/scratch/jw22g14/FK2018/first_set/")
```
|
{
"source": "JennyLynnFletcher/RL_Environment_Design",
"score": 3
}
|
#### File: JennyLynnFletcher/RL_Environment_Design/Simulator.py
```python
import torch
import math
import time
import Agent
import Obstacle
import RVO as RVO
import matplotlib.pyplot as plt
import random
import datetime
from colour import Color
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
e = 0.01
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
grey = (100,100,100)
gradient_0 = list(Color("green").range_to(Color("blue"),500))
gradient_1 = list(Color("orange").range_to(Color("red"),500))
def dist(pos0, pos1):
return math.sqrt((pos0[0]-pos1[0])**2 + (pos0[1]-pos1[1])**2)
class Simulation:
def __init__(self):
self.timestep = 1./100.
self.border = 100
self.unit = 50
self.agents = []
self.arrived = []
self.obstacles = []
self.perc_speed = []
self.distance_travelled = []
self.speed = []
self.steps = 0
self.path = []
self.render = False
self.saved = False
def set_render(self, set_render):
if set_render:
global pygame
pygame = __import__('pygame', globals(), locals())
#import pygame
pygame.init()
self.gameDisplay = pygame.display.set_mode((1000,1000))
self.gameDisplay.fill(white)
self.pixAr = pygame.PixelArray(self.gameDisplay)
self.render = True
#pygame.display.update()
def step_simulation(self, save=False):
self.steps += 1
RVO.CRVO(self.agents, self.obstacles)
for j, (agent, arrived) in enumerate(zip(self.agents, self.arrived)):
if not arrived:
agent.update_preferred_velocity()
CRVO = agent.CRVO
agent.set_CRVO(CRVO)
v_pref = agent.v_pref
apexes = agent.apexes
agent.set_apexes(apexes)
vel, _, _ = RVO.find_velocity(agent, CRVO, apexes, v_pref, agent.s_pref)
agent.update_velocity(vel)
if agent.move(vel, self.timestep):
self.set_arrived(j)
speed = dist([0,0], vel)
self.record_perc_speed(j, speed)
self.record_distance_travelled(j, speed)
self.record_speed(j, speed)
self.path[j].append(agent.p)
if self.render:
self.gameDisplay.fill(white)
for agent in self.agents:
if agent.agent_id == 0:
pygame.draw.circle(self.gameDisplay, black, (agent.p[0] + self.border, agent.p[1] + self.border), agent.radius)
#pygame.draw.circle(self.gameDisplay, red, (agent.g[0] + self.border, agent.g[1] + self.border), agent.radius)
else:
pygame.draw.circle(self.gameDisplay, black, (agent.p[0] + self.border, agent.p[1] + self.border), agent.radius)
#pygame.draw.circle(self.gameDisplay, 'coral', (agent.g[0] + self.border, agent.g[1] + self.border), agent.radius)
for obstacle in self.obstacles:
pygame.draw.rect(self.gameDisplay, grey, (obstacle.p[0] - 0.5*obstacle.x_size + self.border, obstacle.p[1] - 0.5*obstacle.y_size + self.border, obstacle.x_size, obstacle.y_size), width=0)
pygame.display.update()
if save and (all(self.arrived) or self.steps == 500) and not self.saved:
print(self.steps)
self.gameDisplay.fill(white)
for obstacle in self.obstacles:
pygame.draw.rect(self.gameDisplay, grey, (obstacle.p[0] - 0.5*obstacle.x_size + self.border, obstacle.p[1] - 0.5*obstacle.y_size + self.border, obstacle.x_size, obstacle.y_size), width=0)
pygame.draw.circle(self.gameDisplay, gradient_0[0].hex_l, (self.path[0][0][0] + self.border, self.path[0][0][1] + self.border), agent.radius)
pygame.draw.circle(self.gameDisplay, gradient_0[0].hex_l, (self.path[1][0][0] + self.border, self.path[1][0][1] + self.border), agent.radius)
for s in range(self.steps):
pygame.draw.circle(self.gameDisplay, gradient_0[min(s,499)].hex_l, (self.path[0][s][0] + self.border, self.path[0][s][1] + self.border), 1)
pygame.draw.circle(self.gameDisplay, gradient_0[min(s,499)].hex_l, (self.path[1][s][0] + self.border, self.path[1][s][1] + self.border), 1)
pygame.draw.circle(self.gameDisplay, gradient_0[499].hex_l, (self.agents[0].g[0] + self.border, self.agents[0].g[1] + self.border), agent.radius)
pygame.draw.circle(self.gameDisplay, gradient_0[499].hex_l, (self.agents[1].g[0] + self.border, self.agents[1].g[1] + self.border), agent.radius)
filename = '/home/jenny/Documents/Part II Project/Code/Images/%s.bmp' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
pygame.image.save(self.gameDisplay, filename)
print("Image has been saved as %s" % filename)
self.saved = True
#time.sleep(self.timestep)
def add_robot(self, position, goal_location, preferred_speed, radius = 10, max_velocity = None, max_acceleration = None):
self.agents.append(Agent.Agent(len(self.agents), position, goal_location, preferred_speed, radius, max_velocity = None, max_acceleration = None))
self.arrived.append(False)
self.perc_speed.append(0)
self.distance_travelled.append(0)
self.path.append([])
self.speed.append([])
def add_obstacle(self,x_size, y_size, pos, orientation):
self.obstacles.append(Obstacle.Obstacle(x_size, y_size, pos, orientation, self.unit))
def add_obstacle_matrix(self, matrix):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[j][i] >= 1:
self.add_obstacle(self.unit, self.unit, [self.unit*i + self.border, self.unit*j + self.border], [0,0,0,1])
def add_robot_on_matrix(self, position_x, position_y, goal_x, goal_y, preferred_speed, radius = 10, max_velocity = None, max_acceleration = None):
self.add_robot([self.unit * position_x + self.border, self.unit * position_y + self.border], [self.unit * goal_x + self.border, self.unit * goal_y + self.border], preferred_speed, radius, max_velocity, max_acceleration)
def set_arrived(self, agent):
self.arrived[agent] = True
def plot_RVO(self, agent):
plt.clf()
CRVO = self.agents[agent].CRVO
apexes = self.agents[agent].apexes
v_pref = self.agents[agent].v_pref
vel, cones, vs = RVO.find_velocity(self.agents[agent], CRVO, apexes, v_pref, self.agents[agent].s_pref)
plt.plot(v_pref[0], v_pref[1], 'ro')
plt.plot(vel[0], vel[1], 'go')
#for v in vs:
#plt.plot(v[0], v[1], 'go')
for cone, points in zip(cones, CRVO):
colour = (random.random(), random.random(), random.random())
plt.plot(cone.apex[0], cone.apex[1], 'bo')
plt.plot([cone.apex[0], cone.min_point[0]], [cone.apex[1], cone.min_point[1]], color = colour)
plt.plot([cone.apex[0], cone.max_point[0]], [cone.apex[1], cone.max_point[1]], color = colour, linestyle = '--')
if len(points) > 0:
plt.scatter(*zip(*points), color = colour, marker = '.', s=1)
plt.xlabel("x component of velocity")
plt.ylabel("y component of velocity")
plt.savefig("Images/poly.pdf")
        print("saved RVO plot for agent %d at step %d" % (agent, self.steps))
def record_perc_speed(self, agent, speed):
#preferred_speed = self.agents[agent].s_pref
#self.perc_speed[agent].append(speed/preferred_speed)
#print(self.perc_speed)
preferred_speed = self.agents[agent].s_pref
self.perc_speed[agent] = (self.perc_speed[agent] * (self.steps - 1) + (speed/preferred_speed))/self.steps
def record_distance_travelled(self, agent, speed):
self.distance_travelled[agent] += speed * self.timestep
def record_speed(self, agent, speed):
self.speed[agent].append(speed)
if __name__ == "__main__":
s = Simulation()
s.add_robot_on_matrix(0,0, 4,1, 200, 6)
s.add_robot_on_matrix(2,4, 1,0, 200, 6)
s.add_obstacle_matrix( [[0, 0, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1]])
#s.add_obstacle_matrix( [[0, 0, 0, 0, 0],
#[0, 0, 0, 0, 0],
#[0, 0, 0, 0, 0],
#[0, 0, 0, 0, 0],
#[0, 0, 0, 0, 0]])
s.set_render(True)
for i in range(10000):
#print("-------------------------------------------------------------------------")
s.step_simulation(save=True)
if i%100 == 0 and not s.arrived[0]:
s.plot_RVO(0)
```
|
{
"source": "jennyqsun/EEG-Decision-SincNet",
"score": 2
}
|
#### File: jennyqsun/EEG-Decision-SincNet/2param_likelihood_correct.py
```python
import os
import scipy.stats
# import scipy.io.wavfile
# import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.utils
import torchvision
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
# python speaker_id.py --cfg=cfg/SincNet_TIMIT.cfg
from torchvision import datasets, transforms
import sys
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import interactive
# interactive(True)
from dnn_models_pdm import *
from data_io import read_conf_inp
# from pymatreader import read_mat
# from hdf5storage import savemat
from hdf5storage import loadmat
from sklearn.model_selection import train_test_split
from pytorchtools import EarlyStopping, RMSLELoss
from torch.utils.tensorboard import SummaryWriter
from sklearn.linear_model import LinearRegression
import readchans
import random
from sklearn.metrics import r2_score
import pickle
from matplotlib.gridspec import GridSpec
from scipy.io import savemat
seednum = 2021
############################ define model parameters ######################
timestart = 625
timeend = 625+500
trialdur = timeend * 2 - timestart * 2
correctModel = False
notrainMode = False
# sr = 1000
# timeend = 800 # when 300ms after stim
# Hyper-parameters
num_epochs = 300
batch_size = 64
learning_rate = 0.001
num_chan = 98
dropout_rate = 0.5
compute_likelihood = True
cross_val_dir = 'crossval_metric_30_625_1625/'
############################# define random seeds ###########################
torch.manual_seed(seednum)
np.random.seed(seednum)
random.seed(seednum)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(seednum)
######################## tensorbaord initilization ###########################
tb = SummaryWriter('runs/regression_new')
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
######################## creating directory and file name ##########################
# postname = '_prestim500_1000_0123_ddm_2param'
postname = '_1000_ddm_2param_correct'
# postname = '_1000_0123_ddm_2param_final'
modelpath = 'trained_model' + postname
resultpath = 'results' + postname
figurepath = 'figures' + postname
isExist = os.path.exists(modelpath)
if not isExist:
os.makedirs(modelpath)
print(modelpath + ' created')
isExist = os.path.exists(figurepath)
if not isExist:
os.makedirs(figurepath)
print(figurepath + ' created')
isExist = os.path.exists(resultpath)
if not isExist:
os.makedirs(resultpath)
print(resultpath + ' created')
####################### some functions for getting the EEG data ##############
def remove_ticks(fig):
    for i, ax in enumerate(fig.axes):
ax.tick_params(labelbottom=False, labelleft=False)
def viz_histograms(model, epoch):
for name, weight in model.named_parameters():
try:
tb.add_histogram(name, weight, epoch)
tb.add_histogram(f'{name}.grad', weight.grad, epoch)
except NotImplementedError:
continue
def getIDs():
path = '/home/jenny/pdmattention/'
subj = loadmat('behavior2_task3')['uniquepart'][0]
allDataFiles = os.listdir(path + 'task3/final_interp')
sublist = []
for sub in subj:
# newsub= [x[0:10] for ind, x in enumerate(allDataFiles) if int(x[1:4]) == sub]
newsub = [x[0:10] for ind, x in enumerate(allDataFiles) if int(x[1:4]) == sub]
sublist += newsub
finalsub = []
for i in sublist:
finalsub = [x[0:10] for ind, x in enumerate(allDataFiles) if
x[1:4] != '236' and x[1:4] != '193']
finalsub.sort()
return sublist, finalsub
def getddmparams(subj):
path = '/home/jenny/pdmattention/alphaDC/estimates/'
paramdic = loadmat(path + 'behavior2_task3_HDDM_AlphaJan_20_21_14_04_estimates.mat')
uniquepart= loadmat('behavior2_task3')['uniquepart'][0]
ind = np.where(uniquepart == int(subj[1:4]))[0]
print('ind:',ind)
if len(ind) == 0:
print('!!!Warning: No DDM parameters extracted')
sys.exit()
else:
        print('subject DDM Parameters Detected')
alpha = paramdic['alpha'][0,0][2][ind,:] # take the median
ndt = paramdic['ndt'][0,0][2][ind,:]
delta = paramdic['delta'][0,0][2][ind,:]
return (np.mean(alpha), np.mean(ndt), np.mean(delta))
def chansets_new():
chans = np.arange(0, 128)
chans_del = np.array(
[56, 63, 68, 73, 81, 88, 94, 100, 108, 114, 49, 43, 48, 38, 32, 44, 128, 127, 119, 125, 120, 121, 126,
113, 117, 1, 8, 14, 21, 25]) - 1
chans = np.delete(chans, chans_del)
return chans
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.'''
gradss = []
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
gradss.append(p.grad.detach())
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
# plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
# plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
# plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
# plt.xticks(range(0, len(ave_grads), 1), layers, rotation="75")
# plt.xlim(left=0, right=len(ave_grads))
# plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
# plt.xlabel("Layers")
# plt.ylabel("average gradient")
# plt.title("Gradient flow")
# plt.grid(True)
# plt.legend(['max-gradient', 'mean-gradient', 'zero-gradient'])
# print('grads ', grads[1][0].detach())
# grads_all.append(grads[1][0].detach())
return max_grads, ave_grads, gradss, layers
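# Note: plot_grad_flow is called right after loss.backward() in the training loop further
# below; with the plotting lines commented out it simply collects per-layer gradient
# statistics (max, mean, raw gradients, and layer names) for inspection.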
def loadsubjdict(subID):
path = '/home/jenny/pdmattention/task3/final_interp/'
datadict = loadmat(path + subID + 'final_interp.mat')
return datadict
def loadinfo(subID):
path = '/home/jenny/pdmattention/task3/expinfo/'
infodict = loadmat(path + subID + 'task3_expinfo.mat')
spfs = np.squeeze(infodict['spfs'])
correctchoice = np.zeros(infodict['rt'].shape[1])
easycond = np.squeeze((infodict['condition'] == 1) | (infodict['condition'] == 4))
medcond = np.squeeze((infodict['condition'] == 2) | (infodict['condition'] == 5))
hardcond = np.squeeze((infodict['condition'] == 3) | (infodict['condition'] == 6))
correctchoice[((easycond == True) & (spfs > 2.5))] = 1
correctchoice[((medcond == True) & (spfs > 2.5))] = 1
correctchoice[((hardcond == True) & (spfs > 2.5))] = 1
# 1 would be high freq 0 would be low, 1 would be right hand, 0 would be left hand
datadict = loadsubjdict(subID)
correctchoice = np.squeeze(correctchoice[datadict['trials']])
acc = np.squeeze(datadict['correct'])
responsemat = np.zeros(acc.shape)
responsemat[(acc == 1) & (correctchoice == 1)] = 1
responsemat[(acc == 0) & (correctchoice == 1)] = 0
responsemat[(acc == 1) & (correctchoice == 0)] = 0
responsemat[(acc == 0) & (correctchoice == 0)] = 1
return responsemat
def goodchans():
datadict = loadsubjdict('s182_ses1_')
goodchan = datadict['goodchans'][0]
return goodchan
def getdata(datadict, Tstart=250, Tend=1250):
data = np.array(datadict['data'])
data = data[::2, :, :]
sr = np.array(datadict['sr']) / 2
condition = np.array(datadict['condition'])[0]
goodtrials = np.array(datadict['trials'])[0]
correct = np.array(datadict['correct'])[0]
goodchan = goodchans()
data = data[:, :, goodtrials]
data = data[:, :, correct == 1]
condition = condition[correct == 1]
data = data[:, goodchan, :]
return data[Tstart:Tend, :, :], condition
def getrtdata(datadict, Tstart=250, Tend=1250):
data = np.array(datadict['data'])
data = data[::2, :, :]
sr = np.array(datadict['sr']) / 2
condition = np.array(datadict['condition'])[0]
goodtrials = np.array(datadict['trials'])[0]
correct = np.array(datadict['correct'])[0]
rt = np.array(datadict['rt'])[0]
rt_label = np.hstack((np.zeros(len(rt) // 3), np.ones(len(rt) // 3)))
slowest = np.ones(len(rt) - len(rt_label)) + 1
rt_label = np.hstack((rt_label, slowest))
rt_label += 1
# goodchan = goodchans()
# goodchan = chanmotor()
goodchan = chansets_new()
data = data[:, :, goodtrials]
# data = data[:, :, correct==1]
# condition = condition[correct==1]
data = data[:, goodchan, :]
return data[Tstart:Tend, :, :], condition, rt_label, rt, correct
def reshapedata(data):
timestep, nchan, ntrial = data.shape
newdata = np.zeros((ntrial, nchan, timestep))
for i in range(0, ntrial):
newdata[i, :, :] = data[:, :, i].T
return newdata
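# reshapedata converts the (time, channel, trial) arrays returned by getrtdata into the
# (trial, channel, time) layout expected by the Dataset classes below.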
#
#
# def my_loss(t, v, t0, a):
# # t is target RT
# # v is output
# # t0 is non decision time
#
#
# w = torch.tensor(0.5).cuda() # convert to relative start point
# kk = torch.arange(-4,6) # we set K to be 10
# try:
# k = torch.tile(kk,(t.shape[0],1)).cuda()
# except IndexError:
# k = kk.cuda()
#
# err = torch.tensor(0.01).cuda()
# tt = torch.max(torch.tensor(t.cuda() - torch.tensor(t0).cuda()),err) / torch.max(err,a.cuda()) ** 2 # normalized time
# tt_vec = torch.tile(tt, (1, 10))
# pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
# pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v*torch.max(err, a)*w - (v**2)*torch.tensor(t).cuda()/2) /(torch.max(err,a)**2))
# return -(p.sum())
def my_loss(t, v, t0, a):
# t is target RT
# v is output
# t0 is non decision time
w = torch.tensor(0.5).cuda() # convert to relative start point
kk = torch.arange(-4,6) # we set K to be 10
try:
k = torch.tile(kk,(t.shape[0],1)).cuda()
except IndexError:
k = kk.cuda()
err = torch.tensor(0.02).cuda()
tt = torch.max(torch.tensor(torch.abs(t.cuda()) - torch.tensor(t0).cuda()),err) / torch.max(err,a.cuda()) ** 2 # normalized time
tt_vec = torch.tile(tt, (1, 10))
pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
pp = pp[:, None]
# v = torch.where(torch.tensor(t).cuda()>0, v, -v) # if time is negative, flip the sign of v
v = torch.clamp(v, -6,6)
t = torch.where(torch.tensor(t).cuda() > 0, torch.tensor(t).cuda(), torch.tensor(-t).cuda())
p = pp * (torch.exp(-v*torch.max(err, a)*w - (v**2)*torch.tensor(t).cuda()/2) /(torch.max(err,a)**2))
# p = torch.where(torch.tensor(v).cuda()>0, 1*p, 6*p)
p = torch.log(p)
# p = torch.where(torch.tensor(v).cuda()>0, p, -p)
# print(t,a,v)
# print('probability is ', p)
return -(p.sum())
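# Minimal usage sketch of my_loss (illustrative only; the shapes and values below are
# assumptions, not taken from the training loop in this script):
#
#   rt = torch.tensor([[0.8], [1.1]])               # observed RTs in seconds, shape (batch, 1)
#   drift = torch.randn(2, 1).cuda()                # per-trial drift estimates from the network
#   alpha = torch.full((2, 1), 1.2).cuda()          # per-trial boundary estimates
#   nll = my_loss(rt, drift, 0.3, torch.mean(alpha, axis=0))
#
# my_loss returns the summed negative log of a small-time series approximation to the
# Wiener first-passage-time density, so lower values mean the (drift, boundary) pair
# explains the observed RTs better.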
# def my_loss(t, v, t0, a,z,err=1e-29):
# # t is target RT
# # v is output
# # t0 is non decision time
#
# tt = torch.tensor(t.cuda()-torch.tensor(t0).cuda())/(torch.tensor(a).cuda()**2) # normalized time
# tt[tt<0] = 0.01
# w = torch.tensor(z).cuda()/torch.tensor(a).cuda() # convert to relative start point
# ks = 2 + torch.sqrt(-2 * tt * torch.log(2 * torch.sqrt(2 * torch.tensor(np.pi) * tt) * err)) #bound
# ks = torch.max(ks,torch.square(tt)+1) # ensure bouhndary conditions are met
# kk = torch.arange(-4,6) # we set K to be 10
# try:
# k = torch.tile(kk,(t.shape[0],1)).cuda()
# except IndexError:
# k = kk.cuda()
# tt_vec = torch.tile(tt, (1,10))
# pp = torch.cumsum((w+2*k)*torch.exp(-(((w+2*k)**2)/2)/tt_vec),axis=1)
# pp = pp[:,-1]/torch.sqrt(2*torch.tensor(np.pi)*torch.squeeze(tt)**3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v*a*w - (v**2)*torch.tensor(t).cuda()/2) /(a**2))
# return -(p.sum())
#
# def my_loss:
# p = (t-t0)/a**2
# p = 1/(2*np.pi*(tt**3))
#
#
# def my_loss(t, v, t0, a, err=1e-29):
# # t is target RT
# # v is output
# # t0 is non decision time
#
# tt = torch.tensor(t.cuda() - torch.tensor(t0).cuda()) / (torch.tensor(a).cuda() ** 2) # normalized time
# tt[tt < 0] = 0.01
# w = 0.5
# ks = 2 + torch.sqrt(-2 * tt * torch.log(2 * torch.sqrt(2 * torch.tensor(np.pi) * tt) * err)) # bound
# ks = torch.max(ks, torch.square(tt) + 1) # ensure bouhndary conditions are met
# kk = torch.arange(-4, 6) # we set K to be 10
# try:
# k = torch.tile(kk, (t.shape[0], 1)).cuda()
# except IndexError:
# k = kk.cuda()
# tt_vec = torch.tile(tt, (1, 10))
# pp = torch.cumsum(20.5 * torch.exp(-((20.5 ** 2) / 2) / tt_vec), axis=1)
# pp = pp[:, -1] / torch.sqrt(2 * torch.tensor(np.pi) * torch.squeeze(tt) ** 3)
# pp = pp[:, None]
#
# p = torch.log(pp * torch.exp(-v * a * w - (v ** 2) * torch.tensor(t).cuda() / 2) / (a ** 2))
# return -(p.sum())
# # loss = torch.zeros(len(target),requires_grad=True).cuda()
# # #
# # for i in range(0,len(target)):
# # # loss[i] = - torch.tensor((wfpt_logp1(target[i], 1, bias[i], torch.abs(ndt[i]), drift[i], 1, eps = 1e-10))).cuda()
# # loss[i] = - torch.tensor((wfpt_logp1(target[i], 1, torch.abs(torch.tensor(-0.6)), torch.abs(torch.tensor(0.3)), drift[i], 1, eps = 1e-10))).cuda()
# # if torch.isinf(loss[i]):
# # loss[i] = - torch.log(torch.tensor(8.423e-40).cuda()) #to avoid having inf
# loss = -1 * (((-1/2) * torch.log(2*torch.tensor(pi))) - ((1/2) * torch.log(torch.tensor(1)**2)) -(1/(2*torch.tensor(1)**2))*(target - ndt)**2)
# # print('loss--------------': , loss )
# return torch.mean(loss)
############################# class for dataloaders ########################
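# Each Dataset class below mean-centers every trial (subtracting each channel's mean over
# time) before returning (EEG, label) pairs; the four classes differ only in which split
# (sub-training, validation, full training, test) they wrap.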
# produce the dataset
class SubTrDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_train_sub.shape[0]
self.x_data = np.asarray(X_train_sub, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_train_sub.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_train_sub, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class ValDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_val.shape[0]
self.x_data = np.asarray(X_val, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_val.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_val, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class TrDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_train0.shape[0]
self.x_data = np.asarray(X_train0, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_train0.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_train0, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
# produce the dataset
class TestDataset(Dataset):
def __init__(self, transform=None):
self.n_samples = X_test.shape[0]
self.x_data = np.asarray(X_test, dtype=np.float32)
Xmean = np.mean(self.x_data, axis=2)
Xmean_mat = Xmean[:, :, np.newaxis].repeat(X_test.shape[-1], axis=2)
self.x_data = self.x_data - Xmean_mat
self.y_data = np.asarray(y_test, dtype=np.float32)
self.transform = transform
def __getitem__(self, index):
sample = self.x_data[index], self.y_data[[index]]
if self.transform: # if transform is not none
sample = self.transform(sample)
return sample
def __len__(self):
return self.n_samples
class ToTensor:
# Convert ndarrays to Tensors
    def __call__(self, sample):  # now it becomes a callable object
inputs, targets = sample
return torch.from_numpy(inputs), torch.from_numpy(targets)
def reset_weights(m):
'''
Try resetting model weights to avoid
weight leakage.
'''
for layer in m.children():
if hasattr(layer, 'reset_parameters'):
print(f'Reset trainable parameters of layer = {layer}')
layer.reset_parameters()
def initialize_weights(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight.data)
        print('init kaiming uniform %s' % m)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight.data, 1)
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.Linear):
        print('init kaiming uniform %s' % m)
nn.init.kaiming_uniform_(m.weight.data)
nn.init.constant_(m.bias.data, 0)
# %%
############################################################################
################################# starts here ###############################
############################################################################
results = dict() # a results dictionary for storing all the data
subIDs, finalsubIDs = getIDs()
mylist = np.arange(0, len(finalsubIDs))
subj = loadmat('behavior2_task3')['uniquepart'][0].tolist()
############################################
############### set subject ######################
############################################
for s in range(36, 37):
# a results dictionary for storing all the data
subIDs, finalsubIDs = getIDs()
# for i in range(0,1):
torch.manual_seed(seednum)
np.random.seed(seednum)
random.seed(seednum)
# if int(finalsubIDs[s][1:4]) in subj:
# print('in-sample subject')
# else:
# print('no in-sample subject, skipping to the next one>>>')
# continue
# ddmparams = getddmparams(finalsubIDs[s])
ddmparams = loadmat('/home/jenny/pdmattention/sincnet/single_nocond_' + finalsubIDs[s] + '.mat')
alpha, ndt_mcmc, drift = ddmparams['alpha'][0][0][2][0][0],ddmparams['ndt'][0][0][2][0][0],ddmparams['delta'][0][0][2][0][0]
# alpha, ndt, drift = ddmparams
# alpha = 1.39681064
# ndt = 0.39675787
# drift = 0.89709653
# alpha = alpha *2
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(seednum)
subjectstart = mylist[s]
subjectend = subjectstart + 1
####################### define sub #########################################
datadict = loadsubjdict(finalsubIDs[subjectstart])
print(str(subjectstart) + '/' + 'subjectID: ' + finalsubIDs[subjectstart])
data, cond, _, condition, correct = getrtdata(datadict, timestart, timeend)
# response = loadinfo(finalsubIDs[subjectstart])
rtall = condition.copy()
correct = correct.astype('int')
if correctModel is True:
condition = (correct * 2 - 1) * condition
correctind = condition>0
newdata = reshapedata(data).astype('float32')
condition = condition[correctind]
newdata = newdata[correctind,:,:]
cond = cond[correctind]
# # # get rid of the rts that are lower than ndt
# newdata = newdata[rtall>ndt,:,:]
# cond = cond[rtall>ndt]
# correct = correct[rtall>ndt]
# rtall = rtall[rtall>ndt]
#
# condition = condition[condition>ndt]
# # get correct only trials
# newdata=newdata[correct==1,:,:]
# cond = cond[correct==1]
# rtall = rtall[correct==1]
# condition = condition[correct==1]
# X_train000, X_test000, y_train000, y_test000 = train_test_split(newdata, condition, test_size=0.2, random_state=42)
# ndt = np.percentile(y_train000,1)
X_train0, X_test, y_train0, y_test = train_test_split(newdata, condition, test_size=0.2, random_state=42)
ndt = np.min(np.abs(y_train0)) * 0.93
print('MCMC ndt: ', ndt_mcmc)
print('ndt: ', ndt)
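    # Plug-in non-decision time: 93% of the smallest absolute training RT. This value,
    # rather than the MCMC estimate printed above, is what gets passed to my_loss below.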
X_train00, X_test0, y_train0_cond, y_test_cond = train_test_split(newdata, cond, test_size=0.2, random_state=42)
# ndtint_train = y_train0>ndt
# ndtint_test = y_test> ndt
# X_train0, X_test, y_train0, y_test = X_train0[ndtint_train,:,:], X_test[ndtint_test,:,:], y_train0[ndtint_train], y_test[ndtint_test]
# X_train00, X_test0, y_train0_cond, y_test_cond = X_train00[ndtint_train,:,:], X_test0[ndtint_test,:,:], y_train0_cond[ndtint_train], y_test_cond[ndtint_test]
#
# y_train0 = np.ones_like(y_train0) * drift
# print(X_train0[200, 50, 150])
# print(X_test[24, 50, 150])
train_set = TrDataset(transform=ToTensor())
train_loader = DataLoader(dataset=train_set,
batch_size=batch_size,
shuffle=True, # shuffle the data
num_workers=0, worker_init_fn=seed_worker,
generator=g)
test_set = TestDataset(transform=ToTensor())
test_loader = DataLoader(dataset=test_set,
batch_size=batch_size,
shuffle=False, # shuffle the data
num_workers=0, worker_init_fn=seed_worker,
generator=g)
# sample the data
data, target = next(iter(train_loader))
# plt.plot(data[10,:,:].T)
# plt.show()
data, target = next(iter(test_loader))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#################################################################################
######################## creating pre training visulization #####################
#################################################################################
targetlist = []
predictedlist = []
plt.rcParams.update({'font.size': 17})
fig = plt.figure(figsize=(18, 9))
gs = GridSpec(2, 4, figure=fig)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax4 = fig.add_subplot(gs[0, 2:])
ax5 = fig.add_subplot(gs[1, 2:])
gradlist = []
model_0 = Sinc_Conv2d_ddm_2param(dropout=dropout_rate).cuda()
model_0.eval()
criterion = nn.MSELoss()
n_total_steps = len(test_loader)
for i, (test_data, test_target) in enumerate(test_loader):
        cond_target = y_test_cond[i * batch_size : i * batch_size + test_target.shape[0]]
# # test_data, test_target = next(iter(test_loader))
pred, pred_1 = model_0(test_data.cuda())
pred_copy = pred.detach().cpu()
pred.mean().backward()
gradients = model_0.get_activations_gradient_filter()
gradlist.append(gradients)
test_target = torch.squeeze((test_target))
if cond_target.shape[0]==1:
test_target= test_target.view(1, 1)
else:
test_target = test_target.view(test_target.shape[0], 1)
# test_loss = my_loss(test_target.cuda(), pred_copy.cuda(), ndt, alpha,alpha/2, err = 1e-29)
test_loss = my_loss(test_target.cuda(), pred_copy.cuda(), ndt, torch.mean(pred_1.detach().cuda(), axis=0).cuda())
r2 = r2_score(test_target.cpu().detach().numpy(), pred_copy.cpu().detach().numpy())
# print("validation accuracy: ", val_acc)
# print("validation loss: ", val_loss)
# valacc_batch.append(val_acc.cpu())
try:
targetlist += torch.squeeze(test_target).tolist()
predictedlist += torch.squeeze(-pred_copy).cpu().tolist()
except TypeError:
targetlist += [torch.squeeze(test_target).tolist()]
predictedlist += [torch.squeeze(-pred_copy).cpu().tolist()]
print(f'Testing Batch: {i}, Step [{i + 1}/{n_total_steps}], Loss: {test_loss.item():.4f}, R^2 : {r2}')
# if i % 1 == 0:
# plt.plot(test_target, label='target')
# plt.plot(test_output.cpu().detach().numpy(), label='predicted')
# ax0.scatter(test_target, pred_copy.cpu().detach().numpy(), color ='b')
targetlist = np.array(targetlist)
predictedlist = np.array(predictedlist)
# ax0.scatter(targetlist[y_test_cond==1], predictedlist[y_test_cond==1], color='green', marker = 'o', label = 'easy')
# ax0.scatter(targetlist[y_test_cond==2], predictedlist[y_test_cond==2], color='blue', marker = '*', label = 'median')
# ax0.scatter(targetlist[y_test_cond==3], predictedlist[y_test_cond==3], color='red', marker = '^', label = 'hard')
# ax0.legend()
# ax0.set_xlabel('actual RT')
# ax0.set_ylabel('predicted Drift')
ax1.hist(rtall * 1000, bins=12, color='green')
if timestart < 625:
ax1.axvspan(0, (timeend-625)*2, color='cornflowerblue', alpha=0.5)
else:
ax1.axvspan(0, trialdur, color='cornflowerblue', alpha=0.5)
# xt = ax0.get_xticks()
# xt= np.append(xt, trialdur)
# xtl = xt.tolist()
#
# xtl[-1] = [format(trialdur)]
ax1.set_xticks([trialdur])
ax1.set_xticklabels(['window length' + format(trialdur) + 'ms\n' + 'post-stimulus:' + format(2*(timeend-625)) + 'ms'])
if timestart < 625:
fractionrt = sum(rtall * 1000 < (timeend-625)*2) / len(rtall) * 100
else:
fractionrt = sum(rtall * 1000 < trialdur) / len(rtall) * 100
ax1.text(0, ax1.get_ylim()[1] / 3, '%.2f' % fractionrt + '% \nof all\n RTs')
ax1.set_title('Fraction of RT')
# fig.show()
try:
G = torch.abs(torch.cat((gradlist[0], gradlist[1]), axis=0))
except IndexError:
G = torch.abs((gradlist[0]))
g_ij = np.mean(G.cpu().numpy(), axis=(-2, -1))
g_j = np.mean(g_ij, axis=0)
g_scaled = g_j / np.max(g_j)
order = np.argsort(g_scaled)
# r2all = r2_score(targetlist, predictedlist)
# print('r2all', r2all)
# corr_log = scipy.stats.pearsonr(targetlist, predictedlist)
# print('model0 corr log ----: ', corr_log)
# corr_rho = scipy.stats.spearmanr(targetlist, predictedlist)
# targetlist = [np.exp(i) for i in targetlist]
# predictedlist = [np.exp(i) for i in predictedlist]
print('correlation: ', scipy.stats.pearsonr(targetlist, predictedlist))
#
corr = scipy.stats.pearsonr(targetlist, predictedlist)
corr_rho = scipy.stats.spearmanr(targetlist, predictedlist)
# ax[0].set_title('corr = %.2f'% corr[0] + ' r2 = %.2backwardf' % r2all)
# ax0.set_title('Untrained Model: corr = %.2f' % corr[0] + '\n (corr_'r'$\rho = %.2f$)'% corr_rho[0])
# #
# p = model_0.state_dict()
# p_low = p['sinc_cnn2d.filt_b1']
# p_band = p['sinc_cnn2d.filt_band']
# #
# filt_beg_freq = (torch.abs(p_low) + 1 / 500)
# filt_end_freq = (filt_beg_freq + torch.abs(p_band) + 2 / 500)
#
# filt_beg_freq = filt_beg_freq.cpu().numpy() * 500
# filt_end_freq = filt_end_freq.cpu().numpy() * 500
# for i in range(0, 32):
# if i == order[-1]:
# ax1.axvspan(filt_beg_freq[i], filt_end_freq[i], color='darkred', alpha=0.5,
# label='1st \n(filter %s)' % order[-1])
# print('1st: %s' % [filt_beg_freq[i], filt_end_freq[i]])
# if i == order[-2]:
# ax1.axvspan(filt_beg_freq[i], filt_end_freq[i], color='red', alpha=0.5,
# label='2nd \n(filter %s)' % order[-2])
# print('2nd: %s' % [filt_beg_freq[i], filt_end_freq[i]])
# if i == order[-3]:
# ax1.axvspan(filt_beg_freq[i], filt_end_freq[i], color='plum', alpha=0.5,
# label='3rd \n(filter %s)' % order[-3])
# print('3rd: %s' % [filt_beg_freq[i], filt_end_freq[i]])
#
# ax1.plot([filt_beg_freq[i], filt_end_freq[i]], [i] * 2)
# ax1.set_title('subject %s' % finalsubIDs[s])
# ax1.legend()
# ax1.set_xlabel('Frequency')
# plt.show()
p = []
torch.manual_seed(seednum)
np.random.seed(seednum)
random.seed(seednum)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
# %%
g = torch.Generator()
g.manual_seed(seednum)
# model.apply(initialize_weights)
criterion = nn.MSELoss()
# criterion = nn.L1Loss()
print('criterion: ', criterion)
model = Sinc_Conv2d_ddm_2param(dropout=dropout_rate).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
classes = ('easy', 'medium', 'hard')
# find the best patience and train
##########################################################################
###################### stage 2 training ##################################
##########################################################################
# pick the best target loss
# myfile = open(cross_val_dir + f'{finalsubIDs[subjectstart]}.pkl', 'rb')
myfile = open(cross_val_dir + f'{finalsubIDs[subjectstart]}.pkl', 'rb')
outfile = pickle.load(myfile)
val_lossmin = -100
for k in outfile.keys():
newloss = outfile[k][2]
if newloss > val_lossmin:
val_lossmin = newloss
train_lossmin = outfile[k][0]
optimal_p = int(k)
print('optimal patience for early stop was %s' % optimal_p, '\ntarget training loss is %s' % train_lossmin)
n_total_steps = len(train_loader)
# criterion = nn.CrossEntropyLoss().cuda()
# criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
early_stopping = EarlyStopping(patience=1, verbose=True)
###########################################################################################
########################## load trained model ##############################################
if notrainMode is True:
model.load_state_dict(torch.load('/home/jenny/sincnet_eeg/' + modelpath +
'/mymodel_%s' % finalsubIDs[s] + postname + '.pth'))
model.eval()
else:
# grads_all = dict()
# ave_grads_all = []
# max_grads_all = []
train_loss = []
flag = False
for epoch in range(num_epochs):
epoch_acc = []
epoch_loss = []
# if flag is True:
# break
for i, (data, target) in enumerate(train_loader):
gradss = []
# origin shape: [4, 3, 32, 32] = 4, 3, 1024
# input_layer: 3 input channels, 6 output channels, 5 kernel size
target = torch.squeeze((target))
# target = target.long()
try:
target = target.view(target.shape[0], 1)
                except IndexError:
                    target = target.view(1, 1)
batch_n = data.shape[0]
# if (epoch==4) & (i ==1):
# print('break')
# flag = True
# break
# Forward pass
outputs, outputs_alpha = model(data)
# print('d',outputs,'a',outputs_alpha)
# print('target', target)
# print(outputs_alpha)
# loss = criterion(outputs, target.cuda())
# loss = my_loss(target,outputs,ndt, torch.mean(outputs_alpha),torch.mean(outputs_alpha)/2,1e-29)
loss = my_loss(target.cuda(), outputs.cuda(), ndt, torch.mean(outputs_alpha,axis=0).cuda())
# Backward and optimize
optimizer.zero_grad()
loss.backward()
max_grads, ave_grads, gradss, layers = plot_grad_flow(model.named_parameters())
# print('average grads',ave_grads)
optimizer.step()
# model.sinc_cnn2d.filt_b1 = nn.Parameter(torch.clamp(model.sinc_cnn2d.filt_b1, 0, 0.025))
# model.sinc_cnn2d.filt_band = nn.Parameter(torch.clamp(model.sinc_cnn2d.filt_band, 0, 0.016))
epoch_loss.append(loss.detach().cpu().numpy())
# _, predicted = torch.max(outputs.detach(), 1)
# acc = (predicted == target.cuda()).sum() / predicted.shape[0]
# epoch_acc.append(acc.cpu().numpy())
print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{n_total_steps}], Loss: {loss.item():.4f}')
tb.add_scalar('training loss', epoch_loss[-1], epoch)
viz_histograms(model, epoch)
##################### save the gradient ###################
# print('grads', gradss[1][1].detach().cpu())
# if epoch == 0 and i == 0:
# print('create dic')
# for l in layers:
# grads_all[l] = []
# for l, j in enumerate(layers):
# grads_all[j].append(gradss[l].detach().cpu())
# ave_grads_all.append(ave_grads)
# max_grads_all.append(max_grads)
train_loss.append(loss.detach().cpu().numpy())
lossmean = np.mean(epoch_loss)
#
# early_stopping(lossmean, model)
# if early_stopping.early_stop:
# print("Early stopping")
# break
#
# print('pretrain target loss', pretrain_loss[-1])
# print(lossmean)
# if lossmean < train_lossmin:
# print('reached minimum')
# break
early_stopping(lossmean, model)
if early_stopping.early_stop:
print("Early stopping")
break
### save the model
p = model.state_dict()
# torch.save(p, modelpath + '/' + 'mymodel_%s' % finalsubIDs[s] + postname + '.pth')
# %%
##########################################################################
########################## final testing #################################
##########################################################################
# read my model
# model = Sinc_Conv2d_new().cuda()
# model.load_state_dict(torch.load(modelpath + '/' + 'mymodel_%s'%finalsubIDs[s] + postname + '.pth'))
# torch.save(p, modelpath + '/' + 'mymodel_%s'%finalsubIDs[s] + postname + '.pth')
targetlist = []
predictedlist = []
predictedlist_alpha =[]
plt.rcParams.update({'font.size': 17})
#
# fig, ax = plt.subplots(1, 2, figsize=(12, 6))
gradlist = []
gradtemplist = []
pred_copy = []
# model = Sinc_Conv2d_new().cuda()
model.eval()
criterion = nn.MSELoss()
n_total_steps = len(test_loader)
trial_grad_list = []
for i, (test_data, test_target) in enumerate(test_loader):
        cond_target = y_test_cond[i * batch_size : i * batch_size + test_target.shape[0]]
# # test_data, test_target = next(iter(test_loader))
pred, pred_1 = model(test_data.cuda())
pred_1_copy = pred_1.detach().cpu()
pred_copy = pred.detach().cpu()
# pred.backward(gradient=torch.ones(64, 1).cuda())
pred.mean().backward()
gradients = model.get_activations_gradient_filter()
gradients_temp = model.get_activations_gradient_temp()
gradlist.append(gradients)
gradtemplist.append(gradients_temp)
test_target = torch.squeeze((test_target))
if cond_target.shape[0] == 1:
test_target = test_target.view(1, 1)
else:
test_target = test_target.view(test_target.shape[0], 1)
# test_loss = my_loss(test_target.cuda(), pred_copy.cuda(),ndt, alpha,alpha/2, err = 1e-29)
test_loss = my_loss(test_target, pred_copy.cuda(), ndt, torch.mean(pred_1, axis=0).cuda())
r2 = r2_score(test_target.cpu().detach().numpy(), pred_copy.cpu().detach().numpy())
# print("validation accuracy: ", val_acc)
# print("validation loss: ", val_loss)
# valacc_batch.append(val_acc.cpu())
try:
targetlist += torch.squeeze(test_target).tolist()
predictedlist += torch.squeeze(-pred_copy).cpu().tolist()
predictedlist_alpha += torch.squeeze(pred_1_copy).cpu().tolist()
except TypeError:
targetlist += [torch.squeeze(test_target).tolist()]
predictedlist += [torch.squeeze(-pred_copy).cpu().tolist()]
predictedlist_alpha +=[torch.squeeze(pred_1_copy).cpu().tolist()]
print(f'Testing Batch: {i}, Step [{i + 1}/{n_total_steps}], Loss: {test_loss.item():.4f}, R^2 : {r2}')
# if i % 1 == 0:
# # plt.plot(test_target, label='target')
# # plt.plot(test_output.cpu().detach().numpy(), label='predicted')
# ax2.scatter(test_target, -pred_copy.cpu().detach().numpy(), color='b')
# ax2.set_xlabel('actual RT')
# ax2.set_ylabel('predicted Drift')
# ax[0].scatter(test_target, test_output.cpu().detach().numpy(), color ='b')
# corr_log1 = scipy.stats.pearsonr(targetlist, predictedlist)
targetlist = np.array(targetlist)
predictedlist = np.array(predictedlist)
predictedlist_alpha = np.array(predictedlist_alpha)
# ax2.scatter(targetlist[y_test_cond==1], predictedlist[y_test_cond==1], color='green', marker = 'o',)
# ax2.scatter(targetlist[y_test_cond==2], predictedlist[y_test_cond==2], color='blue', marker = '*')
# ax2.scatter(targetlist[y_test_cond==3], predictedlist[y_test_cond==3], color='red', marker = '^')
ax2.scatter(1/targetlist[targetlist>0], predictedlist[targetlist>0], marker = 'o', color = 'blue')
ax2.scatter(-1/targetlist[targetlist<0], -predictedlist[targetlist<0], marker = 'o',color = 'green')
# ax2.axhline(np.median(predictedlist_alpha))
ax2.set_xlabel('1/RT')
ax2.set_ylabel('predicted Drift')
# print('corr log1: ', corr_log1)
# targetlist = [np.exp(i) for i in targetlist]
# predictedlist = [np.exp(i) for i in predictedlist]
corr1 = scipy.stats.pearsonr(1/targetlist[targetlist>0], predictedlist[targetlist>0])
corr_rho1 = scipy.stats.spearmanr(1/targetlist[targetlist>0], predictedlist[targetlist>0])
corr1 = scipy.stats.pearsonr(np.abs(1/targetlist), predictedlist)
corr_rho1 = scipy.stats.spearmanr(np.abs(1/targetlist), predictedlist)
r2all = r2_score(targetlist, predictedlist)
print('r2all', r2all)
print('correlation exp: ', corr1)
ax2.set_title('Trained Model: corr = %.2f' % corr1[0] + '\n (corr_'r'$\rho = %.2f$)' % corr_rho1[0] + '\n Boundary:%.3f'% np.median(predictedlist_alpha))
#
# for j in range(test_data.shape[0]):
# test_data_trial = test_data[[j],:,:]
# test_target_trial = test_target[[j],:]
# model.eval()
# pred_trial = model(test_data_trial.cuda())
# pred_trial.backward()
# trial_grad = model.get_activations_gradient()
# trial_grad_mean = torch.mean(torch.abs(trial_grad.detach().cpu()), axis = (-2,-1))
# trial_grad_list.append(trial_grad_mean)
err = np.sqrt(np.subtract(targetlist, predictedlist) ** 2)
threshold = np.percentile(err, 90)
errind = [i for i, j in enumerate(err) if j < threshold]
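    # errind keeps the test trials whose prediction error falls below the 90th percentile;
    # the filter (g_j) and temporal (Gt) gradient summaries below are averaged over those
    # trials only.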
try:
G = torch.abs(torch.cat((gradlist[0], gradlist[1]), axis=0))
except IndexError:
G = torch.abs((gradlist[0]))
g_ij = np.mean(G.cpu().numpy(), axis=(-2, -1))
g_j = np.mean(g_ij[errind, :], axis=0)
g_scaled = g_j / np.max(g_j)
order = np.argsort(g_scaled)
try:
Gt = torch.abs((torch.cat((gradtemplist[0], gradtemplist[1]), axis=0)))
Gtemp = torch.squeeze(abs((torch.cat((gradtemplist[0], gradtemplist[1]), axis=0))))
except IndexError:
Gt = torch.abs((gradtemplist[0]))
Gtemp = torch.squeeze(abs(((gradtemplist[0]))))
Gt = Gt[errind, :, :, :].mean(axis=0)
Gt1 = Gt[order[-1] * 2:order[-1] * 2 + 2, :, :]
Gt1max = torch.argmax(Gt1, axis=2).detach().cpu().numpy()
Gt2 = Gt[order[-2] * 2:order[-2] * 2 + 2, :, :]
Gt2max = torch.argmax(Gt2, axis=2).detach().cpu().numpy()
Gt3 = Gt[order[-3] * 2:order[-3] * 2 + 2, :, :]
Gt3max = torch.argmax(Gt3, axis=2).detach().cpu().numpy()
Gt = torch.squeeze(Gt.cpu())
Gtmean = torch.mean(Gt, axis=1)
Gtresult = torch.matmul(Gtmean, Gt)
Gorder = torch.argsort(Gtresult)
# draw the estimated training from
result = []
out = []
train_target= []
out_alpha = []
for i, (data, target) in enumerate(train_loader):
gradss = []
# origin shape: [4, 3, 32, 32] = 4, 3, 1024
# input_layer: 3 input channels, 6 output channels, 5 kernel size
target = torch.squeeze((target))
# target = target.long()
try:
target = target.view(target.shape[0], 1)
        except IndexError:
            target = target.view(1, 1)
batch_n = data.shape[0]
# Forward pass
outputs, outputs_1 = model(data)
# print(outputs)
train_drift = torch.squeeze(-outputs).cpu().tolist()
out += train_drift
train_rt= torch.squeeze(target).cpu().tolist()
train_target += train_rt
train_alpha = torch.squeeze(outputs_1).cpu().tolist()
out_alpha += train_alpha
#
# result.append(torch.mean(-outputs[:, 0]).detach().cpu().tolist())
# result.append(torch.mean(-outputs[:, 0]).detach().cpu().tolist())
print('mean drift from training:---------------- ', np.mean(out))
out = np.asarray(out)
train_target = np.asarray(train_target)
out_alpha = np.asarray(out_alpha)
ax0.scatter(1/np.array(train_target[train_target>0]), out[train_target>0], color = 'blue', label = 'correct')
# ax0.scatter(-1/np.array(train_target[train_target<0]), -out[train_target<0], color = 'green', label = 'incorrect')
# ax0.axhline(np.median(out[train_target>0]), label = 'median drift',linewidth = 4)
ax0.axhline(np.median(out), label = 'median drift',linewidth = 4)
ax0.axhline(drift, label = 'MCMC drift',color='red',linewidth = 4)
ax1.clear()
ax1.scatter(1*np.array(train_target[train_target>0]), out_alpha[train_target>0], label = 'correct',color = 'blue')
# ax1.scatter(-1*np.array(train_target[train_target<0]), out_alpha[train_target<0], color = 'green', label = 'incorrect')
corr_rho_train_alpha = scipy.stats.spearmanr(np.abs(np.array(train_target)), np.abs(out_alpha))
ax1.set_title('Median Boundary: %.2f'%np.median(out_alpha) + '(%.2f)'% corr_rho_train_alpha[0])
# ax0.axhline(np.median(out[train_target>0]), label = 'median drift',linewidth = 4)
ax1.axhline(np.median(out_alpha), label = 'median alpha',linewidth = 4)
ax1.axhline(alpha, label = 'MCMC alpha',color='red',linewidth = 4)
# corr_train = scipy.stats.pearsonr(1/np.array(train_target[train_target>0]), out[train_target>0])
# corr_rho_train = scipy.stats.spearmanr(1/np.array(train_target[train_target>0]), out[train_target>0])
corr_train = scipy.stats.pearsonr(np.abs(1/np.array(train_target)), np.abs(out))
corr_rho_train_alpha = scipy.stats.spearmanr(np.abs(np.array(train_target)), np.abs(out_alpha))
corr_rho_train = scipy.stats.spearmanr(1/np.abs(np.array(train_target)), np.abs(out))
ax0.set_title('Training Results: corr = %.2f' % corr_train[0] + '\n (corr_'r'$\rho = %.2f$)'% corr_rho_train[0] )
ax0.set_xlabel('actual 1/RT')
ax0.set_ylabel('predicted Drift')
ax0.legend()
def crit_timewindow(time_index, stride, window_length, conv_length, tend, tstart):
        '''This returns the critical time window, where 0 is the beginning of the window used.
        e.g., 0 for 625 to 1625 would be 625 (1250ms),
        but if tstart is not 625 but smaller, for example 375, t1 would be -500.'''
t1, t2 = (time_index * stride) / conv_length * (tend - tstart) * 2, \
(time_index * stride + window_length) / conv_length * (tend - tstart) * 2
return t1 + (-2*(625-tstart)) , t2+(-2*(625-tstart))
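    # Worked example (all values assumed for illustration, not read from the model):
    # with tstart=625, tend=1625, stride=15, window_length=35 and conv_length=966,
    # time_index=3 gives t1 = (3*15)/966*2000 ~ 93 ms and t2 = (3*15+35)/966*2000 ~ 166 ms
    # after stimulus onset (the offset term is 0 because tstart == 625).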
ax5.clear()
if timestart < 625:
ax5.plot(np.arange(2*(timestart - 625),(timeend-625)*2, 2),data[0,:,:].T, alpha=0.8,color = 'silver')
ax5.set_xticks(ticks = np.arange(2*(timestart - 625),(timeend-625)*2+1, 250))
else:
ax5.plot(np.arange(0,trialdur, 2),data[0,:,:].T, alpha=0.8,color = 'silver')
ax5.set_xticks(ticks = np.arange(0,trialdur+1, 250))
# ax5.set_xticklabels(labels = np.arange(timestart*2, timeend*2+1, 250))
ax5.set_xlabel('Stimulus Locked time (ms)')
yminn = ax5.get_ylim()
# ax5.add_
ax51 = ax5.twinx()
ax51.set_ylim(0,500)
ax51.hist(rtall*1000, bins=12, color = 'green')
convlength = data.shape[-1] - model.filter_length + 1
time11, time11end = crit_timewindow(Gt1max[0], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend, timestart)
time12, time12end = crit_timewindow(Gt1max[1], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend,
timestart)
ax5.axvspan(time11, time11end, ymin = 0.9, ymax = 1, alpha=0.8, color='red', label='1st filter', zorder=2)
ax5.axvspan(time12, time12end, ymin = 0.8, ymax = 0.9,alpha=0.8, color='red',zorder=2)
time21, time21end = crit_timewindow(Gt2max[0], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend, timestart)
time22, time22end = crit_timewindow(Gt2max[1], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend,
timestart)
ax5.axvspan(time21, time21end, ymin = 0.7, ymax = 0.8, alpha=0.8, color='darkorange', label='2nd filter', zorder=3)
ax5.axvspan(time22, time22end, ymin = 0.6, ymax = 0.7,alpha=0.8, color='darkorange',zorder=3)
time31, time31end = crit_timewindow(Gt3max[0], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend,
timestart)
time32, time32end = crit_timewindow(Gt3max[1], model.pool1.stride[1], model.pool1.kernel_size[1], convlength,
timeend,
timestart)
ax5.axvspan(time31, time31end, ymin = 0.5, ymax = 0.6, alpha=0.8, color='turquoise', label='3rd filter',zorder=4)
ax5.axvspan(time32, time32end, ymin = 0.4, ymax = 0.5, alpha=0.8, color='turquoise', zorder=4)
ax5.set_title('critical time period')
ax5.set_xlabel('Stimulus Locked time (ms)')
ax5.legend()
# time2, time2end = (Gt[-2] * model.pool1.stride[1]) / convlength * timeend * 2, \
# (Gt[-2] * model.pool1.stride[1] + model.pool1.kernel_size[1]) / convlength * timeend * 2
# ax5.axvspan(time2, time2end, alpha=0.4, color='royalblue')
# time3, time3end = (Gt[-3] * model.pool1.stride[1]) / convlength * timeend * 2, \
# (Gt[-3] * model.pool1.stride[1] + model.pool1.kernel_size[1]) / convlength * timeend * 2
#
# ax5.axvspan(time3, time3end, alpha=0.3, color='cornflowerblue')
# ax5.axvline(1250, color = 'black')
# ax5.axvline(rtmean*1000+1250, linewidth = 11, color = 'darkorange', label = 'Mean RT')
# corr = scipy.stats.pearsonr(targetlist, predictedlist)
# ax[0].set_title('corr = %.2f'% corr[0] + ' r2 = %.2f' % r2all)
# %%
p = model.state_dict()
p_low = p['sinc_cnn2d.filt_b1']
p_band = p['sinc_cnn2d.filt_band']
#
filt_beg_freq = (torch.abs(p_low) + 1 / 500)
filt_end_freq = (filt_beg_freq + torch.abs(p_band) + 2 / 500)
filt_beg_freq = filt_beg_freq.cpu().numpy() * 500
filt_end_freq = filt_end_freq.cpu().numpy() * 500
for i in range(0, 32):
if i == order[-1]:
ax3.axvspan(filt_beg_freq[i], filt_end_freq[i], color='red', alpha=0.8,
label='1st \n(filter %s)' % order[-1],zorder= 3)
print('1st: %s' % [filt_beg_freq[i], filt_end_freq[i]])
if i == order[-2]:
ax3.axvspan(filt_beg_freq[i], filt_end_freq[i], color='darkorange', alpha=0.8,
label='2nd \n(filter %s)' % order[-2], zorder= 2)
print('2nd: %s' % [filt_beg_freq[i], filt_end_freq[i]])
if i == order[-3]:
ax3.axvspan(filt_beg_freq[i], filt_end_freq[i], color='turquoise', alpha=0.8,
label='3rd \n(filter %s)' % order[-3], zorder= 1)
print('3rd: %s' % [filt_beg_freq[i], filt_end_freq[i]])
ax3.plot([filt_beg_freq[i], filt_end_freq[i]], [i] * 2)
ax3.set_title('subject %s' % finalsubIDs[s])
ax3.legend(loc='lower left')
ax3.set_xlabel('Frequency')
#
# filt_sort = np.argsort(filt_beg_freq)
# count = 0
# for i in filt_sort:
# ax[1].plot([filt_beg_freq[i] , filt_end_freq[i]], [count]*2)
# count +=1
# ax[1].set_title('subject %s' % finalsubIDs[s])
# results[finalsubIDs[s]] = dict()
# results[finalsubIDs[s]] = {'filt_beg_freq': filt_beg_freq, 'filt_end_freq': filt_end_freq,
# 'corr': corr1, 'corr_rho': corr_rho1, 'filter_grads': G, 'temporal_grads': Gt,
# 'chan_weights': torch.squeeze(p['separable_conv.depthwise.weight']).cpu()}
sub_dict= dict()
sub_dict = {'filt_beg_freq': filt_beg_freq, 'filt_end_freq': filt_end_freq,
'corr': corr1, 'corr_rho': corr_rho1, 'filter_grads': G.cpu().numpy(), 'temporal_grads': Gtemp.cpu().numpy(),
'chan_weights': torch.squeeze(p['separable_conv.depthwise.weight']).cpu().numpy(),
'target_rt_test': targetlist, 'delta_test': predictedlist, 'alpha_test': predictedlist_alpha,
'target_rt_train': np.array(train_target), 'delta_train': np.array(out) , 'alpha_train':np.array(out_alpha)
}
# savemat(resultpath + '/%s' % finalsubIDs[s][0:-1]+ '_results' + postname + '.mat', sub_dict)
my_file = open(resultpath + f'/%s' % finalsubIDs[s][0:-1]+ '_results' + postname +'.pkl', 'wb')
pickle.dump(sub_dict, my_file)
my_file.close()
from topo import *
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# plt.show()
# filter analysis
goodchan = chansets_new()
def getweights(i):
filt_ind = order[i]
weight1 = p['separable_conv.depthwise.weight'][filt_ind * 2:(filt_ind * 2 + 2), :, :, :]
weight1 = torch.squeeze(weight1.cpu())
maxweight1 = np.argmax(weight1, axis=1)
chan0, chan1 = goodchan[maxweight1[0]], goodchan[maxweight1[1]]
print(chan0, chan1)
return weight1
weight1 = getweights(-1)
w = weight1.argsort(axis=1)
axin11 = ax4.inset_axes([0.01, 0.2, 0.3, 0.6])
axin12 = ax4.inset_axes([0.35, 0.2, 0.3, 0.6])
axin13 = ax4.inset_axes([0.7, 0.2, 0.3, 0.6])
plottopo(weight1[0, :].reshape(num_chan, 1), axin11, w[:, -1], 'red', 12)
plottopo(weight1[0, :].reshape(num_chan, 1), axin11, w[:, -2], 'red', 10)
plottopo(weight1[0, :].reshape(num_chan, 1), axin11, w[:, -3], 'red', 8)
axin11.set_title('1st filter weights')
weight2 = getweights(-2)
w2 = weight2.argsort(axis=1)
plottopo(weight2[0, :].reshape(num_chan, 1), axin12, w2[:, -1], 'darkorange', 12)
plottopo(weight2[0, :].reshape(num_chan, 1), axin12, w2[:, -2], 'darkorange', 10)
plottopo(weight2[0, :].reshape(num_chan, 1), axin12, w2[:, -3], 'darkorange', 8)
axin12.set_title('2nd filter weights')
weight3 = getweights(-3)
w3 = weight3.argsort(axis=1)
    plottopo(weight3[0, :].reshape(num_chan, 1), axin13, w3[:, -1], 'turquoise', 12)
    plottopo(weight3[0, :].reshape(num_chan, 1), axin13, w3[:, -2], 'turquoise', 10)
    plottopo(weight3[0, :].reshape(num_chan, 1), axin13, w3[:, -3], 'turquoise', 8)
axin13.set_title('3rd filter weights')
if compute_likelihood is True:
ax4.clear()
axin11 = ax4.inset_axes([0.01, 0.3, 0.9, 0.5])
axin12 = ax4.inset_axes([0.01, 0.1, 0.9, 0.5])
axin11.set_axis_off()
axin12.set_axis_off()
from my_wfpt import neg_wfpt
rt_train = sub_dict['target_rt_train']
rt_test = sub_dict['target_rt_test']
drift_train = sub_dict['delta_train']
alpha_train = sub_dict['alpha_train']
drift_test = sub_dict['delta_test']
alpha_test = sub_dict['alpha_test']
ndt = np.min(rt_train) * 0.93
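        # The blocks below compare summed negative WFPT log-likelihoods under different
        # parameterizations (median vs. trial-wise drift delta and boundary alpha),
        # first on training RTs and then on held-out test RTs.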
ll_trial_trainRT = []
for i, j in enumerate(rt_train):
l = neg_wfpt(j, drift_train[i], ndt, alpha_train[i])
# print(l)
ll_trial_trainRT += [l]
ll_trial_trainRT = np.sum(ll_trial_trainRT)
ll_median_trainRT = []
for i, j in enumerate(rt_train):
l = neg_wfpt(j, np.median(np.abs(drift_train)), ndt, np.median(alpha_train))
# print(l)
ll_median_trainRT += [l]
ll_median_trainRT = np.sum(ll_median_trainRT)
ll_trial_drift_trainRT = []
for i, j in enumerate(rt_train):
l = neg_wfpt(j, drift_train[i], ndt, np.median(alpha_train))
# print(l)
ll_trial_drift_trainRT += [l]
ll_trial_drift_trainRT = np.sum(ll_trial_drift_trainRT)
ll_trial_alpha_trainRT = []
for i, j in enumerate(rt_train):
l = neg_wfpt(j, np.median(np.abs(drift_train)), ndt, alpha_train[i])
# print(l)
ll_trial_alpha_trainRT += [l]
ll_trial_alpha_trainRT = np.sum(ll_trial_alpha_trainRT)
############## test data ##################################################
ll_median_train_on_testRT = [] # median drift and rt on test
for i, j in enumerate(rt_test):
l = neg_wfpt(j, np.median(np.abs(drift_train)), ndt, np.median(alpha_train))
# print(l)
ll_median_train_on_testRT += [l]
ll_median_train_on_testRT = np.sum(ll_median_train_on_testRT)
ll_trial_train_on_testRT = [] # trial drift and alpha on test
for i, j in enumerate(rt_test):
l = neg_wfpt(j, drift_test[i], ndt, alpha_test[i])
# print(l)
ll_trial_train_on_testRT += [l]
ll_trial_train_on_testRT = np.sum(ll_trial_train_on_testRT)
ll_trial_drift_testRT = [] # trial drift and alpha on test
for i, j in enumerate(rt_test):
l = neg_wfpt(j, drift_test[i], ndt, np.median(alpha_test))
# print(l)
ll_trial_drift_testRT += [l]
ll_trial_drift_testRT = np.sum(ll_trial_drift_testRT)
ll_trial_alpha_testRT = [] # trial drift and alpha on test
for i, j in enumerate(rt_test):
l = neg_wfpt(j, np.median(np.abs(drift_test)), ndt, alpha_test[i])
# print(l)
ll_trial_alpha_testRT += [l]
ll_trial_alpha_testRT = np.sum(ll_trial_alpha_testRT)
ll_median_testRT = [] # trial drift and alpha on test
for i, j in enumerate(rt_test):
l = neg_wfpt(j, np.median(np.abs(drift_test)), ndt, np.median(alpha_test))
# print(l)
ll_median_testRT += [l]
ll_median_testRT = np.sum(ll_median_testRT)
ax4.set_axis_off()
dataTrain = [ ll_median_trainRT, ll_trial_trainRT,ll_trial_drift_trainRT, ll_trial_alpha_trainRT]
dataTrain = [[round(i,3) for i in dataTrain]]
rows = ['Sum of NLL']
axin11.set_axis_off()
columns = (r'$\overline{\delta}, \overline{\alpha}\;| RT_{train}$', r'$\delta_i, \alpha_i | RT_{train}$',
r'$\delta_i, \overline{\alpha}\;| RT_{train}$', r'$\overline{\delta}, \alpha_i\;| RT_{train}$')
the_table = axin11.table(cellText=dataTrain,
# rowLabels=rows,
colLabels=columns, cellLoc='center', loc='top', fontsize=16)
the_table.set_fontsize(14)
cellDict = the_table.get_celld()
for i in range(0, len(columns)):
cellDict[(0, i)].set_height(.4)
for j in range(1, len(dataTrain) + 1):
cellDict[(j, i)].set_height(.3)
# cellDict[(1, -1)].set_height(.1)
cellDict[(0, np.argmin(dataTrain[0]))].set_facecolor("#56b5fd")
cellDict[(1, np.argmin(dataTrain[0]))].set_facecolor("#56b5fd")
the_table.set_fontsize(16)
the_table.scale(1.2, 1.2)
dataTest = [ll_median_train_on_testRT, ll_median_testRT, ll_trial_train_on_testRT,ll_trial_drift_testRT, ll_trial_alpha_testRT]
dataTest = [[round(i, 3) for i in dataTest]]
columnsT = (r'$\overline{\delta}, \overline{\alpha}\;| RT_{test}$',
r'$\overline{\hat{\delta}}, \overline{\hat{\alpha}}\;| RT_{test}$',
r'$\hat{\delta_i}, \hat{\alpha_i} | RT_{test}$',
                r'$\hat{\delta_i}, \overline{\hat{\alpha}}\;| RT_{test}$',
r'$\overline{\hat{\delta}}, \hat{\alpha_i}\;| RT_{test}$')
# rows = ['Sum of NLL']
axin12.set_axis_off()
test_table = axin12.table(cellText=dataTest,
# rowLabels=rows,
colLabels=columnsT, cellLoc='center', loc='center', fontsize=16)
test_table.set_fontsize(16)
cellDict = test_table.get_celld()
for i in range(0, len(columnsT)):
cellDict[(0, i)].set_height(.4)
for j in range(1, len(dataTest) + 1):
cellDict[(j, i)].set_height(.3)
# cellDict[(1, -1)].set_height(.1)
cellDict[(0, 0)].set_facecolor("lightgrey")
cellDict[(1, 0)].set_facecolor("lightgrey")
cellDict[(0, 1+np.argmin(dataTest[0][1:]))].set_facecolor("#56b5fd")
cellDict[(1, 1+np.argmin(dataTest[0][1:]))].set_facecolor("#56b5fd")
test_table.set_fontsize(16)
test_table.scale(1.2, 1.2)
# axin11.set_title('Sum of Negative Log Likelihood')
axin12.text(0,-1, r'$\overline{\delta}, \overline{\alpha}$ are median of trial estimates from training data,'
+'\n'+r'$\delta_i, \alpha_i$ are trial estimates from training data,'
+'\n'+ r'$\overline{\hat{\delta}}, \overline{\hat{\alpha_i}}$ are median of trial estimates from testing data,'
+'\n'+ r'$\hat{\delta_i}, \hat{\alpha_i}$ are trial estimates from testing data'
+'\nblue indicates the best model', fontsize = 12)
axin11.text(0,2,'Sum of Negative Log Likelihood')
fig.tight_layout()
# fig.savefig(figurepath + '/%s' % finalsubIDs[s][0:-1] + postname + '.png')
plt.show()
# savemat(modelpath + '/results_motorsets' + postname + '.mat', results)
```
|
{
"source": "jennyqujy/WikiScraper",
"score": 3
}
|
#### File: WikiScraper/Tests/graph_test.py
```python
import unittest
import sys
sys.path.insert(0, '/Users/Jenny/Desktop/cs242/Assignment2.0/Scraper/Scraper')
from Graph.initialize import InitGraph
from Graph.initialize import GraphQuery
class TestGraph(unittest.TestCase):
def test_getOldestActor(self):
self.assertEqual(GraphQuery.getOldestActor(GraphQuery(),InitGraph.actorNodes),'<NAME>')
def test_getAllActorsInYear(self):
self.assertTrue(GraphQuery.getAllActorsInYear(GraphQuery(),InitGraph.actorNodes,1972),['Jude Law','<NAME>'])
def test_getAllFilmsInYear(self):
self.assertEqual(GraphQuery.getAllFilmsInYear(GraphQuery(),InitGraph.filmNodes,2013),['Oblivion'])
def test_getFilmValue(self):
self.assertEqual(GraphQuery.getFilmValue(GraphQuery(), InitGraph.filmNodes,'Oblivion'),'$286.2 million[5]')
def test_getActorAge(self):
self.assertEqual(GraphQuery.getActorAge(GraphQuery(),InitGraph.actorNodes,'Jude Law'),'45')
def test_getActorsInFilm(self):
self.assertEqual(GraphQuery.getActorsInFilm(GraphQuery(),InitGraph.filmNameDict,'Oblivion'),['<NAME>','<NAME>', '<NAME>','<NAME>','<NAME>','<NAME>'])
def test_getActorCastings(self):
# self.assertEqual(GraphQuery.getActorCastings(Init.actorNodes,'Jude Law'),'45')
pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JennyRemolina/api-projex",
"score": 2
}
|
#### File: api-projex/api/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
import datetime
from users.models import CustomUser
# Create your models here.
def project_directory_path(instance, filename):
# file will be uploaded to MEDIA_URL/project_<id>/
return 'project_{0}/{1}'.format(instance.id, filename)
def tasks_directory_path(instance, filename):
# file will be uploaded to MEDIA_URL/task_<id>/
return 'task_{0}/{1}'.format(instance.id, filename)
# Preferences (user_id, language, color_schema)
class Preferences(models.Model):
language = models.CharField(max_length=2)
color_schema = models.CharField(max_length=1)
user = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
def __str__(self):
return self.language
# Project(id, title, description, attachment_id, creator_id)
class Project(models.Model):
title = models.CharField(max_length=30)
description = models.TextField()
project_photo = models.ImageField(
upload_to=project_directory_path, blank=True)
creator = models.ForeignKey(
CustomUser, on_delete=models.CASCADE, related_name="project_creator")
assignee = models.ManyToManyField(
CustomUser,
through='UserProject'
)
# Task(id, title, description, due_date, priority, attachment_id, board_id)
class Task(models.Model):
title = models.CharField(max_length=30)
description = models.TextField()
due_date = models.DateField(auto_now=False, auto_now_add=False)
priority = models.IntegerField()
task_file = models.FileField(upload_to=tasks_directory_path, blank=True)
board = models.ForeignKey('Board', on_delete=models.CASCADE)
assigned_users = models.ManyToManyField(
CustomUser,
through='Assignee'
)
# Assignee(user_id, task_id)
class Assignee(models.Model):
user = models.ForeignKey(
CustomUser, on_delete=models.CASCADE, related_name="user_to_task")
task = models.ForeignKey(
'Task', on_delete=models.CASCADE, related_name="task_to_user")
# Board(id, title, project_id)
class Board(models.Model):
title = models.CharField(max_length=10)
project = models.ForeignKey('Project', on_delete=models.CASCADE)
# Comment(id, text, task_id, creator_id)
class Comment(models.Model):
text = models.TextField()
task = models.ForeignKey('Task', on_delete=models.CASCADE)
creator = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
# Notification(id, type, text, notifier_id)
class Notification(models.Model):
notifier_type = models.CharField(max_length=30)
notifier = models.IntegerField()
receivers = models.ManyToManyField(
CustomUser,
through='UserNotification',
through_fields=('notification', 'user')
)
# UserProject(user_id, project_id, status, role)
class UserProject(models.Model):
user = models.ForeignKey(
CustomUser, on_delete=models.CASCADE, related_name="user_to_project")
project = models.ForeignKey(
'Project', on_delete=models.CASCADE, related_name="project_to_user")
role = models.CharField(max_length=30)
status = models.CharField(max_length=10)
# UserNotification(user_id, notification_id)
class UserNotification(models.Model):
user = models.ForeignKey(
CustomUser, on_delete=models.CASCADE, related_name="user_to_notification")
notification = models.ForeignKey(
'Notification', on_delete=models.CASCADE, related_name="notification_to_user")
```
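The models above encode the project → board → task hierarchy with explicit through-tables for membership and assignment. Below is a minimal, illustrative sketch of how these relations might be exercised from a Django shell; it assumes `CustomUser.objects.create_user` is available (consistent with the user tests later in this entry), and all field values are made up.
```python
# Illustrative sketch only; field values are assumptions, not taken from the project.
import datetime

from users.models import CustomUser
from api.models import Project, Board, Task, UserProject

owner = CustomUser.objects.create_user(username="owner", email="owner@example.com")

# Create a project and register its creator as a member via the through-table.
project = Project.objects.create(title="Demo", description="Sample project", creator=owner)
UserProject.objects.create(user=owner, project=project, role="admin", status="active")

# Boards belong to a project; tasks belong to a board.
board = Board.objects.create(title="Backlog", project=project)
Task.objects.create(title="First task", description="Sketch the API", priority=1,
                    due_date=datetime.date(2019, 1, 1), board=board)
```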
#### File: api/tests/test_views.py
```python
from django.test import TestCase, Client
from rest_framework.test import APIClient, APIRequestFactory, APITestCase
from rest_framework import status
from api.models import Project
import json
class APIWrapperClass(APITestCase):
def setUp(self):
self.factory = APIRequestFactory()
self.client = APIClient()
url = '/api/v1/api-token-auth/'
response = self.client.post(url, {'username_or_email':'<EMAIL>', 'password':'<PASSWORD>'}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('token' in response.data)
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)
```
#### File: users/tests/test_login.py
```python
import json
from glob import glob
from io import StringIO
from unittest.mock import patch
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework.authtoken.models import Token
from rest_framework.test import APIRequestFactory,APITestCase
from users.models import CustomUser
class UserLoginAPIViewTestCase(APITestCase):
#url = reverse("users:list")
url = '/api/v1/rest-auth/%s/'
username = "testuser"
email = "<EMAIL>"
password = "<PASSWORD>"
def setUp(self):
self.user = CustomUser.objects.create_user(self.username, self.email, self.password)
def test_authentication_with_valid_data(self):
"""
Test to verify a login post call with user valid data
"""
data = {
"username": self.username,
"email": self.email,
"password": <PASSWORD>,
}
response = self.client.post(self.url%'login', data)
print(response.status_code, response.content)
self.assertEqual(200, response.status_code)
self.assertTrue("key" in json.loads(response.content))
def test_authentication_with_valid_username(self):
"""
Test to verify a token post call with just username
"""
data = {
"username": self.username,
"password": <PASSWORD>,
}
response = self.client.post(self.url%'login', data)
print(response.status_code, response.content)
self.assertEqual(400, response.status_code)
def test_authentication_with_valid_email(self):
"""
Test to verify a token post call with just email
"""
data = {
"email": self.email,
"password": <PASSWORD>,
}
response = self.client.post(self.url%'login', data)
print(response.status_code, response.content)
self.assertEqual(200, response.status_code)
self.assertTrue("key" in json.loads(response.content))
def test_login_logout(self):
"""
Test to verify a login and logout
"""
data = {
"email": self.email,
"password": <PASSWORD>,
}
response = self.client.post(self.url%'login', data)
print(response.status_code, response.content)
self.assertEqual(200, response.status_code)
self.assertTrue("key" in json.loads(response.content))
response = self.client.get(self.url%'logout')
self.assertEqual(200, response.status_code)
self.assertTrue("detail" in json.loads(response.content))
```
#### File: users/tests/test_models.py
```python
from django.test import TestCase
from users.models import CustomUser
# Create your tests here.
class UserTest(TestCase):
""" Test module for User model """
def setUp(self):
CustomUser.objects.create_user(username="pablito", email="<EMAIL>")
CustomUser.objects.create_user(username="tato", email="<EMAIL>")
def test_username(self):
a = CustomUser.objects.get(username="pablito")
self.assertEqual(a.username, "pablito")
```
|
{
"source": "JennySnyk/msticpy",
"score": 2
}
|
#### File: JennySnyk/msticpy/setup.py
```python
import os
import re
import setuptools
def install_requires_rtd(install_list: list) -> list:
"""Return modified install list if installing for ReadtheDocs."""
rtd_exceptions = [
"Kqlmagic",
"azure-cli-core",
"matplotlib",
"statsmodels",
"scipy",
"splunk-sdk",
"seaborn",
]
return [
pkg
for pkg in install_list
if not any(excl_pkg for excl_pkg in rtd_exceptions if excl_pkg in pkg)
]
with open("README.md", "r") as fh:
LONG_DESC = fh.read()
# pylint: disable=locally-disabled, invalid-name
with open("msticpy/_version.py", "r") as fd:
v_match = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE)
__version__ = v_match.group(1) if v_match else "no version"
# pylint: enable=locally-disabled, invalid-name
with open("requirements.txt", "r") as fh:
INSTALL_REQUIRES = fh.readlines()
with open("requirements-dev.txt", "r") as fh:
INSTALL_DEV_REQUIRES = fh.readlines()
def _combine_extras(extras: list) -> list:
return list(
{pkg for name, pkgs in EXTRAS.items() for pkg in pkgs if name in extras}
)
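# Illustrative example: with the EXTRAS table defined below,
# _combine_extras(["splunk", "sumologic"]) returns the de-duplicated union of both
# package lists (the splunk-sdk, sumologic-sdk and openpyxl pins); element order
# is not guaranteed because a set comprehension is used.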
# Extras definitions
EXTRAS = {
"dev": INSTALL_DEV_REQUIRES,
"vt3": ["vt-py>=0.6.1", "vt-graph-api>=1.0.1", "nest_asyncio>=1.4.0"],
"splunk": ["splunk-sdk>=1.6.0"],
"sumologic": ["sumologic-sdk>=0.1.11", "openpyxl>=3.0"],
"kql": ["KqlmagicCustom[jupyter-extended]>=0.1.114.dev25"],
"_azure_core": [
"azure-mgmt-compute>=4.6.2",
"azure-mgmt-core>=1.2.1",
"azure-mgmt-monitor>=2.0.0",
"azure-mgmt-network>=2.7.0",
"azure-mgmt-resource>=2.2.0",
"azure-storage-blob>=12.5.0",
"azure-mgmt-resourcegraph>=8.0.0",
],
"keyvault": [
"azure-keyvault-secrets>=4.0.0",
"azure-mgmt-keyvault>=2.0.0",
"keyring>=13.2.1", # needed by Key Vault package
"msrestazure>=0.6.0",
],
"ml": ["scikit-learn>=0.20.2", "scipy>=1.1.0", "statsmodels>=0.11.1"],
"sql2kql": ["moz_sql_parser>=4.5.0,<=4.11.21016"],
}
extras_all = [
extra for name, extras in EXTRAS.items() for extra in extras if name != "dev"
]
EXTRAS["all"] = extras_all
# Create combination extras
EXTRAS["all"] = sorted(
_combine_extras(list({name for name in EXTRAS if name != "dev"}))
)
EXTRAS["azure"] = sorted(_combine_extras(["_azure_core", "keyvault"]))
EXTRAS["test"] = sorted(_combine_extras(["all", "dev"]))
EXTRAS["azsentinel"] = sorted(_combine_extras(["azure", "kql", "keyvault"]))
EXTRAS["azuresentinel"] = sorted(_combine_extras(["azure", "kql", "keyvault"]))
# If ReadTheDocs build, remove a couple of problematic packages
# (we ask Sphinx to mock these in the import)
if os.environ.get("MP_RTD_BUILD"):
INSTALL_REQUIRES = install_requires_rtd(INSTALL_REQUIRES)
setuptools.setup(
name="msticpy",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="MSTIC Security Tools",
license="MIT License",
long_description=LONG_DESC,
long_description_content_type="text/markdown",
url="https://github.com/microsoft/msticpy",
project_urls={
"Documentation": "https://msticpy.readthedocs.io",
"Code": "https://github.com/microsoft/msticpy",
},
python_requires=">=3.6",
packages=setuptools.find_packages(exclude=["tests", "tests.*", "*.tests.*"]),
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
],
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS,
keywords=[
"security",
"azure",
"sentinel",
"mstic",
"cybersec",
"infosec",
"cyber",
"cybersecurity",
"jupyter",
"notebooks",
"SOC",
"hunting",
],
zip_safe=False,
include_package_data=True,
)
```
#### File: datamodel/entities/test_entity.py
```python
import pytest
import pytest_check as check
import pandas as pd
from msticpy.datamodel import entities
from msticpy.datamodel.entities import Host, OSFamily, Url, IpAddress
from msticpy.datamodel.pivot import Pivot
__author__ = "<NAME>"
# pylint: disable=redefined-outer-name
@pytest.fixture(scope="module")
def fixture_name():
"""Fixture_docstring."""
def test_entity_merge():
"""Entity comparison and merging."""
host1 = Host(HostName="host1", DnsDomain="contoso.com", OSFamily=OSFamily.Windows)
host2 = Host(HostName="host1", DnsDomain="contoso.com", IsDomainJoined=True)
host3 = Host(HostName="host3", DnsDomain="contoso.com")
check.not_equal(host1, host2)
check.not_equal(host1, host3)
check.is_true(host1.is_equivalent(host2))
check.is_false(host1.is_equivalent(host3))
check.is_false(host2.is_equivalent(host3))
check.is_true(host1.can_merge(host2))
check.is_false(host1.can_merge(host3))
host4 = host1.merge(host2)
check.equal(host4.HostName, "host1")
check.equal(host4.OSFamily, OSFamily.Windows)
check.equal(host4.DnsDomain, "contoso.com")
check.is_true(host4.IsDomainJoined)
def test_url():
"""Test URL get_componennts."""
URL = "https://www.contoso.com/path#frag?query=xxx"
url = Url(Url=URL)
check.equal(url.Url, URL)
check.equal(url.host, "www.contoso.com")
check.equal(url.scheme, "https")
check.equal(url.path, "/path")
check.equal(url.fragment, "frag?query=xxx")
url.Url = "https://www.contoso2.com/path2#frag2?query=xxx"
check.equal(url.host, "www.contoso2.com")
check.equal(url.scheme, "https")
check.equal(url.path, "/path2")
check.equal(url.fragment, "frag2?query=xxx")
def test_pivot_shortcuts():
"""Test pivot function shortcut creation and deletion."""
Pivot()
check.is_true(hasattr(IpAddress, "util"))
util_ctnr = getattr(IpAddress, "util")
func = getattr(util_ctnr, "ip_type")
IpAddress.make_pivot_shortcut("util.ip_type", "test_iptype")
check.is_true(hasattr(IpAddress, "test_iptype"))
check.equal(func, IpAddress.test_iptype)
ip_addr = IpAddress(Address="192.168.1.2")
ip_df = ip_addr.test_iptype()
check.is_instance(ip_df, pd.DataFrame)
with pytest.raises(AttributeError):
IpAddress.make_pivot_shortcut("util.not_defined", "test_iptype")
with pytest.raises(TypeError):
IpAddress.make_pivot_shortcut("properties", "test_iptype")
with pytest.raises(AttributeError):
IpAddress.make_pivot_shortcut("util.ip_type", "test_iptype")
IpAddress.make_pivot_shortcut("util.ip_type", "test_iptype", overwrite=True)
check.is_true(hasattr(IpAddress, "test_iptype"))
check.equal(func, IpAddress.test_iptype)
IpAddress.del_pivot_shortcut("test_iptype")
check.is_false(hasattr(IpAddress, "test_iptype"))
with pytest.raises(AttributeError):
IpAddress.del_pivot_shortcut("test_iptype")
with pytest.raises(TypeError):
IpAddress.del_pivot_shortcut("properties")
def test_entity_instantiation():
"""Test that we can instantiate all entities."""
for attrib in dir(entities):
attr_cls = getattr(entities, attrib)
if (
isinstance(attr_cls, type)
and issubclass(attr_cls, entities.Entity)
and attr_cls != entities.Entity
):
ent_obj = attr_cls()
check.greater(len(ent_obj.properties), 0)
# Check that we can access properties without incident
for attr in (attr for attr in dir(ent_obj) if not attr.startswith("_")):
getattr(ent_obj, attr)
```
|
{
"source": "JennySong99/dnaas",
"score": 2
}
|
#### File: src/service/dexnet_processor.py
```python
import os
import numpy as np
import logging
import meshpy
from meshpy import ObjFile
from autolab_core import RigidTransform
import dexnet.database.mesh_processor as mp
import dexnet.grasping.grasp_sampler as gs
import dexnet.grasping.grasp_quality_config as gqc
import dexnet.grasping.grasp_quality_function as gqf
from dexnet.grasping import GraspableObject3D, ParametrizedParallelJawGripper, GraspCollisionChecker
import consts
PROCESS_NAME = None
def grasps_to_dicts(grasps, metrics):
grasps_list = []
for grasp, metric in sorted(zip(grasps, metrics), key=lambda x: -x[1]):
grasps_list.append({'center' : list(grasp.center),
'axis' : list(grasp.axis),
'open_width' : grasp.max_grasp_width_,
'metric_score' : metric})
return grasps_list
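# Each element of the returned list has the form
# {'center': [...], 'axis': [...], 'open_width': w, 'metric_score': q},
# sorted so that the highest-scoring grasp comes first.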
def load_mesh(mesh_id, config, rescale_mesh = False):
# set up filepath from mesh id (this is where the service dumps the mesh)
filepath = os.path.join(consts.MESH_CACHE_DIR, mesh_id) + '.obj'
# Initialize mesh processor.
mesh_processor = mp.MeshProcessor(filepath, consts.MESH_CACHE_DIR)
# Run through MP steps manually to make things easier
mesh_processor._load_mesh()
mesh_processor.mesh_.density = config['obj_density']
# _clean_mesh
mesh_processor._remove_bad_tris()
mesh_processor._remove_unreferenced_vertices()
# # standardize pose, recover transform
# verts_old = mesh_processor.mesh_.vertices.copy()
# mesh_processor._standardize_pose()
# verts_new = mesh_processor.mesh_.vertices
# # Transform recovery
# MAT_SIZE = min(verts_old.shape[0], 300)
# tmat_rec = np.dot(np.linalg.pinv(np.hstack((verts_old[:MAT_SIZE], np.ones((MAT_SIZE, 1)) ))),
# np.hstack((verts_new[:MAT_SIZE], np.ones((MAT_SIZE, 1)) ))).T
# rotation = tmat_rec[:3, :3]
# translation = tmat_rec[:3, 3]
# transform = RigidTransform(rotation=rotation, translation=translation)
# scale = 1.0
if rescale_mesh: # config['rescale_objects'] <- local config, current use case is pass in as arg
mesh_processor._standardize_pose()
mesh_processor._rescale_vertices(config['obj_target_scale'], config['obj_scaling_mode'], config['use_uniform_com'])
mesh_processor.sdf_ = None
if config['generate_sdf']:
mesh_processor._generate_sdf(config['path_to_sdfgen'], config['sdf_dim'], config['sdf_padding'])
mesh_processor._generate_stable_poses(config['stp_min_prob'])
mesh, sdf, stable_poses = (mesh_processor.mesh, mesh_processor.sdf, mesh_processor.stable_poses,)
# Make graspable
graspable = GraspableObject3D(sdf = sdf,
mesh = mesh,
key = mesh_id,
model_name = mesh_processor.obj_filename,
mass = config['default_mass'],
convex_pieces = None)
# resave mesh to the proc file because the new CoM thing translates the mesh
ObjFile(os.path.join(consts.MESH_CACHE_DIR, mesh_id) + '_proc.obj').write(graspable.mesh)
return graspable, stable_poses
def sample_grasps(graspable, gripper, config):
""" Sample grasps and compute metrics for given object, gripper, and stable pose """
# create grasp sampler
if config['grasp_sampler'] == 'antipodal':
sampler = gs.AntipodalGraspSampler(gripper, config)
elif config['grasp_sampler'] == 'mesh_antipodal':
sampler = gs.MeshAntipodalGraspSampler(gripper, config)
elif config['grasp_sampler'] == 'gaussian':
sampler = gs.GaussianGraspSampler(gripper, config)
elif config['grasp_sampler'] == 'uniform':
sampler = gs.UniformGraspSampler(gripper, config)
# sample grasps
grasps = sampler.generate_grasps(graspable, max_iter=config['max_grasp_sampling_iters'])
return grasps
def filter_grasps_generic(graspable, grasps, gripper, progress_reporter=lambda x: None):
progress_reporter(0)
collision_checker = GraspCollisionChecker(gripper)
collision_checker.set_graspable_object(graspable)
collision_free_grasps = []
colliding_grasps = []
for k, grasp in enumerate(grasps):
progress_reporter(float(k) / len(grasps))
collision_free = False
for rot_idx in range(0, consts.GENERAL_COLLISION_CHECKING_NUM_OFFSETS):
rotated_grasp = grasp.grasp_y_axis_offset(rot_idx * consts.GENERAL_COLLISION_CHECKING_PHI)
collides = collision_checker.collides_along_approach(rotated_grasp,
consts.APPROACH_DIST,
consts.DELTA_APPROACH)
if not collides:
collision_free = True
collision_free_grasps.append(grasp)
break
if not collision_free:
colliding_grasps.append(grasp)
return collision_free_grasps, colliding_grasps
def filter_grasps_stbp(graspable, grasps, gripper, stable_poses, progress_reporter=lambda x: None):
progress_reporter(0)
collision_checker = GraspCollisionChecker(gripper)
collision_checker.set_graspable_object(graspable)
stbp_grasps_indices = []
stbp_grasps_aligned = []
for k, stable_pose in enumerate(stable_poses):
# set up collision checker with table
T_obj_stp = RigidTransform(rotation=stable_pose.r, from_frame='obj', to_frame='stp')
T_obj_table = graspable.mesh.get_T_surface_obj(T_obj_stp,
delta=consts.COLLISION_CONFIG['table_offset']).as_frames('obj', 'table')
T_table_obj = T_obj_table.inverse()
collision_checker.set_table(consts.COLLISION_CONFIG['table_mesh_filename'], T_table_obj)
aligned_grasps = [grasp.perpendicular_table(stable_pose) for grasp in grasps]
this_stbp_grasps_indices = []
this_stbp_grasps_aligned = []
for idx, aligned_grasp in enumerate(aligned_grasps):
progress_reporter(float(idx) / (len(grasps) * len(stable_poses)) + float(k) / len(stable_poses))
_, grasp_approach_table_angle, _ = aligned_grasp.grasp_angles_from_stp_z(stable_pose)
perpendicular_table = (np.abs(grasp_approach_table_angle) < consts.MAX_GRASP_APPROACH_TABLE_ANGLE)
if not perpendicular_table:
continue
# check whether any valid approach directions are collision free
for phi_offset in consts.PHI_OFFSETS:
rotated_grasp = aligned_grasp.grasp_y_axis_offset(phi_offset)
collides = collision_checker.collides_along_approach(rotated_grasp, consts.APPROACH_DIST, consts.DELTA_APPROACH)
if not collides:
this_stbp_grasps_indices.append(idx)
this_stbp_grasps_aligned.append(aligned_grasp)
break
stbp_grasps_indices.append(this_stbp_grasps_indices)
stbp_grasps_aligned.append(this_stbp_grasps_aligned)
return stbp_grasps_indices, stbp_grasps_aligned
def compute_metrics(graspable, grasps, gripper, metric_spec, progress_reporter=lambda x: None):
""" Ripped from API to compute only collision-free grasps and make progress logging easier """
progress_reporter(0)
# compute grasp metrics
grasp_metrics = []
# create metric
metric_config = gqc.GraspQualityConfigFactory.create_config(metric_spec)
# compute metric
# add params from gripper (right now we don't want the gripper involved in quality computation)
setattr(metric_config, 'force_limits', gripper.force_limit)
setattr(metric_config, 'finger_radius', gripper.finger_radius)
# create quality function
quality_fn = gqf.GraspQualityFunctionFactory.create_quality_function(graspable, metric_config)
# compute quality for each grasp
for k, grasp in enumerate(grasps):
progress_reporter(float(k) / len(grasps))
q = quality_fn(grasp)
grasp_metrics.append(q.quality)
return grasp_metrics
def preprocess_mesh(mesh_id, params, progress_reporter_big=lambda x: None, progress_reporter_small=lambda x: None):
progress_reporter_big('preprocessing')
gripper_params = params['gripper']
config = {}
consts._deep_update_config(config, consts.CONFIG)
if 'config' in params.keys():
# Expose all params disabled for now
#config_updates = params['config']
#consts._deep_update_config(config, config_updates)
#config['cache_dir'] = consts.CONFIG['cache_dir']
if 'friction_coef' in params['config'].keys():
for key in config['metrics'].keys():
config['metrics'][key]['friction_coef'] = params['config']['friction_coef']
metric_used = consts.METRIC_USED
if 'metric' in params.keys():
metric_used = params['metric']
rescale_mesh = False
if 'rescale_mesh' in params.keys():
rescale_mesh = params['rescale_mesh']
# Update gripper params with defaults
for key in consts.GRIPPER_PARAM_DEFAULTS:
if key not in gripper_params:
gripper_params[key] = consts.GRIPPER_PARAM_DEFAULTS[key]
graspable, stable_poses = load_mesh(mesh_id, config, rescale_mesh = rescale_mesh)
# Load gripper
gripper = ParametrizedParallelJawGripper.load('generic_{}'.format(PROCESS_NAME), gripper_dir=consts.GRIPPER_DIR)
gripper.update(gripper_params['fingertip_x'],
gripper_params['fingertip_y'],
gripper_params['palm_depth'],
gripper_params['width'])
progress_reporter_big('sampling grasps')
grasps = sample_grasps(graspable, gripper, config)
progress_reporter_big('collision checking')
collision_free_grasps, colliding_grasps = filter_grasps_generic(graspable, grasps, gripper, progress_reporter=progress_reporter_small)
progress_reporter_big('collision checking for stable poses')
stbp_grasps_indices, stbp_grasps_aligned = filter_grasps_stbp(graspable, collision_free_grasps, gripper, stable_poses, progress_reporter=progress_reporter_small)
progress_reporter_big('computing metrics')
metric_spec = config['metrics'][metric_used]
grasp_metrics = compute_metrics(graspable, collision_free_grasps, gripper, metric_spec, progress_reporter=progress_reporter_small)
# Process transforms into a form usable by the web api and return them
stbp_trans = {}
stbp_grasp = {}
for pose_num, (stable_pose, one_stbp_grasps_indices, one_stbp_grasps_aligned) in enumerate(zip(stable_poses, stbp_grasps_indices, stbp_grasps_aligned)):
T_obj_stp = RigidTransform(rotation=stable_pose.r, from_frame='obj', to_frame='stp')
T_obj_table = graspable.mesh.get_T_surface_obj(T_obj_stp) # T_obj_table without offset, save this to send to client
transform_dict = {
'translation' : list(T_obj_table.translation),
'quaternion' : list(T_obj_table.quaternion),
'probablility' : float(stable_pose.p)
}
stbp_trans[str(pose_num)] = transform_dict
stbp_grasp[str(pose_num)] = grasps_to_dicts(one_stbp_grasps_aligned, [grasp_metrics[idx] for idx in one_stbp_grasps_indices])
grasps = grasps_to_dicts(collision_free_grasps, grasp_metrics)
return grasps, stbp_trans, stbp_grasp
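# preprocess_mesh returns three values:
#   grasps     - list of collision-free grasp dicts (see grasps_to_dicts above)
#   stbp_trans - dict keyed by stable-pose index, each value holding the pose's
#                translation, quaternion and probability
#   stbp_grasp - dict keyed by stable-pose index with the table-aligned grasp dicts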
```
#### File: src/service/dexnet_worker.py
```python
import multiprocessing, Queue
from threading import Thread
import os, shutil
import traceback
import consts
class _DexNetWorker(multiprocessing.Process):
""" Dexnet worker process
Used by DexNetWorker for multiprocessing/request based Dex-Net work
"""
def __init__(self, process_name, gripper_dir):
# Call super initializer
super(_DexNetWorker, self).__init__()
# Queues for interprocess management
self._res_q = multiprocessing.Queue() # Result queue, for writing to dict-likes in main process
self._req_q = multiprocessing.Queue(1) # Request queue, getting requests from main process
self._busy = multiprocessing.Queue(1) # Busy flag, blocks requests
self._call_ret = multiprocessing.Queue(1) # Return queue for function calls
# Set attrs
self._gripper_dir = gripper_dir
# Set name
self.name = str(process_name)
# Copy gripper to prevent collisions
gripper_dir_this = os.path.join(gripper_dir, 'generic_{}'.format(self.name))
gripper_dir_generic = os.path.join(gripper_dir, 'generic')
shutil.rmtree(gripper_dir_this, ignore_errors=True)
shutil.copytree(gripper_dir_generic, gripper_dir_this)
def run(self):
import dexnet_processor
dexnet_processor.PROCESS_NAME = self.name
# Setup persistent vars
self._granular_progress = 0
# Setup logging
import logging
logging.basicConfig(filename=os.path.join(consts.CACHE_DIR, 'logging_{}.log'.format(self.name)),
level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filemode='a')
try:
while True:
try:
req = self._req_q.get(block = True, timeout=10)
logging.debug("Request recieved")
except Queue.Empty:
req = None
if req is None:
pass
elif req[0] == "TERM":
logging.debug("Termination request recieved, exiting")
return
elif req[0] == "PROGRESS":
logging.debug("Progress request recieved, returning progress of {}".format(self._granular_progress))
self._call_ret.put(self._granular_progress)
elif req[0] == "PROCESS":
self.busy = True
logging.debug("Job request recieved")
mesh_id = req[1][0]
args = req[1][1]
logging.info("Request for mesh of ID {} currently processing".format(mesh_id))
def executor_fn(*args):
try:
self.preprocess_mesh_internal(*args)
except MemoryError:
self.ret('progress', mesh_id, 'error')
self.ret('errors_handled', mesh_id, 'MemoryError')
except ValueError as e:
if 'array is too big' in e.message:
self.ret('progress', mesh_id, 'error')
self.ret('errors_handled', mesh_id, 'MemoryError')
else:
self.ret('progress', mesh_id, 'error')
self.ret('errors', mesh_id, traceback.format_exc())
except Exception:
self.ret('progress', mesh_id, 'error')
self.ret('errors', mesh_id, traceback.format_exc())
self.busy = False
executor_thread = Thread(target=executor_fn, args=(mesh_id,) + args)
executor_thread.start()
else:
self.ret('errors', 'thread_{}'.format(self.name), "Invalid request {}".format(req[0]))
if os.getppid() == 1:
logging.info("Parent process died, exiting")
logging.info("")
return
except Exception:
self.ret('errors', 'thread_{}'.format(self.name), traceback.format_exc())
@property
def busy(self):
return self._busy.full()
@busy.setter
def busy(self, value):
if value:
if not self.busy:
self._busy.put(1)
else:
if self.busy:
self._busy.get()
def ret(self, destination, mesh_id, result):
self._res_q.put((destination,
(mesh_id,
result)
))
def req(self, todo, mesh_id, *args):
self._req_q.put((todo,
(mesh_id,
args)
), block=True)
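# Request tuples placed on the queue have the form (todo, (mesh_id, args)), e.g.
#   ("PROCESS", ("mesh_0001", (params,)))  - start preprocessing a mesh
#   ("PROGRESS", (None, ()))               - ask for the granular progress value
#   ("TERM", (None, (None,)))              - ask the worker process to exit
# run() above dispatches on the first element of each tuple; the mesh id shown is illustrative.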
def preprocess_mesh_internal(self, mesh_id, params):
import dexnet_processor
def progress_reporter_big(message):
self.ret('progress', mesh_id, message)
def progress_reporter_small(percent):
self._granular_progress = percent
grasps, stbp_trans, stbp_grasp = dexnet_processor.preprocess_mesh(mesh_id, params, progress_reporter_big, progress_reporter_small)
self.ret('filtered_grasps', mesh_id, grasps)
self.ret('stbp_trans', mesh_id, stbp_trans)
self.ret('stbp_grasp', mesh_id, stbp_grasp)
self.ret('progress', mesh_id, 'done')
class DexNetWorker(object):
""" Dex-net worker class
"""
def __init__(self, process_name, gripper_dir=consts.GRIPPER_DIR):
self.process_name = str(process_name)
self.gripper_dir = gripper_dir
self._worker = _DexNetWorker(self.process_name, self.gripper_dir)
self._worker.daemon = True
self._worker.start()
@property
def busy(self):
return self._worker.busy
@property
def alive(self):
return self._worker.is_alive()
def restart(self):
self._worker.req("TERM", None, None)
self._worker.join(1)
self._worker = _DexNetWorker(self.process_name, self.gripper_dir)
self._worker.daemon = True
self._worker.start()
@property
def progress(self):
self._worker.req('PROGRESS', None)
return self._worker._call_ret.get(block=True)
def preprocess_mesh(self, mesh_id, params):
self._worker.busy = True
self._worker.req("PROCESS", mesh_id, params)
@property
def has_ret(self):
return not self._worker._res_q.empty()
@property
def ret(self):
return self._worker._res_q.get(block=False)
```
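The wrapper above hides the queue protocol behind a few properties. A minimal driver sketch is shown below; the module path, mesh id, gripper dimensions and polling interval are illustrative assumptions rather than values taken from the service code.
```python
# Illustrative driver sketch; assumes the mesh file for "mesh_0001" already sits in
# consts.MESH_CACHE_DIR and that `params` matches what dexnet_processor.preprocess_mesh
# expects (see dexnet_processor.py above). All literal values here are made up.
import time

from dexnet_worker import DexNetWorker

worker = DexNetWorker("worker_0")
params = {"gripper": {"fingertip_x": 0.01, "fingertip_y": 0.01,
                      "palm_depth": 0.05, "width": 0.08}}
worker.preprocess_mesh("mesh_0001", params)

# Poll coarse progress while the job runs, then drain the result queue.
while worker.busy:
    print("granular progress: {:.0%}".format(worker.progress))
    time.sleep(5)

while worker.has_ret:
    destination, (mesh_id, result) = worker.ret
    print(destination, mesh_id)
```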
|
{
"source": "jennytoo/sensu-community-plugins",
"score": 2
}
|
#### File: plugins/system/uptime-metrics.py
```python
import logging
import logging.handlers
import optparse
import sys
import time
def set_syslog():
try:
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(pathname)s: %(message)s")
handler = logging.handlers.SysLogHandler(address = '/dev/log')
handler.setFormatter(formatter)
logger.addHandler(handler)
except Exception:
logging.critical("Failed to configure syslog handler")
sys.exit(1)
return logger
def uptime(logger):
try:
uptime_file = open('/proc/uptime', 'r')
uptime_data = uptime_file.read().split()
uptime_file.close()
except Exception as e:
logger.critical(e)
sys.exit(1)
up_and_idle_seconds = {}
up_and_idle_seconds['uptime'] = int(round(float(uptime_data[0])))
up_and_idle_seconds['idletime'] = int(round(float(uptime_data[1])))
return up_and_idle_seconds
def print_for_graphite(scheme, metrics, logger):
now = time.time()
try:
for metric in metrics:
print "%s.%s %d %d" % (scheme, metric, metrics[metric], now)
except Exception as e:
logger.critical(e)
sys.exit(1)
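# Example output (illustrative timestamp), one line per metric in Graphite plaintext format:
#   uptime.uptime 86400 1514764800
#   uptime.idletime 43200 1514764800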
def main():
parser = optparse.OptionParser()
parser.add_option('-s', '--scheme',
default = 'uptime',
dest = 'graphite_scheme',
help = 'Metric Graphite naming scheme, text to prepend to metric',
metavar = 'SCHEME')
(options, args) = parser.parse_args()
logger = set_syslog()
metrics = uptime(logger)
print_for_graphite(options.graphite_scheme, metrics, logger)
if __name__ == '__main__':
main()
```
|
{
"source": "JennyVanessa/PANet-Paddle",
"score": 3
}
|
#### File: post_processing/beam_search/beam_search.py
```python
import paddle
from .topk import TopK
class BeamNode(object):
def __init__(self, seq, state, score):
self.seq = seq
self.state = state
self.score = score
self.avg_score = score / len(seq)
def __cmp__(self, other):
if self.avg_score == other.avg_score:
return 0
elif self.avg_score < other.avg_score:
return -1
else:
return 1
def __lt__(self, other):
return self.avg_score < other.avg_score
def __eq__(self, other):
return self.avg_score == other.avg_score
class BeamSearch(object):
"""Class to generate sequences from an image-to-text model."""
def __init__(self, decode_step, eos, beam_size=2, max_seq_len=32):
self.decode_step = decode_step
self.eos = eos
self.beam_size = beam_size
self.max_seq_len = max_seq_len
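# Note on the decode_step contract (as used by beam_search below): calling
# decode_step(inputs, states, k) is expected to return (words, scores, states_new),
# where words[b][i] and scores[b][i] are the i-th of the top-k candidate tokens and
# their scores for batch element b, and states_new[:, :, b, :] is the matching
# updated decoder state.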
def beam_search(self, init_inputs, init_states):
# self.beam_size = 1
batch_size = len(init_inputs)
part_seqs = [TopK(self.beam_size) for _ in range(batch_size)]
comp_seqs = [TopK(self.beam_size) for _ in range(batch_size)]
# print(init_inputs.shape, init_states.shape)
words, scores, states = self.decode_step(init_inputs,
init_states,
k=self.beam_size)
for batch_id in range(batch_size):
for i in range(self.beam_size):
node = BeamNode([words[batch_id][i]],
states[:, :, batch_id, :], scores[batch_id][i])
part_seqs[batch_id].push(node)
for t in range(self.max_seq_len - 1):
part_seq_list = []
for p in part_seqs:
part_seq_list.append(p.extract())
p.reset()
inputs, states = [], []
for seq_list in part_seq_list:
for node in seq_list:
inputs.append(node.seq[-1])
states.append(node.state)
if len(inputs) == 0:
break
inputs = paddle.stack(inputs)
states = paddle.stack(states, axis=2)
words, scores, states = self.decode_step(inputs,
states,
k=self.beam_size + 1)
idx = 0
for batch_id in range(batch_size):
for node in part_seq_list[batch_id]:
tmp_state = states[:, :, idx, :]
k = 0
num_hyp = 0
while num_hyp < self.beam_size:
word = words[idx][k]
tmp_seq = node.seq + [word]
tmp_score = node.score + scores[idx][k]
tmp_node = BeamNode(tmp_seq, tmp_state, tmp_score)
k += 1
num_hyp += 1
if word == self.eos:
comp_seqs[batch_id].push(tmp_node)
num_hyp -= 1
else:
part_seqs[batch_id].push(tmp_node)
idx += 1
for batch_id in range(batch_size):
if not comp_seqs[batch_id].size():
comp_seqs[batch_id] = part_seqs[batch_id]
seqs = [seq_list.extract(sort=True)[0].seq for seq_list in comp_seqs]
seq_scores = [
seq_list.extract(sort=True)[0].avg_score for seq_list in comp_seqs
]
return seqs, seq_scores
```
|
{
"source": "jennywei1995/SC-project",
"score": 4
}
|
#### File: StanCode-Projects/hangman_game/hangman.py
```python
import random
# This constant controls the number of guess the player has
N_TURNS = 7
def main():
"""
This program can play a hangman game.
At the beginning, a random word will be given by the program.
And then the user will have N_TURNS guessing opportunities to find out the word.
If the user made a wrong guess, the user will lose 1 opportunity.
Once user used all turns and still doesn't find the answer, he/she lose the game.
If the user find out the answer before using all guessing turns, the user wins.
"""
word = random_word()
old_ans = dashed(word)
print('You have ' + str(N_TURNS) + ' guesses left.')
guess(word, old_ans)
def random_word():
"""
This function will pick a random word in below choices
:return: str, a random word be chose
"""
num = random.choice(range(9))
if num == 0:
return "NOTORIOUS"
elif num == 1:
return "GLAMOROUS"
elif num == 2:
return "CAUTIOUS"
elif num == 3:
return "DEMOCRACY"
elif num == 4:
return "BOYCOTT"
elif num == 5:
return "ENTHUSIASTIC"
elif num == 6:
return "HOSPITALITY"
elif num == 7:
return "BUNDLE"
elif num == 8:
return "REFUND"
def dashed(word):
"""
This function turns each character of the random word into a dash.
:param word: str, random word given by the program that has to be guessed by the user
:return: ans (called old_ans in the program), str, the dashed word
"""
ans = ''
for ch in word:
if ch.isalpha():
ans += '-'
print('The word looks like ' + ans)
return ans
def guess(word, old_ans):
"""
This function lets the user guess the characters of the word.
It stops either when the user finds the answer,
or when the user has used up all of his/her guessing turns.
:param word: str, random word given by the program that have to be guessed by the user
:param old_ans: str, dashed word, will be compared with the answer that includes user's guess.
"""
life = N_TURNS
while life > 0:
guess_ch = input('Your guess: ')
guess_ch = guess_ch.upper()
if not guess_ch.isalpha() or len(guess_ch) != 1:
print('Illegal format.')
else:
ans = ''
if word.find(guess_ch) == -1:
# when user doesn't find the right character
print('There is no ' + guess_ch + "'s in the word.")
life -= 1
for ch in word:
if ch == guess_ch:
ans += ch
else:
ans += '-'
else:
# when the user makes a correct guess that finds a right character of the word
print('You are correct!')
for ch in word:
if ch != guess_ch:
ans += '-'
else:
ans += guess_ch
new_ans = ''
for i in range(len(old_ans)):
# to keep the previous right guess' result
ch = old_ans[i]
if ch.isalpha():
new_ans += ch
elif ch != ans[i]:
new_ans += guess_ch
else:
new_ans += ch
old_ans = new_ans
if old_ans.isalpha():
# when the user finds all characters of the random word and is still alive
print('You win!!')
print('The word was: '+word)
break
else:
if life > 0:
print('The word looks like '+old_ans)
print('You have '+str(life)+' guesses left.')
# when the user has made wrong guesses and used up all his/her guess opportunities
if life == 0:
print('You are completely hung : (')
print('The word was: '+word)
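# Worked example of the answer-merging logic above (word = 'REFUND'):
#   start          old_ans = '------'
#   guess 'E'  ->  ans = '-E----', new_ans = '-E----'
#   guess 'N'  ->  ans = '----N-', the previously found 'E' is kept, new_ans = '-E--N-'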
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
```
#### File: StanCode-Projects/searching_name_system/milestone1.py
```python
import sys
def add_data_for_name(name_data, year, rank, name):
"""
This function adds the rank that a given name achieved in a specific year
to the name_data dictionary.
:param name_data: dict whose keys are babies' names and whose values are
sub-dicts mapping a specific year to the name's rank in that year
:param year: a specific given year
:param rank: the rank that baby's name holds in a specific year
:param name: baby's name that will be added to the dic
:return: this function does not return any value.
"""
d = name_data
if name in d:
name_dic = d[name]
# if the dic already has the given year and given name's data
if year in name_dic:
# to check the already exit rank of the given year
old = name_dic[year]
# to keep the higher rank (lower number) of the name
if int(old) > int(rank):
# if the new rank's number is lower,
# assign it to the given year
name_dic[year] = rank
else:
# if the year is not yet be added
name_dic[year] = rank
else:
# if the name is not yet in the name_data dic
name_new_data = {year: rank}
d[name] = name_new_data
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}
add_data_for_name(name_data, '1990', '900', 'Sammy')
add_data_for_name(name_data, '2010', '400', 'Kylie')
add_data_for_name(name_data, '2000', '20', 'Kate')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
```
|
{
"source": "jennywwww/exofop-tess-api",
"score": 3
}
|
#### File: exofop-tess-api/etta/helpers.py
```python
import requests
import os
import pandas as pd
from astropy.table import Table
base_url = 'https://exofop.ipac.caltech.edu/tess/'
def call_php_function(func_name, payload, path=None, index_col=None):
"""Call PHP function with given payload and process result.
Args:
func_name (str): Name of PHP function
payload (dict): key-value pairs of query parameters. MUST contain key `'output'`.
path (str, optional): Path to save the result file to.
Only relevant if result output is csv, pipe or text.
* If path provided is a file, the result will be written to that file.
* If path provided is a directory, the file will be saved in the given directory with a default filename.
* If `path` is `None`, the file will be saved in the current directory with a default filename.
Defaults to `None`.
index_col (int, str, sequence of int / str, or False, optional):
Only relevant if `payload['output'] == 'pandas'`.
Column(s) to use as the row labels of the :py:class:`pandas.DataFrame` object, either given
as string name or column index. If a sequence of int / str is given, a MultiIndex is used.
Defaults to None.
Returns:
:py:class:`pandas.DataFrame`, :py:class:`astropy.table.Table` or `None`: if `output='pandas'`,
a :py:class:`pandas.DataFrame` object is returned; if `output='astropy'`, an
:py:class:`astropy.table.Table` is returned.
Otherwise the result is saved to file and the function returns `None`.
"""
url = create_url(func_name, payload)
print(f'Fetching data from {url}')
is_astropy = (payload['output'] == 'astropy')
if payload['output'] == 'pandas' or is_astropy:
df = pd.read_csv(url, delimiter='|', index_col=index_col)
return Table.from_pandas(df) if is_astropy else df
write_to_path(path, url)
def write_to_path(path, url, default_filename=None):
"""Perform a GET request to the given url and write the result to the given path.
Args:
path (str): Path to save the result file to.
Only relevant if result output is csv, pipe or text.
* If path provided is a file, the result will be written to that file.
* If path provided is a directory, the file will be saved in the given directory with a default filename.
* If `path` is `None`, the file will be saved in the current directory with a default filename.
url (str): Request URL.
default_filename (str, optional): Default filename of result, used if path points to a directory.
Defaults to None.
"""
res = requests.get(url, allow_redirects=True)
if not path:
path = '.'
if not default_filename:
default_filename = url.rsplit('/', 1)[-1]
if os.path.isdir(path):
path = os.path.join(path, default_filename)
with open(path, 'wb') as f:
f.write(res.content)
print(f'Result written to {os.path.abspath(path)}')
def create_url(func_name, param_dict):
"""Given a dictionary of query parameters and the PHP function name, form the
corresponding URL.
Args:
func_name (str): Name of PHP function.
param_dict (dict): Key-value pairs of query parameters.
Returns:
`str`: the created URL.
"""
url = f'{base_url}{func_name}.php?'
qsPairs = []
for key, value in param_dict.items():
if key == 'output' and value in ('pandas', 'astropy'):
value = 'pipe'
if value:
qsPairs.append(f'{key}={value}')
url += '&'.join(qsPairs)
return url
```
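A short usage sketch for the helpers above is given below. The `download_toi` endpoint name and its `sort` parameter are assumptions about the ExoFOP-TESS PHP interface and are not defined by this module.
```python
# Illustrative sketch; the endpoint name and query parameters are assumptions.
from etta.helpers import create_url, call_php_function

# Build the request URL without touching the network; 'pandas' output maps to 'pipe'.
url = create_url('download_toi', {'sort': 'toi', 'output': 'pandas'})
# -> 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=pipe'

# The same query as a DataFrame (this line performs an HTTP request):
# df = call_php_function('download_toi', {'sort': 'toi', 'output': 'pandas'})
```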
|
{
"source": "Jennyx18/SiMon",
"score": 2
}
|
#### File: SiMon/SiMon/ic_generator_demo.py
```python
import os
import numpy as np
from SiMon.ic_generator import InitialConditionGenerator
from SiMon import utilities
def generate_ic(output_basedir=os.getcwd()):
# parameter space
a_vec = [1.0, 2.0, 3.0]
o_vec = [3.5, 7.5, 10.5, 16.5]
t_end = 30.0
# templates
code_name = "DemoSimulation"
executable_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "demo_simulation_code.py"
)
start_cmd_template = "python -u %s -a %f -o %f -t %f -p %f 1>%s 2>%s"
restart_cmd_template = "cp ../%s . ; python -u %s -a %f -o %f -t %f -p %f 1>%s 2>%s"
stop_cmd = "touch STOP"
output_dir_template = "demo_sim_t_end=%g_a=%g_e=%g"
# IC generator
ic = InitialConditionGenerator(conf_file="SiMon.conf")
# generate the IC parameter space in the loops
for a in a_vec:
for o in o_vec:
p_crash = 0.01 * np.random.rand()
start_cmd = start_cmd_template % (
executable_path,
a,
o,
t_end,
p_crash,
"output.txt",
"error.txt",
)
restart_cmd = restart_cmd_template % (
"restart.txt",
executable_path,
a,
o,
t_end,
p_crash,
"output.txt",
"error.txt",
)
output_dir = output_dir_template % (t_end, a, o)
ic.generate_simulation_ic(
code_name,
t_end,
output_dir,
start_cmd,
input_file="input.txt",
output_file="output.txt",
error_file="error.txt",
restart_cmd=restart_cmd,
stop_cmd=stop_cmd,
)
if __name__ == "__main__":
generate_ic()
```
#### File: SiMon/SiMon/utilities.py
```python
import sys
import os
import glob
import logging
import toml
import configparser as cp
from SiMon import config
config_file_template = """# Global config file for SiMon
[SiMon]
# The simulation data root directory
Root_dir: examples/demo_simulations
# The time interval for the SiMon daemon to check all the simulations (in seconds) [Default: 180]
Daemon_sleep_time: 180
# The number of simulations to be carried out simultaneously [Default: 2]
Max_concurrent_jobs: 2
# The maximum number of times a simulation will be restarted (a simulation is marked as ERROR when exceeding this limit) [Default: 2]
Max_restarts: 2
# Log level of the daemon: INFO/WARNING/ERROR/CRITICAL [default: INFO]
Log_level: INFO
# The time (in seconds) since the last modification of the output file, beyond which a simulation is considered stalled
Stall_time: 7200
"""
def get_simon_dir():
return os.path.dirname(os.path.abspath(__file__))
def progress_bar(val, val_max, val_min=0, prefix="", suffix="", bar_len=20):
"""
Displays a progress bar in the simulation tree.
:param val: current value
:param val_max: maximum value
:param val_min: minimum value
:param prefix: marker for the completed part
:param suffix: marker for the incomplete part
:param bar_len: total length of the progress bar
:return: a string representation of the progressbar
"""
if val_max == 0:
return ""
else:
skipped_len = int(round(bar_len * val_min) / float(val_max))
filled_len = int(round(bar_len * (val - val_min) / float(val_max)))
# percents = round(100.0 * count / float(total), 1)
bar = (
"." * skipped_len
+ "|" * filled_len
+ "." * (bar_len - filled_len - skipped_len)
)
# return '[%s] %s%s %s\r' % (bar, percents, '%', suffix)
return "%s [%s] %s\r" % (prefix, bar, suffix)
def highlighted_text(text, color=None, bold=False):
colors = ["red", "blue", "cyan", "green", "yellow", "purple", "white", "reset"]
color_codes = ["\033[31m", "\033[34m", "\033[36m", "\033[32m", "\033[0;33m", "\033[0;35m", "\033[0;37m", "\033[0m"]
color_codes_bold = [
"\033[1;31m",
"\033[1;34m",
"\033[1;36m",
"\033[0;32m",
"\033[1;33m",
"\033[1;35m",
"\033[1;37m",
"\033[0;0m",
]
if color not in colors:
color = "reset"
if bold is False:
return "%s%s%s" % (
color_codes[colors.index(color)],
text,
color_codes[colors.index("reset")],
)
else:
return "%s%s%s" % (
color_codes_bold[colors.index(color)],
text,
color_codes_bold[colors.index("reset")],
)
def id_input(prompt):
"""
Prompt to the user to input the simulation ID (in the interactive mode)
"""
confirmed = False
vec_index_selected = []
while confirmed is False:
response = get_input(prompt)
fragment = response.split(",")
for token_i in fragment:
if "-" in token_i: # it is a range
limits = token_i.split("-")
if len(limits) == 2:
try:
if int(limits[0].strip()) < int(limits[1].strip()):
subrange = range(
int(limits[0].strip()), int(limits[1].strip()) + 1
)
for j in subrange:
vec_index_selected.append(j)
except ValueError:
print("Invalid input. Please use only integer numbers.")
continue
else:
try:
int(token_i.strip()) # test integer
vec_index_selected.append(token_i.strip())
except ValueError:
print(
"Invalid input %s. Please use only integer numbers."
% token_i.strip()
)
continue
if (
get_input(
"Your input is \n\t" + str(vec_index_selected) + ", confirm? [Y/N] "
).lower()
== "y"
):
confirmed = True
return list(map(int, vec_index_selected))
else:
vec_index_selected = []
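# Example (illustrative): entering "1-3, 7" at the prompt and confirming with "y"
# returns [1, 2, 3, 7]; ranges are inclusive and entries are comma-separated.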
def get_input(prompt_msg):
"""
Thin wrapper around the built-in input() (the equivalent of raw_input() in Python 2).
"""
return input(prompt_msg)
def generate_conf():
try:
target = open("SiMon.conf", "w")
target.write(config_file_template)
target.close()
except IOError:
print("Unexpected error:", sys.exc_info()[0])
def parse_config_file(config_file, section=None):
"""
Parse the configure file (SiMon.conf) for starting SiMon. The basic information of Simulation root directory
must exist in the configure file before SiMon can start. A minimum configure file of SiMon looks like:
==============================================
[SiMon]
Root_dir: <the_root_dir_of_the_simulation_data>
==============================================
:return: return 0 if succeed, -1 if failed (file not exist, and cannot be created). If the file does not exist
but a new file with default values is created, the method returns 1.
"""
# conf = cp.ConfigParser()
if os.path.isfile(config_file):
# conf.read(config_file)
conf = toml.load(config_file)
if section is not None:
if section in conf:
return conf[section]
else:
raise ValueError('Section %s does not exist in config file %s.' % (section, config_file))
else:
return conf
else:
# raise ValueError('Config file %s does not exist.' % (config_file))
return None
def update_config_file(config_file, config_dict, section=None):
with open(config_file, 'w') as f:
if section is None:
toml.dump(config_dict, f)
else:
config_dict = {section: config_dict}
toml.dump(config_dict, f)
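# Example (illustrative):
#   update_config_file('SiMon.conf', {'Root_dir': 'examples/demo_simulations'}, section='SiMon')
# writes a [SiMon] table containing Root_dir = "examples/demo_simulations" in TOML syntax.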
def print_help():
print("Usage: python simon.py [start|stop|interactive|help]")
print(
"\tTo show an overview of job status and quit: python simon.py (no arguments)"
)
print("\tstart: start the daemon")
print("\tstop: stop the daemon")
print("\tinteractive/i/-i: run in interactive mode (no daemon)")
print("\thelp: print this help message")
def print_task_selector():
"""
Prompt a menu to allow the user to select a task.
:return: current selected task symbol.
"""
opt = ""
while opt.lower() not in [
"l",
"s",
"n",
"r",
"c",
"x",
"t",
"d",
"k",
"b",
"p",
"q",
]:
sys.stdout.write("\n=======================================\n")
sys.stdout.write(
"\tList Instances (L), \n\tSelect Instance (S), "
"\n\tNew Run (N), \n\tRestart (R), \n\tCheck status (C), "
"\n\tStop Simulation (T), \n\tDelete Instance (D), \n\tKill Instance (K), "
"\n\tBackup Restart File (B), \n\tPost Processing (P), \n\tUNIX Shell (X), "
"\n\tQuit (Q): \n"
)
opt = get_input("\nPlease choose an action to continue: ").lower()
return opt
def register_simon_modules(module_dir, user_shell_dir, module_pattern='module_*.py'):
"""
Register modules
:return: A dict-like mapping between the name of the code and the filename of the module.
"""
mod_dict = dict()
module_candidates = glob.glob(os.path.join(module_dir, module_pattern))
module_cwd = glob.glob(
os.path.join(user_shell_dir, module_pattern)
) # load the modules also from cwd
for m_cwd in module_cwd:
module_candidates.append(m_cwd)
for mod_name in module_candidates:
sys.path.append(module_dir)
sys.path.append(os.getcwd())
mod_name = os.path.basename(mod_name)
mod = __import__(mod_name.split(".")[0])
if hasattr(mod, "__simulation__"):
# it is a valid SiMon module
mod_dict[mod.__simulation__] = mod_name.split(".")[0]
return mod_dict
def get_logger(log_level='INFO', log_dir=None, log_file='SiMon.log'):
if config.current_config is not None:
if 'logger' in config.current_config:
return config.current_config['logger']
logger = logging.getLogger("DaemonLog")
if log_level == "DEBUG":
logger.setLevel(logging.DEBUG)
if log_level == "INFO":
logger.setLevel(logging.INFO)
elif log_level == "WARNING":
logger.setLevel(logging.WARNING)
elif log_level == "ERROR":
logger.setLevel(logging.ERROR)
elif log_level == "CRITICAL":
logger.setLevel(logging.CRITICAL)
else:
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - [%(levelname)s] - %(name)s - %(message)s"
)
if log_file is None:
handler = logging.StreamHandler()
else:
if log_dir is None:
log_dir = os.getcwd()
handler = logging.FileHandler(os.path.join(log_dir, log_file))
handler.setFormatter(formatter)
logger.addHandler(handler)
config.current_config['logger'] = logger
return logger
```
#### File: SiMon/SiMon/visualization.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from datetime import datetime
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.collections import LineCollection
from matplotlib import cm
from SiMon.simulation import Simulation
from SiMon.callback import Callback
from matplotlib.ticker import MaxNLocator
import time
class VisualizationCallback(Callback):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def run(self):
self.plot_progress()
def plot_progress(self):
"""
Creates a graph showing the progress of the simulations
:return: None; the figure is saved to kwargs['plot_dir'] (or the current directory).
"""
if 'container' in self.kwargs:
sim_inst_dict = self.kwargs['container'].sim_inst_dict
else:
return
num_sim = len(sim_inst_dict)
status = np.array([])
progresses = np.array([])
sim_idx = np.array([])
for i, sim_name in enumerate(sim_inst_dict):
sim = sim_inst_dict[sim_name]
sim_id = sim.id
if sim_id == 0:
continue # skip the root simulation instance, which is only a place holder
# only plot level=1 simulations
if sim.level > 1:
continue
s = sim.sim_get_status()
if sim.t_max > 0:
p = sim.t / sim.t_max
else:
p = 0.0
status = np.append(s, status)
progresses = np.append(p, progresses)
sim_idx = np.append(sim_id, sim_idx)
# Checks if num_sim has a square
if int(math.sqrt(num_sim) + 0.5) ** 2 == num_sim:
number = int(math.sqrt(num_sim))
y_num = num_sim // number
# If not square, find divisible number to get rectangle
else:
number = int(math.sqrt(num_sim))
while num_sim % number != 0:
number = number - 1
y_num = num_sim // number # Y-axis limit
# If prime number
if number == 1:
number = int(math.sqrt(num_sim)) + 1 # Make sure graph fits all num_sim
y_num = number
# 'Removes' extra white line if graph is too big
if (y_num * number) > num_sim and ((y_num - 1) * number) >= num_sim:
y_num = y_num - 1
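# Worked examples of the layout logic above: num_sim = 12 gives number = 3 and
# y_num = 4 (a 3 x 4 grid); num_sim = 7 (prime) falls through to number = 3,
# y_num = 3, and the final check leaves y_num at 3.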
x_sim = sim_idx % number
y_sim = sim_idx // number
plt.figure(1, figsize=(12, 12))
ax = plt.gca() # get the axis
ax.set_ylim(ax.get_ylim()[::-1]) # invert the axis
ax.xaxis.tick_top() # and move the X-Axis
ax.yaxis.set_ticks(np.arange(-0.5, y_num)) # set y-ticks
ax.yaxis.set_major_locator(MaxNLocator(integer=True)) # set to integers
ax.yaxis.tick_left() # remove right y-Ticks
symbols = ['o', 's', '>', '^', '*', 'x']
labels = ['NEW', 'STOP', 'RUN', 'STALL', 'DONE', 'ERROR']
for i, symbol in enumerate(symbols):
if (status == i).sum() == 0:
continue
else:
plt.scatter(
x_sim[status == i],
y_sim[status == i],
marker=symbol,
s=500,
c=progresses[status == i],
cmap=cm.RdYlBu,
vmin = 0., vmax = 1.,
label=labels[i])
for i in range(sim_idx.shape[0]):
plt.annotate(
text=str(sim_inst_dict[i].id),
xy=(x_sim[i], y_sim[i]),
color='black',
weight='bold',
size=15
)
plt.legend(
bbox_to_anchor=(0., -.15, 1., .102),
loc='lower center',
ncol=4,
mode="expand",
borderaxespad=0.,
borderpad=2,
labelspacing=3
)
plt.colorbar()
# # Save file with a new name
# if os.path.exists('progress.pdf'):
# plt.savefig('progress_{}.pdf'.format(int(time.time())))
# else:
# print('saving figure')
if 'plot_dir' in self.kwargs:
plot_dir = self.kwargs['plot_dir']
else:
plot_dir = os.getcwd()
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
fn = datetime.now().strftime("%d_%m_%Y-%H_%M_%S")
if 'format' in self.kwargs:
fmt = self.kwargs['format']
else:
fmt = 'png'
fullpath = os.path.join(plot_dir, '%s.%s' % (fn, fmt))
print('Progress plot saved on %s' % fullpath)
plt.savefig(fullpath)
plt.close(1)
```
|
{
"source": "JennyXieJiayi/TSMMVED",
"score": 3
}
|
#### File: dataset/split/split_data.py
```python
import os
import numpy as np
import pandas as pd
test_num = 30324
val_num = 30324
total_num = 186637
def split_data(split_idx):
'''
get the indexes for the train, val
and test data
'''
data_idxes = np.random.permutation(total_num)
test_idxes = data_idxes[0: test_num]
val_idxes = data_idxes[test_num: test_num + val_num]
train_idxes = data_idxes[test_num + val_num: ]
if not os.path.exists("{}".format(split_idx)):
os.makedirs("{}".format(split_idx))
pd.DataFrame({"train_idxes" : sorted(train_idxes)}).to_csv("{}/train.txt".format(split_idx) , header=None, index=False)
pd.DataFrame({"val_idxes" : sorted(val_idxes)}).to_csv("{}/val.txt".format(split_idx) , header=None, index=False)
pd.DataFrame({"test_idxes": sorted(test_idxes)}).to_csv("{}/test.txt".format(split_idx), header=None, index=False)
def main():
for i in range(5):
split_data(i)
if __name__ == '__main__':
main()
```
#### File: TSMMVED/popularity_prediction/utils.py
```python
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule():
def __init__(self,
endpoints,
interpolation=linear_interpolation,
outside_value=None):
"""
Piecewise Linear learning schedule.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
### t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
def __call__(self, t):
'''
For compatibility with keras callbacks
'''
return self.value(t)
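# Example (illustrative):
#   sched = PiecewiseSchedule([(0, 1e-3), (100, 1e-4)], outside_value=1e-4)
#   sched.value(50) -> 5.5e-4 (halfway between the two endpoints)
#   sched(150)      -> 1e-4   (past the last endpoint, so outside_value is used)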
```
#### File: TSMMVED/popularity_sequence_prediction/callbacks.py
```python
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import callbacks
from data import *
from layers import ProductOfExpertGaussian as POE
def _name_var_dict():
name_var_dict = {
"lr" : "self.model.optimizer.lr",
"kl_gauss" : "self.model.sampler.gauss_loss.lamb_kl",
}
return name_var_dict
class AnnealEveryEpoch(callbacks.Callback):
'''
Anneal parameters according to some fixed
schedule every time an epoch begins
'''
def __init__(self, name_schedule_dict, **kwargs):
super(AnnealEveryEpoch, self).__init__(**kwargs)
self.name_schedule_dict = name_schedule_dict
def on_train_begin(self, epoch, logs=None):
name_var_dict = _name_var_dict()
self.var_schedule_dict = {
name_var_dict[name]:schedule
for name, schedule in self.name_schedule_dict.items()
}
def on_epoch_begin(self, epoch, logs=None):
for var, schedule in self.var_schedule_dict.items():
K.set_value(eval(var), schedule.value(epoch))
def on_epoch_end(self, epoch, logs=None):
print(), print("|"+"-"*13+"|"+"-"*10+"|")
for var, _ in self.var_schedule_dict.items():
print("|{:^13}|{:^10.5f}|".format(
eval(var).name, K.get_value(eval(var))
))
print("|"+"-"*13+"|"+"-"*10+"|"), print()
class ValidateRecordandSaveBest(callbacks.Callback):
'''
Evaluate model performance on validation set,
record the training dynamic every epoch and
save the best models with the lowest nMSE and the highest correlation, respectively.
'''
def __init__(self, val_gen, rec_path, model_root, **kwargs):
super(ValidateRecordandSaveBest, self).__init__(**kwargs)
self.val_gen = val_gen
self.rec_path = rec_path
self.model_root = model_root
self.best_nmse = np.inf
self.best_corr = -np.inf
def _build_test_model(self):
abst_in = self.model.inputs[-1]
if self.model.encodertype == "user":
uid_in = self.model.inputs[0]
mods_in = self.model.inputs[1]
uid_emb = self.model.get_layer("uid_emb")(uid_in)
uid_emb = self.model.get_layer("uid_emb_reshape")(uid_emb)
concat = layers.Concatenate(axis=-1)([uid_emb, mods_in])
mean_stds = self.model.encoders[0](concat)
mean = mean_stds[0]
input_space = [uid_in] + [mods_in] + [abst_in]
else:
uemb_in = self.model.inputs[0]
mods_in = self.model.inputs[1:-1]
encoders = self.model.encoders
mean_stds = [encoder(i) for encoder, i in zip(encoders, mods_in)]
mean, _ = POE()(mean_stds)
input_space = [uemb_in] + mods_in + [abst_in]
### In validation, use the mode deterministically
pop_sequence = self.model.decoder([mean, abst_in])
pred_model = models.Model(inputs=input_space, outputs=pop_sequence)
return pred_model
def _pearson_corr(self, preds, truth):
corr = 0
num_samples = len(preds)
cnt_samples = num_samples
for i in range(num_samples):
corr_this = pd.Series(preds[i]).corr(pd.Series(truth[i]))
if np.isnan(corr_this):
cnt_samples = cnt_samples-1
continue
corr += corr_this
return corr / cnt_samples
def _nmse(self, preds, truth):
return np.mean(np.square(preds - truth)) / (truth.std()**2)
    def on_train_begin(self, logs=None):
        # Start the record file with a header row.
        with open(self.rec_path, "a") as f:
            f.write("nmse\tcorr\n")

    def on_epoch_end(self, epoch, logs=None):
        pred_model = self._build_test_model()
        num_videos = self.val_gen.num_videos
        batch_size = self.val_gen.batch_size
        timesteps = self.val_gen.timesteps
        preds = np.empty([num_videos, timesteps], dtype=np.float32)
        truth = np.empty([num_videos, timesteps], dtype=np.float32)
        for i, [features, targets] in enumerate(self.val_gen):
            preds_batch = np.squeeze(pred_model.predict(features))
            targets_batch = np.squeeze(targets)
            preds[i * batch_size:(i + 1) * batch_size] = preds_batch
            truth[i * batch_size:(i + 1) * batch_size] = targets_batch
        nmse = self._nmse(preds, truth)
        corr = self._pearson_corr(preds, truth)
        with open(self.rec_path, "a") as f:
            ### Record the training dynamics.
            f.write("{}\t{}\n".format(nmse, corr))
        if nmse < self.best_nmse:
            ### Save the best model for nMSE.
            self.best_nmse = nmse
            self.model.save(os.path.join(self.model_root, "best_nmse.h5"))
        if corr > self.best_corr:
            ### Save the best model for correlation.
            self.best_corr = corr
            self.model.save(os.path.join(self.model_root, "best_corr.h5"))
        ### Print out the current validation metrics.
        print("-" * 10 + "validation" + "-" * 10)
        print(self.rec_path)
        print("curr nmse: {}; curr corr: {}".format(nmse, corr))
        print("best nmse: {}; best corr: {}".format(self.best_nmse, self.best_corr))
        print("-" * 8 + "validation End" + "-" * 8)


if __name__ == "__main__":
    '''
    For testing purposes only.
    '''
    pass
```
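For orientation, a hedged sketch of how these callbacks might be attached to training. It assumes a compiled Keras `model` exposing the `optimizer.lr` and `sampler.gauss_loss.lamb_kl` variables referenced in `_name_var_dict`, plus `train_gen`/`val_gen` generators; the schedule endpoints and file paths are illustrative, not taken from the repository:
```python
# Assumed objects: `model`, `train_gen`, `val_gen`. PiecewiseSchedule is the
# class from utils.py above; "lr" and "kl_gauss" match _name_var_dict().
anneal_cb = AnnealEveryEpoch({
    "lr": PiecewiseSchedule([(0, 1e-3), (30, 1e-4)], outside_value=1e-4),
    "kl_gauss": PiecewiseSchedule([(0, 0.0), (20, 1.0)], outside_value=1.0),
})
val_cb = ValidateRecordandSaveBest(
    val_gen=val_gen,
    rec_path="records/training_log.tsv",   # placeholder path
    model_root="checkpoints/",              # placeholder path
)

model.fit(train_gen, epochs=50, callbacks=[anneal_cb, val_cb])
```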
|
{
"source": "jennyxue1997/DeepLearningLeagueofLegends",
"score": 2
}
|
#### File: DeepLearningLeagueofLegends/app/app.py
```python
from flask import Flask, request, jsonify
from flask_cors import CORS
import predict
import features
import tensorflow as tf
from tensorflow.contrib import predictor
import json

app = Flask(__name__)
CORS(app)


@app.route('/', methods=["POST"])
def predict_winning_team():
    # Delegate to predict.get_winning_team, which parses the request payload
    # and runs the warm-started estimator.
    return jsonify(predict.get_winning_team(request, model, orderedTeamGames))


if __name__ == "__main__":
    # Restore the trained checkpoint, then warm-start a DNNClassifier from the
    # same checkpoint for serving.
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph("model.ckpt-1000.meta")
        saver.restore(sess, "model.ckpt-1000")
    ws = tf.estimator.WarmStartSettings(ckpt_to_initialize_from="model.ckpt-1000")
    model = tf.estimator.DNNClassifier(
        model_dir='model/',
        hidden_units=[15, 15, 15, 15, 15, 15],
        feature_columns=features.feature_columns,
        n_classes=2,
        label_vocabulary=['B', 'R'],
        optimizer=tf.train.ProximalAdagradOptimizer(
            learning_rate=0.1,
            l1_regularization_strength=0.01
        ),
        warm_start_from=ws
    )
    with open("OrderedTeamGames.json", "r") as infile:
        orderedTeamGames = json.load(infile)
    app.run()
```
#### File: DeepLearningLeagueofLegends/app/predict.py
```python
import features
import json
import tensorflow as tf
from tensorflow.contrib import predictor


def get_winning_team(request, model, ordered_team_games):
    # The POST body is expected to carry the two team compositions under
    # payload["blue"]["blueTeam"] and payload["red"]["redTeam"].
    payload = json.loads(request.data)
    blue_team = payload["blue"]["blueTeam"]
    red_team = payload["red"]["redTeam"]
    print(payload)
    test_input_fn = features.get_test_input(blue_team, red_team, ordered_team_games)
    predictions = list(model.predict(input_fn=test_input_fn))
    print(float(predictions[0]["probabilities"][0]))
    # Probabilities are ordered according to label_vocabulary=['B', 'R'].
    return {"blue": float(predictions[0]["probabilities"][0]),
            "red": float(predictions[0]["probabilities"][1])}
```
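For illustration, a sketch of a client request against the Flask route defined in `app.py`; only the payload keys (`"blue"`/`"blueTeam"`, `"red"`/`"redTeam"`) come from `predict.py`, while the host, port, and team entries are placeholder assumptions:
```python
# Hypothetical client call; champion names and the local URL are placeholders.
import requests

payload = {
    "blue": {"blueTeam": ["ChampionA", "ChampionB", "ChampionC", "ChampionD", "ChampionE"]},
    "red": {"redTeam": ["ChampionF", "ChampionG", "ChampionH", "ChampionI", "ChampionJ"]},
}
resp = requests.post("http://127.0.0.1:5000/", json=payload)
print(resp.json())  # e.g. {"blue": 0.57, "red": 0.43}
```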
|